"""Valid URL Configuration for testing purposes"""
from django.views.generic import RedirectView
GITHUB = RedirectView.as_view(
url="https://github.com/jambonsw/django-url-check"
)
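# A minimal usage sketch (not part of the original file): wiring the
# redirect view into urlpatterns; the "github/" route name is hypothetical.
from django.urls import path

urlpatterns = [
    path("github/", GITHUB, name="github"),
]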
| python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A node transformer that includes utilities for SCT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import gast
import six
from tensorflow.contrib.py2tf.pyct import pretty_printer
class PyFlowParseError(SyntaxError):
pass
class Base(gast.NodeTransformer):
"""Base class for specialized transformers."""
def __init__(self, context):
"""Initialize the transformer. Subclasses should call this.
Args:
context: An EntityContext.
"""
self._lineno = 0
self._col_offset = 0
self.context = context
def visit(self, node):
# Read the context info before the try block so the except handler
# below can always reference source_code and source_file.
source_code = self.context.source_code
source_file = self.context.source_file
try:
if source_code and hasattr(node, 'lineno'):
self._lineno = node.lineno
self._col_offset = node.col_offset
return super(Base, self).visit(node)
except (ValueError, AttributeError, NotImplementedError) as e:
msg = '%s: %s\nOccurred at node:\n%s' % (e.__class__.__name__, str(e),
pretty_printer.fmt(node))
if source_code:
line = source_code.splitlines()[self._lineno - 1]
else:
line = '<no source available>'
six.reraise(PyFlowParseError,
PyFlowParseError(
msg,
(source_file, self._lineno, self._col_offset + 1, line)),
sys.exc_info()[2])
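# A minimal sketch (not from the original module) of a specialized
# transformer built on Base: it renames a hypothetical variable 'x' to 'y',
# relying on gast's standard NodeTransformer dispatch.
class RenameX(Base):

  def visit_Name(self, node):
    node = self.generic_visit(node)
    if node.id == 'x':
      node.id = 'y'
    return node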
| python |
#!/usr/bin/env python3
import os
import sys
import argparse
import tempfile
import shutil
from saturation.utils import (normalize_args, export_to_file, get_macs_command_line, parse_outputs, save_plot)
def get_parser():
parser = argparse.ArgumentParser(description='SatScript', add_help=True)
parser.add_argument("-b", "--bam", type=str, help="Path to the BAM file", required=True)
parser.add_argument("-m", "--macs", type=str, help="Path to the MACS2 log file", required=True)
parser.add_argument("-o", "--output", type=str, help="Output filename prefix", default="./")
parser.add_argument("-s", "--suffix", type=str, help="Output suffixes for reads, islands, surface, frip and saturation files", nargs=5,
default=["reads.png", "islands.png", "surface.png", "frip.png", "saturation.txt"])
parser.add_argument("-p", "--percentage", type=str, help="Target percentage", nargs="*", default=["25", "50", "75", "90", "95", "98", "99", "99.5", "100"])
parser.add_argument("-t", "--temp", type=str, help="Temp folder", default=".")
parser.add_argument("-r", "--resolution", type=int, help="Output picture resolution, dpi", default=85)
return parser
def export_results(args, output_data):
percent = [line[0] for line in output_data]
total_mapped = [line[1] for line in output_data]
macs2_reads = [line[2] for line in output_data]
islands = [line[3] for line in output_data]
surface = [line[4] for line in output_data]
frip_score = [line[5] for line in output_data]
save_plot(filename=args.output + args.suffix[0],
res_dpi=args.resolution,
title="Reads",
x_data=percent,
y_data=[total_mapped, macs2_reads],
labels=["Total mapped reads", "Reads used by MACS"],
styles=["ro-", "bo-"],
axis=["%", "reads"])
save_plot(filename=args.output + args.suffix[1],
res_dpi=args.resolution,
title="Islands",
x_data=percent,
y_data=[islands],
labels=["islands"],
styles=["bo-"],
axis=["%", "islands"])
save_plot(filename=args.output + args.suffix[2],
res_dpi=args.resolution,
title="Surface",
x_data=percent,
y_data=[surface],
labels=["surface"],
styles=["bo-"],
axis=["%", "surface, bp"])
save_plot(filename=args.output + args.suffix[3],
res_dpi=args.resolution,
title="Fraction of Reads in Peaks",
x_data=percent,
y_data=[frip_score],
labels=["FRIP Score"],
styles=["bo-"],
axis=["%", "FRIP Score, %"],
y_max=100)
export_to_file(args.output + args.suffix[4], "\n".join([" ".join(map(str, line)) for line in output_data]))
def main(argsl=None):
if argsl is None:
argsl = sys.argv[1:]
args, _ = get_parser().parse_known_args(argsl)
args = normalize_args(args, ["percentage", "suffix", "output", "resolution"])
print(args)
macs_command_line = get_macs_command_line(args.macs)
temp_folder = tempfile.mkdtemp(prefix=os.path.join(args.temp, "tmp_"))
try:
output_data = []
for target_percent in args.percentage:
randsample_output = os.path.join(temp_folder, target_percent + ".bed")
callpeak_output = os.path.join(temp_folder, target_percent)
bedmap_output = os.path.join(temp_folder, target_percent + "_reads_at_peaks.txt")
randsample_cmd = " ".join(["macs2", "randsample", "-t", args.bam, "-p", target_percent, "-o", randsample_output])
print("Run:", randsample_cmd)
os.system(randsample_cmd)
callpeak_cmd = " ".join(["macs2", macs_command_line, "-t", randsample_output, "-n", callpeak_output])
print("Run:", callpeak_cmd)
os.system(callpeak_cmd)
broad_peak_file = callpeak_output + "_peaks.broadPeak"
narrow_peak_file = callpeak_output + "_peaks.narrowPeak"
peak_file = broad_peak_file if os.path.exists(broad_peak_file) else narrow_peak_file
bedmap_cmd = " ".join(["bedmap --bp-ovr 1 --count", randsample_output, peak_file, " | awk '{s=($1>0)?s+1:s}; END{print s}' > ", bedmap_output])
print("Run:", bedmap_cmd)
os.system(bedmap_cmd)
result = parse_outputs(xlsfile=callpeak_output + "_peaks.xls",
bedmap_output=bedmap_output,
target_percent=target_percent)
output_data.append(result)
export_results(args, output_data)
except Exception as err:
print("Error", err)
raise
finally:
shutil.rmtree(temp_folder)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| python |
def decorate(func):
def decorated():
print("==" * 20)
print("before")
func()
print("after")
return decorated
@decorate
def target():
print("target 함수")
target()
## output
"""
========================================
before
target function
after
"""
def target2():
print("target2 함수 실행함")
target2 = decorate(target2)
target2()
## output
"""
========================================
before
target2 function executed
after
"""
| python |
"""This script is used to plot a gene2vec embedding"""
# imports
import argparse
import pandas as pd
import numpy as np
import plotly.express as px
import mygene
import math
import os
# describe program
parser = argparse.ArgumentParser(description='Plots an embedding of a gene2vec hidden layer.')
# arguments
parser.add_argument('--embedding',
type=str,
help='File path of the gene2vec embedding to be plotted.')
parser.add_argument('--out',
type=str,
help='File path of output plot.',
default=None)
parser.add_argument('--plot-title',
dest='plot_title',
type=str,
help='Custom title for plot.',
default=None)
parser.add_argument('--alg',
type=str,
choices=['umap', 'pca', 'mds', 'tsne'],
default='umap',
help='The dimension reduction algorithm used to produce the embedding.')
parser.add_argument('--species',
default=9606,
help='Species name or taxid used to generate the gene embedding.')
parser.add_argument('--dim',
type=int,
default=2,
help='Dimension of the embedding.')
# parse args
args = parser.parse_args()
# user defined functions
def load_embedding(filename):
geneList = list()
vectorList = list()
f = open(filename)
for line in f:
values = line.split()
gene = values[0]
vector = np.asarray(values[1:], dtype="float32")
geneList.append(gene)
vectorList.append(vector)
f.close()
return np.asarray(vectorList), np.asarray(geneList)
def infer_gene_rep(x) -> str:
# check for entrez id
if type(x) == int:
return 'Entrez ID'
elif type(x) == str:
# check for ensembl id
if 'ENS' in x:
return 'Ensembl ID'
else:
# default is gene symbol
return 'Gene Symbol'
def query_gene_info(gene_ids, species=9606):
# infer type of gene id
gene_rep = infer_gene_rep(gene_ids[0].item())
# build querying object
mg = mygene.MyGeneInfo()
# execute query based on species and gene rep
if gene_rep == "Gene Symbol":
gene_info = mg.querymany(gene_ids, scopes='symbol', species=species, as_dataframe=True)
gene_info = gene_info.groupby("symbol").agg(unique_non_null)
gene_info["symbol"] = gene_info.index
return gene_info
elif gene_rep == "Entrez ID":
gene_info = mg.querymany(gene_ids, scopes='entrezgene', species=species, as_dataframe=True)
gene_info = gene_info.groupby("entrezgene").agg(unique_non_null)
gene_info["entrezgene"] = gene_info.index
return gene_info
elif gene_rep == "Ensembl ID":
gene_info = mg.getgenes(gene_ids, fields='name,symbol,entrezgene,taxid', as_dataframe=True)
gene_info = gene_info.groupby("query").agg(unique_non_null)
gene_info["query"] = gene_info.index
return gene_info
def unique_non_null(x):
# drop na entry and get unique values
y = x.dropna().unique()
if y.size == 1:
return y.item()
elif y.size == 0:
return pd.NA
else:
return y
if __name__=="__main__":
# load gene2vec embedding
print("\nRunning:")
print(f"\t[*] Loading the Gene2vec embedding: {os.path.abspath(args.embedding)}...")
wv, vocabulary = load_embedding(args.embedding)
print(f"\t\t- Number of Genes: {'{:,}'.format(vocabulary.size)}.")
print(f"\t\t- Embedding Dimension: {wv.shape[1]}.")
# find gene info
print(f"\t[*] Querying NCBI for gene info...")
gene_info = query_gene_info(vocabulary, args.species)
# define dimension reduction algorithm
if args.alg == 'umap':
from umap import UMAP
reduce = UMAP(n_components=args.dim)
elif args.alg == 'pca':
    from sklearn.decomposition import PCA
    reduce = PCA(n_components=args.dim, whiten=True)
# the argparse choices also allow 'mds' and 'tsne'; handle them so the
# script does not crash with a NameError on 'reduce'
elif args.alg == 'mds':
    from sklearn.manifold import MDS
    reduce = MDS(n_components=args.dim)
elif args.alg == 'tsne':
    from sklearn.manifold import TSNE
    reduce = TSNE(n_components=args.dim)
# reduce dimension
print(f"\t[*] Reducing the dimension of Gene2vec embedding with {args.alg.upper()}(dim={args.dim})...")
wv_red = reduce.fit_transform(wv)
# create dataframe for plotting
gene_rep = infer_gene_rep(vocabulary[0].item())
df = pd.DataFrame(index=vocabulary, data=wv_red)
df.loc[gene_info.index.values, "Gene Symbol"] = gene_info['symbol']
df.loc[gene_info.index.values, "Tax ID"] = gene_info['taxid']
df.loc[gene_info.index.values, "Entrez ID"] = gene_info['entrezgene']
df.loc[gene_info.index.values, "Name"] = gene_info['name']
if gene_rep == "Ensembl ID":
df.loc[vocabulary, "Ensembl ID"] = vocabulary
elif gene_rep == "Gene Symbol":
df.loc[vocabulary, "Gene Symbol"] = vocabulary
elif gene_rep == "Entrez ID":
df.loc[vocabulary, "Entrez ID"] = vocabulary
# replace na
df.fillna('NA', inplace=True)
# generate hover data
hover_data = df.filter(regex="Symbol|ID|Name").columns
hover_data = {col: True for col in hover_data}
# format columns
col_dict = {0: f'{args.alg.upper()} 1', 1: f'{args.alg.upper()} 2', 2: f'{args.alg.upper()} 3'}
df.rename(columns=col_dict, inplace=True)
# plot
print("\t[*] Generating interactive plot via plotly...")
if args.dim == 2:
fig = px.scatter(df, x=col_dict[0], y=col_dict[1],
hover_data=hover_data,
#color_continuous_scale="RdBu",
#opacity=.7,
size_max=8)
fig.update_traces(marker=dict(color='rgba(255, 255, 255, 0.1)'))
if args.dim == 3:
fig = px.scatter_3d(df, x=col_dict[0], y=col_dict[1], z=col_dict[2],
hover_data=hover_data,
#color_continuous_scale="RdBu",
#opacity=.7,
size_max=8)
fig.update_traces(marker=dict(color='rgba(10, 10, 10, 0.01)'))
# update plot layout
if args.plot_title is None:
args.plot_title = f"Gene2vec Embedding using {args.alg.upper()}"
fig.update_layout(template='plotly_dark',
title=args.plot_title,
font=dict(size=18))
# save to file
if args.out is None:
embedding_name = os.path.splitext(os.path.basename(args.embedding))[0]  # splitext: rstrip('.txt') would strip characters, not the suffix
args.out = f"../figures/{embedding_name}_{args.alg}_{args.dim}.html"
fig.write_html(args.out)
fig.write_json(args.out.replace('.html', '.json'))
print(f"\t[*] Plot saved to {os.path.abspath(args.out)}(.json).")
print("Complete!\n")
| python |
import heapq

class MedianFinder:
    # One standard approach for this skeleton: a max-heap (negated values)
    # for the lower half and a min-heap for the upper half.
    def __init__(self):
        self.small = []  # lower half (max-heap via negation)
        self.large = []  # upper half (min-heap)

    def addNum(self, num: int) -> None:
        heapq.heappush(self.small, -num)
        heapq.heappush(self.large, -heapq.heappop(self.small))
        if len(self.large) > len(self.small):
            heapq.heappush(self.small, -heapq.heappop(self.large))

    def findMedian(self) -> float:
        if len(self.small) > len(self.large):
            return float(-self.small[0])
        return (-self.small[0] + self.large[0]) / 2
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
| python |
from sys import stdin
mb_per_month = int(stdin.readline())
n_of_months = int(stdin.readline())
current_num_of_mb = mb_per_month
for n in range(n_of_months):
current_num_of_mb = (current_num_of_mb - int(stdin.readline())) + mb_per_month
print(current_num_of_mb)
| python |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.test import Client, TestCase  # noqa: F401
from django.urls import reverse
from django.utils import timezone
from .models import Choice, Question
class PollViewTests(TestCase):
def setUp(self):
question = Question(
question_text="This is a test question",
pub_date=timezone.now()
)
question.save()
self.question = question
choice = Choice(
choice_text="This is a test choice",
votes=0
)
choice.question = question
choice.save()
self.choice = choice
self.client = Client()
def test_index_view(self):
response = self.client.get('/')
assert response.status_code == 200
assert self.question.question_text in str(response.content)
def test_detail_view(self):
response = self.client.get(
reverse('polls:detail', args=(self.question.id,)))
assert response.status_code == 200
assert self.question.question_text in str(response.content)
assert self.choice.choice_text in str(response.content)
def test_results_view(self):
response = self.client.get(
reverse('polls:results', args=(self.question.id,)))
assert response.status_code == 200
assert self.question.question_text in str(response.content)
assert self.choice.choice_text in str(response.content)
| python |
from typing import Any, Callable, List, TypeVar
from vkwave.bots.core.dispatching.filters.base import (
BaseFilter,
AsyncFuncFilter,
SyncFuncFilter,
)
from vkwave.bots.core.dispatching.handler.base import BaseHandler
from vkwave.bots.core.dispatching.handler.record import HandlerRecord
F = TypeVar("F", bound=Callable[..., Any])
class HandlerRegistrar:
def __init__(self):
self.default_filters: List[BaseFilter] = []
self.handlers: List[BaseHandler] = []
def add_default_filter(self, filter: BaseFilter):
if isinstance(filter, (AsyncFuncFilter, SyncFuncFilter)):
raise ValueError(
    "Default filters must be concrete filter classes derived from "
    "`BaseFilter`; function-based filters cannot be used as defaults"
)
self.default_filters.append(filter)
def with_decorator(self, *filters: BaseFilter):
def decorator(func: Callable[..., Any]):
record = self.new()
record.with_filters(*filters)
record.handle(func)
handler = record.ready()
self.register(handler)
return func
return decorator
def new(self) -> HandlerRecord:
record = HandlerRecord()
return record
def register(self, handler: BaseHandler):
for dfilter in self.default_filters:
to_include: bool = True
for afilter in handler.filter_manager.filters:
if type(dfilter) is type(afilter):
to_include = False
break
if to_include:
handler.filter_manager.add_filter(dfilter)
self.handlers.append(handler)
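# A usage sketch grounded in the API above; it assumes vkwave is installed
# and that HandlerRecord accepts a record with no filters. The callback is
# a placeholder.
registrar = HandlerRegistrar()

@registrar.with_decorator()
def on_event(event):
    pass

assert len(registrar.handlers) == 1  # the decorator built and registered a handler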
| python |
#!/usr/bin/python
import os
import re
from optparse import OptionParser
SUFFIX=".out"
def main () :
global filename
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="the file to update", metavar="FILE")
parser.add_option("-n", "--name", dest="name",
help="the name to replace the original name with", metavar="NAME")
parser.add_option("-c", "--fromname", dest="fromname",
help="the name be replaced", metavar="FROMNAME")
(options, args) = parser.parse_args()
if not options.filename:
    print("You must specify the file to modify")
    exit(-1)
if not options.name:
    print("You must specify the replacement name")
    exit(-1)
if not options.fromname:
    print("You must specify the name to be replaced")
    exit(-1)
fin = open(options.filename, 'r')
fout = open(options.filename + SUFFIX, 'w')
for line in fin :
fout.write(re.sub(options.fromname, options.name, line))
fin.close()
fout.close()
main()
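# Example invocation (hypothetical file names): replaces every 'Tim' with
# 'Alice' in story.txt and writes the result to story.txt.out:
#   ./rename.py --file story.txt --fromname Tim --name Alice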
| python |
import vvx_nego
if __name__ == "__main__":
# Replace the "hoge" part with the path where the engine lives before running
vvn = vvx_nego.VoicevoxNegotiation("hoge\\run.exe")
vvn.request_audio_query("これは", speaker=1)
vvn.request_synthesis(vvn.audio_query, speaker=1)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_audio_query("読み上げを実行する", speaker=3)
vvn.request_synthesis(vvn.audio_query, speaker=3)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_audio_query("サンプルコードです", speaker=5)
vvn.request_synthesis(vvn.audio_query, speaker=5)
vvn.multi_synthesis.append(vvn.synthesis)
vvn.request_connect_waves(vvn.multi_synthesis)
# This will play audio
vvn.local_play_synthesis(vvn.synthesis)
input()
| python |
__author__ = 'Aditya Roy'
import unittest
from time import sleep
from WebAutomation.Test.TestUtility.ScreenShot import SS
from WebAutomation.Src.PageObject.Pages.ConfirmationPage import Confirmation
from WebAutomation.Src.PageObject.Pages.HomePage import Home
from WebAutomation.Src.TestBase.EnvironmentSetUp import EnvironmentSetup
from WebAutomation.Src.PageObject.Pages.RegistrationPage import Register
class MercuryTours_Registration(EnvironmentSetup):
def test_RegistrationFlow(self):
# Screenshots relative paths
ss_path = "/Test_MercuryTours_Registration/"
driver = self.driver
self.driver.get("http://newtours.demoaut.com")
self.driver.set_page_load_timeout(20)
# Creating object of SS screenshots utility
ss = SS(driver)
#calling home page object to click on Register Link
home = Home(driver)
if home.getRegister().is_displayed():
print("Register Link displaying")
home.getRegister().click()
sleep(4)
#calling registration page object to proceed with registration flow
reg = Register(driver)
if reg.getRegis_txt().is_displayed():
print(reg.regis_txt.text)
ss.ScreenShot(ss_path+"Registration.png")
else:
print("Registration page not loaded")
try:
reg.setFirstName("Aditya")
reg.setLastName("Roy")
reg.setPhone("7501498896")
reg.setEmail("[email protected]")
reg.setCountry("INDIA")
reg.setUserName("[email protected]")
reg.setPassword(123456)
reg.setConfirmPassword(123456)
sleep(2)
ss.ScreenShot(ss_path+"RegistrationData.png")
reg.submitRegistration()
sleep(4)
ss.ScreenShot(ss_path+"PostRegistration.png")
except Exception as e:
print("Exception occurred "+e)
#calling Post Registration check
post = Confirmation(driver)
print(post.thankYou.text)
if "[email protected]" in post.UserID.text:  # str.find() returns an index, so it cannot be used as a boolean here
    print("Registration Process Successful")
else:
    print("User Failed to register properly")
if __name__ == '__main__':
unittest.main()
| python |
# Write rows to a CSV file
import csv
with open('data.csv', 'w', newline='') as csvfile:  # newline='' avoids blank rows on Windows
    writer = csv.writer(csvfile)
writer.writerow(['id', 'name', 'age'])
writer.writerow(['10001', 'Mike', 20])
writer.writerow(['10002', 'Bob', 22])
writer.writerow(['10003', 'Jordan', 21])
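# Read-back sketch for the file written above; csv always yields strings:
with open('data.csv', newline='') as csvfile:
    for row in csv.reader(csvfile):
        print(row)  # e.g. ['10001', 'Mike', '20']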
| python |
#!/usr/bin/env python
import decimal
import hashlib
import json
import sys
# Parse the query.
query = json.load(sys.stdin)
# Build the JSON template.
boolean_keys = [
'ActionsEnabled',
]
list_keys = [
'AlarmActions',
'Dimensions',
'InsufficientDataActions',
'OKActions',
]
alarm = {}
for key, value in query.items():
if key in boolean_keys:
value = value.lower() in ('1', 'true')
elif key in list_keys:
value = json.loads(value)
if value:
alarm[key] = value
content = json.dumps(alarm, indent=2, sort_keys=True)
etag = hashlib.md5(content.encode('utf-8')).hexdigest()
# Output the result to Terraform.
json.dump({
'key': etag,
'content': content,
'etag': etag,
}, sys.stdout, indent=2)
sys.stdout.write('\n')
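# A quick local test sketch (the script file name and ARN are hypothetical):
# Terraform's external data source sends the query as JSON on stdin and
# reads a JSON object back from stdout.
import subprocess

query = {'ActionsEnabled': 'true',
         'AlarmActions': '["arn:aws:sns:us-east-1:123456789012:alerts"]'}
proc = subprocess.run(['python', 'alarm_template.py'],
                      input=json.dumps(query),
                      capture_output=True, text=True, check=True)
print(json.loads(proc.stdout)['etag'])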
| python |
from django.urls import path
from .ajax import CustomerRequirementAjaxView
urlpatterns = [
path('customer/', CustomerRequirementAjaxView.as_view(), name='customerRequirementAjax'),
]
| python |
import settings
from PyQt5.QtCore import QObject, QEvent
from PyQt5.QtCore import Qt
from enum import Enum
import cv2
import numpy as np
from skimage.draw import rectangle, line, circle  # circle is used by draw_brush below
class Mode(Enum):
    SHOW = 1
    DRAW = 2
    ERASE = 3
class GrabCutToolInteractor(QObject):
def __init__(self, viewer, parent=None):
super().__init__(parent)
self.viewer = viewer
self.mode = Mode.SHOW  # referenced by the draw_brush methods below
self.rect_start = ()
self.rect_end = ()
self.c = 0
self.bgd_model = np.zeros((1, 65), np.float64)
self.fgd_model = np.zeros((1, 65), np.float64)
self.m_pos = ()
def eventFilter(self, watched_obj, e):
if e.type() == QEvent.MouseButtonPress:
self.on_mouse_pressed(e)
return True
elif e.type() == QEvent.MouseMove:
self.on_mouse_moved(e)
return True
elif e.type() == QEvent.MouseButtonRelease:
self.on_mouse_released(e)
return True
else:
return super().eventFilter(watched_obj, e)
def on_mouse_pressed(self, e):
if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
return
image_coords = self.viewer.pos_to_image_coords(e.pos())
self.rect_start = (image_coords[0], image_coords[1])
self.m_pos = (image_coords[0], image_coords[1])
if e.buttons() == Qt.LeftButton:
self.viewer.tool_mask[image_coords[0], image_coords[1]] = [0, 128, 255, 255]
elif e.buttons() == Qt.RightButton:
self.viewer.tool_mask[image_coords[0], image_coords[1]] = [255, 0, 0, 255]
def on_mouse_moved(self, e):
if not self.rect_start:
return
if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
return
image_coords = self.viewer.pos_to_image_coords(e.pos())
# self.draw_rect(image_coords[0], image_coords[1])
rr, cc = line(self.m_pos[0], self.m_pos[1], image_coords[0], image_coords[1])
if e.buttons() == Qt.LeftButton:
self.viewer.tool_mask[rr, cc] = [0, 128, 255, 255]
elif e.buttons() == Qt.RightButton:
self.viewer.tool_mask[rr, cc] = [255, 0, 0, 255]
self.m_pos = (image_coords[0], image_coords[1])
self.mask_grab_cut()
self.viewer.update_scaled_combined_image()
def draw_rect(self, row, col):
rr, cc = rectangle(self.rect_start, end=(row, col), shape=self.viewer.tool_mask.shape[:2])
self.viewer.tool_mask[rr, cc] = [255, 0, 0, 255]
def on_mouse_released(self, e):
if not self.rect_start:
return
if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
return
image_coords = self.viewer.pos_to_image_coords(e.pos())
self.rect_end = (image_coords[0], image_coords[1])
# self.grab_cut()
# if self.c == 1:
self.mask_grab_cut()
# self.grab_cut()
# self.c = 1
self.rect_start = ()
# self.draw_brush_event(e)
# Erase tool mask
# self.viewer.tool_mask.fill(0)
self.viewer.update_scaled_combined_image()
def mask_grab_cut(self):
print('mask_grab_cut')
# wherever it is marked white (sure foreground), change mask=1
# wherever it is marked black (sure background), change mask=0
mask = np.zeros(self.viewer.image.shape[:2], np.uint8)
mask.fill(2)
# print('before', mask.shape)
# aaa = (self.viewer.tool_mask == [0, 128, 255, 255]).all(axis=2)
# print(aaa.shape)
# print(aaa)
# print('bbb')
mask[np.where((self.viewer.tool_mask == settings.TOOL_FOREGROUND).all(axis=2))] = 1
mask[np.where((self.viewer.tool_mask == settings.TOOL_BACKGROUND).all(axis=2))] = 0
print(np.unique(mask))
# print('after')
try:
mask, self.bgd_model, self.fgd_model = cv2.grabCut(self.viewer.image, mask, None, self.bgd_model, self.fgd_model, 1, cv2.GC_INIT_WITH_MASK)
# mask, self.bgd_model, self.fgd_model = cv2.grabCut(self.viewer.image, mask, None, self.bgd_model,
# self.fgd_model, 5, cv2.GC_INIT_WITH_MASK)
except Exception as e:
    print('mask_grab_cut: grabCut failed:', e)
print(np.unique(mask))
self.viewer.mask[np.where(((mask == 1) | (mask == 3)))] = settings.MASK_COLOR
self.viewer.mask[np.where(((mask == 0) | (mask == 2)))] = settings.NO_MASK_COLOR
def grab_cut(self):
bgd_model = np.zeros((1, 65), np.float64)
fgd_model = np.zeros((1, 65), np.float64)
mask = np.zeros(self.viewer.image.shape[:2], np.uint8)
print(mask.shape)
rect_width = self.rect_end[1] - self.rect_start[1]
rect_height = self.rect_end[0] - self.rect_start[0]
rect = (self.rect_start[1], self.rect_start[0], rect_width, rect_height)
print(rect)
try:
cv2.grabCut(self.viewer.image, mask, rect, bgd_model, fgd_model, 5, cv2.GC_INIT_WITH_RECT)
except Exception as e:
    print('grab_cut: grabCut failed:', e)
# cv2.GC_PR_BGD
# cv2.GC_FGD
# print(np.where((mask == 2) | (mask == 0)))
# self.viewer.mask = np.where((mask == 2) | (mask == 0), settings.MASK_COLOR)
#
# print(mask)
# print(mask.shape)
# mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype(np.uint8)
self.viewer.mask[np.where(((mask == 1) | (mask == 3)))] = settings.MASK_COLOR
self.viewer.mask[np.where(((mask == 0) | (mask == 2)))] = settings.NO_MASK_COLOR
# self.viewer.mask = np.where((mask == 1) | (mask == 3), settings.MASK_COLOR, settings.NO_MASK_COLOR)
# mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
# img = img * mask2[:, :, np.newaxis]
def draw_brush_event(self, e):
if not (self.viewer.has_image() and self.viewer.is_over_image(e.pos())):
return
image_coords = self.viewer.pos_to_image_coords(e.pos())
self.update_mode(e)  # update_mode is not defined in this file; assumed to be provided elsewhere
self.draw_brush(image_coords[0], image_coords[1])
self.viewer.update_scaled_combined_image()
def draw_brush(self, row, col):
# Erase old tool mask
self.viewer.tool_mask.fill(0)
rr, cc = circle(row, col, 22, self.viewer.tool_mask.shape)
# self.tool_mask[rr, cc] = [0, 255, 0, 255]
samples = self.viewer.image[rr, cc][:, 0] # use only first channel
samples = samples.astype(np.float32)
number_of_clusters = 2
if number_of_clusters > samples.size:
return
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = cv2.kmeans(samples, number_of_clusters, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
label = label.ravel() # 2D array (one column) to 1D array without copy
center_pixel_indexes = np.where(np.logical_and(rr == row, cc == col))[0]
if center_pixel_indexes.size != 1: # there are situations, when the center pixel is out of image
return
center_pixel_index = center_pixel_indexes[0]
center_pixel_label = label[center_pixel_index]
if self.mode == Mode.ERASE:
self.viewer.tool_mask[rr, cc] = [0, 0, 255, 255]
else:
brush_circle = self.viewer.tool_mask[rr, cc]
brush_circle[label == center_pixel_label] = [0, 128, 255, 255]
brush_circle[label != center_pixel_label] = [255, 0, 0, 255]
self.viewer.tool_mask[rr, cc] = brush_circle
if self.mode == Mode.DRAW:
brush_circle = self.viewer.mask[rr, cc]
brush_circle[label == center_pixel_label] = settings.MASK_COLOR
self.viewer.mask[rr, cc] = brush_circle
elif self.mode == Mode.ERASE:
self.viewer.mask[rr, cc] = [0, 0, 0, 0]
| python |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 14:12:10 2018
@author: joshcole
#F1_Data Analysis Fake Loan Company
"""
import pandas as pd
loan_data = pd.read_csv("drop_location/train_loan data.csv")
print (loan_data)
| python |
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
def predict_image(im):
    # NOTE: the model is reloaded on every call; hoist this load out of the
    # function if predictions are made repeatedly.
    model = tf.keras.models.load_model('CNN_model.h5', custom_objects={'KerasLayer': hub.KerasLayer})
im = np.asarray(im)
image = tf.image.resize(im, (256, 256))
img = image/255.0
image = tf.expand_dims(img, axis=0)
preds = model.predict(image)
probs, class_idx = tf.math.top_k(preds, k=1)
    class_names = ['Tomato___Bacterial_spot', 'Tomato___Early_blight',
                   'Tomato___Late_blight', 'Tomato___Leaf_Mold',
                   'Tomato___Septoria_leaf_spot',
                   'Tomato___Spider_mites Two-spotted_spider_mite',
                   'Tomato___Target_Spot',
                   'Tomato___Tomato_Yellow_Leaf_Curl_Virus',
                   'Tomato___Tomato_mosaic_virus', 'Tomato___healthy']
classes=[]
for i in class_idx.numpy()[0]:
classes.append(class_names[i])
return classes[0]
| python |
#!/usr/bin/env python3
import os, sys, json
import numpy as np
import pandas as pd
import functools as fct
import collections as cols
from alignclf import create_clf_data
if __name__ == '__main__':
result_dnames = [
'clst-2018-12-generic_50-inc0-net1',
'clst-2018-12-generic_50-inc0-net2',
# 'clst-2018-12-generic_50-inc0-net3',
# 'clst-2018-12-generic_50-inc0-net4',
# 'clst-2018-12-generic_50-inc0-net5',
'clst-2018-12-sese_25-inc0-net1',
'clst-2018-12-sese_25-inc0-net2',
# 'clst-2018-12-sese_25-inc0-net3',
# 'clst-2018-12-sese_25-inc0-net4',
# 'clst-2018-12-sese_25-inc0-net5'
]
# find out the subset of logs
for result_dname in result_dnames:
result_dir = os.path.join('.', 'results-agg', result_dname)
print('Processing {}'.format(result_dname))
model_log_sets = []
dir_map = dict()
for d in os.listdir(result_dir):
dirpath = os.path.join(result_dir, d)
if not os.path.isdir(dirpath):
continue
model_log_set = set()
for replay_d in os.listdir(dirpath):
replay_dirpath = os.path.join(dirpath, replay_d)
if not os.path.isdir(replay_dirpath):
continue
configs_fp = os.path.join(replay_dirpath, 'configs.json')
with open(configs_fp) as f:
configs_dict = json.load(f)
log = configs_dict['log']
model = configs_dict['model']
if 'recomposeStrategy' in configs_dict:
algo_type = 'recomp' + '-' + configs_dict['algorithmType']
else:
algo_type = configs_dict['algorithmType']
if model not in dir_map:
dir_map[model] = cols.defaultdict(list)
dir_map[model][log].append((algo_type, replay_dirpath))
model_log_set.add((model, log))
model_log_sets.append(model_log_set)
model_logs = list(fct.reduce(lambda s1, s2: s1.intersection(s2), model_log_sets))
model_log_dict = cols.defaultdict(list)
for model, log in model_logs:
model_log_dict[model].append(log)
# print('Model and logs: {}'.format(model_logs))
# print('Model log set: {}'.format(model_log_sets))
clf_df_list = list()
for model, logs in model_log_dict.items():
if not logs:
continue
for log in logs:
result_df_dict = dict()
for algo_type, dirpath in dir_map[model][log]:
is_mono = 'recomp' not in algo_type
# print('algo_type: {}'.format(algo_type))
if is_mono:
result_fp = os.path.join(dirpath, 'trace-stats-enriched.csv')
result_df = pd.read_csv(result_fp)
result_df[create_clf_data.RESULT_DIR] = dirpath
result_df = create_clf_data.process_df(result_df)
else:
result_fp = os.path.join(dirpath, 'trace-stats.csv')
result_df = pd.read_csv(result_fp)
result_df[create_clf_data.RESULT_DIR] = dirpath
result_df = create_clf_data.process_recomposing_df(result_df)
result_df_dict[algo_type] = result_df
clf_df = create_clf_data.to_clf_df(result_df_dict)
columns = list(clf_df.columns)
clf_df['model'] = model
clf_df['log'] = log
columns = [('model', ''), ('log', '')] + columns
clf_df = clf_df[columns]
clf_df_list.append(clf_df)
clf_df = pd.concat(clf_df_list, axis=0)
out_fp = os.path.join(result_dir, '{}-predictive-output.csv'.format(result_dname))
clf_df.to_csv(out_fp, index=False)
| python |
from setuptools import setup
from setuptools import find_packages
version = '0.0.1'
classifiers = """
Development Status :: 3 - Alpha
Intended Audience :: Developers
Operating System :: OS Independent
Programming Language :: JavaScript
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
""".strip().splitlines()
setup(
name='dataportal_pmr_services',
version=version,
description='Services of PMR',
long_description=open('README.md').read(),
classifiers=classifiers,
keywords='',
author='Auckland Bioengineering Institute',
url='https://github.com/alan-wu/dataportal_pmr_services',
packages=find_packages('src', exclude=['ez_setup']),
package_dir={'': 'src'},
namespace_packages=['dataportal_map'],
zip_safe=False,
install_requires=[
'setuptools>=12',
'requests',
'pmr2.client @ https://api.github.com/repos/alan-wu/pmr2.client/tarball/scaffold',
'pmr2.wfctrl @ https://api.github.com/repos/PMR2/pmr2.wfctrl/tarball/master',
],
include_package_data=True,
python_requires='>=3.5',
# test_suite="",
)
| python |
#====creating a function for insertion sort==========
def insertion_sort(list1):
#===outer loop================
for i in range(1, len(list1)):
value = list1[i]
j = i-1
while j >= 0 and value < list1[j]:
list1[j+1] = list1[j]
j -= 1
list1[j+1] = value
return list1
#=====driver code===========
list1 = [10, 7, 5, 4, 15]
print("The unsorted list", list1)
print("The sorted list1 is", insertion_sort(list1))
| python |
import requests
url = 'http://localhost:9696/predict'
sample_data_points = [
{'timestamp': '2016-12-22 08:00:00', 't1': 5.0, 't2': 2.0, 'hum': 100.0, 'wind_speed': 13.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 0, 'season': 3}, # actual=2510
{'timestamp': '2016-08-11 15:00:00', 't1': 22.5, 't2': 22.5, 'hum': 51.5, 'wind_speed': 22.0, 'weather_code': 2, 'is_holiday': 0, 'is_weekend': 0, 'season': 1}, # actual=1862
{'timestamp': '2016-12-30 10:00:00', 't1': 4.0, 't2': 1.5, 'hum': 100.0, 'wind_speed': 10.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 0, 'season': 3}, # actual=601
{'timestamp': '2016-12-07 06:00:00', 't1': 10.5, 't2': 10.0, 'hum': 94.0, 'wind_speed': 12.0, 'weather_code': 3, 'is_holiday': 0, 'is_weekend': 0, 'season': 3}, # actual=592
{'timestamp': '2016-11-22 22:00:00', 't1': 8.5, 't2': 7.5, 'hum': 87.0, 'wind_speed': 8.0, 'weather_code': 7, 'is_holiday': 0, 'is_weekend': 0, 'season': 2}, # actual=571
{'timestamp': '2016-12-25 23:00:00', 't1': 13.0, 't2': 13.0, 'hum': 79.5, 'wind_speed': 28.0, 'weather_code': 4, 'is_holiday': 0, 'is_weekend': 1, 'season': 3}, # actual=662
{'timestamp': '2016-12-28 20:00:00', 't1': 3.5, 't2': 1.5, 'hum': 96.5, 'wind_speed': 7.0, 'weather_code': 1, 'is_holiday': 0, 'is_weekend': 0, 'season': 3}, # actual=414
{'timestamp': '2016-12-26 08:00:00', 't1': 8.0, 't2': 5.0, 'hum': 82.0, 'wind_speed': 22.0, 'weather_code': 1, 'is_holiday': 1, 'is_weekend': 0, 'season': 3}, # actual=263
]
details = sample_data_points[3]
prediction = requests.post(url,json=details).json()
print(f"input data: {details}")
print(f"predicted bike shares: {prediction}")
| python |
import random
from vprasanja import slovar, vprasanja_multiple_izbire, riziki
#======================================================================================
# Constant definitions
#======================================================================================
STEVILO_DOVOLJENIH_NAPAK = 5
STEVILO_PRAVILNIH = 9
STEVILO_KVIZ_MULTIPLE = 4
STEVILO_KVIZ_RIZIKI = 8
PRAVILEN_ODGOVOR = "+"
NI_ODGOVORA = "0"
NAPACEN_ODGOVOR = "-"
ZMAGA = "W"
PORAZ = "X"
ZACETEK = "S"
KVIZ_MULTIPLE = "M"
KVIZ_RIZIKI = "R"
#=============================================================================================
# The Igra (game) class
#=============================================================================================
class Igra:
def __init__(self, st_vprasanj):
self.trenutno_vprasanje_idx = 0
self.pravilni_odgovori = 0
self.vprasanja_mul = random.sample(list(vprasanja_multiple_izbire), st_vprasanj)  # [1, 2, ...]
self.vprasanja = random.sample(list(slovar), st_vprasanj)  # [5, 7, ...]
self.vprasanja_riziki = random.sample(list(riziki), 1)  # only one video per game (returns e.g. [1])
def trenutno_vprasanje(self):
if self.pravilni_odgovori >= STEVILO_KVIZ_RIZIKI:
    # print all 5 (or 4) questions
    vpr_2 = int(self.vprasanja_riziki[0])  # returns e.g. 1
    return riziki.get(vpr_2)  # returns {"tip": "tip_2", "vprasanje": [{'vpr': '', 'odg': [odg]}, ...], "mozni_odg": [], "video": "https"}
if self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI):
    vpr_1 = self.vprasanja_mul[self.trenutno_vprasanje_idx]  # returns e.g. 18
    return vprasanja_multiple_izbire.get(vpr_1)  # {'tip': 'tip_1', 'vprasanje': 'How much is the weight in picture 18 worth?', 'odgovor': '0.4', 'mozni_odg': [0.4, 0.5, 0.6], 'slika': 'http'}
else:
    vpr_0 = self.vprasanja[self.trenutno_vprasanje_idx]  # returns e.g. 4
    return slovar.get(vpr_0)  # {'tip': 'tip_0', 'vprasanje': '?', 'primer_odg': '', 'odgovor': ''}
def stevilo_napacnih(self):
return self.trenutno_vprasanje_idx - self.pravilni_odgovori
def stevilo_pravilnih(self):
return self.pravilni_odgovori
def tip_2(self):
return self.pravilni_odgovori == STEVILO_KVIZ_RIZIKI
def tip_1(self):
return self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI)
def zmaga(self):
return self.pravilni_odgovori == STEVILO_PRAVILNIH
def poraz(self):
return self.stevilo_napacnih() > STEVILO_DOVOLJENIH_NAPAK
def enakost_odgovorov(self, odgovor):
if self.pravilni_odgovori >= STEVILO_KVIZ_RIZIKI:
    seznam_vpr = self.trenutno_vprasanje().get('vprasanje')  # [{'vpr': '', 'odg': []}, {vpr: odg}, ...]
    pravilen_odgovor = []
    for slovar_vpr in seznam_vpr:
        for odg in slovar_vpr.get('odg'):
            pravilen_odgovor.append(odg)
    self.trenutno_vprasanje_idx += 1
    # answers from the server: [('odgovor_0', '2 rotaciji'), ('odgovor_1', '3 rotacije')]
    samo_odgovori = []
    for polje, vrednost in odgovor:
        samo_odgovori.append(vrednost)  # ['2 rotaciji', '3 rotacije']
    # for debugging:
    # print(samo_odgovori)
    # print(pravilen_odgovor)
    return samo_odgovori == pravilen_odgovor
if self.pravilni_odgovori in range(STEVILO_KVIZ_MULTIPLE, STEVILO_KVIZ_RIZIKI):
    pravilen_odgovor = self.trenutno_vprasanje().get("odgovor")  # returns e.g. 0.4
    self.trenutno_vprasanje_idx += 1
    return odgovor == pravilen_odgovor  # returns True/False
else:
    pravilen_odgovor = self.trenutno_vprasanje().get("odgovor")  # returns a list
    self.trenutno_vprasanje_idx += 1
    return any(x.upper().replace(" ", "") == odgovor.upper().replace(" ", "") for x in pravilen_odgovor)
    # strip spaces from the server answers and upper-case them; the same is
    # done for the dictionary answers, in case someone adds new questions later
def ugibaj(self, odgovor):
if odgovor == "":
return NI_ODGOVORA  # returns "0"
if self.enakost_odgovorov(odgovor) == True:
self.pravilni_odgovori += 1
if self.tip_2():
return KVIZ_RIZIKI
elif self.tip_1():
return KVIZ_MULTIPLE
if self.zmaga():
return ZMAGA
return PRAVILEN_ODGOVOR
else:
if self.poraz():
return PORAZ
return NAPACEN_ODGOVOR
#===========================================================================================
# Function that returns a new game.
#===========================================================================================
def nova_igra():
return Igra(STEVILO_PRAVILNIH + STEVILO_DOVOLJENIH_NAPAK)
# STEVILO_PRAVILNIH + STEVILO_DOVOLJENIH_NAPAK must not exceed the number of questions in the dictionaries
#================================================================================================
# The Kviz (quiz) class
#================================================================================================
class Kviz:
def __init__(self):
self.igre = {}
def prost_id_igre(self):
if self.igre == {}:
return 0
else:
return max(self.igre.keys()) + 1  # dict_keys([1, 2]): max returns 2, prost_id_igre returns 3
def nova_igra(self):
igra = nova_igra()  # Igra(st_vprasanj)
id_igre = self.prost_id_igre()
self.igre[id_igre] = (igra, ZACETEK)  # igre[id_igre] returns the values at this key
return id_igre
def ugibaj(self, id_igre, odgovor):
igra = self.igre[id_igre][0]
stanje = igra.ugibaj(odgovor)
self.igre[id_igre] = (igra, stanje)  # stanje is one of "R", "M", "W", "X" and "0", "-", "+"
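# A usage sketch grounded in the classes above (the answer string is
# hypothetical; running it requires the vprasanja module and its data):
# kviz = Kviz()
# id_igre = kviz.nova_igra()
# igra, stanje = kviz.igre[id_igre]
# print(igra.trenutno_vprasanje())
# stanje = kviz.ugibaj(id_igre, "odgovor")  # returns "+", "-", "W", "X", ...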
| python |
from .p2pnet import build
# build the P2PNet model
# set training to 'True' during training
def build_model(args, training=False):
return build(args, training)
| python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import abc
import json
import os
import random
from collections import OrderedDict
from pprint import pformat
import pydash as ps
import torch
import torch.nn as nn
import numpy as np
import tensorflow as tf
from ray.tune.trial import Trial, json_to_resources
# -------------------- Seed:Global -------------------- #
def set_global_seeds(seed):
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# -------------------- Info:Describe -------------------- #
def get_cls_name(obj, lower=False):
r"""
Get the class name of an object
"""
class_name = obj.__class__.__name__
if lower:
class_name = class_name.lower()
return class_name
def get_cls_attr(obj):
r"""
Get the class attr of an object as dict
"""
attr_dict = {}
for k, v in obj.__dict__.items():
if hasattr(v, '__dict__'):
val = str(v)
else:
val = v
attr_dict[k] = val
return attr_dict
def describe(cls):
desc_list = [f'{get_cls_name(cls)}:']
for k, v in get_cls_attr(cls).items():
if k == 'config':
continue
elif ps.is_dict(v) or ps.is_dict(ps.head(v)):
desc_v = pformat(v)
else:
desc_v = v
desc_list.append(f'- {k} = {desc_v}') # \t| type -> {type(desc_v)}')
desc = '\n'.join(desc_list)
return desc
# -------------------- Parser:Create -------------------- #
def make_parser(parser_creator=None, **kwargs):
"""Returns a base argument parser for the ray.tune tool.
Args:
parser_creator: A constructor for the parser class.
kwargs: Non-positional args to be passed into the
parser class constructor.
"""
if parser_creator:
parser = parser_creator(**kwargs)
else:
parser = argparse.ArgumentParser(**kwargs)
# Note: keep this in sync with rllib/train.py
parser.add_argument(
"--run",
default=None,
type=str,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
parser.add_argument(
"--stop",
default="{}",
type=json.loads,
help="The stopping criteria, specified in JSON. The keys may be any "
"field returned by 'train()' e.g. "
"'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
"after 600 seconds or 100k iterations, whichever is reached first.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams), "
"specified in JSON.")
parser.add_argument(
"--resources-per-trial",
default=None,
type=json_to_resources,
help="Override the machine resources to allocate per trial, e.g. "
"'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
"unless you specify them here. For RLlib, you probably want to "
"leave this alone and use RLlib configs to control parallelism.")
parser.add_argument(
"--num-samples",
default=1,
type=int,
help="Number of times to repeat each trial.")
parser.add_argument(
"--checkpoint-freq",
default=0,
type=int,
help="How many training iterations between checkpoints. "
"A value of 0 (default) disables checkpointing.")
parser.add_argument(
"--checkpoint-at-end",
action="store_true",
help="Whether to checkpoint at the end of the experiment. "
"Default is False.")
parser.add_argument(
"--keep-checkpoints-num",
default=None,
type=int,
help="Number of last checkpoints to keep. Others get "
"deleted. Default (None) keeps all checkpoints.")
parser.add_argument(
"--checkpoint-score-attr",
default="training_iteration",
type=str,
help="Specifies by which attribute to rank the best checkpoint. "
"Default is increasing order. If attribute starts with min- it "
"will rank attribute in decreasing order. Example: "
"min-validation_loss")
parser.add_argument(
"--export-formats",
default=None,
help="List of formats that exported at the end of the experiment. "
"Default is None. For RLlib, 'checkpoint' and 'model' are "
"supported for TensorFlow policy graphs.")
parser.add_argument(
"--max-failures",
default=3,
type=int,
help="Try to recover a trial from its last checkpoint at least this "
"many times. Only applies if checkpointing is enabled.")
parser.add_argument(
"--scheduler",
default="FIFO",
type=str,
help="FIFO (default), MedianStopping, AsyncHyperBand, "
"HyperBand, or HyperOpt.")
parser.add_argument(
"--scheduler-config",
default="{}",
type=json.loads,
help="Config options to pass to the scheduler.")
# Note: this currently only makes sense when running a single trial
parser.add_argument(
"--restore",
default=None,
type=str,
help="If specified, restore from this checkpoint.")
return parser
# -------------------- Parser:Convert -------------------- #
class DotDict(dict):
"""
Dictionary to access attributes
"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
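# Usage sketch for DotDict: keys double as attributes.
d = DotDict({'lr': 0.001})
d.epochs = 10
print(d.lr, d['epochs'])  # 0.001 10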
| python |
import re
import geopandas as gpd
from ...tessellation import tilers
import shapely
import pytest
poly = [[[116.1440758191, 39.8846396072],
[116.3449987678, 39.8846396072],
[116.3449987678, 40.0430521004],
[116.1440758191, 40.0430521004],
[116.1440758191, 39.8846396072]]]
geom = [shapely.geometry.Polygon(p) for p in poly]
bbox = gpd.GeoDataFrame(geometry=geom, crs="EPSG:4326")
@pytest.mark.parametrize('tiler_type', ["squared", "h3_tessellation"])
@pytest.mark.parametrize('base_shape', ['Beijing, China', bbox])
@pytest.mark.parametrize('meters', [15000])
def test_tiler_get(tiler_type, base_shape, meters):
tessellation = tilers.tiler.get(tiler_type, base_shape=base_shape, meters=meters)
assert isinstance(tessellation, gpd.GeoDataFrame)
# Arrange
@pytest.fixture()
def h3_tess():
return tilers.H3TessellationTiler()
@pytest.mark.parametrize("input_meters, expected_res", [(500, 8), (1500, 7), (5000, 6)])
def test__meters_to_res(h3_tess, input_meters, expected_res):
assert h3_tess._meters_to_res(input_meters) == expected_res
def test__get_appropriate_res(h3_tess):
assert h3_tess._get_appropriate_res(bbox, 5000) == 8
# test that a UserWarning is triggered for input hexagons
# that are larger than the base_shape
def test_warning(h3_tess):
with pytest.warns(UserWarning) as uws:
pattern=r".*Try something smaller.*"
h3_tess._get_appropriate_res(bbox, 50000)
# check that 2 warnings were raised
assert len(uws) == 2
# check that the message matches
assert re.match(pattern, uws[1].message.args[0])
| python |
from flask_wtf import FlaskForm
from wtforms import DecimalField, StringField, SubmitField
from wtforms.validators import DataRequired
class UpdateRatingMovieForm(FlaskForm):
new_rating = DecimalField("Your Rating Out of 10 e.g. 7.5", validators=[DataRequired()])
new_review = StringField("Your Review", validators=[DataRequired()])
submit = SubmitField("Done")
class AddNewMovieForm(FlaskForm):
new_movie_title = StringField("Movie Title", validators=[DataRequired()])
add_button = SubmitField("Add Movie")
| python |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt._1btcxe import _1btcxe
class getbtc (_1btcxe):
def describe(self):
return self.deep_extend(super(getbtc, self).describe(), {
'id': 'getbtc',
'name': 'GetBTC',
'countries': ['VC', 'RU'], # Saint Vincent and the Grenadines, Russia, CIS
'rateLimit': 1000,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/33801902-03c43462-dd7b-11e7-992e-077e4cd015b9.jpg',
'api': 'https://getbtc.org/api',
'www': 'https://getbtc.org',
'doc': 'https://getbtc.org/api-docs.php',
},
'has': {
'fetchTrades': False,
},
'fees': {
'trading': {
'taker': 0.20 / 100,
'maker': 0.20 / 100,
},
},
'markets': {
'BTC/USD': {'lot': 1e-08, 'symbol': 'BTC/USD', 'quote': 'USD', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'USD', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': 'None', 'min': 1e-08}}},
'BTC/EUR': {'lot': 1e-08, 'symbol': 'BTC/EUR', 'quote': 'EUR', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'EUR', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': 'None', 'min': 1e-08}}},
'BTC/RUB': {'lot': 1e-08, 'symbol': 'BTC/RUB', 'quote': 'RUB', 'base': 'BTC', 'precision': {'amount': 8, 'price': 8}, 'id': 'RUB', 'limits': {'amount': {'max': None, 'min': 1e-08}, 'price': {'max': 'None', 'min': 1e-08}}},
},
})
| python |
from __future__ import print_function
from __future__ import absolute_import
import myhdl
from myhdl import instance
# @todo: move "interfaces" to system (or interfaces)
from ...cores.sdram import SDRAMInterface
from ...system import MemoryMapped
# @todo: utilize FIFOBus
from ...system import FIFOBus
def sdram_controller_model(sdram_intf, internal_intf):
""" Model the transaction between the internal bus and external SDRAM
:param sdram_intf: Interface to the SDRAM device
:param internal_intf: Internal interface
:return: myhdl generators
Not convertible.
"""
assert isinstance(sdram_intf, SDRAMInterface)
assert isinstance(internal_intf, (MemoryMapped, )) # @todo: add FIFOBus
# short-cuts
ix, ex = internal_intf, sdram_intf
def translate_address(addr):
#@todo: add correct translation
row_addr, col_addr = 0, addr
return row_addr, col_addr
@instance
def mproc():
"""
Emulated using the interface transactors, performs the
following:
- address translation
- arbitration
"""
while True:
addr = ix.get_address()
row_addr, col_addr = translate_address(addr)
if ix.is_write:
data = ix.get_write_data()
yield ex.write(data, row_addr, col_addr)
yield ix.acktrans()
elif ix.is_read:
yield ex.read(row_addr, col_addr)
read_data = ex.get_read_data()
yield ix.acktrans(read_data)
yield ix.clock.posedge
return mproc
| python |
array = []
for i in range (16):
# array.append([i,0])
array.append([i,5])
print(array) | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from Data_Structure.Linked_List import *
print("** Singly Linked List **")
list1 = Singly_Linked_List.Singly_Linked_List()
for i in range(1, 11):
list1.append(i)
print("-- Added 10 data at the list --")
list1.ListSize()
list1.remove(5)
list1.ListSize()
list1.lprint()
print("\n** Doubly Linked List **")
list2 = Doubly_Linked_List.Doubly_Linked_List()
print("-- Added 20 data at the list --")
for i in range(1, 21):
list2.append(i)
list2.ListSize()
list2.remove(15)
list2.ListSize()
list2.lprint()
| python |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 05:59:46 2018
@author: zefa
"""
import os
import numpy as np
import cv2
MAX_HEIGHT = 720
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
class SequenceControl(object):
def __init__(self, path=None):
self.name = 'not set'
self.path = path
self.max_height = float(MAX_HEIGHT)
def getName(self):
"""
Returns the name of the sequence.
"""
return self.name
def numberOfImages(self):
"""
Returns the number of images in the video.
"""
return self.frameCount
def currentFrameNumber(self):
"""
Returns the current frame number.
"""
return self.fNr
def getImage(self):
"""
Returns the current image of the video or None if video is None.
"""
return self.img
def loadImage(self, fNr, labels, result=None):
"""
Load the selected (fNr) image. Has to be reimplemented by child class.
"""
raise NotImplementedError
def _processImage(self, img, labels, result):
# set rgb ordering
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if result is not None:
self._labelInstances(img, labels, result)
# scale if necessary
if self.scale != 1:
img = cv2.resize(img, None, fx=self.scale, fy=self.scale)
return img
def _labelInstances(self, image, labels, result):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
"""
# get the data
boxes, masks, class_ids, scores = [result[k] for k in ['rois','masks','class_ids','scores']]
selected_ids = [l.getClassIndex() for l in labels]
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
for i in range(N):
if not np.any(boxes[i]):
continue
if class_ids[i] not in selected_ids:
continue
col = labels[selected_ids.index(class_ids[i])].getColor()
# add mask
image = apply_mask(image, masks[:,:,i], col, alpha=0.4)
return image
def __iter__(self):
raise NotImplementedError
| python |
from xicam.plugins.datahandlerplugin import DataHandlerPlugin, start_doc, descriptor_doc, event_doc, stop_doc, \
embedded_local_event_doc
import os
import time
import fabio
import uuid
import re
import functools
import numpy as np  # time and numpy are used by the helper functions at the bottom of this file
from pathlib import Path
class EDFPlugin(DataHandlerPlugin):
name = 'EDFPlugin'
DEFAULT_EXTENTIONS = ['.edf']
descriptor_keys = ['ByteOrder', 'HeaderID', 'VersionNumber', 'Dim_1', 'Dim_2', 'count_time', 'object_keys']
def __init__(self, path):
super(EDFPlugin, self).__init__()
self.path = path
self.fimg = fabio.open(path)
def __call__(self, *args, **kwargs):
return self.fimg.data
@staticmethod
@functools.lru_cache(maxsize=10, typed=False)
def parseTXTFile(path):
p = Path(path)
if not p.suffix == '.txt':
path = str(p.with_suffix('.txt'))
if not os.path.isfile(path):
return dict()
with open(path, 'r') as f:
lines = f.readlines()
paras = dict()
# The 7.3.3 txt format is messy, with keyless values, and extra whitespaces
keylesslines = 0
for line in lines:
cells = [_f for _f in re.split('[=:]+', line) if _f]
key = cells[0].strip()
if cells.__len__() == 2:
cells[1] = cells[1].split('/')[0]
paras[key] = key_cast(key, cells[1].strip())
elif cells.__len__() == 1:
keylesslines += 1
paras['Keyless value #' + str(keylesslines)] = key
return paras
@staticmethod
@functools.lru_cache(maxsize=10, typed=False)
def parseDataFile(path):
md = fabio.open(path).header
md.update({'object_keys': {'pilatus2M': ['primary']}})
return md
def key_cast(key, value):
return conversions[key_type_map.get(key, 'str')](value)
_ALS_KEY_MAP = {
'ABS(Vertical Beam Position)': 'event',
'AI Channel 6': 'event',
'AI Channel 7': 'event',
'AIs': 'event',
'AO Waveform': 'event',
'Alpha_scan_I0_intensities': 'event',
'Alpha_scan_I1_intensities': 'event',
'Alpha_scan_diode_intensities': 'event',
'Alpha_scan_positions': 'event',
'Beam Current Over Threshold': 'event',
'Beam Current': 'event',
'Beamline Pass Beam AI': 'event',
'Beamline Pass Beam': 'event',
'Beamline Shutter AI': 'event',
'Beamline Shutter Closed': 'event',
'Beamline Shutter Open': 'event',
'Beamstop X': 'event',
'Beamstop Y': 'event',
'Bruker pulses': 'event',
'ByteOrder': ['start', 'event'],
'DIOs': 'event',
'DataType': ['start', 'event'],
'Date': ['start', 'event'],
'Detector Horizontal': 'event',
'Detector Left Motor': 'event',
'Detector Right Motor': 'event',
'Detector Vertical': 'event',
'Dim_1': ['descriptor', 'event'],
'Dim_2': ['descriptor', 'event'],
'EZ fast tension stage': 'event',
'Exit Slit bottom': 'event',
'Exit Slit left': 'event',
'Exit Slit right': 'event',
'Exit Slit top': 'event',
'Feedback Interlock': 'event',
'Flight Tube Horizontal': 'event',
'Flight Tube Vertical': 'event',
'GIWAXS beamstop X': 'event',
'GIWAXS beamstop Y thorlabs': 'event',
'GIWAXS beamstop Y': 'event',
'Gate Shutter': 'event',
'Gate': 'event',
'GiSAXS Beamstop Counter': 'event',
'GiSAXS Beamstop': 'event',
'Hacked Ager Stage': 'event',
'HeaderID': ['start', 'event'],
'I1 AI': 'event',
'I1': 'event',
'Image': ['event', 'event'],
'Izero AI': 'event',
'Izero': 'event',
'Keyless value #1': 'event',
'Keyless value #2': 'event',
'Keyless value #3': 'event',
'Kramer strain data': 'event',
'M1 Alignment Tune': 'event',
'M1 Bend': 'event',
'M1 Pitch': 'event',
'M201 Feedback': 'event',
'Mono Angle': 'event',
'Motorized Lab Jack': 'event',
'Motorized Lab Jack1': 'event',
'Motors': 'event',
'PCO Invert': 'event',
'PHI Alignment Beamstop': 'event',
'Pilatus 100K exp out': 'event',
'Pilatus 1M Trigger Pulse': 'event',
'Pilatus 300KW trigger pulse': 'event',
'Printing motor': 'event',
'SAXS Protector': 'event',
'Sample Alpha Stage': 'event',
'Sample Phi Stage': 'event',
'Sample Rotation Stage ESP': 'event',
'Sample Rotation Stage Miller': 'event',
'Sample Rotation Stage': 'event',
'Sample Thickness Stage': 'event',
'Sample X Stage Fine': 'event',
'Sample X Stage': 'event',
'Sample Y Stage Arthur': 'event',
'Sample Y Stage': 'event',
'Sample Y Stage_old': 'event',
'Size': ['descriptor', 'event'],
'Slit 1 in Position': 'event',
'Slit 2 in Position': 'event',
'Slit Bottom Good': 'event',
'Slit Top Good': 'event',
'Slit1 bottom': 'event',
'Slit1 left': 'event',
'Slit1 right': 'event',
'Slit1 top': 'event',
'Sum of Slit Current': 'event',
'Temp Beamline Shutter Open': 'event',
'VersionNumber': ['start', 'event'],
'Vertical Beam Position': 'event',
'Xtal2 Pico 1 Feedback': 'event',
'Xtal2 Pico 1': 'event',
'Xtal2 Pico 2 Feedback': 'event',
'Xtal2 Pico 2': 'event',
'Xtal2 Pico 3 Feedback': 'event',
'Xtal2 Pico 3': 'event',
'count_time': ['descriptor', 'event'],
'run': ['event', 'event'],
'slit1 bottom current': 'event',
'slit1 top current': 'event',
'title': ['event', 'event'],
}
key_type_map = {'HeaderID': 'str',
'Image': 'int',
'VersionNumber': 'str',
'ByteOrder': 'str',
'DataType': 'str',
'Dim_1': 'int',
'Dim_2': 'int',
'Size': 'int',
'Date': 'date',
'count_time': 'float',
'title': 'str',
'run': 'int',
'Keyless value #1': 'float',
'Keyless value #2': 'float',
'Keyless value #3': 'float',
'Motors': 'int',
'Sample X Stage': 'float',
'Sample Y Stage': 'float',
'Sample Thickness Stage': 'float',
'Sample X Stage Fine': 'float',
'Sample Alpha Stage': 'float',
'Sample Phi Stage': 'float',
'M201 Feedback': 'float',
'M1 Pitch': 'float',
'Sample Rotation Stage': 'float',
'M1 Bend': 'float',
'Detector Horizontal': 'float',
'Detector Vertical': 'float',
'Slit1 top': 'float',
'Slit1 bottom': 'float',
'Slit1 right': 'float',
'Slit1 left': 'float',
'Exit Slit top': 'float',
'Exit Slit bottom': 'float',
'Exit Slit left': 'float',
'Exit Slit right': 'float',
'GIWAXS beamstop X': 'float',
'GIWAXS beamstop Y': 'float',
'Beamstop X': 'float',
'Beamstop Y': 'float',
'Detector Right Motor': 'float',
'Detector Left Motor': 'float',
'Motorized Lab Jack': 'float',
'M1 Alignment Tune': 'float',
'EZ fast tension stage': 'float',
'Motorized Lab Jack1': 'float',
'Sample Rotation Stage ESP': 'float',
'Printing motor': 'float',
'GIWAXS beamstop Y thorlabs': 'float',
'Sample Y Stage Arthur': 'float',
'Flight Tube Horizontal': 'float',
'Flight Tube Vertical': 'float',
'Hacked Ager Stage': 'float',
'Sample Rotation Stage Miller': 'float',
'Mono Angle': 'float',
'Xtal2 Pico 1 Feedback': 'float',
'Xtal2 Pico 2 Feedback': 'float',
'Xtal2 Pico 3 Feedback': 'float',
'Xtal2 Pico 1': 'float',
'Xtal2 Pico 2': 'float',
'Xtal2 Pico 3': 'float',
'Sample Y Stage_old': 'float',
'AO Waveform': 'float',
'DIOs': 'int',
'SAXS Protector': 'float',
'Beamline Shutter Closed': 'float',
'Beam Current Over Threshold': 'float',
'Slit 1 in Position': 'float',
'Slit 2 in Position': 'float',
'Temp Beamline Shutter Open': 'float',
'Beamline Shutter Open': 'float',
'Feedback Interlock': 'float',
'Beamline Pass Beam': 'float',
'Gate Shutter': 'float',
'Bruker pulses': 'float',
'Slit Top Good': 'float',
'Slit Bottom Good': 'float',
'AIs': 'int',
'Beam Current': 'float',
'Beamline Shutter AI': 'float',
'Beamline Pass Beam AI': 'float',
'slit1 bottom current': 'float',
'slit1 top current': 'float',
'GiSAXS Beamstop': 'float',
'Izero AI': 'float',
'I1 AI': 'float',
'PHI Alignment Beamstop': 'float',
'AI Channel 6': 'float',
'AI Channel 7': 'float',
'Vertical Beam Position': 'float',
'Pilatus 1M Trigger Pulse': 'float',
'Pilatus 300KW trigger pulse': 'float',
'PCO Invert': 'float',
'Gate': 'float',
'Izero': 'float',
'I1': 'float',
'GiSAXS Beamstop Counter': 'float',
'Sum of Slit Current': 'float',
'Pilatus 100K exp out': 'float',
'Kramer strain data': 'float',
'ABS(Vertical Beam Position)': 'float',
'Alpha_scan_positions': 'tabdelimitedfloat',
'Alpha_scan_I0_intensities': 'tabdelimitedfloat',
'Alpha_scan_I1_intensities': 'tabdelimitedfloat',
'Alpha_scan_diode_intensities': 'tabdelimitedfloat'
}
conversions = {'int': lambda x: int(x.strip()),
'float': lambda x: float(x.strip()),
'str': lambda x: x.strip(),
'date': lambda x: x.strip(),
'tabdelimitedfloat': lambda x: list(map(float, x.split('\t'))) if x else []}
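# Quick illustration (not in the original module) of how a raw header value
# would be parsed with the two maps above:
#   conversions[key_type_map['Dim_1']](' 1024 ')               -> 1024
#   conversions[key_type_map['Alpha_scan_positions']]('1\t2')  -> [1.0, 2.0]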
def _data_keys_from_value(v, src_name, object_name):
kind_map = {'i': 'integer',
'f': 'number',
'U': 'string',
'S': 'string'}
return {'dtype': kind_map[np.array([v]).dtype.kind],
'shape': [],
'source': src_name,
'object_name': object_name}
def _gen_descriptor_from_dict(ev_data, src_name):
data_keys = {}
    configuration = {}
obj_keys = {}
for k, v in ev_data.items():
data_keys[k] = _data_keys_from_value(v, src_name, k)
obj_keys[k] = [k]
        configuration[k] = {'data': {},
                            'data_keys': {},
                            'timestamps': {}}
return {'data_keys': data_keys,
'time': time.time(),
'uid': str(uuid.uuid4()),
            'configuration': configuration,
'object_keys': obj_keys}
| python |
import datetime as dt
def dt_to_str(dt_seconds):
"""
Converts delta time into string "hh:mm:ss"
"""
return str(dt.timedelta(seconds=dt_seconds))
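# Example (a quick sanity check, not part of the original module):
#   dt_to_str(3661) -> '1:01:01'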
| python |
'''
Project: Farnsworth
Author: Karandeep Singh Nagra
'''
from datetime import timedelta
import json
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout, login
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.timezone import now
import inflect
p = inflect.engine()
from utils.variables import ANONYMOUS_USERNAME, MESSAGES
from base.decorators import admin_required, profile_required, \
president_admin_required, ajax_capable
from base.models import UserProfile
from base.redirects import red_home
from managers.models import Manager, RequestType, Request, Response, Announcement
from managers.forms import ManagerForm, RequestTypeForm, RequestForm, ResponseForm, \
ManagerResponseForm, VoteForm, AnnouncementForm, PinForm
from managers.ajax import build_ajax_votes
from threads.models import Thread, Message
def add_archive_context(request):
request_count = Request.objects.all().count()
expired_count = Request.objects.filter(status=Request.EXPIRED).count()
filled_count = Request.objects.filter(status=Request.FILLED).count()
closed_count = Request.objects.filter(status=Request.CLOSED).count()
open_count = Request.objects.filter(status=Request.OPEN).count()
response_count = Response.objects.all().count()
announcement_count = Announcement.objects.all().count()
nodes = [
"{} total {}".format(request_count, p.plural("request", request_count)),
[
"{} {}".format(expired_count, p.plural("expired", expired_count)),
"{} {}".format(filled_count, p.plural("filled", filled_count)),
"{} {}".format(closed_count, p.plural("closed", closed_count)),
"{} {}".format(open_count, p.plural("open", open_count)),
],
"{} {}".format(response_count, p.plural("response", response_count)),
"{} {}".format(announcement_count, p.plural("announcement", announcement_count)),
]
render_list = [
(
"All Requests",
reverse("managers:all_requests"),
"glyphicon-inbox",
            request_count,
),
(
"All Announcements",
reverse("managers:all_announcements"),
"glyphicon-bullhorn",
            announcement_count,
),
]
return nodes, render_list
@admin_required
def anonymous_login_view(request):
''' View for an admin to log her/himself out and login the anonymous user. '''
logout(request)
try:
spineless = User.objects.get(username=ANONYMOUS_USERNAME)
except User.DoesNotExist:
random_password = User.objects.make_random_password()
spineless = User.objects.create_user(username=ANONYMOUS_USERNAME, first_name="Anonymous", last_name="Coward", password=random_password)
spineless.is_active = False
spineless.save()
spineless_profile = UserProfile.objects.get(user=spineless)
spineless_profile.status = UserProfile.ALUMNUS
spineless_profile.save()
spineless.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, spineless)
request.session['ANONYMOUS_SESSION'] = True
messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_LOGIN'])
return HttpResponseRedirect(reverse('homepage'))
@admin_required
def end_anonymous_session_view(request):
''' End the anonymous session if the user is a superuser. '''
request.session['ANONYMOUS_SESSION'] = False
messages.add_message(request, messages.INFO, MESSAGES['ANONYMOUS_SESSION_ENDED'])
return HttpResponseRedirect(reverse('utilities'))
@profile_required
def list_managers_view(request):
''' Show a list of manager positions with links to view in detail. '''
managerset = Manager.objects.filter(active=True)
return render_to_response('list_managers.html', {
'page_name': "Managers",
'managerset': managerset,
}, context_instance=RequestContext(request))
@profile_required
def manager_view(request, managerTitle):
''' View the details of a manager position.
Parameters:
request is an HTTP request
managerTitle is the URL title of the manager.
'''
targetManager = get_object_or_404(Manager, url_title=managerTitle)
if not targetManager.active:
messages.add_message(request, messages.ERROR, MESSAGES['INACTIVE_MANAGER'].format(managerTitle=targetManager.title))
return HttpResponseRedirect(reverse('managers:list_managers'))
else:
return render_to_response('view_manager.html', {
'page_name': "View Manager",
'targetManager': targetManager,
}, context_instance=RequestContext(request))
@president_admin_required
def meta_manager_view(request):
'''
A manager of managers. Display a list of current managers, with links to modify them.
Also display a link to add a new manager. Restricted to presidents and superadmins.
'''
managers = Manager.objects.all()
return render_to_response('meta_manager.html', {
'page_name': "Admin - Meta-Manager",
'managerset': managers,
}, context_instance=RequestContext(request))
@president_admin_required
def add_manager_view(request):
''' View to add a new manager position. Restricted to superadmins and presidents. '''
form = ManagerForm(request.POST or None)
if form.is_valid():
manager = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['MANAGER_ADDED'].format(managerTitle=manager.title))
return HttpResponseRedirect(reverse('managers:add_manager'))
return render_to_response('edit_manager.html', {
'page_name': "Admin - Add Manager",
'managerset': Manager.objects.all(),
'form': form,
}, context_instance=RequestContext(request))
@president_admin_required
def edit_manager_view(request, managerTitle):
''' View to modify an existing manager.
Parameters:
request is an HTTP request
managerTitle is URL title of the manager.
'''
targetManager = get_object_or_404(Manager, url_title=managerTitle)
form = ManagerForm(
request.POST or None,
instance=targetManager,
)
if form.is_valid():
manager = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['MANAGER_SAVED'].format(managerTitle=manager.title))
return HttpResponseRedirect(reverse('managers:meta_manager'))
return render_to_response('edit_manager.html', {
'page_name': "Admin - Edit Manager",
'form': form,
"managerset": Manager.objects.all(),
'manager_title': targetManager.title,
}, context_instance=RequestContext(request))
@president_admin_required
def manage_request_types_view(request):
''' Manage requests. Display a list of request types with links to edit them.
Also display a link to add a new request type. Restricted to presidents and superadmins.
'''
request_types = RequestType.objects.all()
return render_to_response('manage_request_types.html', {
'page_name': "Admin - Manage Request Types",
'request_types': request_types
}, context_instance=RequestContext(request))
@president_admin_required
def add_request_type_view(request):
''' View to add a new request type. Restricted to presidents and superadmins. '''
form = RequestTypeForm(request.POST or None)
if form.is_valid():
rtype = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['REQUEST_TYPE_ADDED'].format(typeName=rtype.name))
return HttpResponseRedirect(reverse('managers:manage_request_types'))
return render_to_response('edit_request_type.html', {
'page_name': "Admin - Add Request Type",
'request_types': RequestType.objects.all(),
'form': form,
}, context_instance=RequestContext(request))
@president_admin_required
def edit_request_type_view(request, typeName):
''' View to edit a new request type. Restricted to presidents and superadmins.
Parameters:
request is an HTTP request
typeName is the request type's URL name.
'''
requestType = get_object_or_404(RequestType, url_name=typeName)
form = RequestTypeForm(
request.POST or None,
instance=requestType,
)
if form.is_valid():
rtype = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['REQUEST_TYPE_SAVED'].format(typeName=rtype.name))
return HttpResponseRedirect(reverse('managers:manage_request_types'))
return render_to_response('edit_request_type.html', {
'page_name': "Admin - Edit Request Type",
'request_types': RequestType.objects.all(),
'form': form,
'requestType': requestType,
}, context_instance=RequestContext(request))
@profile_required
def requests_view(request, requestType):
'''
Generic request view. Parameters:
request is the HTTP request
requestType is URL name of a RequestType.
e.g. "food", "maintenance", "network", "site"
'''
userProfile = UserProfile.objects.get(user=request.user)
request_type = get_object_or_404(RequestType, url_name=requestType)
page_name = "{0} Requests".format(request_type.name.title())
if not request_type.enabled:
message = "{0} requests have been disabled.".format(request_type.name.title())
return red_home(request, message)
relevant_managers = request_type.managers.filter(active=True)
manager = any(i.incumbent == userProfile for i in relevant_managers)
request_form = RequestForm(
request.POST if "submit_request" in request.POST else None,
profile=userProfile,
request_type=request_type,
)
if request_form.is_valid():
request_form.save()
return HttpResponseRedirect(reverse('managers:requests', kwargs={'requestType': requestType}))
# number of requests loaded
x = 0
# A pseudo-dictionary, actually a list with items of form (request,
# [request_responses_list], response_form, upvote, vote_form)
requests_dict = list()
requests = Request.objects.filter(request_type=request_type)
if not request_type.managers.filter(incumbent__user=request.user):
requests = requests.exclude(
~Q(owner__user=request.user), private=True,
)
for req in requests:
request_responses = Response.objects.filter(request=req)
if manager:
response_form = ManagerResponseForm(
request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
initial={'action': Response.NONE},
prefix="{0}".format(req.pk),
profile=userProfile,
request=req,
)
else:
response_form = ResponseForm(
request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
prefix="{0}".format(req.pk),
profile=userProfile,
request=req,
)
upvote = userProfile in req.upvotes.all()
vote_form = VoteForm(
request.POST if "vote-{0}".format(req.pk) in request.POST else None,
profile=userProfile,
request=req,
)
if response_form.is_valid():
response_form.save()
return HttpResponseRedirect(reverse('managers:requests',
kwargs={'requestType': requestType}))
if vote_form.is_valid():
vote_form.save()
return HttpResponseRedirect(reverse('managers:requests',
kwargs={'requestType': requestType}))
requests_dict.append((req, request_responses, response_form, upvote, vote_form))
x += 1
if x >= settings.MAX_REQUESTS:
break
return render_to_response('requests.html', {
'manager': manager,
'request_type': request_type,
'page_name': page_name,
'request_form': request_form,
'requests_dict': requests_dict,
'relevant_managers': relevant_managers,
}, context_instance=RequestContext(request))
@profile_required
def my_requests_view(request):
'''
Show user his/her requests, sorted by request_type.
'''
page_name = "Your Requests"
userProfile = UserProfile.objects.get(user=request.user)
my_requests = Request.objects.filter(owner=userProfile)
# A pseudo dictionary, actually a list with items of form
# (request_type.name.title(), request_form, type_manager, [(request,
# [list_of_request_responses], response_form, upvote, vote_form),...],
# relevant_managers)
request_dict = list()
for request_type in RequestType.objects.all():
relevant_managers = request_type.managers.filter(active=True)
type_manager = any(i.incumbent == userProfile for i in
relevant_managers)
# Items are of form (request, [list_of_request_responses],
# response_form),...])
requests_list = list()
type_requests = my_requests.filter(request_type=request_type)
for req in type_requests:
responses_list = Response.objects.filter(request=req)
if type_manager:
response_form = ManagerResponseForm(
request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
initial={'action': Response.NONE},
profile=userProfile,
request=req,
prefix="response-{0}".format(req.pk),
)
else:
response_form = ResponseForm(
request.POST if "add_response-{0}".format(req.pk) in request.POST else None,
profile=userProfile,
request=req,
prefix="response-{0}".format(req.pk),
)
upvote = userProfile in req.upvotes.all()
            vote_form = VoteForm(
                request.POST if "vote-{0}".format(req.pk) in request.POST else None,
                profile=userProfile,
                request=req,
                prefix="vote-{0}".format(req.pk),
            )
if response_form.is_valid():
response_form.save()
return HttpResponseRedirect(reverse('managers:my_requests'))
if vote_form.is_valid():
vote_form.save()
return HttpResponseRedirect(reverse('managers:my_requests'))
requests_list.append((req, responses_list, response_form, upvote, vote_form))
request_form = RequestForm(
request.POST if "submit_request" in request.POST else None,
profile=userProfile,
request_type=request_type,
prefix="request-{0}".format(request_type.pk),
)
if request_form.is_valid():
request_form.save()
return HttpResponseRedirect(reverse('managers:my_requests'))
request_dict.append((request_type, request_form, type_manager,
requests_list, relevant_managers))
return render_to_response('my_requests.html', {
'page_name': page_name,
'request_dict': request_dict,
}, context_instance=RequestContext(request))
@profile_required
def list_my_requests_view(request):
'''
Show user his/her requests in list form.
'''
userProfile = UserProfile.objects.get(user=request.user)
requests = Request.objects.filter(owner=userProfile)
return render_to_response('list_requests.html', {
'page_name': "Your Requests",
'requests': requests,
}, context_instance=RequestContext(request))
@profile_required
def list_user_requests_view(request, targetUsername):
'''
Show user his/her requests in list form.
'''
if targetUsername == request.user.username:
return list_my_requests_view(request)
targetUser = get_object_or_404(User, username=targetUsername)
targetProfile = get_object_or_404(UserProfile, user=targetUser)
page_name = "{0}'s Requests".format(targetUsername)
requests = Request.objects.filter(owner=targetProfile).exclude(
~Q(owner__user=request.user), private=True,
)
return render_to_response('list_requests.html', {
'page_name': page_name,
'requests': requests,
'targetUsername': targetUsername,
}, context_instance=RequestContext(request))
@profile_required
def all_requests_view(request):
'''
Show user a list of enabled request types, the number of requests of each
type and a link to see them all.
'''
# Pseudo-dictionary, actually a list with items of form
# (request_type.name.title(), number_of_type_requests, name, enabled,
# glyphicon)
types_dict = list()
for request_type in RequestType.objects.all():
requests = Request.objects.filter(request_type=request_type)
# Hide the count for private requests
if not request_type.managers.filter(incumbent__user=request.user):
requests = requests.exclude(
~Q(owner__user=request.user), private=True,
)
number_of_requests = requests.count()
types_dict.append((
request_type.name.title(), number_of_requests,
request_type.url_name, request_type.enabled,
request_type.glyphicon,
))
return render_to_response('all_requests.html', {
'page_name': "Archives - All Requests",
'types_dict': types_dict,
}, context_instance=RequestContext(request))
@profile_required
def list_all_requests_view(request, requestType):
'''
Show all the requests for a given type in list form.
'''
request_type = get_object_or_404(RequestType, url_name=requestType)
requests = Request.objects.filter(request_type=request_type)
# Hide the count for private requests
if not request_type.managers.filter(incumbent__user=request.user):
requests = requests.exclude(
~Q(owner__user=request.user), private=True,
)
page_name = "Archives - All {0} Requests".format(request_type.name.title())
return render_to_response('list_requests.html', {
'page_name': page_name,
'requests': requests,
'request_type': request_type,
}, context_instance=RequestContext(request))
@profile_required
@ajax_capable
def request_view(request, request_pk):
'''
The view of a single request.
'''
if request.is_ajax():
if not request.user.is_authenticated():
return HttpResponse(json.dumps(dict()),
content_type="application/json")
try:
relevant_request = Request.objects.get(pk=request_pk)
except Request.DoesNotExist:
return HttpResponse(json.dumps(dict()),
content_type="application/json")
try:
user_profile = UserProfile.objects.get(user=request.user)
except UserProfile.DoesNotExist:
return HttpResponse(json.dumps(dict()),
content_type="application/json")
upvote = user_profile in relevant_request.upvotes.all()
vote_form = VoteForm(
request.POST if "upvote" in request.POST else None,
profile=user_profile,
request=relevant_request,
)
if vote_form.is_valid():
vote_form.save()
response = dict()
response['vote_count_{pk}'.format(pk=request_pk)] = \
relevant_request.upvotes.all().count()
list_string = 'vote_list_{pk}'.format(pk=request_pk)
vote_string = 'in_votes_{pk}'.format(pk=request_pk)
count_string = 'vote_count_{pk}'.format(pk=request_pk)
response[list_string], response[vote_string], \
response[count_string] = build_ajax_votes(
relevant_request,
user_profile
)
return HttpResponse(json.dumps(response),
content_type="application/json")
return HttpResponse(json.dumps(dict()),
content_type="application/json")
relevant_request = get_object_or_404(Request, pk=request_pk)
if relevant_request.private:
        if relevant_request.owner.user != request.user and \
                not relevant_request.request_type.managers.filter(incumbent__user=request.user):
return HttpResponseRedirect(
reverse("managers:requests",
kwargs={"requestType": relevant_request.request_type.url_name}))
userProfile = UserProfile.objects.get(user=request.user)
request_responses = Response.objects.filter(request=relevant_request)
relevant_managers = relevant_request.request_type.managers.filter(active=True)
manager = any(i.incumbent == userProfile for i in relevant_managers)
if manager:
response_form = ManagerResponseForm(
request.POST if "add_response" in request.POST else None,
initial={'action': Response.NONE},
profile=userProfile,
request=relevant_request,
)
else:
response_form = ResponseForm(
request.POST if "add_response" in request.POST else None,
profile=userProfile,
request=relevant_request,
prefix="response",
)
upvote = userProfile in relevant_request.upvotes.all()
vote_form = VoteForm(
request.POST if "upvote" in request.POST else None,
profile=userProfile,
request=relevant_request,
)
if response_form.is_valid():
response_form.save()
return HttpResponseRedirect(reverse('managers:view_request', kwargs={
'request_pk': relevant_request.pk,
}))
if vote_form.is_valid():
vote_form.save()
return HttpResponseRedirect(reverse('managers:view_request', kwargs={
'request_pk': relevant_request.pk,
}))
upvote = userProfile in relevant_request.upvotes.all()
return render_to_response('view_request.html', {
'page_name': "View Request",
'relevant_request': relevant_request,
'request_responses': request_responses,
'upvote': upvote,
'vote_form': vote_form,
'response_form': response_form,
'relevant_managers': relevant_managers,
}, context_instance=RequestContext(request))
@profile_required
def announcement_view(request, announcement_pk):
''' The view of a single manager announcement. '''
announce = get_object_or_404(Announcement, pk=announcement_pk)
page_name = "View Announcement"
profile = UserProfile.objects.get(user=request.user)
pin_form = PinForm(
request.POST if "pin" in request.POST else None,
instance=announce,
)
can_edit = announce.incumbent == profile or request.user.is_superuser
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(
reverse('managers:view_announcement', kwargs={"announcement_pk": announcement_pk}),
)
return render_to_response('view_announcement.html', {
'page_name': page_name,
'pin_form': pin_form,
'can_edit': can_edit,
'announcement': announce,
}, context_instance=RequestContext(request))
@profile_required
def edit_announcement_view(request, announcement_pk):
''' The view of a single manager announcement. '''
announce = get_object_or_404(Announcement, pk=announcement_pk)
profile = UserProfile.objects.get(user=request.user)
if not (announce.incumbent == profile or request.user.is_superuser):
return HttpResponseRedirect(
reverse('managers:view_announcement', kwargs={"announcement_pk": announcement_pk}),
)
page_name = "Edit Announcement"
announcement_form = AnnouncementForm(
request.POST or None,
instance=announce,
profile=profile,
editing=True,
)
if announcement_form.is_valid():
announcement_form.save(request)
return HttpResponseRedirect(
reverse('managers:view_announcement', kwargs={"announcement_pk": announcement_pk}),
)
return render_to_response('edit_announcement.html', {
'page_name': page_name,
'announcement_form': announcement_form,
}, context_instance=RequestContext(request))
@profile_required
def announcements_view(request):
''' The view of manager announcements. '''
page_name = "Manager Announcements"
userProfile = UserProfile.objects.get(user=request.user)
announcement_form = None
manager_positions = Manager.objects.filter(incumbent=userProfile)
if manager_positions:
announcement_form = AnnouncementForm(
request.POST if "post_announcement" in request.POST else None,
profile=userProfile,
)
if announcement_form and announcement_form.is_valid():
announcement_form.save(request)
return HttpResponseRedirect(reverse('managers:announcements'))
# A pseudo-dictionary, actually a list with items of form:
# (announcement, announcement_pin_form)
announcements_dict = list()
for a in Announcement.objects.filter(pinned=True):
pin_form = None
if (a.manager.incumbent == userProfile) or request.user.is_superuser:
pin_form = PinForm(
request.POST if "pin-{0}".format(a.pk) else None,
instance=a,
)
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(reverse('managers:announcements'))
announcements_dict.append((a, pin_form))
    # Oldest post date at which an unpinned announcement is still displayed.
within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE)
for a in Announcement.objects.filter(pinned=False, post_date__gte=within_life):
pin_form = None
if request.user.is_superuser or (a.manager.incumbent == userProfile):
pin_form = PinForm(
request.POST if "pin-{0}".format(a.pk) else None,
instance=a,
)
announcements_dict.append((a, pin_form))
return render_to_response('announcements.html', {
'page_name': page_name,
'manager_positions': manager_positions,
'announcements_dict': announcements_dict,
'announcement_form': announcement_form,
}, context_instance=RequestContext(request))
@profile_required
def all_announcements_view(request):
''' The view of manager announcements. '''
page_name = "Archives - All Announcements"
userProfile = UserProfile.objects.get(user=request.user)
announcement_form = None
manager_positions = Manager.objects.filter(incumbent=userProfile)
if manager_positions:
announcement_form = AnnouncementForm(
request.POST if "post_announcement" in request.POST else None,
profile=userProfile,
)
    if announcement_form and announcement_form.is_valid():
        announcement_form.save(request)
return HttpResponseRedirect(reverse('managers:all_announcements'))
# A pseudo-dictionary, actually a list with items of form (announcement,
# announcement_pin_form)
announcements_dict = list()
for a in Announcement.objects.all():
pin_form = None
if a.manager.incumbent == userProfile or request.user.is_superuser:
pin_form = PinForm(
request.POST if "pin-{0}".format(a.pk) in request.POST else None,
instance=a,
)
if pin_form.is_valid():
pin_form.save()
return HttpResponseRedirect(reverse('managers:all_announcements'))
announcements_dict.append((a, pin_form))
return render_to_response('announcements.html', {
'page_name': page_name,
'manager_positions': manager_positions,
'announcements_dict': announcements_dict,
'announcement_form': announcement_form,
}, context_instance=RequestContext(request))
@admin_required
def recount_view(request):
'''
Recount number_of_messages for all threads and number_of_responses for all
requests.
'''
requests_changed = 0
for req in Request.objects.all():
recount = Response.objects.filter(request=req).count()
if req.number_of_responses != recount:
req.number_of_responses = recount
req.save()
requests_changed += 1
threads_changed = 0
for thread in Thread.objects.all():
recount = Message.objects.filter(thread=thread).count()
if thread.number_of_messages != recount:
thread.number_of_messages = recount
thread.save()
threads_changed += 1
messages.add_message(
request, messages.SUCCESS,
MESSAGES['RECOUNTED'].format(
requests_changed=requests_changed,
request_count=Request.objects.all().count(),
threads_changed=threads_changed,
thread_count=Thread.objects.all().count(),
),
)
return HttpResponseRedirect(reverse('utilities'))
| python |
#!/usr/bin/env python2.7
"""
Function-Class-Method browser for python files.
"""
# Copyright (c) 2013 - 2017 Carwyn Pelley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import re
def main(fnme):
with open(fnme, 'r') as fh:
lines = fh.readlines()
parsed = []
    pattern = '|'.join(r'^\s*{}\s'.format(ident) for ident in
                       ['cdef', 'cpdef', 'def', 'class'])
    for ind, line in enumerate(lines):
        if re.match(pattern, line):
print_line = line.replace('\n', '')
print_line = print_line.replace(':', '')
print_line = "{}:{}:'{}'".format(fnme, ind + 1, print_line)
parsed.append(print_line)
print print_line
if __name__ == '__main__':
    if len(sys.argv) == 2:
        fnme = sys.argv[1]
    else:
        sys.exit(1)
main(fnme)
| python |
#!/usr/bin/env python
"""
utils.py
"""
import os, warnings, numpy as np, pandas as pd
from glob import glob
from typing import List
from itertools import accumulate, chain, repeat
from .constants import FRAME, TRACK, TRACK_LENGTH, PY, PX
######################
## TRACKS UTILITIES ##
######################
def track_length(tracks: pd.DataFrame) -> pd.DataFrame:
"""
Add a new column to a trajectory dataframe with the trajectory
length in frames.
args
----
tracks : pandas.DataFrame. Must have the column
*TRACK*.
returns
-------
pandas.DataFrame, with the "track_length" column. Overwritten
if it already exists.
"""
if TRACK_LENGTH in tracks.columns:
tracks = tracks.drop(TRACK_LENGTH, axis=1)
return tracks.join(tracks.groupby(TRACK).size().rename(TRACK_LENGTH), on=TRACK)
def assign_index_in_track(tracks):
"""
Given a set of trajectories, determine the index of each localization in the
context of its respective trajectory.
args
----
tracks : pandas.DataFrame, containing the "trajectory" and "frame"
columns
returns
-------
pandas.DataFrame, the same dataframe with a new column, "index_in_track"
"""
tracks = tracks.sort_values(by=[TRACK, FRAME])
tracks["one"] = 1
tracks["index_in_track"] = tracks.groupby(TRACK)["one"].cumsum() - 1
tracks = tracks.drop("one", axis=1)
return tracks
def concat_tracks(*tracks):
"""
Join some trajectory dataframes together into a larger dataframe,
    while preserving unique trajectory indices.
args
----
tracks : pandas.DataFrame with the "trajectory" column
returns
-------
pandas.DataFrame, the concatenated trajectories
"""
n = len(tracks)
# Sort the tracks dataframes by their size. The only important thing
# here is that if at least one of the tracks dataframes is nonempty,
# we need to put that one first.
df_lens = [len(t) for t in tracks]
try:
tracks = [t for _, t in sorted(zip(df_lens, tracks))][::-1]
except ValueError:
pass
# Iteratively concatenate each dataframe to the first while
# incrementing the trajectory index as necessary
out = tracks[0].assign(dataframe_index=0)
c_idx = out[TRACK].max() + 1
for t in range(1, n):
# Get the next set of trajectories and keep track of the origin
# dataframe
new = tracks[t].assign(dataframe_index=t)
# Ignore negative trajectory indices (facilitating a user filter)
new.loc[new["trajectory"]>=0, "trajectory"] += c_idx
# Increment the total number of trajectories
c_idx = new["trajectory"].max() + 1
# Concatenate
out = pd.concat([out, new], ignore_index=True, sort=False)
return out
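# Minimal usage sketch (hypothetical data, assuming TRACK == "trajectory"):
# two dataframes that each use trajectory indices 0..1 come back with
# globally unique indices:
#   a = pd.DataFrame({"trajectory": [0, 0, 1], "frame": [0, 1, 0]})
#   b = pd.DataFrame({"trajectory": [0, 1, 1], "frame": [5, 5, 6]})
#   concat_tracks(a, b)["trajectory"].tolist() -> [0, 0, 1, 2, 3, 3]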
#############################
## TRACK LOADING UTILITIES ##
#############################
def load_tracks(*csv_paths, out_csv=None, start_frame=0,
drop_singlets=False, suffix=".csv"):
"""
Given a set of trajectories stored as CSVs, concatenate all
of them, storing the paths to the original CSVs in the resulting
dataframe, and optionally save the result to another CSV.
If passed a directory instead of a set of CSV paths, find all
the CSVs in that directory that end with *suffix*, load the
trajectories, and concatenate them.
args
----
csv_paths : list of str, a set of trajectory CSVs.
Each must contain the "y", "x", "trajectory",
and "frame" columns
out_csv : str, path to save to
start_frame : int, exclude any trajectories that begin before
this frame
drop_singlets : bool, drop singlet localizations before
concatenating
    suffix : str, suffix of CSVs if passing a directory
returns
-------
pandas.DataFrame, the concatenated result
"""
n = len(csv_paths)
if n == 0:
warnings.warn("no paths passed")
return pd.DataFrame([], columns=["trajectory", "frame", "y", "x"], dtype=object)
# If passed a directory instead of a set of file paths, just load all
# the CSVs from that directory
if os.path.isdir(csv_paths[0]):
return load_tracks_dir(csv_paths[0], start_frame=start_frame,
drop_singlets=drop_singlets, suffix=suffix)
if start_frame is None:
start_frame = 0
def drop_before_start_frame(tracks, start_frame):
"""
Drop all trajectories that start before a specific frame.
"""
if tracks.empty or (start_frame is None) or (start_frame <= tracks["frame"].min()):
return tracks
tracks = tracks.join(
(tracks.groupby("trajectory")["frame"].first() >= start_frame).rename("_take"),
on="trajectory"
)
tracks = tracks[tracks["_take"]]
tracks = tracks.drop("_take", axis=1)
return tracks
def drop_singlets_dataframe(tracks):
"""
Drop all singlets and unassigned localizations from a
pandas.DataFrame with trajectory information.
"""
if tracks.empty:
return tracks
tracks = track_length(tracks)
tracks = tracks[np.logical_and(tracks["track_length"]>1,
tracks["trajectory"]>=0)]
return tracks
def loader(path):
tracks = pd.read_csv(path)
if drop_singlets:
tracks = drop_singlets_dataframe(tracks)
tracks = drop_before_start_frame(tracks, start_frame)
return tracks
# Load the trajectories into memory
tracks = []
for path in csv_paths:
tracks.append(loader(path))
# Concatenate
tracks = concat_tracks(*tracks)
# Map the original path back to each file
for i, path in enumerate(csv_paths):
tracks.loc[tracks["dataframe_index"]==i, "source_file"] = \
os.path.abspath(path)
# Optionally save concatenated trajectories to a new CSV
if not out_csv is None:
tracks.to_csv(out_csv, index=False)
return tracks
def load_tracks_dir(dirname, suffix=".csv", start_frame=0,
drop_singlets=False):
"""
Load all of the trajectory CSVs in a target directory
into a single pandas.DataFrame.
args
----
dirname : str, directory with the track CSVs
suffix : str, extension for the track CSVs
start_frame : int, exclude all tracks before this
frame
drop_singlets : bool, don't include single-point
trajectories
returns
-------
pandas.DataFrame with an extra column, "origin_file",
with the path to the CSV from which these
trajectories were taken
"""
# Find target files
if os.path.isdir(dirname):
target_csvs = glob(os.path.join(dirname, "*{}".format(suffix)))
if len(target_csvs) == 0:
raise IOError("Could not find trajectory CSVs in directory {}".format(dirname))
    elif os.path.isfile(dirname):
        target_csvs = [dirname]
    else:
        raise IOError("Path {} does not exist".format(dirname))
# Concatenate trajectories
tracks = [pd.read_csv(j) for j in target_csvs]
tracks = concat_tracks(*tracks)
# Exclude points before the start frame
if isinstance(start_frame, int) and \
(start_frame > tracks["frame"].min()) and \
(not tracks.empty):
tracks = tracks.join(
(tracks.groupby("trajectory")["frame"].first() >= start_frame).rename("_take"),
on="trajectory"
)
tracks = tracks[tracks["_take"]]
tracks = tracks.drop("_take", axis=1)
# Exclude trajectories that are too short
tracks = track_length(tracks)
if drop_singlets:
tracks = tracks[tracks["track_length"] > 1]
return tracks
####################
## JUMP COMPUTERS ##
####################
def tracks_to_jumps(tracks, n_frames=1, start_frame=None, pixel_size_um=0.16,
pos_cols=["y", "x"]):
"""
Convert trajectories in pandas.DataFrame format to an internal "jumps"
format, specified in the *returns* section of this docstring.
args
----
tracks : pandas.DataFrame
n_frames : int, the number of frames over which to compute
the jump. For instance, if n_frames = 1, then only
compute jumps between consecutive frames
start_frame : int, disregard jumps before this frame
pixel_size_um : float, size of pixels in microns
pos_cols : list of str, the columns with the spatial
coordinates of each point in pixels
returns
-------
*jumps*, a 2D ndarray of shape (n_jumps, 6+). Each row corresponds
to a single jump from the dataset.
    The columns of *jumps* have the following meaning:
jumps[:,0] -> length of the origin trajectory in frames
jumps[:,1] -> index of the origin trajectory in *tracks*
jumps[:,2] -> frame corresponding to the first point in
the jump
jumps[:,3] -> sum of squared jumps across all spatial
dimensions in squared microns
jumps[:,4:] -> jumps in each Euclidean dimension in microns
"""
def bail():
return np.zeros((0, 6), dtype=np.float64)
# If passed an empty dataframe, bail
if tracks.empty: return bail()
# Do not modify the original dataframe
tracks = tracks.copy()
# Calculate the original trajectory length and exclude
# singlets and negative trajectory indices
tracks = track_length(tracks)
tracks = tracks[np.logical_and(
tracks["trajectory"] >= 0,
tracks["track_length"] > 1
)]
# Only consider trajectories after some start frame
if not start_frame is None:
tracks = tracks[tracks["frame"] >= start_frame]
# If no trajectories remain, bail
if tracks.empty: return bail()
# Convert from pixels to um
tracks[pos_cols] *= pixel_size_um
# Work with an ndarray, for speed
tracks = tracks.sort_values(by=["trajectory", "frame"])
T = np.asarray(tracks[["track_length", "trajectory", "frame", pos_cols[0]] + pos_cols])
# Allowing for gaps, consider every possible comparison that
# leads to the correct frame interval
target_jumps = []
for j in range(1, n_frames+1):
# Compute jumps
jumps = T[j:,:] - T[:-j,:]
# Only consider vectors between points originating
# from the same trajectory and from the target frame
# interval
same_track = jumps[:,1] == 0
target_interval = jumps[:,2] == n_frames
take = np.logical_and(same_track, target_interval)
# Map the corresponding track lengths, track indices,
# and frame indices back to each jump
jumps[:,:3] = T[:-j,:3]
jumps = jumps[take, :]
# Calculate the corresponding 2D squared jump and accumulate
if jumps.shape[0] > 0:
jumps[:,3] = (jumps[:,4:]**2).sum(axis=1)
target_jumps.append(jumps)
# Concatenate
if len(target_jumps) > 0:
return np.concatenate(target_jumps, axis=0)
else:
return bail()
def sum_squared_jumps(jumps, max_jumps_per_track=None, pos_cols=["y", "x"]):
"""
For each trajectory in a dataset, calculate the sum of its squared
jumps across all spatial dimensions.
args
----
jumps : 2D ndarray, all jumps in the dataset as
calculated by *tracks_to_jumps*
max_jumps_per_track : int, the maximum number of jumps
to consider from any single trajectory
returns
-------
pandas.DataFrame. Each row corresponds to a trajectory, with
the following columns:
"sum_sq_jump": the summed squared jumps of that trajectory
in microns
"trajectory" : the index of the origin trajectory
"frame" : the first frame of the first jumps in the
origin trajectory
"n_jumps" : the number of jumps used in *sum_sq_jump*
"""
out_cols = ["sum_sq_jump", "trajectory", "frame", "n_jumps"]
# If there are no jumps in this set of trajectories, bail
if jumps.shape[0] == 0:
return pd.DataFrame(index=[], columns=out_cols, dtype=object)
# Format as a dataframe, indexed by jump
cols = ["track_length", "trajectory", "frame", "sq_jump"] + list(pos_cols)
jumps = pd.DataFrame(jumps, columns=cols)
n_tracks = jumps["trajectory"].nunique()
# Limit the number of jumps to consider per trajectory, if desired
    if max_jumps_per_track is not None:
        jumps = assign_index_in_track(jumps)
        jumps = jumps[jumps["index_in_track"] < max_jumps_per_track]
# Output dataframe, indexed by trajectory
sum_jumps = pd.DataFrame(index=np.arange(n_tracks), columns=out_cols, dtype=object)
# Calculate the sum of squared jumps for each trajectory
sum_jumps["sum_sq_jump"] = np.asarray(jumps.groupby("trajectory")["sq_jump"].sum())
# Calculate the number of jumps in each trajectory
sum_jumps["n_jumps"] = np.asarray(jumps.groupby("trajectory").size())
# Map back the indices of the origin trajectories
sum_jumps["trajectory"] = np.asarray(jumps.groupby("trajectory").apply(lambda i: i.name)).astype(np.int64)
# Map back the frame indices
sum_jumps["frame"] = np.asarray(jumps.groupby("trajectory")["frame"].first()).astype(np.int64)
return sum_jumps
def split_jumps(jumps, splitsize=8):
"""
Split a set of long trajectories into shorter trajectories.
Example 1
---------
If we have a trajectory of 6 jumps and
splitsize = 3, then we split this trajectory into two
trajectories of 3 jumps, comprising the first and second halves
of the original trajectory.
Example 2
---------
If we have a trajectory of 10 jumps and splitsize = 4,
then we split this trajectory into 3 trajectories. The
first two are 4 jumps each, and the third is the last 2
jumps of the original trajectory.
args
----
jumps : 2D ndarray, a set of trajectory-indexed
jumps; output of *tracks_to_jumps*
splitsize : int, the maximum size of a trajectory
after splitting
returns
-------
1D ndarray of shape (n_tracks), the indices of the
new trajectories. These start from 0 and go to the
highest new trajectory index; numerically they have
no relation to the original trajectory indices.
"""
# If passed empty input, return empty output
if jumps.shape[0] == 0:
return np.zeros(0, dtype=np.int64)
# The original set of trajectory indices
orig_indices = jumps[:,1].astype(np.int64)
# The set of modified trajectory indices
new_indices = np.zeros(orig_indices.shape[0], dtype=np.int64)
# The current (new) trajectory index
c = 0
# The length of the current trajectory in # of jumps
L = 0
# Iterate through the original set of trajectory indices
prev_index = orig_indices[0]
    for i, index in enumerate(orig_indices):
        # We've passed into a different original trajectory
        if index != prev_index:
            prev_index = index
            c += 1
            L = 0
        # We've hit the split size limit, so break into a new trajectory
        elif L >= splitsize:
            c += 1
            L = 0
        new_indices[i] = c
        # Extend the current (new) trajectory
        L += 1
return new_indices
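# Minimal sketch (hypothetical jumps array): a single original trajectory of
# 6 jumps with splitsize=3 is relabeled into two trajectories of 3 jumps each,
# matching Example 1 in the docstring:
#   demo = np.zeros((6, 6)); demo[:, 1] = 7
#   split_jumps(demo, splitsize=3) -> array([0, 0, 0, 1, 1, 1])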
#####################
## OTHER UTILITIES ##
#####################
def normalize_2d(arr: np.ndarray, axis: int):
""" Normalize a 2D array over one of its axes.
args
----
arr : 2D numpy.ndarray
axis : int, axis to normalize over
returns
-------
    *arr*, scaled in place such that arr.sum(axis=axis) == 1.0
    wherever that sum is nonzero
"""
if len(arr.shape) != 2:
raise ValueError(f"arr has shape {arr.shape}; expected 2D")
if axis == 1:
S = arr.sum(axis=1)
nonzero = S > 0
arr[nonzero,:] = (arr[nonzero,:].T / S[nonzero]).T
else:
S = arr.sum(axis=0)
nonzero = S > 0
arr[:,nonzero] = arr[:,nonzero] / S[nonzero]
return arr
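# Example (not part of the original module); note the all-zero row is left
# untouched:
#   normalize_2d(np.array([[1., 1.], [0., 0.]]), axis=1)
#   -> array([[0.5, 0.5], [0. , 0. ]])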
def cartesian_product(*arrays: np.ndarray):
""" Cartesian product of multiple 1D numpy.ndarrays.
Source: https://stackoverflow.com/a/45378609
args
----
arrays : 1D numpy.ndarray
    returns
    -------
    2D numpy.ndarray of shape (product of the input lengths, len(arrays)),
    one row per element of the Cartesian product
    """
la = len(arrays)
L = *map(len, arrays), la
dtype = np.result_type(*arrays)
arr = np.empty(L, dtype=dtype)
arrs = *accumulate(chain((arr,), repeat(0, la-1)), np.ndarray.__getitem__),
idx = slice(None), *repeat(None, la-1)
for i in range(la-1, 0, -1):
arrs[i][..., i] = arrays[i][idx[:la-i]]
arrs[i-1][1:] = arrs[i]
arr[..., 0] = arrays[0][idx]
return arr.reshape(-1, la)
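# Example (not part of the original module):
#   cartesian_product(np.array([0, 1]), np.array([10, 20]))
#   -> array([[ 0, 10], [ 0, 20], [ 1, 10], [ 1, 20]])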
| python |
from django.conf import settings
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.views.generic import TemplateView
from core.helpers import NotifySettings
from core.views import BaseNotifyFormView
from ukef.forms import UKEFContactForm
class HomeView(TemplateView):
template_name = 'ukef/home_page.html'
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['trade_finance_bullets'] = ['working capital support', 'bond support', 'credit insurance']
context['project_finance_bullets'] = [
'UKEF buyer credit guarantees', 'direct lending', 'credit and bond insurance']
return context
class LandingView(TemplateView):
template_name = 'ukef/landing_page.html'
class ContactView(BaseNotifyFormView):
template_name = 'ukef/contact_form.html'
form_class = UKEFContactForm
success_url = reverse_lazy('uk-export-contract-success')
notify_settings = NotifySettings(
agent_template=settings.UKEF_CONTACT_AGENT_NOTIFY_TEMPLATE_ID,
agent_email=settings.UKEF_CONTACT_AGENT_EMAIL_ADDRESS,
user_template=settings.UKEF_CONTACT_USER_NOTIFY_TEMPLATE_ID,
)
def form_valid(self, form):
user_email = form.cleaned_data['email']
self.request.session['user_email'] = user_email
return super().form_valid(form)
class SuccessPageView(TemplateView):
template_name = 'ukef/contact_form_success.html'
def get(self, *args, **kwargs):
if not self.request.session.get('user_email'):
return HttpResponseRedirect(reverse_lazy('uk-export-contact'))
return super().get(*args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['user_email'] = self.request.session.get('user_email')
return super().get_context_data(**kwargs)
class HowWeAssessPageView(TemplateView):
template_name = 'ukef/how_we_assess.html'
class WhatWeOfferView(TemplateView):
template_name = 'ukef/what_we_offer.html'
class CountryCoverView(TemplateView):
template_name = 'ukef/country_cover.html'
| python |
import stackprinter
def test_frame_formatting():
""" pin plaintext output """
msg = stackprinter.format()
lines = msg.split('\n')
expected = ['File "test_formatting.py", line 6, in test_frame_formatting',
' 4 def test_frame_formatting():',
' 5 """ pin plaintext output """',
'--> 6 msg = stackprinter.format()',
" 7 lines = msg.split('\\n')",
' ..................................................',
" stackprinter.format = <function 'format' __init__.py:17>",
' ..................................................',
'',
'']
for k, (our_line, expected_line) in enumerate(zip(lines[-len(expected):], expected)):
if k == 0:
assert our_line[-52:] == expected_line[-52:]
elif k == 6:
assert our_line[:58] == expected_line[:58]
else:
assert our_line == expected_line
# for scheme in stackprinter.colorschemes.__all__:
# stackprinter.format(style=scheme, suppressed_paths=[r"lib/python.*"])
def test_exception_formatting():
from source import Hovercraft
try:
Hovercraft().eels
except:
msg_plain = stackprinter.format()
msg_color = stackprinter.format(style='darkbg')
lines = msg_plain.split('\n')
assert lines[0].endswith('eels')
assert lines[-1] == 'Exception: ahoi!'
print(msg_plain)
print(msg_color)
def test_none_tuple_formatting():
output = stackprinter.format((None, None, None))
assert output == "NoneType: None"
def test_none_value_formatting():
output = stackprinter.format((TypeError, None, None))
assert output == "TypeError: None"
| python |
import sys
import getpass
from controllers.main_controller import MainController
from interface.main_menu import MainMenu
from utils.hospital_errors import *
from database_layer.database import *
from utils.hospital_constants import *
class StartMenu:
db = Database()
@classmethod
def run(cls):
print(HospitalConstants.start_menu_options)
start_option = input("Option: ")
if start_option == '1':
            # sign_in may return the full user object, the title ("doctor"
            # or "patient"), or None on failure
username = input("Username: ")
password = getpass.getpass("Password: ")
try:
current_user = MainController.sign_in(username, password)
except InvalidPasswordError:
print("Password does not match criteria!")
sys.exit(1)
except DatabaseConnectionError:
print("Sign in failed! Try again!")
sys.exit(1)
else:
if current_user:
MainMenu.show_options(current_user)
else:
print("Wrong username or password!")
sys.exit(1)
elif start_option == '2':
print("Are you a doctor or a patient?")
title = input("Position: ")
if title not in ["doctor", "patient"]:
print("Unknown positon! Try again!")
sys.exit(1)
username = input("Username: ")
full_name = input("Full name: ")
password = getpass.getpass("Password: ")
verification_password = getpass.getpass("Repeat password: ")
try:
user = MainController.sign_up(username, password, verification_password, title, full_name)
user_info = {}
if title == "doctor":
position = input("Enter your position: ")
user_info.update({"position" : position})
elif title == "patient":
condition = input("Enter your condition: ")
age = input("Enter your age: ")
user_info.update({"condition" : condition, "age" : age})
current_user = MainController.connect_tables(title, username, user_info)
except UserAlreadyExistsError:
print("Sign up failed! Username already taken!")
sys.exit(1)
except DatabaseConnectionError:
print("Sign up failed! Try again!")
sys.exit(1)
except PasswordsDontMatchError:
print("Sign up failed! Passwords don\'t match! ")
sys.exit(1)
except InvalidPasswordError:
print("Passwords does not match criteria!")
sys.exit(1)
else:
                MainMenu.show_options(current_user)
else:
            sys.exit(1)
| python |
import json # importing json module
class Utils:
def stringify(self, obj):
return json.dumps(obj)
    def parseJson(self, string):
        try:
            return json.loads(string)
        except (json.JSONDecodeError, TypeError):
            # fall back to the raw input if it is not valid JSON
            return string
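# Minimal usage sketch (not part of the original module):
#   u = Utils()
#   u.stringify({"a": 1})   -> '{"a": 1}'
#   u.parseJson('{"a": 1}') -> {'a': 1}
#   u.parseJson('not json') -> 'not json'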
| python |
from typing import List, Optional
from sqlalchemy import desc
from sqlalchemy.ext.asyncio.session import AsyncSession
from sqlalchemy.sql.expression import select
from app.database.dbo.mottak import WorkflowMetadata as WorkflowMetadata_DBO
from app.domain.models.WorkflowMetadata import WorkflowMetadata, WorkflowMetadataTypes
async def create_workflow_metadata(db: AsyncSession, workflow_metadata: WorkflowMetadata) -> WorkflowMetadata_DBO:
dbo = WorkflowMetadata_DBO(
overforingspakke_id=workflow_metadata.overforingspakke_id,
workflow_type=workflow_metadata.workflow_type,
workflow_name=workflow_metadata.workflow_name,
workflow_uid=workflow_metadata.workflow_uid,
)
db.add(dbo)
await db.flush()
return dbo
async def get_all_with_overforingspakke_id(
db: AsyncSession,
overforingspakke_id: int,
workflow_type: Optional[WorkflowMetadataTypes],
skip: int,
limit: int,
) -> List[WorkflowMetadata_DBO]:
query = (
select(WorkflowMetadata_DBO)
.where(WorkflowMetadata_DBO.overforingspakke_id == overforingspakke_id)
)
if workflow_type is not None:
query = query.where(WorkflowMetadata_DBO.workflow_type == workflow_type)
result = await db.execute(
query.order_by(desc(WorkflowMetadata_DBO.opprettet))
.limit(None if limit == -1 else limit)
.offset(skip)
)
return result.scalars().all()
| python |
import pickle
pickle_in=open("instances_dev.pickle","rb")
data=pickle.load(pickle_in)
for i in range(10):
print(data[i])
| python |
import FWCore.ParameterSet.Config as cms
from L1Trigger.VertexFinder.VertexProducer_cff import VertexProducer
L1FastTrackingJets = cms.EDProducer("L1FastTrackingJetProducer",
L1TrackInputTag = cms.InputTag("TTTracksFromTrackletEmulation", "Level1TTTracks"),
L1PrimaryVertexTag=cms.InputTag("VertexProducer", VertexProducer.l1VertexCollectionName.value()),
GenInfo = cms.InputTag("TTTrackAssociatorFromPixelDigis", "Level1TTTracks"),
trk_zMax = cms.double(15.), # max track z0 [cm]
trk_chi2dofMax = cms.double(10.), # max track chi2/dof
trk_bendChi2Max = cms.double(2.2),# max bendChi2 cut
trk_ptMin = cms.double(2.0), # minimum track pt [GeV]
trk_etaMax = cms.double(2.5), # maximum track eta
trk_nStubMin = cms.int32(4), # minimum number of stubs in track
trk_nPSStubMin = cms.int32(-1), # minimum number of PS stubs in track
deltaZ0Cut=cms.double(0.5), # cluster tracks within |dz|<X
doTightChi2 = cms.bool( True ), # chi2dof < 5 for tracks with PT > 20
coneSize=cms.double(0.4), #cone size for anti-kt fast jet
displaced = cms.bool(False), # use prompt/displaced tracks
selectTrkMatchGenTight=cms.bool(True),
selectTrkMatchGenLoose=cms.bool(False),
selectTrkMatchGenOrPU=cms.bool(False)
)
L1FastTrackingJetsExtended = cms.EDProducer("L1FastTrackingJetProducer",
L1TrackInputTag = cms.InputTag("TTTracksFromExtendedTrackletEmulation", "Level1TTTracks"),
L1PrimaryVertexTag=cms.InputTag("VertexProducer", VertexProducer.l1VertexCollectionName.value()),
GenInfo = cms.InputTag("TTTrackAssociatorFromPixelDigisExtended", "Level1TTTracks"),
trk_zMax = cms.double(15.), # max track z0 [cm]
trk_chi2dofMax = cms.double(40.), # max track chi2 for extended tracks
trk_bendChi2Max = cms.double(2.4),#Bendchi2 cut for extended tracks
trk_ptMin = cms.double(3.0), # minimum track pt [GeV]
trk_etaMax = cms.double(2.5), # maximum track eta
trk_nStubMin = cms.int32(4), # minimum number of stubs on track
trk_nPSStubMin = cms.int32(-1), # minimum number of stubs in PS modules on track
deltaZ0Cut=cms.double(3.0), #cluster tracks within |dz|<X
doTightChi2 = cms.bool( True ), # chi2dof < 5 for tracks with PT > 20
coneSize=cms.double(0.4), #cone size for anti-kt fast jet
displaced = cms.bool(True), # use prompt/displaced tracks
selectTrkMatchGenTight=cms.bool(True),
selectTrkMatchGenLoose=cms.bool(False),
selectTrkMatchGenOrPU=cms.bool(False)
)
| python |
import wikipedia
while True:
ans = input("Question: ")
wikipedia.set_lang("es")
    print(wikipedia.summary(ans, sentences=2))
| python |
import json
import sqlite3
#Initiating the database
connection=sqlite3.connect(database='roaster_db.sqlite')
curr=connection.cursor()#Cursor initiated
#Creating tables for the database
# Do some setup
curr.executescript('''
DROP TABLE IF EXISTS User;
DROP TABLE IF EXISTS Member;
DROP TABLE IF EXISTS Course;
CREATE TABLE User (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Course (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
title TEXT UNIQUE
);
CREATE TABLE Member (
user_id INTEGER,
course_id INTEGER,
role INTEGER,
PRIMARY KEY (user_id, course_id)
)
''')
#Now reading the json file
filename=open('roster_data.json')#File opened
raw_data=filename.read()#Reading the file
dataset=json.loads(raw_data)#Now loaded the json data it looks similar to simple dataset
for element in dataset:
name=element[0]
title=element[1]
role=element[2]
curr.execute('insert or ignore into User(name) values(?)',(name,))
user_id=curr.execute('select id from User where name=?',(name,)).fetchone()[0]#Extracting user id
curr.execute('insert or ignore into Course(title) values(?)',(title,))
c_id=curr.execute('select id from Course where title=?',(title,)).fetchone()[0]
curr.execute('insert or ignore into Member(user_id,course_id,role) values(?,?,?)',(user_id,c_id,role))
connection.commit()
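# Optional sanity check (not part of the original script): list a few
# enrollments by joining the three tables.
for row in curr.execute('''SELECT User.name, Course.title, Member.role
    FROM Member JOIN User ON Member.user_id = User.id
    JOIN Course ON Member.course_id = Course.id LIMIT 5'''):
    print(row)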
| python |
import glob
import os
import json
import dateutil.parser
import datetime
import re
COMPLETE_NUM_ACTIONS=18
TECHNICAL_DIFFICULTIES = '7h9r8g p964wg jcqf4w 9qxf5g'.split()
# Also exclude this person, who wrote about things other than restaurants.
TECHNICAL_DIFFICULTIES.append('49g68p')
INCOMPLETE_BUT_OK = 'hfj33r'.split()
def get_log_data(log_file, earliest):
    size = os.path.getsize(log_file)
meta = None
num_nexts = 0
with open(log_file) as f:
for idx, line in enumerate(f):
if idx > 50 and meta is None:
return
line = json.loads(line)
if line.get('type') == 'next' or line.get('externalAction') == 'completeSurvey':
num_nexts += 1
elif line.get('type') == 'externalAction':
timestamp = dateutil.parser.parse(line['timestamp'])
if timestamp < earliest:
return
match = re.match(r'c=(\w+)&p=(\d+)', line['externalAction'])
if not match:
continue
config, pid = match.groups()
meta = dict(timestamp=timestamp, config=config, pid=int(pid), participant_id=line['participant_id'], size=size)
if meta:
return dict(meta, num_nexts=num_nexts)
earliest = datetime.datetime(2017, 9, 1)
log_files = []
for log_file in glob.glob('logs/*.jsonl'):
data = get_log_data(log_file, earliest)
if data is not None:
print(data)
log_files.append(data)
import toolz
participants = []
for pid, group in toolz.groupby('pid', log_files).items():
participants.append(max(group, key=lambda e: e['size']))
for participant in participants:
participant['complete'] = (
participant['num_nexts'] == COMPLETE_NUM_ACTIONS
or participant['participant_id'] in INCOMPLETE_BUT_OK)
# For payment:
paid_pids = {int(line.strip()) for line in open('sona-paid.txt')}
participants.sort(key=lambda x: x['pid'])
not_yet_paid = []
for participant in participants:
if participant['pid'] not in paid_pids:
not_yet_paid.append(participant)
assert len(not_yet_paid) + len(paid_pids) == len(participants)
# Dump a CSV by Sona participant id for those we haven't paid who are complete...
print("Complete and not yet paid:")
print('\n'.join(
'{pid},{participant_id}'.format(**participant)
for participant in not_yet_paid
if participant['complete']))
print("\nIncomplete and not yet paid:")
print('\n'.join(
'{pid},{participant_id},{num_nexts}'.format(**participant)
for participant in not_yet_paid
if not participant['complete']))
# For analysis:
completed_participants = [
p for p in participants
if p['participant_id'] not in TECHNICAL_DIFFICULTIES
and p['complete']]
# Dump a list of participant_ids
print()
completed_participants.sort(key=lambda x: x['timestamp'])
print(len(completed_participants))
print(' '.join(participant['participant_id'] for participant in completed_participants))
| python |
print('='*8, 'Car Rental', '='*8)
d = int(input('For how many days was the car rented? '))
km = float(input('How many kilometers were driven? '))
pa = 60*d + 0.15*km
print('''The rental of this car for {} days and {:.2f}km driven will cost:
{}R${:.2f}{}.'''.format(d, km, '\033[32m', pa, '\033[m'))
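# Worked example: 3 days and 200.0 km give 60*3 + 0.15*200 = 180 + 30,
# i.e. R$210.00.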
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains custom widgets to handle file/folder browser related tasks
"""
from __future__ import print_function, division, absolute_import
import os
import sys
import subprocess
from Qt.QtCore import Signal, Property, QSize
from Qt.QtWidgets import QSizePolicy, QFileDialog
from tpDcc.libs.qt.widgets import buttons
def browse_file(self):
filter_list = 'File({})'.format(' '.join(['*' + e for e in self.filters])) if self.filters else 'Any File(*)'
if self.multiple:
r_files, _ = QFileDialog.getOpenFileNames(self, 'Browse Files', self.path, filter_list)
if r_files:
self.filesChanged.emit(r_files)
self.path = r_files[0]
else:
r_file, _ = QFileDialog.getOpenFileName(self, 'Browse File', self.path, filter_list)
if r_file:
self.fileChanged.emit(r_file)
self.path = r_file
def browse_folder(self):
r_folder = QFileDialog.getExistingDirectory(self, 'Browse Folder', self.path)
if not r_folder:
return
if self.multiple:
self.foldersChanged.emit([r_folder])
else:
self.folderChanged.emit(r_folder)
self.path = r_folder
def save_file(self):
filter_list = 'File({})'.format(' '.join(['*' + e for e in self.filters])) if self.filters else 'Any File(*)'
r_file, _ = QFileDialog.getSaveFileName(self, 'Save File', self.path, filter_list)
if not r_file:
return
self.fileChanged.emit(r_file)
self.path = r_file
class ClickBrowserFileButton(buttons.BaseButton, object):
fileChanged = Signal(str)
filesChanged = Signal(list)
_on_browse_file = browse_file
def __init__(self, text='Browse', multiple=False, parent=None):
super(ClickBrowserFileButton, self).__init__(text=text, parent=parent)
self._path = None
self._multiple = multiple
self._filters = list()
self.setToolTip('Click to browse file')
self.clicked.connect(self._on_browse_file)
def _get_filters(self):
"""
Returns browse filters
:return: list(str)
"""
return self._filters
def _set_filters(self, value):
"""
Sets browse filters
:param value: list(str)
"""
self._filters = value
def _get_path(self):
"""
Returns last browse file path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
Returns whether or not browse can select multiple files
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
Sets whether or not browse can select multiple files
:param flag: bool
"""
self._multiple = flag
filters = Property(list, _get_filters, _set_filters)
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
class ClickBrowserFolderButton(buttons.BaseButton, object):
folderChanged = Signal(str)
foldersChanged = Signal(list)
_on_browse_folder = browse_folder
def __init__(self, text='', multiple=False, parent=None):
super(ClickBrowserFolderButton, self).__init__(text=text, parent=parent)
self._path = None
self._multiple = multiple
self.setToolTip('Click to browse folder')
self.clicked.connect(self._on_browse_folder)
def _get_path(self):
"""
        Returns last browsed folder path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
        Returns whether or not browse can select multiple folders
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
        Sets whether or not browse can select multiple folders
:param flag: bool
"""
self._multiple = flag
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
class ClickBrowserFileToolButton(buttons.BaseToolButton, object):
fileChanged = Signal(str)
filesChanged = Signal(list)
_on_browse_file = browse_file
def __init__(self, multiple=False, parent=None):
super(ClickBrowserFileToolButton, self).__init__(parent=parent)
self._path = None
self._multiple = multiple
self._filters = list()
self.image('folder')
self.icon_only()
self.setToolTip('Click to browse file')
self.clicked.connect(self._on_browse_file)
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
def _get_filters(self):
"""
Returns browse filters
:return: list(str)
"""
return self._filters
def _set_filters(self, value):
"""
Sets browse filters
:param value: list(str)
"""
self._filters = value
def _get_path(self):
"""
Returns last browse file path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
Returns whether or not browse can select multiple files
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
Sets whether or not browse can select multiple files
:param flag: bool
"""
self._multiple = flag
filters = Property(list, _get_filters, _set_filters)
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
# =================================================================================================================
# BASE
# =================================================================================================================
def set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self.path = value
class ClickSaveFileToolButton(buttons.BaseToolButton, object):
fileChanged = Signal(str)
    _on_save_file = save_file
def __init__(self, multiple=False, parent=None):
super(ClickSaveFileToolButton, self).__init__(parent=parent)
self._path = None
self._multiple = multiple
self._filters = list()
self.image('save')
self.icon_only()
self.setToolTip('Click to save file')
        self.clicked.connect(self._on_save_file)
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
def _get_filters(self):
"""
Returns browse filters
:return: list(str)
"""
return self._filters
def _set_filters(self, value):
"""
Sets browse filters
:param value: list(str)
"""
self._filters = value
def _get_path(self):
"""
Returns last browse file path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
filters = Property(list, _get_filters, _set_filters)
path = Property(str, _get_path, _set_path)
# =================================================================================================================
# BASE
# =================================================================================================================
def set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self.path = value
# @mixin.property_mixin
class ClickBrowserFolderToolButton(buttons.BaseToolButton, object):
folderChanged = Signal(str)
foldersChanged = Signal(list)
_on_browse_folder = browse_folder
def __init__(self, multiple=False, parent=None):
super(ClickBrowserFolderToolButton, self).__init__(parent=parent)
self._path = None
self._multiple = multiple
self.image('folder')
self.icon_only()
self.setToolTip('Click to browse folder')
self.clicked.connect(self._on_browse_folder)
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
def _get_path(self):
"""
        Returns last browsed folder path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
        Returns whether or not browse can select multiple folders
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
        Sets whether or not browse can select multiple folders
:param flag: bool
"""
self._multiple = flag
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
# =================================================================================================================
# BASE
# =================================================================================================================
def set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self.path = value
class DragFileButton(buttons.BaseToolButton, object):
fileChanged = Signal(str)
filesChanged = Signal(list)
_on_browse_file = browse_file
def __init__(self, text='', multiple=False, parent=None):
super(DragFileButton, self).__init__(parent=parent)
self._path = None
self._multiple = multiple
self._filters = list()
self.setAcceptDrops(True)
self.setMouseTracking(True)
self.text_under_icon()
self.setText(text)
self.theme_size = 60
self.image('attach')
self.setIconSize(QSize(60, 60))
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setToolTip('Click to browse file or drag file here')
self.clicked.connect(self._on_browse_file)
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
def _get_filters(self):
"""
Returns browse filters
:return: list(str)
"""
return self._filters
def _set_filters(self, value):
"""
Sets browse filters
:param value: list(str)
"""
self._filters = value
def _get_path(self):
"""
Returns last browse file path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
Returns whether or not browse can select multiple files
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
Sets whether or not browse can select multiple files
:param flag: bool
"""
self._multiple = flag
filters = Property(list, _get_filters, _set_filters)
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
# =================================================================================================================
# OVERRIDES
# =================================================================================================================
def dragEnterEvent(self, event):
"""
Overrides base QToolButton dragEnterEvent to validate dragged files
:param event: QDragEvent
"""
if event.mimeData().hasFormat("text/uri-list"):
file_list = self._get_valid_file_list(event.mimeData().urls())
count = len(file_list)
if count == 1 or (count > 1 and self._multiple):
event.acceptProposedAction()
return
def dropEvent(self, event):
"""
        Overrides base QToolButton dropEvent to accept dropped files
:param event: QDropEvent
"""
file_list = self._get_valid_file_list(event.mimeData().urls())
if self._multiple:
self.filesChanged.emit(file_list)
self.set_path(file_list)
else:
self.fileChanged.emit(file_list[0])
self.set_path(file_list[0])
# =================================================================================================================
# BASE
# =================================================================================================================
def get_path(self):
"""
Returns file path
:return: str
"""
return self._path
def set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self.path = value
# =================================================================================================================
# INTERNAL
# =================================================================================================================
def _get_valid_file_list(self, url_list):
"""
        Returns list of valid dropped files
:param url_list:
:return: list(str)
"""
file_list = list()
for url in url_list:
file_name = url.toLocalFile()
if sys.platform == 'darwin':
sub_process = subprocess.Popen(
'osascript -e \'get posix path of posix file \"file://{}\" -- kthxbai\''.format(file_name),
stdout=subprocess.PIPE, shell=True)
file_name = sub_process.communicate()[0].strip()
sub_process.wait()
if os.path.isfile(file_name):
if self.property('format'):
if os.path.splitext(file_name)[-1] in self.property('format'):
file_list.append(file_name)
else:
file_list.append(file_name)
return file_list
# @mixin.cursor_mixin
# @mixin.property_mixin
class DragFolderButton(buttons.BaseToolButton, object):
folderChanged = Signal(str)
foldersChanged = Signal(list)
_on_browse_folder = browse_folder
def __init__(self, multiple=False, parent=None):
super(DragFolderButton, self).__init__(parent=parent)
self._path = None
self._multiple = multiple
self.setAcceptDrops(True)
self.setMouseTracking(True)
self.text_under_icon()
self.theme_size = 60
self.image('folder')
self.setText('Click or drag folder here')
self.setIconSize(QSize(60, 60))
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setToolTip('Click to browse folder or drag folder here')
self.clicked.connect(self._on_browse_folder)
# =================================================================================================================
# PROPERTIES
# =================================================================================================================
def _get_path(self):
"""
        Returns last browsed folder path
:return: str
"""
return self._path
def _set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self._path = value
def _get_multiple(self):
"""
        Returns whether or not browse can select multiple folders
:return: bool
"""
return self._multiple
def _set_multiple(self, flag):
"""
        Sets whether or not browse can select multiple folders
:param flag: bool
"""
self._multiple = flag
path = Property(str, _get_path, _set_path)
multiple = Property(bool, _get_multiple, _set_multiple)
# =================================================================================================================
# OVERRIDES
# =================================================================================================================
def dragEnterEvent(self, event):
"""
Overrides base QToolButton dragEnterEvent to validate dragged files
:param event: QDragEvent
"""
if event.mimeData().hasFormat("text/uri-list"):
folder_list = [url.toLocalFile() for url in event.mimeData().urls() if os.path.isdir(url.toLocalFile())]
count = len(folder_list)
if count == 1 or (count > 1 and self._multiple):
event.acceptProposedAction()
return
def dropEvent(self, event):
"""
        Overrides base QToolButton dropEvent to accept dropped files
:param event: QDropEvent
"""
folder_list = [url.toLocalFile() for url in event.mimeData().urls() if os.path.isdir(url.toLocalFile())]
if self._multiple:
self.foldersChanged.emit(folder_list)
self.set_path(folder_list)
else:
self.folderChanged.emit(folder_list[0])
self.set_path(folder_list[0])
# =================================================================================================================
# BASE
# =================================================================================================================
def get_path(self):
"""
        Returns folder path
:return: str
"""
return self._path
def set_path(self, value):
"""
Sets browse start path
:param value: str
"""
self.path = value
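if __name__ == '__main__':
    # Minimal manual test -- a sketch, assuming a Qt binding is available
    # through the `Qt` shim imported above and that tpDcc is importable.
    from Qt.QtWidgets import QApplication
    app = QApplication(sys.argv)
    btn = ClickBrowserFileButton(text='Browse')
    btn.filters = ['.txt', '.json']
    btn.fileChanged.connect(print)
    btn.show()
    sys.exit(app.exec_())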
| python |
#!/usr/bin/env python
# encoding: utf-8
"""
tl_stock.py
Copyright (c) 2015 Rob Mason
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Twitter: @Teslaliving
Blog: http://teslaliving.net
Description:
Stock quote helper functions
"""
import urllib.request, urllib.parse, urllib.error
import json
import os
def get_stock_quote(stock, log):
log.debug("Get current stock quote for %s" % stock)
token = os.getenv("TL_IEXAPI_TOKEN")
data = urllib.request.urlopen(f"https://cloud.iexapis.com/stable/stock/{stock}/quote?token={token}").read()
results = json.loads(data)
if results:
quote = results['latestPrice']
else:
quote = None
return quote
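if __name__ == '__main__':
    # Example call -- a sketch: requires network access and the
    # TL_IEXAPI_TOKEN environment variable to hold a valid IEX Cloud token.
    import logging
    logging.basicConfig(level=logging.DEBUG)
    print(get_stock_quote("TSLA", logging.getLogger(__name__)))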
| python |
# -*- coding: utf-8 -*-
# Copyright 2017 Mobicage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.2@@
from framework.plugin_loader import Plugin, get_plugin
from plugins.veurne_trash.admin import StatsHandler
from plugins.veurne_trash.cron import BroadcastNotificationsHandler
from plugins.veurne_trash.rogerthat_callbacks import system_api_call
from framework.utils.plugins import Handler
from plugins.rogerthat_api.rogerthat_api_plugin import RogerthatApiPlugin
class VeurneTrashPlugin(Plugin):
def __init__(self, configuration):
super(VeurneTrashPlugin, self).__init__(configuration)
rogerthat_api_plugin = get_plugin('rogerthat_api')
assert isinstance(rogerthat_api_plugin, RogerthatApiPlugin)
rogerthat_api_plugin.subscribe('system.api_call', system_api_call)
def get_handlers(self, auth):
if auth == Handler.AUTH_ADMIN:
yield Handler(url='/admin/cron/notifications/broadcast', handler=BroadcastNotificationsHandler)
yield Handler(url='/admin/stats', handler=StatsHandler)
| python |
import sys
sys.path.append('../')
import jupman
import local
def add(x,y):
#jupman-raise
return x + y
#/jupman-raise
def sub(x,y):
return help_func(x,y)
#jupman-strip
# stripped stuff is not present in exercises
def help_func(x,y):
return x - y
#/jupman-strip
#jupman-purge
# purged stuff not present in exercises nor in solutions
def disappear(x):
return x
#/jupman-purge
# everything after next comment will be discarded
# write here
def f(x):
    return x + 1
| python |
import pytest
import os
from matplotlib.testing.compare import compare_images
gold = "testing/gold"
scratch = "testing/scratch"
def compare( a, b ):
results = compare_images( a, b, 1 )
return (results is None)
def test_cinema_image_compare():
    # create the scratch directory if it doesn't already exist
    os.makedirs(scratch, exist_ok=True)
assert compare( os.path.join(gold, "comparison", "000.png" ), os.path.join(gold, "comparison", "000.png" ) )
| python |
from cloudshell_power_lib.Orchestration import power_off_resources_in_sandbox
from cloudshell.workflow.orchestration.sandbox import Sandbox
from cloudshell.workflow.orchestration.teardown.default_teardown_orchestrator import DefaultTeardownWorkflow
import cloudshell.helpers.scripts.cloudshell_dev_helpers as dev_helpers
dev_helpers.attach_to_cloudshell()
sandbox = Sandbox()
DefaultTeardownWorkflow().register(sandbox)
sandbox.workflow.add_to_teardown(power_off_resources_in_sandbox, components=None)
sandbox.execute_teardown()
| python |
"""
The base fighter implementation
"""
from __future__ import absolute_import, print_function, division
from cagefight.cagefighter import CageFighter
import random
import math
class LightningFighter(CageFighter):
"""
Lightning ball wars fighter
"""
def __init__(self, world, fighterid):
self.world = world
self.fighterid = fighterid
self.posx = None
self.posy = None
self.size = 10
self.colour = CageFighter.colours[
fighterid % len(CageFighter.colours)
]
self.power = self.world.fighter_power
self.cooldown = 0
self._name = 'lightning_fighter_%s' % (
self.fighterid,
)
@property
def canfire(self):
"""
Check if the gun is cool and we have the power to fire
"""
        return self.cooldown == 0 and self.power > 30
def start(self):
"""
Called prior to the first render to prepare the starting state.
"""
        hw = self.world.width // 2
        qw = self.world.width // 4
        hh = self.world.height // 2
        qh = self.world.height // 4
self.posx = (random.randint(qw, qw + hw) + hw) % self.world.width
self.posy = (random.randint(qh, qh + hh) + hh) % self.world.height
def next(self, filepath):
"""
Progress the game state to the next tick.
"""
if self.power <= 0:
# dead
return
details = self.get_instructions(filepath)
if 'name' in details:
self._name = '%s_%s' % (
details['name'],
self.fighterid,
)
if self.cooldown > 0:
self.cooldown -= 1
if 'fire' in details:
if self.canfire:
self.power -= 30
self.cooldown = 10
radians = details['fire']
proj = self.world.get_projectile()
proj.owner = self.fighterid
proj.posx = self.posx
proj.posy = self.posy
proj.deltax = math.cos(radians) * self.world.projectile_speed
proj.deltay = math.sin(radians) * self.world.projectile_speed
self.world.add_projectile(proj)
elif 'move' in details:
radians = details['move']
self.posx += math.cos(radians) * self.world.fighter_speed
self.posy += math.sin(radians) * self.world.fighter_speed
def save(self):
"""
Serialize current position
"""
return {
'x': self.posx,
'y': self.posy,
'power': self.power,
'canfire': self.canfire,
'cooldown': self.cooldown,
}
def save_view(self):
"""
In addition to own details add details of food and players that are in sight
"""
result = self.save()
result['food'] = [
food for food in self.world.food if (
(food['x']- self.posx) ** 2
+ (food['y'] - self.posy) ** 2
) < self.world.view_range ** 2
]
result['enemy'] = [
{
'x': fighter.posx,
'y': fighter.posy,
} for fighter in self.world.fighters if (
fighter.fighterid != self.fighterid
and (
(fighter.posx - self.posx) ** 2
+ (fighter.posy - self.posy) ** 2
) < self.world.view_range ** 2
and fighter.power > 0
)
]
return result
def load(self, jsonobj):
"""
Deserialize current position
"""
self.posx = jsonobj['x']
self.posy = jsonobj['y']
self.power = jsonobj['power']
self.cooldown = jsonobj['cooldown']
def name(self):
"""
name fighters
"""
return self._name
def text_result(self):
"""
fighter result
"""
return str(self.power)
def render(self, im):
"""
Render the display to an image for the provided game mp4 output
"""
if self.power <= 0:
# dead
return
hs = self.size / 2
self.world.draw_ball(im, self.posx - hs, self.posy - hs, self.size, self.colour)
def collision(self, x, y):
"""
Determine if a collision with the specified position has occurred.
"""
return self.world.collision(x, y, self.posx, self.posy, self.size)
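# The per-tick instruction files read by get_instructions() map to a dict
# with optional 'name', 'fire' and 'move' keys, where 'fire' and 'move'
# are angles in radians -- e.g. (a hypothetical instruction):
#     {"name": "chaser", "move": 3.14159}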
| python |
from pydub import AudioSegment
import webrtcvad
import numpy as np
import speechpy
import torch
import torch.autograd as grad
import torch.nn.functional as F
from model.hparam import hp
import os
from model.frame import Frame
def get_logmel_fb(segment, len_window=25, stride=10, filters=40):
'''
    Gives the log mel filter bank features for each utterance in an audio file
:param segment: a pydub AudioSegment object
:param len_window: the length of each sliding window for the features to be extracted from
:param stride: the non-overlapping part for each window
:param filters: the number of filters (features)
:returns:
the logmel fb featues
:type: numpy.ndarray
'''
sample_rate = segment.frame_rate
signals = np.array(segment.get_array_of_samples())
#converting to ms
len_window /= 1000
stride /= 1000
if len(signals.shape) != 1:
signals = signals[:,0] #Getting only the first channel data
return speechpy.feature.lmfe(signals,sample_rate,frame_length=len_window,frame_stride=stride,num_filters=filters)
def adjust_file(audiofile):
'''
Adjusts an audiofile for vad and network
:param audiofile: an audio file
:type audiofile: pydub.AudioSegment
:returns:
new, Adjusted audio file
:type: pydub.AudioSegment
'''
audiofile = audiofile.set_frame_rate(16000)
audiofile = audiofile.set_channels(1)
audiofile.export('tmp.wav', format='wav')
audiofile = AudioSegment.from_file('tmp.wav')
os.remove('tmp.wav')
return audiofile
def vad(audiofile, frame_len=hp.diarization.frame_len, max_frame_len=hp.diarization.max_frame_len ,agressiveness=1):
'''
    Performs Voice Activity Detection on an audio file
    :param audiofile: the audio file to perform the vad on
    :type audiofile: pydub.AudioSegment
    :param agressiveness: the aggressiveness for the vad (from 1 - 3)
    :returns: a list of Frame objects holding the detected voice segments and their timestamps
'''
vad = webrtcvad.Vad()
sample_rate = audiofile.frame_rate
speech = [Frame()]
vad.set_mode(agressiveness) #Agressiveness of the vad
for ts,frame in enumerate(audiofile[::frame_len]):
if len(frame) == frame_len:
if vad.is_speech(frame.raw_data, sample_rate):
if len(speech[-1]) + frame_len <= max_frame_len:
speech[-1] += Frame(ts * frame_len,(ts+1) * frame_len, frame)
else:
speech.append(Frame())
elif len(speech[-1]) != 0:
speech.append(Frame())
# handling an empty frame at the end
if len(speech[-1]) == 0:
speech.pop()
return speech
def get_full_audio(frames):
'''
Gets the concated audio from frames
:param frames: the frames to concat
:type frames: list
:returns: the concated frames
'''
full_audio = AudioSegment.empty()
for f in frames:
full_audio += f
return full_audio
####--- GE2E loss utils ---####
def get_centroids(embeddings):
'''
    Calculates the centroid of the embeddings belonging to each speaker
:param embeddings: the embeddings (d-vectors) of each speaker
:type embeddings: np.ndarray with shape of N x M x F (num_speakers,num_utterances,num_features)
:returns:
the centroids of each speaker (from a pool of utterances)
:type: np.ndarray with shape of N x F (num_speakers,num_features)
'''
centroids = []
for speaker in embeddings:
        centroid = speaker.sum(dim=0) / len(speaker)  # mean over the speaker's utterance embeddings
centroids.append(centroid)
centroids = torch.stack(centroids)
return centroids
def get_centroid(embeddings, speaker_num, utterance_num):
'''
    Calculates the centroid of a pool of embeddings for a specific speaker.
The calculation ignores the embedding which is the last output of the network
    :param embeddings: all of the embeddings outputted by the network
    :type embeddings: np.ndarray with shape of N x M x F (num_speakers,num_utterances,num_features)
    :param speaker_num: the index of the speaker whose last embedding should be excluded
    :param utterance_num: the index of the utterance whose embedding should be excluded
'''
centroid = 0
for utterance_id, utterance in enumerate(embeddings[speaker_num]):
if utterance_id == utterance_num:
continue
centroid = centroid + utterance
centroid = centroid/(len(embeddings[speaker_num])-1)
return centroid
def get_cossim(embeddings, centroids):
'''
Calculates the similarity matrix as defined in the article
:param embeddings:
:type embeddings:
:param centroids:
:type centroids:
:returns:
the similarity matrix
:type: np.ndarray with shape of N x M x C (num_speakers, num_utterances, num_centroids)
'''
cossim = torch.zeros(embeddings.size(0),embeddings.size(1),centroids.size(0))
for speaker_num, speaker in enumerate(embeddings):
for utterance_num, utterance in enumerate(speaker):
for centroid_num, centroid in enumerate(centroids):
if speaker_num == centroid_num:
centroid = get_centroid(embeddings, speaker_num, utterance_num)
output = F.cosine_similarity(utterance,centroid,dim=0)+1e-6
cossim[speaker_num][utterance_num][centroid_num] = output
return cossim
def calc_loss(sim_matrix):
'''
Calculates the GE2E loss from the similarity matrix (performes softmax on each cell in the matrix)
:param sim_matrix: the similarity matrix between speakers d-vectors and their centroids
:type sim_matrix: np.ndarray with shape of N x M x C (num_speakers, num_utterances, num_centroids)
:returns:
the total loss and the loss per embedding
:type loss: float
:type per_embedding_loss: np.ndarray of shape N x M (num_speakers,num_utterances)
'''
per_embedding_loss = torch.zeros(sim_matrix.size(0), sim_matrix.size(1))
for j in range(len(sim_matrix)):
for i in range(sim_matrix.size(1)):
per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - ((torch.exp(sim_matrix[j][i]).sum()+1e-6).log_()))
#loss with sigmoid
#maxargs = torch.argsort(torch.sigmoid(sim_matrix[j][i]), dim=0, descending=True)
#per_embedding_loss[j][i] = 1 - torch.sigmoid(sim_matrix[j][i][j]) + torch.sigmoid(sim_matrix[j][i])[maxargs[1] if maxargs[0] == j else maxargs[0]].item()
#maybe better loss than the current one
#per_embedding_loss[j][i] = -(sim_matrix[j][i][j] - torch.logsumexp(sim_matrix[j][i].float(), 0))
loss = per_embedding_loss.sum()
    return loss, per_embedding_loss
# Tiny shape check -- a sketch with random embeddings standing in for real
# d-vectors (4 speakers x 5 utterances x 16 features):
if __name__ == '__main__':
    emb = torch.rand(4, 5, 16)
    sim = get_cossim(emb, get_centroids(emb))
    total_loss, _ = calc_loss(sim)
    print(sim.shape, total_loss.item())
| python |
"""
Silly placeholder file for the template.
"""
def hello() -> str:
return "Hello {{cookiecutter.project_slug}}"
| python |
"""
input:
1
5
1 5 2 3 4
output:
12
"""
def solve(N, a):
    # Greedy from the right: clip each earlier element down to its successor,
    # so the amounts kept form a non-decreasing sequence; runs in O(N).
    res = 0
    for i in range(N - 1, 0, -1):
        if a[i] < a[i - 1]:
            a[i - 1] = a[i]
        res += a[i]
    return res + a[0]
T = int(input())
for _ in range(T):
N = int(input())
a = list(map(int, input().split()))
out_ = solve(N, a)
print(out_)
| python |
from streamsvg import Drawing
s = Drawing()
s.addNode("a")
s.addNode("b", [(0,4), (5,10)])
s.addNode("c", [(4,9)])
s.addNode("d", [(1,3)])
s.addLink("a", "b", 2, 2, color='blue', width=3)
s.addLink("b", "d", 2, 2, color='blue', width=3)
s.addLink("a", "c", 5, 5, color='blue', width=3)
s.addLink("b", "c", 6, 6, color='blue', width=3)
s.addLink("b", "c", 7, 7, color='blue', width=3)
s.addLink("b", "c", 8, 8, color='blue', width=3)
s.addLink("a", "b", 8, 8, color='blue', width=3)
s.addLink("a", "b", 1, 3)
s.addLink("b", "d", 2, 3)
s.addLink("a", "c", 4.5, 7.5, height=0.40)
s.addLink("a", "b", 7, 8)
s.addLink("b", "c", 6, 9)
s.addTimeNodeMark(2, "b", color="#FF9896", width=2)
s.addNodeCluster("a", [(2,6),(8,9)], color='blue', width=5)
s.addNodeCluster("b", [(6,7),(8,10)], color='blue', width=5)
s.addNodeCluster("c", [(5,6),(7,9)], color='blue', width=5)
s.addNodeCluster("d", [(2,3)], color='blue', width=5)
s.addTimeLine(ticks=2)
| python |
# -*- coding: utf-8 -*-
#
# bifacial_radiance documentation build configuration file, created by
# sphinx-quickstart on Tuesday Sep 24 18:48:33 2019.
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
import sys
import os
"""
# Mock modules so RTD works
try:
from mock import Mock as MagicMock
except ImportError:
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = []
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
"""
import pandas as pd
pd.show_versions()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../../../'))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx.ext.doctest',
#'autoapi.extension',
'sphinx.ext.todo'
]
# Document Python Code
#autodoc_mock_imports = ['bs4', 'requests']
#autoapi_type = 'python'
#autoapi_dirs = '../../../bifacial_radiance'
napoleon_use_rtype = False # group rtype on same line together with return
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bifacial_radiance'
copyright = u'2019, NREL'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import bifacial_radiance
# The short X.Y version.
version = '%s' % (bifacial_radiance.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['whatsnew/*', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'bifacial_radiancedoc'
# A workaround for the responsive tables always having annoying scrollbars.
def setup(app):
app.add_css_file("no_scrollbars.css")
"""
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bifacial_radiance.tex', u'bifacial_radiance\\_Python Documentation',
u'NREL, github contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
"""
# extlinks alias
extlinks = {'issue': ('https://github.com/NREL/bifacial_radiance/issues/%s', 'GH'),
'pull': ('https://github.com/NREL/bifacial_radiance/pull/%s', 'GH'),
'wiki': ('https://github.com/NREL/bifacial_radiance/wiki/%s', 'wiki '),
'doi': ('http://dx.doi.org/%s', 'DOI: '),
'ghuser': ('https://github.com/%s', '@')}
"""
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bifacial_radiance', u'bifacial_radiance Documentation',
[u'NREL, github contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bifacial_radiance', u'bifacial_radiance Documentation',
u'NREL, github contributors', 'bifacial_radiance', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.7/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
nbsphinx_allow_errors = True
ipython_warning_is_error = False
""" | python |
import os
from oletools.olevba3 import VBA_Parser
# Set this to True if you would like to keep "Attribute VB_Name"
KEEP_NAME = False
def parse(workbook_path):
vba_path = workbook_path + '.vba'
vba_parser = VBA_Parser(workbook_path)
vba_modules = vba_parser.extract_all_macros() if vba_parser.detect_vba_macros() else []
for _, _, filename, content in vba_modules:
lines = []
if '\r\n' in content:
lines = content.split('\r\n')
else:
lines = content.split('\n')
if lines:
content = []
for line in lines:
if line.startswith('Attribute') and 'VB_' in line:
if 'VB_Name' in line and KEEP_NAME:
content.append(line)
else:
content.append(line)
if content and content[-1] == '':
content.pop(len(content)-1)
non_empty_lines_of_code = len([c for c in content if c])
if non_empty_lines_of_code > 0:
if not os.path.exists(os.path.join(vba_path)):
os.makedirs(vba_path)
with open(os.path.join(vba_path, filename), 'w', encoding='utf-8') as f:
f.write('\n'.join(content))
if __name__ == '__main__':
parse('xl-ese.xlsm')
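    # Usage note: running this writes each extracted VBA module to
    # '<workbook>.vba/<module name>' next to the workbook, so the call
    # above produces files under 'xl-ese.xlsm.vba/'.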
| python |
import django_filters
from django_filters import DateFilter, CharFilter
from .models import *
class Client_Filter(django_filters.FilterSet):
class Meta:
model = Client
fields = [
'name',
'address',
'phone_no'
]
class Staff_Filter(django_filters.FilterSet):
class Meta:
model = Client
fields = [
'name',
'address',
'phone_no'
]
class Visitor_Filter(django_filters.FilterSet):
class Meta:
model = Client
fields = [
'name',
'address',
'phone_no'
]
| python |
from airypi.remote_obj import RemoteObj
from flask import session, request
from airypi import utils
import json
import gpio
from airypi.callback_dict import CallbackDict
from airypi import event_loop
class Device:
RPI = 'RASPBERRY_PI'
ANDROID = 'ANDROID'
handler_for_type = {}
event_loop_for_type = {'RASPBERRY_PI': event_loop.RPiEventLoop, 'ANDROID': event_loop.AndroidEventLoop}
@staticmethod
def id():
return utils.get_hidden_session('device')['id']
'''@staticmethod
def register_for(device_type):
def real_register_for(cls):
def wrapper(*args):
print device_type
Device.handler_for_type[device_type] = cls
for method in cls.__dict__.iteritems():
if hasattr(method, "device_event"):
event_loop_class = Device.event_loop_for_type[device_type]
event_loop_class.callback_dict[method.event_name] = method
return wrapper
return real_register_for'''
class register_for(object):
def __init__(self, device_type):
self.device_type = device_type
def __call__(self, cls):
Device.handler_for_type[self.device_type] = cls
        for method in cls.__dict__.itervalues():
if hasattr(method, "device_event"):
event_loop_class = Device.event_loop_for_type[self.device_type]
event_loop_class.callback_dict[method.event_name] = method
return cls
@staticmethod
    def event(event_name, func):
        func.event_name = event_name
        func.device_event = True
        return func
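# Intended wiring -- a sketch with a hypothetical handler class:
#
# @Device.register_for(Device.RPI)
# class PiHandler(object):
#     def on_button(self):
#         print 'pressed'
#     on_button = Device.event('button_pressed', on_button)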
| python |
from math import exp
import numpy as np
import random
import time
class AnnealingSolver:
# 3*81: rows, cols, 3x3
optimal_energy = -243
# marks original values
def get_fixed_positions(self, sudoku):
original = []
for row in sudoku:
original.append([-1 if x > 0 else 0 for x in row])
return np.array(original)
# initial step to fill empty slots with random nr
def fill_empty_with_random(self, sudoku, fixed_positions):
# get count of missing values
vals = [0, 0, 0, 0, 0, 0, 0, 0, 0]
for i, row in enumerate(sudoku):
for j, col in enumerate(row):
if sudoku[i][j] != 0:
vals[sudoku[i][j] - 1] += 1
missing_vals = [9-x for x in vals]
# fill missing values with missing_vals randomly
for i, row in enumerate(sudoku):
for j, col in enumerate(row):
if fixed_positions[i][j] != -1:
while True:
rand = random.randint(0, 8)
if missing_vals[rand] != 0:
sudoku[i][j] = rand + 1
missing_vals[rand] += -1
break
# calculate fitness
def calc_energy(self, sudoku):
energy = 0
for i, row in enumerate(sudoku):
energy += len(np.unique(sudoku[i]))
# columns
transposed = sudoku.transpose()
for i, col in enumerate(transposed):
energy += len(np.unique(transposed[i]))
        # every 3x3 sub-grid
        for from_row in range(0, 9, 3):
            for from_col in range(0, 9, 3):
                sub_arr = sudoku[from_row:from_row + 3, from_col:from_col + 3]
                energy += len(np.unique(sub_arr))
return -energy
# switch places of 2 random numbers
def create_random_neighbor(self, sudoku, fixed_positions):
while True:
i1 = random.randint(0, 8)
j1 = random.randint(0, 8)
i2 = random.randint(0, 8)
j2 = random.randint(0, 8)
if fixed_positions[i1][j1] == -1 or fixed_positions[i2][j2] == -1:
continue
v1 = sudoku[i1][j1]
v2 = sudoku[i2][j2]
if v1 == v2:
continue
sudoku[i1][j1] = v2
sudoku[i2][j2] = v1
break
return sudoku
def solve(self, sudoku):
original_sudoku = sudoku.copy()
fixed_positions = self.get_fixed_positions(original_sudoku)
current_best = sudoku.copy()
self.fill_empty_with_random(current_best, fixed_positions)
max_temp = 200
for temp in range(max_temp, 0, -1):
for epoch in range(1000):
energy_current = self.calc_energy(current_best)
next_neigbhour = self.create_random_neighbor(current_best.copy(), fixed_positions)
energy_new = self.calc_energy(next_neigbhour)
# found the solution
if energy_new == self.optimal_energy:
return next_neigbhour
delta_energy = energy_current - energy_new
r = random.random()
if delta_energy > 0:
current_best = next_neigbhour.copy()
elif delta_energy != 0 and exp((delta_energy*max_temp)/(temp)) > r:
current_best = next_neigbhour.copy()
# didn't solve
return original_sudoku
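if __name__ == '__main__':
    # Smoke test -- a sketch using a classic puzzle (0 marks an empty cell).
    # Note: with 200 temperature steps x 1000 epochs this can take a while
    # and, being stochastic, may return the original grid on a failed run.
    puzzle = np.array([
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9],
    ])
    print(AnnealingSolver().solve(puzzle))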
| python |
""" UDF is called user define function
UDF is very useful when you want to transform your data frame, and there is no pre-defined
Spark sql functions already available.
To define a spark udf, you have three options:
1. use pyspark.sql.functions.udf, this works for select, withColumn.
udf(lambda_function, return_type). The default return_type is String. If you omit
return_type, the value returned by lambda function will be convert it to String.
2. use @udf(returnType=<>) annotation, this works for select, withColumn.
3. use spark.udf.register, this works for sql.
But, remember two important things about UDF
- UDF is not optimized at all. So you can quickly come across performance issues.
- UDF need to treat null value explicitly.
"""
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType, StringType
def name_count(name: str) -> int:
return len(name)
# The pyspark.sql.functions.udf function takes a python function, and it returns
# org.apache.spark.sql.expressions.UserDefinedFunction class object. In our case
# it's Name_Count_UDF. And this object can be used inside select or withColumn.
Name_Count_UDF = udf(lambda x: name_count(x), IntegerType())
Null_Safe_Name_Count_UDF = udf(lambda x: name_count(x) if not (x is None) else None, IntegerType())
# We can also use @udf to define a spark udf.
@udf(returnType=StringType())
def add_hello(name: str) -> str:
return "{} {}".format("hello", name)
""" Exp1,
In this example, we show how to use udf inside a select
"""
def exp1(df: DataFrame):
df1 = df.select("name", Name_Count_UDF("name").alias("length"), add_hello("name").alias("msg"))
print("Exp1 udf in select")
df1.printSchema()
df1.show()
""" Exp2,
In this example, we show how to use udf inside a withColumn
"""
def exp2(df: DataFrame):
df1 = df.withColumn("length", Name_Count_UDF("name")).withColumn("msg", add_hello("name"))
print("Exp2 udf in withColumn")
df1.printSchema()
df1.show()
""" Exp3
In this example, we show how to register and use udf inside sql
"""
def exp3(spark: SparkSession, df: DataFrame):
# register the function for sql
spark.udf.register("Count_Name_UDF", name_count, IntegerType())
df.createOrReplaceTempView("name_table")
    df1 = spark.sql("select name, Count_Name_UDF(name) as length from name_table")
print("Exp3 udf in sql statement: ")
df1.show()
def exp4(spark: SparkSession):
data1 = [("haha ",),
("toto",),
("titi",),
(None,)]
df1 = spark.createDataFrame(data1, schema=['name'])
print("Source data frame: ")
df1.printSchema()
df1.show()
# try to replace Null_Safe_Name_Count_UDF by Name_Count_UDF, and see what happens
#
try:
df1.select("name", Null_Safe_Name_Count_UDF("name")).show()
except Exception as e:
print("udf failed error msg: {}".format(e))
def exp5():
pass
def main():
spark = SparkSession.builder.master("local[2]").appName("UdfExample").getOrCreate()
data = [("haha ",),
("toto",),
("titi",)]
df = spark.createDataFrame(data, schema=['name'])
print("Source data frame: ")
df.printSchema()
df.show()
# exp1
# exp1(df)
# exp2
# exp2(df)
# exp3
# exp3(spark, df)
# exp4
exp4(spark)
if __name__ == "__main__":
main()
| python |
from flask_restful import fields
from app import db
from . import User
class PlanEntry(db.Model):
eid = db.Column(db.Integer, primary_key=True)
plan_id = db.Column(db.Integer, db.ForeignKey('plan.pid'))
plan = db.relationship('Plan', back_populates='entries')
timestamp = db.Column(db.Time)
mandatory = db.Column(db.Boolean, default=True)
marshal_fields = {
'eid': fields.Integer(default=0),
'timestamp': fields.String,
'mandatory': fields.Boolean
}
class Plan(db.Model):
pid = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.uid'))
user = db.relationship('User', back_populates='plans')
entries = db.relationship('PlanEntry', back_populates='plan')
marshal_fields = {
'pid': fields.Integer(default=0),
'entries': fields.Nested(PlanEntry.marshal_fields)
}
| python |
import pyttsx3
import time
CALLS = {
"F": "Step Forwards",
"B": "Step Bak",
"L": "Step Left",
"R": "Step Right",
"ROT": "About turn",
"CLAP": "Clapp"
}
class Caller:
def __init__(self):
self.engine = pyttsx3.init()
self.engine.setProperty("rate", 140)
def say_command(self, cmd):
call = CALLS.get(cmd, cmd)
t = time.time()
self.engine.say(call)
self.engine.runAndWait()
        # pace the routine: wait until 1.5 s after this call started
        time.sleep(max(0.0, t + 1.5 - time.time()))
def call(self, cmds):
for cmd in cmds:
self.say_command(cmd)
TEST_DANCE = [
"B",
"F",
"R",
"L",
"B",
"CLAP",
"ROT"
]
def test():
Caller().call(TEST_DANCE)
if __name__ == "__main__":
test()
| python |
from typing import List
from src import util
from PIL import Image, ImageDraw
from src.config import ConfigContentType
from .bounding_box import BoundingBox
from .effect_processor import EffectProcessor
from .text_procecssor import TextProcessor
from .shape_processor import ShapeProcessor
from src.font_scanner import FontLibrary
from src.image_scanner import ImageLibrary
def sprite_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary, font_library: FontLibrary) -> Image.Image:
sprite = im_library.get_random_sprite()
sprite_im = Image.open(im_library.get_filename(sprite)).convert('RGBA')
return sprite_im
def shape_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary, font_library: FontLibrary) -> Image.Image:
shape_im = Image.new('RGBA', (box.width, box.height), (0, 0, 0, 0))
sp = ShapeProcessor()
return sp.process_shape(content.shapes, shape_im)
def text_content(content: ConfigContentType, box: BoundingBox, im_library: ImageLibrary, font_library: FontLibrary) -> Image.Image:
text_im = Image.new('RGBA', (box.width, box.height), (0, 0, 0, 0))
tp = TextProcessor(font_library)
return tp.process_text(content, text_im)
supported_content = {
'sprite': sprite_content,
'shape': shape_content,
'text': text_content
}
class ContentProcessor:
im: Image
im_library: ImageLibrary
font_library: FontLibrary
bounding_box: BoundingBox
def __init__(self, im: Image, im_library: ImageLibrary, font_library: FontLibrary, bounding_box: BoundingBox):
self.im = im
self.im_library = im_library
self.font_library = font_library
self.bounding_box = bounding_box
def process_content(self, contents: List[ConfigContentType]) -> None:
for content in contents:
if util.should(content.chance) is True:
content_im = supported_content[content.type](content, self.bounding_box, self.im_library, self.font_library)
self.draw_content(content, content_im)
def draw_content(self, content: ConfigContentType, content_im: Image.Image) -> None:
ep = EffectProcessor()
content_im = ep.process_effects(content_im, content.effects)
box = self.bounding_box
mask_im = None
if util.should(content.draw_chances.clipping) is True:
mask_im = self.im.getchannel('A')
mask_dr = ImageDraw.Draw(mask_im)
mask_dr.rectangle([(0, 0), (mask_im.width - 1, mask_im.height - 1)])
mask_dr.rectangle([(box.x, box.y), (box.x2, box.y2)])
if util.should(content.draw_chances.resize) is True:
fit_mode = 'contain'
else:
fit_mode = content.fit
pos = util.determine_image_position(fit_mode, content_im.width, content_im.height, box.width, box.height)
resized = content_im.resize((pos[2], pos[3]), resample=Image.LANCZOS)
## self.im.paste(resized, box=(pos[0], pos[1]), mask=mask_im)
self.im.paste(resized, box=(pos[0] + self.bounding_box.x, pos[1] + self.bounding_box.y), mask=resized)
| python |
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
import os
import re
import xml.etree.ElementTree as ET
###############################################################################
# Notes to Self
###############################################################################
# Parser extracts the XML tree. Run type conversion and substitution for things
# that are constant (e.g. <arg name="a" value="1"> and "$(arg a)").
# For unknown stuff, store a pair (type, name) and add the attribute name to an
# *unknown* list in the parsed tag element.
# Parser should report true errors (e.g. "$(arg undeclared)").
# Later, analyser picks a tag, iterates over the *unknown* and injects
# configuration context to try to resolve the remaining expressions.
# If an expression cannot be resolved inside an "if" or "unless",
# the entity is created but marked as conditional.
# If an expression cannot be resolved for some other attribute,
# a configuration error is reported.
# Draft:
# - work on a copy, do not change original tree
# attributes = dict(tag.attributes)
# try:
# for key in tag.unknown:
# attributes[key] = resolve(attributes[key], configuration)
# configuration.register(...)
# except SubstitutionError as e:
# configuration.errors.append(...)
###############################################################################
# Substitution Expressions
###############################################################################
class UnresolvedValue(object):
def __init__(self):
# ----- parts is a list of strings and tuples, where the tuples
# represent the unknown bits (substitution command, value)
self.parts = []
def append(self, part):
assert isinstance(part, (basestring, tuple))
self.parts.append(part)
@property
def resolvable(self):
for part in self.parts:
if isinstance(part, tuple):
return False
return True
def try_convert(self, conversion = str):
if self.resolvable:
return conversion("".join(self.parts))
return self
def __repr__(self):
return self.__str__()
def __str__(self):
s = ""
for part in self.parts:
if isinstance(part, tuple):
s += "$(" + " ".join(part) + ")"
else:
s += part
return s
class SubstitutionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SubstitutionParser(object):
PATTERN = re.compile(r"\$\(([^$()]+?)\)")
ERROR_PATTERN = re.compile(r"\$\([^$()]*?\$[^$()]*?\)")
COMMANDS = ("find", "env", "optenv", "dirname", "anon", "arg", "eval")
def __init__(self, args = None, env = None, pkgs = None, anon = None,
dirname = None, pkg_depends = None, env_depends = None):
self.arguments = args if not args is None else {}
self.environment = env if not env is None else {}
self.packages = pkgs if not pkgs is None else {}
self.anonymous = anon if not anon is None else {}
self.dirname = dirname
self.pkg_depends = pkg_depends if not pkg_depends is None else set()
self.env_depends = env_depends if not env_depends is None else set()
def sub(self, value, conversion = str):
"""Resolve substitution arguments in the given string.
Return a literal value if resolution is possible.
Otherwise, return an UnresolvedValue instance.
"""
if value.startswith("$(eval ") and value.endswith(")"):
# eval has special handling in roslaunch
result = UnresolvedValue()
result.append(("eval", value[7:-1]))
return result
if self.ERROR_PATTERN.search(value):
raise SubstitutionError("'$' cannot appear within expression")
match = self.PATTERN.search(value)
if not match:
return self.convert_str(value, conversion)
result = UnresolvedValue()
rest = value
while match:
parts = [part for part in match.group(1).split() if part]
if not parts[0] in self.COMMANDS:
raise SubstitutionError("invalid command: " + parts[0])
prefix = rest[:match.start()]
if prefix:
result.append(prefix)
result.append(getattr(self, "_" + parts[0])(parts))
rest = rest[match.end():]
match = self.PATTERN.search(rest)
if rest:
result.append(rest)
return self.convert_unresolved(result, conversion)
def resolve(self, value, conversion = str, strict = False):
if not isinstance(value, UnresolvedValue):
return value
parts = []
for part in value.parts:
if isinstance(part, basestring):
parts.append(part)
else:
                assert isinstance(part, tuple)
                # bind to a fresh name so the 'value' parameter is not shadowed
                resolved = getattr(self, "_" + part[0])(part)
                if isinstance(resolved, tuple):
                    # a SubstitutionError here cannot be distinguished
                    # from one coming from the command handler above
                    if not strict:
                        return None
                    raise SubstitutionError("cannot resolve: " + str(resolved))
                parts.append(resolved)
return self.convert_str("".join(parts), conversion)
def to_bool(self, value):
if value is True or value == "1" or str(value).lower() == "true":
return True
if value is False or value == "0" or str(value).lower() == "false":
return False
raise SubstitutionError("invalid boolean value: " + value)
def to_float(self, value):
try:
return float(value)
except ValueError as e:
raise SubstitutionError("invalid number value: " + value)
def to_int(self, value):
try:
return int(value)
except ValueError as e:
raise SubstitutionError("invalid int value: " + value)
def convert_str(self, value, conversion):
if conversion == bool:
return self.to_bool(value)
if conversion == float:
return self.to_float(value)
if conversion == int:
return self.to_int(value)
return conversion(value)
def convert_unresolved(self, value, conversion):
if conversion == bool:
return value.try_convert(conversion = self.to_bool)
if conversion == float:
return value.try_convert(conversion = self.to_float)
if conversion == int:
return value.try_convert(conversion = self.to_int)
return value.try_convert(conversion = conversion)
def _find(self, parts):
if len(parts) != 2:
raise SubstitutionError("find takes exactly one argument")
name = parts[1]
self.pkg_depends.add(name)
try:
package = self.packages.get("package:" + name)
except KeyError:
package = None
if package:
if package.path:
return package.path
return ("find", name)
raise SubstitutionError("unknown package: " + name)
def _arg(self, parts):
if len(parts) != 2:
raise SubstitutionError("arg takes exactly one argument")
name = parts[1]
if name in self.arguments:
value = self.arguments[name]
if value is None or isinstance(value, UnresolvedValue):
return ("arg", name)
return value
raise SubstitutionError("undeclared arg: " + name)
def _anon(self, parts):
if len(parts) != 2:
raise SubstitutionError("anon takes exactly one argument")
name = parts[1]
if name in self.anonymous:
return self.anonymous[name]
value = self._anonymous_name(name)
self.anonymous[name] = value
return value
def _env(self, parts):
if len(parts) != 2:
raise SubstitutionError("env takes exactly one argument")
self.env_depends.add(parts[1])
return self.environment.get(parts[1], tuple(parts))
def _optenv(self, parts):
if len(parts) != 2 and len(parts) != 3:
raise SubstitutionError("optenv takes one or two arguments")
self.env_depends.add(parts[1])
return self.environment.get(parts[1], tuple(parts))
def _dirname(self, parts):
if len(parts) > 1:
raise SubstitutionError("dirname does not take arguments")
if self.dirname is None:
return ("dirname",)
return self.dirname
def _eval(self, parts):
raise SubstitutionError("eval must appear at the start")
def _anonymous_name(self, name):
try:
from rosgraph.names import anonymous_name
return anonymous_name(name)
except ImportError:
import random, socket, sys, warnings
warnings.warn("Could not import the 'rosgraph' package; "
"resorting to fallback behaviour.")
# Behaviour copied from rosgraph.names
name = "{}_{}_{}_{}".format(name, socket.gethostname(),
os.getpid(), random.randint(0, sys.maxsize))
return name.replace('.', '_').replace('-', '_').replace(':', '_')
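# Illustrative sketch (not part of the original module): values containing
# unknown "$(arg ...)" references come back as UnresolvedValue and can be
# resolved in a second pass, once the argument is bound.
def _demo_two_pass_resolution():
    parser = SubstitutionParser(args={"robot": None})
    value = parser.sub("$(arg robot)_node")
    assert isinstance(value, UnresolvedValue)
    parser.arguments["robot"] = "fictibot"
    assert parser.resolve(value) == "fictibot_node"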
###############################################################################
# Launch XML Parser
###############################################################################
class LaunchParserError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ErrorTag(object):
_ATTRIBUTES = {}
_EMPTY_LIST = ()
def __init__(self, text):
self.text = text
self.attributes = self._ATTRIBUTES
self.children = self._EMPTY_LIST
self.unknown = self._EMPTY_LIST
@property
def tag(self):
return "error"
def append(self, child):
raise NotImplementedError("error nodes do not have children")
class BaseLaunchTag(object):
CHILDREN = ()
REQUIRED = ()
ATTRIBUTES = {
"if": bool,
"unless": bool
}
def __init__(self, text, attributes):
self.text = text
self.attributes = attributes
for key in self.REQUIRED:
if not attributes.get(key):
raise LaunchParserError("missing required attribute: " + key)
self.children = []
self.unknown = []
for key, value in attributes.iteritems():
if isinstance(value, UnresolvedValue):
self.unknown.append(key)
if "if" in attributes and "unless" in attributes:
raise LaunchParserError("cannot declare both 'if' and 'unless'")
# ----- A condition is a tuple (target, value), where target is what
# the condition should evaluate to ("if" = True, "unless" = False).
if "unless" in attributes:
self.condition = (False, attributes["unless"])
else:
self.condition = (True, attributes.get("if", True))
@property
def tag(self):
raise NotImplementedError("subclasses must override 'tag'")
def append(self, child):
if child.tag in self.CHILDREN or child.tag == "error":
self.children.append(child)
else:
self.children.append(ErrorTag("invalid child tag: " + child.tag))
class LaunchTag(BaseLaunchTag):
CHILDREN = ("node", "include", "remap", "param", "rosparam",
"group", "arg", "env", "machine", "test")
ATTRIBUTES = {}
@property
def tag(self):
return "launch"
class NodeTag(BaseLaunchTag):
CHILDREN = ("remap", "param", "rosparam", "env")
REQUIRED = ("pkg", "type")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"pkg": str,
"type": str,
"name": str,
"args": str,
"machine": str,
"respawn": bool,
"respawn_delay": float,
"required": bool,
"ns": str,
"clear_params": bool,
"output": str,
"cwd": str,
"launch-prefix": str
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.package = attributes["pkg"]
self.type = attributes["type"]
self.name = attributes.get("name")
self.argv = attributes.get("args")
self.machine = attributes.get("machine")
self.respawn = attributes.get("respawn", False)
self.respawn_delay = attributes.get("respawn_delay", 0.0)
self.required = attributes.get("required", False)
self.namespace = attributes.get("ns")
self.clear_params = attributes.get("clear_params", False)
self.output = attributes.get("output", "log")
self.cwd = attributes.get("cwd", "ROS_HOME")
self.prefix = attributes.get("launch-prefix")
@property
def tag(self):
return "node"
class IncludeTag(BaseLaunchTag):
CHILDREN = ("arg", "env")
REQUIRED = ("file",)
ATTRIBUTES = {
"if": bool,
"unless": bool,
"file": str,
"ns": str,
"clear_params": bool,
"pass_all_args": bool
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.file = attributes["file"]
self.namespace = attributes.get("ns")
self.clear_params = attributes.get("clear_params", False)
self.pass_all_args = attributes.get("pass_all_args", False)
@property
def tag(self):
return "include"
class RemapTag(BaseLaunchTag):
REQUIRED = ("from", "to")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"from": str,
"to": str
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.origin = attributes["from"]
self.target = attributes["to"]
@property
def tag(self):
return "remap"
class ParamTag(BaseLaunchTag):
REQUIRED = ("name",)
ATTRIBUTES = {
"if": bool,
"unless": bool,
"name": str,
"value": str,
"type": str,
"textfile": str,
"binfile": str,
"command": str
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.name = attributes["name"]
self.value = attributes.get("value")
self.type = attributes.get("type")
self.textfile = attributes.get("textfile")
self.binfile = attributes.get("binfile")
self.command = attributes.get("command")
if (self.value is None and self.textfile is None
and self.binfile is None and self.command is None):
raise LaunchParserError("missing required attribute: value")
@property
def tag(self):
return "param"
class RosParamTag(BaseLaunchTag):
ATTRIBUTES = {
"if": bool,
"unless": bool,
"command": str,
"file": str,
"param": str,
"ns": str,
"subst_value": bool
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.command = attributes.get("command", "load")
self.file = attributes.get("file")
self.name = attributes.get("param")
self.namespace = attributes.get("ns")
self.substitute = attributes.get("subst_value", False)
if self.command == "load":
if self.file is None and not text:
raise LaunchParserError("missing required attribute: file")
elif self.command == "dump":
if self.file is None:
raise LaunchParserError("missing required attribute: file")
elif self.command == "delete" and self.name is None:
raise LaunchParserError("missing required attribute: name")
@property
def tag(self):
return "rosparam"
class GroupTag(BaseLaunchTag):
CHILDREN = ("node", "include", "remap", "param", "rosparam",
"group", "arg", "env", "machine", "test")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"ns": str,
"clear_params": bool
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.namespace = attributes.get("ns")
self.clear_params = attributes.get("clear_params", False)
@property
def tag(self):
return "group"
class ArgTag(BaseLaunchTag):
REQUIRED = ("name",)
ATTRIBUTES = {
"if": bool,
"unless": bool,
"name": str,
"value": str,
"default": str,
"doc": str
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.name = attributes["name"]
self.value = attributes.get("value")
self.default = attributes.get("default")
self.description = attributes.get("doc")
if not self.value is None and not self.default is None:
raise LaunchParserError("incompatible attributes: value, default")
@property
def tag(self):
return "arg"
class EnvTag(BaseLaunchTag):
REQUIRED = ("name", "value")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"name": str,
"value": str
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.name = attributes["name"]
self.value = attributes["value"]
@property
def tag(self):
return "env"
class MachineTag(BaseLaunchTag):
REQUIRED = ("name", "address")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"name": str,
"address": str,
"env-loader": str,
"default": bool,
"user": str,
"password": str,
"timeout": float
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.name = attributes["name"]
self.address = attributes["address"]
self.loader = attributes.get("env-loader")
        self.default = attributes.get("default", False)
self.user = attributes.get("user")
self.password = attributes.get("password")
self.timeout = attributes.get("timeout", 10.0)
@property
def tag(self):
return "machine"
class TestTag(BaseLaunchTag):
CHILDREN = ("remap", "param", "rosparam", "env")
REQUIRED = ("test-name", "pkg", "type")
ATTRIBUTES = {
"if": bool,
"unless": bool,
"test-name": str,
"pkg": str,
"type": str,
"name": str,
"args": str,
"ns": str,
"clear_params": bool,
"cwd": str,
"launch-prefix": str,
"retry": int,
"time-limit": float
}
def __init__(self, text, attributes):
BaseLaunchTag.__init__(self, text, attributes)
self.test_name = attributes["test-name"]
self.package = attributes["pkg"]
self.type = attributes["type"]
self.name = attributes.get("name", self.test_name)
self.argv = attributes.get("args")
self.namespace = attributes.get("ns")
self.clear_params = attributes.get("clear_params", False)
self.cwd = attributes.get("cwd", "ROS_HOME")
self.prefix = attributes.get("launch-prefix")
self.retry = attributes.get("retry", 0)
self.time_limit = attributes.get("time-limit", 60.0)
@property
def tag(self):
return "test"
class LaunchParser(object):
TAGS = {
"launch": LaunchTag,
"node": NodeTag,
"include": IncludeTag,
"remap": RemapTag,
"param": ParamTag,
"rosparam": RosParamTag,
"group": GroupTag,
"arg": ArgTag,
"env": EnvTag,
"machine": MachineTag,
"test": TestTag
}
def __init__(self, pkgs = None):
self.sub_parser = None
self.packages = pkgs if not pkgs is None else {}
def parse(self, filepath):
if not filepath or not os.path.isfile(filepath):
raise LaunchParserError("not a file: " + str(filepath))
try:
self.sub_parser = SubstitutionParser(pkgs = self.packages)
xml_root = ET.parse(filepath).getroot()
if not xml_root.tag == "launch":
raise LaunchParserError("invalid root tag: " + xml_root.tag)
return self._parse_tag(xml_root)
except ET.ParseError as e:
raise LaunchParserError(str(e))
def _parse_tag(self, tag):
if not tag.tag in self.TAGS:
return ErrorTag("unknown tag: " + tag.tag)
cls = self.TAGS[tag.tag]
try:
attributes = self._attributes(tag, cls.ATTRIBUTES)
except SubstitutionError as e:
return ErrorTag(e.value)
text = tag.text.strip() if tag.text else ""
element = cls(text, attributes)
if element.tag == "arg" and isinstance(element.name, basestring):
self.sub_parser.arguments[element.name] = element.value
for child in tag:
element.append(self._parse_tag(child))
return element
def _attributes(self, tag, schema):
attributes = {}
sub = self.sub_parser.sub # shortcut to make line below shorter
for key, value in tag.attrib.iteritems():
if not key in schema:
continue # TODO raise an error vs. future compatibility
attributes[key] = sub(value, conversion = schema[key])
return attributes
###############################################################################
# Tests
###############################################################################
def _test_substitution():
parser = SubstitutionParser()
value = parser.sub("value")
assert value == "value"
value = parser.sub("1", int)
assert value == 1
value = parser.sub("1", bool)
assert value is True
value = parser.sub("1.0", float)
assert value == 1.0
value = parser.sub("$(env VAR)")
assert isinstance(value, UnresolvedValue)
assert len(value.parts) == 1
assert not value.resolvable
assert value.try_convert() is value
value = parser.sub("$(eval 1 + 1)")
assert isinstance(value, UnresolvedValue)
assert len(value.parts) == 1
value = parser.sub("value$(env NAME)$(env VAR)")
assert isinstance(value, UnresolvedValue)
assert len(value.parts) == 3
assert value.parts[0] == "value"
assert value.parts[1] == ("env", "NAME")
assert value.parts[2] == ("env", "VAR")
parser.arguments["test"] = "value"
value = parser.sub("$(arg test)")
assert value == "value"
value = parser.sub("$$(arg test)$")
assert value == "$value$"
parser.environment["TEST"] = "value"
value = parser.sub("$(env TEST)")
assert value == "value"
value = parser.sub("$(optenv TEST)")
assert value == "value"
try:
parser.sub("$(arg $(arg name))")
assert False
except SubstitutionError as e:
pass
try:
parser.sub("$($)")
assert False
except SubstitutionError as e:
pass
try:
parser.sub("va$(eval 'lue')")
assert False
except SubstitutionError as e:
pass
try:
parser.sub("value$(arg name)$(env VAR)")
assert False
except SubstitutionError as e:
pass
def _test_launch():
parser = LaunchParser()
tree = parser.parse("minimal.launch")
assert isinstance(tree, LaunchTag)
assert not tree.unknown
assert not tree.attributes
assert not tree.text
assert tree.condition == (True, True)
assert len(tree.children) == 2
assert isinstance(tree.children[0], NodeTag)
assert isinstance(tree.children[1], NodeTag)
node = tree.children[0]
assert not node.text
assert not node.unknown
assert not node.children
assert node.attributes["pkg"] == "fictibot_drivers"
assert node.attributes["type"] == "fictibot_driver"
assert node.attributes["name"] == "fictibase"
assert node.name == "fictibase"
assert node.package == "fictibot_drivers"
assert node.type == "fictibot_driver"
node = tree.children[1]
assert not node.text
assert not node.unknown
assert not node.children
assert node.attributes["pkg"] == "fictibot_controller"
assert node.attributes["type"] == "fictibot_controller"
assert node.attributes["name"] == "ficticontrol"
assert node.name == "ficticontrol"
assert node.package == "fictibot_controller"
assert node.type == "fictibot_controller"
if __name__ == "__main__":
_test_substitution()
_test_launch()
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-06 23:56
from __future__ import unicode_literals
import brazil_fields.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Empresa',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('data', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('cnpj', brazil_fields.fields.CNPJField(max_length=14)),
('razao_social', models.CharField(max_length=200, verbose_name='razão social')),
('nome_fantasia', models.CharField(max_length=100, verbose_name='nome fantasia')),
],
options={
'verbose_name': 'empresa',
'verbose_name_plural': 'empresas',
},
),
]
| python |
from abc import abstractmethod
from typing import List, Dict
from src.bounding_box import BoundingBox
from src.utils.enumerators import BBType, BBFormat
import torch.nn.functional as F
class ModelEvaluator:
def __init__(self):
self._gt_bboxes = []
self._predicted_bboxes = []
self._img_count = 0
def get_gt_bboxes(self) -> List[BoundingBox]:
"""
Returns a list containing the ground truth bounding boxes
        :return: list of ground-truth BoundingBox objects
"""
return self._gt_bboxes
def get_predicted_bboxes(self) -> List[BoundingBox]:
"""
Returns a list containing the predicted bounding boxes
        :return: list of predicted BoundingBox objects
"""
return self._predicted_bboxes
def add_predictions(self, targets, predictions):
        # remember the starting image index: the ground-truth loop advances
        # self._img_count below, while predictions reuse the same indices
        img_count_temp = self._img_count
for target in targets:
for label, [x, y, w, h] in zip(target['labels'].tolist(), target['boxes'].tolist()):
self._gt_bboxes.append(BoundingBox(
image_name=str(self._img_count),
class_id=str(label),
coordinates=(x - w / 2, y - h / 2, w, h),
bb_type=BBType.GROUND_TRUTH,
format=BBFormat.XYWH,
))
self._img_count += 1
pred_logits, pred_boxes_images = predictions['pred_logits'], predictions['pred_boxes']
prob = F.softmax(pred_logits, -1)
scores_images, labels_images = prob[..., :-1].max(-1)
for scores, labels, pred_boxes in zip(scores_images, labels_images, pred_boxes_images):
for score, label, [x, y, w, h] in zip(scores, labels, pred_boxes):
label = label.item()
score = score.item()
if label >= 0:
self._predicted_bboxes.append(
BoundingBox(
image_name=str(img_count_temp),
class_id=str(label),
coordinates=(x - w / 2, y - h / 2, w, h),
bb_type=BBType.DETECTED,
format=BBFormat.XYWH,
confidence=score
)
)
img_count_temp += 1
@abstractmethod
def get_metrics(self) -> Dict:
pass
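# A minimal concrete evaluator (illustrative sketch, not part of the original
# code): it only reports box counts, which is enough to show how a subclass
# satisfies the get_metrics() contract.
class CountingEvaluator(ModelEvaluator):
    def get_metrics(self) -> Dict:
        return {
            'num_gt': len(self.get_gt_bboxes()),
            'num_pred': len(self.get_predicted_bboxes()),
        }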
| python |
import torch
from typing import List, Dict, Tuple, Iterable
from ray import tune
from torch import optim
from tqdm import trange
from G2G.model.graph_wrapper import GraphWrapper
from G2G.model.model import Predictor
from G2G.utils import get_all_combo, prepare_input, get_score
from G2G.decorators.decorators import logger, Formatter, timer
def train_tune(config: Dict):
gn = config["gn"]
dim = config["dim"]
predictor = Predictor(dim, dim, config['hidden'], config['k'], config['dropout'])
max_iter = config["max_iter"]
x = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/x-gn:{gn}-dim:{dim}-dataset.pt")
y = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/y-gn:{gn}-dim:{dim}-dataset.pt")
x_val = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/x-val-gn:{gn}-dim:{dim}-dataset.pt")
y_val = torch.load(f"/home/malattia/Workspace/Tesi/G2G/dataset/y-val-gn:{gn}-dim:{dim}-dataset.pt")
lr = config["lr"]
return train(predictor, x, y, {"lr": lr, "iterations": max_iter}, tqdm_enabled=False, tune_on=True,
validation_x=x_val, validation_y=y_val)
# @logger(Formatter(lambda x: "Training results:\nAccuracy: " + str(x[1]) + "\nLast loss: " + str(x[2][-1].item())))
@timer
def train(predictor: Predictor, x: List[GraphWrapper], y: Dict[str, Dict[Tuple[int, int], torch.Tensor]], config: Dict,
validation_x: List[GraphWrapper] = None, validation_y: Dict[str, Dict[Tuple[int, int], torch.Tensor]] = None,
checkpoint: int = 0, tqdm_enabled: bool = True, tune_on: bool = False) \
-> Tuple[Predictor, torch.Tensor, Dict[str, float], Dict[str, float]]:
# config = {iterations: int, lr: float}
optimizer = optim.Adam(predictor.parameters(), lr=config["lr"])
custom_range: Iterable = trange(config["iterations"]) if tqdm_enabled else range(config["iterations"])
loss_history = torch.zeros(config["iterations"])
dim: int = x[0].laplacian.shape[0]
predictor.train()
for epoch in custom_range:
for graph in x:
for c in get_all_combo(dim):
optimizer.zero_grad()
A_hat = predictor(prepare_input(c[0], c[1], dim, graph.laplacian), graph.laplacian)
loss = predictor.loss(A_hat, y[str(graph)][(c[0], c[1])])
loss.backward()
optimizer.step()
loss_history[epoch] += loss.detach().item()
if checkpoint != 0 and epoch != 0 and epoch % checkpoint == 0:
torch.save(predictor.state_dict(),
f"../dataset/model-gn:{len(x)}-dim:{dim}-hidden:{predictor.GCN2.weight.shape[2]}-k:{predictor.GCN2.weight.shape[0]}.pt")
print("Score on training set:\n", get_score(predictor, x, y))
if validation_x is not None and validation_y is not None:
print("Score on validation set:\n", get_score(predictor, validation_x, validation_y))
print("Loss: ", loss_history[epoch] / len(x))
predictor.eval()
val = get_score(predictor, validation_x, validation_y) \
if validation_x is not None and validation_y is not None else None
acc = get_score(predictor, x, y)
if tune_on and validation_x is not None and validation_y is not None:
tune.track.log(mean_accuracy=val['long'])
torch.save(predictor.state_dict(),
f"/home/malattia/Workspace/Tesi/G2G/dataset/model-gn:{len(x)}-dim:{dim}-hidden:{predictor.GCN2.weight.shape[2]}-k:{predictor.GCN2.weight.shape[0]}.pt")
return predictor, loss_history, acc, val
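# Illustrative config for train_tune (hypothetical values; the dataset paths
# hard-coded above must exist for the matching gn/dim pair):
#   config = {"gn": 100, "dim": 10, "hidden": 32, "k": 2, "dropout": 0.1,
#             "max_iter": 200, "lr": 1e-3}
#   train_tune(config)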
| python |
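# Reads n and prints 2^n mod (10^9 + 7). An equivalent one-liner using the
# built-in modular exponentiation (an aside, not in the original):
#   print(pow(2, int(input()), 10**9 + 7))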
print((2**int(input()))%(10**9+7)) | python |
from utils.db.mongo_orm import *
class TestCase(Model):
class Meta:
database = db
collection = 'testCase'
# Common Fields
_id = ObjectIdField()
name = StringField()
description = StringField()
isDeleted = BooleanField(field_name='isDeleted', default=False)
status = BooleanField(field_name='status', default=False)
projectId = ObjectIdField()
testSuiteId = ObjectIdField()
createAt = DateField()
createUser = StringField()
lastUpdateTime = DateField()
lastUpdateUser = StringField()
    # execution order
sequence = IntField(field_name='sequence', default=0)
# api content
testCaseType = StringField()
service = StringField(field_name='service')
requestProtocol = StringField()
requestMethod = StringField()
domain = StringField()
route = StringField()
delaySeconds = IntField(field_name='delaySeconds', default=0)
    # data initialization
dataInitializes = ListField(field_name='dataInitializes',
default=[{'dbConfigId': '', 'dbType': '', 'mongoCrud': '', 'collection': '',
'query': '', 'set': '', 'sql': ''}],
expected_structure={
'expectedTypeRange': [list],
'expectedValueRange': [
{
'expectedTypeRange': [dict],
'expectedDict': {
'dbConfigId': {'expectedTypeRange': []},
'dbType': {'expectedTypeRange': []},
'mongoCrud': {'expectedTypeRange': []},
'collection': {'expectedTypeRange': []},
'query': {'expectedTypeRange': []},
'set': {'expectedTypeRange': []},
'sql': {'expectedTypeRange': []},
}
}
]
})
headers = ListField(field_name='headers',
default=[
{'name': 'Accept', 'value': 'application/json'},
{'name': 'Content-Type', 'value': 'application/json'}
],
expected_structure={
'expectedTypeRange': [list],
'expectedValueRange': [
{
'expectedTypeRange': [dict],
'expectedDict': {
'name': {'expectedTypeRange': [str]},
'value': {'expectedTypeRange': [str]}
}
},
{
'expectedTypeRange': [dict],
'expectedDict': {
'interrelate': {'expectedTypeRange': []},
'name': {'expectedTypeRange': [str]},
'value': {'expectedTypeRange': []}
}
}
]
})
    parameterType = StringField(field_name='parameterType', default='json')  # json or form or file
filePath = StringField() # if parameterType = file, enable filePath
requestBody = ListField(field_name='requestBody', default=[{}],
expected_structure={
'expectedTypeRange': [list],
'expectedValueRange': [{
'expectedTypeRange': [dict],
'expectedDict': {
}
}]
})
isJsonArray = BooleanField(field_name='isJsonArray', default=False)
isClearCookie = BooleanField(field_name='isClearCookie', default=False)
setGlobalVars = ListField(field_name='setGlobalVars',
default=[{'name': '', 'query': []}],
expected_structure={
'expectedTypeRange': [list],
'expectedValueRange': [{
'expectedTypeRange': [dict],
'expectedDict': {
'name': {'expectedTypeRange': [str]},
'query': {
'expectedTypeRange': [list],
'expectedValueRange': [
{'expectedTypeRange': [str]}
]
}
}
}]
})
# validate
checkResponseCode = StringField()
checkResponseBody = ListField(field_name='checkResponseBody',
default=[{'regex': '', 'query': []}],
expected_structure={
'expectedTypeRange': [list, type(None)],
'expectedValueRange': [{
'expectedTypeRange': [dict],
'expectedDict': {
'regex': {'expectedTypeRange': [str]},
'query': {
'expectedTypeRange': [list],
'expectedValueRange': [
{'expectedTypeRange': [str]}
]
}
}
}]
})
checkResponseNumber = ListField(field_name='checkResponseNumber',
default=[{
"expressions": {
'firstArg': '',
'operator': '',
'secondArg': '',
'judgeCharacter': '',
'expectResult': ''
}
}],
expected_structure={
'expectedTypeRange': [list, type(None)],
'expectedValueRange': [{
'expectedTypeRange': [dict],
'expectedDict': {
'expressions': {
'expectedTypeRange': [dict],
'expectedDict': {
'firstArg': {'expectedTypeRange': [str]},
'operator': {'expectedTypeRange': [str]},
'secondArg': {'expectedTypeRange': [str]},
'judgeCharacter': {'expectedTypeRange': [str]},
'expectResult': {'expectedTypeRange': [str]}
}
}
}
}]
})
checkSpendSeconds = IntField(field_name='checkSpendSeconds', default=0)
    testStatus = BooleanField(field_name='testStatus', default=False)  # test status; True means a test is in progress
lastManualResult = DictField(field_name='lastManualResult', default={})
def __str__(self):
return "name: {}".format(self.name)
if __name__ == "__main__":
pass
| python |
# * Utils Function
from tools.Wave_Class import Wave
import math
def auto_frame_count(waves, h, w, tr):
    """Estimate how many frames are needed for every wave to cross an h x w
    canvas: take the slowest corner-arrival time over all waves, divide by
    the time per frame (tr), and add a 10% safety margin."""
    max_time = 0.0
    # the four canvas corners (the original listed (0, h) instead of (h, 0),
    # which skipped one corner)
    to_check = ((0, 0), (0, w), (h, 0), (h, w))
for wave in waves:
temp_func = wave.distanceFunction()
for p in to_check:
temp_dist = temp_func(p[0], p[1])
temp_time = temp_dist / wave.wavespeed
if temp_time > max_time:
max_time = temp_time
return math.ceil((max_time / tr) * 1.1)
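# Illustrative call (hypothetical values; constructing Wave objects depends
# on the Wave_Class API, which is not shown here):
#   n_frames = auto_frame_count(waves=[wave_a, wave_b], h=1080, w=1920, tr=1 / 30)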
| python |
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: i22_tomo_loader
:platform: Unix
   :synopsis: A class for loading I22 tomography data
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
from savu.plugins.utils import register_plugin
from savu.plugins.loaders.base_loader import BaseLoader
import h5py
import logging
import numpy as np
@register_plugin
class I22TomoLoader(BaseLoader):
def __init__(self, name='I22TomoLoader'):
super(I22TomoLoader, self).__init__(name)
def setup(self):
"""
"""
exp = self.exp
data_obj = exp.create_data_object('in_data', 'tomo')
data_obj.backing_file = \
h5py.File(exp.meta_data.get("data_file"), 'r')
data_obj.data = data_obj.backing_file['entry/result/data']
data_obj.set_shape(data_obj.data.shape)
        logging.warning('the data has shape %s' % str(data_obj.data.shape))
data_obj.set_axis_labels('y.units', 'x.units',
'rotation_angle.degrees', 'Q.angstrom^-1')
data_obj.add_pattern('PROJECTION', core_dims=(1, 0), slice_dims=(2, 3))
data_obj.add_pattern('SINOGRAM', core_dims=(2, 1), slice_dims=(0, 3))
data_obj.add_pattern('SPECTRUM', core_dims=(3,), slice_dims=(0, 1, 2))
mData = data_obj.meta_data
mData.set("Q", data_obj.backing_file['entry/result/q'][()])
mData.set("x", np.arange(data_obj.data.shape[1]))
mData.set("y", np.arange(data_obj.data.shape[0]))
mData.set("rotation_angle", data_obj.backing_file[
'entry/result/theta'][()])
self.set_data_reduction_params(data_obj)
| python |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 18:24:57 2019
@author: jone
"""
#%% Simple Demo
import cv2
import numpy as np
# callback function
def draw_circle(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img, (x, y), 100, (255, 0, 0), -1)
# create an empty image
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while(1):
cv2.imshow('image', img)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows()
#%% Advanced Demo
import cv2
import numpy as np
drawing = False  # True while the mouse button is held down
mode = True  # True draws rectangles, False draws circles
ix, iy = -1, -1
# mouse callback function
def draw_circle(event, x, y, flags, param):
global ix, iy, drawing, mode
    if event == cv2.EVENT_LBUTTONDOWN:  # mouse button pressed
drawing = True
ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:  # mouse moved
        if drawing == True:  # only draw while the button is held down
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (255, 0, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 255, 0), -1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img, (ix, iy), (x, y), (255, 0, 0), -1)
else:
cv2.circle(img, (x, y), 5, (0, 255, 0), -1)
img = np.zeros((512, 512, 3), np.uint8)
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while True:
cv2.imshow('image', img)
k = cv2.waitKey(1) & 0xFF
    if k == ord('m'):  # toggle between rectangle and circle mode
mode = not mode
    elif k == 27:  # exit on Esc
break
cv2.destroyAllWindows() | python |
# -*- coding: utf-8 -*-
import CTK
def commit():
print CTK.post
return {'ret': 'ok'}
def default():
submit = CTK.Submitter('/commit')
submit += CTK.RawHTML ("<h2>Can set, without initial value</h2>")
submit += CTK.StarRating ({'name': 'test_rate1', 'can_set': True})
submit += CTK.RawHTML ("<h2>Can set, with initial value</h2>")
submit += CTK.StarRating ({'name': 'test_rate2', 'selected': '3', 'can_set': True})
submit += CTK.RawHTML ("<h2>Cannot edit value</h2>")
submit += CTK.StarRating ({'name': 'test_rate3', 'selected': '4'})
submit += CTK.RawHTML ("<h2>No auto-submit</h2>")
submit += CTK.StarRating ({'name': 'test_rate4', 'can_set': True, 'class': 'noauto'})
page = CTK.Page()
page += CTK.RawHTML('<h1>Demo StarRating</h1>')
page += submit
return page.Render()
CTK.publish ('', default)
CTK.publish ('/commit', commit, method="POST")
CTK.run (port=8000)
| python |
import hashlib
# Status definitions and subdir names
STATUS = {"PENDING": "queue",
"STARTED": "inprogress",
"DONE": "results",
"ERROR": "errors"}
def get_id(doc):
"""
Calculate the id (hash) of the given document
:param doc: The document (string)
:return: a task id (hash)
"""
if len(doc) == 34 and doc.startswith("0x"): # it sure looks like a hash
return doc
m = hashlib.md5() # md5 hash generator
if isinstance(doc, str):
        doc = doc.encode("utf-8")  # encode to bytes before hashing
    m.update(doc)  # feed the document into the hash
return "0x" + m.hexdigest()
| python |
from imported.submodules import submodulea
def bar():
print("imported.modulee.bar()")
submodulea.foo()
| python |
from flask import Flask, render_template, jsonify, request, url_for
import json
app = Flask(__name__)
values_list = ['id', 'summary', 'host_is_superhost', 'latitude', 'longitude',
'property_type', 'room_type', 'accomodates', 'bathrooms',
'bedrooms', 'beds', 'security_deposit', 'cleaning_fee',
'extra_people', 'minimum_nights', 'cancellation_policy']
def create_json(code, description, dictionary=None):
temp = {
"meta": {
"code": code,
"description": description
}
}
if dictionary is not None:
temp['response'] = dictionary
return temp
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is not None:
func()
@app.route('/')
def home():
return "Hello World"
@app.route('/get-predict/<id>', methods=['GET'])
def get_predict(id=None):
    from models import get_listing
if id is None:
return create_json(202, "No listing_id was passed")
else:
try:
listing = get_listing(id)
except Exception as e:
f = open("listing.log", "a")
f.write("No ID was found with ID: {}".format(id) + "\n")
f.close()
return create_json(201, "No listing found with ID: {}".format(id))
else:
f = open("listing.log", "a")
f.write("Listing ID: {} Prediction: {}".format(listing[0],
listing[1]) + "\n")
f.close()
t = {"listing_id": listing[0], "listing_prediction": listing[1]}
return create_json(200, "Listing Found", t)
@app.route('/predict', methods=['POST'])
def index():
if not request.is_json:
return create_json(203, "Format is not a JSON. Check headers.")
test = request.json
missing = []
for value in values_list:
if value not in test.keys():
missing.append(value)
if len(missing) > 0:
return create_json(204, "Missing values in request",
{"values": missing})
from models import predict
from keras.models import model_from_json
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("model.h5")
try:
req_data = request.get_json(force=True)
id, summary, host, lat, lng, prop_type, room, accom, baths, bedrooms,\
beds, dep, fee, extra, mini, cancel = \
req_data['id'], req_data['summary'], req_data['host_is_superhost'], \
req_data['latitude'], req_data['longitude'], req_data[
'property_type'], \
req_data['room_type'], req_data['accomodates'], req_data['bathrooms'], \
req_data['bedrooms'], req_data['beds'], req_data['security_deposit'], \
req_data['cleaning_fee'], req_data['extra_people'], req_data[
'minimum_nights'], req_data['cancellation_policy']
except Exception as e:
return create_json(400, e)
else:
try:
result = predict(id, summary, host, lat, lng, prop_type, room,
accom, baths, bedrooms, beds, dep, fee, extra,
mini, cancel, model)
req_data['prediction'] = result[1]
f = open("predict.log", "a")
f.write(json.dumps(req_data) + "\n")
f.close()
t = {"listing_id": result[0], "listing_prediction": result[1]}
return create_json(200, "Listing Updated", t)
except Exception as e:
return "{}".format(e)
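# Example request for /predict (illustrative only; the field names come from
# values_list above, the values are made up):
#
#   curl -X POST http://localhost:5000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"id": 1, "summary": "Cozy flat", "host_is_superhost": "t",
#             "latitude": 45.5, "longitude": -122.6, "property_type": "Apartment",
#             "room_type": "Entire home/apt", "accomodates": 2, "bathrooms": 1,
#             "bedrooms": 1, "beds": 1, "security_deposit": 100.0,
#             "cleaning_fee": 40.0, "extra_people": 0.0, "minimum_nights": 2,
#             "cancellation_policy": "moderate"}'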
| python |
import cv2 as cv
import numpy as np
titleWindow = 'Introduction_to_svm.py'
print("Takes a moment to compute resulting image...")
# Set up training data
## [setup1]
labels = np.array([1, -1, -1, -1])
trainingData = np.matrix([[501, 10], [255, 10], [501, 255], [10, 501]], dtype=np.float32)
## [setup1]
# Train the SVM
## [init]
svm = cv.ml.SVM_create()
svm.setType(cv.ml.SVM_C_SVC)
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
## [init]
## [train]
svm.train(trainingData, cv.ml.ROW_SAMPLE, labels)
## [train]
# Data for visual representation
width = 512
height = 512
image = np.zeros((height, width, 3), dtype=np.uint8)
# Show the decision regions given by the SVM
## [show]
green = (0,255,0)
blue = (255,0,0)
for i in range(image.shape[0]):
for j in range(image.shape[1]):
sampleMat = np.matrix([[j,i]], dtype=np.float32)
response = svm.predict(sampleMat)[1]
if response == 1:
image[i,j] = green
elif response == -1:
image[i,j] = blue
## [show]
# Show the training data
## [show_data]
thickness = -1
cv.circle(image, (501, 10), 5, ( 0, 0, 0), thickness)
cv.circle(image, (255, 10), 5, (255, 255, 255), thickness)
cv.circle(image, (501, 255), 5, (255, 255, 255), thickness)
cv.circle(image, ( 10, 501), 5, (255, 255, 255), thickness)
## [show_data]
# Show support vectors
## [show_vectors]
thickness = 2
sv = svm.getUncompressedSupportVectors()
for i in range(sv.shape[0]):
cv.circle(image, (sv[i,0], sv[i,1]), 6, (128, 128, 128), thickness)
## [show_vectors]
#cv.imwrite('result.png', image) # save the image
cv.imshow('SVM Simple Example', image) # show it to the user
cv.waitKey()
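# Illustrative follow-up (not in the original sample): classify one more
# point with the trained model; a point near the lone positive sample at
# (501, 10) should land in the green (+1) region.
extra = np.matrix([[480, 40]], dtype=np.float32)
print('prediction for (480, 40):', svm.predict(extra)[1])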
| python |
import cv2
import numpy as np
import copy
from shapes.shape import Shape
from shapes.ep import p2e, e2p, column
class BBox(Shape):
@classmethod
def from_region(cls, region):
yx = region.centroid()
tmp = cls(yx[1], yx[0], -np.rad2deg(region.theta_), 2 * region.major_axis_, 2 * region.minor_axis_,
region.frame())
return tmp
@classmethod
def from_planar_object(cls, another_object):
xmin, ymin, width, height = cv2.boundingRect(another_object.to_poly())
xmax = xmin + width
ymax = ymin + height
return cls(xmin, ymin, xmax, ymax)
@classmethod
def from_dict(cls, region_dict, frame=None):
d = region_dict
if 'x' in d and 'y' in d and 'width' in d and 'height' in d:
return cls(d['x'], d['y'], d['x'] + d['width'], d['y'] + d['height'], frame)
@classmethod
def from_xywh(cls, x, y, width, height, frame=None):
return cls(x, y, x + width, y + height, frame)
@classmethod
def from_xycenter_wh(cls, x_center, y_center, width, height, frame=None):
return cls(x_center - width / 2, y_center - height / 2, x_center + width / 2, y_center + height / 2, frame)
def __init__(self, xmin=None, ymin=None, xmax=None, ymax=None, frame=None):
super(BBox, self).__init__(frame)
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __str__(self):
return('BBox xymin ({xmin:.1f},{ymin:.1f}) xymax ({xmax:.1f},{ymax:.1f}), '\
'width height ({width:.1f},{height:.1f}), frame {frame}'.format(
width=self.width, height=self.height, **self.__dict__))
@property
def xy(self):
return np.array((self.xmin + self.width / 2, self.ymin + self.height / 2))
@property
def width(self):
return self.xmax - self.xmin
@property
def height(self):
return self.ymax - self.ymin
def to_poly(self):
return [(self.xmin, self.ymin), (self.xmin, self.ymax), (self.xmax, self.ymax), (self.xmax, self.ymin)]
def is_strictly_outside_bounds(self, xmin, ymin, xmax, ymax):
return self.iou(BBox(xmin, ymin, xmax, ymax)) == 0
def is_strictly_outside_bbox(self, bbox):
return self.is_strictly_outside_bounds(*bbox.to_array()[:4])
def is_partially_outside_bounds(self, xmin, ymin, xmax, ymax):
return self.iou(BBox(xmin, ymin, xmax, ymax)) > 0 and not self.is_inside_bounds(xmin, ymin, xmax, ymax)
def is_partially_outside_bbox(self, bbox):
return self.is_partially_outside_bounds(*bbox.to_array()[:4])
def is_inside_bounds(self, xmin, ymin, xmax, ymax):
return self.xmin > xmin and self.ymin > ymin and self.xmax < xmax and self.ymax < ymax
def is_inside_bbox(self, bbox):
return self.is_inside_bounds(*bbox.to_array()[:4])
def cut(self, viewport_bbox):
if self.is_strictly_outside_bbox(viewport_bbox):
return None
elif self.is_inside_bbox(viewport_bbox):
return self
else:
assert self.is_partially_outside_bbox(viewport_bbox)
return self.intersection(viewport_bbox)
def intersection(self, other):
xmin = max(self.xmin, other.xmin)
ymin = max(self.ymin, other.ymin)
xmax = min(self.xmax, other.xmax)
ymax = min(self.ymax, other.ymax)
if ymin >= ymax or xmin >= xmax:
return None
else:
assert self.frame == other.frame
return BBox(xmin, ymin, xmax, ymax, self.frame)
def to_array(self):
return np.array([self.xmin, self.ymin, self.xmax, self.ymax, self.frame])
@property
def area(self):
return self.width * self.height
def iou(self, bbox):
        # source: https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
        intersection = self.intersection(bbox)
        if intersection is None:
            return 0
        # intersection over union: the intersection area divided by the
        # union of both areas (their sum minus the intersection)
        interArea = intersection.area
        return interArea / float(self.area + bbox.area - interArea)
def __sub__(self, other):
return np.linalg.norm(self.xy - other.xy)
    def rotate(self, angle_deg_cw, rotation_center_xy=None):
        # An axis-aligned box cannot represent an arbitrary rotation; the
        # original draft body (hidden behind an 'assert False' and referencing
        # a nonexistent angle_deg attribute) is disabled here explicitly.
        raise NotImplementedError("BBox does not support rotation")
def move(self, delta_xy):
self.xmin += delta_xy[0]
self.xmax += delta_xy[0]
self.ymin += delta_xy[1]
self.ymax += delta_xy[1]
return self
def draw(self, ax=None, label=None, color=None):
import matplotlib.pylab as plt
from matplotlib.patches import Rectangle
if ax is None:
ax = plt.gca()
if color is None:
color = 'r'
ax.add_patch(Rectangle((self.xmin, self.ymin), self.width, self.height,
facecolor='none', edgecolor=color,
label=label, linewidth=1))
if label is not None:
plt.annotate(label, self.xy) # , xytext=(0, -self.height / 2), textcoords='offset pixels')
def draw_to_image(self, img, label=None, color=None):
if color is None:
color = (0, 0, 255)
round_tuple = lambda x: tuple([int(round(num)) for num in x])
cv2.rectangle(img, round_tuple((self.xmin, self.ymin)),
round_tuple((self.xmax, self.ymax)), color)
if label is not None:
font_size = 1
font_thickness = 1
font_face = cv2.FONT_HERSHEY_SIMPLEX
text_size, _ = cv2.getTextSize(label, font_face, font_size, font_thickness)
cv2.putText(img, label, round_tuple((self.xy[0] - (text_size[0] / 2), self.ymin - text_size[1])),
font_face, font_size, (255, 255, 255), font_thickness)
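if __name__ == "__main__":
    # illustrative smoke test (not part of the original module): two boxes
    # of area 4 overlapping in a 1 x 1 square give IoU 1 / (4 + 4 - 1)
    a = BBox(0, 0, 2, 2)
    b = BBox(1, 1, 3, 3)
    assert a.intersection(b).area == 1
    assert abs(a.iou(b) - 1.0 / 7.0) < 1e-9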
| python |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import (
InvalidSelectorException,
NoSuchElementException)
# By.id positive
def test_should_be_able_to_find_asingle_element_by_id(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.ID, "linkId")
assert element.get_attribute("id") == "linkId"
def test_should_be_able_to_find_asingle_element_by_numeric_id(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.ID, "2")
assert element.get_attribute("id") == "2"
def test_should_be_able_to_find_an_element_with_css_escape(driver, pages):
pages.load("idElements.html")
element = driver.find_element(By.ID, "with.dots")
assert element.get_attribute("id") == "with.dots"
def test_should_be_able_to_find_multiple_elements_by_id(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "test_id")
assert len(elements) == 2
def test_should_be_able_to_find_multiple_elements_by_numeric_id(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.ID, "2")
assert len(elements) == 8
# By.id negative
def test_should_not_be_able_to_locate_by_id_asingle_element_that_does_not_exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "non_Existent_Button")
def test_should_not_be_able_to_locate_by_id_multiple_elements_that_do_not_exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "non_Existent_Button")
assert len(elements) == 0
def test_finding_asingle_element_by_empty_id_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "")
def test_finding_multiple_elements_by_empty_id_should_return_empty_list(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "")
assert len(elements) == 0
def test_finding_asingle_element_by_id_with_space_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.ID, "nonexistent button")
def test_finding_multiple_elements_by_id_with_space_should_return_empty_list(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.ID, "nonexistent button")
assert len(elements) == 0
# By.name positive
def test_should_be_able_to_find_asingle_element_by_name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "checky")
assert element.get_attribute("value") == "furrfu"
def test_should_be_able_to_find_multiple_elements_by_name(driver, pages):
pages.load("nestedElements.html")
elements = driver.find_elements(By.NAME, "checky")
assert len(elements) > 1
def test_should_be_able_to_find_an_element_that_does_not_support_the_name_property(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.NAME, "div1")
assert element.get_attribute("name") == "div1"
# By.name negative
def test_should_not_be_able_to_locate_by_name_asingle_element_that_does_not_exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "non_Existent_Button")
def test_should_not_be_able_to_locate_by_name_multiple_elements_that_do_not_exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "non_Existent_Button")
assert len(elements) == 0
def test_finding_asingle_element_by_empty_name_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "")
def test_finding_multiple_elements_by_empty_name_should_return_empty_list(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "")
assert len(elements) == 0
def test_finding_asingle_element_by_name_with_space_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.NAME, "nonexistent button")
def test_finding_multiple_elements_by_name_with_space_should_return_empty_list(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.NAME, "nonexistent button")
assert len(elements) == 0
# By.tag_Name positive
def test_should_be_able_to_find_asingle_element_by_tag_name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.TAG_NAME, "input")
assert element.tag_name.lower() == "input"
def test_should_be_able_to_find_multiple_elements_by_tag_name(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "input")
assert len(elements) > 1
# By.tag_Name negative
def test_should_not_be_able_to_locate_by_tag_name_asingle_element_that_does_not_exist(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "non_Existent_Button")
def test_should_not_be_able_to_locate_by_tag_name_multiple_elements_that_do_not_exist(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "non_Existent_Button")
assert len(elements) == 0
def test_finding_asingle_element_by_empty_tag_name_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.TAG_NAME, "")
def test_finding_multiple_elements_by_empty_tag_name_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.TAG_NAME, "")
def test_finding_asingle_element_by_tag_name_with_space_should_throw(driver, pages):
pages.load("formPage.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "nonexistent button")
def test_finding_multiple_elements_by_tag_name_with_space_should_return_empty_list(driver, pages):
pages.load("formPage.html")
elements = driver.find_elements(By.TAG_NAME, "nonexistent button")
assert len(elements) == 0
# By.class_Name positive
def test_should_be_able_to_find_asingle_element_by_class(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "extraDiv")
assert "Another div starts here." in element.text
def test_should_be_able_to_find_multiple_elements_by_class_name(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "nameC")
assert len(elements) > 1
def test_should_find_element_by_class_when_it_is_the_first_name_among_many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameA")
assert element.text == "An H2 title"
def test_should_find_element_by_class_when_it_is_the_last_name_among_many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameC")
assert element.text == "An H2 title"
def test_should_find_element_by_class_when_it_is_in_the_middle_among_many(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "nameBnoise")
assert element.text == "An H2 title"
def test_should_find_element_by_class_when_its_name_is_surrounded_by_whitespace(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CLASS_NAME, "spaceAround")
assert element.text == "Spaced out"
def test_should_find_elements_by_class_when_its_name_is_surrounded_by_whitespace(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CLASS_NAME, "spaceAround")
assert len(elements) == 1
assert elements[0].text == "Spaced out"
# By.class_Name negative
def test_should_not_find_element_by_class_when_the_name_queried_is_shorter_than_candidate_name(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "name_B")
def test_finding_asingle_element_by_empty_class_name_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "")
def test_finding_multiple_elements_by_empty_class_name_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "")
def test_finding_asingle_element_by_compound_class_name_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "a b")
def test_finding_asingle_element_by_invalid_class_name_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CLASS_NAME, "!@#$%^&*")
def test_finding_multiple_elements_by_invalid_class_name_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CLASS_NAME, "!@#$%^&*")
# By.xpath positive
def test_should_be_able_to_find_asingle_element_by_xpath(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//h1")
assert element.text == "XHTML Might Be The Future"
def test_should_be_able_to_find_multiple_elements_by_xpath(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.XPATH, "//div")
assert len(elements) == 13
def test_should_be_able_to_find_many_elements_repeatedly_by_xpath(driver, pages):
pages.load("xhtmlTest.html")
xpath = "//node()[contains(@id,'id')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 3
xpath = "//node()[contains(@id,'nope')]"
assert len(driver.find_elements(By.XPATH, xpath)) == 0
def test_should_be_able_to_identify_elements_by_class(driver, pages):
pages.load("xhtmlTest.html")
header = driver.find_element(By.XPATH, "//h1[@class='header']")
assert header.text == "XHTML Might Be The Future"
def test_should_be_able_to_find_an_element_by_xpath_with_multiple_attributes(driver, pages):
pages.load("formPage.html")
element = driver.find_element(
By.XPATH, "//form[@name='optional']/input[@type='submit' and @value='Click!']")
assert element.tag_name.lower() == "input"
assert element.get_attribute("value") == "Click!"
def test_finding_alink_by_xpath_should_locate_an_element_with_the_given_text(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.XPATH, "//a[text()='click me']")
assert element.text == "click me"
def test_finding_alink_by_xpath_using_contains_keyword_should_work(driver, pages):
pages.load("nestedElements.html")
element = driver.find_element(By.XPATH, "//a[contains(.,'hello world')]")
assert "hello world" in element.text
# @pytest.mark.xfail_chrome(raises=InvalidSelectorException)
# @pytest.mark.xfail_chromiumedge(raises=InvalidSelectorException)
# @pytest.mark.xfail_firefox(raises=InvalidSelectorException)
# @pytest.mark.xfail_remote(raises=InvalidSelectorException)
# @pytest.mark.xfail_safari(raises=NoSuchElementException)
# @pytest.mark.xfail_webkitgtk(raises=InvalidSelectorException)
# def test_Should_Be_Able_To_Find_Element_By_XPath_With_Namespace(driver, pages):
# pages.load("svgPage.html")
# element = driver.find_element(By.XPATH, "//svg:svg//svg:text")
# assert element.text == "Test Chart"
def test_should_be_able_to_find_element_by_xpath_in_xml_document(driver, pages):
pages.load("simple.xml")
element = driver.find_element(By.XPATH, "//foo")
assert "baz" in element.text
# By.xpath negative
def test_should_throw_an_exception_when_there_is_no_link_to_click(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.XPATH, "//a[@id='Not here']")
def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_driver_find_element(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "this][isnot][valid")
def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_driver_find_elements(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "this][isnot][valid")
def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_element_find_element(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "this][isnot][valid")
def test_should_throw_invalid_selector_exception_when_xpath_is_syntactically_invalid_in_element_find_elements(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "this][isnot][valid")
def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_driver_find_element(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_element(By.XPATH, "count(//input)")
def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_driver_find_elements(driver, pages):
pages.load("formPage.html")
with pytest.raises(InvalidSelectorException):
driver.find_elements(By.XPATH, "count(//input)")
def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_element_find_element(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_element(By.XPATH, "count(//input)")
def test_should_throw_invalid_selector_exception_when_xpath_returns_wrong_type_in_element_find_elements(driver, pages):
pages.load("formPage.html")
body = driver.find_element(By.TAG_NAME, "body")
with pytest.raises(InvalidSelectorException):
body.find_elements(By.XPATH, "count(//input)")
# By.css_Selector positive
def test_should_be_able_to_find_asingle_element_by_css_selector(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_should_be_able_to_find_multiple_elements_by_css_selector(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "p")
assert len(elements) > 1
def test_should_be_able_to_find_asingle_element_by_compound_css_selector(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert element.tag_name.lower() == "div"
assert element.get_attribute("class") == "content"
def test_should_be_able_to_find_multiple_elements_by_compound_css_selector(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, "div.extraDiv, div.content")
assert len(elements) > 1
assert elements[0].get_attribute("class") == "content"
assert elements[1].get_attribute("class") == "extraDiv"
def test_should_be_able_to_find_an_element_by_boolean_attribute_using_css_selector(driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected='selected']")
assert element.get_attribute("value") == "two"
def test_should_be_able_to_find_an_element_by_boolean_attribute_using_short_css_selector(driver, pages):
pages.load("locators_tests/boolean_attribute_selected.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
def test_should_be_able_to_find_an_element_by_boolean_attribute_using_short_css_selector_on_html_4_page(driver, pages):
pages.load("locators_tests/boolean_attribute_selected_html4.html")
element = driver.find_element(By.CSS_SELECTOR, "option[selected]")
assert element.get_attribute("value") == "two"
# By.css_Selector negative
def test_should_not_find_element_by_css_selector_when_there_is_no_such_element(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, ".there-is-no-such-class")
def test_should_not_find_elements_by_css_selector_when_there_is_no_such_element(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.CSS_SELECTOR, ".there-is-no-such-class")
assert len(elements) == 0
def test_finding_asingle_element_by_empty_css_selector_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "")
def test_finding_multiple_elements_by_empty_css_selector_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "")
def test_finding_asingle_element_by_invalid_css_selector_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.CSS_SELECTOR, "//a/b/c[@id='1']")
def test_finding_multiple_elements_by_invalid_css_selector_should_throw(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_elements(By.CSS_SELECTOR, "//a/b/c[@id='1']")
# By.link_Text positive
def test_should_be_able_to_find_alink_by_text(driver, pages):
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
def test_should_be_able_to_find_multiple_links_by_text(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "click me")
assert len(elements) == 2
def test_should_find_element_by_link_text_containing_equals_sign(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.LINK_TEXT, "Link=equalssign")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_should_find_multiple_elements_by_link_text_containing_equals_sign(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Link=equalssign")
assert 1 == len(elements)
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
def test_finds_by_link_text_on_xhtml_page(driver, pages):
pages.load("actualXhtmlPage.xhtml")
link_Text = "Foo"
element = driver.find_element(By.LINK_TEXT, link_Text)
assert element.text == link_Text
def test_link_with_formatting_tags(driver, pages):
pages.load("simpleTest.html")
elem = driver.find_element(By.ID, "links")
res = elem.find_element(By.PARTIAL_LINK_TEXT, "link with formatting tags")
assert res.text == "link with formatting tags"
@pytest.mark.xfail_safari
def test_driver_can_get_link_by_link_test_ignoring_trailing_whitespace(driver, pages):
pages.load("simpleTest.html")
link = driver.find_element(By.LINK_TEXT, "link with trailing space")
assert link.get_attribute("id") == "linkWithTrailingSpace"
assert link.text == "link with trailing space"
# By.link_Text negative
def test_should_not_be_able_to_locate_by_link_text_asingle_element_that_does_not_exist(driver, pages):
pages.load("xhtmlTest.html")
with pytest.raises(NoSuchElementException):
driver.find_element(By.LINK_TEXT, "Not here either")
def test_should_not_be_able_to_locate_by_link_text_multiple_elements_that_do_not_exist(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.LINK_TEXT, "Not here either")
assert len(elements) == 0
# By.partial_Link_Text positive
def test_should_be_able_to_find_multiple_elements_by_partial_link_text(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "ick me")
assert len(elements) == 2
def test_should_be_able_to_find_asingle_element_by_partial_link_text(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "anon")
assert "anon" in element.text
def test_should_find_element_by_partial_link_text_containing_equals_sign(driver, pages):
pages.load("xhtmlTest.html")
element = driver.find_element(By.PARTIAL_LINK_TEXT, "Link=")
assert element.get_attribute("id") == "linkWithEqualsSign"
def test_should_find_multiple_elements_by_partial_link_text_containing_equals_sign(driver, pages):
pages.load("xhtmlTest.html")
elements = driver.find_elements(By.PARTIAL_LINK_TEXT, "Link=")
assert len(elements) == 1
assert elements[0].get_attribute("id") == "linkWithEqualsSign"
# Misc tests
def test_driver_should_be_able_to_find_elements_after_loading_more_than_one_page_at_atime(driver, pages):
pages.load("formPage.html")
pages.load("xhtmlTest.html")
link = driver.find_element(By.LINK_TEXT, "click me")
assert link.text == "click me"
# You don't want to ask why this is here
def test_when_finding_by_name_should_not_return_by_id(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "id-name1")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name1")
assert element.get_attribute("value") == "id"
element = driver.find_element(By.NAME, "id-name2")
assert element.get_attribute("value") == "name"
element = driver.find_element(By.ID, "id-name2")
assert element.get_attribute("value") == "id"
def test_should_be_able_to_find_ahidden_elements_by_name(driver, pages):
pages.load("formPage.html")
element = driver.find_element(By.NAME, "hidden")
assert element.get_attribute("name") == "hidden"
def test_should_not_be_able_to_find_an_element_on_ablank_page(driver, pages):
driver.get("about:blank")
with pytest.raises(NoSuchElementException):
driver.find_element(By.TAG_NAME, "a")
| python |
from .effector import Effector
from .evidence import Evidence
from .gene import Gene
from .operon import Operon
from .organism import Organism
from .pathway import Pathway
from .publication import Publication
from .regulator import Regulator
from .regulatory_family import RegulatoryFamily
from .regulatory_interaction import RegulatoryInteraction
from .source import Source
from .tfbs import TFBS
| python |
"""
SHA-256 PRNG prototype in Python
"""
import numpy as np
import sys
import struct
# Import base class for PRNGs
import random
# Import library of cryptographic hash functions
import hashlib
# Define useful constants
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
HASHLEN = 256 # Number of bits in a hash output
RECIP_HASHLEN = 2**-HASHLEN
################################################################################
############################## Int from Hash ###################################
################################################################################
def int_from_hash_py2(hash):
'''
Convert byte(s) to ints, specific for Python versions < 3.
Parameters
----------
hash : bytes
Hash or list of hashes to convert to integers
Returns
-------
int or list ndarray of ints
'''
if isinstance(hash, list):
hash_int = np.array([int(h.encode('hex'), 16) for h in hash])
else:
hash_int = int(hash.encode('hex'), 16)
return hash_int
def int_from_hash_py3(hash):
'''
Convert byte(s) to ints, specific for Python 3.
Parameters
----------
hash : bytes
Hash or list of hashes to convert to integers
Returns
-------
int or list ndarray of ints
'''
if isinstance(hash, list):
hash_int = np.array([int.from_bytes(h, 'big') for h in hash])
else:
hash_int = int.from_bytes(hash, 'big')
return hash_int
if sys.version_info[0] < 3:
int_from_hash = int_from_hash_py2
else:
int_from_hash = int_from_hash_py3
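# A quick illustration of the dispatch above, using the Python 3 branch
# (toy inputs for illustration only; real inputs are 32-byte SHA-256 digests):
#
#     >>> int_from_hash(b'\x01\x00')
#     256
#     >>> int_from_hash([b'\x01', b'\x02'])
#     array([1, 2])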
################################################################################
############################## SHA-256 Class ###################################
################################################################################
class SHA256(random.Random):
"""
PRNG based on the SHA-256 cryptographic hash function.
"""
def __init__(self, seed=None):
"""
Initialize an instance of the SHA-256 PRNG.
Parameters
----------
seed : {None, int, string} (optional)
Random seed used to initialize the PRNG. It can be an integer of arbitrary length,
a string of arbitrary length, or `None`. Default is `None`.
"""
self.seed(seed)
self.hashfun = "SHA-256"
self._basehash()
def __repr__(self):
"""
>>> r = SHA256(5)
>>> repr(r)
'SHA256 PRNG with seed 5 and counter 0'
>>> str(r)
'SHA256 PRNG with seed 5 and counter 0'
"""
stringrepr = self.__class__.__name__ + " PRNG with seed " + \
str(self.baseseed) + " and counter " + str(self.counter)
return stringrepr
def _basehash(self):
"""
Initialize the SHA256 hash function with given seed
"""
if self.baseseed is not None:
hashinput = (str(self.baseseed) + ',').encode()
self.basehash = hashlib.sha256(hashinput)
else:
self.basehash = None
def seed(self, baseseed=None):
"""
Initialize internal seed and hashable object with counter 0.
Parameters
----------
baseseed : {None, int, string} (optional)
Random seed used to initialize the PRNG. It can be an integer of arbitrary length,
a string of arbitrary length, or `None`. Default is `None`.
counter : int (optional)
Integer that counts how many times the PRNG has been called. The counter
is used to update the internal state after each step. Default is 0.
"""
if not hasattr(self, 'baseseed') or baseseed != self.baseseed:
self.baseseed = baseseed
self._basehash()
self.counter = 0
self.randbits = None
self.randbits_remaining = 0
def setstate(self, baseseed=None, counter=0):
"""
Set the state (seed and counter)
Parameters
----------
baseseed : {None, int, string} (optional)
Random seed used to initialize the PRNG. It can be an integer of arbitrary length,
a string of arbitrary length, or `None`. Default is `None`.
counter : int (optional)
Integer that counts how many times the PRNG has been called. The counter
is used to update the internal state after each step. Default is 0.
"""
(self.baseseed, self.counter) = (baseseed, counter)
self._basehash()
self.basehash.update(b'\x00'*counter)
def getstate(self):
"""
Get the current state of the PRNG
"""
return (self.baseseed, self.counter)
def jumpahead(self, n):
"""
Jump ahead n steps in the period
>>> r = SHA256(5)
>>> r.jumpahead(5)
>>> repr(r)
'SHA256 PRNG with seed 5 and counter 5'
"""
self.counter += n
self.basehash.update(b'\x00'*n)
def next(self):
"""
Increment the counter and basehash by one
"""
self.jumpahead(1)
def nextRandom(self):
"""
Generate the next hash value
>>> r = SHA256(12345678901234567890)
>>> r.next()
>>> r.nextRandom().hex()
'4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1'
"""
# Apply SHA-256, interpreting digest output as integer
# to yield 256-bit integer (a python "long integer")
hash_output = self.basehash.digest()
self.next()
return hash_output
def random(self, size=None):
"""
Generate random numbers between 0 and 1.
size controls the number of ints generated. If size=None, just one is produced.
The following tests match the output of Ron's and Philip's implementations.
>>> r = SHA256(12345678901234567890)
>>> r.random(2)
array([0.9272915426537484, 0.1916135318809483], dtype=object)
>>> r.random((2, 2))
array([[0.5846237047310486, 0.18694233108130068],
[0.9022661737961881, 0.052310932788987144]], dtype=object)
Parameters
----------
size : {int, tuple, None}
If None (default), return a single random number.
If size is an int, return that many random numbers.
If size is a tuple, it determines the shape of an array
filled with random numbers.
"""
if size is None:
hash_output = self.nextRandom()
return int_from_hash(hash_output)*RECIP_HASHLEN
else:
size2 = np.prod(size)
hash_output = [self.nextRandom() for i in range(size2)]
res = int_from_hash(hash_output)*RECIP_HASHLEN
return np.reshape(res, size)
def randint_trunc(self, a, b, size=None):
"""
Deprecated. For large values of (b-a), this algorithm does not produce integers
uniformly at random.
Generate random integers between a (inclusive) and b (exclusive).
size controls the number of ints generated. If size=None, just one is produced.
>>> r = SHA256(12345678901234567890)
>>> r.randint_trunc(0, 5, size=3)
array([0, 0, 0])
Parameters
----------
a : int
lower limit (included in samples)
b : int
upper limit (not included in samples)
size : {int, tuple, None}
If None (default), return a single random number.
If size is an int, return that many random numbers.
If size is a tuple, it determines the shape of an array
filled with random numbers.
"""
assert a <= b, "lower and upper limits are switched"
if size is None:
return a + (int_from_hash(self.nextRandom()) % (b-a))
else:
return np.reshape(np.array([a + (int_from_hash(self.nextRandom()) % (b-a)) \
for i in np.arange(np.prod(size))]), size)
def getrandbits(self, k):
"""
Generate k pseudorandom bits.
If self.randbits contains at least k bits, returns k of those bits and removes them.
If self.randbits has fewer than k bits, calls self.nextRandom() as many times as needed to
populate self.randbits with at least k random bits, returns those k, and keeps
any remaining bits in self.randbits
Parameters
----------
k : int
number of pseudorandom bits
"""
if self.randbits is None: # initialize the cache
self.randbits = int_from_hash(self.nextRandom())
self.randbits_remaining = HASHLEN
while k > self.randbits_remaining: # pre-pend more random bits
# accounts for leading 0s
self.randbits = (int_from_hash(self.nextRandom()) << \
self.randbits_remaining | self.randbits)
self.randbits_remaining = self.randbits_remaining + HASHLEN
val = (self.randbits & int(2**k-1)) # harvest least significant k bits
self.randbits_remaining = self.randbits_remaining - k
self.randbits = self.randbits >> k # discard the k harvested bits
return val
def randbelow_from_randbits(self, n):
"""
Generate a random integer between 0 (inclusive) and n (exclusive).
Raises ValueError if n==0.
Parameters
----------
n : int
upper limit
"""
if n <= 0:
raise ValueError("n must be a positive integer")
k = int(n-1).bit_length()
r = self.getrandbits(k) # 0 <= r < 2**k
while int(r) >= n:
r = self.getrandbits(k)
return int(r)
def randint(self, a, b, size=None):
"""
Generate random integers between a (inclusive) and b (exclusive).
size controls the number of ints generated. If size=None, just one is produced.
>>> r = SHA256(12345678901234567890)
>>> r.randint(0, 5, size=3)
array([3, 2, 4])
Parameters
----------
a : int
lower limit (included in samples)
b : int
upper limit (not included in samples)
size : {int, tuple, None}
If None (default), return a single random number.
If size is an int, return that many random numbers.
If size is a tuple, it determines the shape of an array
filled with random numbers.
"""
assert a <= b, "lower and upper limits are switched"
if size is None:
return a + self.randbelow_from_randbits(b-a)
else:
return np.reshape(np.array([a + self.randbelow_from_randbits(b-a) \
for i in np.arange(np.prod(size))]), size)
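# Minimal usage sketch of the SHA256 PRNG defined above (method names and
# semantics are those implemented in this class; concrete output values are
# omitted because they depend on the seed):
#
#     prng = SHA256(seed=42)
#     u = prng.random()                 # one float in [0, 1)
#     m = prng.random(size=(2, 3))      # 2x3 array of floats in [0, 1)
#     k = prng.randint(0, 10, size=5)   # five ints in {0, ..., 9}
#     state = prng.getstate()           # (seed, counter)
#     prng.setstate(*state)             # restore the saved state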
| python |
#
# Copyright (c) 2020 Saarland University.
#
# This file is part of AM Parser
# (see https://github.com/coli-saar/am-parser/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
from typing import Dict, Optional, Any, List
import logging
from overrides import overrides
import torch
from torch.nn.modules import Dropout
from allennlp.common.checks import check_dimensions_match, ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder, Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask
from graph_dependency_parser.components.weight_sharer import MTLWeightSharer
from graph_dependency_parser.components.AMTask import AMTask
from graph_dependency_parser.components.spacy_token_embedder import TokenToVec
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("graph_dependency_parser")
class GraphDependencyParser(Model):
"""
This dependency parser is a blueprint for several graph-based dependency parsers.
There are several possible edge models and loss functions.
For decoding, the CLE algorithm is used (during training, attachment scores are usually based on greedy decoding).
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use to generate representations
of tokens.
edge_model: ``components.edge_models.EdgeModel``, required.
The edge model to be used.
loss_function: ``components.losses.EdgeLoss``, required.
The (edge) loss function to be used.
supertagger: ``components.supertagger.FragmentSupertagger``, required.
The supertagging model that predicts graph constants (graph fragments + types)
lexlabeltagger: ``components.supertagger.LexlabelTagger``, required.
The supertagging model that predicts lexical labels for the supertags.
supertagger_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
The loss function for the supertagging model.
lexlabel_loss: ``components.losses.supertagging.SupertaggingLoss``, required.
The loss function for the lexical label tagger.
loss_mixing : Dict[str,float] = None,
The mixing coefficients for the different losses. Valid loss names are "edge_existence",
"edge_label","supertagging" and "lexlabel".
pos_tag_embedding : ``Embedding``, optional.
Used to embed the ``pos_tags`` ``SequenceLabelField`` we get as input to the model.
lemma_embedding : ``Embedding``, optional.
Used to embed the ``lemmas`` ``SequenceLabelField`` we get as input to the model.
ne_embedding : ``Embedding``, optional.
Used to embed the ``ner_labels`` ``SequenceLabelField`` we get as input to the model.
use_mst_decoding_for_validation : ``bool``, optional (default = True).
Whether to use Edmond's algorithm to find the optimal minimum spanning tree during validation.
If false, decoding is greedy.
dropout : ``float``, optional, (default = 0.0)
The variational dropout applied to the output of the encoder and MLP layers.
input_dropout : ``float``, optional, (default = 0.0)
The dropout applied to the embedded text input.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
validation_evaluator: ``ValidationEvaluator``, optional (default=``None``)
If provided, will be used to compute external validation metrics after each epoch.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: MTLWeightSharer,
tasks: List[AMTask],
pos_tag_embedding: Embedding = None,
lemma_embedding: Embedding = None,
ne_embedding: Embedding = None,
input_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
tok2vec : Optional[TokenToVec] = None) -> None:
super(GraphDependencyParser, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.encoder = encoder
self.tok2vec = tok2vec
self._pos_tag_embedding = pos_tag_embedding or None
self._lemma_embedding = lemma_embedding
self._ne_embedding = ne_embedding
self._input_dropout = Dropout(input_dropout)
self._head_sentinel = torch.nn.Parameter(torch.randn([1, 1, encoder.get_output_dim()]))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
if self._lemma_embedding is not None:
representation_dim += lemma_embedding.get_output_dim()
if self._ne_embedding is not None:
representation_dim += ne_embedding.get_output_dim()
assert len(tasks) > 0, "List of tasks must not be empty"
self.tasks : Dict[str, AMTask] = {t.name : t for t in tasks}
if self.tok2vec:
representation_dim += self.tok2vec.get_output_dim()
check_dimensions_match(representation_dim, encoder.get_input_dim(),
"text field embedding dim", "encoder input dim")
for t in tasks:
t.check_all_dimensions_match(encoder.get_output_dim())
for formalism,task in sorted(self.tasks.items(), key=lambda nt: nt[0]):
#sort by name of formalism for consistent ordering
self.add_module(formalism,task)
initializer(self)
@overrides
def forward(self, # type: ignore
words: Dict[str, torch.LongTensor],
pos_tags: torch.LongTensor,
lemmas: torch.LongTensor,
ner_tags: torch.LongTensor,
metadata: List[Dict[str, Any]],
supertags: torch.LongTensor = None,
lexlabels: torch.LongTensor = None,
head_tags: torch.LongTensor = None,
head_indices: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
words : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, sequence_length)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
pos_tags : ``torch.LongTensor``, required
The output of a ``SequenceLabelField`` containing POS tags.
POS tags are required regardless of whether they are used in the model,
because they are used to filter the evaluation metric to only consider
heads of words which are not punctuation.
metadata : List[Dict[str, Any]], optional (default=None)
A dictionary of metadata for each batch element which has keys:
words : ``List[str]``, required.
The tokens in the original sentence.
pos : ``List[str]``, required.
The POS tags for each word.
head_tags (= edge labels) : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold edge labels for the arcs
in the dependency parse. Has shape ``(batch_size, sequence_length)``.
head_indices : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer indices denoting the parent of every
word in the dependency parse. Has shape ``(batch_size, sequence_length)``.
Returns
-------
An output dictionary consisting of:
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
arc_loss : ``torch.FloatTensor``
The loss contribution from the unlabeled arcs.
edge_label_loss : ``torch.FloatTensor``
The loss contribution from the edge labels.
heads : ``torch.FloatTensor``
The predicted head indices for each word. A tensor
of shape (batch_size, sequence_length).
edge_labels : ``torch.FloatTensor``
The predicted head types for each arc. A tensor
of shape (batch_size, sequence_length).
mask : ``torch.LongTensor``
A mask denoting the padded elements in the batch.
"""
t0 = time.time()
if 'formalism' not in metadata[0]:
raise ConfigurationError("metadata is missing 'formalism' key.\
Please use the amconll dataset reader.")
formalism_of_batch = metadata[0]['formalism']
for entry in metadata:
if entry['formalism'] != formalism_of_batch:
raise ConfigurationError("Two formalisms in the same batch.")
if formalism_of_batch not in self.tasks:
raise ConfigurationError(f"Got formalism {formalism_of_batch} but I only have these tasks: {list(self.tasks.keys())}")
if self.tok2vec:
token_ids = words["tokens"]
embedded_text_input = self.tok2vec.embed(self.vocab, token_ids) #shape (batch_size, seq len, encoder dim)
concatenated_input = [embedded_text_input, self.text_field_embedder(words)]
else:
embedded_text_input = self.text_field_embedder(words)
concatenated_input = [embedded_text_input]
if pos_tags is not None and self._pos_tag_embedding is not None:
concatenated_input.append(self._pos_tag_embedding(pos_tags))
elif self._pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
if self._lemma_embedding is not None:
concatenated_input.append(self._lemma_embedding(lemmas))
if self._ne_embedding is not None:
concatenated_input.append(self._ne_embedding(ner_tags))
if len(concatenated_input) > 1:
embedded_text_input = torch.cat(concatenated_input, -1)
mask = get_text_field_mask(words)
embedded_text_input = self._input_dropout(embedded_text_input)
encoded_text_parsing, encoded_text_tagging = self.encoder(formalism_of_batch, embedded_text_input, mask) #potentially weight-sharing
batch_size, seq_len, encoding_dim = encoded_text_parsing.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the artificial root onto the sentence representation.
encoded_text_parsing = torch.cat([head_sentinel, encoded_text_parsing], 1)
if encoded_text_tagging is not None: #might be none when batch is of formalism without tagging (UD)
batch_size, seq_len, encoding_dim = encoded_text_tagging.size()
head_sentinel = self._head_sentinel.expand(batch_size, 1, encoding_dim)
# Concatenate the artificial root onto the sentence representation.
encoded_text_tagging = torch.cat([head_sentinel, encoded_text_tagging], 1)
mask = torch.cat([mask.new_ones(batch_size, 1), mask], 1)
if head_indices is not None:
head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
if head_tags is not None:
head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
ret = self.tasks[formalism_of_batch](encoded_text_parsing, encoded_text_tagging, mask, pos_tags, metadata, supertags, lexlabels, head_tags, head_indices)
t1 = time.time()
# Save time and batch size, but save it separately for each batch element.
ret["batch_size"] = torch.ones(batch_size, dtype=torch.long) * batch_size
ret["batch_time"] = torch.ones(batch_size) * (t1-t0)
return ret
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
In contrast to its name, this function does not perform the decoding but only prepares it.
Therefore, we take the result of forward and perform the following steps (for each sentence in batch):
- remove padding
- identify the root of the sentence, group other root-candidates under the proper root
- collect a selection of supertags to speed up computation (top k selection is done later)
:param output_dict: result of forward
:return: output_dict with the following keys added:
- lexlabels: nested list: contains for each sentence, for each word the most likely lexical label (w/o artificial root)
- supertags: nested list: contains for each sentence, for each word the most likely supertag (w/o artificial root)
"""
formalism = output_dict.pop("formalism")
return self.tasks[formalism].decode(output_dict)
@overrides
def get_metrics(self, reset: bool = False, model_path = None) -> Dict[str, float]:
r = dict()
for name,task in self.tasks.items():
for metric, val in task.metrics(parser_model=self, reset=reset, model_path=model_path).items():
r[name+"_"+metric] = val
return r
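# Note on get_metrics: with two tasks named, e.g., "dm" and "ud" (hypothetical
# names), a per-task metric "LAS" surfaces in the returned dict as "dm_LAS"
# and "ud_LAS", so every metric is logged per formalism.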
| python |
from django.db import IntegrityError
from django.db.models import Count, Q, IntegerField, CharField
from django.db.models.functions import Coalesce
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from games.models import (
SwitchGame,
SwitchGameUS,
SwitchGameEU,
SwitchGameMedia,
SwitchGamePrice,
)
from classification.models import (
ConfirmedHighlight,
ConfirmedTag,
Recomendation,
Review,
SuggestedTag,
)
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def all_games(request):
games = SwitchGame.objects.all() \
.annotate(game_title=Coalesce('game_eu__title', 'game_us__title')) \
.annotate(game_image=Coalesce(
'game_eu__image_sq_url', 'game_us__front_box_art',
output_field=CharField())) \
.annotate(likes=Count(
'recomendation',
filter=Q(recomendation__recomends=True),
output_field=IntegerField())) \
.annotate(dislikes=Count(
'recomendation',
filter=Q(recomendation__recomends=False),
output_field=IntegerField())) \
.annotate(highlighted=Count(
'confirmedhighlight',
filter=Q(confirmedhighlight__confirmed_by='STF'),
output_field=IntegerField())) \
.order_by('game_title')
response = []
for game in games:
response.append({
'id': game.id,
'title': game.game_title,
'code_unique': game.game_code_unique,
'likes': game.likes,
'dislikes': game.dislikes,
'image_eu_square': game.game_image,
'highlighted': game.highlighted > 0,
'hide': game.hide
})
return Response(response, status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_get_simple(request, game_id):
game = SwitchGame.objects \
.filter(id=game_id) \
.annotate(game_title=Coalesce('game_eu__title', 'game_us__title')) \
.annotate(game_image=Coalesce(
'game_eu__image_sq_url', 'game_us__front_box_art',
output_field=CharField()))
if game.count() == 0:
return Response(status=status.HTTP_404_NOT_FOUND)
response = game_to_json_simple(game[0], request.user)
return Response(response, status=status.HTTP_200_OK)
@api_view(['POST', 'DELETE'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_hide(request, game_id):
game = get_object_or_404(SwitchGame, id=game_id)
if request.method == 'POST':
game.hide = True
elif request.method == 'DELETE':
game.hide = False
try:
game.save()
return Response(status=status.HTTP_200_OK)
except Exception as e:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['POST'])
@permission_classes((IsAuthenticated, IsAdminUser))
def game_merge(request, game1_id, game2_id):
game1 = get_object_or_404(SwitchGame, id=game1_id)
game2 = get_object_or_404(SwitchGame, id=game2_id)
# If one of the games is already complete, return error
if (game1.game_us and game1.game_eu) or (game2.game_us and game2.game_eu):
return Response(status=status.HTTP_400_BAD_REQUEST)
# If each game has one different region, merge them. Else return error
if not game1.game_us and game2.game_us:
game1.game_us = game2.game_us
elif not game1.game_eu and game2.game_eu:
game1.game_eu = game2.game_eu
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
# Copy recomendations, reviews, tag votes and media from game2 to game1
media = SwitchGameMedia.objects.filter(game_id=game2_id)
reviews = Review.objects.filter(game_id=game2_id)
recomendations = Recomendation.objects.filter(game_id=game2_id)
suggested_tags = SuggestedTag.objects.filter(game_id=game2_id)
confirmed_tags = ConfirmedTag.objects.filter(
game_id=game2_id, confirmed_by='NTD')
prices = SwitchGamePrice.objects.filter(game_id=game2_id)
# Reorder but don't save yet
game1_media_count = SwitchGameMedia.objects.filter(game_id=game1_id) \
.count()
for m in media:
m.order = m.order + game1_media_count
# Try to move recomendations, reviews, suggested/ confirmed tags and media
for query in [
media, reviews, recomendations, suggested_tags, confirmed_tags, prices
]:
for item in query:
item.game_id = game1_id
try:
item.save()
except IntegrityError:
item.delete()
try:
game2.delete()
game1.save()
return Response(status=status.HTTP_200_OK)
except Exception as e:
return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def game_to_json_simple(game, user):
game_json = {
'title': game.game_title,
'game_code': game.game_code_unique,
'game_image': game.game_image,
}
return game_json
| python |
import os
from flask import Flask
from flask import render_template
from flask_assets import Environment
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from config.environments import app_config
db = SQLAlchemy()
def get_config_name():
return os.getenv('FLASK_CONFIG') or 'development'
def create_app():
app = Flask(
__name__,
instance_relative_config=True,
static_url_path='/static',
static_folder='../static',
)
app.config.from_object(app_config[get_config_name()])
app.config.from_pyfile('config.py')
# Database
db.init_app(app)
# Migrations
migrate = Migrate(app, db)
configure_migrations(app, db, migrate)
configure_error_handlers(app)
configure_views(app)
return app
def configure_migrations(app, db, migrate):
from .models import import_models
import_models(app, db, migrate)
def configure_views(app):
from .views import register_views
register_views(app)
def configure_error_handlers(app):
@app.errorhandler(404)
def not_found(error):
return (render_template('404.html'), 404)
@app.route('/favicon.ico')
def favicon():
return ''
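# Minimal usage sketch (assumes FLASK_CONFIG names a key of app_config and an
# instance/config.py exists; the importing module path below is hypothetical):
#
#     from app import create_app
#     app = create_app()
#     app.run(debug=True)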
| python |
#
# Copyright (c) 2019 Juniper Networks, Inc. All rights reserved.
#
from cfgm_common.exceptions import BadRequest, NoIdError
from cfgm_common.exceptions import HttpError, RequestSizeError
from vnc_api.gen.resource_client import AccessControlList
from schema_transformer.resources._resource_base import ResourceBaseST
from schema_transformer.utils import _raise_and_send_uve_to_sandesh
def _access_control_list_update(acl_obj, name, obj, entries):
if acl_obj is None:
if entries is None:
return None
acl_obj = AccessControlList(name, obj, entries)
try:
ResourceBaseST._vnc_lib.access_control_list_create(acl_obj)
return acl_obj
except (NoIdError, BadRequest) as e:
ResourceBaseST._logger.error(
"Error while creating acl %s for %s: %s" %
(name, obj.get_fq_name_str(), str(e)))
except RequestSizeError:
# log the error and raise an alarm
ResourceBaseST._logger.error(
"Bottle request size error while creating acl %s for %s" %
(name, obj.get_fq_name_str()))
err_info = {'acl rule limit exceeded': True}
_raise_and_send_uve_to_sandesh('ACL', err_info,
ResourceBaseST._sandesh)
return None
else:
if entries is None:
try:
ResourceBaseST._vnc_lib.access_control_list_delete(
id=acl_obj.uuid)
except NoIdError:
pass
return None
entries_hash = hash(entries)
# if entries did not change, just return the object
if acl_obj.get_access_control_list_hash() == entries_hash:
return acl_obj
# Set new value of entries on the ACL
acl_obj.set_access_control_list_entries(entries)
acl_obj.set_access_control_list_hash(entries_hash)
try:
ResourceBaseST._vnc_lib.access_control_list_update(acl_obj)
except HttpError as he:
ResourceBaseST._logger.error(
"HTTP error while updating acl %s for %s: %d, %s" %
(name, obj.get_fq_name_str(), he.status_code, he.content))
except NoIdError:
ResourceBaseST._logger.error(
"NoIdError while updating acl %s for %s" %
(name, obj.get_fq_name_str()))
except RequestSizeError:
# log the error and raise an alarm
ResourceBaseST._logger.error(
"Bottle request size error while creating acl %s for %s" %
(name, obj.get_fq_name_str()))
err_info = {'acl rule limit exceeded': True}
_raise_and_send_uve_to_sandesh('ACL', err_info,
ResourceBaseST._sandesh)
return acl_obj
# end _access_control_list_update
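# Usage sketch for the helper above (hypothetical names; requires
# ResourceBaseST._vnc_lib to have been initialised by the schema transformer):
#
#     acl = _access_control_list_update(None, 'ingress-acl', vn_obj, entries)
#     ...
#     # Passing entries=None deletes the ACL again and returns None:
#     acl = _access_control_list_update(acl, 'ingress-acl', vn_obj, None)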
| python |
#
# ==================================
# | |
# | Utility functions for CBGB |
# | |
# ==================================
#
from collections import OrderedDict
from modules import gb
import importlib
import modules.active_cfg
cfg = importlib.import_module("configs." + modules.active_cfg.module_name)
# ====== removeComments ========
# Takes a list of code lines and removes comments.
# For fixed format files, any character at position 0 is a comment.
# For lines containing '!' everything after '!' is removed.
def removeComments(code_lines):
code_lines_nocomment = []
for line in code_lines:
if len(line) == 0:
code_lines_nocomment.append('')
continue
if (cfg.format == 'fixed') and (line[0] != ' '):
new_line = ''
elif '!' in line:
pos = line.find('!')
new_line = line[:pos]
else:
new_line = line
code_lines_nocomment.append(new_line)
return code_lines_nocomment
# ====== END: removeComments ========
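# Example (fixed format): 'C old-style comment' has a non-blank character in
# column 1 and becomes '', while '      x = 1 ! note' is cut at the '!' and
# becomes '      x = 1 '.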
# ====== removeBlankLines ========
# Removes any empty (all whitespace) strings from a list of strings.
def removeBlankLines(code_lines):
# Walk through the list of code lines backwards and discard
# any lines that contain nothing but whitespace.
for i in range(len(code_lines))[::-1]:
if code_lines[i].strip() == '':
code_lines.pop(i)
return code_lines
# ====== END: removeBlankLines ========
# ====== removeLeadingTrailingBlanks ========
# Removes leading and trailing blanks from the strings
# in a list of strings.
def removeLeadingTrailingBlanks(code_lines):
for i in range(len(code_lines)):
code_lines[i] = code_lines[i].lstrip().rstrip()
return code_lines
# ====== END: removeLeadingTrailingBlanks ========
# ====== removeStatementLabels ========
# Replaces statement labels with empty spaces.
# (A statement label is a number given as the first
# non-blank part of a statement.)
def removeStatementLabels(code_lines):
for i in range(len(code_lines)):
line = code_lines[i]
if cfg.format == 'fixed':
label = line[0:5].strip()
if label.isdigit():
code_lines[i] = line.replace(label, ' '*len(label), 1)
elif cfg.format == 'free':
line_list = line.split()
if (len(line_list) > 0):
label = line_list[0]
if label.isdigit():
code_lines[i] = line.replace(label, ' '*len(label), 1)
else:
raise RuntimeError("cfg.format must be set to either 'fixed' or 'free'.")
return code_lines
# ====== END: removeStatementLabels ========
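# Example (fixed format): in '   10 CONTINUE' the label '10' sits in the
# label field (columns 1-5) and is blanked out, giving '      CONTINUE'.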
# ====== removeKeywords ========
# Replaces Fortran keywords that CBGB doesn't
# care about with empty spaces.
def removeKeywords(code_lines):
for i in range(len(code_lines)):
line = code_lines[i]
line = line.replace("::", " ")
line = line.replace("intent(in)", " ")
line = line.replace("intent(out)", " ")
line = line.replace("intent (in)", " ")
line = line.replace("intent (out)", " ")
# Add more keywords here...
code_lines[i] = line
return code_lines
# ====== END: removeKeywords ========
# ====== allSingleSpace ========
# Replaces multiple spaces with a single space.
def allSingleSpace(code_lines):
for i in range(len(code_lines)):
line = code_lines[i]
line = ' '.join(line.split())
code_lines[i] = line
return code_lines
# ====== END: allSingleSpace ========
# ====== joinContinuedLines ========
def joinContinuedLines(code_lines):
joined_code_lines = ['']
if cfg.format == 'fixed':
for line in code_lines:
# Check for line continuation (any character at column 6).
# (This assumes that len(line) >= 6 for all lines in code_lines,
# which should be OK due to prior code formatting.)
try:
# - If found, append to previous line.
if line[5] not in [' ','\t']:
joined_code_lines[-1] += line[6:]
# - If not found, store current_line and start constructing a new.
else:
joined_code_lines.append(line)
except:
print([line])
raise
elif cfg.format == 'free':
continue_line = False
for line in code_lines:
if continue_line:
if line.lstrip()[0] == '&':
joined_code_lines[-1] += line.lstrip()[1:].rstrip().rstrip('&')
else:
joined_code_lines[-1] += line.rstrip().rstrip('&')
else:
joined_code_lines.append(line.rstrip().rstrip('&'))
# Check for line continuation. (Line ends with '&'.)
if line.rstrip()[-1] == '&':
continue_line = True
else:
continue_line = False
else:
raise RuntimeError("cfg.format must be set to either 'fixed' or 'free'.")
if joined_code_lines[0] == '':
joined_code_lines.pop(0)
return joined_code_lines
# ====== END: joinContinuedLines ========
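# Example (fixed format): any non-blank character in column 6 marks a
# continuation, so
#     '      X = 1.0'
#     '     &+ 2.0'
# join into the single logical line '      X = 1.0+ 2.0' (everything from
# column 7 onwards is appended to the previous line).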
# ====== getCodeParts ========
def getCodeParts(code_lines, prepend_module_name=False):
code_parts_dict = OrderedDict()
unnamed_part_counter = 1
start_line = 0
end_line = 0
current_part = 'general'
current_module = ''
for i, line in enumerate(code_lines):
#
# Detect beginning/end of a module
#
if current_part == 'general':
# Detect beginning of a module
if 'module ' in line[0:7].lower():
current_module = line.split()[1]
# Detect end of a module
if current_module != '':
if (line.replace(' ','').strip().lower() in ['end','endmodule', 'endmodule'+current_module.lower()]):
current_module = ''
#
# Detect start of program/function/subroutine, end current 'general' part
#
if current_part == 'general':
new_part = ''
if 'subroutine ' in line[0:11].lower():
new_part = 'subroutine'
elif ('function ' in line[0:9].lower()) or (' function ' in line.lower()):
new_part = 'function'
elif 'program ' in line[0:8].lower():
new_part = 'program'
# If the beginning of a new code part is found:
# - store the line numbers for the current 'general' code part
# - set start_line for the new code part
# - identify a name for the new code part
if new_part in ['subroutine', 'function', 'program']:
# Store lines (if any) from current 'general' part
if (start_line < i):
if current_part == 'general':
name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
unnamed_part_counter += 1
code_parts_dict[name_long] = {
'category' : current_part,
'code_lines' : code_lines[start_line:i],
'module' : current_module
}
# Restart line count for new code part
start_line = i
# Identify name for new code part
name = getCodePartName(line, new_part)
if (name == 'unnamed_' + new_part):
name = name + '_' + str(unnamed_part_counter)
unnamed_part_counter += 1
if (current_module != '') and (prepend_module_name):
name_long = current_module + '::' + name
else:
name_long = name
# Update current_part
current_part = new_part
#
# Detect end of program/function/subroutine, start new 'general' part
#
elif (current_part in ['subroutine', 'function', 'program']) and (line.replace(' ','').strip().lower() in ['end','end'+current_part, 'end'+current_part+name.lower()]):
# Store in dict
if (start_line < i):
if current_part == 'general':
name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
unnamed_part_counter += 1
code_parts_dict[name_long] = {
'category' : current_part,
'code_lines' : code_lines[start_line:i+1],
'module' : current_module
}
# Set variables for the next code part
start_line = i+1
current_part = 'general'
#
# end loop over code lines
#
# Store final bit:
if (start_line < i):
if current_part == 'general':
name_long = 'unnamed_' + current_part + '_' + str(unnamed_part_counter)
unnamed_part_counter += 1
code_parts_dict[name_long] = {
'category' : current_part,
'code_lines' : code_lines[start_line:i+1],
'module' : current_module
}
return code_parts_dict
# ====== END: getCodeParts ========
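# Illustration of the returned structure, for a hypothetical input defining a
# subroutine 'foo' inside module 'bar' (parsed with prepend_module_name=True):
#
#     OrderedDict([('bar::foo', {'category'  : 'subroutine',
#                                'code_lines': [...],
#                                'module'    : 'bar'})])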
# ====== getCodePartName ========
def getCodePartName(code_line, keyword):
line_list = code_line.split()
line_list_lowercase = code_line.lower().split()
keyword_index = line_list_lowercase.index(keyword)
if len(line_list) == keyword_index+1:
name = 'unnamed_' + keyword
else:
name_item = line_list[keyword_index+1]
if '(' in name_item:
name = name_item[:name_item.find('(')]
else:
name = name_item
return name
# ====== END: getCodePartName ========
# ====== getImplicitDefs ========
# Return a dict with the following structure:
# {
# 'a': ('double precision',1),
# 'b': ('real',8),
# 'c': (None,None),
# ...
# }
#
def getImplicitDefs(code_lines):
implicit_defs = dict(gb.default_implicit_types)  # copy, so the module-level defaults are not mutated
for i,line in enumerate(code_lines):
# Split line into words
line_list = line.split()
# Look for 'implicit' statement
if line_list[0].lower() == 'implicit':
# If 'implicit none', then no other 'implicit' statements are allowed
if line_list[1].lower() == 'none':
return dict.fromkeys(gb.alphabet,(None,None))
# Remove the 'implicit' keyword
typedef_line = ' '.join(line_list[1:])
# If there are multiple implicit statements on a single line,
# split them up and treat them separately.
for temp_line in typedef_line.split(')'):
# Do a bunch of string manipulations to identify
# the type name (e.g. 'double precision') and
# character specifications (e.g. 'a-z').
if temp_line == '':
continue
temp_line = temp_line.replace('(','')
temp_line = temp_line.replace(',',' ')
temp_line = temp_line.strip()
while ' -' in temp_line:
temp_line = temp_line.replace(' -','-')
while '- ' in temp_line:
temp_line = temp_line.replace('- ','-')
temp_line = ' '.join(temp_line.split())
temp_line_list = temp_line.split()
char_list = []
type_name_list = []
for entry in temp_line_list:
if ((len(entry)==1) and (entry in gb.alphabet)) or (len(entry)==3 and (entry[1]=='-')):
char_list.append(entry)
else:
type_name_list.append(entry)
full_type_name = ''.join(type_name_list)
if '*' in full_type_name:
type_name, type_size_str = full_type_name.split('*')
type_size = int(type_size_str)
else:
type_name = full_type_name
type_size = 1
# Loop through the character specifiers in char_list
# and set the correct types in the implicit_defs dict
for char in char_list:
if (len(char)==1) and (char in gb.alphabet):
implicit_defs[char.lower()] = (type_name,type_size)
elif (len(char)==3 ) and (char[1]=='-'):
start_char = char[0]
end_char = char[2]
for key_char in implicit_defs.keys():
if (key_char >= start_char) and (key_char <= end_char):
implicit_defs[key_char.lower()] = (type_name,type_size)
return implicit_defs
# ====== END: getImplicitDefs ========
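# Example: 'implicit double precision (a-h,o-z)' maps the keys 'a'..'h' and
# 'o'..'z' to ('doubleprecision', 1) -- note that blanks inside type names are
# stripped -- while 'i'..'n' keep their defaults from gb.default_implicit_types.
# 'implicit none' maps every letter to (None, None).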
# ====== getParameterDefs ========
# Return a dict with the following structure:
# {
# 'some_variable' : '1234'
# 'another_variable': '10'
# ...
# }
#
# Note: Currently, only integer parameters are useful (array dimensions and indices).
#
def getParameterDefs(code_lines):
parameter_defs = {}
for i,line in enumerate(code_lines):
# Look for 'parameter' statement
if line[0:9].lower() == 'parameter':
# Remove 'parameter'
line = line[9:]
# Remove blanks
line = line.replace(' ','')
# Remove parenthesis
line = line.lstrip('(').rstrip(')')
# Split at comma
parameter_entries = line.split(',')
for entry in parameter_entries:
# Split at '=' symbol
var_name, value_str = entry.split('=')
try:
value = eval(value_str)
except:
print(' WARNING: Could not interpret the parameter "%s" with value "%s". Ignoring it.' % (var_name, value_str))
continue
# At the moment, CBGB can only make use of integer parameters. (Their only use is for array dimensions and indices.)
if not isinstance(value, int):  # Python 3: 'long' has been folded into 'int'
print(' INFO: Ignoring parameter "%s" with value "%s" as it was not recognized as an integer.' % (var_name, value_str))
continue
value = int(value)
# Adding variable to parameter_defs dictionary
parameter_defs[var_name] = value
return parameter_defs
# ====== END: getParameterDefs ========
# ====== getCommonBlockDicts ========
def getCommonBlockDicts(code_lines):
cb_dicts = []
for line in code_lines:
# Remove whitespaces
line = line.replace(' ','')
# Ignore lines that don't start with 'common/'
if (len(line) < 7) or (line[:7].lower() != 'common/'):
continue
# Identify common block name and names of member variables
line_list = line.split('/')
cb_name = line_list[1]
var_seq_str = line_list[2]
var_dicts = parseVariableSequence(var_seq_str)
var_names = var_dicts.keys()
cb_dicts.append( {'name':cb_name, 'member_names':var_names} )
return cb_dicts
# ====== END: getCommonBlockDicts ========
# ====== isVariableDecl ========
def isVariableDecl(line_in, return_type=False):
is_variable_decl = False
type_name = ''
type_size = 1
line = line_in
line = line.replace(',',' ').replace('*',' * ').replace('::',' ')
line = line.replace('(', ' (').replace(')',') ')
line = ' '.join(line.split())
line_list = line.split()
for i in [3,2,1]:
check_type = ''.join(line_list[:i]).lower()
print('DEBUG: Is this a type? : ', [line_in], [check_type])
# Check that we can deal with this Fortran type.
if check_type in gb.type_translation_dict.keys():
# If type is 'character*', identify the integer that specifies the
# string length.
if check_type=='character':
if (line_list[1] == '*') and (line_list[2].isdigit()):
check_type += '*' + line_list[2]
if '*' in check_type:
type_name, type_size_str = check_type.split('*')
type_size = int(type_size_str)
else:
type_name = check_type
is_variable_decl = True
print('DEBUG: --- YES!')
break
if return_type:
return is_variable_decl, type_name, type_size
else:
return is_variable_decl
# ====== END: isVariableDecl ========
# ====== isDimensionStatement ========
def isDimensionStatement(line_in):
is_dim_stmnt = False
line = line_in
line_list = line.split()
if (len(line_list) > 1) and (line_list[0].lower() == 'dimension'):
is_dim_stmnt = True
return is_dim_stmnt
# ====== END: isDimensionStatement ========
# ====== getArrayIndicesTuples ========
# Example:
# Input: '-2:10,7,1:2'
# Output: [(-2,7), (1,7), (1,2)]
def getArrayIndicesTuples(dimensions_str, parameter_defs):
indicies_tuples = []
# Check for empty dimensions string
if dimensions_str == '':
return indicies_tuples
# Check for assumed-shape arrays. We can't deal with that yet...
if dimensions_str == ':':
raise RuntimeError("Assumed-shape arrays (dimension ':') are not supported.")
# Loop over comma-separated entries in dimensions_str
for dim_str in dimensions_str.split(','):
if ':' in dim_str:
# start_index, end_index = [int(s) for s in dim_str.split(':')]
start_index_str, end_index_str = [s for s in dim_str.split(':')]
if start_index_str in parameter_defs.keys():
start_index = int( parameter_defs[start_index_str] )
else:
start_index = int(start_index_str)
if end_index_str in parameter_defs.keys():
end_index = int( parameter_defs[end_index_str] )
else:
end_index = int(end_index_str)
else:
start_index = 1
end_index_str = dim_str
if end_index_str in parameter_defs.keys():
end_index = int( parameter_defs[end_index_str] )
else:
end_index = int(end_index_str)
indicies_tuples.append( (start_index,end_index) )
return indicies_tuples
# ====== END: getArrayIndicesTuples ========
# ====== getVariablesDict ========
def getVariablesDict(code_lines, get_variables):
if len(get_variables) == 0:
return OrderedDict()
return_var_dicts = OrderedDict.fromkeys(get_variables, None)  # the C implementation of fromkeys rejects the 'value=' keyword
implicit_defs = getImplicitDefs(code_lines)
for line in code_lines:
#
# First, make use of all variable type declaration lines
#
is_var_decl, type_name, type_size = isVariableDecl(line, return_type=True)
if is_var_decl:
# Remove type name from beginning of line so that
# only the list of variable names remain.
full_type_name = type_name + '*' + str(type_size)
line_list = line.split()
i = 1
while i <= len(line_list):
if ''.join(line_list[:i]).lower() in full_type_name:
i += 1
continue
else:
break
var_seq = ''.join(line_list[i-1:])
# Parse line to extract info on the different variables
var_dicts = parseVariableSequence(var_seq)
# Append type_name and type_size to var_dicts
for var_name in var_dicts.keys():
# - Add type name
var_dicts[var_name]['type'] = type_name
# - Use the maximum of the sizes specified in the type name and in the variable sequence
# (Normally one of these should be 1 by default.)
var_dicts[var_name]['size'] = max(type_size,var_dicts[var_name]['size'])
# Check for character array type:
if (var_dicts[var_name]['type'] == 'character'):
dim_str = var_dicts[var_name]['dimension']
size = var_dicts[var_name]['size']
if (dim_str == '') and (size > 1):
var_dicts[var_name]['dimension'] = '1:%i' % size
# For requested variables, append the variable dicts to return_var_dicts
for var_name in var_dicts.keys():
if var_name in get_variables:
return_var_dicts[var_name] = var_dicts[var_name]
#
# Then, check all the 'dimension' statements
#
is_dim_stmnt = isDimensionStatement(line)
if is_dim_stmnt:
# Remove whitespace and 'dimension' keyword
line = line.replace(' ','')
line = line.replace('dimension','',1)
# Parse line to extract info on the different variables
dim_var_dicts = parseVariableSequence(line)
# For variables that already exist in return_var_dicts, simply
# update the 'dimension'. For variables that don't exist in
# return_var_dicts, create a new entry based on implicit types.
for var_name in dim_var_dicts.keys():
if var_name in get_variables:
# If info on this variable has not yet been added to return_var_dicts,
# insert a complete dict
if return_var_dicts[var_name] == None:
# Get type from implicit types
first_char = var_name[0]
type_name, type_size = implicit_defs[first_char.lower()]
if type_name is None or type_size is None:
raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'." % var_name)
return_var_dicts[var_name] = {
'type' : type_name,
'dimension': dim_var_dicts[var_name]['dimension'],
'size' : type_size
}
# If info on this variable already exists, simply update the 'dimension' entry in the
# correct dict
else:
return_var_dicts[var_name]['dimension'] = dim_var_dicts[var_name]['dimension']
#
# END: Loop over code lines
#
#
# Finally, add any missing variables that have not appeared in explicit type
# declarations or 'dimension' statements
#
for get_var_name in get_variables:
if return_var_dicts[get_var_name] == None:
# Get type from implicit types
first_char = get_var_name[0]
type_name, type_size = implicit_defs[first_char.lower()]
if type_name is None or type_size is None:
raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'." % get_var_name)
return_var_dicts[get_var_name] = {
'type' : type_name,
'dimension': '',
'size' : type_size
}
return return_var_dicts
# ====== END: getVariablesDict ========
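# Illustration (hypothetical input): for a code line 'double precision x(1:3)'
# and get_variables=['x'], the returned dict would contain
#     {'x': {'type': 'doubleprecision', 'dimension': '1:3', 'size': 1}}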
# ====== parseVariableSequence ========
# Input : "var1*100, var2(1:20)*20, var3"
#
# Output: {
# 'var1': { 'size': 100, 'dimension': '' },
# 'var2': { 'size': 20, 'dimension': '(1:20)' },
# 'var3': { 'size': 1, 'dimension': '' }
# }
def parseVariableSequence(var_seq_str):
result_dict = OrderedDict()
line = var_seq_str
# Remove all whitespace
line = line.replace(' ','')
# Split into separate variables by detecting commas
# (excluding commas inside brackets).
i = 0
bracket_balance = 0
while i < len(line):
char = line[i]
# Keep track of the brackets
if char == '(':
bracket_balance += 1
elif char == ')':
bracket_balance -= 1
# If a comma is found, replace it with a whitespace
if (char == ',') and (bracket_balance == 0):
line = line[:i] + ' ' + line[i+1:]
# Increment index
i += 1
# Split line at whitespaces
var_str_list = line.split()
for var_str in var_str_list:
# Check for dimension bracket and size integer
has_dim_bracket = bool('(' in var_str and ')' in var_str)
has_size_int = bool('*' in var_str)
# Insert whitespace to separate variable name, dimension bracket and size integer
var_str = var_str.replace('(',' ').replace(')',' ').replace('*',' ')
# Split at whitespace
var_str_list = var_str.split()
# Identify name, dimension, size
if has_dim_bracket and has_size_int:
var_name = var_str_list[0]
var_dim_str = var_str_list[1]
var_size = int(var_str_list[2])
elif has_dim_bracket and not has_size_int:
var_name = var_str_list[0]
var_dim_str = var_str_list[1]
var_size = 1
elif has_size_int and not has_dim_bracket:
var_name = var_str_list[0]
var_dim_str = ''
var_size = int(var_str_list[1])
else:
var_name = var_str_list[0]
var_dim_str = ''
var_size = 1
# Append to result_dict
result_dict[var_name] = {'dimension': var_dim_str, 'size': var_size}
return result_dict
# ====== END: parseVariableSequence ========
# ====== getFunctionArgumentNames ========
def getFunctionArgumentNames(code_line):
# Input : "subroutine some_subroutine(arg1,arg2,arg3)"
#
# Output: ["arg1","arg2","arg3"]
arg_names = []
if ('(' not in code_line) or (')' not in code_line):
return arg_names
# Pick out argument sequence
arg_seq_str = code_line.split('(')[-1].split(')')[0]
# Strip away any whitespace
arg_seq_str = ''.join(arg_seq_str.split())
# Construct list
if arg_seq_str != '':
arg_names = arg_seq_str.split(',')
# Return resulting list
return arg_names
# ====== END: getFunctionArgumentNames ========
# ====== getFunctionReturnType ========
def getFunctionReturnType(code_lines):
f_decl_line = code_lines[0]
f_decl_line_list = f_decl_line.split()
f_index = f_decl_line.lower().split().index('function')
# Get function name
f_name = getCodePartName(f_decl_line, 'function')
# Grab content in declaration line preceding the 'function' keyword
# and append the function name to form a regular variable declaration:
f_return_type_line = ' '.join(f_decl_line_list[:f_index] + [f_name])
# If f_return_type_line forms a valid type declaration, use it.
# Otherwise, search the function body for a declaration.
is_decl = isVariableDecl(f_return_type_line)
if is_decl:
result_dict = getVariablesDict([f_return_type_line], [f_name])
return_type_dict = result_dict[f_name]
else:
result_dict = getVariablesDict(code_lines[1:], [f_name])
return_type_dict = result_dict[f_name]
return return_type_dict
# ====== END: getFunctionReturnType ========
# # ====== getFunctionDict ========
# def getFunctionDict(code_lines):
# f_dict = OrderedDict()
# # Get function/subroutine name
# f_dict['name'] = getF
# return f_dict
# # ====== END: getFunctionDict ========
# ====== generateTypeDeclCommonBlock ========
def generateTypeDeclCommonBlock(cb_dict, var_info_dict, parameter_defs):
indent = ' '*4
code = ''
cb_name = cb_dict['name']
cb_type_name = cb_name + '_type'
code += 'struct %s\n' % cb_type_name
code += '{\n'
for var_name, var_dict in var_info_dict.items():
try:
c_type_name = getCTypeName(var_dict, parameter_defs)
except RuntimeError:
print " ERROR: Failed to translate variable '%s' in common block '%s' to C type." % (var_name, cb_name)
raise
code += indent + c_type_name + ' ' + var_name + ';\n'
code += '};\n'
return code
# ====== END: generateTypeDeclCommonBlock ========
# ====== generateFrontendCommonBlock ========
def generateFrontendCommonBlock(cb_dict):
code = ''
cb_name = cb_dict['name']
cb_type_name = cb_name + '_type'
cb_capability_name = cfg.cb_capability_prefix + cb_name + cfg.cb_capability_suffix
cb_mangled_symbol = getMangledSymbolName(cb_name)
code += 'BE_VARIABLE(%s, %s, "%s", "%s")\n' % (cb_name, cb_type_name, cb_mangled_symbol, cb_capability_name)
return code
# ====== END: generateFrontendCommonBlock ========
# ====== generateFrontendFunction ========
def generateFrontendFunction(f_dict, parameter_defs):
code = ''
module_name = f_dict['module']
f_name_short = f_dict['name']
if module_name != '':
f_name = module_name + '_' + f_name_short
else:
f_name = f_name_short
arg_info_dict = f_dict['arg_info']
# Get correct C type for the return type.
# - if function:
if 'return_type_info' in f_dict.keys():
ret_type_info_dict = f_dict['return_type_info']
try:
f_return_type_c = getCTypeName(ret_type_info_dict, parameter_defs)
except RuntimeError:
print " ERROR: Failed to translate the return type of function '%s' to C type." % (f_name)
raise
# - if subroutine:
else:
f_return_type_c = 'void'
# Generate mangled symbol name
f_mangled_symbol = getMangledSymbolName(f_name_short, module=module_name)
# Construct capability name
if (cfg.module_name_in_capability) and (module_name != ''):
f_capability_name = cfg.f_capability_prefix + f_name + cfg.f_capability_suffix
else:
f_capability_name = cfg.f_capability_prefix + f_name_short + cfg.f_capability_suffix
# Construct argument list
arg_bracket = '('
for arg_name, d in arg_info_dict.items():
try:
c_type_name = getCTypeName(d, parameter_defs)
except RuntimeError:
print " ERROR: Failed to translate the argument '%s' in %s '%s' to C type." % (arg_name, f_dict['category'], f_name_short)
raise
arg_bracket += c_type_name + '&, '
arg_bracket = arg_bracket.rstrip(', ')
arg_bracket += ')'
# Generate BE_FUNCTION macro call
    code += 'BE_FUNCTION(%s, %s, %s, "%s", "%s")\n' % (f_name, f_return_type_c, arg_bracket, f_mangled_symbol, f_capability_name)
return code
# ====== END: generateFrontendFunction ========
# ====== getMangledSymbolName ========
def getMangledSymbolName(identifier, module=''):
if cfg.name_mangling == 'gfortran':
if module != '':
mangled_symbol = '__' + module.lower() + '_MOD_' + identifier.lower()
else:
mangled_symbol = identifier.lower() + '_'
elif cfg.name_mangling == 'ifort':
if module != '':
mangled_symbol = module.lower() + '_MP_' + identifier.lower() + '_'
else:
mangled_symbol = identifier.lower() + '_'
elif cfg.name_mangling == 'g77':
if '_' in identifier:
mangled_symbol = identifier.lower() + '__'
else:
mangled_symbol = identifier.lower() + '_'
else:
raise RuntimeError("cfg.name_mangling must be set to either 'gfortran', 'ifort' or 'g77'.")
return mangled_symbol
# ====== END: getMangledSymbolName ========
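# Quick illustration of the three mangling schemes above (inputs are hypothetical):
#   cfg.name_mangling == 'gfortran': getMangledSymbolName('mysub')                 -> 'mysub_'
#                                    getMangledSymbolName('mysub', module='mymod') -> '__mymod_MOD_mysub'
#   cfg.name_mangling == 'ifort':    getMangledSymbolName('mysub', module='mymod') -> 'mymod_MP_mysub_'
#   cfg.name_mangling == 'g77':      getMangledSymbolName('my_sub')                -> 'my_sub__'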
# ====== getCTypeName ========
def getCTypeName(var_dict, parameter_defs):
fortran_type_name = var_dict['type']
if (fortran_type_name != 'character') and (var_dict['size'] > 1):
fortran_type_name += '*' + str(var_dict['size'])
c_type_base_name = gb.type_translation_dict[fortran_type_name]
try:
array_indices_tuples = getArrayIndicesTuples(var_dict['dimension'], parameter_defs)
except RuntimeError:
        print(' ERROR: Cannot determine the correct size for variable of type %s(%s).' % (fortran_type_name, var_dict['dimension']))
raise
# Is this variable an array?
if (fortran_type_name != 'character') and (len(array_indices_tuples) > 0):
is_array = True
elif (fortran_type_name == 'character') and (len(array_indices_tuples) > 1):
is_array = True
else:
is_array = False
# For arrays, construct a string of comma-separated array indices
if is_array:
all_indices_list = [i for tpl in array_indices_tuples for i in tpl]
all_indices_str = ','.join( map(str,all_indices_list) )
#
# Determine the correct C++ type name
#
# Special treatment for the character type
if (fortran_type_name == 'character') and (var_dict['size'] > 1):
if is_array:
template_bracket = '< %i,%s >' % (var_dict['size'], all_indices_str)
c_type_name = 'FstringArray' + template_bracket
else:
c_type_name = 'Fstring<%i>' % var_dict['size']
# All other types
else:
if is_array:
template_bracket = '< %s,%s >' % (c_type_base_name, all_indices_str)
c_type_name = 'Farray' + template_bracket
else:
c_type_name = c_type_base_name
# Return result
return c_type_name
# ====== END: getCTypeName ========
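# Illustration with a hypothetical var_dict: a Fortran 'real*8 x(1:3)' would be
# translated roughly as
#   getCTypeName({'type': 'real', 'size': 8, 'dimension': '1:3'}, {})  -> 'Farray< double,1,3 >'
# assuming gb.type_translation_dict maps 'real*8' to 'double' and
# getArrayIndicesTuples('1:3', {}) yields [(1, 3)].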
# ====== addNamespace ========
# Encapsulate code string in a namespace
def addNamespace(code, namespace_name, indent=4):
# Add indentation
code_lines = [' '*indent + line for line in code.splitlines()]
code = '\n'.join(code_lines)
# Add namespace
code = 'namespace ' + namespace_name + '\n' + '{\n' + code + '\n}\n'
return code
# ====== END: addNamespace ========
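# Sketch of the expected output (with the default indent of 4):
#   addNamespace('int x;', 'Gambit')
# returns the string
#   namespace Gambit
#   {
#       int x;
#   }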
| python |
"""
Functions and classes that wrap BioPython functionality behind a
scikit-learn style model interface.
last update: 21 Jun, 2021
Authors: Keisuke Yanagisawa
"""
import numpy as np
from Bio.SVDSuperimposer import SVDSuperimposer
from sklearn.utils.validation import check_is_fitted
from sklearn.base import TransformerMixin, BaseEstimator
__all__ = [
"SuperImposer"
]
class SuperImposer(TransformerMixin, BaseEstimator):
"""
構造重ね合わせを行うBioPythonのクラスを
scikit-learnのインターフェースでwrapしたクラス。
"""
def __init__(self):
pass
def _reset(self):
if hasattr(self, "rot_"):
del self.rot_
del self.tran_
def _superimpose(self, coords, reference_coords):
sup = SVDSuperimposer()
sup.set(reference_coords, coords)
sup.run()
self.rot_, self.tran_ = sup.get_rotran()
def fit(self, coords, reference_coords):
"""
与えられた2つの点群をなるべく重ねるような並行・回転移動を算出します。
与えられた2つの点群はそれぞれ対応関係があることを仮定します。
すなわち、それぞれの0番目の要素同士がなるべく重なるように、
1番目の要素同士がなるべく重なるように…と重ね合わせを行います。
Parameters
----------
coords : list
重ね合わせのために移動させる点群
reference_coords : list
重ね合わせ先の点群
Returns
-------
SuperImposer
fit済みのオブジェクト
"""
self._reset()
self._superimpose(coords, reference_coords)
return self
def transform(self, coords):
"""
fit()で計算された並進・回転に基づいて
与えられた点群を移動させます。
Parameters
----------
coords : list
移動させる点群
"""
check_is_fitted(self)
coords = np.array(coords)
return np.dot(coords, self.rot_) + self.tran_
def inverse_transform(self, coords):
"""
逆方向の移動を行います。
Parameters
----------
coords : list
transform()した後の点群
Returns
-------
np.array
transform()する前の点群座標
"""
coords = np.array(coords)
check_is_fitted(self)
return np.dot(coords - self.tran_, np.linalg.inv(self.rot_))
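if __name__ == "__main__":
    # Minimal usage sketch with hypothetical coordinates: fit a rigid-body
    # transform of one point set onto another, then apply and invert it.
    ref = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    mov = np.array([[1.0, 1.0, 1.0], [2.0, 1.0, 1.0], [1.0, 2.0, 1.0]])
    sup = SuperImposer().fit(mov, ref)
    moved = sup.transform(mov)               # approximately equals ref
    restored = sup.inverse_transform(moved)  # approximately equals mov
    print(np.allclose(moved, ref), np.allclose(restored, mov))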
| python |
"""Build IDE required files from python folder structure from command line.
"""
import argparse
from ideskeleton import build
def main():
"""Build IDE files from python folder structure."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"source_path",
help="path of the folder structure used to generate the IDE skeleton",
type=str)
parser.add_argument(
"-f",
"--force",
help="force overwrite existing solution and project files",
action="store_true")
parser.add_argument(
"-i",
"--ide",
help="choose IDE",
type=str,
choices=["vstudio"])
args = parser.parse_args()
if not args.ide:
args.ide = "vstudio"
build(args.source_path, args.force, args.ide)
if __name__ == "__main__":
    main()
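# Example invocation from the command line (the script name is illustrative):
#   python build_ide.py path/to/project --force --ide vstudio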
| python |
import pytest
from httpx import AsyncClient
from mock import patch
from models.schemas.status import StatusEnum
from resources import strings
pytestmark = pytest.mark.asyncio
@patch("api.routes.health.create_service_bus_status")
@patch("api.routes.health.create_state_store_status")
async def test_health_response_contains_cosmos_status(health_check_cosmos_mock, health_check_service_bus_mock, app,
client: AsyncClient) -> None:
message = ""
health_check_cosmos_mock.return_value = StatusEnum.ok, message
health_check_service_bus_mock.return_value = StatusEnum.ok, message
response = await client.get(app.url_path_for(strings.API_GET_HEALTH_STATUS))
assert {"message": message, "service": strings.COSMOS_DB, "status": strings.OK} in response.json()["services"]
@patch("api.routes.health.create_service_bus_status")
@patch("api.routes.health.create_state_store_status")
async def test_health_response_contains_service_bus_status(health_check_cosmos_mock, health_check_service_bus_mock, app,
client: AsyncClient) -> None:
message = ""
health_check_cosmos_mock.return_value = StatusEnum.ok, message
health_check_service_bus_mock.return_value = StatusEnum.ok, message
response = await client.get(app.url_path_for(strings.API_GET_HEALTH_STATUS))
assert {"message": message, "service": strings.SERVICE_BUS, "status": strings.OK} in response.json()["services"]
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/30 12:27 PM
# @Title : 26. Remove Duplicates from Sorted Array
# @Link : https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array/
QUESTION = """
给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
示例 1:
给定数组 nums = [1,1,2],
函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
你不需要考虑数组中超出新长度后面的元素。
示例 2:
给定 nums = [0,0,1,1,1,2,2,3,3,4],
函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
你不需要考虑数组中超出新长度后面的元素。
说明:
为什么返回数值是整数,但输出的答案是数组呢?
请注意,输入数组是以“引用”方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
你可以想象内部操作如下:
// nums 是以“引用”方式传递的。也就是说,不对实参做任何拷贝
int len = removeDuplicates(nums);
// 在函数里修改输入数组对于调用者是可见的。
// 根据你的函数返回的长度, 它会打印出数组中该长度范围内的所有元素。
for (int i = 0; i < len; i++) {
print(nums[i]);
}
"""
THINKING = """
双指针的思想
a指针指向需要修改的数据的索引
b指针指向遍历数据的索引
a指针从索引1开始更新,因为第一个肯定是要保留的
b指针从0开始遍历,比较后面的数字,如果相等,则a不动,b+1,如果不等则a, b同时+1,且把后面的数字更新到a的所在位置
"""
from typing import List
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
if not nums:
return 0
        n = len(nums)
result = 1
        for i in range(n - 1):
if nums[i] != nums[i+1]:
nums[result] = nums[i+1]
result += 1
return result
if __name__ == '__main__':
s = Solution()
nums = [1, 2, 2]
print(s.removeDuplicates(nums))
| python |
__package__ = 'archivebox.core'
import uuid
from django.db import models
from django.utils.functional import cached_property
from ..util import parse_date
from ..index.schema import Link
class Snapshot(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
url = models.URLField(unique=True)
timestamp = models.CharField(max_length=32, unique=True, db_index=True)
title = models.CharField(max_length=128, null=True, blank=True, db_index=True)
tags = models.CharField(max_length=256, null=True, blank=True, db_index=True)
added = models.DateTimeField(auto_now_add=True, db_index=True)
updated = models.DateTimeField(null=True, blank=True, db_index=True)
# bookmarked = models.DateTimeField()
keys = ('url', 'timestamp', 'title', 'tags', 'updated')
def __repr__(self) -> str:
title = self.title or '-'
return f'[{self.timestamp}] {self.url[:64]} ({title[:64]})'
def __str__(self) -> str:
title = self.title or '-'
return f'[{self.timestamp}] {self.url[:64]} ({title[:64]})'
@classmethod
def from_json(cls, info: dict):
info = {k: v for k, v in info.items() if k in cls.keys}
return cls(**info)
def as_json(self, *args) -> dict:
args = args or self.keys
return {
key: getattr(self, key)
for key in args
}
def as_link(self) -> Link:
return Link.from_json(self.as_json())
def as_link_with_details(self) -> Link:
from ..index import load_link_details
return load_link_details(self.as_link())
@cached_property
def bookmarked(self):
return parse_date(self.timestamp)
@cached_property
def is_archived(self):
return self.as_link().is_archived
@cached_property
def num_outputs(self):
return self.as_link().num_outputs
@cached_property
def url_hash(self):
return self.as_link().url_hash
@cached_property
def base_url(self):
return self.as_link().base_url
@cached_property
def link_dir(self):
return self.as_link().link_dir
@cached_property
def archive_path(self):
return self.as_link().archive_path
@cached_property
def archive_size(self):
return self.as_link().archive_size
@cached_property
def history(self):
from ..index import load_link_details
return load_link_details(self.as_link()).history
@cached_property
def latest_title(self):
if ('title' in self.history
and self.history['title']
and (self.history['title'][-1].status == 'succeeded')
and self.history['title'][-1].output.strip()):
return self.history['title'][-1].output.strip()
return None
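# Minimal usage sketch (hypothetical, unsaved instance): round-trip a Snapshot
# through its JSON representation.
#   snap = Snapshot.from_json({'url': 'https://example.com', 'timestamp': '1610000000'})
#   snap.as_json('url', 'timestamp')
#   -> {'url': 'https://example.com', 'timestamp': '1610000000'}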
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# IMPORT STANDARD LIBRARIES
import re
_LINE_ENDER = re.compile(r'(?P<prefix>\s*).+(?::)(?:#.+)?$')
def _get_indent(text):
'''str: Find the indentation of a line of text.'''
return text[:len(text) - len(text.lstrip())]
def _add_indent(text, indent=1):
'''Add another set of indentation to `text`.'''
if '\t' in text:
return text + ('\t' * indent)
# TODO : Get indent number from Vim settings. Not just `' '`
return text + (' ' * indent)
def find_best_indent(lines):
'''Find the next line's indentation.
If the next line is the start of Python block then the indentation is
"current indentation plus one more level of indent" so that value will be
returned instead.
Args:
lines (iter[str]): Some lines of Python source code.
Returns:
str: The found indentation, if any.
'''
for line in lines:
if not line.strip():
continue
indent = _get_indent(line)
needs_more_indentation = _LINE_ENDER.match(line)
if needs_more_indentation:
return _add_indent(indent)
return indent
return ''
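if __name__ == '__main__':
    # Quick behaviour sketch with a hypothetical snippet: a line that opens a
    # Python block yields its own indentation plus one extra level.
    print(repr(find_best_indent(['    if condition:'])))  # '        '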
| python |
#!/usr/bin/env python3
import csv
import typer
def read_csv(file_name: str):
    """ Opens a csv file and returns a list with the
    contents of the first column (one entry per row
    of the file)
    Args:
        file_name (str): file name and location
    Returns:
        csv_content (list): list with the contents of the
        first column
    """
    print(f'FILE NAME {file_name}')
    try:
        csv_content = []
        with open(file_name) as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                csv_content.append(row[0])
        return csv_content
    except (OSError, IndexError) as error:
        print(f'Unexpected error: {error}')
def main(file_name: str = typer.Argument(...)):
""" Program receives the name of a csv file and parses the data and
returns a list of its contents
Args:
file_name (str): file name and location
Returns:
csv_content (list): csv file content stored in a list
"""
print(read_csv(file_name))
if __name__ == '__main__':
typer.run(main) | python |
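# Example invocation (script and file names are illustrative):
#   python read_first_column.py data/input.csv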
import sys
import java.lang.Class
import org.python.core.PyReflectedFunction as reflectedfunction
import org.python.core.PyReflectedField as reflectedfield
import java.lang.reflect.Field as Field
import java.lang.reflect.Method as Method
import java.lang.annotation.Annotation as JavaAnnotation
from java.lang import *
from jcompile import *
from org.jynx import JyGateway
from jynx.lib.javaparser import ImportFinder
__all__ = ["jproperty",
"JavaCompiler",
"signature",
"annotation",
"JavaClass",
"JavaClassMaker",
"type_name",
"module_name",
"package_name",
"createJavaAnnotation",
"getAnnotations",
"bean_property"]
javakwds = set(['void','boolean','char','byte','short','int','long','float','double',
                'public','protected','private','static','abstract','final','native','synchronized',
                'transient','volatile','strictfp'])
primtype = {"int": "Integer",
"char": "Character",
"double": "Double",
"byte": "Byte",
"long": "Long",
"short": "Short",
"boolean": "Boolean",
"float": "Float"}
def find_constructors(cls):
for C in cls.mro():
if hasattr(C, "getDeclaredConstructors"):
return C.getDeclaredConstructors()
return []
def package_name(T):
name = T.__module__.replace("$", ".")
return name if not name.startswith("[L") else name[2:]
def isList(T):
    return T.__module__.startswith("[L")
def module_name(T):
name = T.__name__.replace("$", ".")
return (name if not name[-1] == ";" else name[:-1])
def type_name(T):
try:
pkg = package_name(T)
except AttributeError:
pkg = ""
if pkg:
return pkg+"."+module_name(T)
else:
return module_name(T)
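# Quick illustration (Jython session; results are indicative):
#   package_name(java.lang.String) -> 'java.lang'
#   module_name(java.lang.String)  -> 'String'
#   type_name(java.lang.String)    -> 'java.lang.String'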
class TypeExtractor(object):
modules = {}
blacklist = ["org.python.proxies", "__builtin__"]
def __init__(self):
self.classes = set()
def extract(self, obj, takelast = True):
'''
Extract type info from type data.
'''
if isinstance(obj, type):
if issubclass(obj, java.lang.Object):
name = type_name(obj)
else:
return self
elif isinstance(obj, str):
if " " in obj:
name = obj.split(" ")[-1]
else:
name = obj
else:
raise ValueError("No type or type name")
if "." in name:
k = name.rfind(".")
pre, post = name[:k], name[k+1:]
if name not in self.blacklist and pre not in self.blacklist:
S = self.modules.get(post, set())
if S:
if name not in S:
self.classes.add(pre+".*")
elif takelast:
self.classes.add(name)
else:
self.classes.add(pre)
elif takelast:
self.classes.add(name)
else:
self.classes.add(pre)
S.add(name)
self.modules[post] = S
return self
class jproperty(object):
def __init__(self, type_info, transfer = None, initializer = None, **kwd):
self.type_info = type_info
self.annotation = []
self.initializer = initializer
if transfer:
try:
self.annotation = transfer.java_annotations[:]
transfer.java_annotations = []
except AttributeError:
pass
self._name = ''
def get_name(self, obj):
if not self._name:
for name, item in obj.__class__.__dict__.items():
if item == self:
self._name = name
break
else:
raise AttributeError("Cannot access property value of %s"%self)
return self._name
def __get__(self, obj, objtype = None):
name = self.get_name(obj)
return getattr(obj.javaobj, name)
def __set__(self, obj, value):
name = self.get_name(obj)
setattr(obj.javaobj, name, value)
def find_base_class(cls):
bases = cls.__bases__
if len(bases) == 1 and "org.python.proxies" in bases[0].__module__:
bases = bases[0].__bases__
return bases[0]
class Translator(object):
blacklist = ["org.python.proxies", "__builtin__"]
def __init__(self, cls, **kwd):
self.cls = cls
self.module = sys.modules[cls.__dict__["__module__"]]
self.packages = set()
self.imports = []
self.options = kwd
def get_all_classes(self):
for name, value in self.module.__dict__.items():
if issubclass(type(value), java.lang.Class):
for C in TypeExtractor().extract(value).classes:
self.packages.add("import "+C+";")
def extract_name(self, T):
self.packages.add("import "+package_name(T)+"."+module_name(T)+";")
def extract_package(self, pkg, takelast = True):
if "." in pkg:
k = pkg.rfind(".")
pre, post = pkg[:k], pkg[k+1:]
if pre == "__builtin__":
return ''
if pkg not in self.blacklist and pre not in self.blacklist:
if takelast:
self.packages.add("import "+pkg+";")
else:
self.packages.add("import "+pre+";")
return post
return pkg
def extract_method(self, method, annotations):
try:
D = method.argslist[0].data
data = str(method.argslist[0].data)
except AttributeError:
data = str(method)
D = None
K = data.find("(")
head, args = data[:K], data[K:]
head_splitted = head.split()
if "abstract" in head_splitted:
head_splitted.remove("abstract")
elif "native" in head_splitted:
head_splitted.remove("native")
if len(head_splitted)>2:
funcname = head_splitted[-1]
return_type = head_splitted[-2]
prefix = head_splitted[:-2]
elif head_splitted[0] in ("public", "private", "protected"):
funcname = head_splitted[-1]
prefix = [head_splitted[0]]
return_type = ''
else:
funcname = head_splitted[-1]
return_type = head_splitted[-2]
prefix = ["public"]
prefix = " ".join(prefix)
prefix = prefix.replace("protected", "public")
if D:
RT = D.getReturnType()
return_type = module_name(RT)
self.extract_package(type_name(RT))
funcname = D.getName()
argtypes = [self.extract_package(type_name(T)) for T in D.getParameterTypes()]
n = len(argtypes)
funcargs = [argtypes[i]+" "+"arg"+str(i) for i in range(n)]
callargs = ["arg"+str(i) for i in range(n)]
# extract exceptions
exc_types = []
for ET in D.getExceptionTypes():
self.extract_package(type_name(ET))
exc_types.append(module_name(ET))
if exc_types:
exc = " throws "+",".join(exc_types)+" "
else:
exc = ""
self.extract_package(type_name(D.clazz))
return prefix+" "+return_type, return_type, funcargs, callargs, funcname, argtypes, exc
else:
argtypes = [T.strip() for T in args.strip()[1:-1].split(",") if T]
funcname = self.extract_package(funcname, takelast = False)
return_type = self.extract_package(return_type)
argtypes = [self.extract_package(T) for T in argtypes]
n = len(argtypes)
funcargs = [argtypes[i]+" "+"arg"+str(i) for i in range(n)]
callargs = ["arg"+str(i) for i in range(n)]
return prefix+" "+return_type, return_type, funcargs, callargs, funcname, argtypes, ""
def build_member(self, data, annotations):
prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(data+"()", annotations)
anno = ''
if annotations:
anno = ' '.join(annotations)+" "
return anno+data+";"
def build_method(self, method, annotations, overload):
caller = "jaobject" if "supercall" not in self.options else "super"
prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations)
args = "("+", ".join(funcargs)+")"
prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix
if return_type == "void":
body = "{ %s."%caller+funcname+"("+",".join(callargs)+"); }"
else:
body = "{ return %s."%caller+funcname+"("+",".join(callargs)+"); }"
return " "+prefix+" "+(overload if overload else funcname)+args+exc_type+body+"\n"
def build_jy_method_sig(self, method, name, annotations, overload):
prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations)
funcname = name
args = "("+", ".join(funcargs)+")"
prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix
n = len(callargs)
body = ['']
return_cast = return_type
if return_type in primtype:
return_cast = primtype[return_type]
if n:
body.append("PyObject args[] = new PyObject[%s];"%n)
#body.append("for(int i=0;i<%s;i++) {"%n)
for i in range(n):
body.append("args[%s] = Py.java2py(arg%s);"%(i,i))
#body.append("}")
if return_type == "void":
body.append('jyobject.invoke("'+funcname+'"'+", args);")
else:
body.append('return (%s)jyobject.invoke("'%return_cast+funcname+'"'+', args).__tojava__(%s.class);'%return_type)
else:
if return_type == "void":
body.append('jyobject.invoke("'+funcname+'"'+");")
else:
body.append('return (%s)jyobject.invoke("'%return_cast+funcname+'"'+").__tojava__(%s.class);"%return_type)
return " "+prefix+" "+(overload if overload else funcname)+args+"{" +"\n ".join(body)+"\n }\n"
def build_jy_class_method(self, clsname, method, name, annotations, overload):
prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations)
funcname = name
args = "("+", ".join(funcargs)+")"
prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix
n = len(callargs)
body = ['']
return_cast = return_type
if return_type in primtype:
return_cast = primtype[return_type]
if n:
call = 'JyGateway.callStatic("%s", "%s", args)'%(clsname, funcname)
body.append("PyObject args[] = new PyObject[%s];"%n)
#body.append("for(int i=0;i<%s;i++) {"%n)
for i in range(n):
body.append("args[%s] = Py.java2py(arg%s);"%(i,i))
#body.append("}")
else:
call = 'JyGateway.callStatic("%s", "%s", null)'%(clsname, funcname)
if return_type == "void":
body.append(call+";")
else:
body.append('return (%s)%s.__tojava__(%s.class);'%(return_cast, call, return_type))
return " "+prefix+" "+(overload if overload else funcname)+args+"{" +"\n ".join(body)+"\n }\n"
def build_jy_method(self, method, name, annotations, overload):
prefix, return_type, funcargs, callargs, funcname, types, exc_type = self.extract_method(method, annotations)
funcname = name
prefix = "\n ".join([str(anno) for anno in annotations])+"\n "+prefix
if return_type == "PyObject":
args = "(PyObject[] args)"
if "void" in prefix:
body = "{ "+'jyobject.invoke("'+funcname+'"'+", args); }"
else:
body = "{ return "+'jyobject.invoke("'+funcname+'"'+", args); }"
else:
args = "()"
if "void" in prefix:
body = "{ "+'jyobject.invoke("'+funcname+'"'+"); }"
else:
body = "{ return "+'jyobject.invoke("'+funcname+'"'+"); }"
return " "+prefix+" "+(overload if overload else funcname)+args+body+"\n"
def default_imports(self):
self.imports.append("import org.jynx.JyGateway;")
self.imports.append("import org.jynx.gen.*;")
self.imports.append("import org.python.core.PyObject;")
self.imports.append("import org.python.core.Py;")
def add_package(self, packagename):
self.packages.add("import %s;"%packagename)
def add_jajyobjects(self, base, classdef):
jaanno = self.options.get("jaobject_annotation", "")
if jaanno:
jaanno = " "+jaanno
jyanno = self.options.get("jyobject_annotation", "")
if jyanno:
jyanno = " "+jyanno
classdef.append(" %s private PyObject jyobject;\n"%jyanno)
classdef.append(" %s private "%jaanno+module_name(base)+" jaobject;\n")
def build_ja_constructor(self, method, annotations, jatype, jytype):
prefix, return_type, funcargs, callargs, funcname, partypes, exc_type = self.extract_method(method, annotations)
n = len(partypes)
# print "CONS", method, prefix, funcargs, callargs, funcname, partypes
args = ",".join([partypes[i]+" "+"arg"+str(i) for i in range(n)])
head = prefix+" "+jytype+"("+args+") {"
body = []
arglist = ",".join("arg"+str(i) for i in range(n))
body.append("super("+arglist+")")
if n:
body.append("Object values[] = {%s}"%arglist)
body.append('jyobject = JyGateway.newInstance("%s", this, values)'%jytype)
body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype))
else:
body.append('jyobject = JyGateway.newInstance("%s", this, null)'%jytype)
body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype))
B = ";\n ".join(body)
return " "+head+"\n "+B+";\n }\n"
def build_jy_constructor(self, argcount, jatype, jytype):
if argcount>1:
args = "PyObject[] args"
else:
args = ""
head = "public "+jytype+"("+args+") {"
body = []
if args:
body.append('jyobject = JyGateway.newInstance("%s", this, args)'%jytype)
body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype))
else:
body.append('jyobject = JyGateway.newInstance("%s", this, null)'%jytype)
body.append('jaobject = (%s)jyobject.__tojava__(%s.class)'%(jatype, jatype))
B = ";\n ".join(body)
return " "+head+"\n "+B+";\n }\n"
def build_class(self):
self.get_all_classes()
cls = self.cls
attrs = cls.__dict__
clsname = module_name(cls)
methods = []
members = []
cons = []
base = find_base_class(self.cls)
self.extract_name(base)
anno_imports = set()
try:
for anno in cls.java_annotations:
anno_imports.update(anno.anno_imports)
except AttributeError:
pass
for name, value in cls.__dict__.items():
# print self.packages
#print "---------------------------"
#print name, value
overload = (value.overload if hasattr(value, "overload") else "")
if hasattr(value, "java_annotations"):
annotations = value.java_annotations
else:
annotations = []
for anno in annotations:
anno_imports.update(anno.anno_imports)
if isinstance(value, jproperty):
annos = []
for anno in value.annotation:
annos.append(str(anno))
anno_imports.update(anno.anno_imports)
if value.initializer:
members.append(self.build_member(value.type_info+" "+name+" = "+value.initializer, annos))
else:
members.append(self.build_member(value.type_info+" "+name, annos))
elif name == "plain_methods":
methods+=value
elif name == "mapping_attributes":
continue
elif hasattr(value, "__call__"):
if name == "__init__":
try:
n = value.func_code.co_argcount
c = self.build_jy_constructor(n, module_name(base), module_name(cls))
cons.append(c)
except AttributeError:
pass
continue
elif name in base.__dict__:
methods.append(self.build_method(base.__dict__[name], annotations, overload))
continue
if hasattr(value, "java_signature"):
if "static" in value.java_signature:
setattr(cls, name, classmethod(value))
methods.append(self.build_jy_class_method(module_name(cls),
value.java_signature,
name,
annotations, overload))
else:
methods.append(self.build_jy_method_sig(value.java_signature,
name,
annotations, overload))
else:
methods.append(self.build_jy_method("public PyObject "+name+"()",
name,
annotations, overload))
elif isinstance(value, (classmethod, staticmethod)):
F = getattr(cls, name)
if hasattr(F, "java_annotations"):
annotations = F.java_annotations
else:
annotations = []
if hasattr(F, "java_signature"):
methods.append(self.build_jy_class_method(module_name(cls),
F.java_signature,
name,
annotations, overload))
else:
                    methods.append(self.build_jy_class_method(module_name(cls),
"public static PyObject "+name+"()",
name,
annotations, overload))
cons += [self.build_ja_constructor(c, [], module_name(base), module_name(cls)) for c in find_constructors(cls)]
self.imports += ["import "+cl+";" for cl in anno_imports]
self.default_imports()
annotations = ([str(anno) for anno in cls.java_annotations] if hasattr(cls, "java_annotations") else [])
if base.isInterface():
self.extract_name(base)
classdef = self.imports+[""]+annotations+["public class "+module_name(cls)+" implements "+base.__name__+" {"]
else:
classdef = self.imports+[""]+annotations+["public class "+module_name(cls)+" extends "+base.__name__+" {"]
for mem in members:
classdef.append(" "+mem)
self.add_jajyobjects(base, classdef)
for c in cons:
classdef.append(c)
for m in methods:
classdef.append(m)
classdef.append("}")
for pkg in self.options.get("pkg",[]):
self.add_package(pkg)
classcode = "\n".join(list(self.packages)+[""]+classdef)
return classcode
class signature(object):
multimethod = {}
def __init__(self, sig, overload = False):
self.java_signature = sig
self.java_annotations = []
self.overload = overload
@classmethod
def overload_handler(cls, C):
for name in cls.multimethod:
try:
delattr(C, name)
cnt, L = cls.multimethod[name]
for f in L:
setattr(C, f.__name__, f)
except AttributeError:
pass
cls.multimethod = {}
def __call__(self, f):
try:
f.java_signature = self.java_signature
if self.java_annotations:
f.java_annotations = self.java_annotations
if self.overload:
f.overload = f.__name__
except AttributeError:
f.im_func.java_signature = self.java_signature
if self.java_annotations:
f.im_func.java_annotations = self.java_annotations
if self.overload:
f.im_func.overload = f.__name__
if self.overload:
name = f.__name__
cnt, L = signature.multimethod.get(name, (-1, []))
cnt+=1
f.__name__ = f.__name__+"__"+str(cnt)
L.append(f)
signature.multimethod[name] = (cnt, L)
return f
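# Usage sketch (class and types are illustrative): attach a Java signature to
# a Python method so the generated Java wrapper exposes typed arguments.
#
#   class Vec(java.lang.Object):
#       @signature("public double norm(double x, double y)")
#       def norm(self, x, y):
#           return (x*x + y*y) ** 0.5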
def add_imports(source, packages):
    source = source.strip()
    if source.startswith("package "):
        lines = source.split("\n")
        return "\n".join([lines[0]] + ["import "+pkg+";" for pkg in packages] + lines[1:])
    else:
        return "\n".join(["import "+pkg+";" for pkg in packages])+"\n"+source
class annotation_gen(object):
def __init__(self, anno):
self.anno = anno
self.name = module_name(anno)
self.java_signature = None
self.anno_imports = set()
self.fill_imports()
def fill_imports(self):
self.arg_cnt = 0
# print "ANNO", self.anno
for key, value in self.anno.__dict__.items():
if isinstance(value, reflectedfunction):
try:
T = value.argslist[0].data.returnType
self.anno_imports.update(TypeExtractor().extract(T).classes)
self.arg_cnt+=1
except AttributeError:
pass
self.anno_imports.update(TypeExtractor().extract(self.anno).classes)
def has_arguments(self):
return bool(self.arg_cnt)
def getAnnotation(self):
return self.anno
def add_signature(self, anno):
if self.java_signature:
anno.java_signature = self.java_signature
return anno
def new_annotation(self, arg = ''):
return annotation(self.anno, arg)
def create_annotation(self, **kwds):
args = []
add_imports = set()
allowed_kwds = self.anno.__dict__.keys()
for key, value in kwds.items():
if not key in allowed_kwds:
raise TypeError("Unknown keyword argument '%s' for annotation %s"%(key, type_name(self.anno)))
if hasattr(value, "__iter__"):
Value = []
for item in value:
if isinstance(item, (annotation, annotation_gen)):
add_imports.update(item.anno_imports)
Value.append(str(item))
elif isinstance(item, java.lang.Enum):
Value.append(type_name(type(item))+"."+str(item))
elif isinstance(item, str):
Value.append('"'+item+'"')
else:
Value.append(str(item))
value = '{'+','.join(Value)+'}'
elif isinstance(value, basestring):
value = '"'+value+'"'
elif isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, java.lang.Class):
add_imports.add(type_name(value))
value = module_name(value)+".class"
elif not isinstance(value, (int, float, str, annotation)):
try:
T = type(value)
value = package_name(T)+"."+module_name(T)+"."+str(value)
except AttributeError:
pass
args.append("%s = %s"%(key, value))
if args:
anno = self.new_annotation("("+",".join(args)+")")
else:
anno = self.new_annotation()
anno.anno_imports = self.anno_imports | add_imports
# print "ANNO", anno, anno.anno_imports
if self.java_signature:
anno.java_signature = self.java_signature
return anno
def __call__(self, __obj = None, **kwds):
if kwds:
return self.create_annotation(**kwds)
elif __obj:
if isinstance(__obj, signature):
self.java_signature = __obj.java_signature
return self
elif hasattr(__obj, "__call__"):
anno = self.new_annotation()
anno.anno_imports = self.anno_imports
return self.add_signature(anno)(__obj)
else:
kwds["value"] = __obj
return self.create_annotation(**kwds)
else:
anno = self.new_annotation()
anno.anno_imports = self.anno_imports
return self.add_signature(anno)
def __repr__(self):
return "@"+self.name
class annotation(object):
def __init__(self, anno, arg = ''):
'''
:param anno: Java annotation class.
:param arg: additional arguments used to construct the annotation.
'''
self.anno = anno
self.arg = arg
self.sub_annotations = []
self.java_annotations = []
self.java_signature = []
self.anno_imports = set()
def anno_repr(self):
return module_name(self.anno)+self.arg
def getAnnotation(self):
return self.anno
@classmethod
def new_anno_generator(self, anno):
return annotation_gen(anno)
@classmethod
def extract(cls, *jannoclasses):
assert jannoclasses
_annotations = []
for anno in jannoclasses:
annogen = cls.new_anno_generator(anno)
if annogen.has_arguments():
_annotations.append(annogen)
else:
_annotations.append(annogen())
return (_annotations[0] if len(_annotations) == 1 else _annotations)
def __call__(self, obj):
if isinstance(obj, signature):
self.java_signature = obj.java_signature
return self
elif hasattr(obj, "__iter__"):
lst = []
for item in obj:
if isinstance(item, (annotation, annotation_gen)):
self.anno_imports.update(item.anno_imports)
                lst.append(item)
self.sub_annotations = lst
return self
elif isinstance(obj, annotation):
obj.java_annotations+=self.java_annotations+[self]
obj.anno_imports.update(self.anno_imports)
if self.java_signature:
obj.java_signature = self.java_signature
elif hasattr(obj, "java_annotations"):
obj.java_annotations.append(self)
if self.java_signature:
try:
obj.java_signature = self.java_signature
except AttributeError:
obj.im_func.java_signature = self.java_signature
else:
try:
obj.java_annotations = [self]
if self.java_signature:
obj.java_signature = self.java_signature
except AttributeError:
obj.im_func.java_annotations = [self]
if self.java_signature:
obj.im_func.java_signature = self.java_signature
return obj
def __repr__(self):
if self.sub_annotations:
if len(self.sub_annotations) == 1:
return "@"+self.anno_repr()+"("+str(self.sub_annotations)[1:-1]+")"
else:
return "@"+self.anno_repr()+"( {"+str(self.sub_annotations)[1:-1]+"} )"
else:
return "@"+self.anno_repr()
class JavaClassMaker(object):
def __init__(self, store = False, display = False, **options):
self.store = store
self.display = display
self.options = options
self.annotations = []
self.preprocessor = [self.make_bean]
self.postprocessor = []
def make_bean(self, cls):
setattr(cls, "plain_methods", [])
setattr(cls, "mapping_attributes",[])
for key, val in cls.__dict__.items():
if hasattr(val, "bean_property"):
cls.mapping_attributes.append(key)
if isinstance(val.bean_property, str):
T = val.bean_property
else:
T = module_name(val.bean_property)
setattr(cls, key, jproperty("private "+T, val))
Name = key.capitalize()
cls.plain_methods.append(" public %s get%s() { return %s; }"%(T, Name, key))
cls.plain_methods.append(" public void set%s(%s value) { %s = value; }"%(Name, T, key))
return cls
def __call__(self, cls):
signature.overload_handler(cls)
for trans in self.preprocessor:
cls = trans(cls)
for anno in self.annotations:
cls = anno(cls)
source = Translator(cls, **self.options).build_class()
if self.options.get("display_before"):
            print(source)
packages, missing = ImportFinder(cls, source).findPackages()
if packages:
source = add_imports(source, packages)
if self.display:
            print(source)
javacls = JavaCompiler(store=self.store).createClass(module_name(cls), source)
javacls.java_source = source
for trans in self.postprocessor:
trans(cls, javacls)
def newInstance(javaobj, *args):
jyobj = cls(*args)
jyobj.javaobj = javaobj
return jyobj
def callStatic(funcname, *args):
f = getattr(cls,funcname)
return f(*args)
JyGateway.registry[module_name(cls)] = {"newInstance":newInstance, "callStatic":callStatic}
return javacls
def getAnnotations(obj):
'''
Returns list of Java annotations of ``obj``.
'''
if isinstance(obj, reflectedfunction):
return obj.argslist[0].data.getAnnotations()
elif isinstance(obj, java.lang.Class):
return java.lang.Class.getAnnotations(obj)
elif isinstance(obj, reflectedfield):
return Field.getAnnotations(obj.field)
return []
def bean_property(sig):
'''
Decorator used to mark simple functions as Entity Bean properties.
'''
def annotate(f):
setattr(f, "bean_property", sig)
return f
return annotate
def JavaClass(cls=None, **kwd):
if "store" not in kwd:
kwd["store"] = True
if cls:
return JavaClassMaker(**kwd)(cls)
else:
return JavaClassMaker(**kwd)
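# Usage sketch (names are illustrative): compile a Jython class into a real
# Java class backed by the JyGateway registry.
#
#   @JavaClass
#   class Greeter(java.lang.Object):
#       @signature("public String greet(String name)")
#       def greet(self, name):
#           return "Hello, " + name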
def WrapperClass(cls=None, **kwd):
if "store" not in kwd:
kwd["store"] = True
kwd["supercall"] = True
return JavaClass(cls, **kwd)
| python |
from ..model.elapsed_time_fractions import ElapsedTimeFractions
def calculate_time_fractions(elapsed_time_ns: int) -> ElapsedTimeFractions:
"""Elapsed time is in nanoseconds and should be calculated as difference between start and stop time using on the time.perf_counter_ns() function."""
microseconds, nanoseconds = divmod(elapsed_time_ns, 1000)
# As divmod() can be slow, let's return 0s as a tuple if divmod() isn't needed:
milliseconds, microseconds = divmod(microseconds, 1000) if microseconds > 0 else (0, 0)
seconds, milliseconds = divmod(milliseconds, 1000) if milliseconds > 0 else (0, 0)
minutes, seconds = divmod(seconds, 60) if seconds > 0 else (0, 0)
hours, minutes = divmod(minutes, 60) if minutes > 0 else (0, 0)
days, hours = divmod(hours, 24) if hours > 0 else (0, 0)
return ElapsedTimeFractions(
nanoseconds=int(nanoseconds),
microseconds=int(microseconds),
milliseconds=int(milliseconds),
seconds=int(seconds),
minutes=int(minutes),
hours=int(hours),
days=int(days))
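# Minimal usage sketch (run from within the package, since this module uses a
# relative import):
#   import time
#   start = time.perf_counter_ns()
#   sum(range(1_000_000))
#   fractions = calculate_time_fractions(time.perf_counter_ns() - start)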
| python |