max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
packaging/pack1/andrew_mod1.py | AndreiHondrari/python_exploration | 3 | 11400 | <reponame>AndreiHondrari/python_exploration<gh_stars>1-10
def something() -> None:
print("Andrew says: `something`.")
| 2.15625 | 2 |
app/api/v2/views/blacklist.py | MaggieChege/STORE-MANAGER-API-V2 | 0 | 11401 | blacklist=set()
def get_blacklist():
return blacklist
def add_to_blacklist(jti):
return blacklist.add(jti)
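# Added usage sketch (not part of the original module): these helpers are typically
# wired into a JWT "is this token revoked?" check; the jti value below is a made-up
# placeholder.
def _demo_blacklist_usage():
    add_to_blacklist('example-jti-123')
    return 'example-jti-123' in get_blacklist()  # True once the jti has been revoked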
| 1.789063 | 2 |
apps/tasks/api/views.py | dayvidemerson/django-rest-example | 0 | 11402 | <gh_stars>0
from rest_framework import viewsets
from rest_framework import generics
from ..models import Task
from .serializers import TaskSerializer
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
queryset = Task.objects.all()
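# Added illustration (the project's real urls.py is not shown in this snippet): a
# ModelViewSet such as TaskViewSet is usually exposed through a DRF router, roughly:
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register(r'tasks', TaskViewSet, basename='task')
# urlpatterns = router.urls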
| 1.71875 | 2 |
jarvis/__init__.py | jduncan8142/JARVIS | 0 | 11403 | __version__ = "0.0.3"
__author__ = "<NAME>"
__support__ = "<EMAIL>"
| 1.039063 | 1 |
src/figcli/test/cli/action.py | figtools/figgy-cli | 36 | 11404 | from typing import Union, List
import pexpect
from figcli.utils.utils import Utils
import sys
class FiggyAction:
"""
Actions prevent cyclic dependencies, and are designed for leveraging FiggyCli for cleanup steps when running inside
of tests.
"""
def __init__(self, command, extra_args=""):
self.c = Utils.default_colors()
self.command = command
self.extra_args = extra_args
self._child = self.spawn(command)
print(f"{self.c.fg_yl}Executing action: {self._child.args}{self.c.rs}")
self._child.logfile = sys.stdout
self._child.delaybeforesend = .5
def spawn(self, command: str):
return pexpect.spawn(command, timeout=10, encoding='utf-8')
def expect_multiple(self, regexes: List[str]):
print(f'Expecting: {regexes}')
return self._child.expect(regexes)
def expect(self, regex: Union[List[str], str], retry=True):
print(f'Expecting: {regex}')
expect_list = [regex] + [pexpect.TIMEOUT] if isinstance(regex, str) else regex + [pexpect.TIMEOUT]
result = self._child.expect(expect_list)
if result == len(expect_list) - 1 and retry:
self.alert(f"EXPECT FAILED: {regex} initiating retry!")
self._child = self.spawn(self.command)
return self.expect(regex, retry=False)
else:
return result
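# Added note (not in the original source): pexpect.TIMEOUT is appended as the last
# pattern, so when expect() returns the final index the call timed out; the action
# then respawns the command once and retries. A hypothetical call (command made up):
#   action = FiggyAction('figgy config get /app/name')
#   idx = action.expect([r'Value:.*', r'Error:.*'])  # 0 or 1 on a real match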
def sendline(self, line: str):
print(f'Sending: {line}')
self._child.sendline(line)
def wait(self):
self._child.wait()
def alert(self, msg: str):
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}")
print(f"{self.c.fg_rd} ALERT: {msg}{self.c.rs}")
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}") | 2.28125 | 2 |
test/python/test_elementwise_ops.py | avijit-chakroborty/ngraph-bridge | 142 | 11405 | <reponame>avijit-chakroborty/ngraph-bridge
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge elementwise operations test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestElementwiseOperations(NgraphTest):
@pytest.mark.parametrize(("v1", "v2", "expected"),
((1.0, -1.0, [1.0]), (100, 200, ([200],)),
([0.0, 5.0, 10.0], [6.0],
(np.array([[6.0, 6.0, 10.0]]),))))
def test_maximum(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.maximum(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [True]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, False, True]]),))))
def test_less_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [False]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [True]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[True, False, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, False, True]]),))))
def test_less(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.less(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([True],)), (-1.0, 1000, [False]),
(200, 200, ([True],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, True]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[True, True, False]]),))))
def test_greater_equal(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater_equal(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(
("v1", "v2", "expected"),
((1.4, 1.0, [True]), (-1.0, -1.0, ([False],)), (-1.0, 1000, [False]),
(200, 200, ([False],)), ([-1.0, 1.0, -4], [0.1, 0.1, -4],
(np.array([[False, True, False]]),)),
([-1.0, 1.0, -4], [-1.0], (np.array([[False, True, False]]),))))
def test_greater(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.float32, shape=(None))
val2 = tf.compat.v1.placeholder(tf.float32, shape=(None))
out = tf.greater(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("v1", "v2", "expected"),
((True, True, [True]), (True, False, ([False],)),
(1.0, -2.0, ([True],)), (False, 100, ([False],)),
([False, True, False], [True],
(np.array([[False, True, False]]),))))
def test_logical_and(self, v1, v2, expected):
val1 = tf.compat.v1.placeholder(tf.bool, shape=(None))
val2 = tf.compat.v1.placeholder(tf.bool, shape=(None))
out = tf.logical_and(val1, val2)
sess_fn = lambda sess: sess.run((out,),
feed_dict={
val1: (v1,),
val2: (v2,)
})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
@pytest.mark.parametrize(("test_input", "expected"), ((False, True),
(True, False)))
def test_logicalnot_1d(self, test_input, expected):
val = tf.compat.v1.placeholder(tf.bool, shape=(1,))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: (test_input,)})[
0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
def test_logicalnot_2d(self):
test_input = ((True, False, True), (False, True, False))
expected = np.logical_not(test_input)
val = tf.compat.v1.placeholder(tf.bool, shape=(2, 3))
out = tf.logical_not(val)
sess_fn = lambda sess: sess.run((out,), feed_dict={val: test_input})[0]
assert (self.with_ngraph(sess_fn) == self.without_ngraph(sess_fn)).all()
assert (self.with_ngraph(sess_fn) == expected).all()
| 1.742188 | 2 |
qc/slips.py | mfkiwl/UREGA-qc | 0 | 11406 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>.
@mail: <EMAIL>
"""
# from qc.__version__ import __version__
import georinex as gr
import numpy as np
from matplotlib.pyplot import figure, show
import matplotlib.pyplot as plt
obs = gr.load(
'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx',
# tlim=['2021-11-03T12:00', '2021-11-03T12:30'])
tlim=['2021-11-03T05:30', '2021-11-03T07:30'])
# tlim=['2021-11-03T15:00', '2021-11-03T18:00'])
# hdr = gr.rinexheader(
# 'tests/test_data/Rinex3/KLSQ00GRL_R_20213070000_01D_15S_MO.rnx')
# rnx_version = 3
# %% Starting test
# Copying helper functions from Multipath class - later on, it could be turned
# into a separate class with helper functions
# Pick GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# %%
# 5:30 to 7:30, G08 and G21 give 2 cycle slips # [290:300]
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
plt.title('Single-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
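# Added worked example of the slip criterion above (illustrative numbers only): with
# k = 4, sigma_L4 = 0.05 (std of the geometry-free combination L4 = |L1 - L2|) and
# I_max = 0.4, the threshold is 4 * 0.05 + 0.4 = 0.6, so any epoch-to-epoch jump
# |L4[i] - L4[i-1]| larger than 0.6 is flagged as a cycle slip.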
# %%
# Plot all loaded sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
for i in range(0, len(svG)):
test = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label=svG[i], linewidth=2.0)
#ax.plot(L2test.time, L2test, label='L2', linewidth=0.5)
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %%
# Plot separate sats, L1 and L2
ax = figure(figsize=(10, 6)).gca()
test = obs.sel(sv='E21').dropna(dim='time', how='all')
L1test = test['L1C']
L2test = test['L2W']
ax.plot(L1test.time, L1test, label='L1', linewidth=2.0)
ax.plot(L2test.time, L2test, label='L2', linewidth=1.0)
ax.grid()
# ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('Carrier phases')
show()
# %% Dual-frequency Melbourne-Wuebbena testing
# 'G01','G06','G08','G10','G12','G14','G17','G19','G21','G22','G24','G30','G32'
sat = 'G21'
sattest = obs.sel(sv=sat).dropna(dim='time', how='all')
# G02 data vars with no-nan: C1C, D1C, L1C, S1C, C1W, C2W, D2W, L2W, S1W, S2W
freq = [1575.42, 1227.60, 1176.45] # L1, L2, L5 for GPS
f1 = freq[0]*1e6
f2 = freq[1]*1e6
P1 = sattest['C1C']
P2 = sattest['C2W']
L1 = sattest['L1C'] # GPS
L2 = sattest['L2W'] # GPS
# L1 = sattest['L1C'] # Galileo
# L2 = sattest['L8Q'] # Galileo
L6 = (1/(f1-f2))*(f1*L1 - f2*L2) - (1/(f1+f2))*(f1*P1 + f2*P2)
sigma_L6 = np.std(L6)
k = 4 # criterion factor
criterion = k*sigma_L6
slips_nr = 0
L6_diff = []
for i in range(1, len(L6)):
L6_diff.append(np.abs(L6[i] - L6[i-1]))
if (np.abs(L6[i] - L6[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L6_diff, label=sat)
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L6')
plt.title('Dual-frequency Melbourne-Wuebbena')
show()
print('Slips:', slips_nr, ', Slip criterion:', criterion.values)
# %% Work in Progress
class Slips:
"""
Class for cycle slip detection of RINEX files.
Provides options for different detection algorithms.
Parameters
----------
L1 : TYPE
DESCRIPTION.
Returns
-------
L4 : TYPE
DESCRIPTION.
"""
def __init__(self):
pass
def slips_MW_single_freq(self, obs):
"""
Cycle slip detection algorithm 1.
Based on Melbourne-Wuebbena,
but only on carrier phase data (single-frequency)
(from Vaclavovic-Dousa 2016 article)
Parameters
----------
obs : TYPE
DESCRIPTION.
Returns
-------
None.
"""
# Select a list of GPS satellites
svG = []
for i in range(0, len(obs.sv)):
if str(obs.sv[i].values)[0] == 'G':
svG.append(str(obs.sv[i].values))
else:
continue
# Melbourne-Wuebbena parameters (predetermined)
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
# For each tracked satellite
for i in range(0, len(svG)):
current_sat = obs.sel(sv=svG[i]).dropna(dim='time', how='all')
L1 = current_sat['L1C']
L2 = current_sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for j in range(1, len(L4)):
L4_diff.append(np.abs(L4[j] - L4[j-1]))
if (np.abs(L4[j] - L4[j-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
print('Sat:', svG[i],
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
def plot_slips(self, obs, sat_nr: str):
"""
Plot cycle slips for one satellite vehicle.
Parameters
----------
obs : TYPE
DESCRIPTION.
sat_nr : str
DESCRIPTION.
Returns
-------
None.
"""
sat = obs.sel(sv=sat_nr).dropna(dim='time', how='all')
I_max = 0.4 # Maximal ionospheric delay [m/h]
k = 4 # criterion factor
L1 = sat['L1C']
L2 = sat['L2W']
L4 = np.abs(L1 - L2)
sigma_L4 = np.std(L4)
criterion = k*sigma_L4 + I_max
slips_nr = 0
L4_diff = []
for i in range(1, len(L4)):
L4_diff.append(np.abs(L4[i] - L4[i-1]))
if (np.abs(L4[i] - L4[i-1]) > criterion):
# If satisfied, raise cycle-slip flag
slips_nr = slips_nr + 1
ax = figure(figsize=(10, 6)).gca()
ax.plot(L2.time[1:], L4_diff, label=sat_nr, linewidth=1.0)
# labelfull = 'Slip limit: ', criterion.values
plt.axhline(y=criterion, label='Slip limit', linestyle='-', color='r')
ax.grid()
ax.legend()
plt.xlabel('Time [epochs]')
plt.ylabel('L4')
show()
print('Sat:', sat_nr,
', Slips:', slips_nr,
', Slip criterion:', criterion.values)
# %% Testing first algorithm
sliptest = Slips().slips_MW_single_freq(obs)
# %% Testing plot function
sliptest = Slips().plot_slips(obs, 'G08')
| 2.03125 | 2 |
tests/test_utils.py | jga/goldfinchsong | 0 | 11407 | from collections import OrderedDict
from datetime import datetime, timezone
import unittest
from os.path import join
from tinydb import TinyDB, storages
from goldfinchsong import utils
IMAGE_NAMES = ['goldfinch1.jpg', 'goldfinch2.jpg', 'goldfinch3.jpg',
'goldfinch4.jpg', 'goldfinch5.jpg']
TEST_TEXT1 = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
TEST_TEXT2 = 'This is a test of the goldfinchsong project. Tests ' \
'abbreviations, vowel elision, length checking, and other logic. ' \
'Tests are important!'
class LoadContentTests(unittest.TestCase):
def test_basic_load(self):
image_directory = 'tests/images/'
db = TinyDB(storage=storages.MemoryStorage)
content = utils.load_content(db, image_directory)
full_image_path = content[0]
image_file = full_image_path.replace(image_directory, '')
status_text = content[1]
self.assertTrue(image_file in IMAGE_NAMES)
self.assertEqual(image_file.replace('.jpg', ''), status_text)
def test_storage_in_db(self):
image_directory = 'tests/images/'
# let's load a list of tweets into the db
db = TinyDB(storage=storages.MemoryStorage)
image_names = [
'goldfinch1.jpg',
'goldfinch2.jpg',
'goldfinch3.jpg',
'goldfinch4.jpg'
]
for image_name in image_names:
delivery_timestamp = datetime.now(tz=timezone.utc).isoformat()
tweet = {'image': image_name, 'delivered_on': delivery_timestamp}
db.insert(tweet)
content = utils.load_content(db, image_directory)
self.assertEqual(content[2], 'goldfinch5.jpg')
tweets = db.all()
self.assertEqual(len(tweets), 4, msg=tweets)
class UtilitiesTests(unittest.TestCase):
def test_apply_abbreviations(self):
text_conversions = {
'abbreviations': 'abbr',
'goldfinchsong': 'gf',
'important': 'impt'
}
# exhausts all conversions before reaching limit
new_text1 = utils.apply_abbreviations(TEST_TEXT1, text_conversions)
expected_text1 = 'This is a test of the gf project. This test checks ' \
'abbr, vowel elision, length checking, and other logic. ' \
'Tests are impt!'
self.assertEqual(expected_text1, new_text1)
new_text2 = utils.apply_abbreviations(TEST_TEXT2, text_conversions)
self.assertTrue(len(new_text2) <= 117)
def test_apply_vowel_elision(self):
result_text = utils.apply_vowel_elision(TEST_TEXT1)
expected_text = 'This is a tst of the gldfnchsng prjct. Ths tst chcks ' \
'abbrvtns, vwl elsn, lngth chckng, and othr lgc. Tsts ' \
'are imprtnt!'
self.assertEqual(expected_text, result_text)
def test_assemble_elided_status(self):
complete_words = ['test', 'a', 'is', 'This']
elided_words = ['systm', 'gldfnch', 'the', 'of']
result = utils.assemble_elided_status(complete_words, elided_words)
self.assertEqual('This is a test of the gldfnch systm', result)
def test_chop_words(self):
result_text = utils.chop_words(TEST_TEXT1)
expected_text = 'This is a test of the goldfinchsong project. This test checks ' \
'abbreviations, vowel elision, length checking, and'
self.assertEqual(expected_text, result_text)
def test_is_image(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertTrue(utils.is_image_file(image_file))
def test_is_not_image(self):
image_files = [
'image.docx',
'image.pdf',
'image.md',
'image.html',
'image.css',
'image.odt',
'image.sh',
'image.xlsx',
'image.txt',
'image.c',
'image.py',
'image'
]
for image_file in image_files:
self.assertFalse(utils.is_image_file(image_file))
def test_trim_file_extensions(self):
image_files = [
'image.gif',
'image.jpg',
'image.jpeg',
'image.png',
'image.GIF',
'image.JPG',
'image.JPEG',
'image.PNG',
'image.GiF',
'image.JpG',
'image.JpEg',
'image.PnG'
]
for image_file in image_files:
self.assertEqual(utils.trim_file_extension(image_file), 'image')
def test_to_compact_text(self):
text_conversions = {
'abbreviations': 'abbrs',
'goldfinchsong': 'gfnch',
'important': 'importnt'
}
candidate_text1 = utils.to_compact_text(TEST_TEXT1, 100, text_conversions)
expected_text1 = 'Ths is a tst of the gfnch prjct. Ths tst chcks abbrs, ' \
'vwl elsn, lngth chckng, and othr lgc. Tsts are'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.to_compact_text(TEST_TEXT1, 50, text_conversions)
expected_text2 = 'Ths is a tst of the gfnch prjct. Ths tst chcks'
self.assertEqual(expected_text2, candidate_text2)
candidate_text3 = utils.to_compact_text(TEST_TEXT1, 20, text_conversions)
expected_text3 = 'Ths is a tst of the'
self.assertEqual(expected_text3, candidate_text3)
def test_extract_status_text(self):
conversion_data = (
('abbreviations', 'abbrs'),
('goldfinchsong', 'gfnch'),
('important', 'importnt'),
)
text_conversions = OrderedDict(conversion_data)
file = 'Some_goldfinchsong_image-file_with_a_very_long_set_of_' \
'characters_and_abbreviations_that_conveys_important_info.png'
candidate_text1 = utils.extract_status_text(file, text_conversions, maximum_length=100,)
expected_text1 = 'Some gfnch image-file with a very long set of characters and abbrs that conveys important info'
self.assertEqual(expected_text1, candidate_text1)
candidate_text2 = utils.extract_status_text(file, text_conversions, maximum_length=70,)
expected_text2 = 'Sme gfnch imge-fle wth a vry lng st of chrctrs and abbrs tht cnvys'
self.assertEqual(expected_text2, candidate_text2)
def test_get_unused_files(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,52):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 49)
self.assertEqual(unused_files[0], 'image52.png')
self.assertEqual(unused_files[5], 'image57.png')
self.assertEqual(unused_files[10], 'image62.png')
self.assertEqual(unused_files[15], 'image67.png')
self.assertEqual(unused_files[20], 'image72.png')
self.assertEqual(unused_files[33], 'image85.png')
self.assertEqual(unused_files[48], 'image100.png')
def test_db_purge_when_all_posted(self):
available_files = list()
for index in range(1,101):
image_name = 'image{0}.png'.format(index)
available_files.append(image_name)
db = TinyDB(storage=storages.MemoryStorage)
for id in range(1,106):
image_name = 'image{0}.png'.format(id)
db.insert({'image': image_name})
self.assertEqual(len(db.all()), 105)
unused_files = utils.get_unused_files(db, available_files)
self.assertEqual(len(unused_files), 100)
self.assertEqual(unused_files[0], 'image1.png')
self.assertEqual(unused_files[5], 'image6.png')
self.assertEqual(unused_files[10], 'image11.png')
self.assertEqual(unused_files[33], 'image34.png')
self.assertEqual(unused_files[50], 'image51.png')
| 2.703125 | 3 |
lingvo/tasks/image/input_generator.py | allenwang28/lingvo | 2,611 | 11408 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generator for image data."""
import os
import lingvo.compat as tf
from lingvo.core import base_input_generator
from tensorflow.python.ops import io_ops
class _MnistInputBase(base_input_generator.BaseTinyDatasetInput):
"""Base input params for MNIST."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data_dtype = tf.uint8
p.data_shape = (28, 28, 1)
p.label_dtype = tf.uint8
return p
def _Preprocess(self, raw):
data = tf.stack([
tf.image.per_image_standardization(img) for img in tf.unstack(raw)
])
data.set_shape(raw.shape)
return data
class MnistTrainInput(_MnistInputBase):
"""MNist training set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_train'
p.label = 'y_train'
p.num_samples = 60000
p.batch_size = 256
p.repeat = True
return p
class MnistTestInput(_MnistInputBase):
"""MNist test set."""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.data = 'x_test'
p.label = 'y_test'
p.num_samples = 10000
p.batch_size = 256
p.repeat = False
return p
def _GetRandomImages(batch_size):
images = tf.random.uniform((batch_size, 28, 28, 1), 0, 255, tf.int32)
return tf.cast(images, tf.uint8)
def _GetRandomLabels(batch_size):
labels = tf.random.categorical(0.1 * tf.ones((1, 10)), batch_size)
return tf.cast(labels, tf.uint8)
def FakeMnistData(tmpdir, train_size=60000, test_size=10000):
"""Fake Mnist data for unit tests."""
data_path = os.path.join(tmpdir, 'ckpt')
with tf.Graph().as_default():
tf.random.set_seed(91)
with tf.Session() as sess:
sess.run(
io_ops.save_v2(
data_path,
tensor_names=['x_train', 'y_train', 'x_test', 'y_test'],
shape_and_slices=['', '', '', ''],
tensors=[
_GetRandomImages(train_size),
_GetRandomLabels(train_size),
_GetRandomImages(test_size),
_GetRandomLabels(test_size)
]))
return data_path
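# Added usage sketch (not part of the original file): FakeMnistData only needs a
# writable directory, so a unit test could create a small fake checkpoint like this:
#   import tempfile
#   fake_ckpt_path = FakeMnistData(tempfile.mkdtemp(), train_size=100, test_size=10)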
| 2.359375 | 2 |
src/solutions/part1/q389_find_diff.py | hychrisli/PyAlgorithms | 0 | 11409 | from src.base.solution import Solution
from src.tests.part1.q389_test_find_diff import FindDiffTestCases
class FindDiff(Solution):
def verify_output(self, test_output, output):
return test_output[0] == output[0]
def run_test(self, input):
return self.findTheDifference(input[0], input[1])
def gen_test_cases(self):
return FindDiffTestCases()
def print_output(self, output):
super(FindDiff, self).print_output(output)
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
lkp = dict()
for ch in s:
lkp[ch] = lkp.get(ch, 0) + 1
for ch in t:
lkp[ch] = lkp.get(ch, 0) - 1
if lkp[ch] < 0:
return ch
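# Added worked trace (illustrative, not part of the original solution): for s = "abcd"
# and t = "abcde", the first loop counts each of a-d once, the second loop decrements
# them back to 0, and 'e' drops to -1, so 'e' (the added character) is returned.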
if __name__ == '__main__':
solution = FindDiff()
solution.run_tests() | 2.859375 | 3 |
mydict.py | zengboming/python | 0 | 11410 | <reponame>zengboming/python
#unit
#mydict.py
class Dict(dict):
def __init__(self,**kw):
super(Dict,self).__init__(**kw)
def __getattr__(self,key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object han no attribute'%s'" %key)
def __setattr__(self,key,value):
self[key]=value
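# Added usage sketch (not in the original file): attribute access and key access are
# interchangeable on this Dict subclass.
def _demo_dict():
    d = Dict(a=1)
    d.b = 2
    return d.a == 1 and d['b'] == 2  # True; d.missing would raise AttributeError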
| 2.984375 | 3 |
copy_annotations/conflict.py | abhinav-kumar-thakur/TabularCellTypeClassification | 19 | 11411 | import contextlib
import os
import tempfile
import warnings
from enum import Enum
import mip
class IISFinderAlgorithm(Enum):
DELETION_FILTER = 1
ADDITIVE_ALGORITHM = 2
class SubRelaxationInfeasible(Exception):
pass
class NonRelaxableModel(Exception):
pass
class ConflictFinder:
"""This class groups some IIS (Irreducible Infeasible Set) search algorithms"""
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
def find_iis(
self, method: IISFinderAlgorithm = IISFinderAlgorithm.DELETION_FILTER,
cbc_verbose: bool = False
) -> mip.ConstrList:
"""main method to find an IIS, this method is just a grouping of the other implementations
Args:
model (mip.Model): Infeasible model where to find the IIS
method (str, optional): name of the method to use ["deletion-filter", "additive_algorithm"]. Defaults to 'deletion-filter".
Returns:
mip.ConstrList: IIS constraint list
"""
# assert ,is not because time limit
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
if method == IISFinderAlgorithm.DELETION_FILTER:
return self.deletion_filter()
if method == IISFinderAlgorithm.ADDITIVE_ALGORITHM:
return self.additive_algorithm()
def deletion_filter(self) -> mip.ConstrList:
"""deletion filter algorithm for search an IIS
Args:
model (mip.Model): Infeasible model
Returns:
mip.ConstrList: IIS
"""
# 1. create a model with all constraints but one
aux_model = self.model.copy()
aux_model.objective = 1
aux_model.emphasis = 1 # feasibility
aux_model.preprocess = 1 # -1 automatic, 0 off, 1 on.
print("starting deletion_filter algorithm")
for inc_crt in self.model.constrs:
aux_model_inc_crt = aux_model.constr_by_name(
inc_crt.name
) # find constraint by name
aux_model.remove(aux_model_inc_crt) # temporally remove inc_crt
aux_model.optimize()
status = aux_model.status
# 2. test feasibility, if feasible, return dropped constraint to the set
# 2.1 else removed it permanently
# print('status {}'.format(status))
if status == mip.OptimizationStatus.INFEASIBLE:
# print("removing permanently {}".format(inc_crt.name))
continue
elif status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
aux_model.add_constr(
inc_crt.expr, name=inc_crt.name, priority=inc_crt.priority
)
iis = aux_model.constrs
return iis
def additive_algorithm(self) -> mip.ConstrList:
"""Additive algorithm to find an IIS
Returns:
mip.ConstrList: IIS
"""
# Create some aux models to test feasibility of the set of constraints
aux_model_testing = mip.Model()
for var in self.model.vars:
aux_model_testing.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
# obj= var.obj,
# column=var.column #!! libc++abi.dylib: terminating with uncaught exception of type CoinError
)
aux_model_testing.objective = 1
aux_model_testing.emphasis = 1 # feasibility
aux_model_testing.preprocess = 1 # -1 automatic, 0 off, 1 on.
aux_model_iis = (
aux_model_testing.copy()
) # a second aux model to test feasibility of the incumbent iis
# algorithm start
all_constraints = self.model.constrs
testing_crt_set = mip.ConstrList(model=aux_model_testing) # T
iis = mip.ConstrList(model=aux_model_iis) # I
while True:
for crt in all_constraints:
testing_crt_set.add(crt.expr, name=crt.name)
aux_model_testing.constrs = testing_crt_set
aux_model_testing.optimize()
if aux_model_testing.status == mip.OptimizationStatus.INFEASIBLE:
iis.add(crt.expr, name=crt.name)
aux_model_iis.constrs = iis
aux_model_iis.optimize()
if aux_model_iis.status == mip.OptimizationStatus.INFEASIBLE:
return iis
elif aux_model_iis.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
testing_crt_set = mip.ConstrList(model=aux_model_testing)
for (
crt
) in (
iis
): # basically this loop is for set T=I // aux_model_iis = iis.copy()
testing_crt_set.add(crt.expr, name=crt.name)
break
def deletion_filter_milp_ir_lc_bd(self) -> mip.ConstrList:
"""Integer deletion filter algorithm (milp_ir_lc_bd)
Raises:
NotImplementedError: [description]
Returns:
mip.ConstrList: [description]
"""
raise NotImplementedError("WIP")
# major constraint sets definition
t_aux_model = mip.Model(name="t_auxiliary_model")
iis_aux_model = mip.Model(name="t_auxiliary_model")
linear_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constraints
variable_bound_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constrants related specifically for the variable bounds
integer_varlist_crt = mip.VarList(
model=t_aux_model
) # the nature vars constraints for vartype in Integer/Binary
# fill the above sets with the constraints
for crt in self.model.constrs:
linear_constraints.add(crt.expr, name=crt.name)
for var in self.model.vars:
if var.lb != -mip.INF:
variable_bound_constraints.add(
var >= var.lb, name="{}_lb_crt".format(var.name)
)
if var.ub != mip.INF:
variable_bound_constraints.add(
var <= var.ub, name="{}_ub_crt".format(var.name)
)
for var in self.model.vars:
if var.var_type in (mip.INTEGER, mip.BINARY):
integer_varlist_crt.add(var)
status = "IIS"
# add all LC,BD to the incumbent, T= LC + BD
for (
var
) in (
self.model.vars
): # add all variables as if they were CONTINUOUS and without bounds (because these are handled separately)
iis_aux_model.add_var(
name=var.name, lb=-mip.INF, ub=mip.INF, var_type=mip.CONTINUOUS
)
for crt in linear_constraints + variable_bound_constraints:
iis_aux_model.add_constr(crt.expr, name=crt.name, priority=crt.priority)
iis_aux_model.optimize()
if iis_aux_model.status == mip.OptimizationStatus.INFEASIBLE:
# if infeasible means that this is a particular version of an LP
return self.deletion_filter() # (STEP 2)
# add all the integer constraints to the model
iis_aux_model.vars.remove(
[var for var in integer_varlist_crt]
) # remove all integer variables
for var in integer_varlist_crt:
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=var.var_type, # this will add the var with his original type
)
# filter IR constraints that create infeasibility (STEP 1)
for var in integer_varlist_crt:
iis_aux_model.vars.remove(iis_aux_model.var_by_name(var.name))
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS, # relax the integer constraint over var
)
iis_aux_model.optimize()
# if infeasible then update incumbent T = T-{ir_var_crt}
# else continue
# STEP 2 filter lc constraints
# STEP 3 filter BD constraints
# return IS o IIS
def deletion_filter_milp_lc_ir_bd(self) -> mip.ConstrList:
raise NotImplementedError # TODO
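# Added usage sketch (not part of the original module): ConflictFinder expects an
# already-infeasible mip.Model. A minimal self-contained example, assuming the
# python-mip API used above:
def _demo_conflict_finder():
    m = mip.Model()
    x = m.add_var(name='x')  # default bounds 0 <= x
    m.add_constr(x <= 1, name='upper')
    m.add_constr(x >= 2, name='lower')  # jointly infeasible with 'upper'
    m.optimize()
    iis = ConflictFinder(m).find_iis(IISFinderAlgorithm.DELETION_FILTER)
    return [crt.name for crt in iis]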
class ConflictRelaxer:
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
self.iis_num_iterations = 0
self.iis_iterations = []
self.relax_slack_iterations = []
@property
def slack_by_crt(self) -> dict:
answ = {}
for slack_dict_iter in self.relax_slack_iterations:
for crt_name in slack_dict_iter.keys():
if crt_name in answ.keys():
answ[crt_name] += slack_dict_iter[crt_name]
else:
answ[crt_name] = slack_dict_iter[crt_name]
return answ
def hierarchy_relaxer(
self,
relaxer_objective: str = "min_abs_slack_val",
default_priority: mip.constants.ConstraintPriority = mip.constants.ConstraintPriority.MANDATORY,
cbc_verbose: bool = False
) -> mip.Model:
"""hierarchy relaxer algorithm, it's gonna find a IIS and then relax it using the objective function defined (`relaxer_objective`) and then update the model
with the relaxed constraints. This process runs until there's not more IIS on the model.
Args:
relaxer_objective (str, optional): objective function of the relaxer model (IIS relaxer model). Defaults to 'min_abs_slack_val'.
default_priority (ConstraintPriority, optional): If a constraint does not have a supported substring priority in the name, it will assign a default priority.
Defaults to ConstraintPriority.MANDATORY.
Raises:
NonRelaxableModel: [description]
Returns:
mip.Model: relaxed model
"""
relaxed_model = self.model.copy()
relaxed_model._status = self.model._status # TODO solve this in a different way
# map unmaped constraitns to default
for crt in relaxed_model.constrs:
if not crt.priority:
crt.priority = default_priority
iis_it = 0
iis_dict = {}
taboo_list_iis = []
cf = ConflictFinder(relaxed_model)
while True:
# 1. find iis
iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
self.iis_iterations.append([crt.name for crt in iis]) # track iteration
self.iis_num_iterations += 1 # track iteration
iis_priority_set = set([crt.priority for crt in iis])
# check if "relaxable" model mapping
if iis_priority_set == set([mip.constants.ConstraintPriority.MANDATORY]):
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
# 2. relax iis
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
for level, relaxing_level in enumerate(sorted(iis_priority_set, key=lambda x: x.value)):
# highest case (raise exception)
if relaxing_level == mip.constants.ConstraintPriority.MANDATORY:
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
try:
slack_dict = self.relax_iis(iis, relaxer_objective=relaxer_objective, lowest_priority=relaxing_level)
except SubRelaxationInfeasible as e:
warnings.warn(f'Warning relaxing more than one level, currently on l{level} : {relaxing_level}')
continue
else:
# relaxable iis, this is will continue with the next iteration then
break
self.relax_slack_iterations.append(slack_dict)
# 3. add the slack variables to the original problem
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model = self.relax_constraints(relaxed_model, slack_dict)
# 4. check if feasible
relaxed_model.emphasis = 1 # feasibility
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model.optimize()
if relaxed_model.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
print("finished relaxation process !")
break
else:
print(
"relaxed the current IIS, still infeasible, searching for a new IIS to relax"
)
print("relaxed constraints {0}".format(list(slack_dict.keys())))
iis_it += 1
# print(f'found iis_{iis_it} = {[crt.name for crt in iis]}')
iis_dict[iis_it] = {}
iis_crt = [crt.name for crt in iis]
iis_dict[iis_it]['iis'] = [{'name': crt.name, 'priority': str(crt.priority).split('.')[1]} for crt in iis]
print(f'found iis_{iis_it} : len = {len(iis_crt)} in_taboo = {(iis_crt in taboo_list_iis)}')
taboo_list_iis.append(iis_crt)
iis_dict[iis_it]['slack'] = slack_dict
return relaxed_model
@classmethod
def relax_iis(
cls, iis: mip.ConstrList, relaxer_objective: str = "min_abs_slack_val", lowest_priority: 'mip.constants.ConstraintPriority' = None
) -> dict:
"""This function is the sub module that finds the optimum relaxation for an IIS, given a crt priority mapping and a objective function
Args:
iis (mip.ConstrList): IIS constraint list
relaxer_objective (str, optional): objective function to use when relaxing. Defaults to 'min_abs_slack_val'.
Returns:
dict: a slack variable dictionary with the value of the {constraint_name:slack.value} pair to be added to each constraint in order to make the IIS feasible
"""
relax_iis_model = mip.Model()
if lowest_priority is None:
lowest_priority = min([crt.priority for crt in iis])
to_relax_crts = [crt for crt in iis if crt.priority == lowest_priority or crt.priority < lowest_priority]
# create a model that only contains the iis
slack_vars = {}
abs_slack_vars = {}
abs_slack_cod_vars = {}
for crt in iis:
# print(crt.name, crt.priority)
for var in crt._Constr__model.vars:
relax_iis_model.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
obj=var.obj,
)
if crt in to_relax_crts:
# if this is a -to be relax- constraint
slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}__{1}".format(crt.name, "slack"),
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
abs_slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}_abs".format(slack_vars[crt.name].name),
lb=0,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
# add relaxed constraint to model
relax_expr = crt.expr + slack_vars[crt.name]
relax_iis_model.add_constr(
relax_expr,
name="{}_relaxed".format(crt.name),
)
# add abs(slack) variable encoding constraints
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= slack_vars[crt.name],
name="{}_positive_min_bound".format(slack_vars[crt.name].name),
)
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= -slack_vars[crt.name],
name="{}_negative_min_bound".format(slack_vars[crt.name].name),
)
else:
# if not to be relaxed we added directly to the model
relax_iis_model.add_constr(
crt.expr, name="{}_original".format(crt.name), priority=crt.priority
)
# find the min abs value of the slack variables
relax_iis_model.objective = mip.xsum(list(abs_slack_vars.values()))
relax_iis_model.sense = mip.MINIMIZE
relax_iis_model.optimize()
if relax_iis_model.status == mip.OptimizationStatus.INFEASIBLE:
raise SubRelaxationInfeasible("sub relaxation model infeasible, this could mean that in the IIS the mandatory constraints are infeasible sometimes")
slack_dict = {}
for crt in to_relax_crts:
slack_dict[crt.name] = slack_vars[crt.name].x
return slack_dict
@classmethod
def relax_constraints(cls, relaxed_model: mip.Model, slack_dict: dict) -> mip.Model:
"""this method creates a modification of the model `relaxed_model` where all the constraints in the slack_dict are
modified in order to add the slack values to make the IIS disappear
Args:
relaxed_model (mip.Model): model to relax
slack_dict (dict): pairs {constraint_name: slack_var.value}
Returns:
mip.Model: a modification of the original model where all the constraints are modified with the slack values
"""
for crt_name in slack_dict.keys():
crt_original = relaxed_model.constr_by_name(crt_name)
relax_expr = crt_original.expr + slack_dict[crt_name]
relaxed_model.add_constr(
relax_expr, name=crt_original.name, priority=crt_original.priority
)
relaxed_model.remove(crt_original) # remove constraint
return relaxed_model
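# Added usage sketch (not part of the original module): a typical flow hands an
# infeasible model to ConflictRelaxer and asks for a hierarchy relaxation; constraint
# priorities are read from crt.priority, with unset ones mapped to default_priority.
#   relaxer = ConflictRelaxer(infeasible_model)   # infeasible_model: your mip.Model
#   relaxed_model = relaxer.hierarchy_relaxer(relaxer_objective="min_abs_slack_val")
#   print(relaxer.slack_by_crt)                   # accumulated slack per constraint name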
@contextlib.contextmanager
def ignore_output():
with tempfile.TemporaryFile() as f:
orig_std_out = os.dup(1)
os.dup2(f.fileno(), 1)
yield # pause the coroutine to execute the with code
os.dup2(orig_std_out, 1)
os.close(orig_std_out) | 2.53125 | 3 |
output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 11412 | <filename>output/models/nist_data/atomic/integer/schema_instance/nistschema_sv_iv_atomic_integer_pattern_1_xsd/__init__.py
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_pattern_1_xsd.nistschema_sv_iv_atomic_integer_pattern_1 import NistschemaSvIvAtomicIntegerPattern1
__all__ = [
"NistschemaSvIvAtomicIntegerPattern1",
]
| 1.140625 | 1 |
Dietscheduler/lib/menu_converter.py | floromaer/DietScheduler | 0 | 11413 | import re
import xlsxwriter
def parse_menu_to_excel(filename,menu_dict,days_dict,results,goal_dict,food_database,reversed_ingredient_dict,grocery_dict):
# making a temporary dict to map dates and columns in excel:
temp_dates_dict = {}
i=0
for key in days_dict.keys():
temp_dates_dict[days_dict[key]['date_var'].get()] = i
i += 1
temp_meals_dict = {}
i = 0
for meal in ['Breakfast', 'Lunch','Dinner']:
temp_meals_dict[meal] = i
i += 1
# converting the menu-dict to dates and lunches
for item in list(menu_dict.keys()):
new_key = tuple(tuple(elem.replace('M1', 'Breakfast').replace('M2', 'Lunch').replace('M3', 'Dinner').replace('D1', days_dict['D1']['date_var'].get()).replace('D2',days_dict['D2']['date_var'].get()).replace('D3',days_dict['D3']['date_var'].get()).replace('D4',days_dict['D4']['date_var'].get()).replace('D5',days_dict['D5']['date_var'].get()).replace('D6',days_dict['D6']['date_var'].get()).replace('D7',days_dict['D7']['date_var'].get())
for elem in tup) for tup in item)
menu_dict[new_key] = menu_dict[item]
menu_dict.pop(item)
# putting it into an excel file:
workbook = xlsxwriter.Workbook(filename)
separator_format = workbook.add_format({'bg_color': '#000000'})
# make worksheets
menu_worksheet = workbook.add_worksheet(f"Menu - {days_dict['D1']['date_var'].get()} to {days_dict['D7']['date_var'].get()}") # for menu
temp_worksheet_dict = {}
global_groceries_worksheet = workbook.add_worksheet("your grocery list")
for group in list(menu_dict.keys()):
temp_worksheet_dict[group] = workbook.add_worksheet(f"{list(menu_dict[group].keys())[0][:31]}")
# print the menu to menu-sheet
col = 0
for key in temp_dates_dict:
menu_worksheet.write(0, col, key)
col += 1
row = 1
for key in temp_meals_dict:
menu_worksheet.write(row, 0, key)
row += 1
for group in menu_dict.keys():
for slot in group:
menu_worksheet.write(temp_meals_dict[slot[1]] + 1,temp_dates_dict[slot[0]] + 1, str(list(menu_dict[group].keys())[0]))
for i in range(0,8):
menu_worksheet.write(4,i,"",separator_format)
menu_worksheet.write(5,0, "Results:")
row = 5
for metric in results.keys():
menu_worksheet.write(row,1,str(f"{metric}: {round(results[metric],2)}"))
row += 1
menu_worksheet.write(5,2, "Goals:")
row = 6
for metric in goal_dict.keys():
menu_worksheet.write(row,3,str(f"{metric}: {round(goal_dict[metric],2)}"))
row += 1
# writing the global grocery-list:
row = 1
col = 0
global_groceries_worksheet.write(0,0,"Your grocery list:")
for ingredient in grocery_dict.keys():
ingredient_id = reversed_ingredient_dict[ingredient]
global_groceries_worksheet.write(row, col, ingredient)
global_groceries_worksheet.write(row, col + 1, str(grocery_dict[ingredient]))
global_groceries_worksheet.write(row, col + 2, str(food_database['ingredients'][ingredient_id]['unit']))
row += 1
# writing the recipe-lists:
for group in menu_dict.keys():
temp_worksheet_dict[group].write(0,0, f"Ingredient list for {list(menu_dict[group].keys())[0]}:")
row = 1
col = 0
for recipe in menu_dict[group].keys():
for ingredient in menu_dict[group][recipe].keys():
ingredient_id = reversed_ingredient_dict[ingredient]
temp_worksheet_dict[group].write(row, col, ingredient)
temp_worksheet_dict[group].write(row, col + 1, str(menu_dict[group][recipe][ingredient]))
temp_worksheet_dict[group].write(row, col + 2, str(food_database['ingredients'][ingredient_id]['unit']))
row += 1
workbook.close()
| 3.265625 | 3 |
example_problems/tutorial/graph_connectivity/services/esempi/check_one_sol_server.py | romeorizzi/TAlight | 3 | 11414 | <gh_stars>1-10
#!/usr/bin/env python3
from sys import stderr, exit
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from parentheses_lib import recognize
# METADATA OF THIS TAL_SERVICE:
problem="parentheses"
service="check_one_sol_server"
args_list = [
('input_formula',str),
('n',str),
('silent',bool),
('lang',str),
('ISATTY',bool),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
n=ENV['n']
len_input = len(ENV["input_formula"])//2
if not ENV["silent"]:
TAc.print(LANG.opening_msg, "green")
def answer():
if recognize(ENV["input_formula"], TAc, LANG) and not ENV["silent"]:
TAc.OK()
TAc.print(LANG.render_feedback("ok", f' Your string is a well-formed formula with {len_input} pairs of parentheses.'), "yellow", ["bold"])
if n=='free':
answer()
else:
if len_input==int(n):
answer()
elif recognize(ENV["input_formula"], TAc, LANG) and not ENV['silent']:
TAc.print(LANG.render_feedback("different_lengths", f"No! Your string represents a valid formula of parentheses but not of {n} pairs."), "red", ["bold"])
exit(0)
| 2.890625 | 3 |
app/validation/translator.py | codingedward/book-a-meal-api | 0 | 11415 | <gh_stars>0
"""Translates validation error messages for the response"""
messages = {
'accepted': 'The :field: must be accepted.',
'after': 'The :field: must be a date after :other:.',
'alpha': 'The :field: may contain only letters.',
'alpha_dash': 'The :field: may only contain letters, numbers, and dashes.',
'alpha_num': 'The :field: may contain only letters and numbers.',
'array': 'The :field: must be an array.',
'before': 'The :field: must be a date before :other:.',
'between': 'The :field: must be between :least: and :most:.',
'between_string': 'The :field: must be between :least: and :most: characters.',
'between_numeric': 'The :field: must be between :least: and :most:.',
'boolean': 'The :field: must be either true or false.',
'confirmed': 'The :field: confirmation does not match.',
'date': 'The :field: is not a valid date.',
'different': 'The :field: and :other: must be different.',
'digits': 'The :field: must be :length: digits.',
'email': 'The :field: must be a valid email address.',
'exists': 'The selected :field: is invalid.',
'found_in': 'The selected :field: is invalid.',
'integer': 'The :field: must be an integer.',
'json': 'The :field: must be valid json format.',
'most_string': 'The :field: must not be greater than :most: characters.',
'most_numeric': 'The :field: must not be greater than :most:.',
'least_string': 'The :field: must be at least :least: characters.',
'least_numeric': 'The :field: must be at least :least:.',
'not_in': 'The selected :field: is invalid.',
'numeric': 'The :field: must be a number.',
'positive': 'The :field: must be a positive number.',
'regex': 'The :field: format is invalid.',
'required': 'The :field: field is required.',
'required_with': 'The :field: field is required when :other: is present.',
'required_without': 'The :field: field is required when :other: si not present.',
'same': 'The :field: and :other: must match.',
'size_string': 'The :field: must be :size: characters.',
'size_numeric': 'The :field: must be :size:.',
'string': 'The :field: must be a string.',
'unique': 'The :field: is already taken.',
'url': 'The :field: format is invalid.',
}
def trans(rule, fields):
message = messages[rule]
for k, v in fields.items():
message = message.replace(k, v).replace('_', ' ')
return message
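# Added example (not in the original module): trans() substitutes the placeholder
# tokens passed in `fields` and then turns underscores into spaces.
def _demo_trans():
    return trans('required', {':field:': 'email'})  # -> 'The email field is required.'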
| 2.5 | 2 |
chat/main/consumers.py | mlambir/channels_talk_pyconar2016 | 12 | 11416 | from channels import Group
# websocket.connect
def ws_add(message):
Group("chat").add(message.reply_channel)
# websocket.receive
def ws_message(message):
Group("chat").send({
"text": message.content['text'],
})
# websocket.disconnect
def ws_disconnect(message):
Group("chat").discard(message.reply_channel) | 2.515625 | 3 |
env/enviroment.py | Dorebom/robot_pybullet | 0 | 11417 | from copy import deepcopy
import numpy as np
import pybullet as p
import gym
from gym import spaces
from env.robot import Manipulator
from env.work import Work
class Env():
def __init__(self, reward,
step_max_pos = 0.002,
step_max_orn = 0.02,
initial_pos_noise = 0.001,
initial_orn_noise = 0.001,
step_pos_noise = 0.0002,
step_orn_noise = 0.0002):
p.connect(p.GUI)
p.setPhysicsEngineParameter(enableFileCaching=0)
p.setRealTimeSimulation(False)
p.setGravity(0, 0, -9.8)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.setPhysicsEngineParameter(contactBreakingThreshold=0.001)
# Init
self._is_init_env = False
# Plane
self.plane_pos = [0, 0, -0.1]
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
self.reward = reward
self.max_initial_pos_noise = initial_pos_noise
self.max_initial_orn_noise = initial_orn_noise
self.max_step_pos_noise = step_pos_noise
self.max_step_orn_noise = step_orn_noise
# robot
self.step_max_pos = step_max_pos
self.step_max_orn = step_max_orn
self.inv_scaled_force_coef = 5000
# for learning
self.action_space = spaces.Box(
low=-1,
high=1,
shape=(6,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=-1,
high=1,
shape=(12,),
dtype=np.float32
)
self._act_rel_tcp_pose = [0, 0, 0, 0, 0, 0]
def init_env(self, mode = 'rel',
robot_tcp_pose = [0, 0, 0, 0, 0, 0],
robot_base_pose = [0, 0, 0, 0, 0, 0],
robot_tool_pose = [0, 0, 0, 0, 0, 0],
work_base_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
# Load work
self.work = Work(base_pose = work_base_pose)
self.act_abs_work_pose = work_base_pose
# Load robot
self.robot = Manipulator(tool_pose=robot_tool_pose, base_pose=robot_base_pose)
self._reset_robot_pose(mode=mode, tcp_pose=robot_tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self._is_init_env = True
return self.observe_state(mode = mode)
def _reset_robot_pose(self, mode='rel', tcp_pose=[0, 0, 0, 0, 0, 0]):
abs_tcp_pose = np.zeros(6)
if mode == 'rel':
abs_tcp_pose = np.array(self.act_abs_work_pose) + np.array(tcp_pose)
elif mode == 'abs':
abs_tcp_pose = tcp_pose
else:
print("ERROR(enviroment.py): mode is not correct.")
abs_tcp_pose = [0, 0, 0, 0, 0, 0]
self.robot.reset_pose(abs_tcp_pose=abs_tcp_pose)
def reset(self,
mode = 'rel',
tcp_pose = [0, 0, 0, 0, 0, 0],
base_pose = [0, 0, 0, 0, 0, 0],
tool_pose = [0, 0, 0, 0, 0, 0],
work_pose = [0, 0, 0, 0, 0, 0]):
if self._is_init_env == False:
return self.init_env(mode = mode,
robot_tcp_pose = tcp_pose,
robot_base_pose = base_pose,
robot_tool_pose = tool_pose,
work_base_pose = work_pose)
# For faster processing
'''
if np.linalg.norm( np.array(tool_pose) - self.prev_tool_pose ) < 1e-6:
else:
'''
# Reset env
p.resetSimulation()
# Load Plane
p.loadURDF("urdf/plane/plane.urdf", self.plane_pos)
# Reset work
self.work.reset(base_pose = work_pose)
# Reset Robot
self.robot.reset_base(base_pose=base_pose, tool_pose=tool_pose)
self._reset_robot_pose(mode='rel', tcp_pose=tcp_pose)
self.initial_pos_noise = np.random.uniform(-self.max_initial_pos_noise,
self.max_initial_pos_noise, 3)
self.initial_orn_noise = np.random.uniform(-self.max_initial_orn_noise,
self.max_initial_orn_noise, 3)
self.prev_tool_pose = tool_pose
return self.observe_state(mode = mode)
def destory(self):
p.disconnect()
def step(self, action, step):
# Command values are generated here, so the ground-truth (noise-free) pose is preferred
cmd_abs_tcp_pose = np.zeros(6)
cmd_abs_tcp_pose[:3] = np.array(self._act_abs_tcp_pose[:3]) + np.array(action[:3])
cmd_abs_tcp_pose[3:6] = np.array(self._act_abs_tcp_pose[3:6]) + np.array(action[3:6])
print('next_pose:', cmd_abs_tcp_pose)
self.robot.move_to_pose(cmd_abs_tcp_pose, mode='direct')
pose, force, success, out_range = self.decision()
r = self.calc_reward(relative_pose = pose,
success = success,
out_range = out_range,
act_step = step)
done = success or out_range
return np.concatenate([pose, force]), r, done, success
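# Added note (inferred from observation_space shape=(12,) above, not stated in the
# original): the observation returned by step() is assumed to be the 6-DoF relative
# TCP pose (position + orientation) concatenated with the 6-axis force/torque reading
# scaled by inv_scaled_force_coef.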
def decision(self):
'''
observe
act_abs_tcp_pose
act_rel_tcp_pose
act_abs_work_pose
act_force
'''
act_pose_noisy, act_force = self.observe_state(mode='rel')
scaled_act_force = act_force / self.inv_scaled_force_coef
# [Note] evaluated here with the ground-truth (noise-free) pose
success_range_of_pos = 0.003
success_range_of_orn = 0.04
success = (np.linalg.norm(self._act_rel_tcp_pose[:3]) <= success_range_of_pos and \
np.linalg.norm(self._act_rel_tcp_pose[3:]) <= success_range_of_orn)
# [Note] evaluating with the ground-truth pose here is probably not correct.
out_range_of_pos = 0.1
out_range_of_orn = 0.8
out_range = any([abs(pos) > out_range_of_pos for pos in act_pose_noisy[:3]]) \
or any([abs(orn) > out_range_of_orn for orn in act_pose_noisy[3:6]])
return act_pose_noisy, scaled_act_force, success, out_range
def observe_state(self, mode='rel'):
self._act_abs_tcp_pose, self.act_force, _ = self.robot.get_state()
self._act_abs_work_pose = self.work.get_state()
self._act_rel_tcp_pose = np.array(self._act_abs_tcp_pose) - np.array(self._act_abs_work_pose)
'''
Noise handling
'''
act_rel_tcp_pose_noisy = np.zeros(6)
act_rel_tcp_pose_noisy[:3] = self._act_rel_tcp_pose[:3] + self.initial_pos_noise
act_rel_tcp_pose_noisy[3:6] = self._act_rel_tcp_pose[3:6] + self.initial_orn_noise
act_rel_tcp_pose_noisy[:3] += np.random.uniform(-self.max_step_pos_noise,
self.max_step_pos_noise, 3)
act_rel_tcp_pose_noisy[3:6] += np.random.uniform(-self.max_step_orn_noise,
self.max_step_orn_noise, 3)
if mode == 'rel':
return act_rel_tcp_pose_noisy, self.act_force
elif mode == 'abs':
act_abs_tcp_pose_noisy = np.zeros(6)
act_abs_tcp_pose_noisy[:3] = self._act_abs_tcp_pose[:3] + self.initial_pos_noise
act_abs_tcp_pose_noisy[3:6] = self._act_abs_tcp_pose[3:6] + self.initial_orn_noise
act_abs_work_pose_noisy = np.zeros(6)
act_abs_work_pose_noisy[:3] = self._act_abs_work_pose[:3] + self.initial_pos_noise
act_abs_work_pose_noisy[3:6] = self._act_abs_work_pose[3:6] + self.initial_orn_noise
return act_abs_tcp_pose_noisy, act_abs_work_pose_noisy, self.act_force
def calc_reward(self, relative_pose, success, out_range, act_step):
return self.reward.reward_function(relative_pose, success, out_range, act_step)
def scale_action(self, action):
scaled_action = deepcopy(action)
scaled_action[:3]*=self.step_max_pos
scaled_action[3:]*=self.step_max_orn
return scaled_action | 2.484375 | 2 |
Thread/Threading.py | zxg110/PythonGrammer | 0 | 11418 | import _thread
import time
import threading
#
# def print_time(threadName,delay):
# count = 0;
# while count < 5:
# time.sleep(delay)
# count += 1;
# print("%s: %s" % (threadName, time.ctime(time.time())))
#
# try:
# _thread.start_new(print_time,("Thread-1",2,))
# _thread.start_new(print_time("Thread-2",4))
# except:
# print("error")
#
# while 1:
# pass
# Python 3 provides thread support through two standard libraries: _thread and threading.
# _thread offers low-level, primitive threads and a simple lock; its functionality is fairly limited compared with the threading module.
# The threading module includes all the methods of _thread and provides additional ones:
# threading.currentThread(): returns the current thread object.
# threading.enumerate(): returns a list of the threads that are currently running. "Running" means started and not yet finished; threads that have not started or have already terminated are not included.
# threading.activeCount(): returns the number of running threads; equivalent to len(threading.enumerate()).
# Besides these functions, the threading module also provides the Thread class, which offers the following methods:
# run(): the method representing the thread's activity.
# start(): starts the thread's activity.
# join([time]): if main thread A creates child thread B and calls B.join(), then A waits at that call
# until B finishes before continuing. The time argument is the maximum time to wait; once it elapses,
# join() returns whether or not the thread has finished, and the main thread (or caller) continues.
# isAlive(): returns whether the thread is alive.
# getName(): returns the thread's name.
# setName(): sets the thread's name.
exitFlag = 0
class MyThread(threading.Thread):
def __init__(self,threadID,name,counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
print ("开始线程:" + self.name)
print_time(self.name, 2,self.counter)
print ("退出线程:" + self.name)
def print_time(threadName, delay, counter):
while counter:
# if exitFlag:
# threadName.exit()
time.sleep(delay)
print("%s: %s" % (threadName, time.ctime(time.time())))
counter -= 1
# 创建新线程
thread1 = MyThread(1, "Thread-1", 5)
thread2 = MyThread(2, "Thread-2", 5)
# 开启新线程
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print ("退出主线程")
| 3.734375 | 4 |
server/petsAPI/views.py | StoyanDimStoyanov/ReactDJango | 0 | 11419 | <reponame>StoyanDimStoyanov/ReactDJango
from django.shortcuts import render
from rest_framework import generics
# Create your views here.
from petsAPI.models import Pets
from petsAPI.serializers import PetSerializer
def index(req):
return render(req, 'index.html')
class PetsListApiView(generics.ListCreateAPIView):
queryset = Pets.objects.all()
serializer_class = PetSerializer
class PetDetailsApiView(generics.RetrieveUpdateDestroyAPIView):
queryset = Pets.objects.all()
serializer_class = PetSerializer | 2.015625 | 2 |
cluster_faces.py | sandhyalaxmiK/faces_clustering | 0 | 11420 | <filename>cluster_faces.py
import face_recognition
import sys,os
import re,cv2
def sorted_alphanumeric(data):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(data, key=alphanum_key)
input_dir_path=sys.argv[1]
output_dir_path=sys.argv[2]
if not os.path.exists(output_dir_path):
os.mkdir(output_dir_path)
if not os.path.exists(output_dir_path+'/'+str(1)):
os.mkdir(output_dir_path+'/'+str(1))
input_images=sorted_alphanumeric(os.listdir(input_dir_path))
cv2.imwrite(output_dir_path+'/'+str(1)+'/'+input_images[0],cv2.imread(input_dir_path+'/'+input_images[0]))
if not os.path.exists(output_dir_path+'/back_imgs'):
os.mkdir(output_dir_path+'/back_imgs')
if not os.path.exists(output_dir_path+'/error'):
os.mkdir(output_dir_path+'/error')
for img_path in input_images[1:]:
try:
prev_similarity=0
img=face_recognition.load_image_file(input_dir_path+'/'+img_path)
img_encoding=face_recognition.face_encodings(img)
if img_encoding==[]:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/back_imgs/'+img_path,img)
continue
img_encoding=face_recognition.face_encodings(img)[0]
imgs_dirs=sorted_alphanumeric(os.listdir(output_dir_path))
imgs_dirs=list(set(imgs_dirs)-set(['error','back_imgs']))
for img_dir in imgs_dirs:
            check_img=face_recognition.load_image_file(output_dir_path+'/'+img_dir+'/'+sorted_alphanumeric(os.listdir(output_dir_path+'/'+img_dir))[0])
            check_img_encoding=face_recognition.face_encodings(check_img)[0]
            # compare_faces() returns a list of booleans, so use face_distance() to get a numeric similarity
            similarity=1-face_recognition.face_distance([img_encoding], check_img_encoding)[0]
            if similarity>prev_similarity:
                prev_similarity=similarity
                result_dir=img_dir
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
if prev_similarity<0.6:
new_dir=str(len(os.listdir(output_dir_path))+1)
os.mkdir(output_dir_path+'/'+new_dir)
cv2.imwrite(output_dir_path+'/'+new_dir+'/'+img_path,img)
else:
cv2.imwrite(output_dir_path+'/'+result_dir+'/'+img_path,img)
except:
img=cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
cv2.imwrite(output_dir_path+'/error/'+img_path,img)
| 2.703125 | 3 |
src/bbdata/endpoint/output/objects.py | big-building-data/bbdata-python | 0 | 11421 | import requests
from bbdata.config import output_api_url
from bbdata.util import handle_response
class Objects:
base_path = "/objects"
auth = None
def __init__(self, auth):
self.auth = auth
def get_all(self, tags=None, search=None, page=None, per_page=None,
writable=False):
"""
Get the list of accessible objects.
GET /objects
https://bbdata.daplab.ch/api/#objects_get
"""
params = {
"tags": tags,
"search": search,
"page": page,
"perPage": per_page,
"writable": writable,
}
url = output_api_url + self.base_path
r = requests.get(url, params, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def put(self, name, unit_symbol, owner, description=None):
"""
Create a new object.
PUT /objects
https://bbdata.daplab.ch/api/#objects_put
"""
json = {
"name": name,
"description": description,
"unitSymbol": unit_symbol,
'owner': owner
}
url = output_api_url + self.base_path
r = requests.put(url, json=json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def get(self, object_id):
"""
Get an object.
GET /objects/{objectIs}
https://bbdata.daplab.ch/api/#objects__objectid__get
"""
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.get(url, headers=self.auth.headers)
# return ObjectResponse(r.json())
return handle_response(r.status_code, r.json())
def post(self, object_id, data):
"""
Edit the name and/or the description of the object.
Only the properties appearing in the body will be modified.
POST /objects/{objectId}
https://bbdata.daplab.ch/api/#objects__objectid__post
"""
# TODO The data to send isn't define in the API Docs
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.post(url, data, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def delete(self, object_id):
"""
Delete the object with the given id
POST /objects/{objectId}
https://bbdata.daplab.ch/api/#objects__objectid__delete
"""
# TODO This method is in the Postman profile but isn't in the docs
url = output_api_url + self.base_path + "/" + str(object_id)
r = requests.delete(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def post_disable(self, object_id):
"""
Disable this object. All associated tokens will be removed.
POST /objects/{objectId}/disable
https://bbdata.daplab.ch/api/#objects__objectid__disable_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/disable"
r = requests.post(url, headers=self.auth.headers)
return handle_response(r.status_code, True)
def post_enable(self, object_id):
"""
Enable this object.
POST /objects/{objectId}/enable
https://bbdata.daplab.ch/api/#objects__objectid__enable_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/enable"
r = requests.post(url, headers=self.auth.headers)
return handle_response(r.status_code, True)
def get_tokens(self, object_id, description=None):
"""
Get the list of tokens for the object. A token is used to submit new
measures (see input-api).
An optional description can be passed in the
body (max 65 characters).
GET /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_get
"""
# TODO The API docs says it's possible to pass an optional description
# but it looks like it's a mistake for a GET request...
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
json = {
"description": description
}
r = requests.get(url, json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def put_tokens(self, object_id):
"""
Generate a new secured token.
PUT /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_put
"""
# TODO The optional description should probably be added in this
# method
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
r = requests.put(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def post_tokens(self, object_id, description):
"""
Edit the token's description.
POST /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_post
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
json = {
"description": description
}
r = requests.post(url, json=json, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
def delete_tokens(self, object_id, token_id):
"""
Revoke a token.
DELETE /objects/{objectId}/tokens
https://bbdata.daplab.ch/api/#objects__objectid__tokens_delete
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tokens"
params = {
"tokenId": token_id
}
r = requests.delete(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def put_tags(self, object_id, tags):
"""
Add tags to the object.
PUT /objects/{objectId}/tags
https://bbdata.daplab.ch/api/#objects__objectid__tags_put
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tags"
params = {
"tags": tags
}
r = requests.put(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def delete_tags(self, object_id, tags):
"""
Remove tags.
DELETE /objects/{objectId}/tags
https://bbdata.daplab.ch/api/#objects__objectid__tags_delete
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/tags"
params = {
"tags": tags
}
r = requests.put(url, params=params, headers=self.auth.headers)
return handle_response(r.status_code, True)
def get_comments(self, object_id):
"""
Get all comments attached to this object. Use the /comments endpoint
for more actions.
GET /objects/{objectId}/comments
https://bbdata.daplab.ch/api/#objects__objectid__comments_get
"""
url = output_api_url + self.base_path + "/" + str(object_id) \
+ "/comments"
r = requests.get(url, headers=self.auth.headers)
return handle_response(r.status_code, r.json())
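# Illustrative usage sketch (not part of the API docs above): `auth` is this package's
# auth object exposing request headers, and the exact shape of the value returned by
# handle_response() is an assumption here.
#
#   objects = Objects(auth)
#   accessible = objects.get_all(tags="building", page=1, per_page=20)
#   one = objects.get(1234)
#   comments = objects.get_comments(1234)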
| 2.78125 | 3 |
python/tako/client/__init__.py | vyomkeshj/tako | 0 | 11422 | <gh_stars>0
from .exception import TakoException, TaskFailed # noqa
from .session import connect # noqa
| 1.109375 | 1 |
helpers/parser.py | yasahi-hpc/AMRNet | 0 | 11423 | <gh_stars>0
import argparse
def parse():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-data_dir', \
action='store', \
nargs='?', \
const=None, \
default='./dataset', \
type=str, \
choices=None, \
help='directory of datasets', \
metavar=None)
parser.add_argument('--device', \
action='store', \
nargs='?', \
const=None, \
default='cuda', \
type=str, \
choices=None, \
help='Device to run the model', \
metavar=None)
parser.add_argument('--dim', \
action='store', \
nargs='?', \
const=None, \
default=2, \
type=int, \
choices=None, \
help='Dimension of the model', \
metavar=None)
parser.add_argument('--batch_size', \
action='store', \
nargs='?', \
const=None, \
default=25, \
type=int, \
choices=None, \
help='Batch size', \
metavar=None)
parser.add_argument('--n_epochs', \
action='store', \
nargs='?', \
const=None, \
default=3, \
type=int, \
choices=None, \
help='Number of epochs', \
metavar=None)
parser.add_argument('--run_number', \
action='store', \
nargs='?', \
const=None, \
default=0, \
type=int, \
choices=None, \
help='Run number', \
metavar=None)
parser.add_argument('--padding_mode', \
action='store', \
nargs='?', \
const=None, \
default='reflect', \
type=str, \
choices=None, \
help='Padding type (default: reflect)', \
metavar=None)
parser.add_argument('--preprocess_type', \
action='store', \
nargs='?', \
const=None, \
default='normalization', \
type=str, \
choices=None, \
help='Preprocess type (default: normalization)', \
metavar=None)
parser.add_argument('--model_name', \
action='store', \
nargs='?', \
const=None, \
default='AMR_Net', \
type=str, \
choices=None, \
help='Name of the model (default: AMR_Net)', \
metavar=None)
parser.add_argument('--lr', \
action='store', \
nargs='?', \
const=None, \
default=0.0002, \
type=float, \
choices=None, \
help='Learning rate', \
metavar=None)
parser.add_argument('--beta_1', \
action='store', \
nargs='?', \
const=None, \
default=0.9, \
type=float, \
choices=None, \
help='beta_1 for Adam', \
metavar=None)
parser.add_argument('--beta_2', \
action='store', \
nargs='?', \
const=None, \
default=0.999, \
type=float, \
choices=None, \
help='beta_2 for Adam', \
metavar=None)
# Used for inference
parser.add_argument('--inference_mode', \
action='store_true', \
default=False, \
help='train or inference')
parser.add_argument('-state_file_dir', \
action='store', \
nargs='?', \
const=None, \
default='./', \
type=str, \
choices=None, \
help='directory storing torch state files', \
metavar=None)
parser.add_argument('--load_nth_state_file', \
action='store', \
nargs='?', \
const=None, \
default=0, \
type=int, \
choices=None, \
help='nth state file to load', \
metavar=None)
args = parser.parse_args()
return args
| 2.296875 | 2 |
api/patients/urls.py | Wellheor1/l2 | 10 | 11424 | <filename>api/patients/urls.py
from django.urls import path
from . import views
urlpatterns = [
path('search-card', views.patients_search_card),
path('search-individual', views.patients_search_individual),
path('search-l2-card', views.patients_search_l2_card),
path('create-l2-individual-from-card', views.create_l2_individual_from_card),
path('card/<int:card_id>', views.patients_get_card_data),
path('card/save', views.patients_card_save),
path('card/archive', views.patients_card_archive),
path('card/unarchive', views.patients_card_unarchive),
path('individuals/search', views.individual_search),
path('individuals/sex', views.get_sex_by_param),
path('individuals/edit-doc', views.edit_doc),
path('individuals/edit-agent', views.edit_agent),
path('individuals/update-cdu', views.update_cdu),
path('individuals/update-wia', views.update_wia),
path('individuals/sync-rmis', views.sync_rmis),
path('individuals/sync-tfoms', views.sync_tfoms),
path('individuals/load-anamnesis', views.load_anamnesis),
path('individuals/load-dreg', views.load_dreg),
path('individuals/load-screening', views.load_screening),
path('individuals/load-vaccine', views.load_vaccine),
path('individuals/load-ambulatory-data', views.load_ambulatory_data),
path('individuals/load-benefit', views.load_benefit),
path('individuals/load-dreg-detail', views.load_dreg_detail),
path('individuals/load-vaccine-detail', views.load_vaccine_detail),
path('individuals/load-ambulatorydata-detail', views.load_ambulatory_data_detail),
path('individuals/load-ambulatory-history', views.load_ambulatory_history),
path('individuals/load-benefit-detail', views.load_benefit_detail),
path('individuals/save-dreg', views.save_dreg),
path('individuals/save-plan-dreg', views.update_dispensary_reg_plans),
path('individuals/save-vaccine', views.save_vaccine),
path('individuals/save-ambulatory-data', views.save_ambulatory_data),
path('individuals/save-benefit', views.save_benefit),
path('individuals/save-anamnesis', views.save_anamnesis),
path('is-card', views.is_l2_card),
path('save-screening-plan', views.update_screening_reg_plan),
]
| 1.742188 | 2 |
resolwe_bio/kb/migrations/0002_alter_field_max_length.py | JureZmrzlikar/resolwe-bio | 0 | 11425 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-15 07:06
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resolwe_bio_kb', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='feature',
name='aliases',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=256), blank=True, default=[], size=None),
),
migrations.AlterField(
model_name='feature',
name='name',
field=models.CharField(max_length=1024),
),
migrations.AlterField(
model_name='feature',
name='sub_type',
field=models.CharField(choices=[(b'protein-coding', b'Protein-coding'), (b'pseudo', b'Pseudo'), (b'rRNA', b'rRNA'), (b'ncRNA', b'ncRNA'), (b'snRNA', b'snRNA'), (b'snoRNA', b'snoRNA'), (b'tRNA', b'tRNA'), (b'asRNA', b'asRNA'), (b'other', b'Other'), (b'unknown', b'Unknown')], max_length=20),
),
]
| 1.734375 | 2 |
setup.py | conan-hdk/xlwings | 0 | 11426 | <reponame>conan-hdk/xlwings<gh_stars>0
import os
import sys
import re
import glob
from setuptools import setup, find_packages
# long_description: Take from README file
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
readme = f.read()
# Version Number
with open(os.path.join(os.path.dirname(__file__), 'xlwings', '__init__.py')) as f:
version = re.compile(r".*__version__ = '(.*?)'", re.S).match(f.read()).group(1)
# Dependencies
if sys.platform.startswith('win'):
if sys.version_info[:2] >= (3, 7):
pywin32 = 'pywin32 >= 224'
else:
pywin32 = 'pywin32'
install_requires = [pywin32]
# This places dlls next to python.exe for standard setup and in the parent folder for virtualenv
data_files = [('', glob.glob('xlwings*.dll'))]
elif sys.platform.startswith('darwin'):
install_requires = ['psutil >= 2.0.0', 'appscript >= 1.0.1']
data_files = [(os.path.expanduser("~") + '/Library/Application Scripts/com.microsoft.Excel', [f'xlwings/xlwings-{version}.applescript'])]
else:
if os.environ.get('READTHEDOCS', None) == 'True' or os.environ.get('INSTALL_ON_LINUX') == '1':
data_files = []
install_requires = []
else:
raise OSError("xlwings requires an installation of Excel and therefore only works on Windows and macOS. To enable the installation on Linux nevertheless, do: export INSTALL_ON_LINUX=1; pip install xlwings")
extras_require = {
'pro': ['cryptography', 'Jinja2', 'pdfrw'],
'all': ['cryptography', 'Jinja2', 'pandas', 'matplotlib', 'plotly', 'flask', 'requests', 'pdfrw']
}
setup(
name='xlwings',
version=version,
url='https://www.xlwings.org',
license='BSD 3-clause',
author='Zoomer Analytics LLC',
author_email='<EMAIL>',
description='Make Excel fly: Interact with Excel from Python and vice versa.',
long_description=readme,
data_files=data_files,
packages=find_packages(exclude=('tests', 'tests.*',)),
package_data={'xlwings': ['xlwings.bas', 'Dictionary.cls', '*.xlsm', '*.xlam', '*.applescript', 'addin/xlwings.xlam', 'addin/xlwings_unprotected.xlam']},
keywords=['xls', 'excel', 'spreadsheet', 'workbook', 'vba', 'macro'],
install_requires=install_requires,
extras_require=extras_require,
entry_points={'console_scripts': ['xlwings=xlwings.cli:main'],},
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Office/Business :: Financial :: Spreadsheet',
'License :: OSI Approved :: BSD License'],
platforms=['Windows', 'Mac OS X'],
python_requires='>=3.6',
)
| 2.015625 | 2 |
secedgar/tests/test_cli.py | abbadata/sec-edgar | 0 | 11427 | import pytest
from click.testing import CliRunner
from secedgar.cli import daily, filing
from secedgar.utils.exceptions import FilingTypeError
def run_cli_command(cli, user_input, directory, catch_exceptions=False):
runner = CliRunner()
user_input = user_input + " --directory {}".format(directory)
return runner.invoke(cli, user_input, catch_exceptions=catch_exceptions)
def check_bad_inputs(cli, user_input, expected_exception, directory):
# SystemExit does not raise exception by runner
if expected_exception is SystemExit:
result = run_cli_command(cli, user_input, directory)
assert result.exit_code != 0
else:
with pytest.raises(expected_exception):
run_cli_command(cli, user_input, directory)
class TestCLIFiling:
@pytest.mark.parametrize(
"user_input,expected_exception",
[
("-l aapl msft Facebook", SystemExit), # missing filing type
("-l aapl -t null", FilingTypeError), # unrecognized filing type
("-l aapl -t FILING_10Q -n abc", SystemExit), # count is not int
("-l aapl -t FILING_10Q -n 0", ValueError) # no filings available if 0 picked
]
)
def test_filing_bad_inputs(self, user_input, expected_exception, tmp_data_directory):
check_bad_inputs(filing, user_input, expected_exception, tmp_data_directory)
@pytest.mark.parametrize(
"user_input",
[
"-l aapl msft fb FILING_10Q",
"-l aapl msft fb FILING_10Q -n 10",
"-l aapl msft fb FILING_10Q -n 1"
]
)
def test_multiple_companies_input(self, user_input, tmp_data_directory):
pass
class TestCLIDaily:
@pytest.mark.parametrize(
"user_input,expected_exception",
[
("", SystemExit),
("-d 2020", ValueError)
]
)
def test_daily_bad_inputs(self, user_input, expected_exception, tmp_data_directory):
check_bad_inputs(daily, user_input, expected_exception, tmp_data_directory)
| 2.328125 | 2 |
hydro.py | garethcmurphy/hydrosolve | 0 | 11428 | import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
nstep=200
nx=400
nv=3
u=np.zeros((nx,nv))
prim=np.zeros((nx,nv))
gam=5./3.
dx=1./nx
dt=1e-3
time=0
x=np.linspace(0,1,num=nx)
def ptou(pri):
u=np.zeros((nx,nv))
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=rho*v
u[:,0]=rho
u[:,1]=mom
u[:,2]=0.5*mom*v+prs/(gam-1)
return(u)
def utop(u):
pri=np.zeros((nx,nv))
rho=u[:,0]
mom=u[:,1]
ene=u[:,2]
vel=mom/(rho+1e-6)
pri[:,0]=rho
pri[:,1]=vel
pri[:,2]=(ene-0.5*mom*vel)*(gam-1)
return(pri)
def getmaxv(pri):
rho=pri[:,0]
vel=pri[:,1]
prs=pri[:,2]
cs=np.sqrt(gam*prs/rho)
return(max(abs(vel)+cs))
def getflux(u):
f=np.zeros((nx,nv))
pri=utop(u)
rho=pri[:,0]
v=pri[:,1]
prs=pri[:,2]
mom=u[:,1]
ene=u[:,2]
f[:,0]=mom
f[:,1]=mom*v+prs
f[:,2]=(ene+prs)*v
return(f)
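# The functions above encode the 1D Euler equations in conservative form:
#   U = (rho, rho*v, E),   F(U) = (rho*v, rho*v**2 + prs, (E + prs)*v),
#   E = 0.5*rho*v**2 + prs/(gam - 1),
# and the time loop below advances U with a Lax-Friedrichs-type step
#   U_i^{n+1} = 0.5*(U_{i-1} + U_{i+1}) - cfl*(dt/dx)*(F_{i+1} - F_{i-1}),
# where the usual factor 1/2 is folded into cfl (= 0.49).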
prim[:,0]=1.
prim[:,1]=0.
prim[:,2]=1.
for i in range(int(nx/2),nx):
prim[i,0]=0.1
prim[i,1]=0.
prim[i,2]=0.125
print (prim[:,2])
u=ptou(prim)
uold=u
pold=prim
p=prim  # primitives used for the first CFL estimate inside the time loop (p was otherwise undefined on step 0)
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(x,prim[:,0],label='density')
ax2.plot(x,prim[:,1],label='velocity')
ax3.plot(x,prim[:,2],label='pressure')
fig.show()
for nstep in range(0,nstep):
print (time)
um=np.roll(u, 1,axis=0)
up=np.roll(u,-1,axis=0)
um[0,:] =um[1,:]
up[nx-1,:]=up[nx-2,:]
fm=getflux(um)
fp=getflux(up)
cfl=0.49
dtdx=1./getmaxv(p)
dt=dtdx*dx
time=time+dt
un=0.5*(um+up) - cfl*dtdx* (fp-fm)
u=un
p=utop(u)
plt.close(fig)
fig = plt.figure()
gs = gridspec.GridSpec(nv,1)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax3 = fig.add_subplot(gs[2,0])
ax1.plot(p[:,0])
ax2.plot(p[:,1])
ax3.plot(p[:,2])
fig.show()
| 2.46875 | 2 |
tests/test_pluralize.py | weixu365/pluralizer-py | 4 | 11429 |
import unittest
from pluralizer import Pluralizer
import re
# Standard singular/plural matches.
#
# @type {Array}
BASIC_TESTS = [
# Uncountables.
['firmware', 'firmware'],
['fish', 'fish'],
['media', 'media'],
['moose', 'moose'],
['police', 'police'],
['sheep', 'sheep'],
['series', 'series'],
['agenda', 'agenda'],
['news', 'news'],
['reindeer', 'reindeer'],
['starfish', 'starfish'],
['smallpox', 'smallpox'],
['tennis', 'tennis'],
['chickenpox', 'chickenpox'],
['shambles', 'shambles'],
['garbage', 'garbage'],
['you', 'you'],
['wildlife', 'wildlife'],
['Staff', 'Staff'],
['STAFF', 'STAFF'],
['turquois', 'turquois'],
['carnivorous', 'carnivorous'],
['only', 'only'],
['aircraft', 'aircraft'],
# Latin.
['veniam', 'veniam'],
# Pluralization.
['this', 'these'],
['that', 'those'],
['is', 'are'],
['man', 'men'],
['superman', 'supermen'],
['ox', 'oxen'],
['bus', 'buses'],
['airbus', 'airbuses'],
['railbus', 'railbuses'],
['wife', 'wives'],
['guest', 'guests'],
['thing', 'things'],
['mess', 'messes'],
['guess', 'guesses'],
['person', 'people'],
['meteor', 'meteors'],
['chateau', 'chateaus'],
['lap', 'laps'],
['cough', 'coughs'],
['death', 'deaths'],
['coach', 'coaches'],
['boy', 'boys'],
['toy', 'toys'],
['guy', 'guys'],
['girl', 'girls'],
['chair', 'chairs'],
['toe', 'toes'],
['tiptoe', 'tiptoes'],
['tomato', 'tomatoes'],
['potato', 'potatoes'],
['tornado', 'tornadoes'],
['torpedo', 'torpedoes'],
['hero', 'heroes'],
['superhero', 'superheroes'],
['volcano', 'volcanoes'],
['canto', 'cantos'],
['hetero', 'heteros'],
['photo', 'photos'],
['portico', 'porticos'],
['quarto', 'quartos'],
['kimono', 'kimonos'],
['albino', 'albinos'],
['cherry', 'cherries'],
['piano', 'pianos'],
['pro', 'pros'],
['combo', 'combos'],
['turbo', 'turbos'],
['bar', 'bars'],
['crowbar', 'crowbars'],
['van', 'vans'],
['tobacco', 'tobaccos'],
['afficionado', 'afficionados'],
['monkey', 'monkeys'],
['neutrino', 'neutrinos'],
['rhino', 'rhinos'],
['steno', 'stenos'],
['latino', 'latinos'],
['casino', 'casinos'],
['avocado', 'avocados'],
['commando', 'commandos'],
['tuxedo', 'tuxedos'],
['speedo', 'speedos'],
['dingo', 'dingoes'],
['echo', 'echoes'],
['nacho', 'nachos'],
['motto', 'mottos'],
['psycho', 'psychos'],
['poncho', 'ponchos'],
['pass', 'passes'],
['ghetto', 'ghettos'],
['mango', 'mangos'],
['lady', 'ladies'],
['bath', 'baths'],
['professional', 'professionals'],
['dwarf', 'dwarves'], # Proper spelling is "dwarfs".
['encyclopedia', 'encyclopedias'],
['louse', 'lice'],
['roof', 'roofs'],
['woman', 'women'],
['formula', 'formulas'],
['polyhedron', 'polyhedra'],
['index', 'indices'], # Maybe "indexes".
['matrix', 'matrices'],
['vertex', 'vertices'],
['axe', 'axes'], # Could also be plural of "ax".
['pickaxe', 'pickaxes'],
['crisis', 'crises'],
['criterion', 'criteria'],
['phenomenon', 'phenomena'],
['addendum', 'addenda'],
['datum', 'data'],
['forum', 'forums'],
['millennium', 'millennia'],
['alumnus', 'alumni'],
['medium', 'mediums'],
['census', 'censuses'],
['genus', 'genera'],
['dogma', 'dogmata'],
['life', 'lives'],
['hive', 'hives'],
['kiss', 'kisses'],
['dish', 'dishes'],
['human', 'humans'],
['knife', 'knives'],
['phase', 'phases'],
['judge', 'judges'],
['class', 'classes'],
['witch', 'witches'],
['church', 'churches'],
['massage', 'massages'],
['prospectus', 'prospectuses'],
['syllabus', 'syllabi'],
['viscus', 'viscera'],
['cactus', 'cacti'],
['hippopotamus', 'hippopotamuses'],
['octopus', 'octopuses'],
['platypus', 'platypuses'],
['kangaroo', 'kangaroos'],
['atlas', 'atlases'],
['stigma', 'stigmata'],
['schema', 'schemata'],
['phenomenon', 'phenomena'],
['diagnosis', 'diagnoses'],
['mongoose', 'mongooses'],
['mouse', 'mice'],
['liturgist', 'liturgists'],
['box', 'boxes'],
['gas', 'gases'],
['self', 'selves'],
['chief', 'chiefs'],
['quiz', 'quizzes'],
['child', 'children'],
['shelf', 'shelves'],
['fizz', 'fizzes'],
['tooth', 'teeth'],
['thief', 'thieves'],
['day', 'days'],
['loaf', 'loaves'],
['fix', 'fixes'],
['spy', 'spies'],
['vertebra', 'vertebrae'],
['clock', 'clocks'],
['lap', 'laps'],
['cuff', 'cuffs'],
['leaf', 'leaves'],
['calf', 'calves'],
['moth', 'moths'],
['mouth', 'mouths'],
['house', 'houses'],
['proof', 'proofs'],
['hoof', 'hooves'],
['elf', 'elves'],
['turf', 'turfs'],
['craft', 'crafts'],
['die', 'dice'],
['penny', 'pennies'],
['campus', 'campuses'],
['virus', 'viri'],
['iris', 'irises'],
['bureau', 'bureaus'],
['kiwi', 'kiwis'],
['wiki', 'wikis'],
['igloo', 'igloos'],
['ninja', 'ninjas'],
['pizza', 'pizzas'],
['kayak', 'kayaks'],
['canoe', 'canoes'],
['tiding', 'tidings'],
['pea', 'peas'],
['drive', 'drives'],
['nose', 'noses'],
['movie', 'movies'],
['status', 'statuses'],
['alias', 'aliases'],
['memorandum', 'memorandums'],
['language', 'languages'],
['plural', 'plurals'],
['word', 'words'],
['multiple', 'multiples'],
['reward', 'rewards'],
['sandwich', 'sandwiches'],
['subway', 'subways'],
['direction', 'directions'],
['land', 'lands'],
['row', 'rows'],
['grow', 'grows'],
['flow', 'flows'],
['rose', 'roses'],
['raise', 'raises'],
['friend', 'friends'],
['follower', 'followers'],
['male', 'males'],
['nail', 'nails'],
['sex', 'sexes'],
['tape', 'tapes'],
['ruler', 'rulers'],
['king', 'kings'],
['queen', 'queens'],
['zero', 'zeros'],
['quest', 'quests'],
['goose', 'geese'],
['foot', 'feet'],
['ex', 'exes'],
['reflex', 'reflexes'],
['heat', 'heats'],
['train', 'trains'],
['test', 'tests'],
['pie', 'pies'],
['fly', 'flies'],
['eye', 'eyes'],
['lie', 'lies'],
['node', 'nodes'],
['trade', 'trades'],
['chinese', 'chinese'],
['please', 'pleases'],
['japanese', 'japanese'],
['regex', 'regexes'],
['license', 'licenses'],
['zebra', 'zebras'],
['general', 'generals'],
['corps', 'corps'],
['pliers', 'pliers'],
['flyer', 'flyers'],
['scissors', 'scissors'],
['fireman', 'firemen'],
['chirp', 'chirps'],
['harp', 'harps'],
['corpse', 'corpses'],
['dye', 'dyes'],
['move', 'moves'],
['zombie', 'zombies'],
['variety', 'varieties'],
['talkie', 'talkies'],
['walkie-talkie', 'walkie-talkies'],
['groupie', 'groupies'],
['goonie', 'goonies'],
['lassie', 'lassies'],
['genie', 'genies'],
['foodie', 'foodies'],
['faerie', 'faeries'],
['collie', 'collies'],
['obloquy', 'obloquies'],
['looey', 'looies'],
['osprey', 'ospreys'],
['cover', 'covers'],
['tie', 'ties'],
['groove', 'grooves'],
['bee', 'bees'],
['ave', 'aves'],
['wave', 'waves'],
['wolf', 'wolves'],
['airwave', 'airwaves'],
['archive', 'archives'],
['arch', 'arches'],
['dive', 'dives'],
['aftershave', 'aftershaves'],
['cave', 'caves'],
['grave', 'graves'],
['gift', 'gifts'],
['nerve', 'nerves'],
['nerd', 'nerds'],
['carve', 'carves'],
['rave', 'raves'],
['scarf', 'scarves'],
['sale', 'sales'],
['sail', 'sails'],
['swerve', 'swerves'],
['love', 'loves'],
['dove', 'doves'],
['glove', 'gloves'],
['wharf', 'wharves'],
['valve', 'valves'],
['werewolf', 'werewolves'],
['view', 'views'],
['emu', 'emus'],
['menu', 'menus'],
['wax', 'waxes'],
['fax', 'faxes'],
['nut', 'nuts'],
['crust', 'crusts'],
['lemma', 'lemmata'],
['anathema', 'anathemata'],
['analysis', 'analyses'],
['locus', 'loci'],
['uterus', 'uteri'],
['curriculum', 'curricula'],
['quorum', 'quora'],
['genius', 'geniuses'],
['flower', 'flowers'],
['crash', 'crashes'],
['soul', 'souls'],
['career', 'careers'],
['planet', 'planets'],
['son', 'sons'],
['sun', 'suns'],
['drink', 'drinks'],
['diploma', 'diplomas'],
['dilemma', 'dilemmas'],
['grandma', 'grandmas'],
['no', 'nos'],
['yes', 'yeses'],
['employ', 'employs'],
['employee', 'employees'],
['history', 'histories'],
['story', 'stories'],
['purchase', 'purchases'],
['order', 'orders'],
['key', 'keys'],
['bomb', 'bombs'],
['city', 'cities'],
['sanity', 'sanities'],
['ability', 'abilities'],
['activity', 'activities'],
['cutie', 'cuties'],
['validation', 'validations'],
['floaty', 'floaties'],
['nicety', 'niceties'],
['goalie', 'goalies'],
['crawly', 'crawlies'],
['duty', 'duties'],
['scrutiny', 'scrutinies'],
['deputy', 'deputies'],
['beauty', 'beauties'],
['bank', 'banks'],
['family', 'families'],
['tally', 'tallies'],
['ally', 'allies'],
['alley', 'alleys'],
['valley', 'valleys'],
['medley', 'medleys'],
['melody', 'melodies'],
['trolly', 'trollies'],
['thunk', 'thunks'],
['koala', 'koalas'],
['special', 'specials'],
['book', 'books'],
['knob', 'knobs'],
['crab', 'crabs'],
['plough', 'ploughs'],
['high', 'highs'],
['low', 'lows'],
['hiccup', 'hiccups'],
['bonus', 'bonuses'],
['circus', 'circuses'],
['abacus', 'abacuses'],
['phobia', 'phobias'],
['case', 'cases'],
['lace', 'laces'],
['trace', 'traces'],
['mage', 'mages'],
['lotus', 'lotuses'],
['motorbus', 'motorbuses'],
['cutlas', 'cutlases'],
['tequila', 'tequilas'],
['liar', 'liars'],
['delta', 'deltas'],
['visa', 'visas'],
['flea', 'fleas'],
['favela', 'favelas'],
['cobra', 'cobras'],
['finish', 'finishes'],
['gorilla', 'gorillas'],
['mass', 'masses'],
['face', 'faces'],
['rabbit', 'rabbits'],
['adventure', 'adventures'],
['breeze', 'breezes'],
['brew', 'brews'],
['canopy', 'canopies'],
['copy', 'copies'],
['spy', 'spies'],
['cave', 'caves'],
['charge', 'charges'],
['cinema', 'cinemas'],
['coffee', 'coffees'],
['favourite', 'favourites'],
['themself', 'themselves'],
['country', 'countries'],
['issue', 'issues'],
['authority', 'authorities'],
['force', 'forces'],
['objective', 'objectives'],
['present', 'presents'],
['industry', 'industries'],
['believe', 'believes'],
['century', 'centuries'],
['category', 'categories'],
['eve', 'eves'],
['fee', 'fees'],
['gene', 'genes'],
['try', 'tries'],
['currency', 'currencies'],
['pose', 'poses'],
['cheese', 'cheeses'],
['clue', 'clues'],
['cheer', 'cheers'],
['litre', 'litres'],
['money', 'monies'],
['attorney', 'attorneys'],
['balcony', 'balconies'],
['cockney', 'cockneys'],
['donkey', 'donkeys'],
['honey', 'honeys'],
['smiley', 'smilies'],
['survey', 'surveys'],
['whiskey', 'whiskeys'],
['whisky', 'whiskies'],
['volley', 'volleys'],
['tongue', 'tongues'],
['suit', 'suits'],
['suite', 'suites'],
['cruise', 'cruises'],
['eave', 'eaves'],
['consultancy', 'consultancies'],
['pouch', 'pouches'],
['wallaby', 'wallabies'],
['abyss', 'abysses'],
['weekly', 'weeklies'],
['whistle', 'whistles'],
['utilise', 'utilises'],
['utilize', 'utilizes'],
['mercy', 'mercies'],
['mercenary', 'mercenaries'],
['take', 'takes'],
['flush', 'flushes'],
['gate', 'gates'],
['evolve', 'evolves'],
['slave', 'slaves'],
['native', 'natives'],
['revolve', 'revolves'],
['twelve', 'twelves'],
['sleeve', 'sleeves'],
['subjective', 'subjectives'],
['stream', 'streams'],
['beam', 'beams'],
['foam', 'foams'],
['callus', 'calluses'],
['use', 'uses'],
['beau', 'beaus'],
['gateau', 'gateaus'],
['fetus', 'fetuses'],
['luau', 'luaus'],
['pilau', 'pilaus'],
['shoe', 'shoes'],
['sandshoe', 'sandshoes'],
['zeus', 'zeuses'],
['nucleus', 'nuclei'],
['sky', 'skies'],
['beach', 'beaches'],
['brush', 'brushes'],
['hoax', 'hoaxes'],
['scratch', 'scratches'],
['nanny', 'nannies'],
['negro', 'negroes'],
['taco', 'tacos'],
['cafe', 'cafes'],
['cave', 'caves'],
['giraffe', 'giraffes'],
['goodwife', 'goodwives'],
['housewife', 'housewives'],
['safe', 'safes'],
['save', 'saves'],
['pocketknife', 'pocketknives'],
['tartufe', 'tartufes'],
['tartuffe', 'tartuffes'],
['truffle', 'truffles'],
['jefe', 'jefes'],
['agrafe', 'agrafes'],
['agraffe', 'agraffes'],
['bouffe', 'bouffes'],
['carafe', 'carafes'],
['chafe', 'chafes'],
['pouffe', 'pouffes'],
['pouf', 'poufs'],
['piaffe', 'piaffes'],
['gaffe', 'gaffes'],
['executive', 'executives'],
['cove', 'coves'],
['dove', 'doves'],
['fave', 'faves'],
['positive', 'positives'],
['solve', 'solves'],
['trove', 'troves'],
['treasure', 'treasures'],
['suave', 'suaves'],
['bluff', 'bluffs'],
['half', 'halves'],
['knockoff', 'knockoffs'],
['handkerchief', 'handkerchiefs'],
['reed', 'reeds'],
['reef', 'reefs'],
['yourself', 'yourselves'],
['sunroof', 'sunroofs'],
['plateau', 'plateaus'],
['radius', 'radii'],
['stratum', 'strata'],
['stratus', 'strati'],
['focus', 'foci'],
['fungus', 'fungi'],
['appendix', 'appendices'],
['seraph', 'seraphim'],
['cherub', 'cherubim'],
['memo', 'memos'],
['cello', 'cellos'],
['automaton', 'automata'],
['button', 'buttons'],
['crayon', 'crayons'],
['captive', 'captives'],
['abrasive', 'abrasives'],
['archive', 'archives'],
['additive', 'additives'],
['hive', 'hives'],
['beehive', 'beehives'],
['olive', 'olives'],
['black olive', 'black olives'],
['chive', 'chives'],
['adjective', 'adjectives'],
['cattle drive', 'cattle drives'],
['explosive', 'explosives'],
['executive', 'executives'],
['negative', 'negatives'],
['fugitive', 'fugitives'],
['progressive', 'progressives'],
['laxative', 'laxatives'],
['incentive', 'incentives'],
['genesis', 'geneses'],
['surprise', 'surprises'],
['enterprise', 'enterprises'],
['relative', 'relatives'],
['positive', 'positives'],
['perspective', 'perspectives'],
['superlative', 'superlatives'],
['afterlife', 'afterlives'],
['native', 'natives'],
['detective', 'detectives'],
['collective', 'collectives'],
['lowlife', 'lowlives'],
['low-life', 'low-lives'],
['strife', 'strifes'],
['pony', 'ponies'],
['phony', 'phonies'],
['felony', 'felonies'],
['colony', 'colonies'],
['symphony', 'symphonies'],
['semicolony', 'semicolonies'],
['radiotelephony', 'radiotelephonies'],
['company', 'companies'],
['ceremony', 'ceremonies'],
['carnivore', 'carnivores'],
['emphasis', 'emphases'],
['abuse', 'abuses'],
['ass', 'asses'],
['mile', 'miles'],
['consensus', 'consensuses'],
['coatdress', 'coatdresses'],
['courthouse', 'courthouses'],
['playhouse', 'playhouses'],
['crispness', 'crispnesses'],
['racehorse', 'racehorses'],
['greatness', 'greatnesses'],
['demon', 'demons'],
['lemon', 'lemons'],
['pokemon', 'pokemon'],
['pokémon', 'pokémon'],
['christmas', 'christmases'],
['zymase', 'zymases'],
['accomplice', 'accomplices'],
['amice', 'amices'],
['titmouse', 'titmice'],
['slice', 'slices'],
['base', 'bases'],
['database', 'databases'],
['rise', 'rises'],
['uprise', 'uprises'],
['size', 'sizes'],
['prize', 'prizes'],
['booby', 'boobies'],
['hobby', 'hobbies'],
['baby', 'babies'],
['cookie', 'cookies'],
['budgie', 'budgies'],
['calorie', 'calories'],
['brownie', 'brownies'],
['lolly', 'lollies'],
['hippie', 'hippies'],
['smoothie', 'smoothies'],
['techie', 'techies'],
['specie', 'species'],
['quickie', 'quickies'],
['pixie', 'pixies'],
['rotisserie', 'rotisseries'],
['porkpie', 'porkpies'],
['newbie', 'newbies'],
['veggie', 'veggies'],
['bourgeoisie', 'bourgeoisies'],
['party', 'parties'],
['apology', 'apologies'],
['ancestry', 'ancestries'],
['anomaly', 'anomalies'],
['anniversary', 'anniversaries'],
['battery', 'batteries'],
['nappy', 'nappies'],
['hanky', 'hankies'],
['junkie', 'junkies'],
['hogtie', 'hogties'],
['footsie', 'footsies'],
['curry', 'curries'],
['fantasy', 'fantasies'],
['housefly', 'houseflies'],
['falsy', 'falsies'],
['doggy', 'doggies'],
['carny', 'carnies'],
['cabby', 'cabbies'],
['charlie', 'charlies'],
['bookie', 'bookies'],
['auntie', 'aunties'],
# Prototype inheritance.
['constructor', 'constructors'],
# Non-standard case.
['randomWord', 'randomWords'],
['camelCase', 'camelCases'],
['PascalCase', 'PascalCases'],
['Alumnus', 'Alumni'],
['CHICKEN', 'CHICKENS'],
['日本語', '日本語'],
['한국', '한국'],
['中文', '中文'],
['اللغة العربية', 'اللغة العربية'],
['四 chicken', '四 chickens'],
['Order2', 'Order2s'],
['Work Order2', 'Work Order2s'],
['SoundFX2', 'SoundFX2s'],
['oDonald', 'oDonalds']
]
#
# Odd plural to singular tests.
#
# @type {Array}
#
SINGULAR_TESTS = [
['dingo', 'dingos'],
['mango', 'mangoes'],
['echo', 'echos'],
['ghetto', 'ghettoes'],
['nucleus', 'nucleuses'],
['bureau', 'bureaux'],
['seraph', 'seraphs']
]
#
# Odd singular to plural tests.
#
# @type {Array}
#
PLURAL_TESTS = [
['plateaux', 'plateaux'],
['axis', 'axes'],
['basis', 'bases'],
['automatum', 'automata'],
['thou', 'you'],
['axiS', 'axes'],
['passerby', 'passersby']
]
class TestPluralize(unittest.TestCase):
def test_methods_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertEqual(pluralizer.plural(test[0]), test[1])
def test_methods_is_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertTrue(pluralizer.isPlural(test[1]), f"isPlural('{test[1]}')")
def test_methods_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertEqual(pluralizer.singular(test[1]), test[0])
def test_methods_is_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertTrue(pluralizer.isSingular(test[0]))
def test_automatically_convert_plural(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *PLURAL_TESTS]:
self.assertEqual(pluralizer.pluralize(test[1], 5), test[1])
self.assertEqual(pluralizer.pluralize(test[0], 5), test[1])
def test_automatically_convert_singular(self):
pluralizer = Pluralizer()
for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
self.assertEqual(pluralizer.pluralize(test[0], 1), test[0])
self.assertEqual(pluralizer.pluralize(test[1], 1), test[0])
def test_prepend_count_plural_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('test', 5, True), '5 tests')
def test_prepend_count_singular_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('test', 1, True), '1 test')
def test_add_new_uncountable_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('paper'), 'papers')
pluralizer.addUncountableRule('paper')
self.assertEqual(pluralizer.pluralize('paper'), 'paper')
def test_add_new_irregular_words(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.pluralize('irregular'), 'irregulars')
pluralizer.addIrregularRule('irregular', 'regular')
self.assertEqual(pluralizer.pluralize('irregular'), 'regular')
def test_return_false_for_irregular_words(self):
pluralizer = Pluralizer()
self.assertTrue(pluralizer.isPlural('irregulars'))
pluralizer.addIrregularRule('irregulars', 'regular')
self.assertFalse(pluralizer.isPlural('irregulars'))
def test_add_new_plural_matching_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.plural('regex'), 'regexes')
pluralizer.addPluralRule(re.compile(r'(?i)gex$'), 'gexii')
self.assertEqual(pluralizer.plural('regex'), 'regexii')
def test_add_new_singular_matching_rules(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.singular('singles'), 'single')
pluralizer.addSingularRule(re.compile('singles$'), 'singular')
self.assertEqual(pluralizer.singular('singles'), 'singular')
def test_allow_new_plural_matching_rules_to_be_strings(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.plural('person'), 'people')
pluralizer.addPluralRule('person', 'peeps')
self.assertEqual(pluralizer.plural('person'), 'peeps')
def test_allow_new_singular_matching_rules_to_be_strings(self):
pluralizer = Pluralizer()
self.assertEqual(pluralizer.singular('mornings'), 'morning')
pluralizer.addSingularRule('mornings', 'suck')
self.assertEqual(pluralizer.singular('mornings'), 'suck')
if __name__ == '__main__':
unittest.main()
| 2.875 | 3 |
promgen/util.py | sundy-li/promgen | 0 | 11430 | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import requests.sessions
from django.db.models import F
from promgen.version import __version__
from django.conf import settings
# Wrappers around request api to ensure we always attach our user agent
# https://github.com/requests/requests/blob/master/requests/api.py
def post(url, data=None, json=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.post(url, data=data, json=json, **kwargs)
def get(url, params=None, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.get(url, params=params, **kwargs)
def delete(url, **kwargs):
with requests.sessions.Session() as session:
session.headers['User-Agent'] = 'promgen/{}'.format(__version__)
return session.delete(url, **kwargs)
def setting(key, default=None, domain=None):
"""
Settings helper based on saltstack's query
Allows a simple way to query settings from YAML
using the style `path:to:key` to represent
path:
to:
key: value
"""
rtn = settings.PROMGEN
if domain:
rtn = rtn[domain]
for index in key.split(":"):
try:
rtn = rtn[index]
except KeyError:
return default
return rtn
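# Illustrative example of the helper above (keys and value are made up): if
# settings.PROMGEN contains
#   notification:
#     email:
#       sender: [email protected]
# then setting("notification:email:sender") returns "[email protected]", and
# setting("email:sender", domain="notification") resolves the same value.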
class HelpFor:
# Wrap a model's lower level api so that we can easily
# grab help_text for a specific field
# help_text = HelpFor(DjangoModel)
    # help_text.field_name
def __init__(self, model):
self.model = model
def __getattr__(self, name):
return self.model._meta.get_field(name).help_text
def inc_for_pk(model, pk, **kwargs):
# key=F('key') + value
model.objects.filter(pk=pk).update(**{key: F(key) + kwargs[key] for key in kwargs})
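# For example (model and field names are illustrative),
#   inc_for_pk(Project, pk=1, hits=5)
# issues Project.objects.filter(pk=1).update(hits=F('hits') + 5).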
| 2.21875 | 2 |
integration_tests/test_suites/k8s-integration-test-suite/test_utils.py | ericct/dagster | 0 | 11431 | import time
import kubernetes
import pytest
from dagster_k8s.client import DagsterK8sError, WaitForPodState
from dagster_k8s.utils import retrieve_pod_logs, wait_for_job_success, wait_for_pod
from dagster_k8s_test_infra.helm import get_helm_test_namespace
def construct_pod_spec(name, cmd):
return kubernetes.client.V1PodSpec(
restart_policy="Never",
containers=[
kubernetes.client.V1Container(name=name, image="busybox", args=["/bin/sh", "-c", cmd])
],
)
def construct_pod_manifest(name, cmd):
return kubernetes.client.V1Pod(
metadata=kubernetes.client.V1ObjectMeta(name=name), spec=construct_pod_spec(name, cmd),
)
def construct_job_manifest(name, cmd):
return kubernetes.client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=kubernetes.client.V1ObjectMeta(name=name),
spec=kubernetes.client.V1JobSpec(
template=kubernetes.client.V1PodTemplateSpec(spec=construct_pod_spec(name, cmd)),
),
)
def test_wait_for_pod(cluster_provider): # pylint: disable=unused-argument
api = kubernetes.client.CoreV1Api()
with get_helm_test_namespace() as namespace:
# Without this sleep, we get the following error on kind:
# HTTP response body:
# {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"No API
# token found for service account \"default\", retry after the token is automatically
# created and added to the service
# account","reason":"ServerTimeout","details":{"name":"create
# pod","kind":"serviceaccounts","retryAfterSeconds":1},"code":500}
time.sleep(5)
try:
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi1", 'echo "hello world"'), namespace=namespace
)
wait_for_pod("sayhi1", namespace=namespace)
assert retrieve_pod_logs("sayhi1", namespace=namespace) == "hello world\n"
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi2", 'echo "hello world"'), namespace=namespace
)
wait_for_pod("sayhi2", namespace=namespace, wait_for_state=WaitForPodState.Terminated)
with pytest.raises(
DagsterK8sError, match="Timed out while waiting for pod to become ready"
):
api.create_namespaced_pod(
body=construct_pod_manifest("sayhi3", 'sleep 5; echo "hello world"'),
namespace=namespace,
)
wait_for_pod("sayhi3", namespace=namespace, wait_timeout=1)
with pytest.raises(DagsterK8sError) as exc_info:
api.create_namespaced_pod(
body=construct_pod_manifest("fail", 'echo "whoops!"; exit 1'),
namespace=namespace,
)
wait_for_pod("fail", namespace=namespace, wait_for_state=WaitForPodState.Terminated)
# not doing total match because integration test. unit tests test full log message
assert "Pod did not exit successfully." in str(exc_info.value)
finally:
for pod_name in ["sayhi1", "sayhi2", "sayhi3", "fail"]:
try:
api.delete_namespaced_pod(pod_name, namespace=namespace)
except kubernetes.client.rest.ApiException:
pass
def test_wait_for_job(cluster_provider): # pylint: disable=unused-argument
with get_helm_test_namespace() as namespace:
# Without this sleep, we get the following error on kind:
# HTTP response body:
# {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"No API
# token found for service account \"default\", retry after the token is automatically
# created and added to the service
# account","reason":"ServerTimeout","details":{"name":"create
# pod","kind":"serviceaccounts","retryAfterSeconds":1},"code":500}
time.sleep(5)
try:
api = kubernetes.client.BatchV1Api()
api.create_namespaced_job(
body=construct_job_manifest("sayhi1", 'echo "hello world"'), namespace=namespace
)
wait_for_job_success("sayhi1", namespace=namespace)
with pytest.raises(
DagsterK8sError, match="Timed out while waiting for job sayhi2 to complete"
):
api.create_namespaced_job(
body=construct_job_manifest("sayhi2", 'sleep 5; echo "hello world"'),
namespace=namespace,
)
wait_for_job_success("sayhi2", namespace=namespace, wait_timeout=1)
with pytest.raises(
DagsterK8sError, match="Encountered failed job pods for job fail with status:",
):
api.create_namespaced_job(
body=construct_job_manifest("fail", 'echo "whoops!"; exit 1'),
namespace=namespace,
)
wait_for_job_success("fail", namespace=namespace)
finally:
for job in ["sayhi1", "sayhi2", "fail"]:
try:
api.delete_namespaced_job(
job, namespace=namespace, propagation_policy="Foreground"
)
except kubernetes.client.rest.ApiException:
pass
| 1.757813 | 2 |
radioepg/migrations/0001_initial.py | mervij/radiodns | 0 | 11432 | # Generated by Django 3.1.6 on 2021-02-15 08:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_name', models.CharField(max_length=8)),
('medium_name', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Bearer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bearer_id', models.TextField()),
('cost', models.IntegerField()),
('mimeValue', models.CharField(max_length=255)),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='radioepg.service')),
],
),
]
| 1.703125 | 2 |
uitester/ui/case_manager/tag_names_line_edit.py | IfengAutomation/uitester | 4 | 11433 | <filename>uitester/ui/case_manager/tag_names_line_edit.py
from PyQt5.QtCore import Qt, QStringListModel
from PyQt5.QtWidgets import QLineEdit, QCompleter
class TagNamesLineEdit(QLineEdit):
def __init__(self, parent=None):
super(QLineEdit, self).__init__(parent)
self.cmp = None
self.is_completer = True
def setCompleter(self, completer):
self.cmp = completer
self.cmp.setWidget(self)
self.cmp.setCompletionMode(QCompleter.PopupCompletion)
self.cmp.setCaseSensitivity(Qt.CaseInsensitive)
self.textChanged.connect(self.tag_names_changed)
self.cmp.activated.connect(self.insert_completion)
def tag_names_changed(self):
if self.is_completer:
text = self.text()
tag_names = text.split(';')
last_tag_name = tag_names[len(tag_names) - 1]
self.cmp.update(last_tag_name)
self.cmp.popup().setCurrentIndex(self.cmp.completionModel().index(0, 0))
cr = self.cursorRect()
cr.setWidth(self.cmp.popup().sizeHintForColumn(0)
+ self.cmp.popup().verticalScrollBar().sizeHint().width())
self.cmp.complete(cr)
else:
pass
def completer(self):
return self.cmp
def insert_completion(self, string):
text = self.text()
tag_names = text.split(';')
last_tag_name = tag_names[len(tag_names) - 1]
new_text = text[0:len(text) - len(last_tag_name)] + string + ';'
self.is_completer = False
self.clear()
self.setText(new_text)
self.is_completer = True
class TagCompleter(QCompleter):
def __init__(self, string_list, parent=None):
super(TagCompleter, self).__init__(parent)
self.string_list = string_list
self.setModel(QStringListModel())
def update(self, completion_text):
filtered = []
for string in self.string_list:
if completion_text in string:
filtered.append(string)
self.model().setStringList(filtered)
self.popup().setCurrentIndex(self.model().index(0, 0))
| 2.078125 | 2 |
utils/__init__.py | millermuttu/torch_soft | 0 | 11434 | # # importing all the modules at once
# from .config import *
# from .normalization import *
# from .others import *
# from .img_reg import *
# from .transformation import *
# from .visualization import *
# importing the modules in a selective way
import utils.config
import utils.normalization
import utils.misc
import utils.lr_finder
| 1.070313 | 1 |
tasks.py | epu-ntua/QualiChain-mediator | 2 | 11435 | from celery import Celery
from clients.dobie_client import send_data_to_dobie
app = Celery('qualichain_mediator')
app.config_from_object('settings', namespace='CELERY_')
@app.task()
def consume_messages_async(message):
"""
    This task receives job posting text and feeds it to the DOBIE component
"""
extracted_skills = send_data_to_dobie(message)
print(extracted_skills, flush=True)
return extracted_skills
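# Illustrative call from other services (the payload shape is whatever
# dobie_client.send_data_to_dobie expects; this example is an assumption):
#   consume_messages_async.delay({"text": "job posting body ..."})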
| 2.34375 | 2 |
ingest/ambit_geo.py | brianhouse/okavango | 2 | 11436 | import json, math
from ingest import ingest_json_body
from housepy import config, log, strings, util
def parse(request):
log.info("ambit_geo.parse")
sample = ingest_json_body(request)
if sample is None:
return sample, "Could not parse"
data = {}
for key, value in sample.items():
if key == "UTC":
dt = util.parse_date(sample['UTC']) # these are marked UTC in the data
t = util.timestamp(dt)
data['t_utc'] = t
continue
if key == "Longitude":
data['longitude'] = math.degrees(float(sample['Longitude']))
continue
if key == "Latitude":
data['latitude'] = math.degrees(float(sample['Latitude']))
continue
if key == "GPSAltitude":
data['altitude'] = float(sample['GPSAltitude'])
continue
if type(value) != str:
continue
data[key] = strings.as_numeric(value)
try:
log.debug("%s %s %s" % (data['longitude'], data['latitude'], data['altitude']))
except:
log.error("MISSING GEO")
return data | 2.78125 | 3 |
gandyndns.py | nim65s/scripts | 1 | 11437 | #!/usr/bin/env python
'''update gandi DNS domain entry, with LiveDNS v5
Cf. https://doc.livedns.gandi.net/#work-with-domains
'''
import argparse
import ipaddress
import json
import os
from subprocess import check_output
import requests
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('domain')
parser.add_argument('name')
parser.add_argument('--ip', help="defaults to ifconfig.me's return")
parser.add_argument('--api_key', help="defaults to GANDI_API_KEY env var, or the return of 'pass api/gandi'")
args = parser.parse_args()
if args.ip is None:
args.ip = requests.get('http://ifconfig.me', headers={'User-Agent': 'curl/7.61.1'}).content.decode().strip()
ip = ipaddress.ip_address(args.ip)
if args.api_key is None:
args.api_key = os.environ.get('GANDI_API_KEY', check_output(['pass', 'api/gandi'], text=True).strip())
key = {'X-Api-Key': args.api_key}
r = requests.get(f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}', headers=key)
r.raise_for_status()
if r.json()[0]['rrset_values'][0] == args.ip:
if args.verbose:
print('ok')
else:
type_ = 'AAAA' if isinstance(ip, ipaddress.IPv6Address) else 'A'
url = f'https://dns.api.gandi.net/api/v5/domains/{args.domain}/records/{args.name}/{type_}'
data = {'rrset_values': [args.ip]}
headers = {'Content-Type': 'application/json', **key}
r = requests.put(url, data=json.dumps(data), headers=headers)
if args.verbose:
print(r.json())
else:
r.raise_for_status()
| 2.171875 | 2 |
leetcode.com/python/314_Binary_Tree_Vertical_Order_Traversal.py | mamane19/coding-interview-gym | 713 | 11438 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import deque
from collections import defaultdict
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
queue = deque([(root, 0)])
verticalNodeMap = defaultdict(list)
while queue:
node, horrizotalDistace = queue.popleft()
if node:
verticalNodeMap[horrizotalDistace].append(node.val)
queue.append((node.left, horrizotalDistace - 1))
queue.append((node.right, horrizotalDistace + 1))
minHorrizotalDistace, maxHorrizotalDistace = min(verticalNodeMap.keys()), max(verticalNodeMap.keys())
result = []
for key in range(minHorrizotalDistace, maxHorrizotalDistace + 1):
result.append(verticalNodeMap[key])
return result
# My solution during mock; it was getting TLE because the loop body used `root`
# instead of `currentNode`, re-enqueuing the same two children forever (fixed below)
from collections import defaultdict
from collections import deque
class Solution(object):
def verticalOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
orderMap = defaultdict(list)
queue = deque([(root, 0)])
while queue:
currentNode, vLine = queue.popleft()
if currentNode:
                orderMap[vLine].append(currentNode.val)
                queue.append((currentNode.left, vLine - 1))
                queue.append((currentNode.right, vLine + 1))
result = []
for i in range(min(orderMap.keys()), max(orderMap.keys()) + 1):
result.append(orderMap[i])
return result | 3.59375 | 4 |
src/sentry/models/pluginhealth.py | ayesha-omarali/sentry | 0 | 11439 | <reponame>ayesha-omarali/sentry
from __future__ import absolute_import
from sentry.db.models import (
ArrayField, BoundedPositiveIntegerField, Model, FlexibleForeignKey, sane_repr
)
from django.db import models
from jsonfield import JSONField
from django.utils import timezone
from sentry.constants import ObjectStatus
from django.utils.translation import ugettext_lazy as _
class PluginFeatures(object):
issue_basic = 'issue_basic'
issue_sync = 'issue_sync'
repository = 'repository'
class PluginHealth(Model):
__core__ = True
name = models.CharField(max_length=128, db_index=True)
features_list = ArrayField(of=models.TextField)
date_added = models.DateTimeField(default=timezone.now)
link = models.URLField(null=True, blank=True)
author = models.CharField(max_length=64)
metadata = JSONField()
status = BoundedPositiveIntegerField(
default=0,
choices=(
(ObjectStatus.VISIBLE,
_('Active')), (ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
),
db_index=True
)
class Meta:
app_label = 'sentry'
db_table = 'sentry_pluginhealth'
__repr__ = sane_repr('name')
def run_tests(self):
plugin_test = PluginHealthTest.objects.create(
plugin_id=self.id,
)
plugin_test.test_data = plugin_test.run_tests(self)
plugin_test.save()
return plugin_test
class PluginHealthTest(Model):
__core__ = True
date_added = models.DateTimeField(default=timezone.now)
plugin = FlexibleForeignKey('sentry.PluginHealth')
test_data = JSONField()
class Meta:
app_label = 'sentry'
db_table = 'sentry_pluginhealthtest'
unique_together = (('plugin', 'date_added'))
__repr__ = sane_repr('plugin', 'date_added')
def run_tests(self, plugin_health):
return {
'configure_test': self.configure_test(plugin_health),
'create_issue_test': self.create_issue_test(plugin_health),
'link_issue_test': self.link_issue_test(plugin_health),
'sync_assignment_test': self.sync_assignment_test(plugin_health),
'sync_comment_test': self.sync_comment_test(plugin_health),
'sync_status_test': self.sync_status_test(plugin_health),
'repository_test': self.repository_test(plugin_health),
}
def configure_test(self, plugin_health):
test_results = None
return test_results
def create_issue_test(self, plugin_health):
if PluginFeatures.issue_basic not in plugin_health.features_list:
return None
test_results = None
return test_results
def link_issue_test(self, plugin_health):
if PluginFeatures.issue_basic not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_assignment_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_comment_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def sync_status_test(self, plugin_health):
if PluginFeatures.issue_sync not in plugin_health.features_list:
return None
test_results = None
return test_results
def repository_test(self, plugin_health):
if PluginFeatures.repository not in plugin_health.features_list:
return None
test_results = None
return test_results
| 1.875 | 2 |
src/masonite/oauth/drivers/FacebookDriver.py | girardinsamuel/masonite-socialite | 1 | 11440 | from .BaseDriver import BaseDriver
from ..OAuthUser import OAuthUser
class FacebookDriver(BaseDriver):
def get_default_scopes(self):
return ["email"]
def get_auth_url(self):
return "https://www.facebook.com/dialog/oauth"
def get_token_url(self):
return "https://graph.facebook.com/oauth/access_token"
def get_user_url(self):
return "https://graph.facebook.com/me?"
def get_request_options(self, token):
return {
"headers": {"Authorization": f"Bearer {token}", "Accept": "application/json"},
"query": {"prettyPrint": "false"},
}
def user(self):
user_data, token = super().user()
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
def user_from_token(self, token):
user_data = super().user_from_token(token)
user = (
OAuthUser()
.set_token(token)
.build(
{
"id": user_data["sub"],
"nickname": user_data["nickname"],
"name": user_data["name"],
"email": user_data["email"],
"avatar": user_data["picture"],
}
)
)
return user
| 2.59375 | 3 |
python/convert_to_readwise.py | t27/highlights-convert | 0 | 11441 | <filename>python/convert_to_readwise.py
import pandas as pd
import json
import glob
columns = ["Highlight","Title","Author","URL","Note","Location"]
# For a sample of the input JSON, look at any .json file in the root of the `results` folder
def convert_to_readwise_df(json_files):
"""Convert the internal json format to a readwise compatible dataframe
Args:
json_files (List[str]): list of json files
Returns:
pd.DataFrame: dataframe with columns as required by readwise
"""
df_data = []
for file in json_files:
with open(file) as f:
data = json.load(f)
title = data['volume']['title']
author = ", ".join(data['volume']['authors'])
for entry in data['highlights']:
highlight = entry['content']
location = entry['location']
notes = ""
if "notes" in entry:
for note in notes:
notes = notes+"\n"+note
df_data.append([highlight,title,author,"",notes,location])
df = pd.DataFrame(df_data,columns = columns)
return df
if __name__ == "__main__":
json_files = glob.glob("../results/*.json")
df = convert_to_readwise_df(json_files)
df.to_csv("tarang_readwise.csv",index=False)
| 3.5 | 4 |
ms_deisotope/qc/__init__.py | mstim/ms_deisotope | 18 | 11442 | """A collection of methods for determining whether a given spectrum is
of high quality (likely to produce a high quality interpretation)
"""
from .heuristic import xrea
from .isolation import CoIsolation, PrecursorPurityEstimator
__all__ = [
"xrea",
"CoIsolation", "PrecursorPurityEstimator"
]
| 1.609375 | 2 |
rpi_animations/message.py | Anski-D/rpi_animations_old | 0 | 11443 | <filename>rpi_animations/message.py
from .item import Item
class Message(Item):
"""
Message feature object in the rpi_animations package.
"""
def __init__(self, group, screen_animator) -> None:
"""
Initialise Message object with sprite group and screen object. Run initial setup methods.
Args:
group (Group): Pygame sprite group to which the object will be added.
screen_animator (ScreenAnimator): Main package object controlling the animation.
"""
super().__init__(group, screen_animator)
# Store x position as float
self._x = float(self._rect.x)
# Set the flag that the message hasn't fully emerged
self._has_fully_emerged = False
def _setup_item(self) -> None:
"""
Run methods to setup the object.
Returns:
None
"""
self._set_text()
# Run parent method
super()._setup_item()
def _set_text(self) -> None:
"""
Set font, message text, and outline of text.
Returns:
None
"""
# Set font
self._font = self._settings.font
# Set the message text
self._text = self._settings.text
# Set the outline text
self._outline_text = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.outline_colour
)
def _set_item_content(self) -> None:
"""
Render the message text.
Returns:
None
"""
self.content = self._font.render(
self._text,
self._settings.settings['text_aa'],
self._settings.text_colour
)
def _place_item(self) -> None:
"""
Set the initial object position on the screen.
Returns:
None
"""
self._rect.midleft = self._screen_rect.midright
def _draw_outline(self) -> None:
"""
Draw the message text outline.
Returns:
None
"""
outline_width = self._settings.settings['outline_width']
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x - outline_width, self._rect.y + outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y - outline_width))
self._screen.blit(self._outline_text, (self._rect.x + outline_width, self._rect.y + outline_width))
def blit(self) -> None:
"""
Add the object to the pygame screen.
Returns:
None
"""
# Draw outline text
self._draw_outline()
# Draw the message
self._set_item_content()
# Run parent method
super().blit()
def update(self) -> None:
"""
Move the object position to the left during a frame update.
Returns:
None
"""
self._x -= self._settings.settings['text_speed'] / self._settings.settings['fps']
self._rect.x = self._x
def is_on_screen(self) -> bool:
"""
Determine whether the object is still on the screen.
Returns:
bool: True if still on screen, False otherwise.
"""
if self._rect.right <= self._screen_rect.left:
return False
return True
def has_just_emerged(self) -> bool:
"""
Determine whether the right side of the message is now visible on the screen.
Returns:
bool: True if right edge is now on screen, False otherwise.
"""
if not self._has_fully_emerged and self._rect.right <= self._screen_rect.right:
self._has_fully_emerged = True
return True
return False
| 3.046875 | 3 |
styrobot/cogs/help.py | ThatRedKite/styrobot | 1 | 11444 | <reponame>ThatRedKite/styrobot<gh_stars>1-10
import discord
from discord.ext import commands
from styrobot.util.contrib import info
import random
class BetterHelpCommand(commands.HelpCommand):
async def send_embed(self, embed):
embed.colour = discord.Colour.random()
await self.get_destination().send(embed=embed)
def blank_line(self, embed):
embed.add_field(name='_ _', value='_ _', inline=False)
def signature(self, command: commands.Command):
out = [command.qualified_name]
params = command.clean_params or {}
for name, param in params.items():
# slightly copied from discord.py
greedy = isinstance(param.annotation, commands.converter._Greedy)
if param.default is not param.empty:
should_print = param.default if isinstance(param.default, str) else param.default is not None
if should_print:
out.append(f'[{name}={param.default}]{"..." if greedy else ""}')
else:
out.append(f'[{name}]')
elif param.kind == param.VAR_POSITIONAL:
out.append(f'<{name}...>')
elif greedy:
out.append(f'[{name}]...')
else:
out.append(f'<{name}>')
return ' '.join(out)
async def send_bot_help(self, mapping):
e = discord.Embed(title=info['name'])
if random.random() < 0.95:
e.add_field(name='I am', value=info['name'], inline=True)
else:
e.add_field(name='I am', value='an impostor', inline=True)
e.set_author(name='sus', icon_url='https://i.redd.it/0qtc8un3bz061.png')
e.add_field(name='Contribute at', value=info['repo'], inline=False)
e.add_field(name='I send you my cogs (pls respond)', value='_ _', inline=True)
cogs = [(cog, await self.filter_commands(mapping[cog])) for cog in mapping.keys()]
cogs = [x for x in cogs if len(x[1]) > 0]
for i, (cog, cmds) in enumerate(cogs):
if i % 2 == 0:
self.blank_line(e)
h = '\n'.join([cmd.name for cmd in cmds])
if cog is None:
e.add_field(name='builtin', value=h, inline=True)
else:
e.add_field(name=cog.qualified_name, value=h, inline=True)
if random.random() < 0.9:
e.set_footer(text='Made with ❤️')
else:
e.set_footer(text='Made with 🍆')
await self.send_embed(e)
async def send_cog_help(self, cog: commands.Cog):
e = discord.Embed(title=cog.qualified_name)
e.add_field(name='Cog', value=cog.qualified_name, inline=True)
e.add_field(name='`in_code`', value=f'`{cog.__class__.__name__}`', inline=True)
e.add_field(name='Commands', value='_ _', inline=False)
for cmd in await self.filter_commands(cog.get_commands()):
e.add_field(name=cmd, value=(cmd.help or '[no help]'), inline=False)
await self.send_embed(e)
async def send_group_help(self, group: commands.Group):
e = discord.Embed(title=group.qualified_name)
e.add_field(name='Command Group', value=group.qualified_name, inline=True)
e.add_field(name='Help', value=(group.help or '[no help]'), inline=False)
e.add_field(name='Subcommands', value='_ _', inline=False)
for command in await self.filter_commands(group.commands):
command: commands.Command
e.add_field(name=self.signature(command), value=(command.help or '[no help]'), inline=False)
await self.send_embed(e)
async def send_command_help(self, command: commands.Command):
e = discord.Embed(title=(command.qualified_name or command.name))
e.add_field(name='Name', value=(command.qualified_name or command.name), inline=False)
e.add_field(name='Signature', value=(self.signature(command)), inline=False)
e.add_field(name='Help', value=(command.help or '[no help]'), inline=False)
await self.send_embed(e)
class HelpCog(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
help_command = BetterHelpCommand()
help_command.cog = self
self.bot.help_command = help_command
def setup(bot):
bot.add_cog(HelpCog(bot))
| 2.515625 | 3 |
misc/Queue_hello.py | benhunter/py-stuff | 3 | 11445 | <reponame>benhunter/py-stuff<gh_stars>1-10
# Testing with threading and queue modules for Thread-based parallelism
import threading, queue, time
# The worker thread gets jobs off the queue. When the queue is empty, it
# assumes there will be no more work and exits.
# (Realistically workers will run until terminated.)
def worker():
print('Running worker')
time.sleep(0.1)
while True:
try:
arg = q.get(block=False) # False to terminate Thread when no work is available
except queue.Empty:
print('Worker', threading.currentThread(), end=' ')
print('queue empty')
break
else:
print('Worker', threading.currentThread(), end=' ')
print('running with argument', arg)
work_func(arg) # do the work
time.sleep(0.5)
            q.task_done()  # mark the fetched item as processed
# Work function that processes the arguments
def work_func(arg):
print('Working on', arg)
print('Square is', arg**2)
print('Cube is', arg**3)
q = queue.Queue()
# Begin adding work to the queue
for i in range(20):
q.put(i)
threadPool = []
# Start a pool of 5 workers
for i in range(5):
t = threading.Thread(target=worker, name='worker %i' % (i + 1))
t.start()
threadPool.append(t)
# time.sleep(5) # testing if workers die before work is queued - yes they do die
# q.join()
for i in range(20):
q.put(i+20)
for t in threadPool:
t.join()
# Give threads time to run
# print('Main thread sleeping')
# time.sleep(5)
print('Main thread finished') | 3.703125 | 4 |
tests/conftest.py | Beanxx/alonememo | 0 | 11446 | <reponame>Beanxx/alonememo
import pytest
from pymongo import MongoClient
import app as flask_app
test_database_name = 'spartatest'
client = MongoClient('localhost', 27017)
db = client.get_database(test_database_name)
@pytest.fixture
def app():
test_app = flask_app.create_app(test_database_name)
    # Generator syntax: execution runs up to the yield statement and waits;
    # when resumed, it continues from the statement after the yield.
    # The app is not shut down here, it is only paused.
    yield test_app
    # Everything from this point runs after all tests have finished.
    client.drop_database(test_database_name)
    print('Test database dropped')
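
# A minimal sketch of a test that would consume the fixture above; it belongs
# in a test_*.py module rather than in conftest.py. It assumes
# flask_app.create_app() returns a regular Flask app, and the route used here
# is invented for illustration.
def test_app_responds(app):
    client = app.test_client()
    response = client.get('/')  # hypothetical route
    assert response.status_code < 500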
| 2.15625 | 2 |
threader/__init__.py | mwoolweaver/threader | 34 | 11447 | """Tools to quickly create twitter threads."""
from .thread import Threader
__version__ = "0.1.1" | 1.296875 | 1 |
src/utility/count_pages.py | WikiCommunityHealth/wikimedia-revert | 0 | 11448 | <reponame>WikiCommunityHealth/wikimedia-revert<gh_stars>0
# Count the number of pages in the Mediawiki history dumps
import bz2
import subprocess
import os
from datetime import datetime
inizio = datetime.now()
dataset_folder = '/home/gandelli/dev/data/it/'
totali = set()
revisioni = set()
revert = set()
ns0 = set()
for year in range(2001, 2021):
dump_in = bz2.open(dataset_folder+'/it' + str(year) + '.tsv.bz2', 'r')
line = dump_in.readline()
print(year)
while line != '':
line = dump_in.readline().rstrip().decode('utf-8')[:-1]
values = line.split('\t')
if len(values) < 2:
continue
if values[23] != '':
page = int(values[23])
totali.add(page)
if values[28] == '0':
ns0.add(page)
if values[1] == 'revision':
revisioni.add(page)
if values[64] == 'true' and values[67] == 'true':
revert.add(page)
print('total pages', len(totali))
print('total pages ns0', len(ns0))
print('total revisions ns0', len(revisioni))
print('total revert ns0', len(revert) ) | 3.015625 | 3 |
livescore/LivescoreCommon.py | TechplexEngineer/frc-livescore | 0 | 11449 | import colorsys
import cv2
from PIL import Image
import pkg_resources
from .LivescoreBase import LivescoreBase
from .details import Alliance, OngoingMatchDetails
class LivescoreCommon(LivescoreBase):
def __init__(self, game_year, **kwargs):
super(LivescoreCommon, self).__init__(game_year, **kwargs)
self._match_key = None
self._match_name = None
def _getMatchKeyName(self, img, debug_img):
if self._match_key is None:
tl = self._transformPoint((220, 6))
br = self._transformPoint((570, 43))
raw_match_name = self._parseRawMatchName(self._getImgCropThresh(img, tl, br))
self._match_key = self._getMatchKey(raw_match_name)
if self._match_key:
self._match_name = raw_match_name
else:
self._match_name = None
if self._debug:
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
return self._match_key, self._match_name
def _getTimeAndMode(self, img, debug_img):
# Check for match under review
review_point1 = self._transformPoint((624, 93))
review_sample1 = img[review_point1[1], review_point1[0], :]
hsvL = colorsys.rgb_to_hsv(float(review_sample1[2])/255, float(review_sample1[1])/255, float(review_sample1[0])/255)
review_point2 = self._transformPoint((1279 - 624, 93))
review_sample2 = img[review_point2[1], review_point2[0], :]
hsvR = colorsys.rgb_to_hsv(float(review_sample2[2])/255, float(review_sample2[1])/255, float(review_sample2[0])/255)
if 0.116 < hsvL[0] < 0.216 and 0.116 < hsvR[0] < 0.216:
return 0, 'post_match'
# Find time remaining
horiz_center = self._TEMPLATE_SHAPE[0]/2
tl = self._transformPoint((horiz_center-25, 56))
br = self._transformPoint((horiz_center+25, 82))
time_remaining = self._parseDigits(self._getImgCropThresh(img, tl, br))
if self._debug:
# draw a green box for time
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
# Determine mode: 'pre_match', 'auto', 'teleop', or 'post_match'
mode_point = self._transformPoint((520, 70))
mode_point2 = self._transformPoint((581, 70))
mode_sample = img[mode_point[1], mode_point[0], :]
mode_sample2 = img[mode_point2[1], mode_point2[0], :]
hsv1 = colorsys.rgb_to_hsv(float(mode_sample[2])/255, float(mode_sample[1])/255, float(mode_sample[0])/255)
hsv2 = colorsys.rgb_to_hsv(float(mode_sample2[2])/255, float(mode_sample2[1])/255, float(mode_sample2[0])/255)
if time_remaining is None:
return None, None
if time_remaining == 0:
if hsv1[1] > 0.6 and hsv2[1] > 0.6: # Both saturated
mode = 'post_match'
elif hsv1[1] > 0.6: # First saturated
mode = 'auto' # End of auton
else:
mode = 'pre_match'
elif time_remaining <= 15 and hsv2[1] < 0.6:
mode = 'auto'
else:
mode = 'teleop'
if self._debug:
box = self._cornersToBox(tl, br)
self._drawBox(debug_img, box, (0, 255, 0))
cv2.circle(debug_img, review_point1, 2, (0, 255, 0), -1)
cv2.circle(debug_img, review_point2, 2, (0, 255, 0), -1)
cv2.circle(debug_img, mode_point, 2, (0, 255, 0), -1)
cv2.circle(debug_img, mode_point2, 2, (0, 255, 0), -1)
return time_remaining, mode
def _getFlipped(self, img, debug_img):
# Sample point to determine red/blue side
color_point = self._transformPoint((520, 95))
color_sample = img[color_point[1], color_point[0], :]
is_flipped = color_sample[0] > color_sample[2] # More blue than red
if self._debug:
cv2.circle(debug_img, color_point, 2, (0, 255, 0), -1)
return is_flipped
def _getScores(self, img, debug_img, is_flipped):
# Left score limits
left_tl = self._transformPoint((520, 110))
left_br = self._transformPoint((634, 155))
# Right score limits
right_tl = self._transformPoint((644, 110))
right_br = self._transformPoint((760, 155))
left_score = self._parseDigits(self._getImgCropThresh(img, left_tl, left_br, white=True))
right_score = self._parseDigits(self._getImgCropThresh(img, right_tl, right_br, white=True))
if is_flipped:
red_score = right_score
blue_score = left_score
else:
red_score = left_score
blue_score = right_score
if self._debug:
left_box = self._cornersToBox(left_tl, left_br)
right_box = self._cornersToBox(right_tl, right_br)
self._drawBox(debug_img, left_box, (255, 255, 0) if is_flipped else (255, 0, 255))
self._drawBox(debug_img, right_box, (255, 0, 255) if is_flipped else (255, 255, 0))
return red_score, blue_score
def _getMatchDetails(self, img, force_find_overlay):
debug_img = None
if self._debug:
debug_img = img.copy()
time_remaining, mode = self._getTimeAndMode(img, debug_img)
if self._is_new_overlay or force_find_overlay:
self._match_key = None
match_key, match_name = self._getMatchKeyName(img, debug_img)
is_flipped = self._getFlipped(img, debug_img)
red_score, blue_score = self._getScores(img, debug_img, is_flipped)
        if self._debug:
            box = self._cornersToBox(self._transformPoint((0, 0)), self._transformPoint((1280, 170)))
            self._drawBox(debug_img, box, (255, 255, 0))
            cv2.imshow("Match Details", debug_img)
            cv2.waitKey()
if match_key is not None and red_score is not None \
and blue_score is not None and time_remaining is not None:
return OngoingMatchDetails(
match_key=match_key,
match_name=match_name,
mode=mode,
time=time_remaining,
red=Alliance(
score=red_score,
),
blue=Alliance(
score=blue_score,
)
)
else:
return None
| 2.09375 | 2 |
challenges/challenge.py | Tech-With-Tim/models | 2 | 11450 | <filename>challenges/challenge.py<gh_stars>1-10
from postDB import Model, Column, types
from datetime import datetime
import utils
class Challenge(Model):
"""
Challenge class to store the challenge details
Database Attributes:
Attributes stored in the `challenges` table.
:param int id: The challenge Snowflake ID.
:param str title: The challenge title.
:param int author_id: The challenge author's Discord ID.
:param str description: A description.
:param List[str] example_in: Example input.
:param List[str] example_out: Example output.
:param List[int] language_ids: The languages you can use to complete this challenge.
:param :class:`datetime` released_at: The time this challenge was released at.
:param bool deleted: Whether or not this challenge has been deleted.
:param str slug: The URL slug this challenge relates to.
"""
id = Column(types.Integer(big=True), primary_key=True)
title = Column(types.String, unique=True)
author_id = Column(
types.ForeignKey("users", "id", sql_type=types.Integer(big=True)),
)
description = Column(types.String)
example_in = Column(types.Array(types.String))
example_out = Column(types.Array(types.String))
# Implicit ForeignKey to ChallengeLanguage.id
language_ids = Column(types.Array(types.Integer(big=True)))
released_at = Column(types.DateTime, nullable=True)
deleted = Column(types.Boolean, default=False)
slug = Column(types.String, unique=True)
@property
def created_at(self) -> datetime:
"""Returns the time the challenge was created"""
return utils.snowflake_time(self.id)
| 3.046875 | 3 |
settings.py | embrace-inpe/cycle-slip-correction | 6 | 11451 | """
Common settings for all applications
"""
A = 40.3
TECU = 1.0e16
C = 299792458
F1 = 1.57542e9
F2 = 1.22760e9
factor_1 = (F1 - F2) / (F1 + F2) / C
factor_2 = (F1 * F2) / (F2 - F1) / C
DIFF_TEC_MAX = 0.05
LIMIT_STD = 7.5
plot_it = True
REQUIRED_VERSION = 3.01
CONSTELLATIONS = ['G', 'R']
COLUMNS_IN_RINEX = {'3.03': {'G': {'L1': 'L1C', 'L2': 'L2W', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1C', 'L2': 'L2C', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.02': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
},
'3.01': {'G': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1W', 'P2': 'C2W'},
'R': {'L1': 'L1', 'L2': 'L2', 'C1': 'C1C', 'P1': 'C1P', 'P2': 'C2P'}
}
}
| 1.882813 | 2 |
io_almacen/channel/__init__.py | xyla-io/io_almacen | 0 | 11452 | <reponame>xyla-io/io_almacen<gh_stars>0
from .channel_io import Channel, channel_entity_url | 1.046875 | 1 |
tests/api/test_libcoap_api.py | ggravlingen/ikeatradfri | 726 | 11453 | """Test API utilities."""
import json
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.gateway import Gateway
def test_constructor_timeout_passed_to_subprocess(monkeypatch):
"""Test that original timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", timeout=20, psk="abc")
api.request(Gateway().get_devices())
assert capture["timeout"] == 20
def test_custom_timeout_passed_to_subprocess(monkeypatch):
"""Test that custom timeout is passed to subprocess."""
capture = {}
def capture_args(*args, **kwargs):
capture.update(kwargs)
return json.dumps([])
monkeypatch.setattr("subprocess.check_output", capture_args)
api = APIFactory("anything", psk="abc")
api.request(Gateway().get_devices(), timeout=1)
assert capture["timeout"] == 1
| 2.46875 | 2 |
scrape_tvz.py | awordforthat/rhymes | 0 | 11454 | # scrapes Townes van Zandt lyrics
# sample code so I don't have to remember all of this stuff
# the next time I want to source some verses
from bs4 import BeautifulSoup as soup
import requests
import string
punctuation_trans_table = str.maketrans("", "", string.punctuation)
def strip_punctuation(s):
return s.translate(punctuation_trans_table)
base_url = "http://ippc2.orst.edu/coopl/lyrics/"
index = requests.get(base_url + "albums.html")
parsed_index = soup(index.text)
all_links = parsed_index.find_all("a") # get all <a> tags
links = [l for l in all_links if l.text] # filter out image links
def to_filename(s, path="texts/townes_van_zandt/"):
'''Quick and dirty snake-casing'''
s = s.replace("&", "and") # special case, "Poncho & Lefty"
s = strip_punctuation(s)
s = s.lower()
s = s.replace(" ", "_")
s = path + s + ".txt"
return s
def process_link(link):
title = link.text
f = open(to_filename(title), "w")
remote_file = link.get("href")
song_file = requests.get(base_url + remote_file)
verses = [l for l in soup(song_file.text).find_all("font")
if l.get("size")]
for verse in verses:
if verse.text:
f.writelines("\n".join(verse.stripped_strings))
f.write("\n\n")
| 3.21875 | 3 |
chapter04/ifelse.py | persevere-in-coding-persist-in-learning/python2 | 3 | 11455 | # coding=utf-8
"""
A study of the if / elif / else control structures
Version: 0.1
Author: huijz
Date: 2020-08-24
"""
# Example 1: basic usage of if:
flag = False
name = 'huijz'
if name == 'python':  # check whether the variable equals 'python'
    flag = True  # set the flag to True when the condition holds
    print 'welcome boss'  # and print a welcome message
else:
    print name  # print the variable name when the condition does not hold
# Example 2: elif usage
num = 5
if num == 3:  # check the value of num
    print 'boss'
elif num == 2:
    print 'user'
elif num == 1:
    print 'worker'
elif num < 0:  # printed when the value is less than zero
    print 'error'
else:
    print 'road'  # printed when none of the conditions match
# Example 3: multiple conditions in an if statement
num = 9
if 0 <= num <= 10:  # check whether the value is between 0 and 10
    print 'hello'
# Output: hello
num = 10
if num < 0 or num > 10:  # check whether the value is less than 0 or greater than 10
    print 'hello'
else:
    print 'unDefine'
# Output: unDefine
num = 8
# Check whether the value is within 0~5 or 10~15
if (0 <= num <= 5) or (10 <= num <= 15):
    print 'hello'
else:
    print 'unDefine'
# Output: unDefine
# Example 4: var = 100
var = 100
if var == 100: print "the value of var is 100"
print "Good bye!"
| 4.15625 | 4 |
setup.py | korymath/JANN | 39 | 11456 | <filename>setup.py<gh_stars>10-100
from setuptools import setup
from setuptools import find_packages
setup(
name="Jann",
version="4.0.0",
description="Jann is a Nearest Neighbour retrieval-based chatbot.",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
url="https://github.com/korymath/jann",
packages=find_packages(),
setup_requires=[
"pytest-runner"
],
tests_require=[
"pytest"
],
)
| 1.25 | 1 |
tests/scanner/scanners/ke_version_scanner_test.py | pombredanne/forseti-security | 1 | 11457 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KE Version Rule Scanner Tests."""
import unittest
import mock
from tests import unittest_utils
from google.cloud.security.common.gcp_type import (
ke_cluster as ke_cluster_type)
from google.cloud.security.common.gcp_type import (
organization as organization_type)
from google.cloud.security.common.gcp_type import project as project_type
from google.cloud.security.scanner.scanners import ke_version_scanner
# pylint: disable=bad-indentation
class FakeProjectDao(object):
def get_project(self, project_id, snapshot_timestamp=0):
return project_type.Project(project_id=project_id)
class FakeOrgDao(object):
def find_ancestors(self, resource_id, snapshot_timestamp=0):
return [organization_type.Organization(organization_id=123456)]
class KeVersionScannerTest(unittest_utils.ForsetiTestCase):
def tearDown(self):
self.org_patcher.stop()
self.project_patcher.stop()
def setUp(self):
# patch the daos
self.org_patcher = mock.patch(
'google.cloud.security.common.data_access.'
'org_resource_rel_dao.OrgResourceRelDao')
self.mock_org_rel_dao = self.org_patcher.start()
self.mock_org_rel_dao.return_value = FakeOrgDao()
self.project_patcher = mock.patch(
'google.cloud.security.common.data_access.'
'project_dao.ProjectDao')
self.mock_project_dao = self.project_patcher.start()
self.mock_project_dao.return_value = FakeProjectDao()
self.server_config = {
'defaultClusterVersion': '1.7.11-gke.1',
'validNodeVersions': [
'1.8.6-gke.0',
'1.7.11-gke.1',
'1.7.10-gke.1',
'1.6.13-gke.1',
],
'defaultImageType': 'COS',
'validImageTypes': [
'UBUNTU',
'COS'
],
'validMasterVersions': [
'1.8.6-gke.0',
'1.7.11-gke.1'
]
}
self.ke_clusters = {
# The main backend service.
'master-version-invalid': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'master-version-invalid',
'nodePools': [{
'name': 'default-pool',
'version': '1.6.13-gke.1'
}],
'initialClusterVersion': '1.6.13-gke.1',
'currentMasterVersion': '1.6.13-gke.1',
'currentNodeVersion': '1.6.13-gke.1'
}),
'node-version-invalid': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'node-version-invalid',
'nodePools': [{
'name': 'default-pool',
'version': '1.8.4-gke.1'
}],
'initialClusterVersion': '1.8.4-gke.1',
'currentMasterVersion': '1.8.6-gke.0',
'currentNodeVersion': '1.8.4-gke.1'
}),
'node-version-not-allowed': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'node-version-not-allowed',
'nodePools': [{
'name': 'default-pool',
'version': '1.7.10-gke.1'
}],
'initialClusterVersion': '1.7.10-gke.1',
'currentMasterVersion': '1.7.11-gke.1',
'currentNodeVersion': '1.7.10-gke.1'
}),
'multiple-node-pools': ke_cluster_type.KeCluster.from_dict(
'foo', self.server_config,
{
'name': 'multiple-node-pools',
'nodePools': [{
'name': 'default-pool',
'version': '1.7.11-gke.1'
}, {
'name': 'secondary-pool',
'version': '1.7.11-gke.1'
}],
'initialClusterVersion': '1.7.11-gke.1',
'currentMasterVersion': '1.7.11-gke.1',
'currentNodeVersion': '1.7.11-gke.1'
})
}
self.scanner = ke_version_scanner.KeVersionScanner(
{}, {}, '',
unittest_utils.get_datafile_path(
__file__, 'ke_version_scanner_test_data.yaml'))
self.scanner._retrieve = mock.Mock(
return_value=self.ke_clusters.values())
@mock.patch.object(
ke_version_scanner.KeVersionScanner,
'_output_results_to_db', autospec=True)
def test_run_scanner(self, mock_output_results):
self.scanner.run()
expected_violations = [
{'resource_id': 'node-version-not-allowed',
'resource_type': 'ke',
'rule_index': 2,
'rule_name': 'Disallowed node pool version',
'violation_data': {'cluster_name': 'node-version-not-allowed',
'node_pool_name': 'default-pool',
'project_id': 'foo',
'violation_reason': (
"Node pool version 1.7.10-gke.1 is not "
"allowed (['>= 1.6.13-gke.1', "
"'>= 1.7.11-gke.1', '>= 1.8.4-gke.1', "
"'>= 1.9.*']).")},
'violation_type': 'KE_VERSION_VIOLATION'},
{'resource_id': 'master-version-invalid',
'resource_type': 'ke',
'rule_index': 1,
'rule_name': 'Unsupported master version',
'violation_data': {'cluster_name': 'master-version-invalid',
'node_pool_name': '',
'project_id': 'foo',
'violation_reason': (
"Master version 1.6.13-gke.1 is not "
"supported (['1.7.11-gke.1', "
"'1.8.6-gke.0']).")},
'violation_type': 'KE_VERSION_VIOLATION'},
{'resource_id': 'node-version-invalid',
'resource_type': 'ke',
'rule_index': 0,
'rule_name': 'Unsupported node pool version',
'violation_data': {'cluster_name': 'node-version-invalid',
'node_pool_name': 'default-pool',
'project_id': 'foo',
'violation_reason': (
"Node pool version 1.8.4-gke.1 is not "
"supported (['1.6.13-gke.1', "
"'1.7.10-gke.1', '1.7.11-gke.1', "
"'1.8.6-gke.0']).")},
'violation_type': 'KE_VERSION_VIOLATION'}]
mock_output_results.assert_called_once_with(mock.ANY,
expected_violations)
if __name__ == '__main__':
unittest.main()
| 1.828125 | 2 |
metrics/utils.py | edwardyehuang/iSeg | 4 | 11458 | <filename>metrics/utils.py<gh_stars>1-10
# ================================================================
# MIT License
# Copyright (c) 2021 edwardyehuang (https://github.com/edwardyehuang)
# ================================================================
import tensorflow as tf
from iseg.metrics.seg_metric_wrapper import SegMetricWrapper
from iseg.metrics.mean_iou import MeanIOU
class SegMetricBuilder:
def __init__(self, num_class, ignore_label):
self.num_class = num_class
self.ignore_label = ignore_label
self.__metrics = []
def add(self, prefix="", use_iou=True, pre_compute_fn=None):
metrics_list = []
if prefix is None:
prefix = ""
if prefix != "":
prefix = prefix + "_"
if use_iou:
iou_metric = SegMetricWrapper(
MeanIOU(self.num_class), num_class=self.num_class, ignore_label=self.ignore_label, name=prefix + "IOU"
)
iou_metric.add_pre_compute_fn(pre_compute_fn)
metrics_list.append(iou_metric)
self.__metrics.append(metrics_list)
@property
def metrics(self):
return self.__metrics
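
# A minimal usage sketch; the class count and ignore label are invented values.
def _example_builder():
    builder = SegMetricBuilder(num_class=21, ignore_label=255)
    builder.add(prefix="val")  # registers a mean-IoU metric named "val_IOU"
    return builder.metrics     # [[SegMetricWrapper wrapping MeanIOU]]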
| 2.0625 | 2 |
src/core/stats.py | dynaryu/vaws | 0 | 11459 | import math
def lognormal_mean(m, stddev):
""" compute mean of log x with mean and std. of x
Args:
m: mean of x
stddev: standard deviation of x
Returns: mean of log x
"""
return math.log(m) - (0.5 * math.log(1.0 + (stddev * stddev) / (m * m)))
def lognormal_stddev(m, stddev):
""" compute std. of log x with mean and std. of x
Args:
m: mean of x
stddev: standard deviation of x
Returns: std. of log x
"""
return math.sqrt(math.log((stddev * stddev) / (m * m) + 1))
def lognormal_underlying_mean(m, stddev):
""" compute mean of x with mean and std of log x
Args:
m: mean of log x
stddev: std of log x
Returns:
"""
# if m == 0 or stddev == 0:
# print '{}'.format('why ???')
# return 0
return math.exp(m + 0.5 * stddev * stddev)
def lognormal_underlying_stddev(m, stddev):
""" compute std of x with mean and std of log x
Args:
m: mean of log x
stddev: std of log x
Returns: std of x
"""
# if m == 0 or stddev == 0:
# print '{}'.format('strange why???')
# return 0
return math.sqrt((math.exp(stddev**2.0) - 1.0) *
math.exp(2.0*m + stddev**2.0))
#return lognormal_underlying_mean(m, stddev) * \
# math.sqrt((math.exp(stddev * stddev) - 1.0))
| 3.875 | 4 |
vim.d/vimfiles/bundle/taghighlight/plugin/TagHighlight/module/languages.py | lougxing/gbox | 0 | 11460 | #!/usr/bin/env python
# Tag Highlighter:
# Author: <NAME> <abudden _at_ gmail _dot_ com>
# Copyright: Copyright (C) 2009-2013 <NAME>
# Permission is hereby granted to use and distribute this code,
# with or without modifications, provided that this copyright
# notice is copied with it. Like anything else that's free,
# the TagHighlight plugin is provided *as is* and comes with no
# warranty of any kind, either expressed or implied. By using
# this plugin, you agree that in no event will the copyright
# holder be liable for any damages resulting from the use
# of this software.
# ---------------------------------------------------------------------
import os
import glob
from .config import config
from .loaddata import LoadDataFile, LoadFile, GlobData
from .debug import Debug
class Languages():
registry = {}
def __init__(self, options):
self.options = options
self.kinds = None
language_list_entries = ['SkipList','Priority']
# Import language specific modules: this will make them be parsed
# and will add to the registry
self.defaults = LoadDataFile('language_defaults.txt')
for entry in language_list_entries:
if entry in self.defaults:
if not isinstance(self.defaults[entry], list):
self.defaults[entry] = self.defaults[entry].split(',')
for language_file in GlobData('languages/*.txt'):
language_dict = LoadDataFile(language_file)
for entry in language_list_entries:
if entry in language_dict:
if not isinstance(language_dict[entry], list):
language_dict[entry] = language_dict[entry].split(',')
language_dict['Filename'] = language_file
if 'ReservedKeywords' in language_dict:
# This is some weird python magic that takes a list of
# strings containing space-separated items and produces
# a single list of those items.
language_dict['ReservedKeywords'] = \
[item for sublist in language_dict['ReservedKeywords'] for item in sublist.split(' ')]
else:
language_dict['ReservedKeywords'] = []
language_dict = self.VerifyLanguage(language_dict)
self.registry[language_dict['FriendlyName']] = language_dict
def ReadConfigFile(self, filename):
result = {}
fh = open(filename, 'r')
list_entries = ['SkipList','Priority']
key = None
for line in fh:
if line.strip().endswith(':') and line[0] not in [' ','\t',':','#']:
key = line.strip()[:-1]
result[key] = []
elif key is not None and line.startswith('\t'):
result[key] += [line.strip()]
elif ':' in line and line[0] not in [' ','\t',':','#']:
# End of the previous list, so reset key
key = None
parts = line.strip().split(':',1)
if parts[0] in list_entries:
if ',' in parts[1]:
result[parts[0]] = parts[1].split(',')
else:
result[parts[0]] = [parts[1]]
else:
result[parts[0]] = parts[1]
fh.close()
return result
def VerifyLanguage(self, language_dict):
required_keys = [
'FriendlyName',
'CTagsName',
'PythonExtensionMatcher',
'VimExtensionMatcher',
'Suffix',
'SkipList',
'IsKeyword',
'Priority',
]
for key in required_keys:
if key not in language_dict:
if key in self.defaults:
language_dict[key] = self.defaults[key]
else:
raise Exception("Language data from file {filename} is " \
"missing required key {key} (no default " \
"available).".format(filename=language_dict['Filename'],
key=key))
return language_dict
def GetAllLanguages(self):
return list(self.registry.keys())
def GetAllLanguageHandlers(self):
return list(self.registry.values())
def GetLanguageHandler(self, name):
return self.registry[name]
def GenerateExtensionTable(self):
results = {}
for handler in list(self.registry.values()):
extensions = handler.GetVimMatcher()
suffix = handler.GetSuffix()
results[extensions] = suffix
return results
def GenerateFullKindList(self):
self.LoadKindList()
kinds = set()
for language in list(self.kinds.keys()):
kinds |= set(self.kinds[language].values())
return sorted(list(kinds))
def GetKindList(self, language=None):
"""Explicit list of kinds exported from ctags help."""
if self.kinds is None:
kind_import = LoadDataFile('kinds.txt')
# Generate the kind database with 'ctags_' prefix on the keys
self.kinds = {}
for key in kind_import:
self.kinds[key] = {}
for kind in kind_import[key]:
self.kinds[key]['ctags_'+kind] = kind_import[key][kind]
if language is None:
return self.kinds
elif language in self.kinds:
return self.kinds[language]
else:
return None
| 2.390625 | 2 |
archive/bayes_sensor.py | robmarkcole/HASS-data-science | 11 | 11461 | """
Bayes sensor code split out from
https://github.com/home-assistant/home-assistant/blob/dev/homeassistant/components/binary_sensor/bayesian.py
This module is used to explore the sensor.
"""
from collections import OrderedDict
from const import *
def update_probability(prior, prob_true, prob_false):
"""Update probability using Bayes' rule."""
numerator = prob_true * prior
denominator = numerator + prob_false * (1 - prior)
probability = numerator / denominator
return probability
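
# Worked example of the update (numbers invented): with a prior of 0.2, an
# observation seen 80% of the time when the hypothesis is true and 10% of the
# time when it is false updates the probability to 0.16 / (0.16 + 0.08) = 2/3.
assert abs(update_probability(0.2, 0.8, 0.1) - 2.0 / 3.0) < 1e-9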
def setup_platform(config):
"""Set up the Bayesian Binary sensor.
Modified from async_setup_platform."""
name = config[CONF_NAME]
observations = config[CONF_OBSERVATIONS]
prior = config[CONF_PRIOR]
probability_threshold = config[CONF_PROBABILITY_THRESHOLD]
device_class = config[CONF_DEVICE_CLASS]
return BayesianBinarySensor(
name, prior, observations, probability_threshold, device_class)
class BinarySensorDevice(): # Entity
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
class BayesianBinarySensor(BinarySensorDevice):
"""Representation of a Bayesian sensor.
Removed some methods I don't think will be needed for this investigation.
"""
def __init__(self, name, prior, observations, probability_threshold,
device_class):
"""Initialize the Bayesian sensor."""
self._name = name
self._observations = observations
self._probability_threshold = probability_threshold
self._device_class = device_class
self._deviation = False
self.prior = prior
self.probability = prior
self.current_obs = OrderedDict({})
# return the entity_id to observ
to_observe = set(obs['entity_id'] for obs in self._observations)
self.entity_obs = dict.fromkeys(to_observe, [])
# Append observations
for ind, obs in enumerate(self._observations):
obs['id'] = ind
self.entity_obs[obs['entity_id']].append(obs)
self.watchers = {
'numeric_state': self._process_numeric_state,
'state': self._process_state
}
# @asyncio.coroutine
def async_added_to_hass(self):
"""Call when entity about to be added."""
@callback
# pylint: disable=invalid-name
def async_threshold_sensor_state_listener(entity, old_state,
new_state):
"""Handle sensor state changes."""
if new_state.state == STATE_UNKNOWN:
return
entity_obs_list = self.entity_obs[entity]
for entity_obs in entity_obs_list:
platform = entity_obs['platform']
self.watchers[platform](entity_obs)
prior = self.prior
for obs in self.current_obs.values():
prior = update_probability(
prior, obs['prob_true'], obs['prob_false'])
self.probability = prior # Updates prior for each observation.
# self.hass.async_add_job(self.async_update_ha_state, True)
entities = [obs['entity_id'] for obs in self._observations]
# async_track_state_change(
# self.hass, entities, async_threshold_sensor_state_listener)
def _update_current_obs(self, entity_observation, should_trigger):
"""Update current observation for single entity."""
obs_id = entity_observation['id']
if should_trigger:
prob_true = entity_observation['prob_given_true']
prob_false = entity_observation.get(
'prob_given_false', 1 - prob_true)
# Update prob_true and prob_false
self.current_obs[obs_id] = {
'prob_true': prob_true,
'prob_false': prob_false
}
else:
self.current_obs.pop(obs_id, None)
def _process_numeric_state(self, entity_observation):
"""Add entity to current_obs if numeric state conditions are met (regular sensor)."""
entity = entity_observation['entity_id']
should_trigger = condition.async_numeric_state(
self.hass, entity,
entity_observation.get('below'),
entity_observation.get('above'), None, entity_observation)
self._update_current_obs(entity_observation, should_trigger)
def _process_state(self, entity_observation):
"""Add entity to current observations if state conditions are met (binary sensor)."""
entity = entity_observation['entity_id']
should_trigger = condition.state(
self.hass, entity, entity_observation.get('to_state'))
self._update_current_obs(entity_observation, should_trigger)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._deviation
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the sensor class of the sensor."""
return self._device_class
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_OBSERVATIONS: [val for val in self.current_obs.values()],
ATTR_PROBABILITY: round(self.probability, 2),
ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
}
    # @asyncio.coroutine
def async_update(self):
"""Get the latest data and update the states."""
self._deviation = bool(self.probability > self._probability_threshold)
| 3.1875 | 3 |
CoarseNet/MinutiaeNet_utils.py | khaihp98/minutiae | 0 | 11462 | <gh_stars>0
import os
import glob
import shutil
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage, misc, signal, spatial
from skimage.filters import gaussian, gabor_kernel
import cv2
import math
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def re_mkdir(path):
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
def init_log(output_dir):
re_mkdir(output_dir)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%Y%m%d-%H:%M:%S',
filename=os.path.join(output_dir, 'log.log'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
return logging
def copy_file(path_s, path_t):
shutil.copy(path_s, path_t)
def get_files_in_folder(folder, file_ext=None):
files = glob.glob(folder + file_ext)
files_name = []
for i in files:
_, name = os.path.split(i)
name, ext = os.path.splitext(name)
files_name.append(name)
return np.asarray(files), np.asarray(files_name)
def point_rot(points, theta, b_size, a_size):
cosA = np.cos(theta)
sinA = np.sin(theta)
b_center = [b_size[1] / 2.0, b_size[0] / 2.0]
a_center = [a_size[1] / 2.0, a_size[0] / 2.0]
points = np.dot(points - b_center, np.array([[cosA, -sinA], [sinA, cosA]])) + a_center
return points
def mnt_reader(file_name):
f = open(file_name)
minutiae = []
for i, line in enumerate(f):
if i < 4 or len(line) == 0: continue
w, h, o = [float(x) for x in line.split()]
w, h = int(round(w)), int(round(h))
minutiae.append([w, h, o])
f.close()
return minutiae
def mnt_writer(mnt, image_name, image_size, file_name):
f = open(file_name, 'w')
f.write('%s\n' % (image_name))
f.write('%d %d %d\n' % (mnt.shape[0], image_size[0], image_size[1]))
for i in range(mnt.shape[0]):
f.write('%d %d %.6f %.4f\n' % (mnt[i, 0], mnt[i, 1], mnt[i, 2], mnt[i, 3]))
f.close()
return
def gabor_fn(ksize, sigma, theta, Lambda, psi, gamma):
sigma_x = sigma
sigma_y = float(sigma) / gamma
# Bounding box
nstds = 3
xmax = ksize[0] / 2
ymax = ksize[1] / 2
xmin = -xmax
ymin = -ymax
(y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
# Rotation
x_theta = x * np.cos(theta) + y * np.sin(theta)
y_theta = -x * np.sin(theta) + y * np.cos(theta)
gb_cos = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.cos(
2 * np.pi / Lambda * x_theta + psi)
gb_sin = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 / sigma_y ** 2)) * np.sin(
2 * np.pi / Lambda * x_theta + psi)
return gb_cos, gb_sin
def gabor_bank(stride=2, Lambda=8):
filters_cos = np.ones([25, 25, 180 // stride], dtype=float)
filters_sin = np.ones([25, 25, 180 // stride], dtype=float)
for n, i in enumerate(range(-90, 90, stride)):
theta = i * np.pi / 180.
kernel_cos, kernel_sin = gabor_fn((24, 24), 4.5, -theta, Lambda, 0, 0.5)
filters_cos[..., n] = kernel_cos
filters_sin[..., n] = kernel_sin
filters_cos = np.reshape(filters_cos, [25, 25, 1, -1])
filters_sin = np.reshape(filters_sin, [25, 25, 1, -1])
return filters_cos, filters_sin
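
# A small usage sketch of the bank above: with stride=2 there are 90
# orientation channels, each holding a 25 x 25 cosine/sine Gabor kernel.
def _gabor_bank_example():
    filters_cos, filters_sin = gabor_bank(stride=2, Lambda=8)
    return filters_cos.shape, filters_sin.shape  # both (25, 25, 1, 90)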
def gaussian2d(shape=(5, 5), sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
def gausslabel(length=180, stride=2):
gaussian_pdf = signal.gaussian(length + 1, 3)
label = np.reshape(np.arange(stride / 2, length, stride), [1, 1, -1, 1])
y = np.reshape(np.arange(stride / 2, length, stride), [1, 1, 1, -1])
delta = np.array(np.abs(label - y), dtype=int)
delta = np.minimum(delta, length - delta) + length // 2
return gaussian_pdf[delta]
def angle_delta(A, B, max_D=np.pi * 2):
delta = np.abs(A - B)
delta = np.minimum(delta, max_D - delta)
return delta
def fmeasure(P, R):
return 2 * P * R / (P + R + 1e-10)
def distance(y_true, y_pred, max_D=16, max_O=np.pi / 6):
D = spatial.distance.cdist(y_true[:, :2], y_pred[:, :2], 'euclidean')
O = spatial.distance.cdist(np.reshape(y_true[:, 2], [-1, 1]), np.reshape(y_pred[:, 2], [-1, 1]), angle_delta)
return (D <= max_D) * (O <= max_O)
def metric_P_R_F(y_true, y_pred, maxd=16, maxo=np.pi / 6):
# Calculate Precision, Recall, F-score
if y_pred.shape[0] == 0 or y_true.shape[0] == 0:
return 0, 0, 0, 0, 0
y_true, y_pred = np.array(y_true), np.array(y_pred)
total_gt, total = float(y_true.shape[0]), float(y_pred.shape[0])
# Using L2 loss
dis = spatial.distance.cdist(y_pred[:, :2], y_true[:, :2], 'euclidean')
mindis, idx = dis.min(axis=1), dis.argmin(axis=1)
# Change to adapt to new annotation: old version. When training, comment it
# y_pred[:,2] = -y_pred[:,2]
angle = abs(np.mod(y_pred[:, 2], 2 * np.pi) - y_true[idx, 2])
angle = np.asarray([angle, 2 * np.pi - angle]).min(axis=0)
# Satisfy the threshold
tmp = (mindis <= maxd) & (angle <= maxo)
# print('mindis,idx,angle,tmp=%s,%s,%s,%s'%(mindis,idx,angle,tmp))
precision = len(np.unique(idx[(mindis <= maxd) & (angle <= maxo)])) / float(y_pred.shape[0])
recall = len(np.unique(idx[(mindis <= maxd) & (angle <= maxo)])) / float(y_true.shape[0])
# print('pre=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_pred.shape[0])))
# print('recall=%f/ %f'%(len(np.unique(idx[(mindis <= maxd) & (angle<=maxo)])),float(y_true.shape[0])))
if recall != 0:
loc = np.mean(mindis[(mindis <= maxd) & (angle <= maxo)])
ori = np.mean(angle[(mindis <= maxd) & (angle <= maxo)])
else:
loc = 0
ori = 0
return precision, recall, fmeasure(precision, recall), loc, ori
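
# Illustrative check of the metric (values invented): rows are [x, y,
# orientation]; one prediction matches its ground truth within 16 px and
# pi/6 rad, the other does not, giving precision = recall = 0.5.
def _metric_example():
    y_true = np.array([[100., 100., 0.1], [200., 200., 1.0]])
    y_pred = np.array([[104., 102., 0.2], [300., 300., 2.0]])
    return metric_P_R_F(y_true, y_pred)  # (0.5, 0.5, 0.5, ~4.5, 0.1)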
def nms(mnt):
if mnt.shape[0] == 0:
return mnt
# sort score
mnt_sort = mnt.tolist()
mnt_sort.sort(key=lambda x: x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=np.pi / 6).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i + 1:] = keep_list[i + 1:] * (1 - inrange[i, i + 1:])
return mnt_sort[keep_list.astype(np.bool), :]
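
# Illustrative sketch of nms() (values invented): rows are [x, y, orientation,
# score]. The two nearby, similarly oriented candidates collapse onto the
# higher-scoring one, while the distant minutia survives, leaving two rows.
def _nms_example():
    cand = np.array([[100., 100., 0.1, 0.9],
                     [104., 102., 0.2, 0.5],
                     [200., 200., 1.0, 0.8]])
    return nms(cand)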
def fuse_nms(mnt, mnt_set_2):
if mnt.shape[0] == 0:
return mnt
# sort score
all_mnt = np.concatenate((mnt, mnt_set_2))
mnt_sort = all_mnt.tolist()
mnt_sort.sort(key=lambda x: x[3], reverse=True)
mnt_sort = np.array(mnt_sort)
# cal distance
inrange = distance(mnt_sort, mnt_sort, max_D=16, max_O=2 * np.pi).astype(np.float32)
keep_list = np.ones(mnt_sort.shape[0])
for i in range(mnt_sort.shape[0]):
if keep_list[i] == 0:
continue
keep_list[i + 1:] = keep_list[i + 1:] * (1 - inrange[i, i + 1:])
return mnt_sort[keep_list.astype(np.bool), :]
def py_cpu_nms(det, thresh):
if det.shape[0] == 0:
return det
dets = det.tolist()
dets.sort(key=lambda x: x[3], reverse=True)
dets = np.array(dets)
box_sz = 25
x1 = np.reshape(dets[:, 0], [-1, 1]) - box_sz
y1 = np.reshape(dets[:, 1], [-1, 1]) - box_sz
x2 = np.reshape(dets[:, 0], [-1, 1]) + box_sz
y2 = np.reshape(dets[:, 1], [-1, 1]) + box_sz
scores = dets[:, 2]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return dets[keep, :]
def draw_minutiae(image, minutiae, fname, saveimage=False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
# plt.hold(True)
# Check if no minutiae
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
if drawScore == True:
plt.text(x - 10, y - 10, '%.2f' % s, color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight', pad_inches=0)
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay(image, minutiae, mnt_gt, fname, saveimage=False, r=15, drawScore=False):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
plt.hold(True)
if mnt_gt.shape[1] > 3:
mnt_gt = mnt_gt[:, :3]
if mnt_gt.shape[0] > 0:
if mnt_gt.shape[1] > 3:
mnt_gt = mnt_gt[:, :3]
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
for x, y, o in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
        for m in minutiae:
            x, y, o = m[0], m[1], m[2]
            plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
            if drawScore == True and len(m) > 3:
                plt.text(x - 10, y - 10, '%.2f' % m[3], color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
plt.show()
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_minutiae_overlay_with_score(image, minutiae, mnt_gt, fname, saveimage=False, r=15):
image = np.squeeze(image)
fig = plt.figure()
plt.imshow(image, cmap='gray')
plt.hold(True)
if mnt_gt.shape[0] > 0:
plt.plot(mnt_gt[:, 0], mnt_gt[:, 1], 'bs', fillstyle='none', linewidth=1)
if mnt_gt.shape[1] > 3:
for x, y, o, s in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
plt.text(x - 10, y - 5, '%.2f' % s, color='green', fontsize=4)
else:
for x, y, o in mnt_gt:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'b-')
if minutiae.shape[0] > 0:
plt.plot(minutiae[:, 0], minutiae[:, 1], 'rs', fillstyle='none', linewidth=1)
for x, y, o, s in minutiae:
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
plt.text(x - 10, y - 10, '%.2f' % s, color='yellow', fontsize=4)
plt.axis([0, image.shape[1], image.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, dpi=500, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def draw_ori_on_img(img, ori, mask, fname, saveimage=False, coh=None, stride=16):
ori = np.squeeze(ori)
# mask = np.squeeze(np.round(mask))
img = np.squeeze(img)
ori = ndimage.zoom(ori, np.array(img.shape) / np.array(ori.shape, dtype=float), order=0)
if mask.shape != img.shape:
mask = ndimage.zoom(mask, np.array(img.shape) / np.array(mask.shape, dtype=float), order=0)
if coh is None:
coh = np.ones_like(img)
fig = plt.figure()
plt.imshow(img, cmap='gray')
plt.hold(True)
for i in range(stride, img.shape[0], stride):
for j in range(stride, img.shape[1], stride):
if mask[i, j] == 0:
continue
x, y, o, r = j, i, ori[i, j], coh[i, j] * (stride * 0.9)
plt.plot([x, x + r * np.cos(o)], [y, y + r * np.sin(o)], 'r-')
plt.axis([0, img.shape[1], img.shape[0], 0])
plt.axis('off')
if saveimage:
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
else:
plt.show()
return
def local_constrast_enhancement(img):
img = img.astype(np.float32)
meanV = cv2.blur(img, (15, 15))
normalized = img - meanV
var = abs(normalized)
var = cv2.blur(var, (15, 15))
normalized = normalized / (var + 10) * 0.75
normalized = np.clip(normalized, -1, 1)
normalized = (normalized + 1) * 127.5
return normalized
def get_quality_map_ori_dict(img, dict, spacing, dir_map=None, block_size=16):
if img.dtype == 'uint8':
img = img.astype(np.float)
img = FastEnhanceTexture(img)
h, w = img.shape
blkH, blkW = dir_map.shape
quality_map = np.zeros((blkH, blkW), dtype=np.float)
fre_map = np.zeros((blkH, blkW), dtype=np.float)
ori_num = len(dict)
# dir_map = math.pi/2 - dir_map
dir_ind = dir_map * ori_num / math.pi
dir_ind = dir_ind.astype(np.int)
dir_ind = dir_ind % ori_num
patch_size = np.sqrt(dict[0].shape[1])
patch_size = patch_size.astype(np.int)
pad_size = (patch_size - block_size) // 2
img = np.lib.pad(img, (pad_size, pad_size), 'symmetric')
for i in range(0, blkH):
for j in range(0, blkW):
ind = dir_ind[i, j]
patch = img[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size]
patch = patch.reshape(patch_size * patch_size, )
patch = patch - np.mean(patch)
patch = patch / (np.linalg.norm(patch) + 0.0001)
patch[patch > 0.05] = 0.05
patch[patch < -0.05] = -0.05
simi = np.dot(dict[ind], patch)
similar_ind = np.argmax(abs(simi))
quality_map[i, j] = np.max(abs(simi))
fre_map[i, j] = 1. / spacing[ind][similar_ind]
quality_map = gaussian(quality_map, sigma=2)
return quality_map, fre_map
def FastEnhanceTexture(img, sigma=2.5, show=False):
img = img.astype(np.float32)
h, w = img.shape
h2 = 2 ** nextpow2(h)
w2 = 2 ** nextpow2(w)
FFTsize = np.max([h2, w2])
x, y = np.meshgrid(range(-FFTsize // 2, FFTsize // 2), range(-FFTsize // 2, FFTsize // 2))
r = np.sqrt(x * x + y * y) + 0.0001
    r = r / FFTsize
    L = 1. / (1 + (2 * math.pi * r * sigma) ** 4)
img_low = LowpassFiltering(img, L)
gradim1 = compute_gradient_norm(img)
gradim1 = LowpassFiltering(gradim1, L)
gradim2 = compute_gradient_norm(img_low)
gradim2 = LowpassFiltering(gradim2, L)
diff = gradim1 - gradim2
ar1 = np.abs(gradim1)
    diff[ar1 > 1] = diff[ar1 > 1] / ar1[ar1 > 1]
diff[ar1 <= 1] = 0
cmin = 0.3
cmax = 0.7
weight = (diff - cmin) / (cmax - cmin)
weight[diff < cmin] = 0
weight[diff > cmax] = 1
u = weight * img_low + (1 - weight) * img
temp = img - u
lim = 20
temp1 = (temp + lim) * 255 / (2 * lim)
temp1[temp1 < 0] = 0
temp1[temp1 > 255] = 255
v = temp1
if show:
plt.imshow(v, cmap='gray')
plt.show()
return v
def compute_gradient_norm(input):
input = input.astype(np.float32)
Gx, Gy = np.gradient(input)
out = np.sqrt(Gx * Gx + Gy * Gy) + 0.000001
return out
def LowpassFiltering(img, L):
h, w = img.shape
h2, w2 = L.shape
img = cv2.copyMakeBorder(img, 0, h2 - h, 0, w2 - w, cv2.BORDER_CONSTANT, value=0)
img_fft = np.fft.fft2(img)
img_fft = np.fft.fftshift(img_fft)
img_fft = img_fft * L
rec_img = np.fft.ifft2(np.fft.fftshift(img_fft))
rec_img = np.real(rec_img)
rec_img = rec_img[:h, :w]
return rec_img
def nextpow2(x):
return int(math.ceil(math.log(x, 2)))
def construct_dictionary(ori_num=30):
ori_dict = []
s = []
for i in range(ori_num):
ori_dict.append([])
s.append([])
patch_size2 = 16
patch_size = 32
dict_all = []
spacing_all = []
ori_all = []
Y, X = np.meshgrid(range(-patch_size2, patch_size2), range(-patch_size2, patch_size2))
for spacing in range(6, 13):
for valley_spacing in range(3, spacing // 2):
ridge_spacing = spacing - valley_spacing
for k in range(ori_num):
theta = np.pi / 2 - k * np.pi / ori_num
X_r = X * np.cos(theta) - Y * np.sin(theta)
for offset in range(0, spacing - 1, 2):
X_r_offset = X_r + offset + ridge_spacing / 2
X_r_offset = np.remainder(X_r_offset, spacing)
Y1 = np.zeros((patch_size, patch_size))
Y2 = np.zeros((patch_size, patch_size))
Y1[X_r_offset <= ridge_spacing] = X_r_offset[X_r_offset <= ridge_spacing]
Y2[X_r_offset > ridge_spacing] = X_r_offset[X_r_offset > ridge_spacing] - ridge_spacing
element = -np.sin(2 * math.pi * (Y1 / ridge_spacing / 2)) + np.sin(
2 * math.pi * (Y2 / valley_spacing / 2))
element = element.reshape(patch_size * patch_size, )
element = element - np.mean(element)
element = element / np.linalg.norm(element)
ori_dict[k].append(element)
s[k].append(spacing)
dict_all.append(element)
spacing_all.append(1.0 / spacing)
ori_all.append(theta)
for i in range(len(ori_dict)):
ori_dict[i] = np.asarray(ori_dict[i])
        s[i] = np.asarray(s[i])
dict_all = np.asarray(dict_all)
dict_all = np.transpose(dict_all)
spacing_all = np.asarray(spacing_all)
ori_all = np.asarray(ori_all)
return ori_dict, s, dict_all, ori_all, spacing_all
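# Example (sketch): the dictionary only depends on `ori_num`, so it can be
# built once and reused across images.
#
#     ori_dict, s, dict_all, ori_all, spacing_all = construct_dictionary(ori_num=30)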
def get_maps_STFT(img, patch_size=64, block_size=16, preprocess=False):
assert len(img.shape) == 2
nrof_dirs = 16
ovp_size = (patch_size - block_size) // 2
if preprocess:
img = FastEnhanceTexture(img, sigma=2.5, show=False)
img = np.lib.pad(img, (ovp_size, ovp_size), 'symmetric')
h, w = img.shape
blkH = (h - patch_size) // block_size + 1
blkW = (w - patch_size) // block_size + 1
local_info = np.empty((blkH, blkW), dtype=object)
x, y = np.meshgrid(range(-patch_size // 2, patch_size // 2), range(-patch_size // 2, patch_size // 2))
x = x.astype(np.float32)
y = y.astype(np.float32)
r = np.sqrt(x * x + y * y) + 0.0001
RMIN = 3 # min allowable ridge spacing
RMAX = 18 # maximum allowable ridge spacing
FLOW = patch_size // RMAX
FHIGH = patch_size // RMIN
    dRLow = 1. / (1 + (r / FHIGH) ** 4)
    dRHigh = 1. / (1 + (FLOW / r) ** 4)
dBPass = dRLow * dRHigh # bandpass
dir = np.arctan2(y, x)
dir[dir < 0] = dir[dir < 0] + math.pi
    dir_ind = np.floor(dir / (math.pi / nrof_dirs))
dir_ind = dir_ind.astype(np.int, copy=False)
dir_ind[dir_ind == nrof_dirs] = 0
dir_ind_list = []
for i in range(nrof_dirs):
tmp = np.argwhere(dir_ind == i)
dir_ind_list.append(tmp)
sigma = patch_size // 3
    weight = np.exp(-(x * x + y * y) / (sigma * sigma))
for i in range(0, blkH):
for j in range(0, blkW):
patch = img[i * block_size:i * block_size + patch_size, j * block_size:j * block_size + patch_size].copy()
local_info[i, j] = local_STFT(patch, weight, dBPass)
local_info[i, j].analysis(r, dir_ind_list)
# get the ridge flow from the local information
dir_map, fre_map = get_ridge_flow_top(local_info)
dir_map = smooth_dir_map(dir_map)
return dir_map, fre_map
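# Rough usage sketch (assumes `img` is a 2-D grayscale fingerprint image):
#
#     dir_map, fre_map = get_maps_STFT(img, patch_size=64, block_size=16, preprocess=True)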
def smooth_dir_map(dir_map, sigma=2.0, mask=None):
cos2Theta = np.cos(dir_map * 2)
sin2Theta = np.sin(dir_map * 2)
if mask is not None:
assert (dir_map.shape[0] == mask.shape[0])
assert (dir_map.shape[1] == mask.shape[1])
cos2Theta[mask == 0] = 0
sin2Theta[mask == 0] = 0
cos2Theta = gaussian(cos2Theta, sigma, multichannel=False, mode='reflect')
sin2Theta = gaussian(sin2Theta, sigma, multichannel=False, mode='reflect')
dir_map = np.arctan2(sin2Theta, cos2Theta) * 0.5
return dir_map
def get_ridge_flow_top(local_info):
blkH, blkW = local_info.shape
dir_map = np.zeros((blkH, blkW)) - 10
fre_map = np.zeros((blkH, blkW)) - 10
for i in range(blkH):
for j in range(blkW):
if local_info[i, j].ori is None:
continue
dir_map[i, j] = local_info[i, j].ori[0] # + math.pi*0.5
fre_map[i, j] = local_info[i, j].fre[0]
return dir_map, fre_map
class local_STFT:
def __init__(self, patch, weight=None, dBPass=None):
if weight is not None:
patch = patch * weight
patch = patch - np.mean(patch)
norm = np.linalg.norm(patch)
patch = patch / (norm + 0.000001)
f = np.fft.fft2(patch)
fshift = np.fft.fftshift(f)
if dBPass is not None:
fshift = dBPass * fshift
self.patch_FFT = fshift
self.patch = patch
self.ori = None
self.fre = None
self.confidence = None
self.patch_size = patch.shape[0]
def analysis(self, r, dir_ind_list=None, N=2):
assert (dir_ind_list is not None)
energy = np.abs(self.patch_FFT)
energy = energy / (np.sum(energy) + 0.00001)
nrof_dirs = len(dir_ind_list)
ori_interval = math.pi / nrof_dirs
ori_interval2 = ori_interval / 2
pad_size = 1
dir_norm = np.zeros((nrof_dirs + 2,))
for i in range(nrof_dirs):
tmp = energy[dir_ind_list[i][:, 0], dir_ind_list[i][:, 1]]
dir_norm[i + 1] = np.sum(tmp)
dir_norm[0] = dir_norm[nrof_dirs]
dir_norm[nrof_dirs + 1] = dir_norm[1]
# smooth dir_norm
smoothed_dir_norm = dir_norm
for i in range(1, nrof_dirs + 1):
smoothed_dir_norm[i] = (dir_norm[i - 1] + dir_norm[i] * 4 + dir_norm[i + 1]) / 6
smoothed_dir_norm[0] = smoothed_dir_norm[nrof_dirs]
smoothed_dir_norm[nrof_dirs + 1] = smoothed_dir_norm[1]
den = np.sum(smoothed_dir_norm[1:nrof_dirs + 1]) + 0.00001 # verify if den == 1
smoothed_dir_norm = smoothed_dir_norm / den # normalization if den == 1, this line can be removed
ori = []
fre = []
confidence = []
wenergy = energy * r
for i in range(1, nrof_dirs + 1):
if smoothed_dir_norm[i] > smoothed_dir_norm[i - 1] and smoothed_dir_norm[i] > smoothed_dir_norm[i + 1]:
tmp_ori = (i - pad_size) * ori_interval + ori_interval2 + math.pi / 2
ori.append(tmp_ori)
confidence.append(smoothed_dir_norm[i])
tmp_fre = np.sum(wenergy[dir_ind_list[i - pad_size][:, 0], dir_ind_list[i - pad_size][:, 1]]) / \
dir_norm[i]
tmp_fre = 1 / (tmp_fre + 0.00001)
fre.append(tmp_fre)
if len(confidence) > 0:
confidence = np.asarray(confidence)
fre = np.asarray(fre)
ori = np.asarray(ori)
ind = confidence.argsort()[::-1]
confidence = confidence[ind]
fre = fre[ind]
ori = ori[ind]
if len(confidence) >= 2 and confidence[0] / confidence[1] > 2.0:
self.ori = [ori[0]]
self.fre = [fre[0]]
self.confidence = [confidence[0]]
elif len(confidence) > N:
fre = fre[:N]
ori = ori[:N]
confidence = confidence[:N]
self.ori = ori
self.fre = fre
self.confidence = confidence
else:
self.ori = ori
self.fre = fre
self.confidence = confidence
def get_features_of_topN(self, N=2):
if self.confidence is None:
self.border_wave = None
return
candi_num = len(self.ori)
candi_num = np.min([candi_num, N])
patch_size = self.patch_FFT.shape
for i in range(candi_num):
kernel = gabor_kernel(self.fre[i], theta=self.ori[i], sigma_x=10, sigma_y=10)
kernel_f = np.fft.fft2(kernel.real, patch_size)
kernel_f = np.fft.fftshift(kernel_f)
patch_f = self.patch_FFT * kernel_f
patch_f = np.fft.ifftshift(patch_f) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(patch_f))
plt.subplot(121), plt.imshow(self.patch, cmap='gray')
plt.title('Input patch'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(rec_patch, cmap='gray')
plt.title('filtered patch'), plt.xticks([]), plt.yticks([])
plt.show()
def reconstruction(self, weight=None):
f_ifft = np.fft.ifftshift(self.patch_FFT) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(f_ifft))
if weight is not None:
rec_patch = rec_patch * weight
return rec_patch
def gabor_filtering(self, theta, fre, weight=None):
patch_size = self.patch_FFT.shape
kernel = gabor_kernel(fre, theta=theta, sigma_x=4, sigma_y=4)
f = kernel.real
f = f - np.mean(f)
f = f / (np.linalg.norm(f) + 0.0001)
kernel_f = np.fft.fft2(f, patch_size)
kernel_f = np.fft.fftshift(kernel_f)
patch_f = self.patch_FFT * kernel_f
patch_f = np.fft.ifftshift(patch_f) # *np.sqrt(np.abs(fshift)))
rec_patch = np.real(np.fft.ifft2(patch_f))
if weight is not None:
rec_patch = rec_patch * weight
return rec_patch
def show_orientation_field(img, dir_map, mask=None, fname=None):
h, w = img.shape[:2]
if mask is None:
mask = np.ones((h, w), dtype=np.uint8)
blkH, blkW = dir_map.shape
blk_size = h / blkH
R = blk_size / 2 * 0.8
fig, ax = plt.subplots(1)
ax.imshow(img, cmap='gray')
for i in range(blkH):
y0 = i * blk_size + blk_size / 2
y0 = int(y0)
for j in range(blkW):
x0 = j * blk_size + blk_size / 2
x0 = int(x0)
ori = dir_map[i, j]
if mask[y0, x0] == 0:
continue
if ori < -9:
continue
x1 = x0 - R * math.cos(ori)
x2 = x0 + R * math.cos(ori)
y1 = y0 - R * math.sin(ori)
y2 = y0 + R * math.sin(ori)
plt.plot([x1, x2], [y1, y2], 'r-', lw=0.5)
plt.axis('off')
if fname is not None:
fig.savefig(fname, dpi=500, bbox_inches='tight', pad_inches=0)
plt.close()
else:
plt.show(block=True)
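# Example call (sketch): save the orientation overlay to disk instead of
# showing it interactively.
#
#     show_orientation_field(img, dir_map, mask=None, fname='orientation.png')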
| 2.125 | 2 |
config/paths.py | fusic-com/flask-todo | 34 | 11463 | from settings import VAR_DIR
CACHE=VAR_DIR/'cache'
| 1.273438 | 1 |
Android.py | ChakradharG/Sudoku-Core | 0 | 11464 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #To suppress warnings thrown by tensorflow
from time import sleep
import numpy as np
from cv2 import cv2
import pyautogui as pg
import Sudoku_Core as SC
import OCR
s = 513//9 #Size of board//9
fs = 25 #Size of the final image
def getBoard():
pg.click(266, 740)
sleep(1)
pg.click(266, 930) #Changing the difficulty to expert
sleep(2)
image = pg.screenshot(region=(10, 187, 513, 513))
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2GRAY)
_,image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
return image
def readBoard(image):
for i in range(9):
for j in range(9):
subImage = image[i*s + 3: (i+1)*s - 3, j*s + 3: (j+1)*s - 3] #(+3, -3) is a hack to remove border contours
contour, _ = cv2.findContours(subImage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contour != []:
(x, y, w, h) = cv2.boundingRect(contour[0])
img = cv2.resize(subImage[y: y+h, x: x+w], (fs, fs), interpolation=cv2.INTER_AREA)
else:
img = np.zeros((fs,fs), dtype='uint8')
SC.board[i][j] = OCR.model.predict(img.reshape(1, fs, fs, 1)).argmax()
def outputBoard():
for ((posY, posX), v) in SC.moves.items():
posX = 42 + posX * 57
posY = 216 + posY * 57
pg.moveTo(posX, posY, 0.1)
pg.click()
# vX = 42 + 55*(v-1)
# vY = 843
# pg.moveTo(vX, vY, 0.1) #To use the numpad in the app
# pg.click()
pg.typewrite(str(v)) #To send numbers from the keyboard
def main():
image = getBoard()
readBoard(image)
print('Got the board, now solving')
if SC.solve(0, 0):
outputBoard()
else:
print('Couldn\'t solve')
input('Press any key to exit')
if __name__ == '__main__':
main()
| 2.65625 | 3 |
app/model.py | kurapikaaaa/CITS3403Project | 1 | 11465 | from app import db, login
from flask_login import UserMixin
from datetime import datetime
from flask import url_for, redirect
from werkzeug.security import generate_password_hash, check_password_hash
class users(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
username = db.Column(db.String(100), nullable=False, unique=True)
password = db.Column(db.String(96), nullable=False)
email = db.Column(db.String(128), nullable=False, unique=True)
firstname = db.Column(db.String(130), nullable=False)
lastname = db.Column(db.String(130), nullable=False)
lastLogin = db.Column(db.DateTime)
isActive = db.Column(db.Boolean)
isAdmin = db.Column(db.Boolean)
noteHighScore = db.Column(db.Integer)
KeyHighScore = db.Column(db.Integer)
submit = db.relationship("submission", backref="submitter")
###################################################
def __init__(self):
self.isActive = True
self.isAdmin = False
self.noteHighScore = 0
self.lastLogin = None
self.KeyHighScore = 0
def set_password(self, pwd):
self.password = generate_password_hash(pwd, method="<PASSWORD>")
def check_password(self, pwd):
return check_password_hash(self.password, pwd)
def is_active(self):
return self.isActive
def validate(self):
if self.username and self.email and self.firstname and self.lastname:
return True
else:
return False
def getSubmissions(self):
res = submission.query.filter_by(creater_id=self.id).all()
return res
def __repr__(self):
return '<user %r>' % self.username
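# Minimal usage sketch (assumes an app context and an initialised database;
# the field values below are made up for illustration):
#
#     u = users()
#     u.username, u.email = 'alice', 'alice@example.com'
#     u.firstname, u.lastname = 'Alice', 'Smith'
#     u.set_password('a strong password')
#     db.session.add(u)
#     db.session.commit()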
class submission(db.Model):
__tablename__ = 'submission'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
createdAt = db.Column(db.DateTime, nullable=False)
markedAt = db.Column(db.DateTime)
feedback = db.Column(db.Boolean)
totalmark = db.Column(db.Integer)
difficulty = db.Column(db.String(30), nullable=False)
passed = db.Column(db.Boolean)
creater_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
answers = db.relationship("answer", backref="submission")
def __init__(self):
self.createdAt = datetime.utcnow()
self.markedAt = None
self.feedback = False
self.totalmark = None
self.marked = False
self.passed = False
def validate(self):
if self.difficulty and self.creater_id and self.createdAt:
return True
def __repr__(self):
return '<submission %r>' % self.id
class answer(db.Model):
__tablename__ = 'answer'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
answerSeq = db.Column(db.Integer)
submittedAnswer = db.Column(db.String(400))
feedback = db.Column(db.String(400))
markreceived = db.Column(db.Boolean)
submissionId = db.Column(db.Integer, db.ForeignKey("submission.id"))
def __init__(self):
self.feedback = None
self.markreceived = False
def validate(self):
if self.answerSeq and self.submittedAnswer and self.submissionId:
return True
else:
print("missingfield")
return False
def __repr__(self):
return '<ans>'
@login.user_loader
def load_user(usr_id):
return users.query.get(int(usr_id))
@login.unauthorized_handler
def unauthorized():
return redirect(url_for("auth.login"))
| 2.625 | 3 |
bert_multitask_learning/top.py | akashnd/bert-multitask-learning | 1 | 11466 | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/12_top.ipynb (unless otherwise specified).
__all__ = ['empty_tensor_handling_loss', 'nan_loss_handling', 'create_dummy_if_empty', 'BaseTop', 'SequenceLabel',
'Classification', 'PreTrain', 'Seq2Seq', 'MultiLabelClassification', 'MaskLM']
# Cell
import logging
from functools import partial
from typing import Dict, Tuple, Union
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
from transformers.modeling_tf_utils import TFSharedEmbeddings
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from .params import BaseParams
from .utils import gather_indexes
@tf.function
def empty_tensor_handling_loss(labels, logits, loss_fn):
if tf.equal(tf.size(labels), 0):
return 0.0
if tf.equal(tf.size(tf.shape(labels)), 0):
return 0.0
if tf.equal(tf.shape(labels)[0], 0):
return 0.0
else:
return tf.reduce_mean(loss_fn(
labels, logits, from_logits=True))
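# Sketch of how this helper is used further down in this module:
#
#     loss = empty_tensor_handling_loss(
#         labels, logits, tf.keras.losses.sparse_categorical_crossentropy)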
@tf.function
def nan_loss_handling(loss):
if tf.math.is_nan(loss):
return 0.0
else:
return loss
@tf.function
def create_dummy_if_empty(inp_tensor: tf.Tensor) -> tf.Tensor:
shape_tensor = tf.shape(inp_tensor)
if tf.equal(shape_tensor[0], 0):
data_type = inp_tensor.dtype
dummy_shape_first_dim = tf.convert_to_tensor([1], dtype=tf.int32)
dummy_shape = tf.concat(
[dummy_shape_first_dim, shape_tensor[1:]], axis=0)
dummy_tensor = tf.zeros(dummy_shape, dtype=data_type)
return dummy_tensor
else:
return inp_tensor
class BaseTop(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(BaseTop, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
def call(self, inputs: Tuple[Dict], mode: str):
raise NotImplementedError
# Cell
class SequenceLabel(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str):
super(SequenceLabel, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
num_classes = self.params.num_classes[self.problem_name]
self.dense = tf.keras.layers.Dense(num_classes, activation=None)
self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
if self.params.crf:
self.crf = CRF(num_classes)
self.metric_fn = tf.keras.metrics.Accuracy(
name='{}_acc'.format(self.problem_name)
)
else:
self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
name='{}_acc'.format(self.problem_name))
def return_crf_result(self, labels: tf.Tensor, logits: tf.Tensor, mode: str, input_mask: tf.Tensor):
input_mask.set_shape([None, None])
logits = create_dummy_if_empty(logits)
input_mask = create_dummy_if_empty(input_mask)
viterbi_decoded, potentials, sequence_length, chain_kernel = self.crf(
logits, input_mask)
if mode != tf.estimator.ModeKeys.PREDICT:
loss = -crf_log_likelihood(potentials,
labels, sequence_length, chain_kernel)[0]
loss = tf.reduce_mean(loss)
loss = nan_loss_handling(loss)
self.add_loss(loss)
acc = self.metric_fn(
labels, viterbi_decoded, sample_weight=input_mask)
self.add_metric(acc)
# make the crf prediction has the same shape as non-crf prediction
return tf.one_hot(viterbi_decoded, name='%s_predict' % self.problem_name, depth=self.params.num_classes[self.problem_name])
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
            # sometimes the length of labels does not equal the length of inputs
# that's caused by tf.data.experimental.bucket_by_sequence_length in multi problem scenario
pad_len = tf.shape(input=hidden_feature)[
1] - tf.shape(input=labels)[1]
# top, bottom, left, right
pad_tensor = [[0, 0], [0, pad_len]]
labels = tf.pad(tensor=labels, paddings=pad_tensor)
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
if self.params.crf:
return self.return_crf_result(labels, hidden_feature, mode, feature['model_input_mask'])
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
loss = empty_tensor_handling_loss(
labels, logits,
tf.keras.losses.sparse_categorical_crossentropy)
self.add_loss(loss)
acc = self.metric_fn(
labels, logits, sample_weight=feature['model_input_mask'])
self.add_metric(acc)
return tf.nn.softmax(
logits, name='%s_predict' % self.problem_name)
# Cell
class Classification(tf.keras.layers.Layer):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(Classification, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
num_classes = self.params.num_classes[self.problem_name]
self.dense = tf.keras.layers.Dense(num_classes, activation=None)
self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
name='{}_acc'.format(self.problem_name))
self.dropout = tf.keras.layers.Dropout(1-params.dropout_keep_prob)
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['pooled']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
# labels = tf.squeeze(labels)
# convert labels to one-hot to use label_smoothing
one_hot_labels = tf.one_hot(
labels, depth=self.params.num_classes[self.problem_name])
loss_fn = partial(tf.keras.losses.categorical_crossentropy,
from_logits=True, label_smoothing=self.params.label_smoothing)
loss = empty_tensor_handling_loss(
one_hot_labels, logits,
loss_fn)
loss = nan_loss_handling(loss)
self.add_loss(loss)
acc = self.metric_fn(labels, logits)
self.add_metric(acc)
return tf.nn.softmax(
logits, name='%s_predict' % self.problem_name)
# Cell
class PreTrain(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.Tensor=None, share_embedding=True):
super(PreTrain, self).__init__(name=problem_name)
self.params = params
self.nsp = transformers.models.bert.modeling_tf_bert.TFBertNSPHead(
self.params.bert_config)
if share_embedding is False:
self.vocab_size = self.params.bert_config.vocab_size
self.share_embedding = False
else:
word_embedding_weight = input_embeddings.word_embeddings
self.vocab_size = word_embedding_weight.shape[0]
embedding_size = word_embedding_weight.shape[-1]
share_valid = (self.params.bert_config.hidden_size ==
embedding_size)
if not share_valid and self.params.share_embedding:
logging.warning(
'Share embedding is enabled but hidden_size != embedding_size')
self.share_embedding = self.params.share_embedding & share_valid
if self.share_embedding:
self.share_embedding_layer = TFSharedEmbeddings(
vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
self.share_embedding_layer.build([1])
self.share_embedding_layer.weight = word_embedding_weight
else:
self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
def call(self,
inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
mode: str) -> Tuple[tf.Tensor, tf.Tensor]:
features, hidden_features = inputs
# compute logits
nsp_logits = self.nsp(hidden_features['pooled'])
# masking is done inside the model
seq_hidden_feature = hidden_features['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
positions = features['masked_lm_positions']
# gather_indexes will flatten the seq hidden_states, we need to reshape
# back to 3d tensor
input_tensor = gather_indexes(seq_hidden_feature, positions)
shape_tensor = tf.shape(positions)
shape_list = tf.concat(
[shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine rank
input_tensor.set_shape(
[None, None, seq_hidden_feature.shape.as_list()[-1]])
else:
input_tensor = seq_hidden_feature
if self.share_embedding:
mlm_logits = self.share_embedding_layer(
input_tensor, mode='linear')
else:
mlm_logits = self.share_embedding_layer(input_tensor)
if mode != tf.estimator.ModeKeys.PREDICT:
nsp_labels = features['next_sentence_label_ids']
mlm_labels = features['masked_lm_ids']
mlm_labels.set_shape([None, None])
# compute loss
nsp_loss = empty_tensor_handling_loss(
nsp_labels, nsp_logits,
tf.keras.losses.sparse_categorical_crossentropy)
mlm_loss_layer = transformers.modeling_tf_utils.TFMaskedLanguageModelingLoss()
# mlm_loss = tf.reduce_mean(
# mlm_loss_layer.compute_loss(mlm_labels, mlm_logits))
# add a useless from_logits argument to match the function signature of keras losses.
def loss_fn_wrapper(labels, logits, from_logits=True):
return mlm_loss_layer.compute_loss(labels, logits)
mlm_loss = empty_tensor_handling_loss(
mlm_labels,
mlm_logits,
loss_fn_wrapper
)
loss = nsp_loss + mlm_loss
self.add_loss(loss)
return (tf.sigmoid(nsp_logits), tf.nn.softmax(mlm_logits))
# Cell
class Seq2Seq(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer):
super(Seq2Seq, self).__init__(name=problem_name)
# self.params = params
# self.problem_name = problem_name
# # if self.params.init_weight_from_huggingface:
# # self.decoder = load_transformer_model(
# # self.params.transformer_decoder_model_name,
# # self.params.transformer_decoder_model_loading)
# # else:
# # self.decoder = load_transformer_model(
# # self.params.bert_decoder_config, self.params.transformer_decoder_model_loading)
# # TODO: better implementation
# logging.warning(
# 'Seq2Seq model is not well supported yet. Bugs are expected.')
# config = self.params.bert_decoder_config
# # some hacky approach to share embeddings from encoder to decoder
# word_embedding_weight = input_embeddings.word_embeddings
# self.vocab_size = word_embedding_weight.shape[0]
# self.share_embedding_layer = TFSharedEmbeddings(
# vocab_size=word_embedding_weight.shape[0], hidden_size=word_embedding_weight.shape[1])
# self.share_embedding_layer.build([1])
# self.share_embedding_layer.weight = word_embedding_weight
# # self.decoder = TFBartDecoder(
# # config=config, embed_tokens=self.share_embedding_layer)
# self.decoder = TFBartDecoderForConditionalGeneration(
# config=config, embedding_layer=self.share_embedding_layer)
# self.decoder.set_bos_id(self.params.bos_id)
# self.decoder.set_eos_id(self.params.eos_id)
# self.metric_fn = tf.keras.metrics.SparseCategoricalAccuracy(
# name='{}_acc'.format(self.problem_name))
raise NotImplementedError
def _seq2seq_label_shift_right(self, labels: tf.Tensor, eos_id: int) -> tf.Tensor:
batch_eos_ids = tf.fill([tf.shape(labels)[0], 1], eos_id)
batch_eos_ids = tf.cast(batch_eos_ids, dtype=tf.int64)
decoder_lable = labels[:, 1:]
decoder_lable = tf.concat([decoder_lable, batch_eos_ids], axis=1)
return decoder_lable
def call(self,
inputs: Tuple[Dict[str, Dict[str, tf.Tensor]], Dict[str, Dict[str, tf.Tensor]]],
mode: str):
features, hidden_features = inputs
encoder_mask = features['model_input_mask']
if mode == tf.estimator.ModeKeys.PREDICT:
input_ids = None
decoder_padding_mask = None
else:
input_ids = features['%s_label_ids' % self.problem_name]
decoder_padding_mask = features['{}_mask'.format(
self.problem_name)]
if mode == tf.estimator.ModeKeys.PREDICT:
return self.decoder.generate(eos_token_id=self.params.eos_id, encoder_hidden_states=hidden_features['seq'])
else:
decoder_output = self.decoder(input_ids=input_ids,
encoder_hidden_states=hidden_features['seq'],
encoder_padding_mask=encoder_mask,
decoder_padding_mask=decoder_padding_mask,
decode_max_length=self.params.decode_max_seq_len,
mode=mode)
loss = decoder_output.loss
logits = decoder_output.logits
self.add_loss(loss)
decoder_label = self._seq2seq_label_shift_right(
features['%s_label_ids' % self.problem_name], eos_id=self.params.eos_id)
acc = self.metric_fn(decoder_label, logits)
self.add_metric(acc)
return logits
# Cell
class MultiLabelClassification(tf.keras.Model):
def __init__(self, params: BaseParams, problem_name: str) -> None:
super(MultiLabelClassification, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
self.dense = tf.keras.layers.Dense(
self.params.num_classes[problem_name])
self.dropout = tf.keras.layers.Dropout(
1-self.params.dropout_keep_prob
)
# self.metric_fn = tfa.metrics.F1Score(
# num_classes=self.params.num_classes[problem_name],
# threshold=self.params.multi_cls_threshold,
# average='macro',
# name='{}_f1'.format(problem_name))
def call(self, inputs, mode):
training = (mode == tf.estimator.ModeKeys.TRAIN)
feature, hidden_feature = inputs
hidden_feature = hidden_feature['pooled']
if mode != tf.estimator.ModeKeys.PREDICT:
labels = feature['{}_label_ids'.format(self.problem_name)]
else:
labels = None
hidden_feature = self.dropout(hidden_feature, training)
logits = self.dense(hidden_feature)
if mode != tf.estimator.ModeKeys.PREDICT:
labels = tf.cast(labels, tf.float32)
# use weighted loss
label_weights = self.params.multi_cls_positive_weight
def _loss_fn_wrapper(x, y, from_logits=True):
return tf.nn.weighted_cross_entropy_with_logits(x, y, pos_weight=label_weights, name='{}_loss'.format(self.problem_name))
loss = empty_tensor_handling_loss(
labels, logits, _loss_fn_wrapper)
loss = nan_loss_handling(loss)
self.add_loss(loss)
# labels = create_dummy_if_empty(labels)
# logits = create_dummy_if_empty(logits)
# f1 = self.metric_fn(labels, logits)
# self.add_metric(f1)
return tf.nn.sigmoid(
logits, name='%s_predict' % self.problem_name)
# Cell
class MaskLM(tf.keras.Model):
"""Multimodal MLM top layer.
"""
def __init__(self, params: BaseParams, problem_name: str, input_embeddings: tf.keras.layers.Layer=None, share_embedding=True) -> None:
super(MaskLM, self).__init__(name=problem_name)
self.params = params
self.problem_name = problem_name
if share_embedding is False:
self.vocab_size = self.params.bert_config.vocab_size
self.share_embedding = False
else:
word_embedding_weight = input_embeddings.word_embeddings
self.vocab_size = word_embedding_weight.shape[0]
embedding_size = word_embedding_weight.shape[-1]
share_valid = (self.params.bert_config.hidden_size ==
embedding_size)
if not share_valid and self.params.share_embedding:
logging.warning(
'Share embedding is enabled but hidden_size != embedding_size')
self.share_embedding = self.params.share_embedding & share_valid
if self.share_embedding:
self.share_embedding_layer = TFSharedEmbeddings(
vocab_size=self.vocab_size, hidden_size=word_embedding_weight.shape[1])
self.share_embedding_layer.build([1])
self.share_embedding_layer.weight = word_embedding_weight
else:
self.share_embedding_layer = tf.keras.layers.Dense(self.vocab_size)
def call(self, inputs, mode):
features, hidden_features = inputs
# masking is done inside the model
seq_hidden_feature = hidden_features['seq']
if mode != tf.estimator.ModeKeys.PREDICT:
positions = features['masked_lm_positions']
# gather_indexes will flatten the seq hidden_states, we need to reshape
# back to 3d tensor
input_tensor = gather_indexes(seq_hidden_feature, positions)
shape_tensor = tf.shape(positions)
shape_list = tf.concat([shape_tensor, [seq_hidden_feature.shape.as_list()[-1]]], axis=0)
input_tensor = tf.reshape(input_tensor, shape=shape_list)
            # set_shape to determine rank
input_tensor.set_shape(
[None, None, seq_hidden_feature.shape.as_list()[-1]])
else:
input_tensor = seq_hidden_feature
if self.share_embedding:
mlm_logits = self.share_embedding_layer(
input_tensor, mode='linear')
else:
mlm_logits = self.share_embedding_layer(input_tensor)
if mode != tf.estimator.ModeKeys.PREDICT:
mlm_labels = features['masked_lm_ids']
mlm_labels.set_shape([None, None])
# compute loss
mlm_loss = empty_tensor_handling_loss(
mlm_labels,
mlm_logits,
tf.keras.losses.sparse_categorical_crossentropy
)
loss = nan_loss_handling(mlm_loss)
self.add_loss(loss)
return tf.nn.softmax(mlm_logits)
| 1.96875 | 2 |
strings/#387/strings.py | sharmarkei/DSA-Practice | 0 | 11467 | <reponame>sharmarkei/DSA-Practice<gh_stars>0
class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
dict_1 = {}
for i in s:
if i not in dict_1:
dict_1[i] = 1
else:
dict_1[i] += 1
print(dict_1)
for idx, val in enumerate(s):
if dict_1[val] == 1:
return idx
return -1
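if __name__ == "__main__":
    # Quick sanity check (sketch): 'l' is the first non-repeating character.
    print(Solution().firstUniqChar("leetcode"))  # expected output: 0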
| 3.109375 | 3 |
challenge/utils/cancellation_code.py | AlonViz/IML.HUJI | 0 | 11468 | <reponame>AlonViz/IML.HUJI
import re
def process_cancellation_code(code):
regex_days_before = "^(([0-9]+)D)(([0-9]+)N|([0-9]+)P)"
regex_no_show = "(([0-9]+)P|([0-9]+)N)"
options = re.split("_", code)
final = []
for option in options:
days_match = re.match(regex_days_before, option)
if days_match:
days_before = None if days_match.group(2) is None else int(days_match.group(2))
nights_to_pay = None if days_match.group(4) is None else int(days_match.group(4))
percentage = None if days_match.group(5) is None else int(days_match.group(5))
final.append([days_before, nights_to_pay, percentage])
continue
no_show_match = re.match(regex_no_show, option)
if no_show_match:
nights_to_pay = None if no_show_match.group(3) is None else int(no_show_match.group(3))
percentage = None if no_show_match.group(2) is None else int(no_show_match.group(2))
final.append([0, nights_to_pay, percentage])
return final
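# Example (sketch) of the parsed structure, using a made-up policy string:
#
#     process_cancellation_code("365D100P_100P")
#     # -> [[365, None, 100], [0, None, 100]]
#     # i.e. a 100% fine when cancelling within 365 days of arrival, plus a
#     # 100% no-show fine.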
def evaluate_cancellation_code(cancellation_code: str, booking_time_before: int, stay_duration: int) -> float:
"""
    Gives a numerical value to the given cancellation code; returns the expected fine as a percentage.
:return:
"""
cancellations = process_cancellation_code(cancellation_code)
p = min(7, booking_time_before)
chosen_p = min([lst for lst in cancellations if lst[0] > p], key=lambda tup: tup[0], default=[None, None, None])
expected_fine = 0 if chosen_p[0] is None else chosen_p[2] if chosen_p[1] is None else chosen_p[1] / stay_duration
return expected_fine
def filter(cancellation_code: str, booking_time_before: int, stay_duration: int) -> float:
cancellations = process_cancellation_code(cancellation_code)
filtered = [i for i in cancellations if i[0] < booking_time_before]
prec_only = []
for i in filtered:
if i[2] is not None:
prec_only.append([i[0], i[2]])
else:
prec_only.append([i[0], i[1] / stay_duration])
def no_show(cancellation_code: str) -> int:
"""
returns 1 if the cancellation code contains a no-show fee, and 0 otherwise
"""
cancellations = process_cancellation_code(cancellation_code)
return any(lst for lst in cancellations if lst[0] == 0)
def fine_after_x_days(cancellation_code: str, booking_time_before: int, stay_duration: int, days: int):
"""
returns the expected fine in percentages after 'days' days from reservation.
"""
time_before_reservation = booking_time_before - days
if time_before_reservation < 0:
return 0
cancellations = process_cancellation_code(cancellation_code)
# convert cancellation policy to format (Days, Percentage)
percentage_cancellations = []
for cancel in cancellations:
if cancel[1] is None:
percentage_cancellations.append((cancel[0], cancel[2]))
else:
percentage_cancellations.append((cancel[0], cancel[1] / stay_duration))
if not percentage_cancellations:
return 0
# return the fine associated with the smallest number of days larger than time_before_reservation
fines = [x for x in percentage_cancellations if x[0] > time_before_reservation]
if not fines:
return 0
return min(fines, key=lambda x: x[0])[1]
| 2.71875 | 3 |
acronym/scoring.py | sigma67/acronym | 340 | 11469 | import re
regex = re.compile('[^a-zA-Z]')
def score_word(word, corpus=None):
word = regex.sub('', word) # leave only alpha
score = 0
consec_bonus = 2
for i, letter in enumerate(word):
if letter.islower():
continue
        if i > 0 and word[i-1].isupper():
score += consec_bonus
if i == 0:
score += 10
elif (i == 1) or (i == len(word)-1):
score += 3
else:
score += 1
if (i >= 1) and (corpus is not None) and (word[i:].lower() in corpus):
score += len(word[i:])-1
return score
def score_acronym(capitalized_acronym, corpus=None):
"""
For each capitalized letter in the acronym:
    * 10 points if first letter in a word (with the exception of the first word)
    * 3 points if second or last letter in a word
* 1 point otherwise
* N bonus points if begins an N-length valid sub-word
(ex: multiVariable -> 8 bonus points)
    * 2 bonus points if immediately following a capitalized letter
"""
return sum([score_word(word, corpus=corpus) for word in capitalized_acronym.split(' ')]) - 10
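if __name__ == "__main__":
    # Tiny demo (sketch); the exact score follows the rules in the docstring.
    print(score_acronym("Finite State Machine"))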
| 3.828125 | 4 |
e2e_test.py | bartossh/hebbian_mirror | 2 | 11470 | <gh_stars>1-10
import requests
num_of_iter = 2
data = open('./assets/test.jpg', 'rb').read()
for i in range(0, num_of_iter):
res = requests.get(
url='http://0.0.0.0:8000/recognition/object/boxes_names'
)
print("\n RESPONSE GET boxes names for test number {}: \n {}"
.format(i, res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/boxes',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to boxes, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res.__dict__))
res = requests.post(url='http://0.0.0.0:8000/recognition/object/image',
data=data,
headers={'Content-Type': 'application/octet-stream'})
print("\n RESPONSE POST to image, test num {} \n Sending buffer length: {},\n Received {}"
.format(i, len(data), res))
| 3.03125 | 3 |
appendix/auc_accuracy/train_nn_metric.py | rit-git/tagging | 7 | 11471 | <gh_stars>1-10
import argparse
import os
import torch
import torch.nn as nn
from torchtext.data import TabularDataset, BucketIterator
from torchtext.data import Field
from torchtext.vocab import Vectors, GloVe
from tqdm import tqdm, trange
import sys
import os
sys.path.insert(0, "../../pyfunctor")
sys.path.insert(0, "../../model")
from cnn import CNNModel
from lstm import LSTMModel
from bilstm import BILSTMModel
from sklearn import metrics
import csv_handler as csv_handler
import transform as transform
import time
#from util.weight import WeightClassCSV
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_data(path, batch_size, max_seq_length, glove="840B", emb_size=300):
TEXT = Field(sequential=True, fix_length=max_seq_length, lower=True)
LABEL = Field(sequential=False, use_vocab=False)
ID = Field(sequential=False, use_vocab=False)
data_fields = [("id", ID),
("sent", TEXT),
("label", LABEL)]
train_path = os.path.join(path, "train.csv")
train = TabularDataset(path=train_path, format="csv", skip_header=False,
fields=data_fields)
test_path = os.path.join(path, "dev.csv")
test = TabularDataset(path=test_path, format="csv", skip_header=False,
fields=data_fields)
TEXT.build_vocab(train, vectors=GloVe(name=glove, dim=emb_size))
LABEL.build_vocab(train)
vocab_size = len(TEXT.vocab)
vocab_weights = TEXT.vocab.vectors
train_iter = BucketIterator(dataset=train, batch_size=batch_size,
sort_key=lambda x: x.id, shuffle=True, repeat=False)
test_iter = BucketIterator(dataset=test, batch_size=batch_size,
sort_key=lambda x: x.id, shuffle=False, repeat=False)
return train_iter, test_iter, vocab_size, vocab_weights
def F1(predicts, golds):
true_predict = 0
true = 0
predict = 0
for i in range(len(predicts)):
if predicts[i] == 1:
predict += 1
if golds[i] == 1:
true += 1
if predicts[i] == 1 and golds[i] == 1:
true_predict += 1
precision = (true_predict+0.0)/(predict+0.0) if predict>0 else 0
recall = (true_predict+0.0)/(true+0.0) if true>0 else 0
f1 = (2*precision*recall)/(precision+recall) if predict>0 and true>0 else 0
return precision, recall, f1
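# Worked example (sketch): predicts=[1, 0, 1], golds=[1, 1, 1]
# -> precision = 1.0, recall = 2/3, F1 = 0.8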
if __name__ == "__main__":
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",
default=None,
type=str,
required=True,
help="Dataset folder")
parser.add_argument("--model",
default=None,
type=str,
required=True,
help="Model type: CNN, LSTM or BILSTM")
parser.add_argument("--glove",
default="840B",
type=str,
help="Golve version (6B, 42B, 840B)")
parser.add_argument("--emb_size",
default=300,
type=int,
help="Golve embedding size (100, 200, 300)")
parser.add_argument("--max_seq_length",
default=256,
type=int,
help="Maximum sequence length")
parser.add_argument("--num_epoch",
default=9,
type=int,
help="Number of training epoch")
parser.add_argument("--batch_size",
default=32,
type=int,
help="Batch size")
parser.add_argument("--lr",
default=1e-4,
type=float,
help="Learning rate")
parser.add_argument("--fix_emb",
default=False,
type=bool,
help="Fix embedding layer")
parser.add_argument("--log_file",
default=False,
type=str,
required=True,
help="log file path")
args = parser.parse_args()
# Load data
print("Loading data ...")
train_iter, test_iter, vocab_size, vocab_weights = load_data(args.dataset,
args.batch_size, args.max_seq_length, glove=args.glove, emb_size=args.emb_size)
# Initialize model
assert args.model in ["CNN", "LSTM", "BILSTM"], "Only support CNN, LSTM or BILSTM."
if args.model == "CNN":
model = CNNModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
elif args.model == "LSTM":
model = LSTMModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
else:
model = BILSTMModel(vocab_size, args.emb_size, args.max_seq_length,
weights=vocab_weights, fix_emb_weight=args.fix_emb)
model = model.to(device)
# Train
print("Training %s ..." % args.model)
params = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
loss_func = nn.CrossEntropyLoss()
#label_weight = WeightClassCSV(args.dataset + "/train.csv").get_weights(['0', '1'])
#loss_func = nn.CrossEntropyLoss(weight = torch.tensor(label_weight).to(device))
model.train()
for epoch in trange(args.num_epoch, desc="Epoch"):
total_loss = 0
for idx, batch in enumerate(tqdm(train_iter, desc="Iteration")):
inputs, labels = batch.sent, batch.label
inputs = inputs.to(device)
labels = labels.to(device)
logits = model(inputs)
loss = loss_func(logits, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("\tEpoch %d, total loss: %f" % (epoch, total_loss))
train_finish_time = time.time()
train_overall_time = train_finish_time - start_time
# Evaluate
print("Evaluating ...")
model.eval()
predicts = []
golds = []
predicted_proba = []
with torch.no_grad():
for idx, batch in enumerate(tqdm(test_iter, desc="Iteration")):
inputs, labels = batch.sent, batch.label
inputs = inputs.to(device)
logits = model(inputs)
predicted_proba += list(logits.data.cpu().numpy())
predict = torch.argmax(logits, dim=1).data.cpu().numpy()
predicts += list(predict)
golds += list(labels.data.cpu().numpy())
precision, recall, f1 = F1(predicts, golds)
print("Precision: %f, Recall: %f, F1: %f" % (precision, recall, f1))
train_time = train_overall_time
test_time = time.time() - train_finish_time
print(metrics.classification_report(golds, predicts))
(precision, recall, fscore, support) = metrics.precision_recall_fscore_support(golds, predicts)
log_row = []
log_row.append(args.dataset)
log_row.append(precision[1])
log_row.append(recall[1])
log_row.append(fscore[1])
log_row.append(train_time)
log_row.append(test_time)
pos_predicted = transform.map_func(predicted_proba, lambda p : p[1])
auc = metrics.roc_auc_score(golds, pos_predicted)
log_row.append(auc)
accuracy = metrics.accuracy_score(golds, predicts)
log_row.append(accuracy)
csv_handler.append_row(args.log_file, log_row)
| 2.28125 | 2 |
setup.py | teamproserve/pinkopy | 0 | 11472 | <filename>setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
with open('README.md') as f:
readme = f.read()
install_requires = [
'cachetools>=1.1.5',
'requests>=2.7.0',
'xmltodict>=0.9.2',
]
tests_require = [
'pytest',
'requests-mock==0.7.0'
]
setup(
name='pinkopy',
version='2.1.3-dev',
description='Python wrapper for Commvault api',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/teamproserve/pinkopy',
download_url='https://github.com/teamproserve/pinkopy/archive/2.1.3-dev.zip',
packages=find_packages(),
platforms=['all'],
license='MIT',
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=tests_require,
classifiers=[
'Development Status :: 4 - Beta',
'License :: Other/Proprietary License',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
)
| 1.476563 | 1 |
scss/extension/core.py | xen0n/pyScss | 0 | 11473 | """Extension for built-in Sass functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import product
import math
import os.path
from pathlib import PurePosixPath
from six.moves import xrange
from scss.extension import Extension
from scss.namespace import Namespace
from scss.source import SourceFile
from scss.types import (
Arglist, Boolean, Color, List, Null, Number, String, Map, expect_type)
class CoreExtension(Extension):
name = 'core'
namespace = Namespace()
def handle_import(self, name, compilation, rule):
"""Implementation of the core Sass import mechanism, which just looks
for files on disk.
"""
# TODO virtually all of this is the same as the django stuff, except
# for the bit that actually looks for and tries to open the file.
# would be much easier if you could just stick an object in the search
# path that implements the pathlib API. the only problem is what to do
# when one path is a child of another, so the same file has two names,
# but tbh i'm not actually sure that's something worth protecting
# against...? like, the only cost is that we'll parse twice (or, later
# on, not respect single-import), and the fix is to just Not Do That
# TODO i think with the new origin semantics, i've made it possible to
# import relative to the current file even if the current file isn't
# anywhere in the search path. is that right?
path = PurePosixPath(name)
if path.suffix:
search_exts = [path.suffix]
else:
search_exts = ['.scss', '.sass']
relative_to = path.parent
basename = path.stem
search_path = [] # tuple of (origin, start_from)
if relative_to.is_absolute():
relative_to = PurePosixPath(*relative_to.parts[1:])
elif rule.source_file.origin:
# Search relative to the current file first, only if not doing an
# absolute import
search_path.append(
rule.source_file.origin / rule.source_file.relpath.parent)
search_path.extend(compilation.compiler.search_path)
for prefix, suffix in product(('_', ''), search_exts):
filename = prefix + basename + suffix
for origin in search_path:
relpath = relative_to / filename
# Lexically (ignoring symlinks!) eliminate .. from the part
# of the path that exists within Sass-space. pathlib
# deliberately doesn't do this, but os.path does.
relpath = PurePosixPath(os.path.normpath(str(relpath)))
if rule.source_file.key == (origin, relpath):
# Avoid self-import
# TODO is this what ruby does?
continue
path = origin / relpath
if not path.exists():
continue
# All good!
# TODO if this file has already been imported, we'll do the
# source preparation twice. make it lazy.
return SourceFile.read(origin, relpath)
# Alias to make the below declarations less noisy
ns = CoreExtension.namespace
# ------------------------------------------------------------------------------
# Color creation
def _interpret_percentage(n, relto=1., clamp=True):
expect_type(n, Number, unit='%')
if n.is_unitless:
ret = n.value / relto
else:
ret = n.value / 100
if clamp:
if ret < 0:
return 0
elif ret > 1:
return 1
return ret
@ns.declare
def rgba(r, g, b, a):
r = _interpret_percentage(r, relto=255)
g = _interpret_percentage(g, relto=255)
b = _interpret_percentage(b, relto=255)
a = _interpret_percentage(a, relto=1)
return Color.from_rgb(r, g, b, a)
@ns.declare
def rgb(r, g, b, type='rgb'):
return rgba(r, g, b, Number(1.0))
@ns.declare
def rgba_(color, a=None):
if a is None:
alpha = 1
else:
alpha = _interpret_percentage(a)
return Color.from_rgb(*color.rgba[:3], alpha=alpha)
@ns.declare
def rgb_(color):
return rgba_(color, a=Number(1))
@ns.declare
def hsla(h, s, l, a):
return Color.from_hsl(
h.value / 360 % 1,
# Ruby sass treats plain numbers for saturation and lightness as though
# they were percentages, just without the %
_interpret_percentage(s, relto=100),
_interpret_percentage(l, relto=100),
alpha=a.value,
)
@ns.declare
def hsl(h, s, l):
return hsla(h, s, l, Number(1))
@ns.declare
def hsla_(color, a=None):
return rgba_(color, a)
@ns.declare
def hsl_(color):
return rgba_(color, a=Number(1))
@ns.declare
def mix(color1, color2, weight=Number(50, "%")):
"""
    Mixes together two colors. Specifically, takes the average of each of the
    RGB components, optionally weighted by the given percentage.
    The opacity of the colors is also considered when weighting the components.

    The weight specifies the amount of the first color that should be included
    in the returned color. 50% means that half the first color and half the
    second color should be used. 25% means that a quarter of the first color
    and three quarters of the second color should be used.
For example:
mix(#f00, #00f) => #7f007f
mix(#f00, #00f, 25%) => #3f00bf
mix(rgba(255, 0, 0, 0.5), #00f) => rgba(63, 0, 191, 0.75)
"""
# This algorithm factors in both the user-provided weight
# and the difference between the alpha values of the two colors
# to decide how to perform the weighted average of the two RGB values.
#
# It works by first normalizing both parameters to be within [-1, 1],
# where 1 indicates "only use color1", -1 indicates "only use color 0",
# and all values in between indicated a proportionately weighted average.
#
# Once we have the normalized variables w and a,
# we apply the formula (w + a)/(1 + w*a)
# to get the combined weight (in [-1, 1]) of color1.
# This formula has two especially nice properties:
#
# * When either w or a are -1 or 1, the combined weight is also that
# number (cases where w * a == -1 are undefined, and handled as a
# special case).
#
# * When a is 0, the combined weight is w, and vice versa
#
# Finally, the weight of color1 is renormalized to be within [0, 1]
# and the weight of color2 is given by 1 minus the weight of color1.
#
# Algorithm from the Sass project: http://sass-lang.com/
p = _interpret_percentage(weight)
# Scale weight to [-1, 1]
w = p * 2 - 1
# Compute difference in alpha channels
a = color1.alpha - color2.alpha
# Weight of first color
if w * a == -1:
# Avoid zero-div case
scaled_weight1 = w
else:
scaled_weight1 = (w + a) / (1 + w * a)
# Unscale back to [0, 1] and get the weight of the other color
w1 = (scaled_weight1 + 1) / 2
w2 = 1 - w1
# Do the scaling. Note that alpha isn't scaled by alpha, as that wouldn't
# make much sense; it uses the original untwiddled weight, p.
channels = [
ch1 * w1 + ch2 * w2
for (ch1, ch2) in zip(color1.rgba[:3], color2.rgba[:3])]
alpha = color1.alpha * p + color2.alpha * (1 - p)
return Color.from_rgb(*channels, alpha=alpha)
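# Rough Python-level sketch of calling mix() directly (the Sass-level examples
# live in the docstring above); channel values are in [0, 1] here:
#
#     mix(Color.from_rgb(1, 0, 0), Color.from_rgb(0, 0, 1), Number(25, "%"))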
# ------------------------------------------------------------------------------
# Color inspection
@ns.declare
def red(color):
r, g, b, a = color.rgba
return Number(r * 255)
@ns.declare
def green(color):
r, g, b, a = color.rgba
return Number(g * 255)
@ns.declare
def blue(color):
r, g, b, a = color.rgba
return Number(b * 255)
@ns.declare_alias('opacity')
@ns.declare
def alpha(color):
return Number(color.alpha)
@ns.declare
def hue(color):
h, s, l = color.hsl
return Number(h * 360, "deg")
@ns.declare
def saturation(color):
h, s, l = color.hsl
return Number(s * 100, "%")
@ns.declare
def lightness(color):
h, s, l = color.hsl
return Number(l * 100, "%")
@ns.declare
def ie_hex_str(color):
c = Color(color).value
return String("#{3:02X}{0:02X}{1:02X}{2:02X}".format(
int(round(c[0])),
int(round(c[1])),
int(round(c[2])),
int(round(c[3] * 255)),
))
# ------------------------------------------------------------------------------
# Color modification
@ns.declare_alias('fade-in')
@ns.declare_alias('fadein')
@ns.declare
def opacify(color, amount):
r, g, b, a = color.rgba
if amount.is_simple_unit('%'):
amt = amount.value / 100
else:
amt = amount.value
return Color.from_rgb(
r, g, b,
alpha=a + amt)
@ns.declare_alias('fade-out')
@ns.declare_alias('fadeout')
@ns.declare
def transparentize(color, amount):
r, g, b, a = color.rgba
if amount.is_simple_unit('%'):
amt = amount.value / 100
else:
amt = amount.value
return Color.from_rgb(
r, g, b,
alpha=a - amt)
@ns.declare
def lighten(color, amount):
return adjust_color(color, lightness=amount)
@ns.declare
def darken(color, amount):
return adjust_color(color, lightness=-amount)
@ns.declare
def saturate(color, amount):
return adjust_color(color, saturation=amount)
@ns.declare
def desaturate(color, amount):
return adjust_color(color, saturation=-amount)
@ns.declare
def greyscale(color):
h, s, l = color.hsl
return Color.from_hsl(h, 0, l, alpha=color.alpha)
@ns.declare
def grayscale(color):
if isinstance(color, Number):
# grayscale(n) and grayscale(n%) are CSS3 filters and should be left
# intact, but only when using the "a" spelling
return String.unquoted("grayscale(%s)" % (color.render(),))
else:
return greyscale(color)
@ns.declare_alias('spin')
@ns.declare
def adjust_hue(color, degrees):
h, s, l = color.hsl
delta = degrees.value / 360
return Color.from_hsl((h + delta) % 1, s, l, alpha=color.alpha)
@ns.declare
def complement(color):
h, s, l = color.hsl
return Color.from_hsl((h + 0.5) % 1, s, l, alpha=color.alpha)
@ns.declare
def invert(color):
"""Returns the inverse (negative) of a color. The red, green, and blue
values are inverted, while the opacity is left alone.
"""
r, g, b, a = color.rgba
return Color.from_rgb(1 - r, 1 - g, 1 - b, alpha=a)
@ns.declare
def adjust_lightness(color, amount):
return adjust_color(color, lightness=amount)
@ns.declare
def adjust_saturation(color, amount):
return adjust_color(color, saturation=amount)
@ns.declare
def scale_lightness(color, amount):
return scale_color(color, lightness=amount)
@ns.declare
def scale_saturation(color, amount):
return scale_color(color, saturation=amount)
@ns.declare
def adjust_color(
color, red=None, green=None, blue=None,
hue=None, saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = hue or saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't adjust both RGB and HSL channels at the same time")
zero = Number(0)
a = color.alpha + (alpha or zero).value
if do_rgb:
r, g, b = color.rgba[:3]
channels = [
current + (adjustment or zero).value / 255
for (current, adjustment) in zip(color.rgba, (red, green, blue))]
return Color.from_rgb(*channels, alpha=a)
else:
h, s, l = color.hsl
h = (h + (hue or zero).value / 360) % 1
s += _interpret_percentage(saturation or zero, relto=100, clamp=False)
l += _interpret_percentage(lightness or zero, relto=100, clamp=False)
return Color.from_hsl(h, s, l, a)
def _scale_channel(channel, scaleby):
if scaleby is None:
return channel
expect_type(scaleby, Number)
if not scaleby.is_simple_unit('%'):
raise ValueError("Expected percentage, got %r" % (scaleby,))
factor = scaleby.value / 100
if factor > 0:
# Add x% of the remaining range, up to 1
return channel + (1 - channel) * factor
else:
# Subtract x% of the existing channel. We add here because the factor
# is already negative
return channel * (1 + factor)
@ns.declare
def scale_color(
color, red=None, green=None, blue=None,
saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't scale both RGB and HSL channels at the same time")
scaled_alpha = _scale_channel(color.alpha, alpha)
if do_rgb:
channels = [
_scale_channel(channel, scaleby)
for channel, scaleby in zip(color.rgba, (red, green, blue))]
return Color.from_rgb(*channels, alpha=scaled_alpha)
else:
channels = [
_scale_channel(channel, scaleby)
for channel, scaleby
in zip(color.hsl, (None, saturation, lightness))]
return Color.from_hsl(*channels, alpha=scaled_alpha)
@ns.declare
def change_color(
color, red=None, green=None, blue=None,
hue=None, saturation=None, lightness=None, alpha=None):
do_rgb = red or green or blue
do_hsl = hue or saturation or lightness
if do_rgb and do_hsl:
raise ValueError(
"Can't change both RGB and HSL channels at the same time")
if alpha is None:
alpha = color.alpha
else:
alpha = alpha.value
if do_rgb:
channels = list(color.rgba[:3])
if red:
channels[0] = _interpret_percentage(red, relto=255)
if green:
channels[1] = _interpret_percentage(green, relto=255)
if blue:
channels[2] = _interpret_percentage(blue, relto=255)
return Color.from_rgb(*channels, alpha=alpha)
else:
channels = list(color.hsl)
if hue:
expect_type(hue, Number, unit=None)
channels[0] = (hue.value / 360) % 1
# Ruby sass treats plain numbers for saturation and lightness as though
# they were percentages, just without the %
if saturation:
channels[1] = _interpret_percentage(saturation, relto=100)
if lightness:
channels[2] = _interpret_percentage(lightness, relto=100)
return Color.from_hsl(*channels, alpha=alpha)
# ------------------------------------------------------------------------------
# String functions
@ns.declare_alias('e')
@ns.declare_alias('escape')
@ns.declare
def unquote(*args):
arg = List.from_maybe_starargs(args).maybe()
if isinstance(arg, String):
return String(arg.value, quotes=None)
else:
return String(arg.render(), quotes=None)
@ns.declare
def quote(*args):
arg = List.from_maybe_starargs(args).maybe()
if isinstance(arg, String):
return String(arg.value, quotes='"')
else:
return String(arg.render(), quotes='"')
@ns.declare
def str_length(string):
expect_type(string, String)
# nb: can't use `len(string)`, because that gives the Sass list length,
# which is 1
return Number(len(string.value))
# TODO this and several others should probably also require integers
# TODO and assert that the indexes are valid
@ns.declare
def str_insert(string, insert, index):
expect_type(string, String)
expect_type(insert, String)
expect_type(index, Number, unit=None)
py_index = index.to_python_index(len(string.value), check_bounds=False)
return String(
string.value[:py_index] + insert.value + string.value[py_index:],
quotes=string.quotes)
@ns.declare
def str_index(string, substring):
expect_type(string, String)
expect_type(substring, String)
# 1-based indexing, with 0 for failure
return Number(string.value.find(substring.value) + 1)
@ns.declare
def str_slice(string, start_at, end_at=None):
expect_type(string, String)
expect_type(start_at, Number, unit=None)
py_start_at = start_at.to_python_index(len(string.value))
if end_at is None:
py_end_at = None
else:
expect_type(end_at, Number, unit=None)
# Endpoint is inclusive, unlike Python
py_end_at = end_at.to_python_index(len(string.value)) + 1
return String(
string.value[py_start_at:py_end_at],
quotes=string.quotes)
@ns.declare
def to_upper_case(string):
expect_type(string, String)
return String(string.value.upper(), quotes=string.quotes)
@ns.declare
def to_lower_case(string):
expect_type(string, String)
return String(string.value.lower(), quotes=string.quotes)
# ------------------------------------------------------------------------------
# Number functions
@ns.declare
def percentage(value):
expect_type(value, Number, unit=None)
return value * Number(100, unit='%')
ns.set_function('abs', 1, Number.wrap_python_function(abs))
ns.set_function('round', 1, Number.wrap_python_function(round))
ns.set_function('ceil', 1, Number.wrap_python_function(math.ceil))
ns.set_function('floor', 1, Number.wrap_python_function(math.floor))
# ------------------------------------------------------------------------------
# List functions
def __parse_separator(separator, default_from=None):
if separator is None:
separator = 'auto'
separator = String.unquoted(separator).value
if separator == 'comma':
return True
elif separator == 'space':
return False
elif separator == 'auto':
if not default_from:
return True
elif len(default_from) < 2:
return True
else:
return default_from.use_comma
else:
raise ValueError('Separator must be auto, comma, or space')
# TODO get the compass bit outta here
@ns.declare_alias('-compass-list-size')
@ns.declare
def length(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return Number(len(lst))
@ns.declare
def set_nth(list, n, value):
expect_type(n, Number, unit=None)
py_n = n.to_python_index(len(list))
return List(
tuple(list[:py_n]) + (value,) + tuple(list[py_n + 1:]),
use_comma=list.use_comma)
# TODO get the compass bit outta here
@ns.declare_alias('-compass-nth')
@ns.declare
def nth(lst, n):
"""Return the nth item in the list."""
expect_type(n, (String, Number), unit=None)
if isinstance(n, String):
if n.value.lower() == 'first':
i = 0
elif n.value.lower() == 'last':
i = -1
else:
raise ValueError("Invalid index %r" % (n,))
else:
# DEVIATION: nth treats lists as circular lists
i = n.to_python_index(len(lst), circular=True)
return lst[i]
@ns.declare
def join(lst1, lst2, separator=String.unquoted('auto')):
expect_type(separator, String)
ret = []
ret.extend(List.from_maybe(lst1))
ret.extend(List.from_maybe(lst2))
if separator.value == 'comma':
use_comma = True
elif separator.value == 'space':
use_comma = False
elif separator.value == 'auto':
# The Sass docs are slightly misleading here, but the algorithm is: use
# the delimiter from the first list that has at least 2 items, or
# default to spaces.
if len(lst1) > 1:
use_comma = lst1.use_comma
elif len(lst2) > 1:
use_comma = lst2.use_comma
else:
use_comma = False
else:
raise ValueError("separator for join() must be comma, space, or auto")
return List(ret, use_comma=use_comma)
@ns.declare
def min_(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return min(lst)
@ns.declare
def max_(*lst):
if len(lst) == 1 and isinstance(lst[0], (list, tuple, List)):
lst = lst[0]
return max(lst)
@ns.declare
def append(lst, val, separator=None):
ret = []
ret.extend(List.from_maybe(lst))
ret.append(val)
use_comma = __parse_separator(separator, default_from=lst)
return List(ret, use_comma=use_comma)
@ns.declare
def index(lst, val):
for i in xrange(len(lst)):
if lst.value[i] == val:
return Number(i + 1)
return Boolean(False)
@ns.declare
def zip_(*lists):
return List(
[List(zipped) for zipped in zip(*lists)],
use_comma=True)
# TODO need a way to use "list" as the arg name without shadowing the builtin
@ns.declare
def list_separator(list):
if list.use_comma:
return String.unquoted('comma')
else:
return String.unquoted('space')
# ------------------------------------------------------------------------------
# Map functions
@ns.declare
def map_get(map, key):
return map.to_dict().get(key, Null())
@ns.declare
def map_merge(*maps):
key_order = []
index = {}
for map in maps:
for key, value in map.to_pairs():
if key not in index:
key_order.append(key)
index[key] = value
pairs = [(key, index[key]) for key in key_order]
return Map(pairs, index=index)
@ns.declare
def map_keys(map):
return List(
[k for (k, v) in map.to_pairs()],
use_comma=True)
@ns.declare
def map_values(map):
return List(
[v for (k, v) in map.to_pairs()],
use_comma=True)
@ns.declare
def map_has_key(map, key):
return Boolean(key in map.to_dict())
# DEVIATIONS: these do not exist in ruby sass
@ns.declare
def map_get3(map, key, default):
return map.to_dict().get(key, default)
@ns.declare
def map_get_nested3(map, keys, default=Null()):
for key in keys:
map = map.to_dict().get(key, None)
if map is None:
return default
return map
@ns.declare
def map_merge_deep(*maps):
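    # Deep merge: when every map supplies a Map for a key, those values are merged
    # recursively; otherwise the value from the last map providing the key wins.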
pairs = []
keys = set()
for map in maps:
for key, value in map.to_pairs():
keys.add(key)
for key in keys:
values = [map.to_dict().get(key, None) for map in maps]
values = [v for v in values if v is not None]
if all(isinstance(v, Map) for v in values):
pairs.append((key, map_merge_deep(*values)))
else:
pairs.append((key, values[-1]))
return Map(pairs)
# ------------------------------------------------------------------------------
# Meta functions
@ns.declare
def type_of(obj): # -> bool, number, string, color, list
return String(obj.sass_type_name)
@ns.declare
def unit(number): # -> px, em, cm, etc.
numer = '*'.join(sorted(number.unit_numer))
denom = '*'.join(sorted(number.unit_denom))
if denom:
ret = numer + '/' + denom
else:
ret = numer
return String.unquoted(ret)
@ns.declare
def unitless(value):
if not isinstance(value, Number):
raise TypeError("Expected number, got %r" % (value,))
return Boolean(value.is_unitless)
@ns.declare
def comparable(number1, number2):
left = number1.to_base_units()
right = number2.to_base_units()
return Boolean(
left.unit_numer == right.unit_numer
and left.unit_denom == right.unit_denom)
@ns.declare
def keywords(value):
"""Extract named arguments, as a map, from an argument list."""
expect_type(value, Arglist)
return value.extract_keywords()
# ------------------------------------------------------------------------------
# Miscellaneous
@ns.declare
def if_(condition, if_true, if_false=Null()):
return if_true if condition else if_false
| 2.03125 | 2 |
pypy/module/cpyext/test/test_iterator.py | wdv4758h/mu-client-pypy | 34 | 11474 | from pypy.module.cpyext.test.test_api import BaseApiTest
class TestIterator(BaseApiTest):
def test_check_iter(self, space, api):
assert api.PyIter_Check(space.iter(space.wrap("a")))
assert api.PyIter_Check(space.iter(space.newlist([])))
assert not api.PyIter_Check(space.w_type)
assert not api.PyIter_Check(space.wrap(2))
def test_getIter(self, space, api):
w_iter = api.PyObject_GetIter(space.wrap([1, 2, 3]))
assert space.unwrap(api.PyIter_Next(w_iter)) == 1
assert space.unwrap(api.PyIter_Next(w_iter)) == 2
assert space.unwrap(api.PyIter_Next(w_iter)) == 3
assert api.PyIter_Next(w_iter) is None
assert not api.PyErr_Occurred()
def test_iternext_error(self,space, api):
assert api.PyIter_Next(space.w_None) is None
assert api.PyErr_Occurred() is space.w_TypeError
api.PyErr_Clear()
| 2.234375 | 2 |
capsule_em/experiment.py | jrmendeshurb/google-research | 6 | 11475 | <reponame>jrmendeshurb/google-research
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The runners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow as tf
from capsule_em import model as f_model
from capsule_em.mnist import mnist_record
from capsule_em.norb import norb_record
from tensorflow.contrib import tfprof as contrib_tfprof
from tensorflow.python import debug as tf_debug
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_prime_capsules', 32,
'Number of first layer capsules.')
tf.app.flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate')
tf.app.flags.DEFINE_integer('routing_iteration', 3,
'Number of iterations for softmax routing')
tf.app.flags.DEFINE_float(
'routing_rate', 1,
'ratio for combining routing logits and routing feedback')
tf.app.flags.DEFINE_float('decay_rate', 0.96, 'ratio for learning rate decay')
tf.app.flags.DEFINE_integer('decay_steps', 20000,
'number of steps for learning rate decay')
tf.app.flags.DEFINE_bool('normalize_kernels', False,
'Normalize the capsule weight kernels')
tf.app.flags.DEFINE_integer('num_second_atoms', 16,
'number of capsule atoms for the second layer')
tf.app.flags.DEFINE_integer('num_primary_atoms', 16,
'number of capsule atoms for the first layer')
tf.app.flags.DEFINE_integer('num_start_conv', 32,
'number of channels for the start layer')
tf.app.flags.DEFINE_integer('kernel_size', 5,
'kernel size for the start layer.')
tf.app.flags.DEFINE_integer(
'routing_iteration_prime', 1,
'number of routing iterations for primary capsules.')
tf.app.flags.DEFINE_integer('max_steps', 2000000,
'Number of steps to run trainer.')
tf.app.flags.DEFINE_string('data_dir', '/datasets/mnist/',
'Directory for storing input data')
tf.app.flags.DEFINE_string('summary_dir',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries',
'Summaries log directory')
tf.app.flags.DEFINE_bool('train', True, 'train or test.')
tf.app.flags.DEFINE_integer(
'checkpoint_steps', 1500,
'number of steps before saving a training checkpoint.')
tf.app.flags.DEFINE_bool('verbose_image', False, 'whether to show images.')
tf.app.flags.DEFINE_bool('multi', True,
'whether to use multiple digit dataset.')
tf.app.flags.DEFINE_bool('eval_once', False,
'whether to evaluate once on the ckpnt file.')
tf.app.flags.DEFINE_integer('eval_size', 24300,
'number of examples to evaluate.')
tf.app.flags.DEFINE_string(
'ckpnt',
'/tmp/tensorflow/mnist/logs/mnist_with_summaries/train/model.ckpnt',
'The checkpoint to load and evaluate once.')
tf.app.flags.DEFINE_integer('keep_ckpt', 5, 'number of checkpoints to keep.')
tf.app.flags.DEFINE_bool(
'clip_lr', False, 'whether to clip learning rate to not go bellow 1e-5.')
tf.app.flags.DEFINE_integer('stride_1', 2,
                            'stride for the first convolutional layer.')
tf.app.flags.DEFINE_integer('kernel_2', 9,
                            'kernel size for the second convolutional layer.')
tf.app.flags.DEFINE_integer('stride_2', 2,
                            'stride for the second convolutional layer.')
tf.app.flags.DEFINE_string('padding', 'VALID',
'the padding method for conv layers.')
tf.app.flags.DEFINE_integer('extra_caps', 2, 'number of extra conv capsules.')
tf.app.flags.DEFINE_string('caps_dims', '32,32',
'output dim for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_strides', '2,1',
'stride for extra conv capsules.')
tf.app.flags.DEFINE_string('caps_kernels', '3,3',
                           'kernel size for extra conv capsules.')
tf.app.flags.DEFINE_integer('extra_conv', 0, 'number of extra conv layers.')
tf.app.flags.DEFINE_string('conv_dims', '', 'output dim for extra conv layers.')
tf.app.flags.DEFINE_string('conv_strides', '', 'stride for extra conv layers.')
tf.app.flags.DEFINE_string('conv_kernels', '',
'kernel size for extra conv layers.')
tf.app.flags.DEFINE_bool('leaky', False, 'Use leaky routing.')
tf.app.flags.DEFINE_bool('staircase', False, 'Use staircase decay.')
tf.app.flags.DEFINE_integer('num_gpus', 1, 'number of gpus to train.')
tf.app.flags.DEFINE_bool('adam', True, 'Use Adam optimizer.')
tf.app.flags.DEFINE_bool('pooling', False, 'Pooling after convolution.')
tf.app.flags.DEFINE_bool('use_caps', True, 'Use capsule layers.')
tf.app.flags.DEFINE_integer(
'extra_fc', 512, 'number of units in the extra fc layer in no caps mode.')
tf.app.flags.DEFINE_bool('dropout', False, 'Dropout before last layer.')
tf.app.flags.DEFINE_bool('tweak', False, 'During eval recons from tweaked rep.')
tf.app.flags.DEFINE_bool('softmax', False, 'softmax loss in no caps.')
tf.app.flags.DEFINE_bool('c_dropout', False, 'dropout after conv capsules.')
tf.app.flags.DEFINE_bool(
'distort', True,
'distort mnist images by cropping to 24 * 24 and rotating by 15 degrees.')
tf.app.flags.DEFINE_bool('restart', False, 'Clean train checkpoints.')
tf.app.flags.DEFINE_bool('use_em', True,
'If set use em capsules with em routing.')
tf.app.flags.DEFINE_float('final_beta', 0.01, 'Temperature at the sigmoid.')
tf.app.flags.DEFINE_bool('eval_ensemble', False, 'eval over aggregated logits.')
tf.app.flags.DEFINE_string('part1', 'ok', 'ok')
tf.app.flags.DEFINE_string('part2', 'ok', 'ok')
tf.app.flags.DEFINE_bool('debug', False, 'If set use tfdbg wrapper.')
tf.app.flags.DEFINE_bool('reduce_mean', False,
'If set normalize mean of each image.')
tf.app.flags.DEFINE_float('loss_rate', 1.0,
'classification to regularization rate.')
tf.app.flags.DEFINE_integer('batch_size', 64, 'Batch size.')
tf.app.flags.DEFINE_integer('norb_pixel', 48,
                            'Input image size in pixels for norb.')
tf.app.flags.DEFINE_bool('patching', True, 'If set use patching for eval.')
tf.app.flags.DEFINE_string('data_set', 'norb', 'the data set to use.')
tf.app.flags.DEFINE_string('cifar_data_dir', '/tmp/cifar10_data',
"""Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_string('norb_data_dir', '/tmp/smallNORB/',
"""Path to the norb data directory.""")
tf.app.flags.DEFINE_string('affnist_data_dir', '/tmp/affnist_data',
"""Path to the affnist data directory.""")
num_classes = {
'mnist': 10,
'cifar10': 10,
'mnist_multi': 10,
'svhn': 10,
'affnist': 10,
'expanded_mnist': 10,
'norb': 5,
}
def get_features(train, total_batch):
"""Return batched inputs."""
print(FLAGS.data_set)
batch_size = total_batch // max(1, FLAGS.num_gpus)
split = 'train' if train else 'test'
features = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/cpu:0'):
with tf.name_scope('input_tower_%d' % (i)):
if FLAGS.data_set == 'norb':
features += [
norb_record.inputs(
train_dir=FLAGS.norb_data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
image_pixel=FLAGS.norb_pixel,
distort=FLAGS.distort,
patching=FLAGS.patching,
)
]
elif FLAGS.data_set == 'affnist':
features += [
mnist_record.inputs(
train_dir=FLAGS.affnist_data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
shift=0,
height=40,
train_file='test.tfrecords')
]
elif FLAGS.data_set == 'expanded_mnist':
features += [
mnist_record.inputs(
train_dir=FLAGS.data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
height=40,
train_file='train_6shifted_6padded_mnist.tfrecords',
shift=6)
]
else:
if train and not FLAGS.distort:
shift = 2
else:
shift = 0
features += [
mnist_record.inputs(
train_dir=FLAGS.data_dir,
batch_size=batch_size,
split=split,
multi=FLAGS.multi,
shift=shift,
distort=FLAGS.distort)
]
print(features)
return features
def run_training():
"""Train."""
with tf.Graph().as_default():
# Input images and labels.
features = get_features(True, FLAGS.batch_size)
model = f_model.multi_gpu_model
print('so far so good!')
result = model(features)
param_stats = contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer
.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)
contrib_tfprof.model_analyzer.print_model_analysis(
tf.get_default_graph(),
tfprof_options=contrib_tfprof.model_analyzer.FLOAT_OPS_OPTIONS)
merged = result['summary']
train_step = result['train']
# test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type='curses')
sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
sess.run(init_op)
saver = tf.train.Saver(max_to_keep=FLAGS.keep_ckpt)
if tf.gfile.Exists(FLAGS.summary_dir + '/train'):
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
print(ckpt)
if (not FLAGS.restart) and ckpt and ckpt.model_checkpoint_path:
        print('Restoring from existing checkpoint')
saver.restore(sess, ckpt.model_checkpoint_path)
prev_step = int(
ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
else:
        print('Resetting train directory (restart requested or no checkpoint found)')
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/train')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
prev_step = 0
else:
tf.gfile.MakeDirs(FLAGS.summary_dir + '/train')
prev_step = 0
train_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/train',
sess.graph)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
for i in range(prev_step, FLAGS.max_steps):
step += 1
summary, _ = sess.run([merged, train_step])
train_writer.add_summary(summary, i)
if (i + 1) % FLAGS.checkpoint_steps == 0:
saver.save(
sess,
os.path.join(FLAGS.summary_dir + '/train', 'model.ckpt'),
global_step=i + 1)
except tf.errors.OutOfRangeError:
print('Done training for %d steps.' % step)
finally:
# When done, ask the threads to stop.
coord.request_stop()
train_writer.close()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def run_eval():
"""Evaluate on test or validation."""
with tf.Graph().as_default():
# Input images and labels.
features = get_features(False, 5)
model = f_model.multi_gpu_model
result = model(features)
merged = result['summary']
correct_prediction_sum = result['correct']
almost_correct_sum = result['almost']
saver = tf.train.Saver()
test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test')
seen_step = -1
time.sleep(3 * 60)
paused = 0
while paused < 360:
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
time.sleep(2 * 60)
paused += 2
continue
while seen_step == int(global_step):
time.sleep(2 * 60)
ckpt = tf.train.get_checkpoint_state(FLAGS.summary_dir + '/train/')
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
paused += 2
if paused > 360:
test_writer.close()
return
paused = 0
seen_step = int(global_step)
print(seen_step)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
saver.restore(sess, ckpt.model_checkpoint_path)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
total_tp = 0
total_almost = 0
for i in range(FLAGS.eval_size // 5):
summary_j, tp, almost = sess.run(
[merged, correct_prediction_sum, almost_correct_sum])
total_tp += tp
total_almost += almost
total_false = FLAGS.eval_size - total_tp
total_almost_false = FLAGS.eval_size - total_almost
summary_tp = tf.Summary.FromString(summary_j)
summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
summary_tp.value.add(
tag='almost_wrong_prediction', simple_value=total_almost_false)
test_writer.add_summary(summary_tp, global_step)
print('write done')
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
test_writer.close()
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def eval_ensemble(ckpnts):
"""Evaluate on an ensemble of checkpoints."""
with tf.Graph().as_default():
first_features = get_features(False, 100)[0]
h = first_features['height']
d = first_features['depth']
features = {
'images': tf.placeholder(tf.float32, shape=(100, d, h, h)),
'labels': tf.placeholder(tf.float32, shape=(100, 10)),
'recons_image': tf.placeholder(tf.float32, shape=(100, d, h, h)),
'recons_label': tf.placeholder(tf.int32, shape=(100)),
'height': first_features['height'],
'depth': first_features['depth']
}
model = f_model.multi_gpu_model
result = model([features])
logits = result['logits']
config = tf.ConfigProto(allow_soft_placement=True)
# saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
batch_logits = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
batch_recons_label = np.zeros((FLAGS.eval_size // 100, 100),
dtype=np.float32)
batch_labels = np.zeros((FLAGS.eval_size // 100, 100, 10), dtype=np.float32)
batch_images = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
dtype=np.float32)
batch_recons_image = np.zeros((FLAGS.eval_size // 100, 100, d, h, h),
dtype=np.float32)
saver = tf.train.Saver()
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
for i in range(FLAGS.eval_size // 100):
(batch_recons_label[i, Ellipsis], batch_labels[i, Ellipsis], batch_images[i, Ellipsis],
batch_recons_image[i, Ellipsis]) = sess.run([
first_features['recons_label'], first_features['labels'],
first_features['images'], first_features['recons_image']
])
for ckpnt in ckpnts:
saver.restore(sess, ckpnt)
for i in range(FLAGS.eval_size // 100):
logits_i = sess.run(
logits,
feed_dict={
features['recons_label']: batch_recons_label[i, Ellipsis],
features['labels']: batch_labels[i, Ellipsis],
features['images']: batch_images[i, Ellipsis],
features['recons_image']: batch_recons_image[i, Ellipsis]
})
# batch_logits[i, ...] += softmax(logits_i)
batch_logits[i, Ellipsis] += logits_i
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
batch_pred = np.argmax(batch_logits, axis=2)
total_wrong = np.sum(np.not_equal(batch_pred, batch_recons_label))
print(total_wrong)
def eval_once(ckpnt):
"""Evaluate on one checkpoint once."""
ptches = np.zeros((14, 14, 32, 32))
for i in range(14):
for j in range(14):
ind_x = i * 2
ind_y = j * 2
for k in range(5):
for h in range(5):
ptches[i, j, ind_x + k, ind_y + h] = 1
ptches = np.reshape(ptches, (14 * 14, 32, 32))
with tf.Graph().as_default():
features = get_features(False, 1)[0]
if FLAGS.patching:
features['images'] = features['cc_images']
features['recons_label'] = features['cc_recons_label']
features['labels'] = features['cc_labels']
model = f_model.multi_gpu_model
result = model([features])
# merged = result['summary']
correct_prediction_sum = result['correct']
# almost_correct_sum = result['almost']
# mid_act = result['mid_act']
logits = result['logits']
saver = tf.train.Saver()
test_writer = tf.summary.FileWriter(FLAGS.summary_dir + '/test_once')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.3
sess = tf.Session(config=config)
# saver.restore(sess, tf.train.latest_checkpoint(FLAGS.ckpnt))
saver.restore(sess, ckpnt)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
i = 0
try:
total_tp = 0
for i in range(FLAGS.eval_size):
#, g_ac, ac
lb, tp, lg = sess.run([
features['recons_label'],
correct_prediction_sum,
logits,
])
if FLAGS.patching:
batched_lg = np.sum(lg / np.sum(lg, axis=1, keepdims=True), axis=0)
batch_pred = np.argmax(batched_lg)
tp = np.equal(batch_pred, lb[0])
total_tp += tp
total_false = FLAGS.eval_size - total_tp
print('false:{}, true:{}'.format(total_false, total_tp))
# summary_tp = tf.Summary.FromString(summary_j)
# summary_tp.value.add(tag='correct_prediction', simple_value=total_tp)
# summary_tp.value.add(tag='wrong_prediction', simple_value=total_false)
# summary_tp.value.add(
# tag='almost_wrong_prediction', simple_value=total_almost_false)
# test_writer.add_summary(summary_tp, i + 1)
except tf.errors.OutOfRangeError:
print('Done eval for %d steps.' % i)
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
test_writer.close()
def main(_):
if FLAGS.eval_ensemble:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_ensemble'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_ensemble')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_ensemble')
ensem = []
for i in range(1, 12):
f_name = '/tmp/cifar10/{}{}{}-600000'.format(FLAGS.part1, i, FLAGS.part2)
if tf.train.checkpoint_exists(f_name):
ensem += [f_name]
print(len(ensem))
eval_ensemble(ensem)
elif FLAGS.eval_once:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_once'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_once')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_once')
eval_once(FLAGS.ckpnt)
elif FLAGS.train:
run_training()
else:
if tf.gfile.Exists(FLAGS.summary_dir + '/test_once'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test_once')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test_once')
if tf.gfile.Exists(FLAGS.summary_dir + '/test'):
tf.gfile.DeleteRecursively(FLAGS.summary_dir + '/test')
tf.gfile.MakeDirs(FLAGS.summary_dir + '/test')
run_eval()
if __name__ == '__main__':
tf.app.run()
| 1.742188 | 2 |
grafana/common/dashboards/aggregated/client_subnet_statistics_detail.py | MikeAT/visualizer | 6 | 11476 | # Copyright 2021 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Aggregation client subnet statistics
import textwrap
import grafanalib.core as GCore
import grafanacommon as GCommon
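# Builds a horizontal stacked bar chart of query classification rates (AForA, AForRoot,
# ..., SrcPortZero) per prefix, with one ClickHouse target per classification trace;
# prefix_field selects the grouping column (FixedPrefix, ClientASN or ASPrefix in dash() below).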
def query_classification_chart(chart_title, yaxis_label, prefix_field, agginfo, nodesel):
return GCommon.BarChart(
title = chart_title,
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = yaxis_label,
),
),
traces = [
GCommon.BarChartTrace(
name = 'AForA',
x = 'AForA',
y = 'AForAPrefix',
text = 'AForA',
),
GCommon.BarChartTrace(
name = 'AForRoot',
x = 'AForRoot',
y = 'AForRootPrefix',
text = 'AForRoot',
),
GCommon.BarChartTrace(
name = 'FunnyQueryClass',
x = 'FunnyQueryClass',
y = 'FunnyQueryClassPrefix',
text = 'FunnyQueryClass',
),
GCommon.BarChartTrace(
name = 'FunnyQueryType',
x = 'FunnyQueryType',
y = 'FunnyQueryTypePrefix',
text = 'FunnyQueryType',
),
GCommon.BarChartTrace(
name = 'Localhost',
x = 'Localhost',
y = 'LocalhostPrefix',
text = 'Localhost',
),
GCommon.BarChartTrace(
name = 'NonAuthTld',
x = 'NonAuthTld',
y = 'NonAuthTldPrefix',
text = 'NonAuthTld',
),
GCommon.BarChartTrace(
name = 'Ok',
x = 'Ok',
y = 'OkPrefix',
text = 'Ok',
),
GCommon.BarChartTrace(
name = 'RFC1918Ptr',
x = 'RFC1918Ptr',
y = 'RFC1918PtrPrefix',
text = 'RFC1918Ptr',
),
GCommon.BarChartTrace(
name = 'RootServersNet',
x = 'RootServersNet',
y = 'RootServersNetPrefix',
text = 'RootServersNet',
),
GCommon.BarChartTrace(
name = 'SrcPortZero',
x = 'SrcPortZero',
y = 'SrcPortZeroPrefix',
text = 'SrcPortZero',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForAPrefix,
AForA,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForACount)/($to - $from) AS AForA
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS AForRootPrefix,
AForRoot,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(AForRootCount)/($to - $from) AS AForRoot
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'B'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryClassPrefix,
FunnyQueryClass,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryClassCount)/($to - $from) AS FunnyQueryClass
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'C'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS FunnyQueryTypePrefix,
FunnyQueryType,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(FunnyQueryTypeCount)/($to - $from) AS FunnyQueryType
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
                ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'D'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS LocalhostPrefix,
Localhost,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(LocalhostCount)/($to - $from) AS Localhost
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'E'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS NonAuthTldPrefix,
NonAuthTld,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(NonAuthTldCount)/($to - $from) AS NonAuthTld
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'F'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS OkPrefix,
Ok,
TotalCount
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS TotalCount,
sum(Count -
(AForACount +
AForRootCount +
FunnyQueryClassCount +
FunnyQueryTypeCount +
LocalhostCount +
NonAuthTldCount +
RFC1918PtrCount +
RootServersNetCount +
SrcPortZeroCount))/($to - $from) AS Ok
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY TotalCount DESC
LIMIT 40
)
ORDER BY TotalCount ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'G'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RFC1918PtrPrefix,
RFC1918Ptr,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RFC1918PtrCount)/($to - $from) AS RFC1918Ptr
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'H'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS RootServersNetPrefix,
RootServersNet,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(RootServersNetCount)/($to - $from) AS RootServersNet
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'I'
),
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Prefix AS SrcPortZeroPrefix,
SrcPortZero,
Count
FROM
(
SELECT
{prefix_field} AS Prefix,
sum(Count) AS Count,
sum(SrcPortZeroCount)/($to - $from) AS SrcPortZero
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY Count DESC
LIMIT 40
)
ORDER BY Count ASC
""".format(
prefix_field=prefix_field,
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'J'
),
],
)
def dash(myuid, agginfo, nodesel, **kwargs):
return GCommon.Dashboard(
title = "Client subnet statistics detail",
tags = [
agginfo['graph_tag']
],
uid = myuid,
rows = [
GCore.Row(
height = GCore.Pixels(50),
panels = [
GCommon.HTMLPanel('grafana/common/dashboards/aggregated/client_subnet_statistics_header.html', transparent=True),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
Prefix AS Subnet,
sum(Count)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
ORDER BY QPS DESC
LIMIT 30
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by ASN',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'ASN',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
ClientASN
FROM
(
SELECT
ClientASN,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
ClientASN,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN
ORDER BY sCount DESC, ClientASN ASC
LIMIT 30
) AS ClientASNCounts
ALL LEFT JOIN
(
SELECT
ClientASN,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
ClientASN,
rcode
UNION ALL
(
SELECT
ClientASN,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
ClientASN
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY ClientASN
) AS ZeroClientASN
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS ClientASNRcodeCounts USING ClientASN
GROUP BY
ClientASN,
rcode
) AS ClientASNRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS ClientASNNameCountsTotal USING rcode
GROUP BY
ClientASN,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
ClientASN DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by AS subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'AS Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BGPPrefix' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
                                        ) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'RCODE by clients by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
barmode = GCommon.BAR_CHART_LAYOUT_MODE_STACK,
showlegend = True,
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
autotrace = True,
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'BusiestClientSubnets' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
notEmpty(rcodeText) ? rcodeText : concat('RCODE', toString(rcode)) AS DisplayRcode,
sum(rcodeCount) / ($to - $from) AS rcodeCount,
Prefix
FROM
(
SELECT
Prefix,
rcode,
sum(rcodeCount) AS rcodeCount,
any(sCount) AS sCount
FROM
(
SELECT
Prefix,
sum(RcodeMap.Count) AS sCount
FROM $table
ARRAY JOIN RcodeMap
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix
ORDER BY sCount DESC, Prefix ASC
LIMIT 30
) AS PrefixCount
ALL LEFT JOIN
(
SELECT
Prefix,
RcodeMap.ResponseRcode AS rcode,
sum(RcodeMap.Count) AS rcodeCount
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY
Prefix,
rcode
UNION ALL
(
SELECT
Prefix,
rcode,
CAST(0 AS UInt64) AS rcodeCount
FROM
(
SELECT
0 AS Zero,
Prefix
FROM $table
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY Prefix
) AS ZeroPrefix
ALL LEFT JOIN
(
SELECT
0 AS Zero,
RcodeMap.ResponseRcode AS rcode
FROM $table
ARRAY JOIN RcodeMap
WHERE
$timeFilter
AND NodeID IN {nodesel}
GROUP BY rcode
) AS ZeroRcode USING Zero
)
) AS PrefixRcodeCounts USING Prefix
GROUP BY
Prefix,
rcode
) AS PrefixRcodeCountsTotal
ALL INNER JOIN
(
SELECT
value_name AS rcodeText,
toUInt16(value) AS rcode
FROM {nodeinfo_database}.iana_text
WHERE registry_name = 'RCODE'
) AS PrefixNameCountsTotal USING rcode
GROUP BY
Prefix,
rcode,
rcodeText
ORDER BY
sum(sCount) ASC,
rcodeText ASC,
Prefix DESC""".format(
nodesel=nodesel,
nodeinfo_database=agginfo['nodeinfo_database'])),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
GCommon.BarChart(
title = 'Root abusers by fixed subnet',
orientation = GCommon.BAR_CHART_ORIENTATION_HORIZONTAL,
layout = GCommon.BarChartLayout(
xaxis = GCommon.BarChartAxis(
title = 'Queries per second',
),
yaxis = GCommon.BarChartAxis(
autotick = False,
axtype = GCommon.BAR_CHART_AXIS_TYPE_CATEGORY,
tickmargin = 110,
title = 'Fixed Subnet',
),
),
traces = [
GCommon.BarChartTrace(
name = 'Subnet',
color = '#A352CC',
x = 'QPS',
y = 'Subnet',
text = 'QPS',
),
],
targets = [
GCommon.ClickHouseTableTarget(
database = agginfo['database'],
table = 'QueryClassifications' + agginfo['table_suffix'],
round = agginfo['round'],
query = textwrap.dedent("""\
SELECT
Subnet,
QPS
FROM
(
SELECT
FixedPrefix AS Subnet,
sum(RootAbuseCount)/($to - $from) AS QPS
FROM $table
WHERE $timeFilter
AND NodeID IN {nodesel}
GROUP BY FixedPrefix
ORDER BY QPS DESC
LIMIT 40
)
ORDER BY QPS ASC""".format(
nodesel=nodesel)),
refId = 'A'
)
],
),
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest fixed subnet',
'Fixed Subnet',
'FixedPrefix',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest ASN',
'ASN',
'ClientASN',
agginfo,
nodesel)
],
),
GCore.Row(
height = GCore.Pixels(GCore.DEFAULT_ROW_HEIGHT.num * 2),
panels = [
query_classification_chart(
'Query classification by busiest AS subnet',
'AS subnet',
'ASPrefix',
agginfo,
nodesel)
],
),
]
)
| 1.734375 | 2 |
pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py | rdevon/pylearn2 | 1 | 11477 | <filename>pylearn2/neuroimaging_utils/tutorials/nice/jobman/simple_train.py
"""
Module to train a simple NICE model for demo.
"""
from jobman.tools import expand
from jobman.tools import flatten
import logging
import nice_experiment
import numpy as np
from os import path
from pylearn2.config import yaml_parse
from pylearn2.neuroimaging_utils.datasets import MRI
from pylearn2.neuroimaging_utils.dataset_utils import mri_nifti
from pylearn2.scripts.jobman.experiment import ydict
from pylearn2.utils import serial
logging.basicConfig(format="[%(module)s:%(levelname)s]:%(message)s")
logger = logging.getLogger(__name__)
yaml_file = nice_experiment.yaml_file
def main(dataset_name="smri"):
logger.info("Getting dataset info for %s" % args.dataset_name)
data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)
mask_file = path.join(data_path, "mask.npy")
mask = np.load(mask_file)
input_dim = (mask == 1).sum()
if input_dim % 2 == 1:
input_dim -= 1
mri = MRI.MRI_Standard(which_set="full",
dataset_name=args.dataset_name,
unit_normalize=True,
even_input=True,
apply_mask=True)
variance_map_file = path.join(data_path, "variance_map.npy")
mri_nifti.save_variance_map(mri, variance_map_file)
user = path.expandvars("$USER")
save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%s"
% (user, "nice_jobman_test"))
file_params = {"save_path": save_path,
"variance_map_file": variance_map_file
}
yaml_template = open(yaml_file).read()
hyperparams = expand(flatten(nice_experiment.default_hyperparams(input_dim=input_dim)),
dict_type=ydict)
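    # Override template hyperparameters with matching command-line arguments;
    # weight_decay is special-cased: a non-zero l1_decay becomes an L1WeightDecay
    # cost spec for the YAML template, otherwise the field is left empty.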
for param in hyperparams:
if hasattr(args, param) and getattr(args, param):
val = getattr(args, param)
logger.info("Filling %s with %r" % (param, val))
hyperparams[param] = type(hyperparams[param])(val)
elif param == "weight_decay":
val = getattr(args, "l1_decay")
if val == 0.0:
hyperparams["weight_decay"] = ""
else:
hyperparams["weight_decay"] = {
"__builder__": "pylearn2.costs.mlp.L1WeightDecay",
"coeffs": {"z": val}
}
for param in file_params:
yaml_template = yaml_template.replace("%%(%s)s" % param, file_params[param])
yaml = yaml_template % hyperparams
print yaml
logger.info("Training")
train = yaml_parse.load(yaml)
train.main_loop()
if __name__ == "__main__":
parser = nice_experiment.make_argument_parser()
args = parser.parse_args()
if args.verbose:
logger.setLevel(logging.DEBUG)
main(args)
| 2.484375 | 2 |
_Framework/Layer.py | isfopo/MacroPushScript | 0 | 11478 | <filename>_Framework/Layer.py
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/_Framework/Layer.py
u"""
Module implementing resource-based access to controls in a unified,
dynamic interface.
"""
from __future__ import absolute_import, print_function, unicode_literals
from builtins import str
from builtins import object
from future.utils import raise_
from itertools import repeat
from .ControlElement import ControlElementClient
from .Util import nop
from .Resource import ExclusiveResource, CompoundResource
from .Disconnectable import Disconnectable
class LayerError(Exception):
pass
class UnhandledControlError(LayerError):
pass
class SimpleLayerOwner(Disconnectable):
u"""
Simple owner that grabs a given layer until it's disconnected
"""
def __init__(self, layer = None):
self._layer = layer
self._layer.grab(self)
def disconnect(self):
self._layer.release(self)
class LayerClient(ControlElementClient):
u"""
Client of the indivial controls that delivers the controls to the
layer owner.
"""
def __init__(self, layer = None, layer_client = None, *a, **k):
super(LayerClient, self).__init__(*a, **k)
assert layer_client
assert layer
self.layer_client = layer_client
self.layer = layer
def set_control_element(self, control_element, grabbed):
layer = self.layer
owner = self.layer_client
assert owner
assert control_element in layer._control_to_names, u'Control not in layer: %s' % (control_element,)
names = layer._control_to_names[control_element]
if not grabbed:
control_element = None
for name in names:
try:
handler = getattr(owner, u'set_' + name)
except AttributeError:
try:
control = getattr(owner, name)
handler = control.set_control_element
except AttributeError:
if name[0] != u'_':
raise_(UnhandledControlError, u'Component %s has no handler for control_element %s' % (str(owner), name))
else:
handler = nop
handler(control_element or None)
layer._name_to_controls[name] = control_element
class LayerBase(object):
pass
class CompoundLayer(LayerBase, CompoundResource):
u"""
A compound resource takes two layers and makes them look like one,
grabbing both of them. Both can have different priorities
    though.
"""
def _get_priority(self):
assert self.first.priority == self.second.priority
return self.first.priority
def _set_priority(self, priority):
self.first.priority = priority
self.second.priority = priority
priority = property(_get_priority, _set_priority)
def __getattr__(self, key):
try:
return getattr(self.first, key)
except AttributeError:
return getattr(self.second, key)
class Layer(LayerBase, ExclusiveResource):
u"""
A layer provides a convenient interface to control resources. In a
layer, you can group several controls by name. The layer itself
is an exclusive resource. When grabbing the layer, it will try to
grab all controls and will forward them to its own owner when he
receives them, and will take them from him when they are
release. The layer with give and take away the controls from its
client using methods of the form::
client.set[control-name](control)
Where [control-name] is the name the control was given in this
layer. This way, layers are a convenient way to provide controls
to components indirectly, with automatic handling of competition
for them.
Note that [control-name] can not be any of the following reserved
names: priority, grab, release, on_received, on_lost, owner,
get_owner
If [control-name] starts with an underscore (_) it is considered
private. It is grabbed but it is not delivered to the client.
"""
def __init__(self, priority = None, **controls):
super(Layer, self).__init__()
self._priority = priority
self._name_to_controls = dict(zip(iter(controls.keys()), repeat(None)))
self._control_to_names = dict()
self._control_clients = dict()
for name, control in controls.items():
assert control is not None, name
self._control_to_names.setdefault(control, []).append(name)
def __add__(self, other):
return CompoundLayer(self, other)
def _get_priority(self):
return self._priority
def _set_priority(self, priority):
if priority != self._priority:
if self.owner:
raise RuntimeError(u"Cannot change priority of a layer while it's owned")
self._priority = priority
priority = property(_get_priority, _set_priority)
def __getattr__(self, name):
u""" Provides access to controls """
try:
return self._name_to_controls[name]
except KeyError:
raise AttributeError
def grab(self, client, *a, **k):
if client == self.owner:
self.on_received(client, *a, **k)
return True
return super(Layer, self).grab(client, *a, **k)
def on_received(self, client, *a, **k):
u""" Override from ExclusiveResource """
for control in self._control_to_names.keys():
k.setdefault(u'priority', self._priority)
control.resource.grab(self._get_control_client(client), *a, **k)
def on_lost(self, client):
u""" Override from ExclusiveResource """
for control in self._control_to_names.keys():
control.resource.release(self._get_control_client(client))
def _get_control_client(self, client):
try:
control_client = self._control_clients[client]
except KeyError:
control_client = self._control_clients[client] = LayerClient(layer_client=client, layer=self)
return control_client
| 2.25 | 2 |
src/retrieve_exons_sequence_genomes.py | naturalis/brassicaceae-hybseq-pipeline | 5 | 11479 | <gh_stars>1-10
# retrieve_exons_sequence_genomes.py
# This script is to retrieve exons from sequenced genomes which are also present in the reference genome (A. thaliana).
# To identify the contigs from the sequenced genomes, each contig has to be retrieved from A. thaliana first.
# Then, for each sequence query of A. thaliana, the query can be BLAT against the database reference.
# In this case, the database reference will be S. irio and A. lyrata.
# Made by: <NAME>
# Date: 19 May 2020
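# Downstream, each per-sequence file written below can be used as a BLAT query, e.g.
# (assuming BLAT is installed and an S. irio assembly FASTA is available):
#   blat S_irio_genome.fa AT1G01010.1.txt at1g01010_vs_sirio.psl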
import os
from Bio import SeqIO
path_to_at_exons_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/results/exons"
path_to_at_dir = "/mnt/c/Users/elfyl/PycharmProjects/brassicaceae-hybseq-pipeline-offline/data/reference_genomes"
path_to_at_reference = path_to_at_dir + "/ref-at.fasta"
# Create exons_AT Directory if don't exist
if not os.path.exists(path_to_at_exons_dir):
os.mkdir(path_to_at_exons_dir)
print("Directory ", path_to_at_exons_dir, " Created ")
else:
print("Directory ", path_to_at_exons_dir, " already exists")
# Create new files for every sequence query of the reference genome A. thaliana
count_id = 0
for seq_record in SeqIO.parse(path_to_at_reference, "fasta"):
f = open(path_to_at_exons_dir + "/" + seq_record.id + ".txt", "w+")
print("New text file created: " + seq_record.id + ".fa")
seq_id = seq_record.id
seq_seq = str(seq_record.seq)
f.write(">" + seq_id + "\n" + seq_seq)
f.close()
count_id += 1
print("Number of sequence records: " + str(count_id))
| 2.828125 | 3 |
lexical/lexical.py | xmeng17/Malicious-URL-Detection | 0 | 11480 | import re
class lexical(object):
    '''Lexical features derived from a URL hostname and path.
    lexical() returns:
    Number of dots in hostname (int)
    Average token length of hostname (float)
    Max token length of hostname (int)
    Average token length of path (float)
    Max token length of path (int)
    '''
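    # Example: lexical().lexical('www.example.com', '/a/b.html?id=1') returns
    # (dots, avg_host_token_len, max_host_token_len, avg_path_token_len, max_path_token_len).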
def __init__(self):
pass
def lexical(self,hostname,path):
dot_num=self.dots(hostname)
arr_host=self.split(hostname)
arr_path=self.split(path)
avg_host=self.avg(arr_host)
max_host=self.max(arr_host)
avg_path=self.avg(arr_path)
max_path=self.max(arr_path)
return dot_num,avg_host,max_host,avg_path,max_path
def dots(self,hostname):
# returns number of dots
return hostname.count('.')
def split(self,string):
# returns a list split by ‘/’, ‘?’, ‘.’, ‘=’, ‘-’ and ‘_’
return re.split('/|\?|\.|=|-|_', string)
def avg(self,arr):
# returns average token length
return sum(len(token) for token in arr)/len(arr)
def max(self,arr):
# returns max token length
return max(len(token) for token in arr) | 3.703125 | 4 |
stacker/tests/providers/aws/test_interactive.py | GoodRx/stacker | 1 | 11481 | <gh_stars>1-10
import unittest
from ....providers.aws.interactive import requires_replacement
def generate_resource_change(replacement=True):
resource_change = {
"Action": "Modify",
"Details": [],
"LogicalResourceId": "Fake",
"PhysicalResourceId": "arn:aws:fake",
"Replacement": "True" if replacement else "False",
"ResourceType": "AWS::Fake",
"Scope": ["Properties"],
}
return {
"ResourceChange": resource_change,
"Type": "Resource",
}
class TestInteractiveProvider(unittest.TestCase):
def test_requires_replacement(self):
changeset = [
generate_resource_change(),
generate_resource_change(replacement=False),
generate_resource_change(),
]
replacement = requires_replacement(changeset)
self.assertEqual(len(replacement), 2)
for resource in replacement:
self.assertEqual(resource["ResourceChange"]["Replacement"], "True")
| 2.953125 | 3 |
setup.py | digicert/digicert_express | 2 | 11482 | from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='digicert-express',
version='1.1dev2',
description='Express Install for DigiCert, Inc.',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Topic :: Security',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
url='https://github.com/digicert/digicert_express',
author='DigiCert, Inc.',
author_email='<EMAIL>',
license='MIT',
zip_safe=False,
packages=find_packages(exclude=['tests.*', '*.tests.*', '*.tests', 'tests', 'scripts']),
include_package_data=True,
install_requires=[
'python-augeas',
'requests>=2.8.1',
'ndg-httpsclient',
'pyasn1',
'pyOpenSSL' # prefer OS install but we can try here, too
],
)
| 1.335938 | 1 |
pytorch/plane.py | NunoEdgarGFlowHub/autoregressive-energy-machines | 83 | 11483 | import argparse
import json
import numpy as np
import os
import torch
import data_
import models
import utils
from matplotlib import cm, pyplot as plt
from tensorboardX import SummaryWriter
from torch import optim
from torch.utils import data
from tqdm import tqdm
from utils import io
parser = argparse.ArgumentParser()
# CUDA
parser.add_argument('--use_gpu', type=bool, default=True, help='Whether to use GPU.')
# data
parser.add_argument('--dataset_name', type=str, default='spirals',
help='Name of dataset to use.')
parser.add_argument('--n_data_points', default=int(1e6),
help='Number of unique data points in training set.')
parser.add_argument('--batch_size', type=int, default=256,
help='Size of batch used for training.')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers used in data loaders.')
# MADE
parser.add_argument('--n_residual_blocks_made', default=4,
help='Number of residual blocks in MADE.')
parser.add_argument('--hidden_dim_made', default=256,
help='Dimensionality of hidden layers in MADE.')
parser.add_argument('--activation_made', default='relu',
help='Activation function for MADE.')
parser.add_argument('--use_batch_norm_made', default=False,
help='Whether to use batch norm in MADE.')
parser.add_argument('--dropout_probability_made', default=None,
help='Dropout probability for MADE.')
# energy net
parser.add_argument('--context_dim', default=64,
help='Dimensionality of context vector.')
parser.add_argument('--n_residual_blocks_energy_net', default=4,
help='Number of residual blocks in energy net.')
parser.add_argument('--hidden_dim_energy_net', default=128,
help='Dimensionality of hidden layers in energy net.')
parser.add_argument('--energy_upper_bound', default=0,
help='Max value for output of energy net.')
parser.add_argument('--activation_energy_net', default='relu',
help='Activation function for energy net.')
parser.add_argument('--use_batch_norm_energy_net', default=False,
help='Whether to use batch norm in energy net.')
parser.add_argument('--dropout_probability_energy_net', default=None,
help='Dropout probability for energy net.')
parser.add_argument('--scale_activation', default='softplus',
help='Activation to use for scales in proposal mixture components.')
parser.add_argument('--apply_context_activation', default=False,
help='Whether to apply activation to context vector.')
# proposal
parser.add_argument('--n_mixture_components', default=10,
help='Number of proposal mixture components (per dimension).')
parser.add_argument('--proposal_component', default='gaussian',
help='Type of location-scale family distribution '
'to use in proposal mixture.')
parser.add_argument('--n_proposal_samples_per_input', default=20,
help='Number of proposal samples used to estimate '
'normalizing constant during training.')
parser.add_argument('--n_proposal_samples_per_input_validation', default=100,
help='Number of proposal samples used to estimate '
'normalizing constant during validation.')
parser.add_argument('--mixture_component_min_scale', default=1e-3,
help='Minimum scale for proposal mixture components.')
# optimization
parser.add_argument('--learning_rate', default=5e-4,
help='Learning rate for Adam.')
parser.add_argument('--n_total_steps', default=int(4e5),
help='Number of total training steps.')
parser.add_argument('--alpha_warm_up_steps', default=5000,
help='Number of warm-up steps for AEM density.')
parser.add_argument('--hard_alpha_warm_up', default=True,
help='Whether to use a hard warm up for alpha')
# logging and checkpoints
parser.add_argument('--monitor_interval', default=100,
help='Interval in steps at which to report training stats.')
parser.add_argument('--visualize_interval', default=10000,
                    help='Interval in steps at which to visualize the learned density.')
parser.add_argument('--save_interval', default=10000,
help='Interval in steps at which to save model.')
# reproducibility
parser.add_argument('--seed', default=1638128,
help='Random seed for PyTorch and NumPy.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.use_gpu and torch.cuda.is_available():
device = torch.device('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
device = torch.device('cpu')
# Generate data
train_dataset = data_.load_plane_dataset(args.dataset_name, args.n_data_points)
train_loader = data_.InfiniteLoader(
dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_epochs=None
)
# Generate test grid data
n_points_per_axis = 512
bounds = np.array([
[-4, 4],
[-4, 4]
])
grid_dataset = data_.TestGridDataset(n_points_per_axis=n_points_per_axis, bounds=bounds)
grid_loader = data.DataLoader(
dataset=grid_dataset,
batch_size=1000,
drop_last=False
)
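# The test grid above is only used to visualise the learned and proposal densities
# over the [-4, 4]^2 square during training; it plays no role in the optimisation.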
# various dimensions for autoregressive and energy nets
dim = 2 # D
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components # K + 3M
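# For each of the D inputs, MADE therefore emits a K-dimensional context vector for
# the energy net plus three parameters (mixture logit, location, scale) for each of
# the M proposal mixture components.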
# Create MADE
made = models.ResidualMADE(
input_dim=dim,
n_residual_blocks=args.n_residual_blocks_made,
hidden_dim=args.hidden_dim_made,
output_dim_multiplier=output_dim_multiplier,
conditional=False,
activation=utils.parse_activation(args.activation_made),
use_batch_norm=args.use_batch_norm_made,
dropout_probability=args.dropout_probability_made
).to(device)
# create energy net
energy_net = models.ResidualEnergyNet(
input_dim=(args.context_dim + 1),
n_residual_blocks=args.n_residual_blocks_energy_net,
hidden_dim=args.hidden_dim_energy_net,
energy_upper_bound=args.energy_upper_bound,
activation=utils.parse_activation(args.activation_energy_net),
use_batch_norm=args.use_batch_norm_energy_net,
dropout_probability=args.dropout_probability_energy_net
).to(device)
# create AEM
aem = models.AEM(
autoregressive_net=made,
energy_net=energy_net,
context_dim=args.context_dim,
n_proposal_mixture_components=args.n_mixture_components,
proposal_component_family=args.proposal_component,
n_proposal_samples_per_input=args.n_proposal_samples_per_input,
mixture_component_min_scale=args.mixture_component_min_scale,
apply_context_activation=args.apply_context_activation
).to(device)
# make optimizer
parameters = list(made.parameters()) + list(energy_net.parameters())
optimizer = optim.Adam(parameters, lr=args.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_total_steps)
# create summary writer and write to log directory
timestamp = io.get_timestamp()
log_dir = os.path.join(io.get_log_root(), args.dataset_name, timestamp)
writer = SummaryWriter(log_dir=log_dir)
filename = os.path.join(log_dir, 'config.json')
with open(filename, 'w') as file:
json.dump(vars(args), file)
# Training loop
tbar = tqdm(range(args.n_total_steps))
alpha = 0
for step in tbar:
aem.train()
scheduler.step(step)
optimizer.zero_grad()
# training step
batch = next(train_loader).to(device)
log_density, log_proposal_density, _, log_normalizer = aem(batch)
mean_log_density = torch.mean(log_density)
mean_log_proposal_density = torch.mean(log_proposal_density)
mean_log_normalizer = torch.mean(log_normalizer)
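    # Alpha warm-up: with the hard schedule the AEM log-density term is excluded from
    # the loss until alpha_warm_up_steps have elapsed; with the soft schedule alpha
    # ramps linearly from 0 to 1, so the proposal is fitted before the energy model.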
if args.alpha_warm_up_steps is not None:
if args.hard_alpha_warm_up:
alpha = float(step > args.alpha_warm_up_steps)
else:
alpha = torch.Tensor([min(step / args.alpha_warm_up_steps, 1)])
loss = - (alpha * mean_log_density + mean_log_proposal_density)
else:
loss = - (mean_log_density + mean_log_proposal_density)
loss.backward()
optimizer.step()
if (step + 1) % args.monitor_interval == 0:
s = 'Loss: {:.4f}, log p: {:.4f}, log q: {:.4f}'.format(
loss.item(),
mean_log_density.item(),
mean_log_proposal_density.item()
)
tbar.set_description(s)
# write summaries
summaries = {
'loss': loss.detach(),
'log-prob-aem': mean_log_density.detach(),
'log-prob-proposal': mean_log_proposal_density.detach(),
'log-normalizer': mean_log_normalizer.detach(),
'learning-rate': torch.Tensor(scheduler.get_lr()),
}
for summary, value in summaries.items():
writer.add_scalar(tag=summary, scalar_value=value, global_step=step)
if (step + 1) % args.visualize_interval == 0:
# Plotting
aem.eval()
aem.set_n_proposal_samples_per_input_validation(
args.n_proposal_samples_per_input_validation)
log_density_np = []
log_proposal_density_np = []
for batch in grid_loader:
batch = batch.to(device)
log_density, log_proposal_density, unnormalized_log_density, log_normalizer = aem(
batch)
log_density_np = np.concatenate((
log_density_np, utils.tensor2numpy(log_density)
))
log_proposal_density_np = np.concatenate((
log_proposal_density_np, utils.tensor2numpy(log_proposal_density)
))
fig, axs = plt.subplots(1, 3, figsize=(7.5, 2.5))
axs[0].hist2d(train_dataset.data[:, 0], train_dataset.data[:, 1],
range=bounds, bins=512, cmap=cm.viridis, rasterized=False)
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_proposal_density_np).reshape(grid_dataset.X.shape))
axs[1].set_xlim(bounds[0])
axs[1].set_ylim(bounds[1])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[2].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_density_np).reshape(grid_dataset.X.shape))
axs[2].set_xlim(bounds[0])
axs[2].set_ylim(bounds[1])
axs[2].set_xticks([])
axs[2].set_yticks([])
plt.tight_layout()
path = os.path.join(io.get_output_root(), 'pytorch', '{}.png'.format(args.dataset_name))
        os.makedirs(os.path.dirname(path), exist_ok=True)
plt.savefig(path, dpi=300)
writer.add_figure(tag='test-grid', figure=fig, global_step=step)
plt.close()
if (step + 1) % args.save_interval == 0:
path = os.path.join(io.get_checkpoint_root(), 'pytorch', '{}.t'.format(args.dataset_name))
        os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save(aem.state_dict(), path)
path = os.path.join(io.get_checkpoint_root(),
                    'pytorch', '{}-{}.t'.format(args.dataset_name, timestamp))
torch.save(aem.state_dict(), path)
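# To evaluate the saved model later, the state dict can be restored into an
# identically-constructed AEM (sketch, assuming the same configuration as above):
#     aem.load_state_dict(torch.load(path, map_location=device))
#     aem.eval()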
| 2.109375 | 2 |
music/music.py | spacerunaway/world_recoder | 0 | 11484 | <reponame>spacerunaway/world_recoder
import sys
sys.path.append('../utils')
from utils import *
from doubly_linkedlist import *
def link_chords(chordprogression):
"""
    A chord progression is a sequence of chords.
    Each element of a valid progression is one of the following:
    1: a chord name (str) present in the CHORD dict
    2: a key (type Key)
    and a piece of music has to start with START and end with END.
>>> c_p1 = [START,C_Major,'C','Am','F','G','C','Am','F','G7',END]
>>> c_p2 = [START,C_Major,'C','Am','F','G','C','Am','F','G',G_Major,'Em','C','D','D7','G',END]
>>> l1 = link_chords(c_p1)
>>> l1
start - C - Am - F - G - C - Am - F - G7 - end
>>> l2 = link_chords(c_p2)
>>> l2
start - C - Am - F - G - C - Am - F - G - Em - C - D - D7 - G - end
>>> l2[8].key is C_Major
True
>>> l2[8].chord == CHORD['G']
True
>>> l2[9].key is G_Major
True
>>> l2[9].chord == CHORD['Em']
True
>>> c_p3 = [C_Major,C_Major,START,'C',END,START,START,END,'F',G_Major]
>>> l3 = link_chords(c_p3)
>>> l3
start - C - end - start - start - end - F
"""
key = None
res = LinkedList()
for item in chordprogression:
if type(item) is Major_Scale or type(item) is minor_Scale:
key = item
else:
if item not in CHORD:
chord = item
else:
chord = CHORD[item]
node = LinkedChord(chord,key,item)
res.append(node)
return res
def parse_chordprogression(chordprogression):
link_chords(chordprogression)
cpd(chordprogression)
class Music(object):
melody = []
chordprogression = []
rhythm = []
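    # NOTE: melody, chordprogression and rhythm are class-level attributes, so they are
    # shared across instances until reassigned (e.g. via add_chordprogression below).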
def __init__(self,title,composer,key_signature,metre,arranger=''):
self.title = title
self.composer = composer
self.arranger = arranger
        self.key = key_signature
self.metre = metre
def add_subtitle(self,subtitle):
self.subtitle = subtitle
def add_chordprogression(self,chordprogression):
self.chordprogression = chordprogression
def add_tags(self,tags):
self.tags = tags
class Info(object):
def __init__(self,key,beat,tempo=90,rhythmtype=''):
self.key = key
self.beat = beat
self.tempo = tempo
self.rhythmtype = rhythmtype
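# Minimal self-check: exercise the doctests in this module (illustrative addition;
# it assumes START, END, CHORD and the scale/key objects are provided by `utils`).
if __name__ == '__main__':
    import doctest
    doctest.testmod()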
| 3.359375 | 3 |
azure-mgmt/tests/test_mgmt_documentdb.py | v-Ajnava/azure-sdk-for-python | 4 | 11485 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.documentdb
from msrestazure.azure_exceptions import CloudError
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
import logging
#logging.basicConfig(level=logging.DEBUG)
class MgmtDocDBTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtDocDBTest, self).setUp()
self.client = self.create_mgmt_client(
azure.mgmt.documentdb.DocumentDB
)
# I don't record resource group creation, since it's another package
if not self.is_playback():
self.create_resource_group()
@record
def test_accounts_create(self):
account_name = self.get_resource_name('pydocdbtst')
self.assertFalse(self.client.database_accounts.check_name_exists(account_name))
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
account = async_docdb_create.result()
self.assertIsNotNone(account)
# Rest API issue
# self.assertEqual(account.name, account_name)
def test_accounts_features(self):
account_name = self.get_resource_name('pydocdbtest')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
account = self.client.database_accounts.get(
self.group_name,
account_name
)
self.assertEqual(account.name, account_name)
my_accounts = list(self.client.database_accounts.list_by_resource_group(self.group_name))
self.assertEqual(len(my_accounts), 1)
self.assertEqual(my_accounts[0].name, account_name)
my_accounts = list(self.client.database_accounts.list())
self.assertTrue(len(my_accounts) >= 1)
self.assertTrue(any(db.name == account_name for db in my_accounts))
# I guess we can make this test with no error, need to check with DocDB team
# This is an interesting test anyway, this implies that the serialization works
# and error message is available. Since this method does not return an object
# (i.e. no deserialization to test), this is a complete test.
# We are NOT here to test the RestAPI, but the Swagger file and Python code.
with self.assertRaises(CloudError) as cm:
async_change = self.client.database_accounts.failover_priority_change(
self.group_name,
account_name,
[{
'location_name': self.region,
'failover_priority': 0
}]
)
async_change.wait()
self.assertIn('Failover priorities must be unique', cm.exception.message)
my_keys = self.client.database_accounts.list_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_master_key)
self.assertIsNotNone(my_keys.secondary_master_key)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
my_keys = self.client.database_accounts.list_read_only_keys(
self.group_name,
account_name
)
self.assertIsNotNone(my_keys.primary_readonly_master_key)
self.assertIsNotNone(my_keys.secondary_readonly_master_key)
async_regenerate = self.client.database_accounts.regenerate_key(
self.group_name,
account_name,
"primary"
)
async_regenerate.wait()
def test_accounts_delete(self):
account_name = self.get_resource_name('pydocumentdbtst')
if not self.is_playback():
async_docdb_create = self.client.database_accounts.create_or_update(
self.group_name,
account_name,
{
'location': self.region,
'locations': [{
'location_name': self.region
}]
}
)
async_docdb_create.wait()
with self.recording():
# Current implementation of msrestazure does not support 404 as a end of LRO delete
# https://github.com/Azure/msrestazure-for-python/issues/7
async_delete = self.client.database_accounts.delete(self.group_name, account_name)
try:
async_delete.wait()
except CloudError as err:
if err.response.status_code != 404:
raise
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| 2 | 2 |
config.py | somritabanerjee/speedplusbaseline | 0 | 11486 | import argparse
PROJROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplusbaseline',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/speedplusbaseline'}
DATAROOTDIR = {'mac': '/Users/taehapark/SLAB/speedplus/data/datasets',
'linux': '/home/somrita/Documents/Satellite_Pose_Estimation/dataset'}
parser = argparse.ArgumentParser('Configurations for SPEED+ Baseline Study')
# ------------------------------------------------------------------------------------------
# Basic directories and names
parser.add_argument('--seed', type=int, default=2021)
parser.add_argument('--projroot', type=str, default=PROJROOTDIR['linux'])
parser.add_argument('--dataroot', type=str, default=DATAROOTDIR['linux'])
parser.add_argument('--dataname', type=str, default='speedplus')
parser.add_argument('--savedir', type=str, default='checkpoints/synthetic/krn')
parser.add_argument('--resultfn', type=str, default='')
parser.add_argument('--logdir', type=str, default='log/synthetic/krn')
parser.add_argument('--pretrained', type=str, default='')
# ------------------------------------------------------------------------------------------
# Model config.
parser.add_argument('--model_name', type=str, default='krn')
parser.add_argument('--input_shape', nargs='+', type=int, default=(224, 224))
parser.add_argument('--num_keypoints', type=int, default=11) # KRN-specific
parser.add_argument('--num_classes', type=int, default=5000) # SPN-specific
parser.add_argument('--num_neighbors', type=int, default=5) # SPN-specific
parser.add_argument('--keypts_3d_model', type=str, default='src/utils/tangoPoints.mat')
parser.add_argument('--attitude_class', type=str, default='src/utils/attitudeClasses.mat')
# ------------------------------------------------------------------------------------------
# Training config.
parser.add_argument('--start_over', dest='auto_resume', action='store_false', default=True)
parser.add_argument('--randomize_texture', dest='randomize_texture', action='store_true', default=False)
parser.add_argument('--perform_dann', dest='dann', action='store_true', default=False)
parser.add_argument('--texture_alpha', type=float, default=0.5)
parser.add_argument('--texture_ratio', type=float, default=0.5)
parser.add_argument('--use_fp16', dest='fp16', action='store_true', default=False)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=75)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--test_epoch', type=int, default=-1)
parser.add_argument('--optimizer', type=str, default='rmsprop')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--lr_decay_alpha', type=float, default=0.96)
parser.add_argument('--lr_decay_step', type=int, default=1)
# ------------------------------------------------------------------------------------------
# Dataset-related inputs
parser.add_argument('--train_domain', type=str, default='synthetic')
parser.add_argument('--test_domain', type=str, default='lightbox')
parser.add_argument('--train_csv', type=str, default='train.csv')
parser.add_argument('--test_csv', type=str, default='lightbox.csv')
# ------------------------------------------------------------------------------------------
# Other miscellaneous settings
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--no_cuda', dest='use_cuda', action='store_false', default=True)
# End
cfg = parser.parse_args() | 2.03125 | 2 |
h1/api/insight_project_journal_api.py | hyperonecom/h1-client-python | 0 | 11487 | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from h1.api_client import ApiClient, Endpoint as _Endpoint
from h1.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from h1.model.event import Event
from h1.model.inline_response400 import InlineResponse400
from h1.model.insight_project_journal_create import InsightProjectJournalCreate
from h1.model.insight_project_journal_credential_patch import InsightProjectJournalCredentialPatch
from h1.model.insight_project_journal_transfer import InsightProjectJournalTransfer
from h1.model.insight_project_journal_update import InsightProjectJournalUpdate
from h1.model.journal import Journal
from h1.model.journal_credential import JournalCredential
from h1.model.resource_service import ResourceService
from h1.model.tag import Tag
from h1.model.tag_array import TagArray
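# Typical usage (sketch; the identifiers below are placeholders and authentication
# is configured on the ApiClient / Configuration in the usual generated-client way):
#
#     api_client = ApiClient()
#     journal_api = InsightProjectJournalApi(api_client)
#     journals = journal_api.insight_project_journal_list(project_id, location_id)
#
# Each endpoint also accepts async_req=True, returning a thread-like object whose
# .get() yields the deserialized response, as shown in the per-method docstrings.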
class InsightProjectJournalApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __insight_project_journal_create(
self,
project_id,
location_id,
insight_project_journal_create,
**kwargs
):
"""Create insight/journal # noqa: E501
Create journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_create(project_id, location_id, insight_project_journal_create, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
insight_project_journal_create (InsightProjectJournalCreate):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['insight_project_journal_create'] = \
insight_project_journal_create
return self.call_with_http_info(**kwargs)
self.insight_project_journal_create = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'insight_project_journal_create',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'insight_project_journal_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'insight_project_journal_create':
(InsightProjectJournalCreate,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'insight_project_journal_create': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_create
)
def __insight_project_journal_credential_create(
self,
project_id,
location_id,
journal_id,
journal_credential,
**kwargs
):
"""Create insight/journal.credential # noqa: E501
Create insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_create(project_id, location_id, journal_id, journal_credential, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
journal_credential (JournalCredential):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['journal_credential'] = \
journal_credential
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_create = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'required': [
'project_id',
'location_id',
'journal_id',
'journal_credential',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'journal_credential':
(JournalCredential,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'journal_credential': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_create
)
def __insight_project_journal_credential_delete(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Delete insight/journal.credential # noqa: E501
Delete insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_delete(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_delete = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_delete
)
def __insight_project_journal_credential_get(
self,
project_id,
location_id,
journal_id,
credential_id,
**kwargs
):
"""Get insight/journal.credential # noqa: E501
Get insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_get(project_id, location_id, journal_id, credential_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_get = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_get
)
def __insight_project_journal_credential_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.credential # noqa: E501
List insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[JournalCredential]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_list = _Endpoint(
settings={
'response_type': ([JournalCredential],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential',
'operation_id': 'insight_project_journal_credential_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_credential_list
)
def __insight_project_journal_credential_patch(
self,
project_id,
location_id,
journal_id,
credential_id,
insight_project_journal_credential_patch,
**kwargs
):
"""Update insight/journal.credential # noqa: E501
Update insight/journal.credential # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_credential_patch(project_id, location_id, journal_id, credential_id, insight_project_journal_credential_patch, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
credential_id (str): credentialId
insight_project_journal_credential_patch (InsightProjectJournalCredentialPatch):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
JournalCredential
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['credential_id'] = \
credential_id
kwargs['insight_project_journal_credential_patch'] = \
insight_project_journal_credential_patch
return self.call_with_http_info(**kwargs)
self.insight_project_journal_credential_patch = _Endpoint(
settings={
'response_type': (JournalCredential,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/credential/{credentialId}',
'operation_id': 'insight_project_journal_credential_patch',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'required': [
'project_id',
'location_id',
'journal_id',
'credential_id',
'insight_project_journal_credential_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'credential_id':
(str,),
'insight_project_journal_credential_patch':
(InsightProjectJournalCredentialPatch,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'credential_id': 'credentialId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'credential_id': 'path',
'insight_project_journal_credential_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_credential_patch
)
def __insight_project_journal_delete(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Delete insight/journal # noqa: E501
Delete journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_delete(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_delete
)
def __insight_project_journal_event_get(
self,
project_id,
location_id,
journal_id,
event_id,
**kwargs
):
"""Get insight/journal.event # noqa: E501
Get insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_get(project_id, location_id, journal_id, event_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
event_id (str): eventId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Event
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['event_id'] = \
event_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_get = _Endpoint(
settings={
'response_type': (Event,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event/{eventId}',
'operation_id': 'insight_project_journal_event_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'event_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'event_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'event_id': 'eventId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'event_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_get
)
def __insight_project_journal_event_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.event # noqa: E501
List insight/journal.event # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_event_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
limit (float): $limit. [optional] if omitted the server will use the default value of 100
skip (float): $skip. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Event]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_event_list = _Endpoint(
settings={
'response_type': ([Event],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/event',
'operation_id': 'insight_project_journal_event_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'limit',
'skip',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
'limit',
]
},
root_map={
'validations': {
('limit',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'limit':
(float,),
'skip':
(float,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'limit': '$limit',
'skip': '$skip',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'limit': 'query',
'skip': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_event_list
)
def __insight_project_journal_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal # noqa: E501
Returns a single journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_get = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_get
)
def __insight_project_journal_list(
self,
project_id,
location_id,
**kwargs
):
"""List insight/journal # noqa: E501
List journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_list(project_id, location_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
Keyword Args:
name (str): Filter by name. [optional]
tag_value (str): Filter by tag.value. [optional]
tag_key (str): Filter by tag.key. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Journal]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_list = _Endpoint(
settings={
'response_type': ([Journal],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal',
'operation_id': 'insight_project_journal_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'name',
'tag_value',
'tag_key',
],
'required': [
'project_id',
'location_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'name':
(str,),
'tag_value':
(str,),
'tag_key':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'name': 'name',
'tag_value': 'tag.value',
'tag_key': 'tag.key',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'name': 'query',
'tag_value': 'query',
'tag_key': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_list
)
def __insight_project_journal_log_get(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""Get insight/journal.log # noqa: E501
websocket is also supported # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_log_get(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
since (datetime): since. [optional]
until (datetime): until. [optional]
follow (bool): follow. [optional] if omitted the server will use the default value of False
tail (float): tail. [optional]
tag (TagArray): tag. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                    should be done on the data sent to the server.
                    Default is True.
                _check_return_type (bool): specifies if type checking
                    should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_log_get = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/log',
'operation_id': 'insight_project_journal_log_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'since',
'until',
'follow',
'tail',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'since':
(datetime,),
'until':
(datetime,),
'follow':
(bool,),
'tail':
(float,),
'tag':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'since': 'since',
'until': 'until',
'follow': 'follow',
'tail': 'tail',
'tag': 'tag',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'since': 'query',
'until': 'query',
'follow': 'query',
'tail': 'query',
'tag': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_log_get
)
def __insight_project_journal_service_get(
self,
project_id,
location_id,
journal_id,
service_id,
**kwargs
):
"""Get insight/journal.service # noqa: E501
Get insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_get(project_id, location_id, journal_id, service_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
service_id (str): serviceId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ResourceService
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['service_id'] = \
service_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_get = _Endpoint(
settings={
'response_type': (ResourceService,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service/{serviceId}',
'operation_id': 'insight_project_journal_service_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'service_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'service_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'service_id': 'serviceId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'service_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_get
)
def __insight_project_journal_service_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.service # noqa: E501
List insight/journal.service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_service_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[ResourceService]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_service_list = _Endpoint(
settings={
'response_type': ([ResourceService],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/service',
'operation_id': 'insight_project_journal_service_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_service_list
)
def __insight_project_journal_tag_create(
self,
project_id,
location_id,
journal_id,
tag,
**kwargs
):
"""Create insight/journal.tag # noqa: E501
Create insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_create(project_id, location_id, journal_id, tag, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag (Tag):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag'] = \
tag
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_create = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_create',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag':
(Tag,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_create
)
def __insight_project_journal_tag_delete(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Delete insight/journal.tag # noqa: E501
Delete insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_delete(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_delete = _Endpoint(
settings={
'response_type': None,
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_delete
)
def __insight_project_journal_tag_get(
self,
project_id,
location_id,
journal_id,
tag_id,
**kwargs
):
"""Get insight/journal.tag # noqa: E501
Get insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_get(project_id, location_id, journal_id, tag_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_id (str): tagId
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Tag
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_id'] = \
tag_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_get = _Endpoint(
settings={
'response_type': (Tag,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag/{tagId}',
'operation_id': 'insight_project_journal_tag_get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'tag_id': 'tagId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_get
)
def __insight_project_journal_tag_list(
self,
project_id,
location_id,
journal_id,
**kwargs
):
"""List insight/journal.tag # noqa: E501
List insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_list(project_id, location_id, journal_id, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_list = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
],
'required': [
'project_id',
'location_id',
'journal_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__insight_project_journal_tag_list
)
def __insight_project_journal_tag_put(
self,
project_id,
location_id,
journal_id,
tag_array,
**kwargs
):
"""Replace insight/journal.tag # noqa: E501
Replace insight/journal.tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_tag_put(project_id, location_id, journal_id, tag_array, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
tag_array (TagArray):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Tag]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['tag_array'] = \
tag_array
return self.call_with_http_info(**kwargs)
self.insight_project_journal_tag_put = _Endpoint(
settings={
'response_type': ([Tag],),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/tag',
'operation_id': 'insight_project_journal_tag_put',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'required': [
'project_id',
'location_id',
'journal_id',
'tag_array',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'tag_array':
(TagArray,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'tag_array': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_tag_put
)
def __insight_project_journal_transfer(
self,
project_id,
location_id,
journal_id,
insight_project_journal_transfer,
**kwargs
):
"""Transfer insight/journal # noqa: E501
action transfer # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_transfer(project_id, location_id, journal_id, insight_project_journal_transfer, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_transfer (InsightProjectJournalTransfer):
Keyword Args:
x_idempotency_key (str): Idempotency key. [optional]
x_dry_run (str): Dry run. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_transfer'] = \
insight_project_journal_transfer
return self.call_with_http_info(**kwargs)
self.insight_project_journal_transfer = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}/actions/transfer',
'operation_id': 'insight_project_journal_transfer',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
'x_idempotency_key',
'x_dry_run',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_transfer',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_transfer':
(InsightProjectJournalTransfer,),
'x_idempotency_key':
(str,),
'x_dry_run':
(str,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
'x_idempotency_key': 'x-idempotency-key',
'x_dry_run': 'x-dry-run',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_transfer': 'body',
'x_idempotency_key': 'header',
'x_dry_run': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_transfer
)
def __insight_project_journal_update(
self,
project_id,
location_id,
journal_id,
insight_project_journal_update,
**kwargs
):
"""Update insight/journal # noqa: E501
Returns modified journal # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insight_project_journal_update(project_id, location_id, journal_id, insight_project_journal_update, async_req=True)
>>> result = thread.get()
Args:
project_id (str): Project Id
location_id (str): Location Id
journal_id (str): Journal Id
insight_project_journal_update (InsightProjectJournalUpdate):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Journal
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['project_id'] = \
project_id
kwargs['location_id'] = \
location_id
kwargs['journal_id'] = \
journal_id
kwargs['insight_project_journal_update'] = \
insight_project_journal_update
return self.call_with_http_info(**kwargs)
self.insight_project_journal_update = _Endpoint(
settings={
'response_type': (Journal,),
'auth': [
'BearerAuth'
],
'endpoint_path': '/insight/{locationId}/project/{projectId}/journal/{journalId}',
'operation_id': 'insight_project_journal_update',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'required': [
'project_id',
'location_id',
'journal_id',
'insight_project_journal_update',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'project_id':
(str,),
'location_id':
(str,),
'journal_id':
(str,),
'insight_project_journal_update':
(InsightProjectJournalUpdate,),
},
'attribute_map': {
'project_id': 'projectId',
'location_id': 'locationId',
'journal_id': 'journalId',
},
'location_map': {
'project_id': 'path',
'location_id': 'path',
'journal_id': 'path',
'insight_project_journal_update': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__insight_project_journal_update
)
| 1.804688 | 2 |
forms/views.py | urchinpro/L2-forms | 0 | 11488 | <filename>forms/views.py
from django.http import HttpResponse
from django.utils.module_loading import import_string
def pdf(request):
"""
    Get the form's number (decimal type: 101.15, where "101" is the form's group and "15" is the number itself).
    Don't use single digits 1-9 for the part after the point,
    because the database field stores the number in decimal format xxx.yy - two digits after the dot - along with an active status.
    Must use: 01, 02, 03-09, 10, 11, 12-19, 20, 21, 22-29, 30, 31, ...
:param request:
:return:
"""
response = HttpResponse(content_type='application/pdf')
t = request.GET.get("type")
response['Content-Disposition'] = 'inline; filename="form-' + t + '.pdf"'
f = import_string('forms.forms' + t[0:3] + '.form_' + t[4:6])
response.write(f(request_data=request.GET))
return response
| 2.171875 | 2 |
main.py | code-aifarmer/Python-EXE-maker | 2 | 11489 | <gh_stars>1-10
#!/usr/bin/env python
import PySimpleGUI as sg
import cv2
import subprocess
import shutil
import os
import sys
# Demonstrates a number of PySimpleGUI features including:
# Default element size
# auto_size_buttons
# Button
# Dictionary return values
# update of elements in form (Text, Input)
def runCommand(cmd, timeout=None, window=None):
""" run shell command
@param cmd: command to execute
@param timeout: timeout for command execution
@return: (return code from command, command output)
"""
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = ''
for line in p.stdout:
line = line.decode(errors='replace' if (sys.version_info) < (3, 5)
else 'backslashreplace').rstrip()
output += line
print(line)
if window:
window.Refresh()
retval = p.wait(timeout)
return (retval, output)
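# Example usage (sketch): retval, output = runCommand('pyinstaller --version', window=window)
# retval is the process exit code; output is the combined stdout/stderr text that was
# also echoed line by line into the GUI's Output element via print().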
def w():
sg.theme('LightGreen')
layout = [[sg.Text(' Python EXE Creator', font='Any 15')],
[sg.Text('Source Python File'), sg.Input(key='-sourcefile-', size=(45, 1)),
sg.FileBrowse(file_types=(("Python Files", "*.py"),))],
[sg.Text('Icon File'), sg.Input(key='-iconfile-', size=(45, 1)),
sg.FileBrowse(file_types=(("Icon Files", "*.ico"),))],
[sg.Frame('Output', font='Any 15', layout=[
[sg.Output(size=(65, 15), font='Courier 10')]])],
[sg.Button('Make EXE', bind_return_key=True),
sg.Button('Quit', button_color=('white', 'firebrick3'))],
]
window = sg.Window('PySimpleGUI EXE Maker', layout, auto_size_text=False, auto_size_buttons=False,
default_element_size=(20, 1), text_justification='right')
# ---===--- Loop taking in user input --- #
while True:
event, values = window.read()
if event in ('Exit', 'Quit', None):
break
source_file = values['-sourcefile-']
icon_file = values['-iconfile-']
icon_option = '-i "{}"'.format(icon_file) if icon_file else ''
source_path, source_filename = os.path.split(source_file)
workpath_option = '--workpath "{}"'.format(source_path)
dispath_option = '--distpath "{}"'.format(source_path)
specpath_option = '--specpath "{}"'.format(source_path)
folder_to_remove = os.path.join(source_path, source_filename[:-3])
file_to_remove = os.path.join(source_path, source_filename[:-3] + '.spec')
command_line = 'pyinstaller -wF --clean "{}" {} {} {} {}'.format(source_file, icon_option, workpath_option,
dispath_option, specpath_option)
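        # e.g. for a hypothetical source file C:/proj/app.py with no icon this builds roughly:
        #   pyinstaller -wF --clean "C:/proj/app.py"  --workpath "C:/proj" --distpath "C:/proj" --specpath "C:/proj"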
        if event == 'Make EXE':
            out = ''
            try:
                print(command_line)
                print('Making EXE...the program has NOT locked up...')
                window.refresh()
                # runCommand returns (return code, combined output)
                retval, out = runCommand(command_line, window=window)
shutil.rmtree(folder_to_remove)
os.remove(file_to_remove)
print('**** DONE ****')
except:
sg.PopupError('Something went wrong',
'close this window and copy command line from text printed out in main window',
'Here is the output from the run', out)
print('Copy and paste this line into the command prompt to manually run PyInstaller:\n\n', command_line)
layout = [[sg.Text('Enter Your Passcode')],
[sg.Input('', size=(10, 1), key='input')],
[sg.Button('1'), sg.Button('2'), sg.Button('3')],
[sg.Button('4'), sg.Button('5'), sg.Button('6')],
[sg.Button('7'), sg.Button('8'), sg.Button('9')],
[sg.Button('Submit'), sg.Button('0'), sg.Button('Clear')],
[sg.Text('', size=(15, 1), font=('Helvetica', 18),
text_color='red', key='out')],
]
window = sg.Window('Keypad', layout,
default_button_element_size=(5, 2),
auto_size_buttons=False,
grab_anywhere=False)
# Loop forever reading the form's values, updating the Input field
keys_entered = ''
while True:
event, values = window.read() # read the form
if event == sg.WIN_CLOSED: # if the X button clicked, just exit
break
if event == 'Clear': # clear keys if clear button
keys_entered = ''
elif event in '1234567890':
keys_entered = values['input'] # get what's been entered so far
keys_entered += event # add the new digit
elif event == 'Submit':
keys_entered = values['input']
if values['input']=='123456':
            sg.popup('Passcode correct')
w()
else:
            sg.popup('Passcode incorrect')
window['out'].update(keys_entered) # output the final string
# change the form to reflect current key string
window['input'].update(keys_entered)
window.close()
| 2.765625 | 3 |
seisflows/system/lsf_sm.py | jpvantassel/seisflows | 97 | 11490 | #
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
raise NotImplementedError
| 1.023438 | 1 |
data/objects/sample.py | predictive-analytics-lab/tiny-comparison-framework | 0 | 11491 | <filename>data/objects/sample.py
from data.objects.data import Data
class Sample(Data):
"""
A way to sample from a dataset for testing purposes.
"""
def __init__(self, data, num = 100):
self.data = data
self.dataset_name = data.get_dataset_name()
self.class_attr = data.get_class_attribute()
self.positive_class_val = data.get_positive_class_val("") # sigh
self.sensitive_attrs = data.get_sensitive_attributes()
self.privileged_class_names = data.get_privileged_class_names("") # sigh
self.categorical_features = data.get_categorical_features()
self.features_to_keep = data.get_features_to_keep()
self.missing_val_indicators = data.get_missing_val_indicators()
self.num_to_sample = num
def data_specific_processing(self, dataframe):
dataframe = self.data.data_specific_processing(dataframe)
return dataframe.sample(n = self.num_to_sample, replace=True)
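# Example usage (sketch; `Adult` stands in for any concrete Data subclass in this package):
#   sample = Sample(Adult(), num=50)
#   df_small = sample.data_specific_processing(df)   # base processing, then 50 rows drawn with replacement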
| 3.328125 | 3 |
parkrundata/views.py | remarkablerocket/parkrundata | 0 | 11492 | <reponame>remarkablerocket/parkrundata
# -*- coding: utf-8 -*-
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import Country, Event
from .serializers import CountrySerializer, EventSerializer
class CountryViewSet(viewsets.ModelViewSet):
queryset = Country.objects.all()
serializer_class = CountrySerializer
permission_classes = [IsAuthenticatedOrReadOnly]
class EventViewSet(viewsets.ModelViewSet):
queryset = Event.objects.all()
serializer_class = EventSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
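# Typical URL wiring for these viewsets (sketch, assuming DRF's DefaultRouter in urls.py):
#   router = DefaultRouter()
#   router.register(r'countries', CountryViewSet)
#   router.register(r'events', EventViewSet)
#   urlpatterns = router.urls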
| 1.976563 | 2 |
spearmint/models/gp_classifier.py | jatinarora2409/Spearmint | 0 | 11493 | <gh_stars>0
# -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by <NAME>, <NAME>, and
# <NAME> at Harvard University, <NAME> at the
# University of Toronto (“Toronto”), and <NAME> at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: <EMAIL>
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# <NAME>, <NAME> and <NAME>
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# <NAME>, <NAME> and <NAME>
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# <NAME>, <NAME>, <NAME> and <NAME>
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology <NAME>, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import copy
import sys, logging
import numpy as np
import numpy.random as npr
import scipy.linalg as spla
import scipy.optimize as spo
import scipy.io as sio
import scipy.stats as sps
try:
import scipy.weave as weave
except ImportError:
import weave
from .gp import GP
from ..utils.param import Param as Hyperparameter
from ..kernels import Matern52, Noise, Scale, SumKernel, TransformKernel
from ..sampling.slice_sampler import SliceSampler
from ..sampling.whitened_prior_slice_sampler import WhitenedPriorSliceSampler
from ..sampling.elliptical_slice_sampler import EllipticalSliceSampler
from ..utils import priors
from ..transformations import BetaWarp, Transformer
try:
module = sys.modules['__main__'].__file__
log = logging.getLogger(module)
except:
log = logging.getLogger()
print 'Not running from main.'
class GPClassifier(GP):
def __init__(self, num_dims, **options):
self.counts = None
log.debug('GP Classifier initialized with options: %s' % (options))
self.ess_thinning = int(options.get("ess-thinning", 10))
self._set_likelihood(options)
self.prior_whitening = options.get('prior-whitening', True)
sigmoid = options.get("sigmoid", "probit")
if not self.noiseless:
if sigmoid == "probit":
self.sigmoid = sps.norm.cdf
self.sigmoid_derivative = sps.norm.pdf # not used
self.sigmoid_inverse = sps.norm.ppf
elif sigmoid == "logistic":
self.sigmoid = sps.logistic.cdf
self.sigmoid_derivative = sps.logistic.pdf
self.sigmoid_inverse = sps.logistic.ppf
else:
raise Exception("Only probit and logistic sigmoids are supported")
else:
# If no noise we use the step function and ignore the "sigmoid" argument.
# (This is the step function likelihood)
# assert options['likelihood'] == 'STEP'
self.sigmoid = lambda x: np.greater_equal(x, 0)
self.sigmoid_derivative = lambda x: 0.
self.sigmoid_inverse = lambda x: 0.
# The constraint is that p=s(f) > 1-epsilon
# where s if the sigmoid and f is the latent function value, and p is the binomial probability
# This is only in more complicated situations. The main situation where this is used
# we want f>0. This is equivalent to epsilon=0.5 for the sigmoids we use
# The point is: do not set epsilon unless you know what you are doing!
# (and do not confuse it with delta, the min constraint confidence)
self._one_minus_epsilon = 1.0 - float(options.get("epsilon", 0.5))
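        # With the default epsilon=0.5 this reduces to requiring f > 0, since both the
        # probit and logistic sigmoids pass through 0.5 at f = 0.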
self.latent_values_list = []
super(GPClassifier, self).__init__(num_dims, **options)
def _set_likelihood(self, options):
self.likelihood = options.get('likelihood', 'binomial').lower()
if self.likelihood.lower() == "binomial":
self.noiseless = False
elif self.likelihood.lower() == "step":
self.noiseless = True
else:
raise Exception("GP classifier only supports step or binomial likelihood, not %s" % (options['likelihood']))
def _reset(self):
super(GPClassifier, self)._reset()
# Reset the latent values
if self.counts is not None:
initial_latent_vals = self.counts - 0.5
else:
initial_latent_vals = np.zeros(0)
self.latent_values.initial_value = initial_latent_vals
self.latent_values.reset_value()
self._latent_values_list = []
def _set_latent_values_from_dict(self, latent_values_dict):
# Read in the latent values. For pre-existing data, just load them in
# For new data, set them to a default.
default_latent_values = self.counts - 0.5
latent_values = np.zeros(self._inputs.shape[0])
for i in xrange(self._inputs.shape[0]):
key = str(hash(self._inputs[i].tostring()))
if key in latent_values_dict:
latent_values[i] = latent_values_dict[key]
else:
latent_values[i] = default_latent_values[i]
self.latent_values.value = latent_values
def _burn_samples(self, num_samples):
# sys.stderr.write('GPClassifer: burning %s: ' % ', '.join(self.params.keys()))
# sys.stderr.write('%04d/%04d' % (0, num_samples))
for i in xrange(num_samples):
# sys.stderr.write('\b'*9+'%04d/%04d' % (i, num_samples))
for sampler in self._samplers:
sampler.sample(self)
self.latent_values_sampler.sample(self)
self.chain_length += 1
# sys.stderr.write('\n')
def _collect_samples(self, num_samples):
# sys.stderr.write('GPClassifer: sampling %s: ' % ', '.join(self.params.keys()))
# sys.stderr.write('%04d/%04d' % (0, num_samples))
hypers_list = []
latent_values_list = []
for i in xrange(num_samples):
# sys.stderr.write('\b'*9+'%04d/%04d' % (i, num_samples))
for sampler in self._samplers:
sampler.sample(self)
self.latent_values_sampler.sample(self)
current_dict = self.to_dict()
hypers_list.append(current_dict['hypers'])
latent_values_list.append(current_dict['latent values'])
self.chain_length += 1
# sys.stderr.write('\n')
return hypers_list, latent_values_list
def _build(self):
self.params = {}
self.latent_values = None
# Build the transformer
beta_warp = BetaWarp(self.num_dims)
beta_alpha, beta_beta = beta_warp.hypers
self.params['beta_alpha'] = beta_alpha
self.params['beta_beta'] = beta_beta
transformer = Transformer(self.num_dims)
transformer.add_layer(beta_warp)
# Build the component kernels
input_kernel = Matern52(self.num_dims)
ls = input_kernel.hypers
self.params['ls'] = ls
# Now apply the transformation.
transform_kernel = TransformKernel(input_kernel, transformer)
# Add some perturbation for stability
stability_noise = Noise(self.num_dims)
# Finally make a noisy version if necessary
# In a classifier GP the notion of "noise" is really just the scale.
if self.noiseless:
self._kernel = SumKernel(transform_kernel, stability_noise)
else:
scaled_kernel = Scale(transform_kernel)
self._kernel = SumKernel(scaled_kernel, stability_noise)
amp2 = scaled_kernel.hypers
self.params['amp2'] = amp2
# Build the mean function (just a constant mean for now)
self.mean = Hyperparameter(
initial_value = 0.0,
prior = priors.Gaussian(0.0,1.0),
name = 'mean'
)
self.params['mean'] = self.mean
        # Build the latent values. Empty for now until the GP gets data.
self.latent_values = Hyperparameter(
initial_value = np.array([]),
name = 'latent values'
)
# Build the samplers
to_sample = [self.mean] if self.noiseless else [self.mean, amp2]
self._samplers.append(SliceSampler(*to_sample, compwise=False, thinning=self.thinning))
self._samplers.append(WhitenedPriorSliceSampler(ls, beta_alpha, beta_beta, compwise=True, thinning=self.thinning))
self.latent_values_sampler = EllipticalSliceSampler(self.latent_values, thinning=self.ess_thinning)
@property
def values(self):
if self.pending is None or len(self._fantasy_values_list) < self.num_states:
return self.observed_values
if self.num_fantasies == 1:
return np.append(self.latent_values.value, self._fantasy_values_list[self.state].flatten(), axis=0)
else:
return np.append(np.tile(self.latent_values.value[:,None], (1,self.num_fantasies)), self._fantasy_values_list[self.state], axis=0)
@property
def observed_values(self):
if self.latent_values is not None:
return self.latent_values.value
else:
return np.array([])
def set_state(self, state):
self.state = state
self._set_params_from_dict(self._hypers_list[state])
self._set_latent_values_from_dict(self._latent_values_list[state])
def pi(self, pred, compute_grad=False):
return super(GPClassifier, self).pi( pred, compute_grad=compute_grad,
C=self.sigmoid_inverse(self._one_minus_epsilon) )
def fit(self, inputs, counts, pending=None, hypers=None, reburn=False, fit_hypers=True):
# Set the data for the GP
self._inputs = inputs
self.counts = counts
# Reset the GP
self._reset()
# Initialize the GP with hypers if provided
if hypers:
self.from_dict(hypers)
if fit_hypers:
# Burn samples (if needed)
num_samples = self.burnin if reburn or self.chain_length < self.burnin else 0
self._burn_samples(num_samples)
# Now collect some samples
self._hypers_list, self._latent_values_list = self._collect_samples(self.mcmc_iters)
# Now we have more states
self.num_states = self.mcmc_iters
elif not self._hypers_list:
# Just use the current hypers as the only state
current_dict = self.to_dict()
self._hypers_list = [current_dict['hypers']]
self._latent_values_list = [current_dict['latent values']]
self.num_states = 1
# Set pending data and generate corresponding fantasies
if pending is not None:
self.pending = pending
self._fantasy_values_list = self._collect_fantasies(pending)
# Get caching ready
if self.caching:
self._prepare_cache()
# Set the hypers to the final state of the chain
self.set_state(len(self._hypers_list)-1)
return self.to_dict()
def log_binomial_likelihood(self, y=None):
# If no data, don't do anything
if not self.has_data:
return 0.0
if y is None:
y = self.latent_values.value
p = self.sigmoid(y)
# Note on the below: the obvious implementation would be
# return np.sum( pos*np.log(p) + neg*np.log(1-p) )
# The problem is, if pos = 0, and p=0, we will get a 0*-Inf = nan
# This messes things up. So we use the safer implementation below that ignores
# the term entirely if the counts are 0.
pos = self.counts # positive counts
neg = 1 - pos
with np.errstate(divide='ignore'): # suppress warnings about log(0)
return np.sum( pos[pos>0]*np.log(p[pos>0]) ) + np.sum( neg[neg>0]*np.log(1-p[neg>0]) )
def to_dict(self):
gp_dict = {}
gp_dict['hypers'] = {}
for name, hyper in self.params.iteritems():
gp_dict['hypers'][name] = hyper.value
# Save the latent values as a dict with keys as hashes of the data
# so that each latent value is associated with its input
# then when we load them in we know which ones are which
gp_dict['latent values'] = {str(hash(self._inputs[i].tostring())) : self.latent_values.value[i]
for i in xrange(self._inputs.shape[0])}
gp_dict['chain length'] = self.chain_length
return gp_dict
def from_dict(self, gp_dict):
self._set_params_from_dict(gp_dict['hypers'])
self._set_latent_values_from_dict(gp_dict['latent values'])
self.chain_length = gp_dict['chain length']
| 1.382813 | 1 |
pycom_lopy4_LoRaBattMonitor/transmitter/main.py | AidanTek/Fab-Cre8_IoT | 0 | 11494 | from machine import Pin, ADC
from network import LoRa
import socket
from utime import sleep
import machine
import pycom
# Use a pin for a 'config' mode
configPin = Pin('P21', Pin.IN, Pin.PULL_UP)
# Create an ADC object
adc = ADC()
# vbatt pin:
vbatt = adc.channel(attn=1, pin='P16')
def battConversion():
adcVoltage = vbatt()
voltage = adcVoltage*3*1.334/4095
return voltage
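# Worked example of the conversion above (assumed mid-scale reading): an ADC
# value of 2048 gives 2048 * 3 * 1.334 / 4095 ≈ 2.00 V with the scaling
# factors used here.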
# Initialise LoRa in LoRa mode
# For Europe, use LoRa.EU868
lora = LoRa(mode=LoRa.LORA, region=LoRa.EU868)
# Create a raw LoRa socket
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# Check the Config pin:
configMode = not configPin()
if not configMode:
print('Reading Battery')
pycom.rgbled(0x0000FF)
message = 'Battery Status: {}'.format(battConversion())
print(message)
sleep(2)
print('Sending battery status estimate...')
pycom.rgbled(0xFF0000)
sleep(2)
s.setblocking(True)
# Send some data
s.send(message)
print('Message Sent!')
pycom.rgbled(0x00FF00)
sleep(2)
print('Going to sleep')
machine.deepsleep(300000)
# Otherwise, we are in 'config' so exit to REPL
print('Config Mode')
| 2.875 | 3 |
scrapi/harvesters/lwbin.py | wearpants/scrapi | 34 | 11495 | """
A Lake Winnipeg Basin Information Network (BIN) harvester for the SHARE project
Example API request: http://130.179.67.140/api/3/action/package_search?q= (problematic)
http://130.179.67.140/api/3/action/current_package_list_with_resources (currently using)
Oddly, it returns 5 more datasets than are searchable on the LWBIN data hub.
Known issues:
1 -- Five datasets can be searched but cannot be accessed via LWBIN.
Clicking on such a search result redirects to a login page like this:
http://172.16.58.3/user/login?came_from=http://130.179.67.140/dataset/mpca-surface-water-data-access-interactive-map
Within each dataset there are resources that contain urls to source pages. For future work, consider using resource
urls as canonical urls.
2 -- Resource properties contained in the raw metadata of the datasets are not added to the normalized metadata at this
point.
3 -- Single-name contributors can be used as filters or an invalid query will be returned. This is an issue with the frontend, not with scrapi.
"""
from __future__ import unicode_literals
import json
import logging
from dateutil.parser import parse
from scrapi import requests
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import build_properties, datetime_formatter, parse_name
logger = logging.getLogger(__name__)
ORGANIZATIONS = (
"organization", "fund", "canada", "agriculture", "commitee", "international", "council", "office", "of",
"observation", "institute", "lwbin", "cocorahs", "usgs", "nsidc"
)
def is_organization(name):
"""Return a boolean to indicate if the name passed to the function is an organization
"""
words = name.split(' ')
return any(word.strip(";").lower() in ORGANIZATIONS for word in words)
def clean_authors(authors):
"""Cleam authors list.
"""
authors = authors.strip().replace('<span class="author-names">', '').replace('</span>', '')
authors = authors.split(',')
new_authors = []
for author in authors:
if is_organization(author):
new_authors.append(author)
else:
if ' and ' in author or ' <em>et al.</em>' in author:
split_name = author.replace(' <em>et al.</em>', '').split(' and ')
new_authors.extend(split_name)
else:
new_authors.append(author)
return new_authors
def process_contributors(authors, emails):
"""Process authors and add author emails
    If there are multiple authors and only one email, the email is added as a separate contributor entry.
"""
emails = emails.split(',')
authors = clean_authors(authors)
contributor_list = []
append_emails = len(authors) == 1 and len(emails) == 1 and not emails[0] == u'' # append the email to the author only when 1 record is observed
for i, author in enumerate(authors):
if is_organization(author):
contributor = {
'name': author
}
else:
contributor = parse_name(author)
if append_emails:
contributor['email'] = emails[i]
contributor_list.append(contributor)
if not append_emails and emails[0] != u'':
for email in emails:
contributor = {
'name': '',
'email': email
}
contributor_list.append(contributor)
return contributor_list
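# Illustrative example (hypothetical input, not from the original module):
#     process_contributors('<span class="author-names">Jane Doe and John Smith</span>', '')
# should yield two parsed-name dicts with no email attached, whereas a single
# author paired with a single non-empty email gets that email attached inline.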
def process_licenses(license_title, license_url, license_id):
"""Process licenses to comply with the normalized schema
"""
if not license_url:
return []
else:
license = {
'uri': license_url,
'description': "{} ({})".format(license_title, license_id) or ""
}
return [license]
def construct_url(url, dataset_path, end_point):
"""
    :return: a url that directs back to the page on LWBIN Data Hub instead of the source page.
:param url: host url
:param dataset_path: parent path of all datasets
:param end_point: name of datasets
"""
return "/".join([url, dataset_path, end_point])
def process_object_uris(url, extras):
"""Extract doi from /extras, and return a list of object uris including /url and doi if it exists.
"""
doi = []
for d in extras:
if d['key'] == "DOI" or d['key'] == "DOI:":
doi.append(d['value'])
if doi == []:
return [url]
else:
        return [url] + doi
class LWBINHarvester(JSONHarvester):
short_name = 'lwbin'
long_name = 'Lake Winnipeg Basin Information Network'
url = 'http://130.179.67.140'
dataset_path = "dataset" # dataset base url for constructing urls that go back to LWBIN instead of source pages.
DEFAULT_ENCODING = 'UTF-8'
record_encoding = None
@property
def schema(self):
return {
'title': ('/title', lambda x: x or ''),
'description': ('/notes'),
'providerUpdatedDateTime': ('/metadata_modified', datetime_formatter),
'uris': {
'canonicalUri': ('/name', lambda x: construct_url(self.url, self.dataset_path, x)), # Construct new urls directing to LWBIN
'objectUris': ('/url', '/extras', process_object_uris) # Default urls from the metadata directing to source pages
},
'contributors': ('/author', '/author_email', process_contributors),
'licenses': ('/license_title', '/license_url', '/license_id', process_licenses),
'tags': ('/tags', lambda x: [tag['name'].lower() for tag in (x or [])]),
'freeToRead': {
'startDate': ('/isopen', '/metadata_created', lambda x, y: parse(y).date().isoformat() if x else None)
},
'otherProperties': build_properties(
('maintainer', '/maintainer'),
('maintainerEmail', '/maintainer_email'),
('revisionTimestamp', ('/revision_timestamp', datetime_formatter)),
('id', '/id'),
('metadataCreated', ('/metadata_created', datetime_formatter)),
('state', '/state'),
('version', '/version'),
('creatorUserId', '/creator_user_id'),
('type', '/type'),
('numberOfResources', '/num_resources'),
('numberOfTags', '/num_tags'),
('name', '/name'),
('groups', '/groups'),
)
}
def harvest(self, start_date=None, end_date=None):
"""Returns a list of Rawdocuments (metadata)
Searching by time is not supported by LWBIN CKAN API. all datasets have to be scanned each time.
"""
base_url = 'http://172.16.58.3/api/3/action/current_package_list_with_resources'
records = requests.get(base_url).json()['result']
total = len(records) # Total number of documents
logger.info('{} documents to be harvested'.format(total))
return [
RawDocument({
'doc': json.dumps(record),
'source': self.short_name,
'docID': record['id'],
'filetype': 'json'
}) for record in records
]
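# Minimal usage sketch (not part of the scrapi harvester runner): assumes the
# LWBIN CKAN endpoint above is reachable and that the harvester needs no
# constructor arguments; running this module directly issues a live HTTP
# request and simply logs how many RawDocuments were produced.
if __name__ == '__main__':
    harvester = LWBINHarvester()
    raw_docs = harvester.harvest()
    logger.info('Harvested {} raw documents from LWBIN'.format(len(raw_docs)))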
| 2.140625 | 2 |
catalog/bindings/gmd/point.py | NIVANorge/s-enda-playground | 0 | 11496 | from dataclasses import dataclass
from bindings.gmd.point_type import PointType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class Point(PointType):
"""A Point is defined by a single coordinate tuple.
The direct position of a point is specified by the pos element which
is of type DirectPositionType.
"""
class Meta:
namespace = "http://www.opengis.net/gml"
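# Hedged usage sketch (illustrative only; assumes PointType exposes a ``pos``
# field holding the DirectPositionType described in the docstring above):
# point = Point(pos=some_direct_position)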
| 3.15625 | 3 |
hknweb/exams/migrations/0019_auto_20200413_0212.py | AndrewKe/hknweb | 0 | 11497 | <gh_stars>0
# Generated by Django 2.2.8 on 2020-04-13 09:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('exams', '0018_auto_20200412_1715'),
]
operations = [
migrations.CreateModel(
name='ExamChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exam_Choice', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='exam',
name='exam_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.ExamChoice'),
),
migrations.AlterField(
model_name='exam',
name='instructor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exams.Instructor'),
),
migrations.DeleteModel(
name='CourseSemester',
),
]
| 1.632813 | 2 |
tools/linear_algebra/preconditioners/Jacobi.py | mathischeap/mifem | 1 | 11498 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Jacobian preconditioner.
"""
from root.config.main import *
from scipy import sparse as spspa
from tools.linear_algebra.preconditioners.base import Preconditioner
class JacobiPreconditioner(Preconditioner):
""""""
def __init__(self, A):
""""""
super(JacobiPreconditioner, self).__init__(A)
self._freeze_self_()
@property
def invM(self):
A = self._A_.M
diag = A.diagonal()
if rAnk != mAster_rank:
DIAG = None
else:
DIAG = np.empty((sIze, self._A_.shape[0]))
cOmm.Gather(diag, DIAG, root=mAster_rank)
if rAnk == mAster_rank:
DIAG = np.sum(DIAG, axis=0)
DIAG = np.reciprocal(DIAG)
else:
DIAG = np.empty((self._A_.shape[0],))
cOmm.Bcast(DIAG, root=mAster_rank)
invM = spspa.dia_matrix((DIAG, 0), shape=self._A_.shape)
return invM
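    # Illustrative note (assumption: a single-process run): the Gather/Bcast
    # round-trip above then reduces to taking the reciprocal of A.diagonal()
    # directly, i.e. the classic Jacobi preconditioner M^{-1} = diag(A)^{-1}.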
@property
def ___applying_method___(self):
        return 'left_multiply_invM'
 | 2.34375 | 2 |
social_redirects/models.py | JoshZero87/site | 4 | 11499 | <reponame>JoshZero87/site
from django.contrib.sites.models import Site
from django.db import models
class Redirect(models.Model):
title = models.CharField(max_length=200)
description = models.CharField(max_length=1024, blank=True, null=True)
social_image = models.ImageField(null=True, blank=True)
old_path = models.CharField(max_length=200, db_index=True, verbose_name="Redirect From", help_text="This should be an absolute path, excluding the domain name. Example: '/events/search/'.")
new_path = models.CharField(max_length=200, blank=True, verbose_name="Redirect To", help_text="This can be either an absolute path (as above) or a full URL starting with 'http://'.")
site = models.ForeignKey(Site, models.CASCADE)
class Meta:
unique_together = (('site', 'old_path'),)
ordering = ('old_path',)
def __str__(self):
return "%s ---> %s" % (self.old_path, self.new_path)
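# Hedged usage sketch (illustrative values; assumes the sites framework is
# configured and a Site row exists):
# Redirect.objects.create(
#     site=Site.objects.get_current(),
#     title='Old events page',
#     old_path='/events/search/',
#     new_path='/events/',
# )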
| 2.234375 | 2 |