from UR10 import *
u = UR10Controller('10.1.1.6')
x = URPoseManager()
x.load('t1.urpose')
print(x.getPosJoint('home'))
resp = input('hit y if you want to continue')
if resp == 'y':
x.moveUR(u,'home',30)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''Update the "manuf" file.
Make-manuf creates a file containing ethernet OUIs and their company
IDs. It merges the databases at IEEE with entries in our template file.
Our file in turn contains entries from
http://www.cavebear.com/archive/cavebear/Ethernet/Ethernet.txt along
with our own.
The script reads the comments at the top of "manuf.tmpl" and writes them
to "manuf". It then joins the manufacturer listing in "manuf.tmpl" with
the listing in "oui.txt", "iab.txt", etc, with the entries in
"manuf.tmpl" taking precedence.
'''
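# The generated file pairs each OUI with a shortened name and, for longer names,
# the original mixed-case name. Illustrative layout only (placeholders, not real
# registry values):
#   <oui>[/<mask>]<TAB><short name>[<TAB><original mixed-case name>]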
import csv
import io
import os
import re
import sys
if sys.version_info[0] >= 3:
import urllib.request, urllib.error, urllib.parse
import codecs
else:
import urllib2
have_icu = False
try:
# Use the grapheme or segments module instead?
import icu
have_icu = True
except ImportError:
pass
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def open_url(url):
'''Open a URL.
Returns a tuple containing the body and response dict. The body is a
    str in Python 3 and bytes in Python 2 in order to be compatible with
csv.reader.
'''
req_headers = { 'User-Agent': 'Wireshark make-manuf' }
try:
if sys.version_info[0] >= 3:
req = urllib.request.Request(url, headers=req_headers)
response = urllib.request.urlopen(req)
body = response.read().decode('UTF-8', 'replace')
else:
req = urllib2.Request(url, headers=req_headers)
response = urllib2.urlopen(req)
body = response.read()
except:
exit_msg('Error opening ' + url)
return (body, dict(response.info()))
def shorten(manuf):
'''Convert a long manufacturer name to abbreviated and short names'''
# Normalize whitespace.
manuf = ' '.join(manuf.split())
orig_manuf = manuf
# Add exactly one space on each end.
# XXX This appears to be for the re.sub below.
manuf = u' {} '.format(manuf)
# Convert to consistent case
manuf = manuf.title()
# Remove any punctuation
# XXX Use string.punctuation? Note that it includes '-' and '*'.
manuf = re.sub(u"[',.()]", ' ', manuf)
# & isn't needed when Standalone
manuf = manuf.replace(" & ", " ")
# Remove any "the", "inc", "plc" ...
    manuf = re.sub(r'\W(the|incorporated|inc|plc|systems|corporation|corp|s/a|a/s|ab|ag|kg|gmbh|company|co|limited|ltd|holding|spa)(?= )', '', manuf, flags=re.IGNORECASE)
    # Remove all spaces
    manuf = re.sub(r'\s+', '', manuf)
# Truncate names to a reasonable length, say, 8 characters. If
# the string contains UTF-8, this may be substantially more than
# 8 bytes. It might also be less than 8 visible characters. Plain
# Python slices Unicode strings by code point, which is better
# than raw bytes but not as good as grapheme clusters. PyICU
# supports grapheme clusters. https://bugs.python.org/issue30717
#
# In our case plain Python truncates 'Savroni̇k Elektroni̇k'
# to 'Savroni̇', which is 7 visible characters, 8 code points,
# and 9 bytes.
# Truncate by code points
trunc_len = 8
if have_icu:
# Truncate by grapheme clusters
bi_ci = icu.BreakIterator.createCharacterInstance(icu.Locale('en_US'))
bi_ci.setText(manuf)
bounds = list(bi_ci)
bounds = bounds[0:8]
trunc_len = bounds[-1]
manuf = manuf[:trunc_len]
if manuf.lower() == orig_manuf.lower():
# Original manufacturer name was short and simple.
return manuf
mixed_manuf = orig_manuf
# At least one entry has whitespace in front of a period.
    mixed_manuf = re.sub(r'\s+\.', '.', mixed_manuf)
#If company is all caps, convert to mixed case (so it doesn't look like we're screaming the company name)
if mixed_manuf.upper() == mixed_manuf:
mixed_manuf = mixed_manuf.title()
return u'{}\t{}'.format(manuf, mixed_manuf)
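# Rough illustration (not taken from the registry data): an input like
# 'Cisco Systems, Inc' collapses to the short name 'Cisco' and is returned
# together with the original mixed-case name, joined by a tab.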
def prefix_to_oui(prefix):
pfx_len = len(prefix) * 8 / 2
if pfx_len == 24:
# 24-bit OUI assignment, no mask
return ':'.join(hi + lo for hi, lo in zip(prefix[0::2], prefix[1::2]))
# Other lengths which require a mask.
oui = prefix.ljust(12, '0')
oui = ':'.join(hi + lo for hi, lo in zip(oui[0::2], oui[1::2]))
return '{}/{:d}'.format(oui, int(pfx_len))
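# Examples based on the sample rows shown in main() below: the 6-digit prefix
# '0050C2' becomes '00:50:C2', while the 9-digit IAB prefix '0050C2DD6' becomes
# '00:50:C2:DD:60:00/36'.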
def main():
this_dir = os.path.dirname(__file__)
template_path = os.path.join(this_dir, '..', 'manuf.tmpl')
manuf_path = os.path.join(this_dir, '..', 'manuf')
header_l = []
in_header = True
ieee_d = {
'OUI': { 'url': "http://standards-oui.ieee.org/oui/oui.csv", 'min_entries': 1000 },
'CID': { 'url': "http://standards-oui.ieee.org/cid/cid.csv", 'min_entries': 75 },
'IAB': { 'url': "http://standards-oui.ieee.org/iab/iab.csv", 'min_entries': 1000 },
'OUI28': { 'url': "http://standards-oui.ieee.org/oui28/mam.csv", 'min_entries': 1000 },
'OUI36': { 'url': "http://standards-oui.ieee.org/oui36/oui36.csv", 'min_entries': 1000 },
}
oui_d = {}
hp = "[0-9a-fA-F]{2}"
    manuf_re = re.compile(r'^({}:{}:{})\s+(\S.*)$'.format(hp, hp, hp))
    min_total = 35000  # 35830 as of 2018-09-05
tmpl_added = 0
total_added = 0
# Write out the header and populate the OUI list with our entries.
try:
tmpl_fd = io.open(template_path, 'r', encoding='UTF-8')
except:
exit_msg("Couldn't open template file for reading ({}) ".format(template_path))
for tmpl_line in tmpl_fd:
tmpl_line = tmpl_line.strip()
m = manuf_re.match(tmpl_line)
if not m and in_header:
header_l.append(tmpl_line)
elif m:
in_header = False
oui = m.group(1).upper()
oui_d[oui] = m.group(2)
tmpl_added += 1
tmpl_fd.close()
total_added += tmpl_added
# Add IEEE entries from each of their databases
ieee_db_l = list(ieee_d.keys())
ieee_db_l.sort()
for db in ieee_db_l:
db_url = ieee_d[db]['url']
ieee_d[db]['skipped'] = 0
ieee_d[db]['added'] = 0
ieee_d[db]['total'] = 0
print('Merging {} data from {}'.format(db, db_url))
(body, response_d) = open_url(db_url)
ieee_csv = csv.reader(body.splitlines())
if sys.version_info[0] >= 3:
ieee_d[db]['last-modified'] = response_d['Last-Modified']
ieee_d[db]['length'] = response_d['Content-Length']
else:
ieee_d[db]['last-modified'] = response_d['last-modified']
ieee_d[db]['length'] = response_d['content-length']
# Pop the title row.
next(ieee_csv)
for ieee_row in ieee_csv:
#Registry,Assignment,Organization Name,Organization Address
#IAB,0050C2DD6,Transas Marine Limited,Datavagen 37 Askim Vastra Gotaland SE 436 32
oui = prefix_to_oui(ieee_row[1].upper())
if sys.version_info[0] >= 3:
manuf = ieee_row[2].strip()
else:
manuf = ieee_row[2].strip().decode('UTF-8')
if oui in oui_d:
print(u'{} - Skipping IEEE "{}" in favor of "{}"'.format(oui, manuf, oui_d[oui]))
ieee_d[db]['skipped'] += 1
else:
oui_d[oui] = shorten(manuf)
ieee_d[db]['added'] += 1
ieee_d[db]['total'] += 1
if ieee_d[db]['total'] < ieee_d[db]['min_entries']:
exit_msg("Too few {} entries ({})".format(ieee_db, ieee_d[db]['total']))
total_added += ieee_d[db]['total']
if total_added < min_total:
exit_msg("Too few total entries ({})".format(total_added))
# Write the output file.
try:
manuf_fd = io.open(manuf_path, 'w', encoding='UTF-8')
except:
exit_msg("Couldn't open manuf file for reading ({}) ".format(manuf_path))
manuf_fd.write(u"# This file was generated by running ./tools/make-manuf.py.\n")
manuf_fd.write(u"# Don't change it directly, change manuf.tmpl instead.\n#\n")
manuf_fd.write('\n'.join(header_l))
for db in ieee_db_l:
manuf_fd.write(
u'''\
# {url}:
# Content-Length: {length}
# Last-Modified: {last-modified}
'''.format( **ieee_d[db]))
oui_l = list(oui_d.keys())
oui_l.sort()
for oui in oui_l:
manuf_fd.write(u'{}\t{}\n'.format(oui, oui_d[oui]))
manuf_fd.close()
print('{:<20}: {}'.format('Original entries', tmpl_added))
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' added', ieee_d[db]['added']))
print('{:<20}: {}'.format('Total added', total_added))
print()
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' total', ieee_d[db]['total']))
print()
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' skipped', ieee_d[db]['skipped']))
if __name__ == '__main__':
main()
|
python
|
from unittest import mock
import json
import time
from aiohttp import web
from aiohttp.web_middlewares import _Handler
from aiohttp.test_utils import TestClient
from typing import Any, Dict
from aiohttp_session import get_session, SimpleCookieStorage
from aiohttp_session import setup as setup_middleware
from typedefs import _TAiohttpClient
def make_cookie(client: TestClient, data: Dict[Any, Any]) -> None:
session_data = {
'session': data,
'created': int(time.time())
}
value = json.dumps(session_data)
# Ignoring type until aiohttp#4252 is released
client.session.cookie_jar.update_cookies(
{'AIOHTTP_SESSION': value} # type: ignore
)
def create_app(handler: _Handler) -> web.Application:
app = web.Application()
setup_middleware(app, SimpleCookieStorage(max_age=10))
app.router.add_route('GET', '/', handler)
return app
async def test_max_age_also_returns_expires(
aiohttp_client: _TAiohttpClient
) -> None:
async def handler(request: web.Request) -> web.StreamResponse:
# Ignoring type since time.time is mocked in this context
time.time.return_value = 0.0 # type: ignore[attr-defined]
session = await get_session(request)
session['c'] = 3
return web.Response(body=b'OK')
with mock.patch('time.time') as m_clock:
m_clock.return_value = 0.0
client = await aiohttp_client(create_app(handler))
make_cookie(client, {'a': 1, 'b': 2})
resp = await client.get('/')
assert resp.status == 200
assert 'expires=Thu, 01-Jan-1970 00:00:10 GMT' in \
resp.headers['SET-COOKIE']
|
python
|
import json
import os
import dotenv
class ConfigError(Exception):
def __init__(self, field):
super().__init__(f'Missing environment variable {field}')
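# Resolve an environment variable (optionally namespaced by a prefix), fall back
# to the given default, and raise ConfigError if the result is still empty and
# allow_empty is not set.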
def get_env_var(name: str, default: str = None, prefix='', allow_empty=False):
if prefix:
env = prefix + '_' + name
else:
env = name
value = os.getenv(env)
if not value and default:
value = default
if not allow_empty and not value:
raise ConfigError(env)
return value
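# Persist a value to the local .env file; lists, dicts and tuples are JSON-encoded
# (with spaces stripped) so python-dotenv stores them on a single line.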
def set_env_var(name: str, value):
if isinstance(value, (list, dict, tuple)):
value = json.dumps(value).replace(" ", "")
dotenv.set_key(".env", key_to_set=name, value_to_set=value, quote_mode="")
|
python
|
"""Library for Byte-pair-encoding (BPE) tokenization.
Authors
* Abdelwahab Heba 2020
* Loren Lugosch 2020
"""
import os.path
import torch
import logging
import csv
import json
import sentencepiece as spm
from speechbrain.dataio.dataio import merge_char
from speechbrain.utils import edit_distance
import speechbrain as sb
logger = logging.getLogger(__name__)
class SentencePiece:
"""BPE class call the SentencePiece unsupervised text tokenizer from Google.
Reference: https://github.com/google/sentencepiece
SentencePiece lib is an unsupervised text tokenizer and detokenizer.
It implements subword units like Byte-pair-encoding (BPE),
Unigram language model and char/word tokenizer.
Arguments
---------
model_dir : str
The directory where the model will be saved (or already stored).
vocab_size : int, None, optional
Vocab size for the chosen tokenizer type (BPE, Unigram).
The vocab_size is optional for char, and mandatory for BPE & unigram
tokenization.
annotation_train : str
Path of the annotation file which is used to learn the tokenizer. It
can be in JSON or csv format.
annotation_read : str
The data entry which contains the word sequence in the annotation file.
model_type : str
(bpe, char, unigram).
If "bpe", train unsupervised tokenization of piece of words. see:
https://www.aclweb.org/anthology/P16-1162/
If "word" take the vocabulary from the input text.
If "unigram" do piece of word tokenization using unigram language
model, see: https://arxiv.org/abs/1804.10959
char_format_input : bool
Whether the read entry contains characters format input.
(default: False)
(e.g., a p p l e _ i s _ g o o d)
character_coverage : int
Amount of characters covered by the model, good defaults
are: 0.9995 for languages with a rich character set like Japanese or
Chinese and 1.0 for other languages with small character set.
(default: 1.0)
user_defined_symbols : string
String contained a list of symbols separated by a comma.
User-defined symbols are handled as one piece in any context.
(default: None)
max_sentencepiece_length : int
Maximum number of characters for the tokens. (default: 10)
bos_id : int
If -1 the bos_id = unk_id = 0. otherwise, bos_id = int. (default: -1)
eos_id : int
If -1 the bos_id = unk_id = 0. otherwise, bos_id = int. (default: -1)
split_by_whitespace : bool
If False, allow the sentencepiece to extract piece crossing multiple
words. This feature is important for : Chinese/Japanese/Korean.
(default: True)
num_sequences : int
If not none, use at most this many sequences to train the tokenizer
(for large datasets). (default: None)
annotation_list_to_check : list,
List of the annotation file which is used for checking the accuracy of
recovering words from the tokenizer.
annotation_format : str
The format of the annotation file. JSON or csv are the formats supported.
Example
-------
>>> import torch
>>> dict_int2lab = {1: "HELLO", 2: "MORNING"}
>>> model_dir = "tests/unittests/tokenizer_data/"
>>> # Example with csv
>>> annotation_train = "tests/unittests/tokenizer_data/dev-clean.csv"
>>> annotation_read = "wrd"
>>> model_type = "bpe"
>>> bpe = SentencePiece(model_dir,100, annotation_train, annotation_read,
... model_type)
>>> batch_seq = torch.Tensor([[1, 2, 2, 1],[1, 2, 1, 0]])
>>> batch_lens = torch.Tensor([1.0, 0.75])
>>> encoded_seq_ids, encoded_seq_pieces = bpe(
... batch_seq, batch_lens, dict_int2lab, task="encode"
... )
>>> # Example using JSON
>>> annotation_train = "tests/unittests/tokenizer_data/dev-clean.json"
>>> annotation_read = "wrd"
>>> bpe = SentencePiece(model_dir,100, annotation_train, annotation_read,
... model_type, annotation_format = 'json')
>>> encoded_seq_ids, encoded_seq_pieces = bpe(
... batch_seq, batch_lens, dict_int2lab, task="encode"
... )
"""
def __init__(
self,
model_dir,
vocab_size,
annotation_train=None,
annotation_read=None,
model_type="unigram",
char_format_input=False,
character_coverage=1.0,
user_defined_symbols=None,
max_sentencepiece_length=10,
bos_id=-1,
eos_id=-1,
pad_id=-1,
unk_id=0,
split_by_whitespace=True,
num_sequences=None,
annotation_list_to_check=None,
annotation_format="csv",
):
if model_type not in ["unigram", "bpe", "char"]:
raise ValueError("model_type must be one of : [unigram, bpe, char]")
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
if not isinstance(vocab_size, int):
raise ValueError("vocab_size must be integer.")
self.annotation_train = annotation_train
self.annotation_read = annotation_read
self.annotation_format = annotation_format
if self.annotation_train is not None:
ext = os.path.splitext(self.annotation_train)[1]
self.text_file = self.annotation_train.replace(ext, ".txt")
self.prefix_model_file = os.path.join(
model_dir, str(vocab_size) + "_" + model_type
)
self.vocab_size = str(vocab_size)
self.model_type = model_type
self.char_format_input = char_format_input
self.character_coverage = str(character_coverage)
self.max_sentencepiece_length = str(max_sentencepiece_length)
self.bos_id = str(bos_id)
self.eos_id = str(eos_id)
self.pad_id = str(pad_id)
self.unk_id = str(unk_id)
self.num_sequences = num_sequences
self.split_by_whitespace = split_by_whitespace
self.user_defined_symbols = user_defined_symbols
if not os.path.isfile(self.prefix_model_file + ".model"):
logger.info("Train tokenizer with type:" + self.model_type)
if not os.path.isfile(self.text_file):
try:
if sb.utils.distributed.if_main_process():
if annotation_format == "csv":
self._csv2text()
elif annotation_format == "json":
self._json2text()
else:
raise ValueError(
"Annotation format not supported. Supported formats are csv and json. Got "
+ annotation_format
)
finally:
sb.utils.distributed.ddp_barrier()
try:
if sb.utils.distributed.if_main_process():
self._train_BPE()
finally:
sb.utils.distributed.ddp_barrier()
else:
logger.info("Tokenizer is already trained.")
logger.info("==== Loading Tokenizer ===")
logger.info("Tokenizer path: " + self.prefix_model_file + ".model")
logger.info("Tokenizer vocab_size: " + str(self.vocab_size))
logger.info("Tokenizer type: " + self.model_type)
self.sp = spm.SentencePieceProcessor()
self.sp.load(self.prefix_model_file + ".model")
try:
if sb.utils.distributed.if_main_process():
if annotation_list_to_check is not None:
self._check_coverage_from_bpe(annotation_list_to_check)
finally:
sb.utils.distributed.ddp_barrier()
def _csv2text(self):
"""Read CSV file and convert specific data entries into text file.
"""
if not os.path.isfile(os.path.abspath(self.annotation_train)):
raise ValueError(
self.annotation_train
+ " is not a file. please provide annotation file for training."
)
logger.info(
"Extract "
+ self.annotation_read
+ " sequences from:"
+ self.annotation_train
)
annotation_file = open(self.annotation_train, "r")
reader = csv.reader(annotation_file)
headers = next(reader, None)
if self.annotation_read not in headers:
raise ValueError(
self.annotation_read + " must exist in:" + self.annotation_train
)
index_label = headers.index(self.annotation_read)
text_file = open(self.text_file, "w+")
row_idx = 0
for row in reader:
if self.num_sequences is not None and row_idx > self.num_sequences:
print(
"Using %d sequences to train the tokenizer."
% self.num_sequences
)
break
row_idx += 1
sent = row[index_label]
if self.char_format_input:
(sent,) = merge_char([sent.split()])
sent = " ".join(sent)
text_file.write(sent + "\n")
text_file.close()
annotation_file.close()
logger.info("Text file created at: " + self.text_file)
def _json2text(self):
"""Read JSON file and convert specific data entries into text file.
"""
if not os.path.isfile(os.path.abspath(self.annotation_train)):
raise ValueError(
self.annotation_train
+ " is not a file. please provide annotation file for training."
)
logger.info(
"Extract "
+ self.annotation_read
+ " sequences from:"
+ self.annotation_train
)
# Read JSON
with open(self.annotation_train, "r") as f:
out_json = json.load(f)
# Save text file
text_file = open(self.text_file, "w+")
row_idx = 0
for snt_id in out_json.keys():
if self.num_sequences is not None and row_idx > self.num_sequences:
print(
"Using %d sequences to train the tokenizer."
% self.num_sequences
)
break
row_idx += 1
sent = out_json[snt_id][self.annotation_read]
if self.char_format_input:
(sent,) = merge_char([sent.split()])
sent = " ".join(sent)
text_file.write(sent + "\n")
text_file.close()
logger.info("Text file created at: " + self.text_file)
def _train_BPE(self):
"""Train tokenizer with unsupervised techniques (BPE, Unigram) using
SentencePiece Library. If you use "char" mode, the SentencePiece
creates a char dict so the vocab_size attribute is not needed.
"""
query = (
"--input="
+ self.text_file
+ " --model_prefix="
+ self.prefix_model_file
+ " --model_type="
+ self.model_type
+ " --bos_id="
+ self.bos_id
+ " --eos_id="
+ self.eos_id
+ " --pad_id="
+ self.pad_id
+ " --unk_id="
+ self.unk_id
+ " --max_sentencepiece_length="
+ self.max_sentencepiece_length
+ " --character_coverage="
+ self.character_coverage
)
if self.model_type not in ["char"]:
# include vocab_size
query += " --vocab_size=" + str(self.vocab_size)
if self.user_defined_symbols is not None:
query += " --user_defined_symbols=" + self.user_defined_symbols
if not self.split_by_whitespace:
query += " --split_by_whitespace=false"
# Train tokenizer
spm.SentencePieceTrainer.train(query)
def _check_coverage_from_bpe(self, list_annotation_files=[]):
"""Logging the accuracy of the BPE model to recover words from the training text.
Arguments
---------
annotation_list_to_check : list,
List of the annotation file which is used for checking the accuracy of recovering words from the tokenizer.
"""
for annotation_file in list_annotation_files:
if os.path.isfile(os.path.abspath(annotation_file)):
logger.info(
"==== Accuracy checking for recovering text from tokenizer ==="
)
# csv reading
if self.annotation_format == "csv":
fannotation_file = open(annotation_file, "r")
reader = csv.reader(fannotation_file)
headers = next(reader, None)
if self.annotation_read not in headers:
raise ValueError(
self.annotation_read
+ " must exist in:"
+ annotation_file
)
index_label = headers.index(self.annotation_read)
# json reading
else:
                    with open(annotation_file, "r") as f:
reader = json.load(f)
index_label = self.annotation_read
wrong_recover_list = []
for row in reader:
if self.annotation_format == "csv":
row = row[index_label]
else:
row = reader[row][index_label]
if self.char_format_input:
(row,) = merge_char([row.split()])
row = " ".join(row)
row = row.split("\n")[0]
encoded_id = self.sp.encode_as_ids(row)
decode_text = self.sp.decode_ids(encoded_id)
(details,) = edit_distance.wer_details_for_batch(
["utt1"],
[row.split(" ")],
[decode_text.split(" ")],
compute_alignments=True,
)
if details["WER"] > 0:
for align in details["alignment"]:
if align[0] != "=" and align[1] is not None:
if align[1] not in wrong_recover_list:
wrong_recover_list.append(align[1])
if self.annotation_format == "csv":
fannotation_file.close()
logger.info("recover words from: " + annotation_file)
if len(wrong_recover_list) > 0:
logger.warn(
"Wrong recover words: " + str(len(wrong_recover_list))
)
logger.warn(
"Tokenizer vocab size: " + str(self.sp.vocab_size())
)
logger.warn(
"accuracy recovering words: "
+ str(
1
- float(len(wrong_recover_list))
/ self.sp.vocab_size()
)
)
else:
logger.info("Wrong recover words: 0")
logger.warning("accuracy recovering words: " + str(1.0))
else:
logger.info(
"No accuracy recover checking for" + annotation_file
)
def __call__(
self, batch, batch_lens=None, ind2lab=None, task="encode",
):
"""This __call__ function implements the tokenizer encoder and decoder
(restoring the string of word) for BPE, Regularized BPE (with unigram),
and char (speechbrain/nnet/RNN.py).
Arguments
----------
batch : tensor.IntTensor or list
List if ( batch_lens = None and task = "decode_from_list")
Contains the original labels. Shape: [batch_size, max_length]
batch_lens : tensor.LongTensor
Containing the relative length of each label sequences. Must be 1D
tensor of shape: [batch_size]. (default: None)
ind2lab : dict
Dictionary which maps the index from label sequences
(batch tensor) to string label.
task : str
("encode", "decode", "decode_from_list)
"encode": convert the batch tensor into sequence of tokens.
the output contain a list of (tokens_seq, tokens_lens)
"decode": convert a tensor of tokens to a list of word sequences.
"decode_from_list": convert a list of token sequences to a list
of word sequences.
"""
if task == "encode" and ind2lab is None:
raise ValueError("Tokenizer encoder must have the ind2lab function")
if task == "encode":
# Convert list of words/chars to bpe ids
bpe = []
max_bpe_len = 0
batch_lens = (batch_lens * batch.shape[1]).int()
for i, utt_seq in enumerate(batch):
tokens = [
ind2lab[int(index)] for index in utt_seq[: batch_lens[i]]
]
if self.char_format_input:
(words_list,) = merge_char([tokens])
sent = " ".join(words_list)
else:
sent = " ".join(tokens)
bpe_encode = self.sp.encode_as_ids(sent)
bpe.append(bpe_encode)
# save the longest bpe sequence
# it help to compute the relative length of each utterance
if len(bpe_encode) > max_bpe_len:
max_bpe_len = len(bpe_encode)
# Create bpe tensor
bpe_tensor = torch.zeros(
(batch.shape[0], max_bpe_len), device=batch.device
)
bpe_lens = torch.zeros((batch.shape[0]), device=batch.device)
for i, bpe_utt in enumerate(bpe):
bpe_tensor[i, : len(bpe_utt)] = torch.Tensor(bpe_utt)
bpe_lens[i] = len(bpe_utt) / max_bpe_len
return bpe_tensor, bpe_lens
elif task == "decode_from_list":
# From list of hyps (not padded outputs)
# do decoding
return [self.sp.decode_ids(utt_seq).split(" ") for utt_seq in batch]
elif task == "decode":
# From a batch tensor and a length tensor
# find the absolute batch lengths and do decoding
batch_lens = (batch_lens * batch.shape[1]).int()
return [
self.sp.decode_ids(
utt_seq[: batch_lens[i]].int().tolist()
).split(" ")
for i, utt_seq in enumerate(batch)
]
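# Illustrative decode usage (tensor contents and variable names below are
# assumptions, not from the SpeechBrain docs):
#   hyps = torch.IntTensor([[5, 12, 7], [5, 9, 0]])   # padded token ids
#   lens = torch.Tensor([1.0, 0.66])                  # relative lengths
#   words = bpe(hyps, lens, task="decode")            # one word list per utterance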
|
python
|
from ExceptionHandler import ExceptionHandler
class InputController:
def __init__(self, inputReader, exceptionHandler):
self.InputReader = inputReader
self.ExceptionHandler = exceptionHandler
def pollUserInput(self):
return self.ExceptionHandler.executeFunc()
|
python
|
import math
def factors(n):
results = set()
for i in range(1, int(math.sqrt(n)) + 1):
if n % i == 0:
results.add(i)
results.add(int(n/i))
return results
# Search for the first triangular number with more than 500 divisors
# (x accumulates 0 + 1 + 2 + ... + i, i.e. the i-th triangular number).
x = 0
i = 0
while True:
    x += i
    if len(factors(x)) > 500:
        print(x)
        break
    i += 1
|
python
|
"""
MesoNet
Authors: Brandon Forys and Dongsheng Xiao, Murphy Lab
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
The method "vxm_data_generator" is adapted from VoxelMorph:
Balakrishnan, G., Zhao, A., Sabuncu, M. R., Guttag, J., & Dalca, A. V. (2019). VoxelMorph: A Learning Framework for
Deformable Medical Image Registration. IEEE Transactions on Medical Imaging, 38(8), 1788–1800.
https://doi.org/10.1109/TMI.2019.2897538
VoxelMorph is distributed under the Apache License 2.0.
"""
from mesonet.mask_functions import *
import voxelmorph as vxm
from skimage.color import rgb2gray
def vxm_data_generator(x_data, template, batch_size=1):
"""
Generator that takes in data of size [N, H, W], and yields data for
our custom vxm model. Note that we need to provide numpy data for each
input, and each output.
inputs: moving [bs, H, W, 1], fixed image [bs, H, W, 1]
outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
"""
# preliminary sizing
if batch_size == 1:
x_data = rgb2gray(x_data)
template = rgb2gray(template)
x_data = np.expand_dims(x_data, axis=0)
template = np.expand_dims(template, axis=0)
vol_shape = x_data.shape[1:] # extract data shape
ndims = len(vol_shape)
# prepare a zero array the size of the deformation
zero_phi = np.zeros([batch_size, *vol_shape, ndims])
while True:
# prepare inputs:
# images need to be of the size [batch_size, H, W, 1]
idx1 = np.random.randint(0, template.shape[0], size=batch_size)
moving_images = template[idx1, ..., np.newaxis]
idx2 = np.random.randint(0, x_data.shape[0], size=batch_size)
fixed_images = x_data[idx2, ..., np.newaxis]
inputs = [moving_images, fixed_images]
# prepare outputs (the 'true' moved image):
# of course, we don't have this, but we know we want to compare
# the resulting moved image with the fixed image.
# we also wish to penalize the deformation field.
# NOTE: we don't currently use these output images in our analyses;
# the inputs are put directly into vxm_model.predict().
outputs = [fixed_images, zero_phi]
yield inputs, outputs
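# Illustrative use of the generator above (array names and shapes are assumptions):
#   gen = vxm_data_generator(frame, template)        # frame, template: [H, W, 3] arrays
#   (moving, fixed), _ = next(gen)                   # each image batch is [1, H, W, 1]
#   moved, flow = vxm_model.predict([moving, fixed]) # as done in voxelmorph_align() below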
def init_vxm_model(img_path, model_path):
"""
Initializes a VoxelMorph model to be applied.
:param img_path: (required) The path to the image to be aligned using VoxelMorph.
:param model_path: (required) The path to the VoxelMorph model to be used.
:return:
"""
# configure unet features
nb_features = [
[32, 32, 32, 32], # encoder features
[32, 32, 32, 32, 32, 16], # decoder features
]
# Since our input is a 2D image, we can take the shape from the first two dimensions in .shape
inshape = img_path.shape[0:2]
vxm_model = vxm.networks.VxmDense(inshape, nb_features, int_steps=0)
losses = [vxm.losses.MSE().loss, vxm.losses.Grad("l2").loss]
lambda_param = 0.05
loss_weights = [1, lambda_param]
vxm_model.compile(optimizer="Adam", loss=losses, loss_weights=loss_weights)
vxm_model.load_weights(model_path)
return vxm_model
def vxm_transform(x_data, flow_path):
"""
    Carries out a VoxelMorph transformation.
:param x_data: (required) The image data to be transformed.
:param flow_path: (required) If we already have a deformation field that we want
to apply to all data, use the deformation field specified at this path.
:return:
"""
# If we already have a deformation field that we want to apply to all data,
# use this deformation field instead of computing a new one.
# preliminary sizing
flow_data = np.load(flow_path)
x_data = rgb2gray(x_data)
x_data = np.expand_dims(x_data, axis=0)
x_data = x_data[..., np.newaxis]
vol_size = x_data.shape[1:-1]
results = vxm.networks.Transform(
vol_size, interp_method="linear", nb_feats=x_data.shape[-1]
).predict([x_data, flow_data])
output_img = results[0, :, :, 0]
return output_img
def voxelmorph_align(model_path, img_path, template, exist_transform, flow_path):
"""
Carries out a VoxelMorph alignment procedure, and returns the output image and corresponding flow field.
:param model_path: (required) The path to a VoxelMorph model to use. By default, this is in the MesoNet repository >
mesonet/models/voxelmorph.
:param img_path: (required) The path to an image to be aligned using VoxelMorph.
:param template: (required) The path to a VoxelMorph template image to which the input image will be aligned,
creating the transformation to be applied to the output image.
:param exist_transform: (required) If True, uses an existing VoxelMorph flow field (the .npy file saved alongside
each VoxelMorph transformed image) to carry out the transformations (instead of computing a new flow field).
:param flow_path: (required) The path to the directory to which the VoxelMorph flow field from the current
transformation should be saved.
:return:
"""
if not exist_transform:
vxm_model = init_vxm_model(img_path, model_path)
val_generator = vxm_data_generator(img_path, template)
val_input, _ = next(val_generator)
# Makes predictions on each image
results = vxm_model.predict(val_input)
# Saves output mask
output_img = [img[0, :, :, 0] for img in results][0]
# Saves flow image to flow
flow_img = results[1]
else:
print("using existing transform")
output_img = vxm_transform(img_path, flow_path)
# Saves flow image to flow
flow_img = ""
print("Results saved!")
return output_img, flow_img
|
python
|
from py.test import raises
from pypy.conftest import gettestobjspace
class AppTestUnicodeData:
def setup_class(cls):
space = gettestobjspace(usemodules=('unicodedata',))
cls.space = space
def test_hangul_syllables(self):
import unicodedata
# Test all leading, vowel and trailing jamo
# but not every combination of them.
for code, name in ((0xAC00, 'HANGUL SYLLABLE GA'),
(0xAE69, 'HANGUL SYLLABLE GGAEG'),
(0xB0D2, 'HANGUL SYLLABLE NYAGG'),
(0xB33B, 'HANGUL SYLLABLE DYAEGS'),
(0xB5A4, 'HANGUL SYLLABLE DDEON'),
(0xB80D, 'HANGUL SYLLABLE RENJ'),
(0xBA76, 'HANGUL SYLLABLE MYEONH'),
(0xBCDF, 'HANGUL SYLLABLE BYED'),
(0xBF48, 'HANGUL SYLLABLE BBOL'),
(0xC1B1, 'HANGUL SYLLABLE SWALG'),
(0xC41A, 'HANGUL SYLLABLE SSWAELM'),
(0xC683, 'HANGUL SYLLABLE OELB'),
(0xC8EC, 'HANGUL SYLLABLE JYOLS'),
(0xCB55, 'HANGUL SYLLABLE JJULT'),
(0xCDBE, 'HANGUL SYLLABLE CWEOLP'),
(0xD027, 'HANGUL SYLLABLE KWELH'),
(0xD290, 'HANGUL SYLLABLE TWIM'),
(0xD4F9, 'HANGUL SYLLABLE PYUB'),
(0xD762, 'HANGUL SYLLABLE HEUBS'),
(0xAE27, 'HANGUL SYLLABLE GYIS'),
(0xB090, 'HANGUL SYLLABLE GGISS'),
(0xB0AD, 'HANGUL SYLLABLE NANG'),
(0xB316, 'HANGUL SYLLABLE DAEJ'),
(0xB57F, 'HANGUL SYLLABLE DDYAC'),
(0xB7E8, 'HANGUL SYLLABLE RYAEK'),
(0xBA51, 'HANGUL SYLLABLE MEOT'),
(0xBCBA, 'HANGUL SYLLABLE BEP'),
(0xBF23, 'HANGUL SYLLABLE BBYEOH'),
(0xD7A3, 'HANGUL SYLLABLE HIH')):
assert unicodedata.name(unichr(code)) == name
assert unicodedata.lookup(name) == unichr(code)
# Test outside the range
raises(ValueError, unicodedata.name, unichr(0xAC00 - 1))
raises(ValueError, unicodedata.name, unichr(0xD7A3 + 1))
def test_cjk(self):
import sys
if sys.maxunicode < 0x10ffff:
skip("requires a 'wide' python build.")
import unicodedata
cases = ((0x3400, 0x4DB5),
(0x4E00, 0x9FA5))
if unicodedata.unidata_version >= "4.1":
cases = ((0x3400, 0x4DB5),
(0x4E00, 0x9FBB),
(0x20000, 0x2A6D6))
for first, last in cases:
# Test at and inside the boundary
for i in (first, first + 1, last - 1, last):
charname = 'CJK UNIFIED IDEOGRAPH-%X'%i
assert unicodedata.name(unichr(i)) == charname
assert unicodedata.lookup(charname) == unichr(i)
# Test outside the boundary
for i in first - 1, last + 1:
charname = 'CJK UNIFIED IDEOGRAPH-%X'%i
try:
unicodedata.name(unichr(i))
except ValueError:
pass
raises(KeyError, unicodedata.lookup, charname)
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords
from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper
class FlattenMapper(AtenToMindSporeMapper):
"""Flatten mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "P.Reshape"
@staticmethod
def _convert_params(**kwargs):
return dict()
@staticmethod
def _convert_trained_weights(**kwargs):
return dict()
@staticmethod
def _generate_snippet_template(**kwargs):
template, exchange_msg, outputs_list, outputs_mapping = AtenToMindSporeMapper._generate_snippet_template(
**kwargs)
raw_params = kwargs.get("raw_params")
if not raw_params:
return template, exchange_msg, outputs_list, outputs_mapping
op = kwargs["operation"]
weights = kwargs["weights"]
output_shape = raw_params.get("output_shape")
args = {"shape": output_shape}
variable_slot = "var_0"
target_shape = f"self.{{{variable_slot}}}_shape = tuple({{shape}})"
init_template = f"self.{{{variable_slot}}} = {op}()"
construct_template = f"opt_{{{variable_slot}}} = self.{{{variable_slot}}}" \
f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}, " \
f"self.{{{variable_slot}}}_shape)"
template = {
variable_slot: {
TemplateKeywords.INIT.value: [init_template, target_shape],
TemplateKeywords.CONSTRUCT.value: [construct_template]
}
}
exchange_msg = AtenToMindSporeMapper._generate_exchange_msg(variable_slot=variable_slot, op=op, args=args,
weights=weights)
return template, exchange_msg, outputs_list, outputs_mapping
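# Illustrative rendering of the templates above once the variable slot and output
# shape are filled in (identifier names are assumptions):
#   self.reshape_0 = P.Reshape()
#   self.reshape_0_shape = tuple((1, -1))
#   opt_reshape_0 = self.reshape_0(x, self.reshape_0_shape)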
|
python
|
class A:
def foo<caret><error descr="Method must have a first parameter, usually called 'self'">()</error>: # Add 'self'
pass
|
python
|
import copy
import math
import random
import typing as t
import hypemaths as hm
from ..exceptions import (
InvalidMatrixError,
MatrixDimensionError,
MatrixNotSquare,
)
from ..mixins import CopyMixin
class Matrix(CopyMixin):
def __init__(
self,
matrix: t.Union[int, float, list] = None
) -> None:
"""
Parameters
----------
matrix : t.Union[int, float, list]
This is the nested 2D lists which will be converted into an efficient `Matrix` object capable of several
calculations and features. Defaults to `None`.
"""
if not matrix:
raise ValueError("You need to pass the 2D for the matrix object!")
else:
self.matrix = self._cleaned_matrix(matrix)
@property
def rows(self) -> int:
"""
Returns
-------
int
The number of rows in the 2D matrix created.
"""
return len(self.matrix)
@property
def cols(self) -> int:
"""
Returns
-------
int
The number of the columns in the 2D matrix created.
"""
return len(self.matrix[0])
@property
def dims(self) -> tuple:
"""
Returns
-------
tuple
The tuple containing the shape or the rows and columns in the matrix created.
"""
return tuple(self._get_mat_dimension(self.matrix))
@property
def size(self) -> int:
"""
Returns
-------
int
The integer which is the total number of items in the matrix
"""
matrix = Matrix(self.matrix)
return matrix.cols * matrix.rows
def __hash__(self):
        return hash(tuple(tuple(row) for row in self.matrix))
def __repr__(self) -> str:
return "{}([{}])".format(
self.__class__.__name__,
",\n ".join([str(x) for x in self.matrix])
)
def __eq__(self, other: "Matrix") -> bool:
if not isinstance(other, Matrix):
raise TypeError(
f"Equality comparison with Matrix can only be performed with another Matrix, got {type(other)}"
)
return self.matrix == other.matrix
def __getitem__(self, index: t.Union[int, tuple]) -> t.Union[int, float, list]:
if isinstance(index, int):
return self.matrix[index]
else:
return self.matrix[index[0]][index[1]]
def __setitem__(self, index: t.Union[int, tuple], value: t.Union[int, float]) -> None:
if isinstance(value, (int, float)):
if isinstance(index, int):
self.matrix[index] = value
else:
self.matrix[index[0]][index[1]] = value
else:
raise TypeError(
f"All values must be integers or floats, but value[{value}] is {type(value)}."
)
def __add__(self, other: "Matrix") -> "Matrix":
cls = self.__class__
if not isinstance(other, cls):
raise TypeError(f"Matrix can only be added with other matrix. Not {type(other)}")
if not (self.rows, self.cols) == (other.rows, other.cols):
raise MatrixDimensionError("These matrices cannot be added due to wrong dimensions.")
matrix = [[self[row][cols] + other[row][cols] for cols in range(self.cols)] for row in range(self.rows)]
return cls(matrix)
def __sub__(self, other: "Matrix") -> "Matrix":
cls = self.__class__
if not isinstance(other, cls):
raise TypeError(f"Matrix can only be subtracted with other matrix. Not {type(other)}")
if not (self.rows, self.cols) == (other.rows, other.cols):
raise MatrixDimensionError("These matrices cannot be subtracted due to wrong dimensions.")
matrix = [[self[row][cols] - other[row][cols] for cols in range(self.cols)] for row in range(self.rows)]
return cls(matrix)
    def __mul__(self, other: t.Union["Matrix", int, float]) -> "Matrix":
cls = self.__class__
if isinstance(other, (int, float)):
matrix = [[element * other for element in row] for row in self]
return cls(matrix)
if not isinstance(other, cls):
raise TypeError(f"Matrix can only be multiplied with other matrix. Not {type(other)}")
if self.cols != other.rows:
raise MatrixDimensionError("These matrices cannot be multiplied due to wrong dimensions.")
matrix = [[
sum(a * b for a, b in zip(self_row, other_col)) for other_col in zip(*other)] for self_row in self
]
return cls(matrix)
    def __truediv__(self, other: t.Union["Matrix", int, float]) -> "Matrix":
cls = self.__class__
if isinstance(other, (int, float)):
matrix = [[element / other for element in row] for row in self]
return cls(matrix)
if not isinstance(other, cls):
raise TypeError(f"Matrix can only be divided with other matrix. Not {type(other)}")
if self.cols != other.rows:
raise MatrixDimensionError("These matrices cannot be divided due to wrong dimensions.")
matrix = [[
sum(a / b for a, b in zip(self_row, other_col)) for other_col in zip(*other)] for self_row in self
]
return cls(matrix)
def __radd__(self, other: "Matrix") -> "Matrix":
return self.__add__(other)
def __rmul__(self, other: "Matrix") -> "Matrix":
return self.__mul__(other)
def __matmul__(self, other: "Matrix") -> "Matrix":
return self.__mul__(other)
def __abs__(self) -> "Matrix":
cls = self.__class__
matrix = [
[abs(self[row][cols]) for cols in range(self.cols)]
for row in range(self.rows)
]
return cls(matrix)
def __round__(self, n: t.Optional[int] = None) -> "Matrix":
cls = self.__class__
matrix = [
[round(self[row][cols], ndigits=n) for cols in range(self.cols)] for row in range(self.rows)
]
return cls(matrix)
def __int__(self) -> "Matrix":
cls = self.__class__
matrix = [
[int(self[row][cols]) for cols in range(self.cols)]
for row in range(self.rows)
]
return cls(matrix)
def __float__(self) -> "Matrix":
cls = self.__class__
matrix = [
[float(self[row][cols]) for cols in range(self.cols)]
for row in range(self.rows)
]
return cls(matrix)
@classmethod
def get_filled_matrix(cls, dims: tuple, fill: t.Union[int, float]) -> "Matrix":
"""
Create a Matrix object with dimension specified containing fill value specified.
Parameters
----------
dims : tuple
This is the dimensions of the fill matrix, created when the `matrix` parameter is not specified and only
this value and the fill value is provided. Defaults to `None`.
fill : t.Union[int, float]
This is the fill value, which works with the `dims` parameter to create a filled matrix with the given
value. Defaults to `None`.
Returns
-------
Matrix
Returns filled matrix object with the dimensions and fill value passed.
Examples
--------
Create a matrix of dimensions : (2, 2) with the fill value of 5.
>>> matrix = Matrix.get_filled_matrix((2, 2), 5)
>>> matrix
Matrix([[5, 5], [5, 5]])
Create a matrix of dimensions : (4, 3) with the fill value 9
>>> matrix = Matrix.get_filled_matrix((4, 3), 9)
>>> matrix
Matrix([[9, 9, 9], [9, 9, 9], [9, 9, 9], [9, 9, 9]])
"""
return cls(cls._create_filled_matrix(dims, fill))
@classmethod
def get_randomized_matrix(
cls, dims: tuple, min_value: int, max_value: int, seed: int = None, round_digits: t.Optional[int] = 2
) -> "Matrix":
"""
Generate a random matrix object with the specified parameters.
Parameters
----------
dims: tuple
The dimensions for the matrix to be generated.
min_value: int
The minimum value for random number generation
max_value: int
The maximum value for random number generation
seed: int
            The seed for random number generation which can be recreated later.
round_digits: int
The number of digits to be in the number after decimal. Set the value as number for integer values.
Returns
-------
Matrix
The random matrix generated from the function.
Examples
--------
Generate a matrix with random integer values
>>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, round_digits=None)
>>> matrix
Matrix([[4, 9], [9, 2]])
Generate a reproducible matrix with seed of 7
>>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, seed=7)
>>> matrix
Matrix([[3.91, 2.36], [6.86, 1.65]])
Generate a float matrix with 5 digits after decimal
>>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, round_digits=5)
>>> matrix
Matrix([[5.82294, 4.2912], [1.52199, 5.56692]])
"""
def is_float_or_int(value: t.Any) -> bool:
if not isinstance(value, (int, float)):
raise TypeError(
f"The values or value must be integer or float, but the given fill value is {type(value)}."
)
return True
if len(dims) != 2:
raise ValueError("You must pass the 2 DIMENSIONS for the Matrix fill.")
if is_float_or_int(min_value) and is_float_or_int(max_value):
if seed is not None:
random.seed(seed)
if not round_digits:
matrix = [
[round(random.uniform(min_value, max_value)) for _ in range(dims[1])] for _ in range(dims[0])
]
return cls(matrix)
else:
matrix = [
[
round(random.uniform(min_value, max_value), ndigits=round_digits) for _ in range(dims[1])
] for _ in range(dims[0])
]
return cls(matrix)
@staticmethod
def _cleaned_matrix(matrix: list) -> list:
"""
Checks if a matrix passed is valid or not and returns the processed and cleaned matrix.
Parameters
----------
matrix : list
The matrix passed to this function for processing, validation and cleaning.
Returns
-------
list
The list consisting the validated and cleaned matrix after passing the checks.
Raises
------
TypeError
If the matrix contains any datatype other than `int` or `float`.
InvalidMatrixError
If the matrix has invalid size or cannot be validated.
"""
def contains_sublist(mat: list) -> bool:
"""
Parameters
----------
mat: list
The matrix passed for checking if it contains sublist.
Returns
-------
bool
If the matrix passed contains sublist.
"""
return all(isinstance(element, list) for element in mat)
def value_check(mat: list) -> bool:
"""
Parameters
----------
mat: list
The matrix passed for validating the datatypes in it.
Returns
-------
bool
If the matrix contains any datatypes other than `int` or `float`.
Raises
------
TypeError
Raised if the matrix consists of value which is not a `int` or `float`.
"""
for row, row_values in enumerate(mat):
for col, value in enumerate(row_values):
if not isinstance(value, (int, float)):
raise TypeError(
f"All values must be integers or floats, but value[{row}][{col}] is {type(value)}"
)
return True
if isinstance(matrix, (int, float)):
return [[matrix]]
matrix = [matrix] if not contains_sublist(matrix) else matrix
if value_check(matrix):
len_set = set([len(x) for x in matrix])
if len(len_set) > 1 and value_check(matrix):
raise InvalidMatrixError(
"Matrix sizes are invalid! Must have same number of element in each sub list."
)
return matrix
@staticmethod
def _create_filled_matrix(dims: tuple, fill: t.Union[int, float] = None) -> list:
"""
Parameters
----------
dims: tuple
The dimensions for the matrix to be initialized. Only 2 dimensions (X, Y) are allowed.
fill: t.Union[int, float]
The value to be filled across the matrix of the specified dimension.
Returns
-------
list
The 2D python list, to be converted into `Matrix` object.
Raises
------
ValueError
If the number of dimensions don't equal to 2.
TypeError
If the fill value isn't either `int` or `float`.
"""
if len(dims) != 2:
raise ValueError(
"You must pass the 2 DIMENSIONS for the Matrix fill.")
if not fill:
fill = 0
if not isinstance(fill, (int, float)):
raise TypeError(
f"The fill value must be integer or float, but the given fill value is {type(fill)}."
)
matrix_structure = []
first_row = [fill] * dims[1]
for _ in range(dims[0]):
matrix_structure.append(first_row.copy())
return matrix_structure
def _get_mat_dimension(self, matrix: list) -> list:
"""
Parameters
----------
matrix : list
The matrix whose dimensions are to be figured out.
Returns
-------
list
A tuple containing the dimensions of the matrix passed.
"""
if not isinstance(matrix, list):
return []
return [len(matrix)] + self._get_mat_dimension(matrix[0])
def clone(self) -> "Matrix":
"""
Returns the copy of the matrix.
Returns
-------
Matrix
The copy of the present matrix.
Examples
--------
Getting the copy instead of directly assigning, when you want to modify the matrix without disturbing the first
one.
>>> matrix = Matrix([[1, 2], [3, 4]])
>>> matrix.clone()
Matrix([[1, 2], [3, 4]])
"""
return copy.deepcopy(self)
def trace(self) -> t.Union[int, float]:
"""
Returns the sum of the diagonals of the matrix
Returns
-------
t.Union[int, float]
The sum of the diagonals of the current `Matrix`
Raises
------
MatrixNotSquare
If the number of columns and rows are not equal in the `Matrix`.
Examples
--------
Getting the sum of the rows of the specified matrix.
>>> matrix = Matrix([[5, 5], [3, 4]])
>>> matrix.trace()
9
"""
if self.rows != self.cols:
raise MatrixNotSquare("Cannot retrieve the sum of diagonals as the row and column count are not same.")
total = 0
for i in range(self.rows):
total += self[i, i]
return total
def transpose(self) -> "Matrix":
"""
Transposes the matrix.
This converts the matrix elements order, by converting the rows into columns and vice versa.
Returns
-------
Matrix
The transposed matrix.
Examples
--------
>>> mat = Matrix([[1, 2], [3, 4]])
>>> mat.transpose()
Matrix([[1, 3], [2, 4]])
"""
cls = self.__class__
matrix = [[self[cols][row] for cols in range(self.rows)] for row in range(self.cols)]
return cls(matrix)
def frobenius_norm(self) -> float:
"""
Calculate the frobenius norm of the matrix.
The frobenius norm is computed by taking square root of the sums the squares of each entry of the matrix.
This can be used to calculate the 2-norm of a column vector.
Returns
-------
float:
The computed frobenius norm.
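        Examples
        --------
        The classic 3-4-5 right triangle gives a norm of 5:
        >>> Matrix([[3, 4]]).frobenius_norm()
        5.0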
"""
sum_of_squares = 0
for column in self.matrix:
for elem in column:
sum_of_squares += elem ** 2
return math.sqrt(sum_of_squares)
def determinant(self) -> float:
"""
Get the determinant of a matrix.
In linear algebra, the determinant is a scalar value that can be computed from the elements of a square
matrix and encodes certain properties of the linear transformation described by the matrix. The determinant of
        a matrix ``A`` is denoted det(``A``), det ``A``, or ``|A|``.
Returns
-------
float:
The determinant of the matrix.
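        Examples
        --------
        A 2x2 example (1*4 - 2*3 = -2):
        >>> Matrix([[1, 2], [3, 4]]).determinant()
        -2.0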
"""
matrix_size = len(self.matrix)
matrix_copy = self.clone()
for fd in range(matrix_size): # FD - The focus diagonal.
for i in range(fd + 1, matrix_size):
if matrix_copy[fd][fd] == 0:
matrix_copy[fd][fd] = 1.0e-18
current_row_scaler = matrix_copy[i][fd] / matrix_copy[fd][fd]
for j in range(matrix_size):
matrix_copy[i][j] = matrix_copy[i][j] - current_row_scaler * matrix_copy[fd][j]
product = 1.0
for i in range(matrix_size):
product *= matrix_copy[i][i]
return product
@classmethod
def from_vector(cls, vector: "hm.Vector") -> "Matrix":
"""
Convert a `Vector` into a `Matrix` object.
Parameters
----------
vector: Vector
The vector which is going to be converted into Matrix.
Returns
-------
Matrix
The matrix formed after conversion of vector.
Examples
--------
>>> from hypemaths import Vector
>>> vec = Vector(1, 2, 3, 4)
>>> vec
Vector([1, 2, 3, 4])
>>> Matrix.from_vector(vec)
Matrix([[1], [2], [3], [4]])
"""
matrix_list = [[value] for value in vector]
return cls(matrix_list)
def flatten(self) -> "hm.Vector":
"""
Return a flattened version of the matrix.
All elements of the matrix are placed into a single row.
Returns
-------
hm.Vector
A vector containing the elements of the matrix passed.
Examples
--------
>>> m = hm.Matrix([[1,2], [3,4]])
>>> m.flatten()
        Vector([1, 2, 3, 4])
"""
flat_list = []
for element in self.matrix:
if type(element) is list:
# If the element is of type list, iterate through the sublist
for item in element:
flat_list.append(item)
else:
flat_list.append(element)
return hm.Vector(flat_list)
def sum(self, axis: int = None) -> t.Union[int, float, "hm.Vector", "hm.Matrix"]:
"""
Returns the sum of the entire matrix or along a specific axis
Parameters
----------
axis: {0, 1}, Optional
The sum of the matrix along which axis
Returns
-------
int, float
The sum of the elements of the matrix
Examples
--------
>>> m = Matrix([[1, 2], [4, 2], [7, 2]])
>>> m.sum()
18
>>> m.sum(0)
Matrix([[12, 6]])
>>> m.sum(1)
Matrix([[3], [6], [9]])
"""
matrix = Matrix(self.matrix)
if axis is None:
return sum(matrix.flatten())
if axis not in [-1, 0, 1, None]:
raise TypeError(f"Axis {axis} is out of bounds for array of 2nd dimension.")
if not isinstance(axis, (list, int)):
raise TypeError(f"Axis should be inteer or list indices. Got {type(axis)}")
if axis == 0:
            return Matrix([[sum(i) for i in zip(*matrix)]])
elif axis == 1:
return Matrix([[sum(i)] for i in matrix])
|
python
|
#
# Qutebrowser Config
#
from cconfig import CConfig
# Custom stateful config options
cc = CConfig(config)
cc.redirect = True
# ==================== General Settings ==================================
c.hints.chars = 'dfghjklcvbnm'
c.hints.uppercase = True
c.confirm_quit = ['never']
c.content.fullscreen.window = True
c.spellcheck.languages = ["de-DE", "en-GB", "en-US"]
c.tabs.show = 'never'
c.tabs.tabs_are_windows = True
c.new_instance_open_target = 'window'
c.url.default_page = 'about:blank'
c.url.start_pages = ['about:blank']
c.zoom.default = 150
c.content.autoplay = False
c.content.mute = True
c.fonts.web.size.minimum = 14
c.editor.command = ['st', '-e', 'nvim', '-f', '{file}', '-c',
'normal{line}G{column0}l']
c.content.default_encoding = 'utf-8'
# ==================== Privacy & Security ================================
c.content.javascript.enabled = False
c.content.cookies.accept = 'never'
c.content.plugins = False
c.content.geolocation = False
c.content.pdfjs = False
c.content.webgl = False
c.content.javascript.can_access_clipboard = False
c.content.headers.referer = 'same-domain'
c.content.dns_prefetch = False
c.content.canvas_reading = True # some websites break when disabled
c.content.headers.do_not_track = False # can be used to fingerprint
c.content.webrtc_ip_handling_policy = 'disable-non-proxied-udp'
c.content.hyperlink_auditing = False
# ==================== Adblock ===========================================
c.content.blocking.enabled = True
c.content.blocking.method = 'both'
c.content.blocking.hosts.block_subdomains = True
c.content.blocking.adblock.lists = [
"https://easylist.to/easylist/easylist.txt",
"https://easylist.to/easylist/easyprivacy.txt",
"https://easylist.to/easylist/fanboy-social.txt",
"https://secure.fanboy.co.nz/fanboy-cookiemonster.txt",
"https://secure.fanboy.co.nz/fanboy-annoyance.txt",
"https://easylist-downloads.adblockplus.org/antiadblockfilters.txt",
"https://curben.gitlab.io/malware-filter/urlhaus-filter-online.txt",
"https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/legacy.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters-2020.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters-2021.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/badware.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/privacy.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/resource-abuse.txt",
]
c.content.blocking.hosts.lists = [
"https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts",
]
# ==================== Dark Mode =========================================
c.colors.webpage.preferred_color_scheme = 'dark'
c.colors.webpage.darkmode.enabled = True
c.colors.webpage.darkmode.algorithm = 'lightness-cielab'
c.colors.webpage.darkmode.policy.images = 'always'
c.colors.webpage.darkmode.grayscale.images = 0.5
c.colors.webpage.darkmode.threshold.background = 150
c.colors.webpage.darkmode.threshold.text = 120
c.colors.webpage.bg = '#000000'
c.content.user_stylesheets = ['~/.config/qutebrowser/css/custom-dark.css']
# ==================== Downloads =========================================
c.downloads.location.directory = '~/Downloads'
c.downloads.location.prompt = True
c.downloads.location.remember = True
c.downloads.location.suggestion = 'both'
c.downloads.open_dispatcher = "xdg-open '{}'"
c.downloads.position = 'bottom'
c.downloads.prevent_mixed_content = True
c.downloads.remove_finished = -1
# ==================== Aliases ===========================================
c.aliases = {
'w': 'session-save',
'q': 'quit',
'wq': 'quit --save',
'cs': 'config-source',
'au': 'adblock-update',
'qr': 'spawn --userscript qr',
}
# ==================== Bindings ==========================================
# Javascript
config.bind('ess', 'set -p -t content.javascript.enabled true ;; reload')
config.bind('eSs', 'set -p content.javascript.enabled true ;; reload')
config.bind('esh', 'set -p -t -u *://{url:host}/* content.javascript.enabled true ;; reload')
config.bind('eSh', 'set -p -u *://{url:host}/* content.javascript.enabled true ;; reload')
config.bind('esH', 'set -p -t -u *://*.{url:host}/* content.javascript.enabled true ;; reload')
config.bind('eSH', 'set -p -u *://*.{url:host}/* content.javascript.enabled true ;; reload')
config.bind('esu', 'set -p -t -u {url} content.javascript.enabled true ;; reload')
config.bind('eSu', 'set -p -u {url} content.javascript.enabled true ;; reload')
config.unbind('d')
config.bind('dss', 'set -p -t content.javascript.enabled false ;; reload')
config.bind('dSs', 'set -p content.javascript.enabled false ;; reload')
config.bind('dsh', 'set -p -t -u *://{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dSh', 'set -p -u *://{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dsH', 'set -p -t -u *://*.{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dSH', 'set -p -u *://*.{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dsu', 'set -p -t -u {url} content.javascript.enabled false ;; reload')
config.bind('dSu', 'set -p -u {url} content.javascript.enabled false ;; reload')
config.bind('tss', 'config-cycle -p -t content.javascript.enabled ;; reload')
config.bind('tSs', 'config-cycle -p content.javascript.enabled ;; reload')
# Cookies
config.bind('ecc', 'set -p -t content.cookies.accept no-3rdparty ;; reload')
config.bind('eCc', 'set -p content.cookies.accept no-3rdparty ;; reload')
config.bind('ech', 'set -p -t -u *://{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('eCh', 'set -p -u *://{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('ecH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('eCH', 'set -p -u *://*.{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('ecu', 'set -p -t -u {url} content.cookies.accept no-3rdparty ;; reload')
config.bind('eCu', 'set -p -u {url} content.cookies.accept no-3rdparty ;; reload')
config.bind('ecac', 'set -p -t content.cookies.accept all ;; reload')
config.bind('eCac', 'set -p content.cookies.accept all ;; reload')
config.bind('ecah', 'set -p -t -u *://{url:host}/* content.cookies.accept all ;; reload')
config.bind('eCah', 'set -p -u *://{url:host}/* content.cookies.accept all ;; reload')
config.bind('ecaH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept all ;; reload')
config.bind('eCaH', 'set -p -u *://*.{url:host}/* content.cookies.accept all ;; reload')
config.bind('ecau', 'set -p -t -u {url} content.cookies.accept all ;; reload')
config.bind('eCau', 'set -p -u {url} content.cookies.accept all ;; reload')
config.bind('dcc', 'set -p -t content.cookies.accept never ;; reload')
config.bind('dCc', 'set -p content.cookies.accept never ;; reload')
config.bind('dch', 'set -p -t -u *://{url:host}/* content.cookies.accept never ;; reload')
config.bind('dCh', 'set -p -u *://{url:host}/* content.cookies.accept never ;; reload')
config.bind('dcH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept never ;; reload')
config.bind('dCH', 'set -p -u *://*.{url:host}/* content.cookies.accept never ;; reload')
config.bind('dcu', 'set -p -t -u {url} content.cookies.accept never ;; reload')
config.bind('dCu', 'set -p -u {url} content.cookies.accept never ;; reload')
config.bind('tcc', 'config-cycle -p -t content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCc', 'config-cycle -p content.cookies.accept no-3rdparty never ;; reload')
config.bind('tch', 'config-cycle -p -t -u *://{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCh', 'config-cycle -p -u *://{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcH', 'config-cycle -p -t -u *://*.{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCH', 'config-cycle -p -u *://*.{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcu', 'config-cycle -p -t -u {url} content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCu', 'config-cycle -p -u {url} content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcac', 'config-cycle -p -t content.cookies.accept all never ;; reload')
config.bind('tCac', 'config-cycle -p content.cookies.accept all never ;; reload')
config.bind('tcah', 'config-cycle -p -t -u *://{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tCah', 'config-cycle -p -u *://{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tcaH', 'config-cycle -p -t -u *://*.{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tCaH', 'config-cycle -p -u *://*.{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tcau', 'config-cycle -p -t -u {url} content.cookies.accept all never ;; reload')
config.bind('tCau', 'config-cycle -p -u {url} content.cookies.accept all never ;; reload')
# AdBlocker
config.bind('ebb', 'set -p -t content.blocking.enabled true ;; reload')
config.bind('eBb', 'set -p content.blocking.enabled true ;; reload')
config.bind('ebh', 'set -p -t -u *://{url:host}/* content.blocking.enabled true ;; reload')
config.bind('eBh', 'set -p -u *://{url:host}/* content.blocking.enabled true ;; reload')
config.bind('ebH', 'set -p -t -u *://*.{url:host}/* content.blocking.enabled true ;; reload')
config.bind('eBH', 'set -p -u *://*.{url:host}/* content.blocking.enabled true ;; reload')
config.bind('ebu', 'set -p -t -u {url} content.blocking.enabled true ;; reload')
config.bind('eBu', 'set -p -u {url} content.blocking.enabled true ;; reload')
config.bind('dbb', 'set -p -t content.blocking.enabled false ;; reload')
config.bind('dBb', 'set -p content.blocking.enabled false ;; reload')
config.bind('dbh', 'set -p -t -u *://{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dBh', 'set -p -u *://{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dbH', 'set -p -t -u *://*.{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dBH', 'set -p -u *://*.{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dbu', 'set -p -t -u {url} content.blocking.enabled false ;; reload')
config.bind('dBu', 'set -p -u {url} content.blocking.enabled false ;; reload')
config.bind('tbb', 'config-cycle -p -t content.blocking.enabled ;; reload')
config.bind('tBb', 'config-cycle -p content.blocking.enabled ;; reload')
config.bind('tbh', 'config-cycle -p -t -u *://{url:host}/* content.blocking.enabled ;; reload')
config.bind('tBh', 'config-cycle -p -u *://{url:host}/* content.blocking.enabled ;; reload')
config.bind('tbH', 'config-cycle -p -t -u *://*.{url:host}/* content.blocking.enabled ;; reload')
config.bind('tBH', 'config-cycle -p -u *://*.{url:host}/* content.blocking.enabled ;; reload')
config.bind('tbu', 'config-cycle -p -t -u {url} content.blocking.enabled ;; reload')
config.bind('tBu', 'config-cycle -p -u {url} content.blocking.enabled ;; reload')
# Images
config.bind('eii', 'set -p -t content.images true ;; reload')
config.bind('eIi', 'set -p content.images true ;; reload')
config.bind('eih', 'set -p -t -u *://{url:host}/* content.images true ;; reload')
config.bind('eIh', 'set -p -u *://{url:host}/* content.images true ;; reload')
config.bind('eiH', 'set -p -t -u *://*.{url:host}/* content.images true ;; reload')
config.bind('eIH', 'set -p -u *://*.{url:host}/* content.images true ;; reload')
config.bind('eiu', 'set -p -t -u {url} content.images true ;; reload')
config.bind('eIu', 'set -p -u {url} content.images true ;; reload')
config.bind('dii', 'set -p -t content.images false ;; reload')
config.bind('dIi', 'set -p content.images false ;; reload')
config.bind('dih', 'set -p -t -u *://{url:host}/* content.images false ;; reload')
config.bind('dIh', 'set -p -u *://{url:host}/* content.images false ;; reload')
config.bind('diH', 'set -p -t -u *://*.{url:host}/* content.images false ;; reload')
config.bind('dIH', 'set -p -u *://*.{url:host}/* content.images false ;; reload')
config.bind('diu', 'set -p -t -u {url} content.images false ;; reload')
config.bind('dIu', 'set -p -u {url} content.images false ;; reload')
# Plugins
config.bind('epp', 'set -p -t content.plugins true ;; reload')
config.bind('ePp', 'set -p content.plugins true ;; reload')
config.bind('eph', 'set -p -t -u *://{url:host}/* content.plugins true ;; reload')
config.bind('ePh', 'set -p -u *://{url:host}/* content.plugins true ;; reload')
config.bind('epH', 'set -p -t -u *://*.{url:host}/* content.plugins true ;; reload')
config.bind('ePH', 'set -p -u *://*.{url:host}/* content.plugins true ;; reload')
config.bind('epu', 'set -p -t -u {url} content.plugins true ;; reload')
config.bind('ePu', 'set -p -u {url} content.plugins true ;; reload')
config.bind('dpp', 'set -p -t content.plugins false ;; reload')
config.bind('dPp', 'set -p content.plugins false ;; reload')
config.bind('dph', 'set -p -t -u *://{url:host}/* content.plugins false ;; reload')
config.bind('dPh', 'set -p -u *://{url:host}/* content.plugins false ;; reload')
config.bind('dpH', 'set -p -t -u *://*.{url:host}/* content.plugins false ;; reload')
config.bind('dPH', 'set -p -u *://*.{url:host}/* content.plugins false ;; reload')
config.bind('dpu', 'set -p -t -u {url} content.plugins false ;; reload')
config.bind('dPu', 'set -p -u {url} content.plugins false ;; reload')
# Tor proxy
config.bind('et', 'set -p -t content.proxy socks://127.0.0.1:9050')
config.bind('dt', 'set -p -t content.proxy none')
config.bind('tt', 'config-cycle -p -t content.proxy none socks://127.0.0.1:9050')
# Mute
config.bind('emm', 'set -p -t content.mute true')
config.bind('eMm', 'set -p content.mute true')
config.bind('emh', 'set -p -t -u *://{url:host}/* content.mute true')
config.bind('eMh', 'set -p -u *://{url:host}/* content.mute true')
config.bind('emH', 'set -p -t -u *://*.{url:host}/* content.mute true')
config.bind('eMH', 'set -p -u *://*.{url:host}/* content.mute true')
config.bind('emu', 'set -p -t -u {url} content.mute true')
config.bind('eMu', 'set -p -u {url} content.mute true')
config.bind('dmm', 'set -p -t content.mute false')
config.bind('dMm', 'set -p content.mute false')
config.bind('dmh', 'set -p -t -u *://{url:host}/* content.mute false')
config.bind('dMh', 'set -p -u *://{url:host}/* content.mute false')
config.bind('dmH', 'set -p -t -u *://*.{url:host}/* content.mute false')
config.bind('dMH', 'set -p -u *://*.{url:host}/* content.mute false')
config.bind('dmu', 'set -p -t -u {url} content.mute false')
config.bind('dMu', 'set -p -u {url} content.mute false')
config.bind('tmm', 'config-cycle -p -t content.mute')
config.bind('tMm', 'config-cycle -p content.mute')
config.bind('tmh', 'config-cycle -p -t -u *://{url:host}/* content.mute')
config.bind('tMh', 'config-cycle -p -u *://{url:host}/* content.mute')
config.bind('tmH', 'config-cycle -p -t -u *://*.{url:host}/* content.mute')
config.bind('tMH', 'config-cycle -p -u *://*.{url:host}/* content.mute')
config.bind('tmu', 'config-cycle -p -t -u {url} content.mute')
config.bind('tMu', 'config-cycle -p -u {url} content.mute')
# Local Storage
config.bind('ell', 'set -p -t content.local_storage true ;; reload')
config.bind('eLl', 'set -p content.local_storage true ;; reload')
config.bind('elh', 'set -p -t -u *://{url:host}/* content.local_storage true ;; reload')
config.bind('eLh', 'set -p -u *://{url:host}/* content.local_storage true ;; reload')
config.bind('elH', 'set -p -t -u *://*.{url:host}/* content.local_storage true ;; reload')
config.bind('eLH', 'set -p -u *://*.{url:host}/* content.local_storage true ;; reload')
config.bind('elu', 'set -p -t -u {url} content.local_storage true ;; reload')
config.bind('eLu', 'set -p -u {url} content.local_storage true ;; reload')
config.bind('dll', 'set -p -t content.local_storage false ;; reload')
config.bind('dLl', 'set -p content.local_storage false ;; reload')
config.bind('dlh', 'set -p -t -u *://{url:host}/* content.local_storage false ;; reload')
config.bind('dLh', 'set -p -u *://{url:host}/* content.local_storage false ;; reload')
config.bind('dlH', 'set -p -t -u *://*.{url:host}/* content.local_storage false ;; reload')
config.bind('dLH', 'set -p -u *://*.{url:host}/* content.local_storage false ;; reload')
config.bind('dlu', 'set -p -t -u {url} content.local_storage false ;; reload')
config.bind('dLu', 'set -p -u {url} content.local_storage false ;; reload')
config.bind('tll', 'config-cycle -p -t content.local_storage ;; reload')
config.bind('tLl', 'config-cycle -p content.local_storage ;; reload')
config.bind('tlh', 'config-cycle -p -t -u *://{url:host}/* content.local_storage ;; reload')
config.bind('tLh', 'config-cycle -p -u *://{url:host}/* content.local_storage ;; reload')
config.bind('tlH', 'config-cycle -p -t -u *://*.{url:host}/* content.local_storage ;; reload')
config.bind('tLH', 'config-cycle -p -u *://*.{url:host}/* content.local_storage ;; reload')
config.bind('tlu', 'config-cycle -p -t -u {url} content.local_storage ;; reload')
config.bind('tLu', 'config-cycle -p -u {url} content.local_storage ;; reload')
# clipboard
config.bind('eyy', 'set -p -t content.javascript.can_access_clipboard true ;; reload')
config.bind('eYy', 'set -p content.javascript.can_access_clipboard true ;; reload')
config.bind('eyh', 'set -p -t -u *://{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eYh', 'set -p -u *://{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eyH', 'set -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eYH', 'set -p -u *://*.{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eyu', 'set -p -t -u {url} content.javascript.can_access_clipboard true ;; reload')
config.bind('eYu', 'set -p -u {url} content.javascript.can_access_clipboard true ;; reload')
config.bind('dyy', 'set -p -t content.javascript.can_access_clipboard false ;; reload')
config.bind('dYy', 'set -p content.javascript.can_access_clipboard false ;; reload')
config.bind('dyh', 'set -p -t -u *://{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dYh', 'set -p -u *://{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dyH', 'set -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dYH', 'set -p -u *://*.{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dyu', 'set -p -t -u {url} content.javascript.can_access_clipboard false ;; reload')
config.bind('dYu', 'set -p -u {url} content.javascript.can_access_clipboard false ;; reload')
config.bind('tyy', 'config-cycle -p -t content.javascript.can_access_clipboard ;; reload')
config.bind('tYy', 'config-cycle -p content.javascript.can_access_clipboard ;; reload')
config.bind('tyh', 'config-cycle -p -t -u *://{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tYh', 'config-cycle -p -u *://{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tyH', 'config-cycle -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tYH', 'config-cycle -p -u *://*.{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tyu', 'config-cycle -p -t -u {url} content.javascript.can_access_clipboard ;; reload')
config.bind('tYu', 'config-cycle -p -u {url} content.javascript.can_access_clipboard ;; reload')
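# Illustrative sketch, not part of the original config: the enable/disable/
# toggle binds above all follow the same pattern, so they could be generated
# by a small helper instead of being written out by hand. The helper below is
# defined but deliberately left uncalled; the key prefix and setting in the
# commented example are hypothetical, and it only covers the temporary (-t)
# variants for brevity.
def bind_setting_scopes(key, setting, value):
    """Bind KEY plus s/h/H/u suffixes to set SETTING at the usual URL scopes."""
    scopes = {'s': '', 'h': '-u *://{url:host}/* ',
              'H': '-u *://*.{url:host}/* ', 'u': '-u {url} '}
    for suffix, scope in scopes.items():
        config.bind(key + suffix,
                    'set -p -t ' + scope + setting + ' ' + value + ' ;; reload')
# Example: bind_setting_scopes('eg', 'content.geolocation', 'true')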
# redirect
config.bind('er', 'spawn --userscript redirect True')
config.bind('dr', 'spawn --userscript redirect False')
# rebinds
config.bind('q', 'close')
config.bind('O', 'set-cmd-text -s :open -w')
config.bind('F', 'hint all window ')
config.bind('I', 'hint -f inputs normal ')
config.bind('m', 'tab-mute')
config.bind('gc', 'tab-clone')
# config.bind('<Escape>', 'mode-leave ;; jseval -q document.activeElement.blur()', mode='insert')
config.bind('<Ctrl+Escape>', 'fake-key <Escape>')
# leader binds
leader = '<Space>'
config.bind(leader + leader, 'fake-key ' + leader)
config.bind(leader + 'o', 'set-cmd-text -s :open -p')
config.bind(leader + 'vv', 'hint links spawn --detach mpv "{hint-url}"')
config.bind(leader + 'vr', 'hint -r links spawn --detach mpv "{hint-url}"')
config.bind(leader + 'vu', 'spawn --detach mpv "{url}"')
config.bind(leader + 'dd', 'hint links spawn ytdl "{hint-url}"')
config.bind(leader + 'dr', 'hint -r links spawn --detach ytdl "{hint-url}"')
config.bind(leader + 'du', 'spawn --detach ytdl "{url}"')
config.bind(leader + 'ii', 'hint images spawn --detach img -u "{hint-url}"')
config.bind(leader + 'ir', 'hint -r images spawn --detach img -u "{hint-url}"')
config.bind(leader + 'iu', 'spawn --detach img -u "{url}"')
config.bind(leader + 'cc', 'hint links spawn --detach chromium "{hint-url}"')
config.bind(leader + 'cr', 'hint -r links spawn --detach chromium "{hint-url}"')
config.bind(leader + 'cu', 'spawn --detach chromium "{url}"')
config.bind(leader + 'ff', 'hint links spawn --detach firefox "{hint-url}"')
config.bind(leader + 'fr', 'hint -r links spawn --detach firefox "{hint-url}"')
config.bind(leader + 'fu', 'spawn --detach firefox "{url}"')
config.bind(leader + 'tt', 'hint links spawn --detach tm -a "{hint-url}"')
config.bind(leader + 'tr', 'hint -r links spawn --detach tm -a "{hint-url}"')
config.bind(leader + 'tu', 'spawn --detach tm -a "{url}"')
config.bind(leader + 'qq', 'hint links userscript qr')
config.bind(leader + 'qu', 'spawn --userscript qr')
config.bind(leader + 'qr', 'hint -r links userscript qr')
# ==================== Search Engines ====================================
c.url.searchengines = {
'DEFAULT': 'https://search.simonhugh.xyz/searx/search?q={}',
# DuckDuckGO
'd': 'https://duckduckgo.com/?q={}',
# Google
    'g' : 'https://www.google.com/search?q={}',
# Google Maps
'm' : 'https://www.google.com/maps/search/{}',
# Youtube
'y' : 'https://www.youtube.com/results?search_query={}',
# Amazon
'a' : 'https://www.amazon.co.uk/s?k={}',
}
# redirect urls
config.source('redirect.py')
# load autoconfig.yml
config.load_autoconfig(True)
|
python
|
"""Misc funcs for backtester"""
import pandas as pd
from io import StringIO
from . import fb_amzn
def load_example():
"""Load example input data"""
df = pd.read_csv(StringIO(fb_amzn.data))
df['date'] = pd.to_datetime(df['date']).dt.tz_localize('US/Central')
return df
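# Usage sketch (illustrative, not in the original module). The package path in
# the import is an assumption; only the 'date' column is guaranteed by the
# code above, any other columns depend on the bundled fb_amzn CSV.
#
#     from backtester.misc import load_example
#     df = load_example()
#     df['date'].dtype     # datetime64[ns, US/Central]
#     df.head()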
|
python
|
import pprint
import sys
import numpy as np
def pbatch(source, dic):
    ss = np.transpose(source)
    for line in ss[:10]:
        for word in line:
            a = dic[word]
            b = a
            if a == "SOS":
                b = "{"
            elif a == "EOS":
                b = "}"
            elif a == "ZERO":
                b = "_"
            elif a == "UNK":
                b = "|"
            sys.stdout.write(b)
        print(" ")
    print("")
def pbatch_many(source, dic, n_x):
    ss = np.transpose(source)
    iis = [0, 20, n_x - 8, n_x - 1]
    for ii in iis:
        line = ss[ii]
        for word in line:
            a = dic[word]
            b = a
            if a == "SOS":
                b = "{"
            elif a == "EOS":
                b = "}"
            elif a == "ZERO":
                b = "_"
            elif a == "UNK":
                b = "|"
            sys.stdout.write(b)
        print(" ")
    print("")
|
python
|
from cryptofield.fieldmatrix import *
import unittest
class TestFMatrix(unittest.TestCase):
def testMatrixGetRow1(self):
F = FField(4)
m = FMatrix(F, 3, 3)
m.ident()
r = m.getRow(1)
ans = [FElement(F, 0), FElement(F, 1), FElement(F, 0)]
self.assertEqual(r, ans)
def testMatrixGetColumn1(self):
F = FField(4)
m = FMatrix(F, 5, 5)
m.ident()
r = m.getColumn(0)
ans = [FElement(F, 1), FElement(F, 0), FElement(F, 0), FElement(F, 0), FElement(F, 0)]
self.assertEqual(r, ans)
def testMatrixInverse1(self):
F = FField(2)
m = FMatrix(F, 4, 4)
m.setRow(0, [FElement(F, 0), FElement(F, 0), FElement(F, 0), FElement(F, 1)])
m.setRow(1, [FElement(F, 0), FElement(F, 0), FElement(F, 1), FElement(F, 0)])
m.setRow(2, [FElement(F, 0), FElement(F, 1), FElement(F, 0), FElement(F, 0)])
m.setRow(3, [FElement(F, 1), FElement(F, 0), FElement(F, 0), FElement(F, 1)])
inv = m.inverse()
ans = FMatrix(F, 4, 4)
ans.setRow(0, [FElement(F, 1), FElement(F, 0), FElement(F, 0), FElement(F, 1)])
ans.setRow(1, [FElement(F, 0), FElement(F, 0), FElement(F, 1), FElement(F, 0)])
ans.setRow(2, [FElement(F, 0), FElement(F, 1), FElement(F, 0), FElement(F, 0)])
ans.setRow(3, [FElement(F, 1), FElement(F, 0), FElement(F, 0), FElement(F, 0)])
self.assertEqual(inv, ans)
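# Added entry point (not in the original file) so the suite can also be run
# directly with `python <this file>`; `python -m unittest` works as well.
if __name__ == '__main__':
    unittest.main()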
|
python
|
# Generated by Django 2.2.5 on 2019-10-28 17:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('service', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='rating',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='service.Service'),
),
]
|
python
|
def binary_search(input_array, value):
first = 0
last = len(input_array) - 1
    while first <= last:
        middle_point = (first + last) // 2
        if input_array[middle_point] == value:
            # Return the matched position directly; calling index() here would
            # add a redundant linear scan after the binary search.
            return middle_point
        if value < input_array[middle_point]:
            last = middle_point - 1
        else:
            first = middle_point + 1
    return -1
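# Illustrative usage (not part of the original snippet); the input list must
# already be sorted for binary search to be meaningful.
if __name__ == '__main__':
    data = [1, 3, 5, 7, 9, 11]
    print(binary_search(data, 7))   # -> 3
    print(binary_search(data, 4))   # -> -1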
|
python
|
from rest_framework import permissions
class IsBuyerOrSellerUser(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_authenticated and request.user.is_buyer_or_seller:
return True
return False
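# Hedged usage sketch, not part of the original module. It assumes a custom
# user model exposing an `is_buyer_or_seller` attribute, as the check above
# implies; the view, serializer and model names are hypothetical.
#
#     from rest_framework import generics
#
#     class ListingListView(generics.ListAPIView):
#         permission_classes = [IsBuyerOrSellerUser]
#         serializer_class = ListingSerializer   # hypothetical serializer
#         queryset = Listing.objects.all()       # hypothetical model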
|
python
|
from copy import deepcopy
from dbt.contracts.graph.manifest import WritableManifest
from dbt.contracts.results import CatalogArtifact
def edit_catalog(
catalog: CatalogArtifact, manifest: WritableManifest
) -> CatalogArtifact:
output = deepcopy(catalog)
node_names = tuple(node for node in output.nodes)
for node in node_names:
if node not in manifest.nodes:
output.nodes.pop(node)
source_names = tuple(source for source in output.sources)
for source in source_names:
if source not in manifest.sources:
output.sources.pop(source)
return output
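# Usage sketch (illustrative, not in the original module): read both dbt
# artifacts and drop catalog entries that are no longer in the manifest. The
# target/ paths and the reader method name are assumptions and may vary
# between dbt versions.
#
#     manifest = WritableManifest.read_and_check_versions('target/manifest.json')
#     catalog = CatalogArtifact.read_and_check_versions('target/catalog.json')
#     pruned = edit_catalog(catalog, manifest)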
|
python
|
# -*- coding: utf-8 -*-
"""
encryption test_services module.
"""
import pytest
import pyrin.security.encryption.services as encryption_services
import pyrin.configuration.services as config_services
from pyrin.security.encryption.handlers.aes128 import AES128Encrypter
from pyrin.security.encryption.handlers.rsa256 import RSA256Encrypter
from pyrin.security.encryption.exceptions import DuplicatedEncryptionHandlerError, \
InvalidEncryptionHandlerTypeError, EncryptionHandlerNotFoundError, DecryptionError
def test_register_encryption_handler_duplicate():
"""
registers an already available encryption handler.
it should raise an error.
"""
with pytest.raises(DuplicatedEncryptionHandlerError):
encryption_services.register_encryption_handler(AES128Encrypter())
def test_register_encryption_handler_invalid_type():
"""
registers an encryption handler with an invalid type.
it should raise an error.
"""
with pytest.raises(InvalidEncryptionHandlerTypeError):
encryption_services.register_encryption_handler(25)
def test_register_encryption_handler_duplicate_with_replace():
"""
registers an already available encryption handler with replace option.
it should not raise an error.
"""
encryption_services.register_encryption_handler(RSA256Encrypter(), replace=True)
def test_encrypt_default():
"""
encrypts the given value using default handler and returns the encrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message)
assert encrypted_value is not None
assert config_services.get('security', 'encryption',
'default_encryption_handler') in encrypted_value
def test_encrypt_aes128():
"""
encrypts the given value using aes128 handler and returns the encrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message, handler_name='AES128')
assert encrypted_value is not None
assert 'AES128' in encrypted_value
def test_encrypt_rsa256():
"""
encrypts the given value using rsa256 handler and returns the encrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message, handler_name='RSA256')
assert encrypted_value is not None
assert 'RSA256' in encrypted_value
def test_encrypt_invalid_handler():
"""
encrypts the given value using an invalid handler.
it should raise an error.
"""
with pytest.raises(EncryptionHandlerNotFoundError):
encryption_services.encrypt('confidential', handler_name='missing_handler')
def test_decrypt_default():
"""
decrypts the given full encrypted value using default
handler and returns the decrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message)
original_value = encryption_services.decrypt(encrypted_value)
assert original_value == message
def test_decrypt_aes128():
"""
decrypts the given full encrypted value using aes128
handler and returns the decrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message, handler_name='AES128')
original_value = encryption_services.decrypt(encrypted_value)
assert original_value == message
def test_decrypt_rsa256():
"""
decrypts the given full encrypted value using rsa256
handler and returns the decrypted result.
"""
message = 'confidential'
encrypted_value = encryption_services.encrypt(message, handler_name='RSA256')
original_value = encryption_services.decrypt(encrypted_value)
assert original_value == message
def test_decrypt_invalid_value():
"""
decrypts the given invalid encrypted value using default handler.
it should raise an error.
"""
with pytest.raises(DecryptionError):
message = 'confidential'
encrypted_value = encryption_services.encrypt(message)
encrypted_value = encrypted_value.replace('o', 'b')
encryption_services.decrypt(encrypted_value)
def test_decrypt_invalid_handler():
"""
decrypts the given encrypted value using an invalid handler.
it should raise an error.
"""
with pytest.raises(EncryptionHandlerNotFoundError):
message = 'confidential'
encrypted_value = encryption_services.encrypt(message)
handler = config_services.get('security', 'encryption',
'default_encryption_handler')
encrypted_value = encrypted_value.replace(handler, 'missing handler')
encryption_services.decrypt(encrypted_value)
def test_decrypt_mismatch_handler():
"""
decrypts the given encrypted value using a handler that is not the original handler.
it should raise an error.
"""
with pytest.raises(DecryptionError):
message = 'confidential'
handler = 'AES128'
mismatch_handler = 'RSA256'
encrypted_value = encryption_services.encrypt(message, handler_name=handler)
encrypted_value = encrypted_value.replace(handler, mismatch_handler)
encryption_services.decrypt(encrypted_value)
def test_generate_key_aes128():
"""
generates a valid key for aes128 handler and returns it.
"""
key = encryption_services.generate_key('AES128')
assert key is not None and len(key) > 0
def test_generate_key_rsa256():
"""
generates a valid public/private key pair for rsa256 handler and returns it.
"""
public, private = encryption_services.generate_key('RSA256')
assert public is not None and private is not None
assert len(public) > 0 and len(private) > 0
def test_generate_key_invalid_handler():
"""
generates a key for an invalid handler.
it should raise an error.
"""
with pytest.raises(EncryptionHandlerNotFoundError):
encryption_services.generate_key('missing handler')
def test_encrypter_is_singleton():
"""
tests that different types of encrypters are singleton.
"""
encrypter1 = AES128Encrypter()
encrypter2 = AES128Encrypter()
assert encrypter1 == encrypter2
encrypter3 = RSA256Encrypter()
encrypter4 = RSA256Encrypter()
assert encrypter3 == encrypter4
|
python
|
"""
HoNCore. Python library providing connectivity and functionality
with HoN's chat server.
Packet ID definitions.
Updated 23-7-11.
Client version 2.40.2
"""
""" Server -> Client """
HON_SC_AUTH_ACCEPTED = 0x1C00
HON_SC_PING = 0x2A00
HON_SC_CHANNEL_MSG = 0x03
HON_SC_JOINED_CHANNEL = 0x04
HON_SC_ENTERED_CHANNEL = 0x05
HON_SC_LEFT_CHANNEL = 0x06
HON_SC_WHISPER = 0x08
HON_SC_WHISPER_FAILED = 0x09
HON_SC_INITIAL_STATUS = 0x0B
HON_SC_UPDATE_STATUS = 0x0C
HON_SC_CLAN_MESSAGE = 0x13
HON_SC_LOOKING_FOR_CLAN = 0x18
HON_SC_PM = 0x1C
HON_SC_PM_FAILED = 0x1D
HON_SC_WHISPER_BUDDIES = 0x20
HON_SC_MAX_CHANNELS = 0x21
HON_SC_USER_INFO_NO_EXIST = 0x2B
HON_SC_USER_INFO_OFFLINE = 0x2C
HON_SC_USER_INFO_ONLINE = 0x2D
HON_SC_USER_INFO_IN_GAME = 0x2E
HON_SC_CHANNEL_UPDATE = 0x2F
HON_SC_CHANNEL_UPDATE_TOPIC = 0x30
HON_SC_CHANNEL_KICK = 0x31
HON_SC_CHANNEL_BAN = 0x32
HON_SC_CHANNEL_UNBAN = 0x33
HON_SC_CHANNEL_BANNED = 0x34
HON_SC_CHANNEL_SILENCED = 0x35
HON_SC_CHANNEL_SILENCE_LIFTED = 0x36
HON_SC_CHANNEL_SILENCE_PLACED = 0x37
HON_SC_MESSAGE_ALL = 0x39
HON_SC_CHANNEL_PROMOTE = 0x3A
HON_SC_CHANNEL_DEMOTE = 0x3B
HON_SC_CHANNEL_AUTH_ENABLE = 0x3E
HON_SC_CHANNEL_AUTH_DISABLE = 0x3F
HON_SC_CHANNEL_AUTH_ADD = 0x40
HON_SC_CHANNEL_AUTH_DELETE = 0x41
HON_SC_CHANNEL_AUTH_LIST = 0x42
HON_SC_CHANNEL_PASSWORD_CHANGED = 0x43
HON_SC_CHANNEL_AUTH_ADD_FAIL = 0x44
HON_SC_CHANNEL_AUTH_DEL_FAIL = 0x45
HON_SC_JOIN_CHANNEL_PASSWORD = 0x46
HON_SC_CLAN_MEMBER_ADDED = 0x4E
HON_SC_NAME_CHANGE = 0x5A
HON_SC_CHANNEL_EMOTE = 0x65
HON_SC_TOTAL_ONLINE = 0x68
HON_SC_REQUEST_NOTIFICATION = 0xB2
HON_SC_NOTIFICATION = 0xB4
"Reverse-engineered"
HON_SC_GAME_INVITE = 0x25
""" GameServer -> Client """
HON_GSC_PACKET_RECV = "HON_GSC_PACKET_RECV"
HON_GSC_PING = 0x4c
HON_GSC_AUTH_ACCEPTED = 0x5c
HON_GSC_CHANNEL_MSG = 0x6c
HON_GSC_TIMEOUT = 0x5101
HON_GSC_SERVER_STATE = 0x03
HON_GSC_SERVER_INFO = 0x01
""" Client -> Server """
HON_CS_PONG = 0x2A01
HON_CS_CHANNEL_MSG = 0x03
HON_CS_WHISPER = 0x08
HON_CS_AUTH_INFO = 0x0C00
HON_CS_BUDDY_ADD_NOTIFY = 0x0D
HON_CS_JOIN_GAME = 0x10
HON_CS_CLAN_MESSAGE = 0x13
HON_CS_PM = 0x1C
HON_CS_JOIN_CHANNEL = 0x1E
HON_CS_WHISPER_BUDDIES = 0x20
HON_CS_LEAVE_CHANNEL = 0x22
HON_CS_USER_INFO = 0x2A
HON_CS_UPDATE_TOPIC = 0x30
HON_CS_CHANNEL_KICK = 0x31
HON_CS_CHANNEL_BAN = 0x33
HON_CS_CHANNEL_UNBAN = 0x32
HON_CS_CHANNEL_SILENCE_USER = 0x38
HON_CS_CHANNEL_PROMOTE = 0x3A
HON_CS_CHANNEL_DEMOTE = 0x3B
HON_CS_CHANNEL_AUTH_ENABLE = 0x3E
HON_CS_CHANNEL_AUTH_DISABLE = 0x3F
HON_CS_CHANNEL_AUTH_ADD = 0x40
HON_CS_CHANNEL_AUTH_DELETE = 0x41
HON_CS_CHANNEL_AUTH_LIST = 0x42
HON_CS_CHANNEL_SET_PASSWORD = 0x43
HON_CS_JOIN_CHANNEL_PASSWORD = 0x46
HON_CS_CLAN_ADD_MEMBER = 0x47
HON_CS_CHANNEL_EMOTE = 0x65
HON_CS_BUDDY_ACCEPT = 0xB3
HON_CS_START_MM_GROUP = 0x0C0A
HON_CS_INVITE_TO_MM = 0x0C0D
"Reverse-engineered"
HON_CS_GAME_INVITE = 0x24
HON_CS_GAME_SERVER_IP = 0xf
HON_CS_GAME_SERVER_INFO = 0x1000
""" Client -> GameServer """
HON_CGS_PONG = 0
HON_CGS_AUTH_INFO = 0xc001
HON_CGS_AUTH_MAGIC_PACKET = 0xc901cbcf
# Dummy Events / Custom events?
HON_SC_PACKET_RECV = "HON_SC_PACKET_RECV"
HON_GSC_PACKET_RECV = "HON_GSC_PACKET_RECV"
""" User Flags"""
HON_FLAGS_NONE = 0x00
HON_FLAGS_OFFICER = 0x01
HON_FLAGS_LEADER = 0x02
HON_FLAGS_ADMINISTRATOR = 0x03
HON_FLAGS_STAFF = 0x04
HON_FLAGS_PREPURCHASED = 0x40
""" User States"""
HON_STATUS_OFFLINE = 0
HON_STATUS_ONLINE = 3
HON_STATUS_INLOBBY = 4
HON_STATUS_INGAME = 5
""" Login Modes"""
HON_MODE_NORMAL = 0x00
HON_MODE_INVISIBLE = 0x03
""" Game Server"""
GAME_SERVER_TYPE = 90
MAXIMUM_SERVER_PING = 90
""" Team Slots"""
TEAM_SLOTS = {
'BLUE' : (1, 0),
'TEAL' : (1, 1),
'PURPLE' : (1, 2),
'YELLOW' : (1, 3),
'ORANGE' : (1, 4),
'PINK' : (2, 0),
'GREY' : (2, 1),
'LIGHTBLUE' : (2, 2),
'GREEN' : (2, 3),
'BROWN' : (2, 4),
'SPECTATOR' : (3, 0),
'REFEREE' : (4, 0)
}
HON_SERVER_VERSION = "2.6.10"
#HON_HOST_ID = 1542367444
HON_CONNECTION_ID = 52175
HON_HOST_ID = 1253506080
#HON_CONNECTION_ID = 24938
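# Illustrative helper, not part of the original definitions: reverse-lookup a
# packet id back to its constant name, handy when logging unknown packets. It
# simply scans this module's globals for names with the given prefix.
def packet_name(packet_id, prefix='HON_SC_'):
    for name, value in globals().items():
        if name.startswith(prefix) and value == packet_id:
            return name
    return 'UNKNOWN(0x%x)' % packet_id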
|
python
|
"""A quantum tic tac toe running in command line"""
from qiskit import Aer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import CompositeGate
from qiskit import execute
import numpy as np
from composite_gates import cry,cnx,any_x,bus_or,x_bus
class Move():
def __init__(self,indices,player,q1=None,q2=None):
"""A data structure for game moves"""
self.indices = indices
self.player=player
self.q1=q1
self.q2=q2
def __str__(self):
return str([self.indices,self.player,self.q1,self.q2])
class Board():
def __init__(self,x,y,print_info=False):
#quantum register, classical register, quantum circuit.
self.print_info=print_info
self.q = QuantumRegister(1)
self.c = ClassicalRegister(1)
self.qc = QuantumCircuit(self.q, self.c)
self.qc.cry = cry
self.qc.x_bus = x_bus
self.qc.cnx = cnx
self.qc.any_x = any_x
self.qc.bus_or = bus_or
#the dimensions of the bord
self.x=x
self.y=y
#To keep track of what is in each cell, no entanglement etc.
#Provides a graphic of the game state.
self.cells = np.empty((x,y),dtype=object)
self.cells[:]='' #Initially game is empty.
self.game_full = False
self.moves = []
def __str__(self):
return str(self.cells)
def add_move(self,indices,player):
"""Adds a move if it is non-clashing, otherwise passes it on"""
for index in indices:
if index[0] >= self.x:
return 'Index out of range'
if index[1] >= self.y:
return 'Index out of range'
status = self._add_move(indices,player)
if status=='ok':
if player==0:
char = 'X'
elif player==1:
char = 'O'
char+=str(len(self.moves))
for index in indices:
s = self.cells[index[0],index[1]]
if s: #If the cell has some text
#Add char with a comma
self.cells[index[0],index[1]]+=' '+char
else: #cell is empty so just add char
self.cells[index[0],index[1]]+=char
print(self.cells)
return status
def _add_move(self,indices,player):
"""Actually adds the move if not clashing,
otherwise passes it to _add_clashing_move"""
if len(indices)==2:
if indices[0]==indices[1]:
indices = [indices[0]]
num=len(indices)
caught_clashes = False #turns true if all moves are safe clashes
for existing_move in self.moves:
for index in indices:
if index in existing_move.indices:
if len(existing_move.indices)==1:
return 'overfull'
#This move will ALWAYS be there, if it can.
#hence, overfull.
else:
#captures any clash
caught_clashes = True
if caught_clashes:
return self._add_clashing_move(indices,player)
else:
#Reach this section if there are no clashes at all
if num==1:
self.moves.append(Move(indices,player)) #No control needed
return 'ok'
else:
self.q.size+=2 #indicator qubit, and move qubit
q1 = self.q[self.q.size-2] #To make this readable...
q2 = self.q[self.q.size-1]
self.qc.h(q1) #the last qubit in register.
self.qc.x(q2)
self.qc.cx(q1,q2)
self.moves.append(Move(indices,player,q1,q2))
return 'ok'
def _add_clashing_move(self,indices,player):
"""Adds a clashing move"""
if len(indices)==1: #100% of qubit is on one clashing spot.
#This spot COULD be occupied.
self.q.size+=1 #Only one bit needed, move happens or not.
index = indices[0]
bus = []
for existing_move in self.moves:
if index in existing_move.indices:
if index==existing_move.indices[0]:
bus.append(existing_move.q1)
elif index==existing_move.indices[1]:
bus.append(existing_move.q2)
#Now if any entry on the bus is true, our qubit is false.
self.qc.x(self.q[self.q.size-1]) # make it 1
self.qc.any_x(self.qc,*bus,self.q[self.q.size-1])
            #negate if any dependents are true.
#So the new move can happen if none of the others happen.
self.moves.append(Move(indices,player,self.q[self.q.size-1]))
return 'ok'
elif len(indices)==2:
#Check first spot is not occupied, then second spot if first
#is not occupied.
self.q.size+=2 #Two bits needed (maybe) for each index.
#This can be optimized, in effect only one qubit is needed,
#and its result indicates the selected qubit.
#However, then some control qubit is needed too.
#Since there are moves that could possibly be erased completely!
bus0 = []
bus1 = []
for existing_move in self.moves:
if indices[0] in existing_move.indices:
if indices[0]==existing_move.indices[0]:
bus0.append(existing_move.q1)
elif indices[0]==existing_move.indices[1]:
bus0.append(existing_move.q2)
if indices[1] in existing_move.indices:
if indices[1]==existing_move.indices[0]:
bus1.append(existing_move.q1)
elif indices[1]==existing_move.indices[1]:
bus1.append(existing_move.q2)
#Now if any entry on the bus is true, our first qubit is false.
q1 = self.q[self.q.size-2] #a bit easier to look at (:
q2 = self.q[self.q.size-1]
if bus0:
self.qc.x(q1)
self.qc.cnx(self.qc,*bus0,q1)
else: self.qc.h(q1)
#And now the second qubit is 1 only if none of its competitors
#are 1, and likewise if the previous qubit is zero.
self.qc.x(q2)
self.qc.bus_or(self.qc,q2,bus1,[q1])
self.moves.append(Move(indices,player,q1,q2))
return 'ok'
def run(self):
"""Game loop"""
self.running=True
if self.print_info:
print("Welcome to Quantum tic tac toe!")
print("At each turn choose if to make one or two moves.")
print("Playing one move at a time is a classic tic tac toe game.")
print("At each turn the game state is printed;")
print("X3 is the third move, played by X. When a move is made in a super position,")
print("You will see its label, say X3, appear in several places.")
print("This means your move is in a superposition of two classical moves!")
print("You can make a move in a possibly occupied spot.")
print("Then the new move will be anti-correlated with the move already in that spot.")
print("And so the game branches out into many possible states.")
print("The outcome is then computed by simulation...")
print("so don't make too many quantum moves or it will take long to compute!")
print("Enter 'q' at any time to quit")
print("Enter 'end' to end the game, and compute the winner(s).")
print("Good luck!")
while self.running:
self.ask_player(0)
self.ask_player(1)
if self.game_full:
self.compute_winner()
def ask_player(self,player):
"""Ask a player for move details"""
asking=False
if self.running:
asking = True
while asking:
if player==0:
player_name = 'X'
elif player==1:
player_name = 'O'
print("PLAYER "+player_name+" :")
cells = self.question('Play in 1 or 2 cells?')
if cells=='1':
x = int(self.question('x index:'))
y = int(self.question('y index:'))
status = self.add_move([[y,x]],player)
if status == 'ok':
asking = False
else: print(status)
elif cells=='2':
x1 = int(self.question('x1 index:'))
y1 = int(self.question('y1 index:'))
x2 = int(self.question('x2 index:'))
y2 = int(self.question('y2 index:'))
status = self.add_move([[y1,x1],[y2,x2]],player)
if status == 'ok':
asking = False
else: print(status)
if not self.running:
asking=False
def question(self,text):
"""ask user a question"""
if self.running:
answer = input(text)
if answer=='q':
self.running=False
return None
elif answer=='end':
self.game_full = True
self.running = False
else:
return answer
else: return None
def compute_winner(self):
"""Find overall game winner, by finding winners of each outcome"""
self.c.size = self.q.size #Make them the same
self.qc.measure(self.q, self.c) #Measure
backend = Aer.get_backend('qasm_simulator')
job_sim = execute(self.qc, backend=backend, shots=100)
sim_result = job_sim.result()
print("simulation: ", sim_result)
print(sim_result.get_counts(self.qc))
self.counts = sim_result.get_counts(self.qc)
for count in self.counts: #Takes key names
c = list(count)[:-1] #splits key '1011' => ['1','0','1','1']
c = c[::-1] #invert it so it goes 0 up...
            #Ignore the last bit since I don't know how to get rid of it
#It is zero always.
#The reason it is included is that I create a quantum register and
#then start adding operations, quantum registers need at least one bit.
counter = 0
weight = self.counts[count]
empty = np.zeros((self.x,self.y),dtype=str)
for m in self.moves:
if m.player == 0:
char = 'x'
elif m.player==1:
char = 'o'
result = []
if m.q1:
result.append(c[counter])
counter+=1
if m.q2:
result.append(c[counter])
counter+=1
#print(result)
if len(result) == len(m.indices):
#print(m)
if result[0]=='1':
empty[m.indices[0][0],m.indices[0][1]] = char
if len(result)>1:
if result[1]=='1':
if result[0]=='1':
                                print('problem! a move appeared in two places.')
print(m)
empty[m.indices[1][0],m.indices[1][1]] = char
                elif not result: #Then it was a classical move
empty[m.indices[0][0],m.indices[0][1]] = char
xscore,oscore=self.winners(empty)
print('X wins: '+str(xscore))
print('O wins: '+str(oscore))
print('Shots: '+str(weight))
print(empty)
def winners(self,empty):
"""Compute winners of a board"""
oscore = 0
xscore = 0
for x in range(self.x):
if empty[x,1]==empty[x,0] and empty[x,2]==empty[x,1]:
if empty[x,0]=='o':
oscore+=1
elif empty[x,0]=='x':
xscore +=1
for y in range(self.y):
if empty[1,y]==empty[0,y] and empty[2,y]==empty[0,y]:
if empty[0,y]=='o':
oscore+=1
elif empty[0,y]=='x':
xscore +=1
if empty[0,0]==empty[1,1] and empty[1,1]==empty[2,2]:
if empty[0,0]=='o':
oscore+=1
elif empty[0,0]=='x':
xscore += 1
if empty[2,0]==empty[1,1] and empty[1,1]==empty[0,2]:
if empty[2,0]=='o':
oscore+=1
elif empty[2,0]=='x':
xscore += 1
return [xscore,oscore]
def _populate_board(self):
"""Automatically populate as below, for testing purposes"""
self.add_move([[2,2],[0,0]],1)
self.add_move([[1,1],[1,2]],0)
self.add_move([[1,2],[2,1]],1)
self.add_move([[2,1]],0)
self.add_move([[0,1]],1)
self.add_move([[1,0]],0)
self.add_move([[2,0]],1)
self.add_move([[2,2]],0)
self.add_move([[0,0]],1)
self.add_move([[0,2]],0)
self.add_move([[1,1]],1)
self.add_move([[1,2]],0)
if __name__=="__main__":
B= Board(3,3)
B.run()
#B._populate_board()
#a = B.compute_winner()
|
python
|
import os
import foundations
from foundations_contrib.global_state import current_foundations_context, message_router
from foundations_events.producers.jobs import RunJob
foundations.set_project_name('default')
job_id = os.environ['ACCEPTANCE_TEST_JOB_ID']
pipeline_context = current_foundations_context().pipeline_context()
pipeline_context.file_name = job_id
RunJob(message_router, pipeline_context).push_message()
foundations.set_tag('model type', 'simple mlp')
foundations.set_tag('data set', 'out of time')
foundations.set_tag('what I was doing,', 'drinking tea')
print('Hello World!')
|
python
|
class MySql(object):
pass
|
python
|
#!/usr/bin/env python3
#
# Author: Jeremy Compostella <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
'''This module implements a car charger task based on the Wallbox EV charger.
'''
import os
import sys
from datetime import datetime, timedelta
from select import select
from time import sleep
import Pyro5
import requests
from cachetools import TTLCache
from wallbox import Wallbox
from car_sensor import CarSensorProxy
from power_sensor import RecordScale
from scheduler import Priority, SchedulerProxy, Task
from sensor import SensorReader
from tools import NameServer, Settings, debug, init, log_exception
from watchdog import WatchdogProxy
DEFAULT_SETTINGS = {'power_sensor_key': 'EV',
'min_available_current': 6,
'cycle_length': 15}
MODULE_NAME = 'car_charger'
class CarCharger(Task):
'''Wallbox car charger Task.
This task handles a Wallbox car charger and automatically adjusts the
charge rate based on produced power availability.
'''
FULLY_CHARGED = 'Connected: waiting for car demand'
PLUGGED_IN = ['Charging', FULLY_CHARGED,
'Connected: waiting for next schedule',
'Paused by user']
def __init__(self, wallbox: Wallbox, charger_id: int, settings: Settings):
Task.__init__(self, Priority.LOW, keys=[settings.power_sensor_key],
auto_adjust=True)
self.wallbox = wallbox
self.charger_id = charger_id
self.settings = settings
self.cache = TTLCache(1, timedelta(seconds=3), datetime.now)
self.state_of_charge = None
def __call(self, name, *args):
for _ in range(3):
try:
method = getattr(self.wallbox, name)
return method(self.charger_id, *args)
except requests.exceptions.HTTPError:
log_exception('%s%s failed' % (name, args), *sys.exc_info())
self.wallbox.authenticate()
except requests.exceptions.ReadTimeout:
log_exception('%s%s failed' % (name, args), *sys.exc_info())
sleep(0.5)
raise RuntimeError('%s%s failed too many times' % (name, args))
@property
def status(self):
'''JSON representation of the charger status.'''
try:
return self.cache['status']
except KeyError:
self.cache['status'] = self.__call('getChargerStatus')
return self.cache['status']
@Pyro5.api.expose
@Pyro5.api.oneway
def start(self):
debug('Starting')
self.__call('resumeChargingSession')
self.cache.clear()
@Pyro5.api.expose
@Pyro5.api.oneway
def stop(self):
debug('Stopping')
self.__call('pauseChargingSession')
self.__call('setMaxChargingCurrent', self.min_available_current)
self.cache.clear()
@property
def status_description(self):
'''String describing the charger status.'''
return self.status['status_description']
@property
def min_available_current(self):
'''Minimum current supported by the charger in Ampere.'''
return self.settings.min_available_current
@property
def max_available_current(self):
'''Maximal current supported by the charger in Ampere.'''
return self.status['config_data']['max_available_current']
@Pyro5.api.expose
def is_running(self) -> bool:
return self.status_description == 'Charging'
@Pyro5.api.expose
def is_stoppable(self):
return True
@Pyro5.api.expose
def is_runnable(self):
'''True if calling the 'start' function would initiate charging.'''
return self.status_description in self.PLUGGED_IN \
and self.status_description != self.FULLY_CHARGED
@Pyro5.api.expose
def meet_running_criteria(self, ratio, power=0) -> bool:
debug('meet_running_criteria(%.3f, %.3f)' % (ratio, power))
if not self.is_runnable():
return False
if self.is_running():
return ratio >= 0.8
return ratio >= 1
@property
@Pyro5.api.expose
def desc(self):
description = '%s(%s' % (self.__class__.__name__, self.priority.name)
if self.state_of_charge is not None:
description += ', %.1f%%' % self.state_of_charge
return description + ')'
@property
@Pyro5.api.expose
def power(self):
return self.min_available_current * .24
def adjust_priority(self, state_of_charge):
'''Update the priority according to the current state of charge'''
self.state_of_charge = state_of_charge
thresholds = {Priority.URGENT: 50, Priority.HIGH: 65,
Priority.MEDIUM: 80, Priority.LOW: 101}
for priority in reversed(Priority):
if state_of_charge < thresholds[priority]:
self.priority = priority
break
def current_rate_for(self, power):
'''Return the appropriate current in Ampere for POWER in KWh.'''
rate = max(int(power / .24), self.min_available_current)
return min(rate, self.max_available_current)
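    # Worked example (added for illustration, not in the original code): the
    # 0.24 factor treats POWER as kW delivered at roughly 240 V, so 1 A ~ 0.24 kW.
    # current_rate_for(7.2) -> max(int(7.2 / .24), 6) = 30 A, then capped by
    # max_available_current; current_rate_for(0.5) falls back to the configured
    # 6 A minimum from the default settings.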
def adjust_charge_rate(self, record):
'''Adjust the charging rate according to the instant POWER record.'''
available = -(record['net'] - self.usage(record))
current = self.current_rate_for(available)
if self.status['config_data']['max_charging_current'] != current:
debug('Adjusting to %dA (%.2f KWh)' % (current, available))
self.__call('setMaxChargingCurrent', current)
def main():
'''Register and run the car charger task.'''
# pylint: disable=too-many-locals
base = os.path.splitext(__file__)[0]
config = init(base + '.log')['Wallbox']
settings = Settings(base + '.ini', DEFAULT_SETTINGS)
wallbox = Wallbox(config['login'], config['password'],
requestGetTimeout=5)
wallbox.authenticate()
device_id = int(config['device_id'])
if device_id not in wallbox.getChargersList():
        raise RuntimeError('charger ID %d does not exist' % device_id)
task = CarCharger(wallbox, device_id, settings)
Pyro5.config.COMMTIMEOUT = 5
daemon = Pyro5.api.Daemon()
nameserver = NameServer()
uri = daemon.register(task)
nameserver.register_task(MODULE_NAME, uri)
sensor = CarSensorProxy()
power_sensor = SensorReader('power')
power_simulator = SensorReader('power_simulator')
scheduler = SchedulerProxy()
watchdog = WatchdogProxy()
debug("... is now ready to run")
while True:
settings.load()
watchdog.register(os.getpid(), MODULE_NAME)
watchdog.kick(os.getpid())
try:
nameserver.register_task(MODULE_NAME, uri)
except RuntimeError:
log_exception('Failed to register the sensor',
*sys.exc_info())
# Self-testing: on basic operation failure unregister from the
# scheduler.
try:
task.status_description # pylint: disable=pointless-statement
scheduler.register_task(uri)
except RuntimeError:
debug('Self-test failed, unregister from the scheduler')
scheduler.unregister_task(uri)
next_cycle = datetime.now() + timedelta(
# pylint: disable=maybe-no-member
seconds=settings.cycle_length)
while True:
timeout = next_cycle - datetime.now()
sockets, _, _ = select(daemon.sockets, [], [],
timeout.seconds
+ timeout.microseconds / 1000000)
if sockets:
daemon.events(sockets)
if datetime.now() >= next_cycle:
break
try:
task.adjust_priority(sensor.read()['state of charge'])
except RuntimeError:
debug('Could not read current state of charge')
if not task.is_running():
continue
record = power_sensor.read(scale=RecordScale.SECOND)
if not record:
debug('No new power record, use the simulator')
record = power_simulator.read(scale=RecordScale.SECOND)
if not record:
debug('Failed to get a record from the simulator')
if record:
try:
task.adjust_charge_rate(record)
except RuntimeError:
log_exception('adjust_charge_rate() failed', *sys.exc_info())
if __name__ == "__main__":
main()
|
python
|
import cv2
import numpy as np
from PIL import Image, ImageDraw
from scipy.spatial import ConvexHull
from skimage import filters
import tensorflow as tf
from monopsr.core import evaluation
from monopsr.datasets.kitti import instance_utils, calib_utils
from monopsr.visualization import vis_utils
def np_proj_error(points_uv, points_mask, exp_grid_uv):
"""Calculates projection error of instance points with a 2D box
Args:
points_uv: (2, N) Points in u, v coordinates
points_mask: (N,) Mask of valid points
exp_grid_uv: expected [u, v] grid projection
Returns:
proj_err_norm: projection error normalized by the number of valid pixels
"""
# Calculation projection error
pred_grid_uv = points_uv.reshape(2, *exp_grid_uv[0].shape)
points_mask = points_mask.reshape(1, *exp_grid_uv[0].shape)
pred_proj_err_uv = pred_grid_uv - exp_grid_uv
pred_proj_err = np.sum(np.abs(pred_proj_err_uv) * points_mask)
proj_err_norm = pred_proj_err / np.count_nonzero(points_mask)
return proj_err_norm
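# Minimal self-contained check (illustrative, not in the original file), kept
# as comments so it does not run at import time. Shapes follow the docstring:
# a 2x2 expected [u, v] grid, flattened predicted points and an all-valid
# mask; every point is off by one pixel in both u and v.
#
#     exp_grid = np.zeros((2, 2, 2))
#     pred_uv = np.ones((2, 4))
#     mask = np.ones(4)
#     np_proj_error(pred_uv, mask, exp_grid)   # -> 2.0 (|du| + |dv| per pixel)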
def scipy_proj_error(x, args):
"""Calculates projection error of instance points with a 2D box.
Used for minimizing projection error when varying xz_dist and centroid_y.
Args:
x: array of inputs
xz_dist: distance along viewing angle
centroid_y: box centroid y
args: dict with additional data
'viewing_angle': viewing angle
'inst_points' = (N, 3) instance points
'cam_p' = (3, 4) camera projection matrix
'exp_grid_uv' = expected [u, v] grid projection
'rotate_view' = bool of whether to rotate by viewing angle
Returns:
proj_err_norm: projection error normalized by the number of valid pixels
"""
# Parse inputs from x
xz_dist = x[0]
centroid_y = x[1]
# Parse inputs from args
viewing_angle = args['viewing_angle']
inst_points = args['inst_points']
cam_p = args['cam_p']
exp_grid_uv = args['exp_grid_uv']
rotate_view = args['rotate_view']
pred_points_in_img, valid_points_mask = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, inst_points, cam_p, rotate_view=rotate_view)
proj_err_norm = np_proj_error(pred_points_in_img, valid_points_mask, exp_grid_uv)
return proj_err_norm
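# Usage sketch (illustrative, not in the original module): scipy_proj_error is
# shaped for scipy.optimize.minimize with x = [xz_dist, centroid_y] and the
# extra data passed through `args`. The initial guess and method below are
# assumptions; the dict keys match the docstring above.
#
#     from scipy.optimize import minimize
#     result = minimize(
#         scipy_proj_error,
#         x0=[20.0, 1.5],
#         args=({'viewing_angle': viewing_angle,
#                'inst_points': inst_points,
#                'cam_p': cam_p,
#                'exp_grid_uv': exp_grid_uv,
#                'rotate_view': True},),
#         method='Nelder-Mead')
#     xz_dist_opt, centroid_y_opt = result.x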
def scipy_proj_error_with_viewing_angle(x, args):
"""Calculates projection error of instance points with a 2D box.
Used for minimizing projection error when varying xz_dist, centroid_y, and viewing_angle.
Args:
x: array of inputs
xz_dist: distance along viewing angle
centroid_y: box centroid y
viewing_angle: viewing angle
args: dict with additional data
'inst_points' = (N, 3) instance points
'cam_p' = (3, 4) camera projection matrix
'exp_grid_uv' = expected [u, v] grid projection
'rotate_view' = bool of whether to rotate by viewing angle
Returns:
proj_err_norm: projection error normalized by the number of valid pixels
"""
# Parse inputs from x
xz_dist = x[0]
centroid_y = x[1]
viewing_angle = x[2]
# Parse inputs from args
inst_points = args['inst_points']
cam_p = args['cam_p']
exp_grid_uv = args['exp_grid_uv']
rotate_view = args['rotate_view']
pred_points_in_img, valid_points_mask = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, inst_points, cam_p, rotate_view=rotate_view)
proj_err_norm = np_proj_error(pred_points_in_img, valid_points_mask, exp_grid_uv)
return proj_err_norm
def tf_proj_error(points_uv, points_mask, exp_grid_uv):
"""
Args:
points_uv:
points_mask:
exp_grid_uv:
Returns:
"""
# return tf.zeros(32)
raise NotImplementedError('Not implemented yet')
def np_proj_err_rgb_images(xz_dist, centroid_y, viewing_angle,
cam2_inst_points_local, cam_p,
inst_rgb, inst_mask, image, valid_mask_map, box_2d,
guess_row_col, show_images=False):
"""(Work in progress) Calculates the projection error based on RGB similarity and shows
images for comparison.
Args:
xz_dist: Distance along viewing angle
centroid_y: Object centroid y
viewing_angle: Viewing angle
cam2_inst_points_local: (N, 3) Instance points in local frame
cam_p: (3, 4) Camera projection matrix
        inst_rgb: List of instance RGB values
        inst_mask: (H, W) Instance mask
        image: Image of sample
        valid_mask_map: (H, W) Map mask of valid values
        box_2d: 2D box [y1, x1, y2, x2]
        guess_row_col: Guess index, used for numbering images
show_images: (optional) Whether to show comparison images
Returns:
image_diff_total: Lowest image difference
"""
# Get projection into image
proj_uv, valid_points_mask = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, cam2_inst_points_local, cam_p)
# Get RGB values of projected pixels
proj_uv_int = np.round(proj_uv).astype(np.int32)
guess_rgb = image[proj_uv_int[1], proj_uv_int[0]]
guess_rgb_map = guess_rgb.reshape(48, 48, 3) * np.expand_dims(valid_mask_map, 2)
# Estimated image
est_image = np.copy(image) * np.expand_dims(~inst_mask, 2)
est_image[proj_uv_int[1], proj_uv_int[0]] = inst_rgb
est_image[proj_uv_int[1]-1, proj_uv_int[0]] = inst_rgb
est_image[proj_uv_int[1]+1, proj_uv_int[0]] = inst_rgb
est_image[proj_uv_int[1], proj_uv_int[0]-1] = inst_rgb
est_image[proj_uv_int[1], proj_uv_int[0]+1] = inst_rgb
box_2d_int = np.round(box_2d).astype(np.int32)
est_inst_rgb = est_image[box_2d_int[0]:box_2d_int[2], box_2d_int[1]:box_2d_int[3]]
est_inst_rgb_resized = cv2.resize(est_inst_rgb, (48, 48))
# Check image similarity
inst_rgb_map = inst_rgb.reshape(48, 48, 3)
# image_diff_map = abs(inst_rgb_map - guess_rgb_map)
image_diff_map = abs(inst_rgb_map - est_inst_rgb_resized)
image_diff_map_norm = np.sum(image_diff_map, axis=2) / 255.0
image_diff_total = np.sum(image_diff_map_norm)
if show_images:
# cv2_size = (160, 160)
cv2_size = (90, 90)
cv2_size = (120, 120)
# # Show instance RGB for comparison
# inst_rgb_map_resized = cv2.resize(inst_rgb_map, cv2_size)
# vis_utils.cv2_imshow('inst_rgb_map_resized {}'.format(guess_row_col),
# inst_rgb_map_resized,
# size_wh=cv2_size, row_col=guess_row_col)
#
# # Show guess
# guess_rgb_map_resized = cv2.resize(guess_rgb_map, (200, 200))
# vis_utils.cv2_imshow('guess_rgb_map_resized {}'.format(guess_row_col),
# guess_rgb_map_resized,
# size_wh=cv2_size, row_col=guess_row_col)
vis_utils.cv2_imshow('est_inst_rgb_resized {}'.format(guess_row_col),
est_inst_rgb_resized,
size_wh=cv2_size, row_col=guess_row_col)
# combined = cv2.addWeighted(inst_rgb_map, 0.5, est_inst_rgb_resized, 0.5, 0.0)
# vis_utils.cv2_imshow('combined {}'.format(guess_row_col),
# combined,
# size_wh=cv2_size, row_col=guess_row_col)
# vis_utils.cv2_imshow('image_diff_map_norm {}'.format(guess_row_col),
# image_diff_map_norm,
# size_wh=cv2_size, row_col=guess_row_col)
# vis_utils.cv2_imshow('valid_mask {}'.format(centroid_y),
# (valid_mask_map * 255).astype(np.uint8),
# size_wh=cv2_size, row_col=guess_row_col)
return image_diff_total
def np_proj_err_rgb(xz_dist, centroid_y, viewing_angle, cam2_inst_points_local, cam_p,
inst_rgb, image, valid_mask_map):
# Get instance RGB
inst_rgb_map = inst_rgb.reshape(48, 48, 3)
# Project points to image
proj_uv, _ = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, cam2_inst_points_local, cam_p)
# Get RGB values of projected pixels
proj_uv_int = np.round(proj_uv).astype(np.int32)
guess_rgb = image[proj_uv_int[1], proj_uv_int[0]]
guess_rgb_map = guess_rgb.reshape(48, 48, 3) * np.expand_dims(valid_mask_map, 2)
# Check image similarity
image_diff_map = abs(inst_rgb_map - guess_rgb_map)
image_diff_map_norm = np.sum(image_diff_map, axis=2) / 255.0
image_diff_total = np.sum(image_diff_map_norm) / np.count_nonzero(valid_mask_map)
return image_diff_total
def scipy_proj_err_rgb(x, args):
"""Calculates projection error based on RGB similarity.
(Minimization with this doesn't seem to work since
large patches will be matched at incorrect positions)
"""
# Parse inputs from x
xz_dist = x[0]
centroid_y = x[1]
if len(x) == 3:
viewing_angle = x[2]
else:
viewing_angle = args['viewing_angle']
# Parse inputs from args
inst_points = args['inst_points']
cam_p = args['cam_p']
inst_rgb = args['inst_rgb']
image = args['image']
valid_mask_map = args['valid_mask_map']
proj_err_rgb = np_proj_err_rgb(
xz_dist=xz_dist,
centroid_y=centroid_y,
viewing_angle=viewing_angle,
cam2_inst_points_local=inst_points,
cam_p=cam_p,
inst_rgb=inst_rgb,
image=image,
valid_mask_map=valid_mask_map,
)
return proj_err_rgb
def convex_hull_mask_iou(points_uv, im_shape, gt_hull_mask):
"""Computes masks by calculating a convex hull from points. Creates two masks (if possible),
one for the estimated foreground pixels and one for the estimated background pixels.
Args:
points_uv: (2, N) Points in u, v coordinates
im_shape: image shape [image_height, im_width]
gt_hull_mask: mask created by calculating convex hull
Returns:
best_iou: best mask iou calculated from the calculated hull masks and the ground truth hull
mask
"""
im_height, im_width = im_shape
# Segment the points into background and foreground
if len(set(points_uv[0])) > 1:
thresh = filters.threshold_li(points_uv[0])
pred_seg_1 = points_uv[0] > thresh
pred_seg_2 = points_uv[0] < thresh
segs = [pred_seg_1, pred_seg_2]
else:
# There is only one unique point so a threshold cannot be made
segs = [np.full(points_uv[0].shape, True, dtype=bool)]
mask_list = []
# Loop over both segments since it is uncertain which segment is foreground or background
for seg in segs:
# Obtain the coordinates of the pixels
pred_u = np.int32(points_uv[0][seg])
pred_v = np.int32(points_uv[1][seg])
# Remove duplicate coordinates by forming a set
coords = set(zip(pred_u, pred_v))
# Convex hull calculation requires a numpy array
coords = np.array(list(coords))
# Need at least 3 points to create convex hull
if len(coords) < 3:
continue
# Points must not lie along a single line in order to create convex hull
elif any(np.all(coords == coords[0, :], axis=0)):
continue
else:
hull = ConvexHull(coords)
img = Image.new('L', (im_width, im_height), 0)
vertices = list(zip(coords[hull.vertices, 0], coords[hull.vertices, 1]))
ImageDraw.Draw(img).polygon(vertices, outline=1, fill=1)
mask = np.array(img)
mask_list.append(mask)
best_iou = 0
for mask in mask_list:
iou = evaluation.mask_iou(mask, gt_hull_mask)
if iou > best_iou:
best_iou = iou
return best_iou
def scipy_convex_hull_mask_inv_iou(x, args):
"""Computes masks by calculating a convex hull from points. Creates two masks (if possible),
one for the estimated foreground pixels and one for the estimated background pixels.
Minimizes inverted IoU by varying xz_dist and centroid_y.
Args:
x: array of inputs
xz_dist: distance along viewing angle
centroid_y: box centroid y
args: dict with additional data
'viewing_angle': viewing angle
'inst_points': (N, 3) instance points
'cam_p': (3, 4) camera projection matrix
'im_shape': image shape [im_height, im_width]
'gt_hull_mask': expected mask created from instance mask
Returns:
inverted_iou: 1.0 - IoU of the mask computed from the convex hull and the gt hull mask
"""
# Parse inputs from x
xz_dist = x[0]
centroid_y = x[1]
# Parse inputs from args
viewing_angle = args['viewing_angle']
inst_points = args['inst_points']
cam_p = args['cam_p']
im_shape = args['im_shape']
gt_hull_mask = args['gt_hull_mask']
pred_points_in_img, valid_points_mask = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, inst_points, cam_p)
iou = convex_hull_mask_iou(pred_points_in_img, im_shape, gt_hull_mask)
# Invert IoU so it can be minimized
inverted_iou = 1.0 - iou
return inverted_iou
def scipy_convex_hull_mask_inv_iou_with_viewing_angle(x, args):
"""Computes masks by calculating a convex hull from points. Creates two masks (if possible),
one for the estimated foreground pixels and one for the estimated background pixels.
Minimizes inverted IoU by varying xz_dist, centroid_y, and viewing angle.
Args:
x: array of inputs
xz_dist: distance along viewing angle
centroid_y: box centroid y
viewing_angle: viewing angle
args: dict with additional data
'inst_points': (N, 3) instance points
'cam_p': (3, 4) camera projection matrix
'im_shape': image shape [im_height, im_width]
'gt_hull_mask': expected mask created from instance mask
Returns:
inverted_iou: 1.0 - IoU of the mask computed from the convex hull and the gt hull mask
"""
# Parse inputs from x
xz_dist = x[0]
centroid_y = x[1]
viewing_angle = x[2]
# Parse inputs from args
inst_points = args['inst_points']
cam_p = args['cam_p']
im_shape = args['im_shape']
gt_hull_mask = args['gt_hull_mask']
pred_points_in_img, valid_points_mask = instance_utils.proj_points(
xz_dist, centroid_y, viewing_angle, inst_points, cam_p)
iou = convex_hull_mask_iou(pred_points_in_img, im_shape, gt_hull_mask)
# Invert IoU so it can be minimized
inverted_iou = 1.0 - iou
return inverted_iou
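# --- Usage sketch (added for illustration; not part of the original module) ---
# The objective functions above follow the fun(x, args) signature that
# scipy.optimize.minimize passes through when called with args=(args,).
# A minimal, hedged example of how they might be driven is sketched below;
# the initial guess, the optimizer method, and the contents of the args dict
# are assumptions for illustration only.
#
#   from scipy.optimize import minimize
#
#   args = {
#       'viewing_angle': viewing_angle,      # precomputed viewing angle
#       'inst_points': inst_points,          # (N, 3) instance points
#       'cam_p': cam_p,                      # (3, 4) camera projection matrix
#       'im_shape': [im_height, im_width],
#       'gt_hull_mask': gt_hull_mask,        # mask built from the instance mask
#   }
#   x0 = np.array([initial_xz_dist, initial_centroid_y])
#   result = minimize(scipy_convex_hull_mask_inv_iou, x0, args=(args,),
#                     method='Nelder-Mead')
#   best_xz_dist, best_centroid_y = result.x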
|
python
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test the ExpiredDataRemover object."""
import logging
import re
from datetime import datetime
from unittest.mock import patch
from uuid import uuid4
import pytz
from dateutil import relativedelta
from api.provider.models import Provider
from masu.external.date_accessor import DateAccessor
from masu.processor.expired_data_remover import ExpiredDataRemover
from masu.processor.expired_data_remover import ExpiredDataRemoverError
from masu.test import MasuTestCase
from masu.test.database.helpers import ManifestCreationHelper
from reporting_common.models import CostUsageReportManifest
class ExpiredDataRemoverTest(MasuTestCase):
"""Test Cases for the ExpiredDataRemover object."""
def test_initializer(self):
"""Test to init."""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
self.assertEqual(remover._months_to_keep, 3)
self.assertEqual(remover._line_items_months, 1)
remover2 = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS, 2, 2)
self.assertEqual(remover2._months_to_keep, 2)
self.assertEqual(remover2._line_items_months, 2)
def test_initializer_ocp(self):
"""Test to init for OCP."""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_OCP)
self.assertEqual(remover._months_to_keep, 3)
self.assertEqual(remover._line_items_months, 1)
def test_initializer_azure(self):
"""Test to init for Azure."""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AZURE)
self.assertEqual(remover._months_to_keep, 3)
self.assertEqual(remover._line_items_months, 1)
def test_initializer_invalid_provider(self):
"""Test to init with unknown provider."""
with self.assertRaises(ExpiredDataRemoverError):
ExpiredDataRemover(self.schema, "BAD")
@patch("masu.processor.aws.aws_report_db_cleaner.AWSReportDBCleaner.__init__", side_effect=Exception)
def test_initializer_provider_exception(self, mock_aws_cleaner):
"""Test to init."""
with self.assertRaises(ExpiredDataRemoverError):
ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
def test_calculate_expiration_date(self):
"""Test that the expiration date is correctly calculated."""
date_matrix = [
{
"current_date": datetime(year=2018, month=7, day=1),
"expected_expire": datetime(year=2018, month=4, day=1, tzinfo=pytz.UTC),
"months_to_keep": None,
},
{
"current_date": datetime(year=2018, month=7, day=31),
"expected_expire": datetime(year=2018, month=4, day=1, tzinfo=pytz.UTC),
"months_to_keep": None,
},
{
"current_date": datetime(year=2018, month=3, day=20),
"expected_expire": datetime(year=2017, month=12, day=1, tzinfo=pytz.UTC),
"months_to_keep": None,
},
{
"current_date": datetime(year=2018, month=7, day=1),
"expected_expire": datetime(year=2017, month=7, day=1, tzinfo=pytz.UTC),
"months_to_keep": 12,
},
{
"current_date": datetime(year=2018, month=7, day=31),
"expected_expire": datetime(year=2017, month=7, day=1, tzinfo=pytz.UTC),
"months_to_keep": 12,
},
{
"current_date": datetime(year=2018, month=3, day=20),
"expected_expire": datetime(year=2016, month=3, day=1, tzinfo=pytz.UTC),
"months_to_keep": 24,
},
]
for test_case in date_matrix:
with patch.object(DateAccessor, "today", return_value=test_case.get("current_date")):
retention_policy = test_case.get("months_to_keep")
if retention_policy:
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS, retention_policy)
else:
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
expire_date = remover._calculate_expiration_date()
self.assertEqual(expire_date, test_case.get("expected_expire"))
def test_remove(self):
"""Test that removes the expired data based on the retention policy."""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
removed_data = remover.remove()
self.assertEqual(len(removed_data), 0)
@patch("masu.processor.expired_data_remover.AWSReportDBCleaner.purge_expired_report_data")
def test_remove_provider(self, mock_purge):
"""Test that remove is called with provider_uuid."""
provider_uuid = self.aws_provider_uuid
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
remover.remove(provider_uuid=provider_uuid)
mock_purge.assert_called_with(simulate=False, provider_uuid=provider_uuid)
@patch("masu.processor.expired_data_remover.AWSReportDBCleaner.purge_expired_line_item")
def test_remove_provider_items_only(self, mock_purge):
"""Test that remove is called with provider_uuid items only."""
provider_uuid = self.aws_provider_uuid
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
date = remover._calculate_expiration_date(line_items_only=True)
remover.remove(provider_uuid=provider_uuid, line_items_only=True)
mock_purge.assert_called_with(expired_date=date, simulate=False, provider_uuid=provider_uuid)
@patch("masu.processor.expired_data_remover.AWSReportDBCleaner.purge_expired_line_item")
def test_remove_items_only(self, mock_purge):
"""Test that remove is called with provider_uuid items only."""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
date = remover._calculate_expiration_date(line_items_only=True)
remover.remove(line_items_only=True)
mock_purge.assert_called_with(expired_date=date, simulate=False)
def test_delete_expired_cost_usage_report_manifest(self):
"""
Test that expired CostUsageReportManifests are removed.
This test inserts CostUsageReportManifest objects,
and then deletes CostUsageReportManifest objects older than
the calculated expiration_date.
"""
provider_type_dict = {
Provider.PROVIDER_AWS_LOCAL: self.aws_provider_uuid,
Provider.PROVIDER_AZURE_LOCAL: self.azure_provider_uuid,
Provider.PROVIDER_OCP: self.ocp_provider_uuid,
}
for provider_type in provider_type_dict:
remover = ExpiredDataRemover(self.schema, provider_type)
expiration_date = remover._calculate_expiration_date()
current_month = datetime.today().replace(day=1)
day_before_cutoff = expiration_date - relativedelta.relativedelta(days=1)
dates = [current_month, day_before_cutoff, expiration_date]
uuids = []
uuids_to_be_deleted = []
for date in dates:
manifest_creation_datetime = current_month
manifest_updated_datetime = manifest_creation_datetime + relativedelta.relativedelta(days=2)
uuid = uuid4()
data = {
"assembly_id": uuid,
"manifest_creation_datetime": manifest_creation_datetime,
"manifest_updated_datetime": manifest_updated_datetime,
"billing_period_start_datetime": date,
"num_total_files": 1,
"provider_id": provider_type_dict[provider_type],
}
uuids.append(uuid)
if date == day_before_cutoff:
uuids_to_be_deleted.append(uuid)
manifest_entry = CostUsageReportManifest(**data)
manifest_entry.save()
remover.remove()
for uuid in uuids:
record_count = CostUsageReportManifest.objects.filter(assembly_id=uuid).count()
if uuid in uuids_to_be_deleted:
self.assertEqual(0, record_count)
else:
self.assertEqual(1, record_count)
def test_simulate_delete_expired_cost_usage_report_manifest(self):
"""
Test that expired CostUsageReportManifest is not removed during simulation.
Test that the number of records that would have been deleted is logged.
"""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
expiration_date = remover._calculate_expiration_date()
day_before_cutoff = expiration_date - relativedelta.relativedelta(days=1)
day_before_cutoff_data = {
"assembly_id": uuid4(),
"manifest_creation_datetime": None,
"manifest_updated_datetime": None,
"billing_period_start_datetime": day_before_cutoff,
"num_total_files": 1,
"provider_id": self.aws_provider_uuid,
}
CostUsageReportManifest(**day_before_cutoff_data).save()
with self.assertLogs(logger="masu.processor.expired_data_remover", level="INFO") as cm:
logging.disable(logging.NOTSET)
remover.remove(simulate=True)
expected_log_message = "Removed CostUsageReportManifest"
# Check if the log message exists in the log output:
self.assertTrue(
any(match is not None for match in [re.search(expected_log_message, line) for line in cm.output]),
"Expected to see log message: "
+ expected_log_message
+ "in the list of log messages"
+ " but the list of log messages was instead : "
+ str(cm.output),
)
# Re-enable log suppression
logging.disable(logging.CRITICAL)
def test_remove_cost_usage_manifests_by_provider_uuid(self):
"""
Test that calling remove(provider_uuid) deletes CostUsageReportManifests.
CostUsageReportManifests that are associated with the provider_uuid
should be deleted.
"""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS_LOCAL)
expiration_date = remover._calculate_expiration_date()
current_month = datetime.today().replace(day=1)
day_before_cutoff = expiration_date - relativedelta.relativedelta(days=1)
fixture_records = [
(self.aws_provider_uuid, expiration_date), # not expired, should not delete
(self.aws_provider_uuid, day_before_cutoff), # expired, should delete
(self.azure_provider_uuid, day_before_cutoff), # expired, should not delete
]
manifest_uuids = []
manifest_uuids_to_be_deleted = []
manifest_creation_datetime = current_month
manifest_updated_datetime = manifest_creation_datetime + relativedelta.relativedelta(days=2)
for fixture_record in fixture_records:
manifest_uuid = uuid4()
data = {
"assembly_id": manifest_uuid,
"manifest_creation_datetime": manifest_creation_datetime,
"manifest_updated_datetime": manifest_updated_datetime,
"billing_period_start_datetime": fixture_record[1],
"num_total_files": 1,
"provider_id": fixture_record[0],
}
CostUsageReportManifest(**data).save()
manifest_uuids.append(manifest_uuid)
if fixture_record[1] == day_before_cutoff and fixture_record[0] == self.aws_provider_uuid:
manifest_uuids_to_be_deleted.append(manifest_uuid)
remover.remove(provider_uuid=self.aws_provider_uuid)
for manifest_uuid in manifest_uuids:
record_count = CostUsageReportManifest.objects.filter(assembly_id=manifest_uuid).count()
if manifest_uuid in manifest_uuids_to_be_deleted:
self.assertEqual(0, record_count)
else:
self.assertEqual(1, record_count)
def test_simulate_delete_expired_cost_usage_report_manifest_by_provider_uuid(self):
"""
Test simulating the deletion of expired CostUsageReportManifests
using remove(provider_uuid).
"""
remover = ExpiredDataRemover(self.schema, Provider.PROVIDER_AWS)
expiration_date = remover._calculate_expiration_date()
day_before_cutoff = expiration_date - relativedelta.relativedelta(days=1)
manifest_id = 7766
day_before_cutoff_data = {
"id": manifest_id,
"assembly_id": uuid4(),
"manifest_creation_datetime": None,
"manifest_updated_datetime": None,
"billing_period_start_datetime": day_before_cutoff,
"num_total_files": 1,
"provider_id": self.aws_provider_uuid,
}
manifest_entry = CostUsageReportManifest(**day_before_cutoff_data)
manifest_entry.save()
manifest_helper = ManifestCreationHelper(
manifest_id, manifest_entry.num_total_files, manifest_entry.assembly_id
)
manifest_helper.generate_test_report_files()
manifest_helper.process_all_files()
count_records = CostUsageReportManifest.objects.count()
with self.assertLogs(logger="masu.processor.expired_data_remover", level="INFO") as cm:
logging.disable(logging.NOTSET)
remover.remove(simulate=True, provider_uuid=self.aws_provider_uuid)
expected_log_message = "Removed CostUsageReportManifest"
# Check if the log message exists in the log output:
self.assertTrue(
any(match is not None for match in [re.search(expected_log_message, line) for line in cm.output]),
"Expected to see log message: "
+ expected_log_message
+ "in the list of log messages"
+ " but the list of log messages was instead : "
+ str(cm.output),
)
# Re-enable log suppression
logging.disable(logging.CRITICAL)
self.assertEqual(count_records, CostUsageReportManifest.objects.count())
def test_remove_items_only_azure(self):
"""Test that remove is called with provider_uuid items only."""
azure_types = [Provider.PROVIDER_AZURE, Provider.PROVIDER_AZURE_LOCAL]
for az_type in azure_types:
remover = ExpiredDataRemover(self.schema, az_type)
result_no_provider = remover.remove(line_items_only=True)
self.assertIsNone(result_no_provider)
result_with_provider = remover.remove(line_items_only=True, provider_uuid="1234")
self.assertIsNone(result_with_provider)
|
python
|
import threading
import time
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.widgets import Slider, Button
import logging
logging.basicConfig(level=logging.DEBUG,
format='(%(threadName)-9s) %(message)s',)
class MyFigure(Figure):
def __init__(self, *args, **kwargs):
"""
custom kwarg figtitle is a figure title
"""
figtitle = kwargs.pop('figtitle', 'hi mom')
Figure.__init__(self, *args, **kwargs)
self.text(0.5, 0.95, figtitle, ha='center')
self.y = np.zeros(100, dtype=float)
self.x = range(100)
self.subplots_adjust(left=0.25, bottom=0.25)
self.lock = threading.Lock()
def init_gui(self):
self.ax_top = self.add_subplot(111)  # use self rather than the module-level fig global
self.ax_top.set_ylim([0,1])
self.ax_btn_update = plt.axes([0.8, 0.025, 0.1, 0.04])
self.ax_sld_value = plt.axes([0.25, 0.1, 0.65, 0.03])
self.btn_update = Button(self.ax_btn_update, 'Update')
self.btn_update.on_clicked(self.rand)
self.sld_value = Slider(self.ax_sld_value, 'Value', 0., 1.0, valinit=0.5)
self.sld_value.on_changed(self.value)
def rand(self, event):
self.y = np.delete(self.y, 0)
self.y = np.append(self.y, [np.random.random_sample()])
self.ax_top.cla()
self.ax_top.set_ylim([0,1])
self.ax_top.plot(self.x, self.y, 'b')
self.canvas.draw_idle()
def value(self, val):
self.y = np.delete(self.y, 0)
self.y = np.append(self.y, [float(val)])
self.ax_top.cla()
self.ax_top.set_ylim([0,1])
self.ax_top.plot(self.x, self.y, 'b')
self.canvas.draw_idle()
def async_event(self ):
while True:
self.rand(0)
time.sleep(0.1)
if __name__ == '__main__':
fig = plt.figure(FigureClass=MyFigure, figtitle='my title')
fig.init_gui()
t = threading.Thread(target=fig.async_event,args=())
t.start()
plt.show()
|
python
|
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from ultra.utils.ray import default_ray_kwargs
# Set environment to better support Ray
os.environ["MKL_NUM_THREADS"] = "1"
import time
import psutil, pickle, dill
import gym, ray, torch, argparse
from smarts.zoo.registry import make
from ultra.utils.episode import episodes
from ultra.evaluate import evaluation_check
num_gpus = 1 if torch.cuda.is_available() else 0
# @ray.remote(num_gpus=num_gpus / 2, max_calls=1)
@ray.remote(num_gpus=num_gpus / 2)
def train(task, num_episodes, policy_class, eval_info, timestep_sec, headless, seed):
torch.set_num_threads(1)
total_step = 0
finished = False
# --------------------------------------------------------
# Initialize Agent and social_vehicle encoding method
# -------------------------------------------------------
AGENT_ID = "007"
spec = make(locator=policy_class)
env = gym.make(
"ultra.env:ultra-v0",
agent_specs={AGENT_ID: spec},
scenario_info=task,
headless=headless,
timestep_sec=timestep_sec,
seed=seed,
)
agent = spec.build_agent()
for episode in episodes(num_episodes, etag=policy_class):
observations = env.reset()
state = observations[AGENT_ID]
dones, infos = {"__all__": False}, None
episode.reset()
experiment_dir = episode.experiment_dir
# save entire spec [ policy_params, reward_adapter, observation_adapter]
if not os.path.exists(f"{experiment_dir}/spec.pkl"):
if not os.path.exists(experiment_dir):
os.makedirs(experiment_dir)
with open(f"{experiment_dir}/spec.pkl", "wb") as spec_output:
dill.dump(spec, spec_output, pickle.HIGHEST_PROTOCOL)
while not dones["__all__"]:
if episode.get_itr(AGENT_ID) >= 1000000: # 1M observation break
finished = True
break
evaluation_check(
agent=agent,
agent_id=AGENT_ID,
policy_class=policy_class,
episode=episode,
**eval_info,
**env.info,
)
action = agent.act(state, explore=True)
observations, rewards, dones, infos = env.step({AGENT_ID: action})
next_state = observations[AGENT_ID]
loss_output = agent.step(
state=state,
action=action,
reward=rewards[AGENT_ID],
next_state=next_state,
done=dones[AGENT_ID],
)
episode.record_step(
agent_id=AGENT_ID,
infos=infos,
rewards=rewards,
total_step=total_step,
loss_output=loss_output,
)
total_step += 1
state = next_state
episode.record_episode()
episode.record_tensorboard(agent_id=AGENT_ID)
if finished:
break
env.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser("intersection-single-agent")
parser.add_argument(
"--task", help="Tasks available : [0, 1, 2, 3]", type=str, default="1"
)
parser.add_argument(
"--level",
help="Tasks available : [easy, medium, hard, no-traffic]",
type=str,
default="easy",
)
parser.add_argument(
"--episodes", help="number of training episodes", type=int, default=1000000
)
parser.add_argument(
"--timestep", help="environment timestep (sec)", type=float, default=0.1
)
parser.add_argument(
"--headless", help="run without envision", type=bool, default=False
)
parser.add_argument(
"--eval-episodes", help="number of evaluation episodes", type=int, default=200
)
parser.add_argument(
"--eval-rate",
help="evaluation rate based on number of observations",
type=int,
default=10000,
)
parser.add_argument(
"--seed", help="environment seed", default=2, type=int,
)
args = parser.parse_args()
num_cpus = max(
1, psutil.cpu_count(logical=False) - 1
) # remove `logical=False` to use all cpus
policy_class = "ultra.baselines.sac:sac-v0"
# ray_kwargs = default_ray_kwargs(num_cpus=num_cpus, num_gpus=num_gpus)
ray.init() # **ray_kwargs)
# try:
ray.wait(
[
train.remote(
task=(args.task, args.level),
num_episodes=int(args.episodes),
eval_info={
"eval_rate": float(args.eval_rate),
"eval_episodes": int(args.eval_episodes),
},
timestep_sec=float(args.timestep),
headless=args.headless,
policy_class=policy_class,
seed=args.seed,
)
]
)
# finally:
# time.sleep(1)
# ray.shutdown()
|
python
|
#!/usr/local/bin/python3.3
def echo(message):
print(message)
return
echo('Direct Call')
x = echo
x('Indirect Call')
def indirect(func, arg):
func(arg)
indirect(echo, "Argument Call")
schedule = [(echo, 'Spam'), (echo, 'Ham')]
for (func, arg) in schedule:
func(arg)
def make(label):
def echo(message):
print(label + ': ' + message)
return echo
F = make('Spam')
F('Eggs')
F('Ham')
def func(a):
b = 'spam'
return b * a
print(func(8))
print(dir(func))
func.handles = 'Bottom-Press'
func.count = 0
print(dir(func))
def func(a: 'spam', b: (1, 10), c: float) -> int:
return a+b+c
print(func.__annotations__)
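# The print above shows the collected annotations, typically something like
# {'a': 'spam', 'b': (1, 10), 'c': <class 'float'>, 'return': <class 'int'>}
# (key order may vary on Python versions before 3.7).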
|
python
|
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm.devices import prompt
from boardfarm.tests import rootfs_boot
class NetperfRFC2544(rootfs_boot.RootFSBootTest):
"""Single test to simulate RFC2544."""
def runTest(self):
"""Single test to simulate RFC2544."""
board = self.dev.board
lan = self.dev.lan
for sz in ["74", "128", "256", "512", "1024", "1280", "1518"]:
print("running %s UDP test" % sz)
lan.sendline(
"netperf -H 192.168.0.1 -t UDP_STREAM -l 60 -- -m %s" % sz)
lan.expect_exact(
"netperf -H 192.168.0.1 -t UDP_STREAM -l 60 -- -m %s" % sz)
lan.expect("UDP UNIDIRECTIONAL")
lan.expect(prompt, timeout=90)
board.sendline()
board.expect(prompt)
|
python
|
import SimpleITK as sitk
import numpy as np
def reshape_by_padding_upper_coords(image, new_shape, pad_value=None):
shape = tuple(list(image.shape))
new_shape = tuple(np.max(np.concatenate((shape, new_shape)).reshape((2,len(shape))), axis=0))
if pad_value is None:
if len(shape)==2:
pad_value = image[0,0]
elif len(shape)==3:
pad_value = image[0, 0, 0]
else:
raise ValueError("Image must be either 2 or 3 dimensional")
res = np.ones(list(new_shape), dtype=image.dtype) * pad_value
if len(shape) == 2:
res[0:0+int(shape[0]), 0:0+int(shape[1])] = image
elif len(shape) == 3:
res[0:0+int(shape[0]), 0:0+int(shape[1]), 0:0+int(shape[2])] = image
return res
def random_crop_3D_image_batched(img, crop_size):
if type(crop_size) not in (tuple, list):
crop_size = [crop_size] * (len(img.shape) - 2)
else:
assert len(crop_size) == (len(img.shape) - 2), "If you provide a list/tuple as center crop make sure it has the same len as your data has dims (3d)"
if crop_size[0] < img.shape[2]:
lb_x = np.random.randint(0, img.shape[2] - crop_size[0])
elif crop_size[0] == img.shape[2]:
lb_x = 0
else:
raise ValueError("crop_size[0] must be smaller or equal to the images x dimension")
if crop_size[1] < img.shape[3]:
lb_y = np.random.randint(0, img.shape[3] - crop_size[1])
elif crop_size[1] == img.shape[3]:
lb_y = 0
else:
raise ValueError("crop_size[1] must be smaller or equal to the images y dimension")
if crop_size[2] < img.shape[4]:
lb_z = np.random.randint(0, img.shape[4] - crop_size[2])
elif crop_size[2] == img.shape[4]:
lb_z = 0
else:
raise ValueError("crop_size[2] must be smaller or equal to the images z dimension")
return img[:, :, lb_x:lb_x + crop_size[0], lb_y:lb_y + crop_size[1], lb_z:lb_z + crop_size[2]]
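# --- Usage sketch (added for illustration; not part of the original module) ---
# reshape_by_padding_upper_coords pads a 2D/3D array up to new_shape with the
# original data anchored at the origin, while random_crop_3D_image_batched
# expects a batched, channeled volume of shape (batch, channels, x, y, z).
# The array sizes below are illustrative assumptions, not values from the
# original code.
if __name__ == "__main__":
    vol = np.random.rand(1, 4, 120, 120, 70).astype(np.float32)
    crop = random_crop_3D_image_batched(vol, (96, 96, 64))
    print(crop.shape)  # -> (1, 4, 96, 96, 64)

    slice_2d = np.random.rand(100, 80).astype(np.float32)
    padded = reshape_by_padding_upper_coords(slice_2d, (128, 128), pad_value=0)
    print(padded.shape)  # -> (128, 128)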
|
python
|
#
# PySNMP MIB module SUN-SNMP-NETRA-CT-RSC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SUN-SNMP-NETRA-CT-RSC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:12:10 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, ModuleIdentity, NotificationType, enterprises, iso, TimeTicks, MibIdentifier, Gauge32, IpAddress, Bits, Integer32, ObjectIdentity, Unsigned32, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ModuleIdentity", "NotificationType", "enterprises", "iso", "TimeTicks", "MibIdentifier", "Gauge32", "IpAddress", "Bits", "Integer32", "ObjectIdentity", "Unsigned32", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
DisplayString, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "MacAddress", "TextualConvention")
netraCtRscMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 42, 2, 65, 2))
netraCtRscMIB.setRevisions(('1900-04-18 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: netraCtRscMIB.setRevisionsDescriptions(('First version of MIB module SUN-SNMP-NETRA-CT-RSC-MIB.',))
if mibBuilder.loadTexts: netraCtRscMIB.setLastUpdated('0004181200Z')
if mibBuilder.loadTexts: netraCtRscMIB.setOrganization('')
if mibBuilder.loadTexts: netraCtRscMIB.setContactInfo('')
if mibBuilder.loadTexts: netraCtRscMIB.setDescription('The MIB module for the Netra ct 400/800 Remote System Control Products')
netraCtRscObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1))
netraCtRscEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 2))
netraCtRscExpmnt = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3))
netraCtRscAdminObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1))
netraCtRscConfigObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2))
netraCtRscSerial2Objs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3))
netraCtRscModemObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4))
netraCtRscEnetObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5))
netraCtRscEnvObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6))
netraCtRscLogObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7))
netraCtRscRccConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3, 1))
netraCtRscTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 2, 0))
class DateAndTime(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(11, 11)
fixedLength = 11
netraCtRscAdminRscReset = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("set", 1), ("clear", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAdminRscReset.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAdminRscReset.setDescription('Setting this will soft Reset only the RSC (Remote System Controller) card. The clear(2) setting is read-only for this variable. This variable will always read as clear(2).')
netraCtRscAdminHostReset = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("set", 1), ("clear", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAdminHostReset.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAdminHostReset.setDescription('Setting netraCtRscAdminHostReset will either send a break to the host or toggle a hard reset line. A break will be sent if netraCtRscPanicDump contains the value of on(1). Otherwise, a hard reset will occur. The clear(2) setting is read-only for this variable. This variable will always read as clear(2).')
netraCtRscAdminXir = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("set", 1), ("clear", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAdminXir.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAdminXir.setDescription('Sends a hardware xir pulse to the host when set to true. This variable resets itself to clear(2) after the negation of the pulse.')
netraCtRscAdminNmi = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("set", 1), ("clear", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAdminNmi.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAdminNmi.setDescription('Sends a hardware nmi pulse to the host when set to true. This variable resets itself to clear(2) after the negation of the pulse.')
netraCtRscAdminBreak = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("set", 1), ("clear", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAdminBreak.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAdminBreak.setDescription('Sends a break to the Host when this is set. The clear(2) setting is read-only for this variable. This variable will always read as clear(2).')
netraCtRscGlobalPageFlag = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscGlobalPageFlag.setStatus('current')
if mibBuilder.loadTexts: netraCtRscGlobalPageFlag.setDescription('An on(1) to this variable will enable paging for RSC alerts. An off(2) will disable paging.')
netraCtRscGlobalEmailFlag = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscGlobalEmailFlag.setStatus('current')
if mibBuilder.loadTexts: netraCtRscGlobalEmailFlag.setDescription('An on(1) to this variable will enable email for RSC alerts. An off(2) will disable email.')
netraCtRscGlobalIPModeFlag = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("disabled", 1), ("config", 2), ("dhcp", 3), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscGlobalIPModeFlag.setStatus('current')
if mibBuilder.loadTexts: netraCtRscGlobalIPModeFlag.setDescription('IP mode global flag')
netraCtRscGlobalPPPFlag = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscGlobalPPPFlag.setStatus('current')
if mibBuilder.loadTexts: netraCtRscGlobalPPPFlag.setDescription('serial configuration flag indicating whether PPP should be the default.')
netraCtRscHostname = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscHostname.setStatus('current')
if mibBuilder.loadTexts: netraCtRscHostname.setDescription('name of Host connected to RSC')
netraCtRscCustomerInfo = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscCustomerInfo.setStatus('current')
if mibBuilder.loadTexts: netraCtRscCustomerInfo.setDescription('Customer information used in the message generated for a pager or email alert')
netraCtRscVersionBootMajor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionBootMajor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionBootMajor.setDescription('RSC Firmware BootMonitor Revision Major Number')
netraCtRscVersionBootMinor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionBootMinor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionBootMinor.setDescription('RSC Firmware BootMonitor Revision Minor Number')
netraCtRscVersionBootMicro = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionBootMicro.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionBootMicro.setDescription('RSC Firmware BootMonitor Revision Micro Number')
netraCtRscVersionMainMajor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionMainMajor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionMainMajor.setDescription('RSC Core RSC Revision Major Number')
netraCtRscVersionMainMinor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionMainMinor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionMainMinor.setDescription('RSC Core RSC Revision Minor Number')
netraCtRscVersionMainMicro = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionMainMicro.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionMainMicro.setDescription('RSC Core RSC Revision Micro Number')
netraCtRscVersionFirmwareMajor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMajor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMajor.setDescription('RSC Core Firmware Revision Major Number')
netraCtRscVersionFirmwareMinor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMinor.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMinor.setDescription('RSC Core Firmware Revision Minor Number')
netraCtRscVersionFirmwareMicro = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMicro.setStatus('current')
if mibBuilder.loadTexts: netraCtRscVersionFirmwareMicro.setDescription('RSC Core Firmware Revision Micro Number')
netraCtRscTOD = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 16), DateAndTime()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscTOD.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTOD.setDescription('RSC time of day')
netraCtRscEscape = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 17), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscEscape.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEscape.setDescription('set the first character of the 2-character escape sequence')
netraCtRscHostWatchDogReboot = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscHostWatchDogReboot.setStatus('current')
if mibBuilder.loadTexts: netraCtRscHostWatchDogReboot.setDescription('RSC monitors a heartbeat from the Solaris host. If this heartbeat is late, then a message will be logged, and an alarm will be set. If netraCtRscHostWatchDogReboot is on, then the solaris host will also be rebooted.')
netraCtRscHostWatchDogTimeout = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscHostWatchDogTimeout.setStatus('current')
if mibBuilder.loadTexts: netraCtRscHostWatchDogTimeout.setDescription('RSC monitors a heartbeat from the Solaris host. This variable indicates the maximum tolerable number of seconds between heartbeats, before RSC will set alarm0. A setting of 0 indicates that the heartbeat should not be monitored.')
netraCtRscPanicDump = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 2, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscPanicDump.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPanicDump.setDescription('If set to true when netraCtRscAdminHostReset is set, then a break will be sent to the host, causing a core dump to be saved on the host. Otherwise, the setting of netraCtRscAdminHostReset will cause a hardware reset.')
netraCtRscSerial2Mode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 2147483647))).clone(namedValues=NamedValues(("rcc", 1), ("modem", 2), ("tty", 3), ("disabled", 4), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Mode.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Mode.setDescription('serial port 2 configuration mode.')
netraCtRscSerial2Parity = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Parity.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Parity.setDescription('serial port 2 parity mode.')
netraCtRscSerial2Stop = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Stop.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Stop.setDescription('serial port 2 stop bits.')
netraCtRscSerial2Data = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("seven", 1), ("eight", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Data.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Data.setDescription('serial port 2 data bits.')
netraCtRscSerial2Baud = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 2147483647))).clone(namedValues=NamedValues(("b300", 1), ("b1200", 2), ("b1800", 3), ("b2400", 4), ("b4800", 5), ("b9600", 6), ("b19200", 7), ("b38400", 8), ("b57600", 9), ("b115200", 10), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Baud.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Baud.setDescription('serial port 2 baud rate.')
netraCtRscSerial2HwFlowcontrol = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2HwFlowcontrol.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2HwFlowcontrol.setDescription('serial port 2 Hardware Flowcontrol.')
netraCtRscSerial2Inactivity = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2Inactivity.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2Inactivity.setDescription('serial port 2 inactivity.')
netraCtRscSerial2PagerOneConfig = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneConfig.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneConfig.setDescription('primary pager number for RSC.')
netraCtRscSerial2PagerTwoConfig = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoConfig.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoConfig.setDescription('secondary pager number for RSC')
netraCtRscSerial2PagerOneBaud = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 2147483647))).clone(namedValues=NamedValues(("b300", 1), ("b1200", 2), ("b2400", 3), ("b4800", 4), ("b9600", 5), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneBaud.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneBaud.setDescription('primary pager baud rate.')
netraCtRscSerial2PagerTwoBaud = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 2147483647))).clone(namedValues=NamedValues(("b300", 1), ("b1200", 2), ("b2400", 3), ("b4800", 4), ("b9600", 5), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoBaud.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoBaud.setDescription('secondary pager baud rate.')
netraCtRscSerial2PagerOneParity = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneParity.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneParity.setDescription('primary pager parity mode.')
netraCtRscSerial2PagerTwoParity = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoParity.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoParity.setDescription('secondary pager parity mode.')
netraCtRscSerial2PagerOneStop = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneStop.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneStop.setDescription('primary pager stop bits.')
netraCtRscSerial2PagerTwoStop = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoStop.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoStop.setDescription('secondary pager stop bits.')
netraCtRscSerial2PagerOneData = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("seven", 1), ("eight", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneData.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneData.setDescription('primary pager data bits.')
netraCtRscSerial2PagerTwoData = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("seven", 1), ("eight", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoData.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoData.setDescription('secondary pager data bits.')
netraCtRscSerial2PagerOneInit = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 18), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneInit.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOneInit.setDescription('primary pager modem init string.')
netraCtRscSerial2PagerTwoInit = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 19), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 30))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoInit.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoInit.setDescription('secondary pager modem init string.')
netraCtRscSerial2PagerOnePassword = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerOnePassword.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerOnePassword.setDescription('primary pager TAP password.')
netraCtRscSerial2PagerTwoPassword = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 3, 21), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoPassword.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSerial2PagerTwoPassword.setDescription('secondary pager TAP password.')
netraCtRscModemParity = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscModemParity.setStatus('current')
if mibBuilder.loadTexts: netraCtRscModemParity.setDescription('modem parity mode.')
netraCtRscModemStop = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscModemStop.setStatus('current')
if mibBuilder.loadTexts: netraCtRscModemStop.setDescription('modem stop bits.')
netraCtRscModemData = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("seven", 1), ("eight", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscModemData.setStatus('current')
if mibBuilder.loadTexts: netraCtRscModemData.setDescription('modem data bits.')
netraCtRscCountryCode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscCountryCode.setStatus('current')
if mibBuilder.loadTexts: netraCtRscCountryCode.setDescription('country specified using CCITT international dial-plan number.')
netraCtRscModemModel = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 4, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscModemModel.setStatus('current')
if mibBuilder.loadTexts: netraCtRscModemModel.setDescription('the modem type used on the netraCtRsc card.')
netraCtRscMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscMacAddress.setStatus('current')
if mibBuilder.loadTexts: netraCtRscMacAddress.setDescription('Ethernet address for RSC.')
netraCtRscEnetTpeLinkTest = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscEnetTpeLinkTest.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEnetTpeLinkTest.setDescription("Determines whether tpe-link-test should be set or clear. This should be off when interfacing with older-style hubs that don't support heartbeat.")
netraCtRscIPAddress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscIPAddress.setStatus('current')
if mibBuilder.loadTexts: netraCtRscIPAddress.setDescription('configured IP address for RSC.')
netraCtRscIpMask = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscIpMask.setStatus('current')
if mibBuilder.loadTexts: netraCtRscIpMask.setDescription('configured IP netmask for RSC.')
netraCtRscIpGateway = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscIpGateway.setStatus('current')
if mibBuilder.loadTexts: netraCtRscIpGateway.setDescription('configured IP gateway for RSC.')
netraCtRscSNMPHostAddress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscSNMPHostAddress.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSNMPHostAddress.setDescription('configured SNMP server for RSC alerts.')
netraCtRscMailHostAddress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscMailHostAddress.setStatus('current')
if mibBuilder.loadTexts: netraCtRscMailHostAddress.setDescription('Address of Mail Server.')
netraCtRscMailUser = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscMailUser.setStatus('current')
if mibBuilder.loadTexts: netraCtRscMailUser.setDescription('email address for RSC alerts.')
netraCtRscPPPLocalIP = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscPPPLocalIP.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPPPLocalIP.setDescription('PPP local IP address.')
netraCtRscPPPRemoteIP = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscPPPRemoteIP.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPPPRemoteIP.setDescription('PPP remote IP address.')
netraCtRscMailHostAddressBackup = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 5, 11), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscMailHostAddressBackup.setStatus('current')
if mibBuilder.loadTexts: netraCtRscMailHostAddressBackup.setDescription('Backup Address of Mail Server. When no IP address is configured for the mail server, the backup IP address becomes the first IP address of the mailhost.')
netraCtRscSystemType = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscSystemType.setStatus('current')
if mibBuilder.loadTexts: netraCtRscSystemType.setDescription('System Type. This will currently return one of the following 2 strings: NetraCt400 , or NetraCt800.')
netraCtRscPowerSupplyCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscPowerSupplyCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyCount.setDescription('The total number of Power Supply FRUs in the platform.')
netraCtRscPowerSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3), )
if mibBuilder.loadTexts: netraCtRscPowerSupplyTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyTable.setDescription('A table listing the characteristics of the Power-Supply FRU.')
netraCtRscPowerSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscPowerSupplyIndex"))
if mibBuilder.loadTexts: netraCtRscPowerSupplyEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyEntry.setDescription('an entry (conceptual row) in the netraCtRscPowerSupplyTable')
netraCtRscPowerSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscPowerSupplyIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyIndex.setDescription('Entry number for this Power-Supply FRU row.')
netraCtRscPowerSupplyPresent = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("true", 1), ("false", 2), ("unknown", 3), ("notimpl", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscPowerSupplyPresent.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyPresent.setDescription('Power-Supply FRU is present.')
netraCtRscPowerSupplyOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 2147483647))).clone(namedValues=NamedValues(("okay", 1), ("failed", 2), ("unknown", 3), ("offline", 4), ("notimpl", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscPowerSupplyOperState.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyOperState.setDescription('The current status of the Power-Supply FRU.')
netraCtRscPowerSupplyAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscPowerSupplyAdminState.setStatus('current')
if mibBuilder.loadTexts: netraCtRscPowerSupplyAdminState.setDescription('Setting this value to on(1) requests that the power supply be turned on. Setting this value to off(2) requests that the power supply be turned off.')
netraCtRscAlarmCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscAlarmCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmCount.setDescription('Number of Alarm Ports in this Platform')
netraCtRscAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5), )
if mibBuilder.loadTexts: netraCtRscAlarmTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmTable.setDescription('a table listing the available Alarm Ports.')
netraCtRscAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscAlarmIndex"))
if mibBuilder.loadTexts: netraCtRscAlarmEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmEntry.setDescription('an entry (conceptual row) in the netraCtRscAlarmTable.')
netraCtRscAlarmIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64)))
if mibBuilder.loadTexts: netraCtRscAlarmIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmIndex.setDescription('row index into Alarm Port table')
netraCtRscAlarmID = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscAlarmID.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmID.setDescription('Alarm identifier. The current Netra ct 400/800 Alarm Cards support Alarm IDs of 0, 1, 2, and 3.')
netraCtRscAlarmOperState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscAlarmOperState.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmOperState.setDescription('This returns the current value of the Alarm, which may have been set by either software due to environmental conditions, such as Fan failure, or manually, by the setting of netraCtRscAlarmAdminState.')
netraCtRscAlarmAdminState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscAlarmAdminState.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmAdminState.setDescription('This reflects the requested state of an alarm port by the SNMP manager.')
netraCtRscAlarmPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscAlarmPrefix.setStatus('current')
if mibBuilder.loadTexts: netraCtRscAlarmPrefix.setDescription('The Alarm Prefix that will associate an FRU type to an alarm. Currently supported definitions are: cpuunit(1), software(2), powersupply(3), temperature(4), fan(5). 6 through 9 are reserved for future expansion. 10 through 255 are undefined, and can be configured for user-defined messages and alarming.')
netraCtRscFanCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscFanCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanCount.setDescription('Maximum number of Fan Tray FRUs in Platform')
netraCtRscFanTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 7), )
if mibBuilder.loadTexts: netraCtRscFanTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanTable.setDescription('a table listing the characteristics of the Fan Tray FRU.')
netraCtRscFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 7, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscFanIndex"))
if mibBuilder.loadTexts: netraCtRscFanEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanEntry.setDescription('an entry (conceptual row) in the netraCtRscFanTable.')
netraCtRscFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscFanIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanIndex.setDescription('row index into Fan FRU table')
netraCtRscFanPresent = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("true", 1), ("false", 2), ("unknown", 3), ("notimpl", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscFanPresent.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanPresent.setDescription('Fan FRU is present')
netraCtRscFanStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 2147483647))).clone(namedValues=NamedValues(("okay", 1), ("failed", 2), ("unknown", 3), ("notimpl", 2147483647)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscFanStatus.setStatus('current')
if mibBuilder.loadTexts: netraCtRscFanStatus.setDescription('The current status of the Fan FRU. unknown will be returned if the status is unknown.')
netraCtRscTemperatureCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureCount.setDescription('Maximum number of temperature sensors on Platform')
netraCtRscTemperatureTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9), )
if mibBuilder.loadTexts: netraCtRscTemperatureTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureTable.setDescription('a table listing the characteristics of the temperature sensors on the platform.')
netraCtRscTemperatureEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscTemperatureIndex"))
if mibBuilder.loadTexts: netraCtRscTemperatureEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureEntry.setDescription('an entry (conceptual row) in the netraCtRscTemperatureTable.')
netraCtRscTemperatureIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureIndex.setDescription('current row of the Temperature sensor')
netraCtRscTemperatureValid = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("true", 1), ("false", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureValid.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureValid.setDescription('The current temperature at this sensor is known. If the temperature cannot be obtained, then false will be returned, and netraCtRscTemperatureValue should not be considered to be valid.')
netraCtRscTemperatureValue = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureValue.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureValue.setDescription('current temperature at this sensor')
netraCtRscTemperatureLowWarn = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureLowWarn.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureLowWarn.setDescription('low warning threshold for this temperature sensor')
netraCtRscTemperatureHighWarn = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureHighWarn.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureHighWarn.setDescription('high warning threshold for this temperature sensor')
netraCtRscTemperatureDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 6, 9, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscTemperatureDesc.setStatus('current')
if mibBuilder.loadTexts: netraCtRscTemperatureDesc.setDescription('textual description of the Temperature sensor')
netraCtRscEventLogCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscEventLogCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogCount.setDescription('current number of lines in the event log')
netraCtRscEventLogTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 2), )
if mibBuilder.loadTexts: netraCtRscEventLogTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogTable.setDescription('a table listing the contents of the event log as an array of strings.')
netraCtRscEventLogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 2, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscEventLogIndex"))
if mibBuilder.loadTexts: netraCtRscEventLogEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogEntry.setDescription('an entry (conceptual row) in the netraCtRscEventLogTable.')
netraCtRscEventLogIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscEventLogIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogIndex.setDescription('The index of the entry. The oldest entry will start at index 0. The most recent entry will be found at the index equal to netraCtRscEventLogCount - 1.')
netraCtRscEventLogTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 2, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscEventLogTimeStamp.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogTimeStamp.setDescription('event timestamp')
netraCtRscEventLogMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscEventLogMessage.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEventLogMessage.setDescription('textual description of an event.')
netraCtRscOrigConsoleLogCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogCount.setDescription('current number of lines in the original console log.')
netraCtRscOrigConsoleLogTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 4), )
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogTable.setDescription('a table listing the contents of the original console log as an array of strings.')
netraCtRscOrigConsoleLogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 4, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscOrigConsoleLogIndex"))
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogEntry.setDescription('an entry (conceptual row) in the netraCtRscOrigConsoleLogTable.')
netraCtRscOrigConsoleLogIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogIndex.setDescription('The index of the entry. The oldest entry will start at index 0. The most recent entry will be found at the index equal to netraCtRscOrigConsoleLogCount - 1.')
netraCtRscOrigConsoleLogTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 4, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogTimeStamp.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogTimeStamp.setDescription('Original Console Log timestamp')
netraCtRscOrigConsoleLogMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 4, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogMessage.setStatus('current')
if mibBuilder.loadTexts: netraCtRscOrigConsoleLogMessage.setDescription('textual description of an event.')
netraCtRscConsoleLogCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscConsoleLogCount.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogCount.setDescription('current number of lines in the console log.')
netraCtRscConsoleLogTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 6), )
if mibBuilder.loadTexts: netraCtRscConsoleLogTable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogTable.setDescription('a table listing the contents of the console log as an array of strings.')
netraCtRscConsoleLogEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 6, 1), ).setIndexNames((0, "SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscConsoleLogIndex"))
if mibBuilder.loadTexts: netraCtRscConsoleLogEntry.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogEntry.setDescription('an entry (conceptual row) in the netraCtRscConsoleLogTable.')
netraCtRscConsoleLogIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscConsoleLogIndex.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogIndex.setDescription('The index of the entry. The oldest entry will start at index 0. The most recent entry will be found at the index equal to netraCtRscConsoleLogCount - 1.')
netraCtRscConsoleLogTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 6, 1, 2), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscConsoleLogTimeStamp.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogTimeStamp.setDescription('Console Log timestamp')
netraCtRscConsoleLogMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 6, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: netraCtRscConsoleLogMessage.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleLogMessage.setDescription('textual description of an event.')
netraCtRscConsoleReset = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 1, 7, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("set", 1), ("clear", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscConsoleReset.setStatus('current')
if mibBuilder.loadTexts: netraCtRscConsoleReset.setDescription('When set, the console log is reset: the current log is copied over to the original console log and the console log is cleared. All new console messages will then go to the console log. The clear(2) setting is read-only.')
netraCtRscRCCPowerOnEnable = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscRCCPowerOnEnable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscRCCPowerOnEnable.setDescription('If set to off(2), the RCC PowerOn command is masked.')
netraCtRscRCCPowerOffEnable = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscRCCPowerOffEnable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscRCCPowerOffEnable.setDescription('If set to off(2), the RCC PowerOff command is masked.')
netraCtRscRCCResetEnable = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 2147483647))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("notimpl", 2147483647)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscRCCResetEnable.setStatus('current')
if mibBuilder.loadTexts: netraCtRscRCCResetEnable.setDescription('If set to off(2), the RCC Reset command is masked.')
netraCtRscRCCLinkNum = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: netraCtRscRCCLinkNum.setStatus('current')
if mibBuilder.loadTexts: netraCtRscRCCLinkNum.setDescription('2-byte string that defines the RCC Linknum address.')
netraCtRscEvent = NotificationType((1, 3, 6, 1, 4, 1, 42, 2, 65, 2, 2, 0, 1)).setObjects(("SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscAlarmID"), ("SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscAlarmOperState"), ("SUN-SNMP-NETRA-CT-RSC-MIB", "netraCtRscAlarmPrefix"))
if mibBuilder.loadTexts: netraCtRscEvent.setStatus('current')
if mibBuilder.loadTexts: netraCtRscEvent.setDescription('Event used to notify the SNMP manager of a new RSC event. An event is generated when the value of netraCtRscAlarmOperState in netraCtRscAlarmTable changes state.')
mibBuilder.exportSymbols("SUN-SNMP-NETRA-CT-RSC-MIB", netraCtRscExpmnt=netraCtRscExpmnt, netraCtRscMailUser=netraCtRscMailUser, netraCtRscSerial2PagerOneConfig=netraCtRscSerial2PagerOneConfig, netraCtRscOrigConsoleLogTimeStamp=netraCtRscOrigConsoleLogTimeStamp, netraCtRscVersionFirmwareMicro=netraCtRscVersionFirmwareMicro, netraCtRscEventLogTimeStamp=netraCtRscEventLogTimeStamp, netraCtRscPanicDump=netraCtRscPanicDump, netraCtRscMailHostAddressBackup=netraCtRscMailHostAddressBackup, netraCtRscOrigConsoleLogIndex=netraCtRscOrigConsoleLogIndex, netraCtRscCountryCode=netraCtRscCountryCode, netraCtRscRccConfig=netraCtRscRccConfig, netraCtRscSerial2PagerTwoBaud=netraCtRscSerial2PagerTwoBaud, netraCtRscPowerSupplyAdminState=netraCtRscPowerSupplyAdminState, netraCtRscIPAddress=netraCtRscIPAddress, netraCtRscEventLogTable=netraCtRscEventLogTable, netraCtRscSNMPHostAddress=netraCtRscSNMPHostAddress, netraCtRscPowerSupplyOperState=netraCtRscPowerSupplyOperState, netraCtRscEventLogEntry=netraCtRscEventLogEntry, netraCtRscCustomerInfo=netraCtRscCustomerInfo, netraCtRscFanTable=netraCtRscFanTable, netraCtRscEvent=netraCtRscEvent, netraCtRscGlobalPPPFlag=netraCtRscGlobalPPPFlag, netraCtRscConfigObjs=netraCtRscConfigObjs, netraCtRscTemperatureIndex=netraCtRscTemperatureIndex, netraCtRscObjs=netraCtRscObjs, netraCtRscAlarmID=netraCtRscAlarmID, netraCtRscEnetTpeLinkTest=netraCtRscEnetTpeLinkTest, DateAndTime=DateAndTime, netraCtRscVersionFirmwareMinor=netraCtRscVersionFirmwareMinor, netraCtRscSystemType=netraCtRscSystemType, netraCtRscAlarmOperState=netraCtRscAlarmOperState, netraCtRscAdminXir=netraCtRscAdminXir, netraCtRscConsoleLogCount=netraCtRscConsoleLogCount, netraCtRscPowerSupplyEntry=netraCtRscPowerSupplyEntry, netraCtRscSerial2Data=netraCtRscSerial2Data, netraCtRscOrigConsoleLogEntry=netraCtRscOrigConsoleLogEntry, netraCtRscHostname=netraCtRscHostname, netraCtRscEnvObjs=netraCtRscEnvObjs, netraCtRscSerial2PagerOneStop=netraCtRscSerial2PagerOneStop, netraCtRscVersionMainMinor=netraCtRscVersionMainMinor, netraCtRscSerial2Parity=netraCtRscSerial2Parity, netraCtRscSerial2PagerTwoPassword=netraCtRscSerial2PagerTwoPassword, netraCtRscSerial2Objs=netraCtRscSerial2Objs, netraCtRscVersionMainMicro=netraCtRscVersionMainMicro, netraCtRscConsoleLogTable=netraCtRscConsoleLogTable, netraCtRscSerial2PagerOneInit=netraCtRscSerial2PagerOneInit, netraCtRscVersionFirmwareMajor=netraCtRscVersionFirmwareMajor, netraCtRscEnetObjs=netraCtRscEnetObjs, netraCtRscSerial2PagerTwoInit=netraCtRscSerial2PagerTwoInit, netraCtRscAlarmTable=netraCtRscAlarmTable, netraCtRscOrigConsoleLogTable=netraCtRscOrigConsoleLogTable, netraCtRscVersionBootMinor=netraCtRscVersionBootMinor, netraCtRscVersionBootMajor=netraCtRscVersionBootMajor, netraCtRscSerial2PagerOneParity=netraCtRscSerial2PagerOneParity, netraCtRscAdminBreak=netraCtRscAdminBreak, netraCtRscSerial2Stop=netraCtRscSerial2Stop, netraCtRscFanPresent=netraCtRscFanPresent, netraCtRscIpGateway=netraCtRscIpGateway, netraCtRscTrapPrefix=netraCtRscTrapPrefix, netraCtRscVersionBootMicro=netraCtRscVersionBootMicro, netraCtRscTemperatureEntry=netraCtRscTemperatureEntry, netraCtRscSerial2Mode=netraCtRscSerial2Mode, netraCtRscMacAddress=netraCtRscMacAddress, netraCtRscFanIndex=netraCtRscFanIndex, netraCtRscEscape=netraCtRscEscape, netraCtRscFanEntry=netraCtRscFanEntry, netraCtRscAlarmIndex=netraCtRscAlarmIndex, netraCtRscAlarmAdminState=netraCtRscAlarmAdminState, netraCtRscPPPRemoteIP=netraCtRscPPPRemoteIP, netraCtRscPowerSupplyCount=netraCtRscPowerSupplyCount, 
netraCtRscTemperatureValue=netraCtRscTemperatureValue, netraCtRscAdminObjs=netraCtRscAdminObjs, netraCtRscGlobalEmailFlag=netraCtRscGlobalEmailFlag, netraCtRscPowerSupplyTable=netraCtRscPowerSupplyTable, netraCtRscConsoleLogMessage=netraCtRscConsoleLogMessage, netraCtRscConsoleReset=netraCtRscConsoleReset, netraCtRscTemperatureDesc=netraCtRscTemperatureDesc, netraCtRscSerial2Baud=netraCtRscSerial2Baud, netraCtRscTemperatureValid=netraCtRscTemperatureValid, netraCtRscSerial2HwFlowcontrol=netraCtRscSerial2HwFlowcontrol, netraCtRscModemStop=netraCtRscModemStop, PYSNMP_MODULE_ID=netraCtRscMIB, netraCtRscRCCResetEnable=netraCtRscRCCResetEnable, netraCtRscTemperatureTable=netraCtRscTemperatureTable, netraCtRscConsoleLogIndex=netraCtRscConsoleLogIndex, netraCtRscLogObjs=netraCtRscLogObjs, netraCtRscRCCLinkNum=netraCtRscRCCLinkNum, netraCtRscPowerSupplyPresent=netraCtRscPowerSupplyPresent, netraCtRscOrigConsoleLogMessage=netraCtRscOrigConsoleLogMessage, netraCtRscSerial2Inactivity=netraCtRscSerial2Inactivity, netraCtRscOrigConsoleLogCount=netraCtRscOrigConsoleLogCount, netraCtRscSerial2PagerOneData=netraCtRscSerial2PagerOneData, netraCtRscPPPLocalIP=netraCtRscPPPLocalIP, netraCtRscTOD=netraCtRscTOD, netraCtRscConsoleLogEntry=netraCtRscConsoleLogEntry, netraCtRscTemperatureHighWarn=netraCtRscTemperatureHighWarn, netraCtRscModemModel=netraCtRscModemModel, netraCtRscConsoleLogTimeStamp=netraCtRscConsoleLogTimeStamp, netraCtRscEventLogCount=netraCtRscEventLogCount, netraCtRscSerial2PagerOneBaud=netraCtRscSerial2PagerOneBaud, netraCtRscAlarmPrefix=netraCtRscAlarmPrefix, netraCtRscSerial2PagerTwoConfig=netraCtRscSerial2PagerTwoConfig, netraCtRscMailHostAddress=netraCtRscMailHostAddress, netraCtRscHostWatchDogTimeout=netraCtRscHostWatchDogTimeout, netraCtRscTemperatureCount=netraCtRscTemperatureCount, netraCtRscModemData=netraCtRscModemData, netraCtRscSerial2PagerTwoData=netraCtRscSerial2PagerTwoData, netraCtRscAdminNmi=netraCtRscAdminNmi, netraCtRscAlarmEntry=netraCtRscAlarmEntry, netraCtRscSerial2PagerTwoParity=netraCtRscSerial2PagerTwoParity, netraCtRscEvents=netraCtRscEvents, netraCtRscAdminRscReset=netraCtRscAdminRscReset, netraCtRscFanCount=netraCtRscFanCount, netraCtRscPowerSupplyIndex=netraCtRscPowerSupplyIndex, netraCtRscFanStatus=netraCtRscFanStatus, netraCtRscTemperatureLowWarn=netraCtRscTemperatureLowWarn, netraCtRscAlarmCount=netraCtRscAlarmCount, netraCtRscIpMask=netraCtRscIpMask, netraCtRscGlobalPageFlag=netraCtRscGlobalPageFlag, netraCtRscRCCPowerOnEnable=netraCtRscRCCPowerOnEnable, netraCtRscMIB=netraCtRscMIB, netraCtRscHostWatchDogReboot=netraCtRscHostWatchDogReboot, netraCtRscGlobalIPModeFlag=netraCtRscGlobalIPModeFlag, netraCtRscModemParity=netraCtRscModemParity, netraCtRscRCCPowerOffEnable=netraCtRscRCCPowerOffEnable, netraCtRscModemObjs=netraCtRscModemObjs, netraCtRscEventLogMessage=netraCtRscEventLogMessage, netraCtRscVersionMainMajor=netraCtRscVersionMainMajor, netraCtRscSerial2PagerOnePassword=netraCtRscSerial2PagerOnePassword, netraCtRscAdminHostReset=netraCtRscAdminHostReset, netraCtRscSerial2PagerTwoStop=netraCtRscSerial2PagerTwoStop, netraCtRscEventLogIndex=netraCtRscEventLogIndex)
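# ---------------------------------------------------------------------------
# A minimal query sketch (an illustrative assumption, not part of the compiled
# MIB above): it uses pysnmp's high-level API to read the scalar
# netraCtRscSystemType.0 from an agent. The host name and community string are
# placeholders.
def _example_read_system_type(host='netra-ct.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, _error_index, var_binds = next(getCmd(
        SnmpEngine(),
        CommunityData(community),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity('SUN-SNMP-NETRA-CT-RSC-MIB',
                                  'netraCtRscSystemType', 0))))
    if error_indication or error_status:
        raise RuntimeError(str(error_indication or error_status.prettyPrint()))
    # var_binds is a list of (OID, value) pairs; return the value as text.
    return str(var_binds[0][1])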
|
python
|
from django.core.management.base import BaseCommand
from django.conf import settings
from ..utilities.modelwriter import *
class Command(BaseCommand):
help = 'Add a new model to an app.'
def add_arguments(self, parser):
parser.add_argument(
'app_name',
action='store',
help='App name',
)
def handle(self, *args, **options):
context={
'app_name': options['app_name'],
}
ModelsFile().write(context)
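# Hypothetical invocation sketch (the management command name is derived from
# this module's file name, which is not shown here; 'addmodel' and the 'blog'
# app below are assumed placeholders):
#
#     python manage.py addmodel blog
#
# or, from Python code:
#
#     from django.core.management import call_command
#     call_command('addmodel', 'blog')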
|
python
|
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Recursive (Nested) Snippets {{{#
class RecTabStops_SimpleCase_ExpectCorrectResult(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'm' + EX + 'hello' + \
JF + 'world' + JF + 'ups' + JF + 'end'
wanted = '[ [ hello world ]ups end ]'
class RecTabStops_SimpleCaseLeaveSecondSecond_ExpectCorrectResult(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'm' + EX + 'hello' + JF + 'world' + JF + JF + JF + 'end'
wanted = '[ [ hello world ] sec ]end'
class RecTabStops_SimpleCaseLeaveFirstSecond_ExpectCorrectResult(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'm' + EX + 'hello' + JF + JF + JF + 'world' + JF + 'end'
wanted = '[ [ hello sec ] world ]end'
class RecTabStops_InnerWOTabStop_ECR(_VimTest):
snippets = (
('m1', 'Just some Text'),
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm1' + EX + 'hi' + JF + 'two' + JF + 'end'
wanted = '[ Just some Texthi two ]end'
class RecTabStops_InnerWOTabStopTwiceDirectly_ECR(_VimTest):
snippets = (
('m1', 'JST'),
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm1' + EX + ' m1' + EX + 'hi' + JF + 'two' + JF + 'end'
wanted = '[ JST JSThi two ]end'
class RecTabStops_InnerWOTabStopTwice_ECR(_VimTest):
snippets = (
('m1', 'JST'),
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm1' + EX + JF + 'm1' + EX + 'hi' + JF + 'end'
wanted = '[ JST JSThi ]end'
class RecTabStops_OuterOnlyWithZeroTS_ECR(_VimTest):
snippets = (
('m', 'A $0 B'),
('m1', 'C $1 D $0 E'),
)
keys = 'm' + EX + 'm1' + EX + 'CD' + JF + 'DE'
wanted = 'A C CD D DE E B'
class RecTabStops_OuterOnlyWithZero_ECR(_VimTest):
snippets = (
('m', 'A $0 B'),
('m1', 'C $1 D $0 E'),
)
keys = 'm' + EX + 'm1' + EX + 'CD' + JF + 'DE'
wanted = 'A C CD D DE E B'
class RecTabStops_ExpandedInZeroTS_ECR(_VimTest):
snippets = (
('m', 'A $0 B $1'),
('m1', 'C $1 D $0 E'),
)
keys = 'm' + EX + 'hi' + JF + 'm1' + EX + 'CD' + JF + 'DE'
wanted = 'A C CD D DE E B hi'
class RecTabStops_ExpandedInZeroTSTwice_ECR(_VimTest):
snippets = (
('m', 'A $0 B $1'),
('m1', 'C $1 D $0 E'),
)
keys = 'm' + EX + 'hi' + JF + 'm' + EX + 'again' + JF + 'm1' + \
EX + 'CD' + JF + 'DE'
wanted = 'A A C CD D DE E B again B hi'
class RecTabStops_ExpandedInZeroTSSecondTime_ECR(_VimTest):
snippets = (
('m', 'A $0 B $1'),
('m1', 'C $1 D $0 E'),
)
keys = 'm' + EX + 'hi' + JF + 'm' + EX + \
'm1' + EX + 'CD' + JF + 'DE' + JF + 'AB'
wanted = 'A A AB B C CD D DE E B hi'
class RecTabsStops_TypeInZero_ECR(_VimTest):
snippets = (
('v', r"\vec{$1}", 'Vector', 'w'),
('frac', r"\frac{${1:one}}${0:zero}{${2:two}}", 'Fractio', 'w'),
)
keys = 'v' + EX + 'frac' + EX + 'a' + JF + 'b' + JF + 'frac' + EX + 'aa' + JF + JF + 'cc' + JF + \
'hello frac' + EX + JF + JF + 'world'
wanted = r"\vec{\frac{a}\frac{aa}cc{two}{b}}hello \frac{one}world{two}"
class RecTabsStops_TypeInZero2_ECR(_VimTest):
snippets = (
('m', r"_${0:explicit zero}", 'snip', 'i'),
)
keys = 'm' + EX + 'hello m' + EX + 'world m' + EX + 'end'
wanted = r"_hello _world _end"
class RecTabsStops_BackspaceZero_ECR(_VimTest):
snippets = (
('m', r"${1:one}${0:explicit zero}${2:two}", 'snip', 'i'),
)
keys = 'm' + EX + JF + JF + BS + 'm' + EX
wanted = r"oneoneexplicit zerotwotwo"
class RecTabStops_MirrorInnerSnippet_ECR(_VimTest):
snippets = (
('m', '[ $1 $2 ] $1'),
('m1', 'ASnip $1 ASnip $2 ASnip'),
)
keys = 'm' + EX + 'm1' + EX + 'Hallo' + JF + 'Hi' + \
JF + 'endone' + JF + 'two' + JF + 'totalend'
wanted = '[ ASnip Hallo ASnip Hi ASnipendone two ] ASnip Hallo ASnip Hi ASnipendonetotalend'
class RecTabStops_NotAtBeginningOfTS_ExpectCorrectResult(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'hello m' + EX + 'hi' + JF + 'two' + JF + 'ups' + JF + 'three' + \
JF + 'end'
wanted = '[ hello [ hi two ]ups three ]end'
class RecTabStops_InNewlineInTabstop_ExpectCorrectResult(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'hello\nm' + EX + 'hi' + JF + 'two' + JF + 'ups' + JF + 'three' + \
JF + 'end'
wanted = '[ hello\n[ hi two ]ups three ]end'
class RecTabStops_InNewlineInTabstopNotAtBeginOfLine_ECR(_VimTest):
snippets = ('m', '[ ${1:first} ${2:sec} ]')
keys = 'm' + EX + 'hello\nhello again m' + EX + 'hi' + JF + 'two' + \
JF + 'ups' + JF + 'three' + JF + 'end'
wanted = '[ hello\nhello again [ hi two ]ups three ]end'
class RecTabStops_InNewlineMultiline_ECR(_VimTest):
snippets = ('m', 'M START\n$0\nM END')
keys = 'm' + EX + 'm' + EX
wanted = 'M START\nM START\n\nM END\nM END'
class RecTabStops_InNewlineManualIndent_ECR(_VimTest):
snippets = ('m', 'M START\n$0\nM END')
keys = 'm' + EX + ' m' + EX + 'hi'
wanted = 'M START\n M START\n hi\n M END\nM END'
class RecTabStops_InNewlineManualIndentTextInFront_ECR(_VimTest):
snippets = ('m', 'M START\n$0\nM END')
keys = 'm' + EX + ' hallo m' + EX + 'hi'
wanted = 'M START\n hallo M START\n hi\n M END\nM END'
class RecTabStops_InNewlineMultilineWithIndent_ECR(_VimTest):
snippets = ('m', 'M START\n $0\nM END')
keys = 'm' + EX + 'm' + EX + 'hi'
wanted = 'M START\n M START\n hi\n M END\nM END'
class RecTabStops_InNewlineMultilineWithNonZeroTS_ECR(_VimTest):
snippets = ('m', 'M START\n $1\nM END -> $0')
keys = 'm' + EX + 'm' + EX + 'hi' + JF + 'hallo' + JF + 'end'
wanted = 'M START\n M START\n hi\n M END -> hallo\n' \
'M END -> end'
class RecTabStops_BarelyNotLeavingInner_ECR(_VimTest):
snippets = (
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm' + EX + 'a' + 3 * ARR_L + JF + 'hallo' + \
JF + 'ups' + JF + 'world' + JF + 'end'
wanted = '[ [ a hallo ]ups world ]end'
class RecTabStops_LeavingInner_ECR(_VimTest):
snippets = (
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm' + EX + 'a' + 4 * ARR_L + JF + 'hallo' + \
JF + 'world'
wanted = '[ [ a sec ] hallo ]world'
class RecTabStops_LeavingInnerInner_ECR(_VimTest):
snippets = (
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm' + EX + 'm' + EX + 'a' + 4 * ARR_L + JF + 'hallo' + \
JF + 'ups' + JF + 'world' + JF + 'end'
wanted = '[ [ [ a sec ] hallo ]ups world ]end'
class RecTabStops_LeavingInnerInnerTwo_ECR(_VimTest):
snippets = (
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm' + EX + 'm' + EX + 'a' + 6 * ARR_L + JF + 'hallo' + \
JF + 'end'
wanted = '[ [ [ a sec ] sec ] hallo ]end'
class RecTabStops_ZeroTSisNothingSpecial_ECR(_VimTest):
snippets = (
('m1', '[ ${1:first} $0 ${2:sec} ]'),
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm1' + EX + 'one' + JF + 'two' + \
JF + 'three' + JF + 'four' + JF + 'end'
wanted = '[ [ one three two ] four ]end'
class RecTabStops_MirroredZeroTS_ECR(_VimTest):
snippets = (
('m1', '[ ${1:first} ${0:Year, some default text} $0 ${2:sec} ]'),
('m', '[ ${1:first} ${2:sec} ]'),
)
keys = 'm' + EX + 'm1' + EX + 'one' + JF + 'two' + \
JF + 'three' + JF + 'four' + JF + 'end'
wanted = '[ [ one three three two ] four ]end'
class RecTabStops_ChildTriggerContainsParentTextObjects(_VimTest):
# https://bugs.launchpad.net/bugs/1191617
files = { 'us/all.snippets': r"""
global !p
def complete(t, opts):
if t:
opts = [ q[len(t):] for q in opts if q.startswith(t) ]
if len(opts) == 0:
return ''
return opts[0] if len(opts) == 1 else "(" + '|'.join(opts) + ')'
def autocomplete_options(t, string, attr=None):
return complete(t[1], [opt for opt in attr if opt not in string])
endglobal
snippet /form_for(.*){([^|]*)/ "form_for html options" rw!
`!p
auto = autocomplete_options(t, match.group(2), attr=["id: ", "class: ", "title: "])
snip.rv = "form_for" + match.group(1) + "{"`$1`!p if (snip.c != auto) : snip.rv=auto`
endsnippet
"""}
keys = 'form_for user, namespace: some_namespace, html: {i' + EX + 'i' + EX
wanted = 'form_for user, namespace: some_namespace, html: {(id: |class: |title: )d: '
# End: Recursive (Nested) Snippets #}}}
|
python
|
from disnake import CommandInteraction, Embed, Thread
from disnake.ext.commands import Cog, Param, slash_command
from src import Bot
from src.impl.database import Channel, ChannelMap, Message
from src.impl.utils import is_administrator
class Core(Cog):
def __init__(self, bot: Bot) -> None:
self.bot = bot
@slash_command(
name="status",
description="Get the status of the bot",
)
@is_administrator()
async def status(self, itr: CommandInteraction) -> None:
await itr.response.defer()
channels = await Channel.objects.count()
dchannels = await ChannelMap.objects.count()
messages = await Message.objects.count()
embed = Embed(
title="CrossChat Status",
colour=0x87CEEB,
description=(
f"Connected as {self.bot.user}\n"
f"Latency: {self.bot.latency * 1000:.2f}ms\n"
f"Guilds: {len(self.bot.guilds)}\n"
),
)
embed.add_field(
name="Channels",
value=f"Virtual: {channels}\nDiscord: {dchannels}",
)
embed.add_field(
name="Messages",
value=f"Total: {messages}",
)
await itr.send(embed=embed)
@slash_command(
name="setup",
description="Setup a channel for CrossChat",
)
@is_administrator()
async def setup(
self,
itr: CommandInteraction,
channel: str = Param(desc="The CrossChat channel to connect to"),
) -> None:
vchannel = self.bot.vchannels.get(channel, None)
if vchannel is None:
await itr.send(f"Channel {channel} does not exist.")
return
if isinstance(itr.channel, Thread):
await vchannel.join(itr.channel.parent_id, itr.channel.id)
else:
await vchannel.join(itr.channel.id)
await itr.send(f"Mapped channel {itr.channel.id} to CC:{channel}")
@slash_command(
name="unlink",
description="Unlink a channel from CrossChat",
)
@is_administrator()
async def unlink(
self,
itr: CommandInteraction,
) -> None:
vchannel = self.bot.resolve_channel(itr.channel.id)
if vchannel is None:
await itr.send(f"Channel {itr.channel.id} is not linked.")
return
await vchannel.leave(itr.channel.id)
await itr.send(f"Unlinked channel {itr.channel.id} from CC:{vchannel.channel.name}")
def setup(bot: Bot) -> None:
bot.add_cog(Core(bot))
|
python
|
from .models import Folder, MEDIA_MODELS
def handle_uploaded_file(file, folder=None, is_public=True):
'''Handle an uploaded file and store it in a folder.
Matches the first media type, creates the media object and returns it;
returns None if no media type matches or the object already exists.
file: File object
folder: str or Folder instance
is_public: boolean
'''
_folder = None
if folder and isinstance(folder, Folder):
_folder = folder
elif folder:
_folder, folder_created = Folder.objects.get_or_create(
name=folder)
for cls in MEDIA_MODELS:
if cls.matches_file_type(file.name):
obj, created = cls.objects.get_or_create(
original_filename=file.name,
file=file,
folder=_folder,
is_public=is_public)
if created:
return obj
return None
def handle_uploaded_files(files, folder=None, is_public=True):
'''Handle multiple uploaded files and store them in a folder.
files: iterable of File objects
folder: str or Folder instance
is_public: boolean
'''
results = []
for f in files:
result = handle_uploaded_file(f, folder, is_public)
results.append(result)
return results
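# A minimal usage sketch (an assumption, not part of this module's public
# API): wraps raw bytes in Django's SimpleUploadedFile and stores the result
# via handle_uploaded_file. The file name and folder name are placeholders.
def _example_upload():
    from django.core.files.uploadedfile import SimpleUploadedFile
    upload = SimpleUploadedFile('photo.jpg', b'fake image bytes',
                                content_type='image/jpeg')
    # Creates (or reuses) a Folder named 'uploads' and saves the file with the
    # first media model whose matches_file_type() accepts the name.
    return handle_uploaded_file(upload, folder='uploads', is_public=True)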
|
python
|
# Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for seqio.preprocessors."""
from absl.testing import absltest
from seqio import dataset_providers
from seqio import experimental
from seqio import test_utils
from seqio import utils
from seqio import vocabularies
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
assert_dataset = test_utils.assert_dataset
Feature = dataset_providers.Feature
CacheDatasetPlaceholder = dataset_providers.CacheDatasetPlaceholder
MixtureRegistry = dataset_providers.MixtureRegistry
TaskRegistry = dataset_providers.TaskRegistry
ShardInfo = dataset_providers.ShardInfo
class FullyCachedTaskTest(absltest.TestCase):
def setUp(self):
super().setUp()
TaskRegistry.reset()
MixtureRegistry.reset()
self.fake_source = dataset_providers.FunctionDataSource(
lambda split, shuffle_files: tf.data.Dataset.range(2), ['train'])
self.vocabulary = vocabularies.PassThroughVocabulary(100)
self.metrics_fns = [lambda targets, predictions: 0]
def fake_preprocessor(ds):
"""Adds one and casts to int32."""
return ds.map(lambda x: tf.cast(x+1, tf.int32))
def fake_preprocessor_of(ds, output_features):
"""Creates output feature dict from scalar input."""
return ds.map(lambda x: {k: [x] for k in output_features})
def fake_preprocessor_sl(ds, sequence_length):
"""Concatenates the sequence length to each feature."""
return ds.map(
lambda x: { # pylint:disable=g-long-lambda
k: tf.concat([v, [sequence_length[k]]], 0) for k, v in x.items()
})
def fake_preprocessor_sl_of(ds, sequence_length, output_features):
"""Adds the sequence length to each feature with `add_eos` enabled."""
return ds.map(
lambda x: { # pylint:disable=g-long-lambda
k: tf.concat([v, [sequence_length[k]]], 0)
if output_features[k].add_eos else v for k, v in x.items()
})
self.preprocessors = [
fake_preprocessor,
fake_preprocessor_of,
fake_preprocessor_sl,
fake_preprocessor_sl_of,
]
def validate_fully_cached_task(
self, name, sequence_length, actual_sequence_length, expected_dataset):
new_task = TaskRegistry.get(name)
self.assertLen(new_task.preprocessors, 6)
self.assertEqual(new_task.metric_fns, self.metrics_fns)
self.assertIsInstance(new_task.preprocessors[-2], CacheDatasetPlaceholder)
self.assertTrue(new_task.preprocessors[-2].required)
with self.assertRaisesWithLiteralMatch(
ValueError,
f"Task '{name}' requires caching, but was called with "
"`use_cached=False`."):
new_task.get_dataset(None)
# Disable caching restriction to verify dataset is correct.
new_task.preprocessors[-2]._required = False
with self.assertRaisesWithLiteralMatch(
ValueError,
f"Fully-cached task '{name}' can only be loaded with "
f'`sequence_length={sequence_length}` or `None`. '
f'Given sequence_length={actual_sequence_length}.'):
new_task.get_dataset(
{k: v+1 for k, v in sequence_length.items()},
use_cached=False)
assert_dataset(
new_task.get_dataset(None, shuffle=False),
expected_dataset)
assert_dataset(
new_task.get_dataset(sequence_length, shuffle=False),
expected_dataset)
def test_add_fully_cached_task(self):
preprocessors = list(self.preprocessors)
preprocessors.insert(2, CacheDatasetPlaceholder())
TaskRegistry.add(
'encoder_decoder_task',
source=self.fake_source,
preprocessors=preprocessors,
output_features={
'inputs': Feature(self.vocabulary, add_eos=True),
'targets': Feature(self.vocabulary, add_eos=False)
},
metric_fns=self.metrics_fns)
sequence_length = {'inputs': 5, 'targets': 6}
actual_sequence_length = {'inputs': 6, 'targets': 7}
experimental.add_fully_cached_task('encoder_decoder_task', sequence_length)
self.validate_fully_cached_task(
'encoder_decoder_task_i5_t6',
sequence_length,
actual_sequence_length,
[
{'inputs': [1, 5, 5], 'targets': [1, 6]},
{'inputs': [2, 5, 5], 'targets': [2, 6]},
])
def test_add_fully_cached_task_single_feature(self):
TaskRegistry.add(
'decoder_task',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=True)
},
metric_fns=self.metrics_fns)
sequence_length = {'targets': 6}
actual_sequence_length = {'targets': 7}
experimental.add_fully_cached_task('decoder_task', sequence_length)
self.validate_fully_cached_task(
'decoder_task_6',
sequence_length,
actual_sequence_length,
[
{'targets': [1, 6, 6]},
{'targets': [2, 6, 6]},
])
def test_add_fully_cached_task_unique_prefix(self):
TaskRegistry.add(
'feature_prefix_task',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'tar': Feature(self.vocabulary, add_eos=True),
'targets': Feature(self.vocabulary, add_eos=False)
},
metric_fns=self.metrics_fns)
sequence_length = {'tar': 5, 'targets': 6}
actual_sequence_length = {'tar': 6, 'targets': 7}
experimental.add_fully_cached_task(
'feature_prefix_task', sequence_length)
self.validate_fully_cached_task(
'feature_prefix_task_tar5_targ6',
sequence_length,
actual_sequence_length,
[
{'tar': [1, 5, 5], 'targets': [1, 6]},
{'tar': [2, 5, 5], 'targets': [2, 6]},
])
def test_add_fully_cached_task_disallow_shuffling(self):
TaskRegistry.add(
'decoder_task',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=True)
},
metric_fns=self.metrics_fns)
sequence_length = {'targets': 6}
new_task = experimental.add_fully_cached_task(
'decoder_task', sequence_length, disallow_shuffling=True)
# Disable caching restriction to get past cache check.
new_task.preprocessors[-2]._required = False
with self.assertRaisesWithLiteralMatch(
ValueError,
"Shuffling is disallowed for Task 'decoder_task_6' since its "
'`shuffle_buffer_size` was set to `None` on construction.'):
new_task.get_dataset(None, shuffle=True, use_cached=False)
new_task.get_dataset(None, shuffle=False, use_cached=False)
def test_add_fully_cached_mixture(self):
TaskRegistry.add(
'task1',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=False)
},
metric_fns=self.metrics_fns)
TaskRegistry.add(
'task2',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=True)
},
metric_fns=self.metrics_fns)
MixtureRegistry.add('mix', [('task1', 2), ('task2', lambda x: 1)])
experimental.add_fully_cached_mixture('mix', sequence_length={'targets': 6})
new_mix = MixtureRegistry.get('mix_6')
new_task_names = ('task1_6', 'task2_6')
self.assertContainsSubset(new_task_names, TaskRegistry.names())
new_tasks = [TaskRegistry.get(n) for n in new_task_names]
self.assertCountEqual(new_tasks, new_mix.tasks)
self.assertEqual(new_mix.get_rate(new_tasks[0]), 2)
self.assertEqual(new_mix.get_rate(new_tasks[1]), 1)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task 'task1_6' requires caching, but was called with "
"`use_cached=False`."):
new_mix.get_dataset(None)
# Disable caching restriction to get past cache check.
for t in new_tasks:
t.preprocessors[-2]._required = False
with self.assertRaisesWithLiteralMatch(
ValueError,
"Fully-cached task 'task1_6' can only be loaded with "
"`sequence_length={'targets': 6}` or `None`. "
"Given sequence_length={'targets': 7}."):
new_mix.get_dataset({'targets': 7}, use_cached=False)
expected_dataset = [
{'targets': [1, 6, 6]},
{'targets': [2, 6, 6]},
{'targets': [1, 6]},
{'targets': [1, 6, 6]},
{'targets': [2, 6]},
{'targets': [2, 6, 6]},
]
assert_dataset(
new_mix.get_dataset(None, shuffle=False).take(6),
expected_dataset)
assert_dataset(
new_mix.get_dataset({'targets': 6}, shuffle=False).take(6),
expected_dataset)
def test_add_fully_cached_mixture_disallow_shuffling(self):
TaskRegistry.add(
'task1',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=False)
},
metric_fns=self.metrics_fns)
TaskRegistry.add(
'task2',
source=self.fake_source,
preprocessors=self.preprocessors,
output_features={
'targets': Feature(self.vocabulary, add_eos=True)
},
metric_fns=self.metrics_fns)
MixtureRegistry.add('mix', [('task1', 2), ('task2', lambda x: 1)])
new_mixture = experimental.add_fully_cached_mixture(
'mix', sequence_length={'targets': 6}, disallow_shuffling=True)
# Disable caching restriction to get past cache check.
for t in new_mixture.tasks:
t.preprocessors[-2]._required = False
with self.assertRaisesWithLiteralMatch(
ValueError,
"Shuffling is disallowed for Task 'task1_6' since its "
'`shuffle_buffer_size` was set to `None` on construction.'):
new_mixture.get_dataset(None, shuffle=True, use_cached=False)
new_mixture.get_dataset(None, shuffle=False, use_cached=False)
class FewshotTest(absltest.TestCase):
def test_fewshot_data_source(self):
def fake_dataset_fn(split, shuffle_files, seed=None):
# Note that for the purposes of this unit test, fake_dataset_fn
# deliberately does not properly implement shuffling. We test whether
# FewShotDataSource is robust to this.
del shuffle_files
del seed
return tf.data.Dataset.range(
*((0, 2) if split == 'validation' else (3, 7))
)
# 0 shot
src = experimental.FewshotDataSource(
dataset_providers.FunctionDataSource(
dataset_fn=fake_dataset_fn,
splits=['train', 'validation']
),
num_shots=0
)
dataset = src.get_dataset('validation', shuffle=False)
assert_dataset(
dataset, [{'eval': 0,}, {'eval': 1}]
)
# 1 shot
preprocessors = [
utils.map_over_dataset(lambda x: {'inputs': 0, 'targets': x})]
src = experimental.FewshotDataSource(
dataset_providers.FunctionDataSource(
dataset_fn=fake_dataset_fn, splits=['train', 'validation']),
train_preprocessors=preprocessors,
eval_preprocessors=preprocessors,
num_shots=1,
)
# When split is 'train', check that 'train' and 'eval' fields of each
# example are NOT always the same -- this can happen if the underlying
# dataset_fn does not implement shuffling, causing identical examples from
# the same split to be zipped together.
def train_and_eval_fields_always_same(dataset):
for ex in tfds.as_numpy(dataset):
if ex['train'] != ex['eval']:
return False
return True
# As long as train and eval fields aren't the same for SOME random seed, we
# have achieved the desired behavior. We fix the seed for this test because
# there are some seeds where train and eval fields DO happen to be the same
# by random chance, which would break this test.
self.assertFalse(
train_and_eval_fields_always_same(
src.get_dataset(split='train', shuffle=True, seed=123)))
# Even when shuffle is off, we don't want the train and eval fields to be
# the same. Instead, the 'train' field should be deterministically shuffled.
self.assertFalse(
train_and_eval_fields_always_same(
src.get_dataset(split='train', shuffle=False)))
# 3 shot
src = experimental.FewshotDataSource(
dataset_providers.FunctionDataSource(
dataset_fn=fake_dataset_fn,
splits=['train', 'validation']
),
train_preprocessors=[
utils.map_over_dataset(lambda x: {'inputs': 0, 'targets': x})
],
num_shots=3
)
dataset = src.get_dataset('validation', shuffle=False)
assert_dataset(
dataset, [
{
'eval': 0,
'train': {'inputs': [0, 0, 0], 'targets': [3, 5, 4]}
},
{
'eval': 1,
'train': {'inputs': [0, 0, 0], 'targets': [6, 6, 3]}
},
]
)
# Note: the train split has been deterministically shuffled, so the values
# of the 'targets' field that we test for are deterministic but arbitrary.
# 3-shot, sharded.
assert_dataset(
src.get_dataset(
'validation', shuffle=False, shard_info=ShardInfo(0, 2)), [
{
'eval': 0,
'train': {
'inputs': [0, 0, 0],
'targets': [3, 5, 5]
}
},
])
assert_dataset(
src.get_dataset(
'validation', shuffle=False, shard_info=ShardInfo(1, 2)), [
{
'eval': 1,
'train': {
'inputs': [0, 0, 0],
'targets': [4, 6, 6]
}
},
])
# Note: the train split has been deterministically shuffled, so the values
# of the 'targets' field that we test for are deterministic but arbitrary.
# Missing train
src = experimental.FewshotDataSource(
dataset_providers.FunctionDataSource(
dataset_fn=fake_dataset_fn,
splits=['validation']
),
num_shots=3
)
with self.assertRaisesRegex(
ValueError,
'Train split \'train\' is not one of the original source splits: '
r'\(\'validation\',\)'):
dataset = src.get_dataset('validation')
def test_fewshot_data_source_eval_on_fixed_exemplars(self):
def fake_dataset_fn(split, shuffle_files, seed=None):
# Note that for the purposes of this unit test, fake_dataset_fn
# deliberately does not properly implement shuffling. We test whether
# FewShotDataSource is robust to this.
del shuffle_files
del seed
return tf.data.Dataset.range(*((0, 2) if split == 'validation' else (3,
7)))
# 1 shot
preprocessors = [
utils.map_over_dataset(lambda x: {'inputs': 0, 'targets': x})]
src = experimental.FewshotDataSource(
dataset_providers.FunctionDataSource(
dataset_fn=fake_dataset_fn, splits=['train', 'validation']),
train_preprocessors=preprocessors,
num_shots=1,
eval_on_fixed_exemplars=True,
)
def exemplars_always_same(dataset):
"""Checks if exemplars are always the same."""
train_ex = None
for ex in dataset:
if train_ex is None:
train_ex = ex['train']
continue
if ex['train'] != train_ex:
return False
return True
# Use 'validation' split for `eval_ds`. Since `train_ds` is initiated from
# the 'train' split and `eval_on_fixed_exemplars=True`, always use the fixed
# set of exemplars.
self.assertTrue(
exemplars_always_same(
src.get_dataset(split='validation', shuffle=True, seed=123)))
assert_dataset(
src.get_dataset('validation', shuffle=False), [
{
'eval': 0,
'train': {
'inputs': [0],
'targets': [3],
}
},
{
'eval': 1,
'train': {
'inputs': [0],
'targets': [3],
}
},
])
# `eval_on_fixed_exemplars` is ignored when `split` equals `train_split`.
self.assertFalse(
exemplars_always_same(
src.get_dataset(split='train', shuffle=True, seed=123)))
def test_fewshot_preprocessor(self):
train_examples = [
{
'inputs': 'How many states in the US?',
'targets': '50',
},
{
'inputs': 'How many cents in a dollar?',
'targets': '100',
},
{
'inputs': 'How many cents in a quarter?',
'targets': '25',
}
]
eval_examples = [
{
'inputs': 'Who was in the Beatles?',
'targets': 'John',
'answers': ['John', 'Paul', 'George', 'Ringo']
},
{
'inputs': 'When did the Beatles break up?',
'targets': '1970',
'answers': ['1970', 'April 10, 1970', 'April 10', '4/10/1970'],
}
]
def _from_generator(examples):
return tf.data.Dataset.from_generator(
lambda: (x for x in examples),
output_types={k: tf.string for k in examples[0].keys()},
output_shapes={
k: [None] if isinstance(v, list) else []
for k, v in examples[0].items()
})
train_ds = _from_generator(train_examples).repeat()
eval_ds = _from_generator(eval_examples)
# 0-shot
dataset = experimental.fewshot_preprocessor(
tf.data.Dataset.zip({'eval': eval_ds}),
inputs_prefix='0 ',
targets_prefix=' X 1 ',
example_separator=' X ')
assert_dataset(
dataset,
[
{
'inputs': '0 Who was in the Beatles? X 1',
'targets': 'John',
'answers': ['John', 'Paul', 'George', 'Ringo']
},
{
'inputs': '0 When did the Beatles break up? X 1',
'targets': '1970',
'answers': ['1970', 'April 10, 1970', 'April 10', '4/10/1970'],
}
])
# 2-shot
dataset = experimental.fewshot_preprocessor(
tf.data.Dataset.zip({'train': train_ds.batch(2), 'eval': eval_ds}),
inputs_prefix='0 ',
targets_prefix=' X 1 ',
example_separator=' X ')
assert_dataset(
dataset,
[
{
'inputs':
'0 How many states in the US? X 1 50 X 0 How many cents in '
'a dollar? X 1 100 X 0 Who was in the Beatles? X 1',
'targets': 'John',
'answers': ['John', 'Paul', 'George', 'Ringo']
},
{
'inputs':
'0 How many cents in a quarter? X 1 25 X 0 How many states '
'in the US? X 1 50 X 0 When did the Beatles break up? X 1',
'targets': '1970',
'answers': ['1970', 'April 10, 1970', 'April 10', '4/10/1970'],
}
])
# 1-shot, batched eval
dataset = experimental.fewshot_preprocessor(
tf.data.Dataset.zip(
{'train': train_ds.batch(1), 'eval': eval_ds.batch(2)}
),
inputs_prefix='0 ',
targets_prefix=' X 1 ',
example_separator=' X ')
assert_dataset(
dataset,
[
{
'inputs':
'0 How many states in the US? X 1 50 X 0 Who was in the '
'Beatles? X 1',
'targets': 'John',
'answers': ['John', 'Paul', 'George', 'Ringo']
},
{
'inputs':
'0 How many states in the US? X 1 50 X 0 When did the '
'Beatles break up? X 1',
'targets': '1970',
'answers': ['1970', 'April 10, 1970', 'April 10', '4/10/1970'],
},
])
class SentinelTaskTest(FullyCachedTaskTest):
def validate_sentinel_task(
self, name, sequence_length, expected_dataset):
new_task = TaskRegistry.get(name)
# With sentinels inserted we expect one additional preprocessor.
self.assertLen(new_task.preprocessors, 5)
self.assertEqual(new_task.metric_fns, self.metrics_fns)
self.assertIsNotNone(new_task.postprocessor)
assert_dataset(
new_task.get_dataset(sequence_length, shuffle=False),
expected_dataset)
def test_add_sentinel_task(self):
preprocessors = list(self.preprocessors)
TaskRegistry.add(
'encoder_decoder_task',
source=self.fake_source,
preprocessors=preprocessors,
output_features={
'inputs': Feature(self.vocabulary, add_eos=True),
'targets': Feature(self.vocabulary, add_eos=False)
},
metric_fns=self.metrics_fns)
sequence_length = {'inputs': 10, 'targets': 11}
for num_sentinels in [1, 2, 4]:
experimental.add_task_with_sentinels(
'encoder_decoder_task', num_sentinels=num_sentinels)
for sentinel_num in [1, 2, 4]:
sentinel_ids = [
self.vocabulary.vocab_size - (i + 1) for i in range(sentinel_num)]
self.validate_sentinel_task(
f'encoder_decoder_task_{sentinel_num}_sentinel', sequence_length,
[
{
'inputs': [1, 10, 10] + sentinel_ids,
'targets': sentinel_ids + [1, 11]
},
{
'inputs': [2, 10, 10] + sentinel_ids,
'targets': sentinel_ids + [2, 11]
},
])
if __name__ == '__main__':
absltest.main()
|
python
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Map a DAGCircuit onto a `coupling_map` adding swap gates."""
from logging import getLogger
from math import inf
from collections import OrderedDict
import numpy as np
from qiskit.circuit.quantumregister import QuantumRegister
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.dagcircuit import DAGCircuit
from qiskit.circuit.library.standard_gates import SwapGate
from qiskit.transpiler.layout import Layout
# pylint: disable=no-name-in-module
from .cython.stochastic_swap.utils import nlayout_from_layout
# pylint: disable=no-name-in-module
from .cython.stochastic_swap.swap_trial import swap_trial
logger = getLogger(__name__)
class StochasticSwap(TransformationPass):
"""Map a DAGCircuit onto a `coupling_map` adding swap gates.
Uses a randomized algorithm.
Notes:
1. Measurements may occur and be followed by swaps that result in repeated
measurement of the same qubit. Near-term experiments cannot implement
these circuits, so some care is required when using this mapper
with experimental backend targets.
2. We do not use the fact that the input state is zero to simplify
the circuit.
"""
def __init__(self, coupling_map, trials=20, seed=None):
"""StochasticSwap initializer.
The coupling map is a connected graph
If these are not satisfied, the behavior is undefined.
Args:
coupling_map (CouplingMap): Directed graph representing a coupling
map.
trials (int): maximum number of iterations to attempt
seed (int): seed for random number generator
"""
super().__init__()
self.coupling_map = coupling_map
self.trials = trials
self.seed = seed
self.qregs = None
self.rng = None
self.trivial_layout = None
def run(self, dag):
"""Run the StochasticSwap pass on `dag`.
Args:
dag (DAGCircuit): DAG to map.
Returns:
DAGCircuit: A mapped DAG.
Raises:
TranspilerError: if the coupling map or the layout are not
compatible with the DAG
"""
if len(dag.qregs) != 1 or dag.qregs.get('q', None) is None:
            raise TranspilerError('StochasticSwap runs on physical circuits only')
if len(dag.qubits()) > len(self.coupling_map.physical_qubits):
raise TranspilerError('The layout does not match the amount of qubits in the DAG')
canonical_register = dag.qregs['q']
self.trivial_layout = Layout.generate_trivial_layout(canonical_register)
self.qregs = dag.qregs
if self.seed is None:
self.seed = np.random.randint(0, np.iinfo(np.int32).max)
self.rng = np.random.default_rng(self.seed)
logger.debug("StochasticSwap default_rng seeded with seed=%s", self.seed)
new_dag = self._mapper(dag, self.coupling_map, trials=self.trials)
return new_dag
def _layer_permutation(self, layer_partition, layout, qubit_subset,
coupling, trials):
"""Find a swap circuit that implements a permutation for this layer.
The goal is to swap qubits such that qubits in the same two-qubit gates
are adjacent.
Based on S. Bravyi's algorithm.
Args:
layer_partition (list): The layer_partition is a list of (qu)bit
lists and each qubit is a tuple (qreg, index).
layout (Layout): The layout is a Layout object mapping virtual
qubits in the input circuit to physical qubits in the coupling
graph. It reflects the current positions of the data.
qubit_subset (list): The qubit_subset is the set of qubits in
the coupling graph that we have chosen to map into, as tuples
(Register, index).
coupling (CouplingMap): Directed graph representing a coupling map.
This coupling map should be one that was provided to the
stochastic mapper.
trials (int): Number of attempts the randomized algorithm makes.
Returns:
Tuple: success_flag, best_circuit, best_depth, best_layout
If success_flag is True, then best_circuit contains a DAGCircuit with
the swap circuit, best_depth contains the depth of the swap circuit,
and best_layout contains the new positions of the data qubits after the
swap circuit has been applied.
Raises:
TranspilerError: if anything went wrong.
"""
logger.debug("layer_permutation: layer_partition = %s",
layer_partition)
logger.debug("layer_permutation: layout = %s",
layout.get_virtual_bits())
logger.debug("layer_permutation: qubit_subset = %s",
qubit_subset)
logger.debug("layer_permutation: trials = %s", trials)
# The input dag is on a flat canonical register
# TODO: cleanup the code that is general for multiple qregs below
canonical_register = QuantumRegister(len(layout), 'q')
qregs = OrderedDict({canonical_register.name: canonical_register})
gates = [] # list of lists of tuples [[(register, index), ...], ...]
for gate_args in layer_partition:
if len(gate_args) > 2:
raise TranspilerError("Layer contains > 2-qubit gates")
if len(gate_args) == 2:
gates.append(tuple(gate_args))
logger.debug("layer_permutation: gates = %s", gates)
# Can we already apply the gates? If so, there is no work to do.
dist = sum([coupling.distance(layout[g[0]], layout[g[1]])
for g in gates])
logger.debug("layer_permutation: distance = %s", dist)
if dist == len(gates):
logger.debug("layer_permutation: nothing to do")
circ = DAGCircuit()
circ.add_qreg(canonical_register)
return True, circ, 0, layout
# Begin loop over trials of randomized algorithm
num_qubits = len(layout)
best_depth = inf # initialize best depth
best_edges = None # best edges found
best_circuit = None # initialize best swap circuit
best_layout = None # initialize best final layout
cdist2 = coupling._dist_matrix**2
# Scaling matrix
scale = np.zeros((num_qubits, num_qubits))
int_qubit_subset = _regtuple_to_numeric(qubit_subset, qregs)
int_gates = _gates_to_idx(gates, qregs)
int_layout = nlayout_from_layout(layout, qregs, coupling.size())
trial_circuit = DAGCircuit() # SWAP circuit for slice of swaps in this trial
for qubit in layout.get_virtual_bits().keys():
if qubit.register not in trial_circuit.qregs.values():
trial_circuit.add_qreg(qubit.register)
edges = np.asarray(coupling.get_edges(), dtype=np.int32).ravel()
cdist = coupling._dist_matrix
for trial in range(trials):
logger.debug("layer_permutation: trial %s", trial)
# This is one Trial --------------------------------------
dist, optim_edges, trial_layout, depth_step = swap_trial(num_qubits, int_layout,
int_qubit_subset,
int_gates, cdist2,
cdist, edges, scale,
self.rng)
logger.debug("layer_permutation: final distance for this trial = %s", dist)
if dist == len(gates) and depth_step < best_depth:
logger.debug("layer_permutation: got circuit with improved depth %s",
depth_step)
best_edges = optim_edges
best_layout = trial_layout
best_depth = min(best_depth, depth_step)
# Break out of trial loop if we found a depth 1 circuit
# since we can't improve it further
if best_depth == 1:
break
# If we have no best circuit for this layer, all of the
# trials have failed
if best_layout is None:
logger.debug("layer_permutation: failed!")
return False, None, None, None
edges = best_edges.edges()
for idx in range(best_edges.size//2):
swap_src = self.trivial_layout[edges[2*idx]]
swap_tgt = self.trivial_layout[edges[2*idx+1]]
trial_circuit.apply_operation_back(SwapGate(), [swap_src, swap_tgt], [])
best_circuit = trial_circuit
# Otherwise, we return our result for this layer
logger.debug("layer_permutation: success!")
best_lay = best_layout.to_layout(qregs)
return True, best_circuit, best_depth, best_lay
def _layer_update(self, i, best_layout, best_depth,
best_circuit, layer_list):
"""Provide a DAGCircuit for a new mapped layer.
Args:
i (int): layer number
best_layout (Layout): layout returned from _layer_permutation
best_depth (int): depth returned from _layer_permutation
best_circuit (DAGCircuit): swap circuit returned from _layer_permutation
layer_list (list): list of DAGCircuit objects for each layer,
output of DAGCircuit layers() method
Returns:
DAGCircuit: a DAGCircuit object to append to the output DAGCircuit
that the _mapper method is building.
"""
layout = best_layout
logger.debug("layer_update: layout = %s", layout)
logger.debug("layer_update: self.trivial_layout = %s", self.trivial_layout)
dagcircuit_output = DAGCircuit()
for qubit in layout.get_virtual_bits().keys():
if qubit.register not in dagcircuit_output.qregs.values():
dagcircuit_output.add_qreg(qubit.register)
# Output any swaps
if best_depth > 0:
logger.debug("layer_update: there are swaps in this layer, "
"depth %d", best_depth)
dagcircuit_output.compose(best_circuit)
else:
logger.debug("layer_update: there are no swaps in this layer")
# Output this layer
layer_circuit = layer_list[i]["graph"]
for creg in layer_circuit.cregs.values():
dagcircuit_output.add_creg(creg)
order = layout.reorder_bits(dagcircuit_output.qubits())
dagcircuit_output.compose(layer_circuit, qubits=order)
return dagcircuit_output
def _mapper(self, circuit_graph, coupling_graph, trials=20):
"""Map a DAGCircuit onto a CouplingMap using swap gates.
Use self.trivial_layout for the initial layout.
Args:
circuit_graph (DAGCircuit): input DAG circuit
coupling_graph (CouplingMap): coupling graph to map onto
trials (int): number of trials.
Returns:
DAGCircuit: object containing a circuit equivalent to
circuit_graph that respects couplings in coupling_graph
Raises:
TranspilerError: if there was any error during the mapping
or with the parameters.
"""
# Schedule the input circuit by calling layers()
layerlist = list(circuit_graph.layers())
logger.debug("schedule:")
for i, v in enumerate(layerlist):
logger.debug(" %d: %s", i, v["partition"])
qubit_subset = self.trivial_layout.get_virtual_bits().keys()
# Find swap circuit to precede each layer of input circuit
layout = self.trivial_layout.copy()
# Construct an empty DAGCircuit with the same set of
# qregs and cregs as the input circuit
dagcircuit_output = DAGCircuit()
dagcircuit_output.name = circuit_graph.name
for qreg in circuit_graph.qregs.values():
dagcircuit_output.add_qreg(qreg)
for creg in circuit_graph.cregs.values():
dagcircuit_output.add_creg(creg)
logger.debug("trivial_layout = %s", layout)
# Iterate over layers
for i, layer in enumerate(layerlist):
# Attempt to find a permutation for this layer
success_flag, best_circuit, best_depth, best_layout \
= self._layer_permutation(layer["partition"], layout,
qubit_subset, coupling_graph,
trials)
logger.debug("mapper: layer %d", i)
logger.debug("mapper: success_flag=%s,best_depth=%s",
success_flag, str(best_depth))
# If this fails, try one gate at a time in this layer
if not success_flag:
logger.debug("mapper: failed, layer %d, "
"retrying sequentially", i)
serial_layerlist = list(layer["graph"].serial_layers())
# Go through each gate in the layer
for j, serial_layer in enumerate(serial_layerlist):
success_flag, best_circuit, best_depth, best_layout = \
self._layer_permutation(
serial_layer["partition"],
layout, qubit_subset,
coupling_graph,
trials)
logger.debug("mapper: layer %d, sublayer %d", i, j)
logger.debug("mapper: success_flag=%s,best_depth=%s,",
success_flag, str(best_depth))
# Give up if we fail again
if not success_flag:
raise TranspilerError("swap mapper failed: " +
"layer %d, sublayer %d" % (i, j))
# Update the record of qubit positions
# for each inner iteration
layout = best_layout
# Update the DAG
dagcircuit_output.compose(
self._layer_update(j,
best_layout,
best_depth,
best_circuit,
serial_layerlist))
else:
# Update the record of qubit positions for each iteration
layout = best_layout
# Update the DAG
dagcircuit_output.compose(
self._layer_update(i,
best_layout,
best_depth,
best_circuit,
layerlist))
# This is the final edgemap. We might use it to correctly replace
# any measurements that needed to be removed earlier.
logger.debug("mapper: self.trivial_layout = %s", self.trivial_layout)
logger.debug("mapper: layout = %s", layout)
return dagcircuit_output
def _regtuple_to_numeric(items, qregs):
"""Takes Qubit instances and converts them into an integer array.
Args:
items (list): List of Qubit instances to convert.
qregs (dict): List of Qubit instances.
Returns:
ndarray: Array of integers.
"""
sizes = [qr.size for qr in qregs.values()]
reg_idx = np.cumsum([0]+sizes)
regint = {}
for ind, qreg in enumerate(qregs.values()):
regint[qreg] = ind
out = np.zeros(len(items), dtype=np.int32)
for idx, val in enumerate(items):
out[idx] = reg_idx[regint[val.register]]+val.index
return out
def _gates_to_idx(gates, qregs):
"""Converts gate tuples into a nested list of integers.
Args:
gates (list): List of Qubit instances representing gates.
qregs (dict): List of Qubit instances.
Returns:
list: Nested list of integers for gates.
"""
sizes = [qr.size for qr in qregs.values()]
reg_idx = np.cumsum([0]+sizes)
regint = {}
for ind, qreg in enumerate(qregs.values()):
regint[qreg] = ind
out = np.zeros(2*len(gates), dtype=np.int32)
for idx, gate in enumerate(gates):
out[2*idx] = reg_idx[regint[gate[0].register]]+gate[0].index
out[2*idx+1] = reg_idx[regint[gate[1].register]]+gate[1].index
return out
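if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the pass itself).
    # It assumes a qiskit-terra version matching the API used in this module:
    # a 3-qubit circuit with a CX between non-adjacent qubits is routed onto a
    # linear 0-1-2 coupling map, letting StochasticSwap insert the needed swaps.
    from qiskit import QuantumCircuit
    from qiskit.transpiler import CouplingMap, PassManager

    qc = QuantumCircuit(3)
    qc.h(0)
    qc.cx(0, 2)  # qubits 0 and 2 are not adjacent on the 0-1-2 line
    coupling = CouplingMap([[0, 1], [1, 2]])
    pm = PassManager(StochasticSwap(coupling, trials=20, seed=42))
    print(pm.run(qc))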
|
python
|
import time
import RPi.GPIO as GPIO
import SerialWombatPigpioI2c
import SerialWombatServo
import SerialWombatAnalogInput
import SerialWombatQuadEnc
GPIO.setwarnings(False)
sw = SerialWombatPigpioI2c.SerialWombatChipPigpioI2c(17,27,0x6D)
sw.begin(False)
print(sw.version)
print(sw.model)
print(sw.fwVersion)
servo = SerialWombatServo.SerialWombatServo(sw)
servo.attach(3)
analog = SerialWombatAnalogInput.SerialWombatAnalogInput(sw)
analog.begin(2)
knob = SerialWombatQuadEnc.SerialWombatQuadEnc(sw)
knob.begin(0,1,10)
print("Pin 2 analog: ",analog.readPublicData())
print("Source Voltage mv: ",sw.readSupplyVoltage_mV())
time.sleep(2)
while(True):
print(knob.read()," ",analog.readCounts())
servo.write16bit(analog.readCounts())
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# newclass.py
from pfp_sdk.PFPUtil import *
# Explicit imports for the names used below.
import wx
import sqlite3
class Example(wx.Frame):
def __init__(self, parent, title):
super(Example, self).__init__(parent, title=title, pos=(100,100), size=(800, 230))
self.InitUI()
self.Centre()
self.Show()
def InitUI(self):
#read config file
self.default_modulelistDB_path = ""
self.user = ""
self.contact = ""
panel = wx.Panel(self)
sizer = wx.GridBagSizer(9, 9)
#---Main Text
text1 = wx.StaticText(panel, label=" Category")
sizer.Add(text1, pos=(0, 0), flag=wx.EXPAND, border=50)
#---Logo
#icon = wx.StaticBitmap(panel, bitmap=wx.Bitmap('PFPModule/icons/SelfTest.png'))
#sizer.Add(icon, pos=(0, 3), flag=wx.EXPAND, border=5)
line = wx.StaticLine(panel)
sizer.Add(line, pos=(1, 0), span=(1, 4), flag=wx.EXPAND|wx.BOTTOM, border=5)
#---Category Combo
text2 = wx.StaticText(panel, label=" Category Name")
sizer.Add(text2, pos=(2, 0), flag=wx.EXPAND, border=5)
self.tc2 = wx.TextCtrl(panel)
sizer.Add(self.tc2, pos=(2, 1), span=(1, 3), flag=wx.TOP|wx.EXPAND, border=5)
text3 = wx.StaticText(panel, label=" Description")
sizer.Add(text3, pos=(3, 0), flag=wx.EXPAND, border=5)
self.tc3 = wx.TextCtrl(panel)
sizer.Add(self.tc3, pos=(3, 1), span=(1, 3), flag=wx.TOP|wx.EXPAND, border=5)
#---Last Buttons
self.button91 = wx.Button(panel, label="Apply", size = wx.Size(70,30))
sizer.Add(self.button91, pos=(4, 1), span=(1, 1), flag=wx.ALIGN_RIGHT)
self.button91.Bind(wx.EVT_BUTTON, self.OnButtonOK)
self.button92 = wx.Button(panel, label="Cancel", size = wx.Size(70,30))
sizer.Add(self.button92, pos=(4, 2), span=(1, 1), flag=wx.ALIGN_LEFT)
self.button92.Bind(wx.EVT_BUTTON, self.OnButtonCancel)
sizer.AddGrowableCol(2)
panel.SetSizer(sizer)
def OnButtonOK(self, event):
#Add RowID of Module into ModuleIDs field in the ModuleCategory table
con = sqlite3.connect( self.default_modulelistDB_path )
cursor = con.cursor()
SelectQuery = "select * from ModuleCategory;"
cursor.execute( SelectQuery )
Results = cursor.fetchall()
count = 0
for row in Results:
if row[1].lower() == self.tc2.GetValue().lower():
count += 1
if count > 0:
wx.MessageBox("Category Name is duplicated")
return
        # Use a parameterized query so quotes in the user input cannot break the SQL.
        InsertQuery = "insert into ModuleCategory values (null, ?, ?, '')"
        cursor.execute( InsertQuery, (self.tc2.GetValue(), self.tc3.GetValue()) )
con.commit()
con.close()
self.Close()
return
def OnButtonCancel(self, event):
self.Close()
return
def main():
app = wx.App()
Example(None, title="Module Category Setting")
app.MainLoop()
if __name__ == '__main__':
main()
|
python
|
"""A practical configuration system.
"""
from .extension_point import ExtensionPoint # noqa: F401
from .loading import load_from_module, load_from_pkg_resources # noqa: F401
from .option import build_default_config, Option # noqa: F401
from .profile import Profile # noqa: F401
from .utilities import merge # noqa: F401
|
python
|
"""Transform metrics stored in SQuaSH into InfluxDB format.
See sqr-009.lsst.io for a description on how metrics are stored in SQuaSH and
the resulting InfluxDB data model.
"""
__all__ = ["Transformer"]
import logging
import math
import pathlib
import urllib.parse
import requests
import yaml
from requests.exceptions import ConnectionError, HTTPError
from squash.tasks.utils.format import Formatter
logger = logging.getLogger("squash")
class Transformer(Formatter):
"""Transform metrics stored in SQuaSH into InfluxDB format.
Parameters
----------
squash_api_url : `str`
SQuaSH API URL.
data : `str`
SQuaSH job data in JSON.
"""
def __init__(self, squash_api_url, data):
super().__init__(squash_api_url=squash_api_url)
self.squash_api_url = squash_api_url
self.data = data
self.mapping = self.load_mapping()
def load_mapping(self):
"""Load the SQuaSH to InfluxDB mapping.
Returns
-------
mapping : `dict`
Dictionary with the SQuaSH to InfluxDB mapping.
"""
filename = pathlib.Path(__file__).parent / "mapping.yaml"
with open(filename) as f:
mapping = yaml.load(f, Loader=yaml.FullLoader)
return mapping
def run_mapping(self, key):
"""Return schema, key, and transformation from the mapping.
Parameters
----------
key : `str`
The key to look for in the mapping.
Returns
-------
schema : `str` or `None`
The InfluxDB schema to write or `None` if it should not
be added to InfluxDB.
mapped_key : `str` or `None`
The mapped key or `None` if it should not be added to InfluxDB.
transformation : `str` or `None`
The transformation that should be applied to the value if any.
"""
        # By default, if the key is not found in the mapping, it is
        # added to InfluxDB as a tag, preserving the original name.
schema = "tag"
mapped_key = key
transformation = None
if key in self.mapping:
item = self.mapping[key]
schema = item["schema"]
mapped_key = item["key"]
transformation = item["transformation"]
return schema, mapped_key, transformation
def get_timestamp(self):
"""Get the timestamp to use in InfluxDB.
        Use the timestamp when the verification job is recorded. If it runs
        in Jenkins, use the pipeline runtime instead.
Returns
-------
timestamp : `int`
Formatted timestamp.
"""
timestamp = Formatter.format_timestamp(self.data["date_created"])
if self.data["meta"]["env"]["env_name"] == "jenkins":
ci_id = self.data["meta"]["env"]["ci_id"]
ci_name = self.data["meta"]["env"]["ci_name"]
# Get timestamp from Jenkins
jenkins_url = (
f"{self.squash_api_url}/jenkins/{ci_id}?ci_name={ci_name}"
)
try:
r = requests.get(jenkins_url)
r.raise_for_status()
except HTTPError:
message = "Could not get timestamp from Jenkins."
logger.error(message)
except ConnectionError:
message = (
f"Failed to establish connection with Jenkins "
f"{jenkins_url}."
)
logger.error(message)
date_created = r.json()["date_created"]
timestamp = Formatter.format_timestamp(date_created)
return timestamp
def update_metadata(self):
"""Add/remove metadata before the trandformation step."""
# Add extra metadata
id = self.data["id"]
self.data["meta"]["id"] = id
self.data["meta"]["url"] = urllib.parse.urljoin(
self.squash_api_url, f"/job/{id}"
)
self.data["meta"]["date_created"] = self.data["date_created"]
self.data["meta"]["env"]["ci_dataset"] = self.data["ci_dataset"]
# Fix dataset_repo_url duplication
if "dataset_repo_url" in self.data["meta"].keys():
del self.data["meta"]["dataset_repo_url"]
# Fix use of ci_dataset key in environments other than jenkins
if self.data["meta"]["env"]["env_name"] != "jenkins":
if "ci_dataset" in self.data["meta"]["env"]:
del self.data["meta"]["env"]["ci_dataset"]
# Add code changes metadata keys
if self.data["meta"]["env"]["env_name"] == "jenkins":
self.data["meta"]["env"]["code_changes"] = ""
self.data["meta"]["env"]["code_changes_counts"] = ""
        # Add ci_name until DM-18599 is implemented
if "ci_url" in self.data["meta"]["env"].keys():
if "validate_drp_gen3" in self.data["meta"]["env"]["ci_url"]:
self.data["meta"]["env"]["ci_name"] = "validate_drp_gen3"
elif "validate_drp" in self.data["meta"]["env"]["ci_url"]:
self.data["meta"]["env"]["ci_name"] = "validate_drp"
elif "ap_verify" in self.data["meta"]["env"]["ci_url"]:
self.data["meta"]["env"]["ci_name"] = "ap_verify"
def process_metadata(self, data):
"""Process SQuaSH metadata using a pre-configured mapping to InfluxDB.
Parameters
----------
data : `dict`
A dictionary with SQuaSH metadata.
        Returns
        -------
        tags : `list`
            List of tags to be written to InfluxDB.
        fields : `list`
            List of fields to be written to InfluxDB.
"""
tags = []
fields = []
for key, value in data.items():
# process nested dict
if isinstance(value, dict):
tmp_tags, tmp_fields = self.process_metadata(value)
tags.extend(tmp_tags)
fields.extend(tmp_fields)
else:
schema, mapped_key, transformation = self.run_mapping(key)
if transformation:
value = eval(transformation)
if mapped_key and schema == "tag":
tags.append(
"{}={}".format(
Formatter.sanitize(mapped_key),
Formatter.sanitize(value),
)
)
elif mapped_key and schema == "field":
if isinstance(value, str):
fields.append(
'{}="{}"'.format(
Formatter.sanitize(mapped_key), value
)
)
else:
fields.append(
"{}={}".format(
Formatter.sanitize(mapped_key), value
)
)
# Make sure tags and fields are unique
tags = list(set(tags))
fields = list(set(fields))
return tags, fields
def get_meas_by_package(self):
"""Group verify measurements by package.
        By grouping verify measurements by package, we can send them to InfluxDB
        in batches. A package is mapped to an InfluxDB measurement.
"""
meas_by_package = {}
for meas in self.data["measurements"]:
# DM-18360 - SQuaSH API/measurements should return the verification
# package
# a metric fqn is <package>.<metric>, extract package name from the
# metric fqn
package = None
if "." in meas["metric"]:
package = meas["metric"].split(".")[0]
if package:
# No need to carry the package name prefix in the metric name.
if meas["metric"].startswith(package):
metric = meas["metric"][len(package) + 1 :]
value = meas["value"]
# InfluxDB does not store NaNs and it is safe to just skip
# values that are NaN.
# https://github.com/influxdata/influxdb/issues/4089
if not math.isnan(value):
if package not in meas_by_package:
meas_by_package[package] = []
meas_by_package[package].append(f"{metric}={value}")
return meas_by_package
def to_influxdb_line(self):
"""Process job data and make the InfluxDB lines.
Returns
-------
influxdb_lines : `list`
A list with strings representing each InfluxDB line.
"""
timestamp = self.get_timestamp()
self.update_metadata()
tags, extra_fields = self.process_metadata(self.data["meta"])
meas_by_package = self.get_meas_by_package()
influxdb_lines = []
for meas in meas_by_package:
fields = meas_by_package[meas] + extra_fields
influxdb_lines.append(
Formatter.format_influxdb_line(meas, tags, fields, timestamp)
)
return influxdb_lines
|
python
|
from globibot.lib.web.handlers import SessionHandler
from globibot.lib.web.decorators import authenticated, respond_json
from http import HTTPStatus
server_data = lambda server: dict(
id = server.id,
name = server.name,
icon_url = server.icon_url,
)
class GuildHandler(SessionHandler):
@authenticated
@respond_json
def get(self, server_id):
server = self.bot.find_server(server_id)
if server:
return server_data(server)
else:
self.set_status(HTTPStatus.BAD_REQUEST)
|
python
|
import json
import random
import glob
import torch
import numpy as np
import clip.clip as clip
import pickle
from collections import Counter, defaultdict
from tqdm import tqdm
from torch.utils.data import DataLoader
import sys
from vqa.vqa_dataset import VQADataset
SOFT_PROMPT = True
ITER_TO_BREAK = 999
def eval_init():
global model, preprocess, device
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
print(clip.available_models())
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f'Using device: {device}')
model, preprocess = clip.load("RN50", device=device, download_root='/home/work/checkpoints/CLIP')
def clip_infer(image, text):
with torch.no_grad():
image_features = model.encode_image(image)
b, k, n = text.size()
text = text.view(b*k, n)
text_features = model.encode_text(text, soft_prompting=SOFT_PROMPT)
text_features = text_features.view(b, k, -1)
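        # text carries k candidate answers per image; all b*k prompts are encoded
        # in a single batch and reshaped back to (b, k, dim) for per-image scoring.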
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = model.logit_scale.exp()
logits_per_image = logit_scale * torch.bmm(image_features.unsqueeze(1), text_features.permute(0,2,1)).squeeze(1)
probs = logits_per_image.softmax(dim=-1).cpu()
return probs
def main():
eval_init()
TP = 0
upper_bound_accuracy = 0
n_samples = 0
if sys.gettrace() is not None:
N_WORKERS = 0
else:
N_WORKERS = 4
dataset = VQADataset('/home/work/Datasets/vqa2', preprocess, clip.tokenize, 'val')
loader = DataLoader(dataset, 256, shuffle=False, num_workers=N_WORKERS)
for i, (text, image, label) in enumerate(tqdm(loader)):
image = image.to(device)
text = text.to(device)
upper_bound_accuracy += label.max(dim=1).values.sum().item()
probs = clip_infer(image, text)
pred_answer = torch.argmax(probs, dim=1)
        # Index with the actual batch size; the final batch may be smaller than 256.
        TP += label[torch.arange(label.size(0)), pred_answer].sum().item()
n_samples += image.size(0)
if i == ITER_TO_BREAK:
break
print(f'TP: {TP}, Accuracy: {TP/n_samples}, Upper bound: {upper_bound_accuracy / n_samples}')
main()
|
python
|
"""
Created on Jan 27, 2016
@author: tmahrt
Tests that praat files can be read in and then written out, and that the two
resulting files are the same.
This does not test that the file reader is correct. If the file
reader is bad (e.g. truncates floating points to 1 decimal place), the
resulting data structures will look the same for both the source and
generated files.
"""
import unittest
import os
import io
from os.path import join
from praatio import tgio
from praatio import dataio
from praatio import kgio
from praatio import audioio
def areTheSame(fn1, fn2, fileHandler):
"""
Tests that files contain the same data
    If fileHandler is a tgio file reader like tgio.openTextgrid, then
    we can compare a short textgrid and a long textgrid.
    If fileHandler is readFile or io.open, etc., then the raw
    text will be compared.
"""
data1 = fileHandler(fn1)
data2 = fileHandler(fn2)
return data1 == data2
def readFile(fn):
with io.open(fn, "r") as fd:
return fd.read()
def run_save(
tg,
minimumIntervalLength=None,
minTimestamp=None,
maxTimestamp=None,
ignoreBlankSpaces=False,
):
"""
Mock write function and return the first tier's entry list
tg.save() mutates the textgrid's data, so the entry list
before and after saving can be different
"""
tg.save(
"garbage.Textgrid",
minimumIntervalLength=minimumIntervalLength,
minTimestamp=minTimestamp,
maxTimestamp=maxTimestamp,
ignoreBlankSpaces=ignoreBlankSpaces,
)
entryList = tg.tierDict[tg.tierNameList[0]].entryList
entryList = [[start, end, label] for start, end, label in entryList]
return entryList
class IOTests(unittest.TestCase):
"""Testing input and output"""
def __init__(self, *args, **kargs):
super(IOTests, self).__init__(*args, **kargs)
cwd = os.path.dirname(os.path.realpath(__file__))
root = os.path.split(cwd)[0]
self.dataRoot = join(root, "files")
self.outputRoot = join(self.dataRoot, "io_test_output")
def setUp(self):
if not os.path.exists(self.outputRoot):
os.mkdir(self.outputRoot)
def test_reading_textgrids_with_newlines_in_labels(self):
"""Tests for reading/writing textgrids with newlines"""
fn = "bobby_words_with_newlines.TextGrid"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
tg = tgio.openTextgrid(inputFN)
tg.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, readFile))
def test_reading_long_textgrids_with_newlines_in_labels(self):
"""Tests for reading/writing textgrids with newlines"""
fn = "bobby_words_with_newlines_longfile.TextGrid"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
tg = tgio.openTextgrid(inputFN)
tg.save(outputFN, useShortForm=False)
self.assertTrue(areTheSame(inputFN, outputFN, readFile))
fn = "bobby_words_with_newlines_longfile_elan.TextGrid"
elanInputFN = join(self.dataRoot, fn)
elanOutputFN = join(self.outputRoot, fn)
tg = tgio.openTextgrid(elanInputFN)
tg.save(elanOutputFN, useShortForm=False)
self.assertTrue(areTheSame(inputFN, elanOutputFN, readFile))
def test_tg_io(self):
"""Tests for reading/writing textgrid io"""
fn = "textgrid_to_merge.TextGrid"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
tg = tgio.openTextgrid(inputFN)
tg.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, readFile))
def test_tg_io_long_vs_short(self):
"""Tests reading of long vs short textgrids"""
shortFN = join(self.dataRoot, "textgrid_to_merge.TextGrid")
longFN = join(self.dataRoot, "textgrid_to_merge_longfile.TextGrid")
self.assertTrue(areTheSame(shortFN, longFN, tgio.openTextgrid))
def test_saving_short_textgrid(self):
"""Tests that short textgrid files are saved non-destructively"""
fn = "textgrid_to_merge.TextGrid"
shortFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, "saved_short_file.textgrid")
tg = tgio.openTextgrid(shortFN)
tg.save(outputFN)
self.assertTrue(areTheSame(shortFN, outputFN, readFile))
def test_saving_long_textgrid(self):
"""Tests that long textgrid files are saved non-destructively"""
fn = "textgrid_to_merge_longfile.TextGrid"
longFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, "saved_long_file.textgrid")
tg = tgio.openTextgrid(longFN)
tg.save(outputFN, useShortForm=False)
self.assertTrue(areTheSame(longFN, outputFN, readFile))
def test_saving_and_loading_json(self):
"""Tests that json files are saved non-destructively"""
fn = "mary.TextGrid"
shortFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, "saved_textgrid_as_json.json")
outputLastFN = join(
self.outputRoot, "saved_textgrid_as_json_then_textgrid.TextGrid"
)
tgFromTgFile = tgio.openTextgrid(shortFN)
tgFromTgFile.save(outputFN, outputFormat=tgio.JSON)
tgFromJsonFile = tgio.openTextgrid(outputFN, readAsJson=True)
tgFromJsonFile.save(outputLastFN)
self.assertTrue(areTheSame(shortFN, outputLastFN, readFile))
def test_get_audio_duration(self):
"""Tests that the two audio duration methods output the same value."""
wavFN = join(self.dataRoot, "bobby.wav")
durationA = tgio._getWavDuration(wavFN)
durationB = audioio.getDuration(wavFN)
self.assertTrue(durationA == durationB)
def test_duration_tier_io(self):
"""Tests for reading/writing duration tiers"""
fn = "mary.DurationTier"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
dt = dataio.open2DPointObject(inputFN)
dt.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, dataio.open2DPointObject))
def test_pitch_io(self):
"""Tests for reading/writing pitch tiers"""
fn = "mary.PitchTier"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
pp = dataio.open2DPointObject(inputFN)
pp.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, dataio.open2DPointObject))
def test_pitch_io_long_vs_short(self):
"""Tests reading of long vs short 2d point objects"""
shortFN = join(self.dataRoot, "mary.PitchTier")
longFN = join(self.dataRoot, "mary_longfile.PitchTier")
self.assertTrue(areTheSame(shortFN, longFN, dataio.open2DPointObject))
def test_point_process_io(self):
"""Tests for reading/writing point processes"""
fn = "bobby.PointProcess"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
pp = dataio.open1DPointObject(inputFN)
pp.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, dataio.open1DPointObject))
def test_point_process_io_long_vs_short(self):
shortFN = join(self.dataRoot, "bobby.PointProcess")
longFN = join(self.dataRoot, "bobby_longfile.PointProcess")
self.assertTrue(areTheSame(shortFN, longFN, dataio.open1DPointObject))
def test_kg_io(self):
"""Tests for reading/writing klattgrids"""
fn = "bobby.KlattGrid"
inputFN = join(self.dataRoot, fn)
outputFN = join(self.outputRoot, fn)
kg = kgio.openKlattgrid(inputFN)
kg.save(outputFN)
self.assertTrue(areTheSame(inputFN, outputFN, kgio.openKlattgrid))
def test_save(self):
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0.0, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg)
self.assertEqual(expectedEntryList, actualEntryList)
def test_save_with_minimum_time_stamp(self):
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0.3, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg)
self.assertEqual(expectedEntryList, actualEntryList)
def test_save_with_force_zero_as_minimum_time(self):
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg, minTimestamp=0)
self.assertEqual(expectedEntryList, actualEntryList)
def test_save_with_force_larger_value_as_maximum_time(self):
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0.3, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 3.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg, maxTimestamp=3.0)
self.assertEqual(expectedEntryList, actualEntryList)
def test_save_with_force_too_large_minimum_time(self):
# If you choose to force save to use a minTimestamp, all
# of your entries must be higher than that minTimestamp
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
self.assertRaises(AssertionError, run_save, tg, minTimestamp=1.0)
    def test_save_with_force_too_small_maximum_time(self):
        # If you choose to force save to use a maxTimestamp, all
        # of your entries must be lower than that maxTimestamp
userEntryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0, 0.4, ""],
[0.4, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
self.assertRaises(AssertionError, run_save, tg, maxTimestamp=1.0)
def test_save_with_minimum_interval_length(self):
# The first entry will be stretched to fill the unlabeled region in
# front of it: [0.30, 0.35, ''] (The unlabeled region starts at 0.3
# instead of 0 because the minTimestamp for this tg is 0.3)
userEntryList = [[0.35, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = [
[0.3, 0.6, "A"],
[0.6, 0.8, ""],
[0.8, 1.0, "E"],
[1.0, 1.2, ""],
[1.2, 1.3, "I"],
[1.3, 2.0, ""],
]
tier = tgio.IntervalTier("test", userEntryList, 0.3, 2.0)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg, minimumIntervalLength=0.06)
self.assertEqual(expectedEntryList, actualEntryList)
def test_save_with_ignore_blank_sections(self):
"""
Tests that blank sections can be ignored on saving a textgrid
"""
entryList = [[0.4, 0.6, "A"], [0.8, 1.0, "E"], [1.2, 1.3, "I"]]
expectedEntryList = entryList # Blank intervals should not be inserted
tier = tgio.IntervalTier("test", entryList)
tg = tgio.Textgrid()
tg.addTier(tier)
actualEntryList = run_save(tg, ignoreBlankSpaces=True)
self.assertEqual(expectedEntryList, actualEntryList)
if __name__ == "__main__":
unittest.main()
|
python
|
from pyfluminus.authorization import vafs_jwt
from pyfluminus.api import name, modules, get_announcements
from pyfluminus.structs import Module
from flask import Flask, request, jsonify, redirect, url_for, render_template
import sys
from app import app, db, util
from app.models import User, User_Mods, Announcements, Mod_files
from app.extra_api import get_class_grps
import json
from sqlalchemy.orm.attributes import flag_modified
HTTP_OK = 200
HTTP_NO_CONTENT = 204
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORISED = 401
HTTP_NOT_FOUND = 404
@app.route('/')
def index():
return 'Main page'
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
@app.route('/get_class_grps', methods=['POST'])
def f():
mod_id = request.get_json()['mod_id']
auth = request.get_json()['auth']
return get_class_grps(auth, mod_id)
# receives login info and returns auth token, login info must be sent as application/json
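# Example request (illustrative values only):
#   POST /login  with JSON body {"userName": "e0123456", "password": "secret"}
# On success the response wraps the auth token returned by vafs_jwt.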
@app.route('/login', methods=['POST'])
def login():
login_info = request.get_json()
print(login_info['userName']+'\n')
if login_info['userName'] == 'test':
auth = {'jwt' : 'test'}
return util.response_json(True, 1, auth), HTTP_OK
auth = vafs_jwt("nusstu\\" + login_info['userName'].upper(), login_info['password'])
if "error" in auth:
return util.response_json(False, 1, auth), HTTP_UNAUTHORISED
user_id = login_info['userName'].upper()
if User.query.filter_by(nus_net_id=user_id).first() == None:
uName = name(auth).data
u = User(name = uName, nus_net_id = user_id)
#mods = util.get_active_mods(auth)
db.session.add(u)
db.session.commit()
uId = User.query.filter_by(nus_net_id=user_id).first().id
util.add_mods(auth, uId)
u = User.query.get(uId)
u.get_busy_time()
flag_modified(u, "timetable")
db.session.commit()
return util.response_json(True, 1, auth), HTTP_OK
@app.route('/name', methods=['POST'])
def userName():
try:
auth = request.get_json()
return util.response_json(True, 1, name(auth).data), HTTP_OK
except:
return util.response_json(False, 1, {"error" : "Invalid"}), HTTP_NOT_FOUND
@app.route('/updateProfile', methods=['POST'])
def updateProfile():
login_info = request.get_json()
auth = vafs_jwt("nusstu\\" + login_info['userName'], login_info['password'])
user_id = login_info['userName'].upper()
if "error" in auth:
return util.response_json(False, 1, auth), HTTP_UNAUTHORISED
if User.query.filter_by(nus_net_id=user_id).first() != None:
uName = name(auth).data
        # db.update(...) only builds a statement; run the update through the query API.
        User.query.filter_by(nus_net_id=user_id).update({'name': uName})
db.session.commit()
else:
uName = name(auth).data
u = User(name = uName, nus_net_id = user_id)
db.session.add(u)
db.session.commit()
if User.query.filter_by(nus_net_id=user_id).first().mods == []:
uId = User.query.filter_by(nus_net_id=user_id).first().id
util.add_mods(auth, uId)
else:
uId = User.query.filter_by(nus_net_id=user_id).first().id
util.update_mods(auth, uId)
u = User.query.filter_by(nus_net_id=user_id).first()
u.get_busy_time()
flag_modified(u, "timetable")
db.session.commit()
return redirect(url_for('profile', nusNetId=user_id))
@app.route('/activeModules', methods=['POST'])
def active_mods():
try:
auth = request.get_json()
mods = util.get_active_mods(auth)
return util.response_json(True, len(mods), mods), HTTP_OK
except:
return util.response_json(False, 1, {"error" : "Invalid"}), HTTP_NOT_FOUND
@app.route('/announcementsAll', methods = ['POST'])
def announcements():
try:
auth = request.get_json()
msgs = util.get_all_announcement(auth)
return util.response_json(True, len(msgs), msgs), HTTP_OK
except:
return util.response_json(False, 1, {"error" : "Invalid"}), HTTP_NOT_FOUND
@app.route('/profile/<nusNetId>')
def profile(nusNetId):
try:
user = User.query.filter_by(nus_net_id=nusNetId).first()
uId = user.id
mods = User_Mods.query.filter_by(student=uId).all()
mod_info = {}
for mod in mods:
mod_info[mod.code] = {"id" : mod.mod_id,
"name" : mod.name,
"term" : mod.term}
return util.response_json(True, len(mods), {
"name" : user.name,
"mods" : mod_info,
"timetable" : user.timetable}), HTTP_OK
except:
return util.response_json(False, 1, {"error" : "Not found"}), HTTP_NOT_FOUND
@app.route('/modules/filesAll', methods=['POST'])
def files_all():
auth = request.get_json()
files = util.get_mod_files(auth)
return util.response_json(True, len(files), files), HTTP_OK
@app.route('/modules/files', methods=['POST'])
def files():
auth = request.get_json()['auth']
code = request.get_json()['code']
files = json.dumps(util.get_single_mod_files(auth, code))
f = Mod_files(code=code, contents=files)
db.session.add(f)
db.session.commit()
return util.response_json(True, len(files), files), HTTP_OK
@app.route('/modules/announcements', methods=['POST'])
def announcements_single():
auth = request.get_json()['auth']
code = request.get_json()['code']
mod_id = User_Mods.query.filter_by(code=code).first().mod_id
msgs = util.get_single_mod_announcements(auth, mod_id)
m = Announcements(code=code, contents=msgs)
db.session.add(m)
db.session.commit()
return util.response_json(True, len(msgs), msgs), HTTP_OK
@app.route('/modules/announcementsTest', methods=['POST'])
def aTest():
#code = request.get_json()['code']
#reply = Announcements.query.filter_by(code=code).first().contents
#return util.response_json(True, len(reply), reply), HTTP_OK
auth = request.get_json()['auth']
code = request.get_json()['code']
mod_id = User_Mods.query.filter_by(code=code).first().mod_id
msgs = util.get_single_mod_announcements(auth, mod_id)
m = Announcements(code=code, contents=msgs)
db.session.add(m)
db.session.commit()
return util.response_json(True, len(msgs), msgs), HTTP_OK
@app.route('/modules/modFileTest', methods=['POST'])
def fTest():
#code = request.get_json()['code']
#reply = Mod_files.query.filter_by(code=code).first().contents
#return util.response_json(True, len(reply), reply), HTTP_OK
auth = request.get_json()['auth']
code = request.get_json()['code']
files = json.dumps(util.get_single_mod_files(auth, code))
f = Mod_files(code=code, contents=files)
db.session.add(f)
db.session.commit()
return util.response_json(True, len(files), files), HTTP_OK
|
python
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pickle as pkl
import json
from .format_converter import DirectoryBasedAnnotationConverter, ConverterReturn
from ..representation import CharacterRecognitionAnnotation
from ..utils import read_txt, check_file_existence
from ..config import PathField
def read_vocab(vocab_path):
"""Reads vocab file from disk as .pkl or .json
Args:
vocab_path (str): path to vocab file
Raises:
ValueError: If wrong extension of the file
Returns:
        dict: the id2sign dictionary mapping token ids to signs
"""
if vocab_path.suffix == '.pkl':
with open(vocab_path, "rb") as f:
vocab_dict = pkl.load(f)
elif vocab_path.suffix == '.json':
with open(vocab_path, "r") as f:
vocab_dict = json.load(f)
id2sign = {int(k): v for k, v in vocab_dict['id2sign'].items()}
vocab_dict['id2sign'] = id2sign
else:
raise ValueError("Wrong extension of the vocab file")
return vocab_dict["id2sign"]
class Im2latexDatasetConverter(DirectoryBasedAnnotationConverter):
__provider__ = 'im2latex_formula_recognition'
annotation_types = (CharacterRecognitionAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update(
{
'images_dir': PathField(
is_directory=True, optional=False,
description='path to input images'
),
'formula_file': PathField(
optional=True,
description='path to file containing one formula per line'
),
'split_file': PathField(
optional=True,
description='path to split containing image_name\\tformula_idx'
),
'vocab_file': PathField(
optional=True,
description='path to vocabulary'
),
}
)
return configuration_parameters
def configure(self):
super().configure()
self.images_dir = self.get_value_from_config('images_dir')
self.formula_path = self.get_value_from_config('formula_file')
self.split_path = self.get_value_from_config('split_file')
self.vocab_path = self.get_value_from_config('vocab_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""Reads data from disk and returns dataset in converted for AC format
Args:
check_content (bool, optional): Check if content is valid. Defaults to False.
            progress_callback (callable, optional): Callback used to report progress. Defaults to None.
            progress_interval (int, optional): Number of annotation lines between progress reports. Defaults to 100.
Returns:
[type]: Converted dataset
"""
annotations = []
content_errors = None if not check_content else []
split_file = read_txt(self.split_path)
formulas_file = read_txt(self.formula_path)
num_iterations = len(split_file)
vocab = read_vocab(self.vocab_path)
for line_id, line in enumerate(split_file):
img_name, formula_idx = line.split('\t')
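            # Each split line pairs an image with a formula index, e.g. (illustrative):
            #   "0001.png\t17"  ->  ground truth is formulas_file[17]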
gt_formula = formulas_file[int(formula_idx)]
annotations.append(CharacterRecognitionAnnotation(img_name, gt_formula))
if check_content:
if not check_file_existence(self.images_dir / img_name):
content_errors.append('{}: does not exist'.format(img_name))
if progress_callback is not None and line_id % progress_interval == 0:
progress_callback(line_id / num_iterations * 100)
meta = {'vocab': vocab}
return ConverterReturn(annotations, meta, content_errors)
|
python
|
# -*- coding: utf-8 -*-
"""
runserver.py
~~~~~~~~~~~~~
This code launches the backend webserver of moves using flask with eventlet
(for concurrency) and socket.io.
"""
from moves import app,socketio,r
# app.run() would start the plain Flask dev server and block before socketio.run()
# executes; launch through socketio/eventlet only. PORT and DEBUG are assumed to be
# provided by the application's configuration.
socketio.run(app, host='0.0.0.0', port=PORT, debug=DEBUG)
|
python
|
import torch
from models.MaskRCNN import get_model_instance_segmentation
from dataset import PennFudanDataset, get_transform
from references.engine import train_one_epoch, evaluate
from references import utils
# train on the GPU or the CPU, if a GPU is not available
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# our dataset has two classes only - background and person
num_classes = 2
# use our dataset and the defined transformations
dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))
# split the dataset in train and test set
indices = torch.randperm(len(dataset)).tolist()
dataset = torch.utils.data.Subset(dataset, indices[:-50])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
# define training and validation data loaders
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=2, shuffle=True, num_workers=4,
collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=1, shuffle=False, num_workers=4,
collate_fn=utils.collate_fn)
# get the model using our helper function
model = get_model_instance_segmentation(num_classes)
# move model to the right device
model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
momentum=0.9, weight_decay=0.0005)
# and a learning rate scheduler
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
step_size=3,
gamma=0.1)
# let's train it for 10 epochs
num_epoch = 10
for epoch in range(num_epoch):
# train for one epoch, printing every 10 iterations
train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, device=device)
# Saving Model for Inference
torch.save(model.state_dict(), "dict.pth")
print("That's it!")
|
python
|
"""empty message
Revision ID: 8c7f8fa92c20
Revises: c925e4d07621
Create Date: 2018-08-17 13:09:27.720622
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '8c7f8fa92c20'
down_revision = 'c925e4d07621'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('registrants', sa.Column('ab_completed_at', sa.DateTime(), nullable=True))
op.add_column('registrants', sa.Column('ab_permanent', sa.Boolean(), nullable=True))
op.add_column('registrants', sa.Column('vr_completed_at', sa.DateTime(), nullable=True))
op.drop_column('registrants', 'completed_at')
op.drop_column('registrants', 'last_completed_step')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('registrants', sa.Column('last_completed_step', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('registrants', sa.Column('completed_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.drop_column('registrants', 'vr_completed_at')
op.drop_column('registrants', 'ab_permanent')
op.drop_column('registrants', 'ab_completed_at')
# ### end Alembic commands ###
|
python
|
import toml
t = toml.load("Cargo.toml")
crate_version = t['package']['version']
t = toml.load("pyproject.toml")
wheel_version = t['tool']['poetry']['version']
assert crate_version == wheel_version
print(crate_version)
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Parameter
from fairseq import utils
from fairseq.modules.multihead_attention import MultiheadAttention
class RelativePositionEmbeddings(nn.Module):
"""
    Learned relative position embeddings for self-attention with relative positions (Shaw et al., 2018).
"""
def __init__(self, max_rel_positions, embedding_dim, dropout=0.0, direction=True, **params):
super().__init__()
self.window_size = max_rel_positions
self.embedding_dim = embedding_dim
self.direction = direction
num_embeddings = max_rel_positions * 2 + 1 if self.direction else max_rel_positions + 1
self.embeddings = nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim)
self.dropout = nn.Dropout(dropout)
    def map_to_index(self, distance, shift_to_zero=True):
        max_rel_len = self.window_size
        if max_rel_len is not None:
            distance = distance.clamp(-max_rel_len, max_rel_len)
        if self.direction:
            if shift_to_zero and max_rel_len is not None:
                distance = distance + max_rel_len
        else:
            distance = distance.abs()
        return distance
def forward(self, inputs):
"""
:param inputs: length, length, num_embeddings or length
:return:
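        Accepts pre-computed relative-position weights of shape
        (length, length, num_embeddings), a (length, length) distance matrix,
        or a 1-D sequence of positions from which pairwise distances are computed.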
"""
if inputs.dim() > 2:
embed = inputs @ self.embeddings.weight
embed = self.dropout(embed)
return embed
elif inputs.dim() == 2:
distance = inputs
else:
inputs = inputs.squeeze()
distance = inputs[:, None] - inputs[None, :]
distance = self.map_to_index(distance)
embed = self.embeddings(distance)
embed = self.dropout(embed)
return embed
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-1)).squeeze(-1)
def shaw_attention(query, key, pos_key):
"""
:param query:
:param key:
:param pos_key: length, length, depth
:return:
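    Computes content-content plus content-position scores,
    e_ij = q_i . k_j + q_i . a^K_ij, following Shaw et al. (2018).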
"""
bsize, heads, length, depth = key.size()
q_dot_k = matmul(query, key.contiguous().transpose(-1, -2)) # batch, heads, length, length
query_for_pos = query.contiguous().permute(2, 0, 1, 3).view(length, bsize * heads, depth)
pos_for_att = pos_key.contiguous().transpose(-2, -1) # length, depth, length
q_dot_p = matmul(query_for_pos, pos_for_att) # length, batch*heads, length
q_dot_p = q_dot_p.contiguous().permute(1, 0, 2).view(bsize, heads, length, length)
return q_dot_k + q_dot_p
def shaw_combine(probs, value, pos_val):
"""
:param probs:
:param value:
:param pos_val: length, length, depth
:return:
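    Combines value vectors with position-value embeddings,
    z_i = sum_j p_ij (v_j + a^V_ij), following Shaw et al. (2018).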
"""
bsize, heads, length, depth = value.size()
w_dot_v = matmul(probs, value) # batch, head, length, depth
w_for_comb = probs.contiguous().permute(2, 0, 1, 3).view(length, bsize * heads, length)
w_dot_p = matmul(w_for_comb, pos_val) # length,batch*heads, depth
w_dot_p = w_dot_p.contiguous().permute(1, 0, 2).view(bsize, heads, length, depth)
return w_dot_v + w_dot_p
class RelativeSelfAttention(MultiheadAttention):
"""Multi-headed attention with relative attentions.
See "Self Attention with relative positions" for more details.
"""
@classmethod
def relative_attention(cls, query, key, pos_key):
if pos_key.dim() == 3:
return shaw_attention(query, key, pos_key)
@classmethod
def relative_combine(cls, probs, value, pos_val):
if pos_val.dim() == 3:
return shaw_combine(probs, value, pos_val)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
pos_key=None,
pos_val=None,
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
**kwargs
) -> Tuple[Tensor, Optional[Tensor]]:
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
# self-attention
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is not None:
k = (k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1))
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = self.relative_attention(
q.contiguous().view(bsz, self.num_heads, -1, self.head_dim),
k.contiguous().view(bsz, self.num_heads, -1, self.head_dim),
pos_key,
).contiguous().view(bsz * self.num_heads, tgt_len, src_len)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not self.tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf")
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = self.relative_combine(
probs=attn_probs.contiguous().view(bsz, self.num_heads, tgt_len, src_len),
value=v.contiguous().view(bsz, self.num_heads, -1, self.head_dim),
pos_val=pos_val
).contiguous().view(bsz * self.num_heads, -1, self.head_dim)
if self.onnx_trace and attn.size(1) == 1:
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
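# --- Hedged illustration (added for clarity; not part of the original module) ---
# Minimal, self-contained sketch of the incremental key/value cache pattern used in
# forward() above: cached keys are stored as (bsz, num_heads, seq_len, head_dim) and
# concatenated with the current step's keys along the time axis. The sizes below
# (bsz, num_heads, head_dim and the 3 cached steps) are illustrative assumptions.
def _kv_cache_sketch():
    import torch
    bsz, num_heads, head_dim = 2, 4, 8
    prev_key = torch.zeros(bsz, num_heads, 3, head_dim)    # 3 previously cached steps
    new_k = torch.zeros(bsz * num_heads, 1, head_dim)      # keys for the current step
    k = torch.cat([prev_key.view(bsz * num_heads, -1, head_dim), new_k], dim=1)
    assert k.shape == (bsz * num_heads, 4, head_dim)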
class FFNAttention(nn.Module):
def __init__(self, input_dim, hidden_dim, bias=False):
super(FFNAttention, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.q_proj = nn.Linear(input_dim, hidden_dim)
self.k_proj = nn.Conv1d(input_dim, hidden_dim, 1, 1)
self.out = nn.Linear(hidden_dim, 1, bias=bias)
self._inf = Parameter(torch.Tensor([-1e18]), requires_grad=False)
self.inf = None
# Initialize vector V
nn.init.uniform_(self.out.weight, -1, 1)
def forward(self, query, key, mask=None):
query = self.q_proj(query).unsqueeze(2).expand(-1, -1, key.size(1)) # (batch, hidden, seq_len)
key = key.permute(0, 2, 1) # (batch, hidden, seq_len)
key = self.k_proj(key) # (batch, hidden, seq_len)
attn_weight = self.out((query + key).permute(0, 2, 1)).squeeze(-1) # (batch, seq_len)
if mask is not None and len(attn_weight[mask]) > 0:
attn_weight[mask] = self.inf[mask]
attn_prob = attn_weight.softmax(dim=-1)
attn = torch.bmm(key, attn_prob.unsqueeze(2)).squeeze(2)
return attn, attn_weight
def init_inf(self, mask_size):
self.inf = self._inf.unsqueeze(1).expand(*mask_size)
class DotProductAttention(nn.Module):
""" Attention model for Pointer-Net """
def __init__(self, ninp, nhid):
"""
Initiate Attention
:param int ninp: Input dimension
:param int nhid: Number of hidden units in the attention
"""
super(DotProductAttention, self).__init__()
self.input_dim = ninp
self.hidden_dim = nhid
self.input_linear = nn.Linear(ninp, nhid)
self.context_linear = nn.Conv1d(ninp, nhid, 1, 1)
self.V = Parameter(torch.FloatTensor(nhid), requires_grad=True)
self._inf = Parameter(torch.FloatTensor([-1e18]), requires_grad=False)
self.tanh = nn.Tanh()
self.softmax = nn.Softmax(dim=-1)
self.inf = None
# Initialize vector V
nn.init.uniform_(self.V, -1, 1)
def forward(self, inputs, context, mask):
"""
Attention - Forward-pass
:param Tensor inputs: Hidden state h
:param Tensor context: Attention context
:param ByteTensor mask: Selection mask
:return: tuple of (attended hidden state, attention alphas)
"""
# (batch, hidden_dim, seq_len)
inp = self.input_linear(inputs).unsqueeze(2).expand(-1, -1, context.size(1))
# (batch, hidden_dim, seq_len)
context = context.permute(0, 2, 1)
ctx = self.context_linear(context)
# (batch, 1, hidden_dim)
V = self.V.unsqueeze(0).expand(context.size(0), -1).unsqueeze(1)
# (batch, seq_len)
attn_weight = torch.bmm(V, self.tanh(inp + ctx)).squeeze(1)
if mask is not None and len(attn_weight[mask]) > 0:
attn_weight[mask] = self.inf[mask]
attn_prob = self.softmax(attn_weight)
attn = torch.bmm(ctx, attn_prob.unsqueeze(2)).squeeze(2)
return attn, attn_weight
def init_inf(self, mask_size):
self.inf = self._inf.unsqueeze(1).expand(*mask_size)
class FeedForward(nn.Module):
def __init__(self, d_model, d_hidden, dropout=0.0):
super().__init__()
# dropout = 0.0 # means 17
self.input_to_hidden = nn.Linear(d_model, d_hidden)
self.hidden_to_output = nn.Linear(d_hidden, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, inputs):
h = F.relu(self.input_to_hidden(inputs))
h = self.dropout(h)
return self.hidden_to_output(h)
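# --- Hedged usage sketch (illustrative names and shapes, not part of the original module) ---
# attn_module = DotProductAttention(ninp=256, nhid=256)
# attn_module.init_inf(mask.size())   # must be called before forward() whenever a mask is used
# context_vec, scores = attn_module(hidden, context, mask)
# ffn = FeedForward(d_model=512, d_hidden=2048, dropout=0.1)
# out = ffn(x)                        # maps (batch, seq_len, 512) -> (batch, seq_len, 512)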
|
python
|
import nltk, json, pickle
from HelperFunctions import find_author
class MessageSentiment:
"""Generate mood sentiments for messages"""
MINIMUM_CERTAINTY_PROBABILITY = 0.85
TRAINING_SET_SIZE = 5000
try:
STOP_WORDS = set(nltk.corpus.stopwords.words('english'))
except:
nltk.download('stopwords')
STOP_WORDS = set(nltk.corpus.stopwords.words('english'))
def __init__(self, training_size = 5000):
"""Generates the classifier for NB analysis of messages"""
self.TRAINING_SET_SIZE = training_size
self.tweets = self.make_tweets()
self.word_features = self.make_word_features()
self.classifier = self.get_saved_classifier()
if self.classifier is None:
# Must generate new classifier
self.classifier = self.generate_classifier_from_twitter()
self.save_classifier()
def make_tweets(self):
raw_tweets = []
with open('negative_tweets.json') as txt:
for line in txt.readlines()[:self.TRAINING_SET_SIZE]:
tup = (json.loads(line)['text'], 'negative')
raw_tweets.append(tup)
with open('positive_tweets.json') as txt:
for line in txt.readlines()[:self.TRAINING_SET_SIZE]:
tup = (json.loads(line)['text'], 'positive')
raw_tweets.append(tup)
# Combine negative and positive tweets
parsed_tweets = []
for (words, sentiment) in raw_tweets:
words_filtered = [e.lower() for e in words.split() if self.is_real_word(e)]
parsed_tweets.append((words_filtered, sentiment))
# Make and return word features
return parsed_tweets
def is_real_word(self, word):
return len(word) >= 3 #and word not in self.STOP_WORDS
def make_word_features(self):
wordlist = []
for (words, sentiment) in self.tweets:
wordlist.extend(words)
return nltk.FreqDist(wordlist).keys()
def get_saved_classifier(self):
"""Return memozied data; if none exists, then make empty DB"""
with open("classifier.pkl", "rb") as pkl_db:
try:
memoized_data = pickle.load(pkl_db)
if memoized_data['classifier'] is not None:
return memoized_data['classifier']
except:
print("Saved classifier not found. Regenerating classifier...")
return None
print("Saved classifier not found. Regenerating classifier...")
return None
def generate_classifier_from_twitter(self):
print("Generating training set...")
training_set = nltk.classify.apply_features(self.extract_features, self.tweets)
return nltk.NaiveBayesClassifier.train(training_set)
def extract_features(self, document):
document_words = set(document)
features = {}
for word in self.word_features:
features['contains(%s)' % word] = (word in document_words)
return features
def save_classifier(self):
with open("classifier.pkl", "wb") as pkl_db:
print('Pickling classifier')
pickle.dump({'classifier': self.classifier}, pkl_db)
def classify_text(self, text_features):
prob = self.classifier.prob_classify(text_features)
(prob_pos, prob_neg) = prob.prob('positive'), prob.prob('negative')
if prob_neg > self.MINIMUM_CERTAINTY_PROBABILITY:
classification = "negative"
elif prob_pos > self.MINIMUM_CERTAINTY_PROBABILITY:
classification = "positive"
else:
classification = "neutral"
return (classification, max(prob_neg, prob_pos))
def get_mood(self, text):
parsed_text = [word for word in text.split() if self.is_real_word(word)]
return self.classify_text(self.extract_features(parsed_text))
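# --- Hedged usage sketch (assumes negative_tweets.json / positive_tweets.json are present) ---
# sentiment = MessageSentiment(training_size=1000)
# mood, certainty = sentiment.get_mood("having a really great day")
# # mood is 'positive', 'negative' or 'neutral'; certainty is the winning class probability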
|
python
|
import os, cv2, shutil
import numpy as np
import argparse
def read_coords(coord_file):
coord_data, inds_pos = [], []
assert os.path.exists(coord_file), "File does not exist! %s"%coord_file
with open(coord_file, 'r') as f:
for ind, line in enumerate(f.readlines()):
x_coord = int(line.strip().split(',')[0])
y_coord = int(line.strip().split(',')[1])
if x_coord > 0 and y_coord > 0:
inds_pos.append(ind)
coord_data.append([x_coord, y_coord])
coord_data = np.array(coord_data, dtype=np.int32)
inds_pos = np.array(inds_pos, dtype=np.int32)
return coord_data, inds_pos
def write_coords(coord_data, frame_ids, coord_file):
coord_dir = os.path.dirname(coord_file)
if not os.path.exists(coord_dir):
os.makedirs(coord_dir)
with open(coord_file, 'w') as f:
for i in frame_ids:
x_coord = int(coord_data[i, 0])
y_coord = int(coord_data[i, 1])
f.writelines('%d,%d\n'%(x_coord, y_coord))
def reduce_video(src_file, dst_file, ratio, frame_ids):
assert os.path.exists(src_file), "File does not exist! %s"%src_file
video_dir = os.path.dirname(dst_file)
if os.path.exists(dst_file):
return
if not os.path.exists(video_dir):
os.makedirs(video_dir)
# video capture of src video
cap_src = cv2.VideoCapture(src_file)
ret, frame = cap_src.read()
ind = 0
# dest capture
dst_size = (int(frame.shape[1] * ratio), int(frame.shape[0] * ratio)) # (width, height)
cap_dst = cv2.VideoWriter(dst_file, cv2.VideoWriter_fourcc(*'XVID'), 30, dst_size)
while ret:
    if ind in frame_ids:
        frame_resize = cv2.resize(frame, dst_size)
        cap_dst.write(frame_resize)
    # read next frame
    ret, frame = cap_src.read()
    ind += 1
# release the reader and writer so the output file is finalized
cap_src.release()
cap_dst.release()
def reduce_data(data_path, ratio, max_frames, subset, result_path):
# the input path
coord_path_src = os.path.join(data_path, subset, 'coordinate')
focus_path_src = os.path.join(data_path, subset, 'focus_videos')
salmap_path_src = os.path.join(data_path, subset, 'salmap_videos')
video_path_src = os.path.join(data_path, subset, 'rgb_videos')
# the input path
coord_path_dst = os.path.join(result_path, subset, 'coordinate')
focus_path_dst = os.path.join(result_path, subset, 'focus_videos')
salmap_path_dst = os.path.join(result_path, subset, 'salmap_videos')
video_path_dst = os.path.join(result_path, subset, 'rgb_videos')
for accID in sorted(os.listdir(coord_path_src)):
txtfile_dir = os.path.join(coord_path_src, accID)
for filename in sorted(os.listdir(txtfile_dir)):
coord_file_src = os.path.join(txtfile_dir, filename)
coord_data, inds_pos = read_coords(coord_file_src)
if inds_pos.shape[0] == 0:
continue # ignore videos without any accident
# remove the frames after accident ends
video_end = min(inds_pos[-1] + 1 + 16, coord_data.shape[0])
video_start = max(0, video_end - max_frames)
frame_ids = np.arange(video_start, video_end)
vid = filename.split('_')[0]
print("Processing the video: %s/%s, # frames: %d"%(accID, vid, len(frame_ids)))
# resize & write coords
coord_file_dst = os.path.join(coord_path_dst, accID, filename)
write_coords(ratio * coord_data, frame_ids, coord_file_dst)
# read focus videos
focus_video_src = os.path.join(focus_path_src, accID, vid + '.avi')
focus_video_dst = os.path.join(focus_path_dst, accID, vid + '.avi')
reduce_video(focus_video_src, focus_video_dst, ratio, frame_ids)
# read salmap videos
salmap_video_src = os.path.join(salmap_path_src, accID, vid + '.avi')
salmap_video_dst = os.path.join(salmap_path_dst, accID, vid + '.avi')
reduce_video(salmap_video_src, salmap_video_dst, ratio, frame_ids)
# read rgb videos
rgb_video_src = os.path.join(video_path_src, accID, vid + '.avi')
rgb_video_dst = os.path.join(video_path_dst, accID, vid + '.avi')
reduce_video(rgb_video_src, rgb_video_dst, ratio, frame_ids)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Reduce the size of DADA-2000')
parser.add_argument('--data_path', default="./DADA-2000",
help='Directory to the original DADA-2000 folder.')
parser.add_argument('--result_path', default="./DADA-2000-small",
help='Directory to the result DADA-2000 folder.')
args = parser.parse_args()
ratio = 0.5
max_frames = 450  # at fps=30 this caps each clip at 15 s after trimming
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
reduce_data(args.data_path, ratio, max_frames, 'training', args.result_path)
reduce_data(args.data_path, ratio, max_frames, 'testing', args.result_path)
reduce_data(args.data_path, ratio, max_frames, 'validation', args.result_path)
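# --- Hedged note on the expected on-disk layout (inferred from the code above) ---
# <data_path>/<subset>/coordinate/<accID>/<vid>_*.txt   one "x,y" pair per frame; both > 0 marks accident frames
# <data_path>/<subset>/rgb_videos/<accID>/<vid>.avi     plus matching focus_videos/ and salmap_videos/
# The same structure is written under <result_path>, with frames clipped and resized by `ratio`.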
|
python
|
# Type of the message
FIELD_MSGTYPE = "t"
MSG_OP = 1 # This is an operation
MSG_REPLY = 2 # This is a regular reply
MSG_EXCEPTION = 3 # This is an exception
MSG_CONTROL = 4 # This is a control message
MSG_INTERNAL_ERROR = 5 # Some internal error happened
# Fields for operations/control
FIELD_OPTYPE = "o"
FIELD_TARGET = "o_t"
FIELD_ARGS = "o_a"
FIELD_KWARGS = "o_ka"
# Fields for reply/exception
FIELD_CONTENT = "c"
# Fields for values
# Indicates that the object is remote to the receiver (and local to the sender)
VALUE_REMOTE = 1
# Indicates that the object is local to the receiver (and remote to the sender)
VALUE_LOCAL = 2
# Operations that we support
OP_GETATTR = 1
OP_SETATTR = 2
OP_DELATTR = 3
OP_CALL = 4
OP_CALLATTR = 5
OP_REPR = 6
OP_STR = 7
OP_HASH = 9
OP_PICKLE = 10
OP_DEL = 11
OP_GETMETHODS = 12
OP_DIR = 13
OP_CALLFUNC = 14
OP_GETVAL = 15
OP_SETVAL = 16
OP_INIT = 17
OP_CALLONCLASS = 18
# Control messages
CONTROL_SHUTDOWN = 1
CONTROL_GETEXPORTS = 2
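# --- Hedged illustration (message shape inferred from the constants above) ---
# An operation asking the peer to read an attribute of a remote object might be encoded as:
# {
#     FIELD_MSGTYPE: MSG_OP,
#     FIELD_OPTYPE: OP_GETATTR,
#     FIELD_TARGET: remote_object_id,      # hypothetical identifier for the remote object
#     FIELD_ARGS: ("attribute_name",),
#     FIELD_KWARGS: {},
# }
# The reply would carry FIELD_MSGTYPE: MSG_REPLY (or MSG_EXCEPTION) with the result under FIELD_CONTENT.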
|
python
|
import logging
from typing import List, Union, Iterable
from matplotlib.pyplot import Figure
import matplotlib.ticker as mtick
import pandas as pd
from py_muvr.permutation_test import PermutationTest
from matplotlib import pyplot as plt
from py_muvr.data_structures import FeatureSelectionResults
log = logging.getLogger(__name__)
class PALETTE:
lightblue = "#deebf7"
blue = "#3182bd"
black = "black"
white = "white"
grey = "grey"
lightgrey = "#9facbd"
def plot_validation_curves(
feature_selection_results: FeatureSelectionResults, **figure_kwargs
) -> plt.Figure:
curves = feature_selection_results.score_curves
plt.figure(**figure_kwargs)
for i, curve in enumerate(curves["outer_loops"]):
label = "Outer loop average" if i == 0 else None
plt.semilogx(curve.n_features, curve.scores, c=PALETTE.lightblue, label=label)
for i, curve in enumerate(curves["repetitions"]):
label = "Repetition average" if i == 0 else None
plt.semilogx(curve.n_features, curve.scores, c=PALETTE.blue, label=label)
for i, curve in enumerate(curves["total"]):
label = "Total average" if i == 0 else None
plt.semilogx(curve.n_features, curve.scores, c=PALETTE.black, label=label)
min_y, max_y = plt.gca().get_ylim()
selected_features = feature_selection_results.selected_features
for attribute in ["min", "max", "mid"]:
n_feats = len(getattr(selected_features, attribute))
plt.vlines(
n_feats,
min_y,
max_y,
linestyle="--",
colors=PALETTE.grey,
lw=2,
label=attribute,
zorder=100000,
)
plt.xlabel("# features")
plt.ylabel("Fitness score")
plt.grid(ls=":")
plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0)
return plt.gcf()
def plot_feature_rank(
feature_selection_results: FeatureSelectionResults,
model: str,
feature_names: List[str] = None,
show_outliers: bool = True,
**figure_kwargs,
) -> Figure:
if model not in {"min", "max", "mid"}:
raise ValueError("The model parameter must be one of 'min', 'max' or 'mid'.")
eval_attr = model + "_eval"
feats_attr = model
ranks = []
for r in feature_selection_results.raw_results:
for ol in r:
ranks_raw_data = getattr(ol, eval_attr).ranks.get_data()
ranks.append(ranks_raw_data)
selected_features = feature_selection_results.selected_features
best = getattr(selected_features, feats_attr)
selected_ranks = pd.DataFrame(r for r in ranks)[best]
sorted_feats = selected_ranks.mean().sort_values().index
selected_ranks = selected_ranks[sorted_feats]
if "figsize" not in figure_kwargs.keys():
fig_width = len(selected_ranks.columns) / 3
figure_kwargs["figsize"] = (6, max(fig_width, 5))
fig, (ax_ranks, ax_notnan) = plt.subplots(
nrows=1, ncols=2, sharey=True, **figure_kwargs
)
ax_notnan.xaxis.set_major_formatter(mtick.PercentFormatter())
ax_notnan.set_ylabel("Feature")
ax_notnan.set_xlabel("Percentage of times selected")
ax_ranks.set_xlabel("Feature Rank")
for ax in [ax_notnan, ax_ranks]:
ax.grid(linestyle=":", zorder=0)
ax.tick_params(axis="x")
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
bbox_props = {
"color": PALETTE.blue,
"alpha": 0.8,
}
bbox_color = {"boxes": PALETTE.blue, "medians": PALETTE.black}
if feature_names is not None:
feature_numbers = range(len(feature_names))
numbers_to_names = dict(zip(feature_numbers, feature_names))
selected_ranks.rename(columns=numbers_to_names, inplace=True)
selected_ranks.boxplot(
positions=range(len(selected_ranks.columns)),
color=bbox_color,
patch_artist=True,
ax=ax_ranks,
boxprops=bbox_props,
vert=False,
showfliers=show_outliers,
)
(selected_ranks.notna().mean() * 100).plot.barh(
facecolor=PALETTE.lightgrey,
ax=ax_notnan,
edgecolor=PALETTE.black,
grid=True,
alpha=0.8,
)
ax_notnan.invert_yaxis() # being the y-axis shared, it will invert both
fig.tight_layout() # otherwise the right y-label is slightly clipped
return fig
def plot_permutation_scores(
permutation_test: PermutationTest,
model: str,
bins: Union[int, str, Iterable[float]] = "auto",
**fig_kwargs,
) -> Figure:
score, perm_scores = permutation_test.compute_permutation_scores(model)
p_value = permutation_test.compute_p_values(model, ranks=False)
fig, ax = plt.subplots(1, 1, **fig_kwargs)
ax.grid(linestyle=":", zorder=0)
counts, _, _ = ax.hist(
perm_scores,
bins=bins,
alpha=0.8,
edgecolor=PALETTE.white,
facecolor=PALETTE.blue,
label="Permutation Scores",
zorder=10,
)
ax.vlines(
score,
ymin=0,
ymax=counts.max(),
color=PALETTE.black,
label="Feature Selection Score",
zorder=20,
)
ax.set_ylabel("Number of Occurrences")
ax.set_xlabel("Score")
ax.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0)
ax.set_title("Feature selection p-value = %1.3g" % p_value)
return fig
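# --- Hedged usage sketch (the `results` and `perm_test` objects are assumed to come from
# --- py_muvr's feature selection and permutation test workflow) ---
# fig = plot_validation_curves(results, figsize=(8, 5))
# fig = plot_feature_rank(results, model="min", feature_names=names)
# fig = plot_permutation_scores(perm_test, model="min", bins=30)
# fig.savefig("permutation_scores.png")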
|
python
|
# WEATHER RETRIEVING MICROSERVICE
# By: Cody Jennette
# CS 361 - Software Engineering I
# [email protected]
import requests
import urllib.request
# Program fetches local machine's external IP address, later used for current location:
external_ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
# get_location function saves local machine's city, country, latitude, and longitude as a tuple.
def get_location(ip):
access_key = "9c0df8c38ae552d45174ea3dc2454c18"
base_url = "http://api.ipstack.com/"
full_url = str(base_url) + str(ip) + "?access_key=" + str(access_key)
response = requests.get(full_url)
loc_info = response.json()
country = loc_info["country_code"]
city = loc_info["city"]
lat = loc_info["latitude"]
lon = loc_info["longitude"]
return lat, lon, city, country # Tuple is later unpacked for proper output display
def get_weather(latitude, longitude): # Latitude and longitude from tuple used to get weather conditions
key = "20184d8f0b1ac6a9146bc617163b1c64"
url_weather = "http://api.openweathermap.org/data/2.5/weather"
params = {"lat": latitude, "lon": longitude, "appid": key, "units": "imperial"}
output = requests.get(url_weather, params=params)
output_json = output.json()
desc = output_json["weather"][0]["description"]
temp = output_json["main"]["temp"]
return desc, temp # Last part of the output, saved as a tuple and later unpacked
def display_output(location_name, description, temperature): # Function to properly display output
display = "City: %s \nConditions: %s \nTemperature (°F): %s" % (location_name, description, temperature)
return display
location = get_location(external_ip)
(lat, lon, city, country) = location
full_city = str(city + ", " + country)
wea_info = get_weather(lat, lon)
(desc, temp) = wea_info
# full_city and wea_info tuples unpacked, then displayed by display_output function:
final = display_output(full_city, desc, temp)
print(final)
|
python
|
# -*- coding: utf-8 -*-
# pylint: disable=no-member,invalid-name,duplicate-code
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import logging
from typing import Callable
from collections import namedtuple, defaultdict
from enum import Enum
from api.models.CreditTrade import CreditTrade
from api.models.CreditTradeStatus import CreditTradeStatus
class CreditTradeRelationshipMixin(object):
"""
Mixin to provide user mapping for related parties to credit transactions
"""
class UserRelationship(Enum):
"""
Enumerates the ways in which a client (user) can be related to a
credit trade
"""
INITIATOR = 1
RESPONDENT = 2
THIRD_PARTY = 3
GOVERNMENT_ANALYST = 4
GOVERNMENT_DIRECTOR = 5
user_map = {
UserRelationship.INITIATOR: 'fs_user_1',
UserRelationship.RESPONDENT: 'fs_user_2',
UserRelationship.THIRD_PARTY: 'fs_user_3',
UserRelationship.GOVERNMENT_ANALYST: 'gov_analyst',
UserRelationship.GOVERNMENT_DIRECTOR: 'gov_director'
}
class CreditTradeFlowHooksMixin(object):
ChangeRecord = namedtuple('ChangeRecord', [
'trade_id',
'requesting_username',
'relationship',
'expected_to_be_successful',
'data_before_request',
'data_after_request',
'response_code'
])
PreChangeRecord = namedtuple('PreChangeRecord', [
'trade_id',
'current_status',
'rescinded',
'status_change'
])
StatusChange = namedtuple('StatusChange', [
'relationship',
'status',
'rescinded'
])
def _sensible_status_changes(self, current_status, rescinded):
"""
Return a list of valid potential status changes for a given starting
state
"""
status_changes = defaultdict(lambda: [])
status_changes[('Draft', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', False),
self.StatusChange(self.UserRelationship.INITIATOR,
'Cancelled', False)
]
status_changes[('Submitted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', False),
self.StatusChange(self.UserRelationship.RESPONDENT,
'Refused', False)
]
status_changes[('Accepted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Recommended', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Not Recommended', False)
]
status_changes[('Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
status_changes[('Not Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
return status_changes[(current_status, rescinded)]
def _path_builder(self, node, path=[], valid_paths=[]):
"""
Recursively build an array of valid paths through the status tree
"""
s = self._sensible_status_changes(node.status, node.rescinded)
is_leaf = not s
path = path + [node]
if is_leaf:
valid_paths.append(path) # end of the line
for branch in s:
self._path_builder(branch, path, valid_paths)
return valid_paths
def check_credit_trade_workflow(
self,
before_change_callback: Callable[[PreChangeRecord], None] = lambda x: None,
after_change_callback: Callable[[ChangeRecord], None] = lambda x: None,
path_end_callback: Callable[[], None] = lambda: None,
modify_request_payload: Callable[[dict], None] = lambda x: None
):
"""
Evaluate all normal status paths through the application via
REST API as appropriate users
with callbacks for tests:
before_change_callback called just before a status change.
Initial status and trade_id may be None
after_change_callback called after a change
data_before_request can be None if this was a creation
path_end_callback called when this pathway is done
(another will begin unless this was the last)
"""
initiating_org = self.users[
self.user_map[
self.UserRelationship.INITIATOR
]].organization
responding_org = self.users[
self.user_map[
self.UserRelationship.RESPONDENT
]].organization
payload = {
'fairMarketValuePerCredit': 1,
'initiator': initiating_org.id,
'numberOfCredits': 1,
'respondent': responding_org.id,
'tradeEffectiveDate': datetime.datetime.today().strftime('%Y-%m-%d'),
'type': self.credit_trade_types['sell'].id,
'zeroReason': None
}
valid_paths = (self._path_builder(
self.StatusChange(self.UserRelationship.INITIATOR, 'Draft', False)
))
for path in valid_paths:
logging.debug('evaluating path: {}'.format(
'\n'.join(
[
'{} sets status to {} and rescinded to {}'.format(
c.relationship, c.status, c.rescinded) for c in path
]
)))
trade_id = None
response_data = None
for node in path:
before_change_callback(self.PreChangeRecord(
trade_id,
CreditTrade.objects.filter(
id=trade_id
).first().status.status if trade_id else None,
CreditTrade.objects.filter(
id=trade_id
).first().is_rescinded if trade_id else None,
node
))
payload['status'] = CreditTradeStatus.objects.get_by_natural_key(node.status).id
payload['is_rescinded'] = node.rescinded
modify_request_payload(payload)
if not trade_id:
response = self.clients[self.user_map[node.relationship]].post(
'/api/credit_trades',
content_type='application/json',
data=json.dumps(payload)
)
else:
response = self.clients[self.user_map[node.relationship]].put(
'/api/credit_trades/{}'.format(trade_id),
content_type='application/json',
data=json.dumps(payload)
)
previous_response_data = response_data
response_data = json.loads(response.content.decode('utf-8'))
trade_id = response_data['id'] if 'id' in response_data else trade_id
after_change_callback(self.ChangeRecord(
trade_id,
self.user_map[node.relationship],
node.relationship,
True,
previous_response_data,
response_data,
response.status_code
))
path_end_callback()
|
python
|
import time
import requests
import threading
from filibuster.logger import debug
TIMEOUT_ITERATIONS = 100
SLEEP = 1
def num_services_running(services):
num_running = len(services)
for service in services:
if not service_running(service):
debug("! service " + service + " not yet running!")
num_running -= 1
return num_running
def wait_for_num_services_running(services, num_running, waiting_message):
timeout = TIMEOUT_ITERATIONS
while num_services_running(services) != num_running:
debug("Filibuster server waiting for {} to {}.".format(services, waiting_message))
debug("=> num_running: " + str(num_running))
debug("=> num_services_running(services): " + str(num_services_running(services)))
time.sleep(SLEEP)
timeout -= 1
if timeout == 0:
debug("Filibuster server timed out waiting for {} to {}.".format(services, waiting_message))
exit(1)
def wait_for_services_to_stop(services):
wait_for_num_services_running(services, 0, "stop")
def wait_for_services_to_start(services):
wait_for_num_services_running(services, len(services), "start")
def service_running(service):
name = service[0]
host = service[1]
port = service[2]
base_uri = "http://{}:{}".format(host, str(port))
# Jaeger will pass the health check only because health-check reroutes to /search.
debug("checking service's health-check: " + name)
try:
response = requests.get(
    "{}/health-check".format(base_uri), timeout=60)
if response.status_code == 200:
return True
else:
return False
except requests.exceptions.ConnectionError:
debug("! connection error")
return False
except requests.exceptions.Timeout:
debug("! timeout")
return False
def start_filibuster_server_thread(app):
class Server(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
app.run(port=5005, host="0.0.0.0")
server_thread = Server()
server_thread.setDaemon(True)
server_thread.start()
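# --- Hedged illustration ---
# Each entry in `services` is expected to be a (name, host, port) tuple, e.g.:
# services = [("users", "localhost", 5001), ("orders", "localhost", 5002)]
# wait_for_services_to_start(services)
# ...run tests...
# wait_for_services_to_stop(services)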
|
python
|
class ParkingSystem(object):
def __init__(self, big, medium, small):
"""
:type big: int
:type medium: int
:type small: int
"""
self.lot = {
1: [big,0],
2: [medium,0],
3: [small,0]
}
def addCar(self, carType):
"""
:type carType: int
:rtype: bool
"""
if self.lot[carType][1] < self.lot[carType][0]:
self.lot[carType][1] += 1
return True
else:
return False
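# --- Illustrative usage (car types: 1 = big, 2 = medium, 3 = small) ---
# parking = ParkingSystem(1, 1, 0)
# parking.addCar(1)  # True: one big slot is free
# parking.addCar(3)  # False: no small slots were configured
# parking.addCar(1)  # False: the only big slot is now taken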
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for the shell item events."""
from plaso.formatters import interface
class ShellItemFileEntryEventFormatter(interface.ConditionalEventFormatter):
"""Class that formats Windows volume creation events."""
DATA_TYPE = 'windows:shell_item:file_entry'
FORMAT_STRING_PIECES = [
u'Name: {name}',
u'Long name: {long_name}',
u'Localized name: {localized_name}',
u'NTFS file reference: {file_reference}',
u'Origin: {origin}']
FORMAT_STRING_SHORT_PIECES = [
u'Name: {name}',
u'NTFS file reference: {file_reference}',
u'Origin: {origin}']
SOURCE_LONG = 'File entry shell item'
SOURCE_SHORT = 'FILE'
|
python
|
# test_hello_add.py
from API import app
from flask import json
def test_predict():
response = app.test_client().post(
'/predict',
data=json.dumps({"gender":["Male"],
"SeniorCitizen":["0"],
"Partner":["0"],
"Dependents":["0"],
"tenure":["-0.223317"],
"MultipleLines":["-0.508112"],
"InternetService":["No"],
"Contract":["Month-to-month"],
"PaperlessBilling":["1"],
"PaymentMethod":["Electronic check"],
"MonthlyCharges":["-1.512322"],
"AddServices":["-1.757234"]
}),
content_type='application/json',
)
data = json.loads(response.get_data(as_text=True))
assert response.status_code == 200
assert data['sum'] == 3
print(data)
|
python
|
import argparse
import marshal
import os
import py_compile
from importlib import import_module
from pathlib import Path
from zipfile import ZipFile, PyZipFile
from Crypto.Cipher import AES
from loaders import register, PycZimpLoader, PyZimpLoader
def get_key(path):
if path is None:
return None
with open(path, "rb") as file:
return file.read()
class ZimpCreator:
def __init__(self, name, key, optimize, is_pyc):
self.name = name
self.key = key
self.optimize = optimize
self.is_pyc = is_pyc
def walk_pyc(self):
with PyZipFile(self.name + ".zip", 'w', optimize=self.optimize) as zimpfile:
zimpfile.writepy(self.name)
@staticmethod
def _get_data(path):
raise NotImplementedError("Use subclass")
def _encrypt(self, data):
if self.key is None:
return data
cipher = AES.new(self.key, AES.MODE_EAX)
nonce = cipher.nonce
encrypted_data, tag = cipher.encrypt_and_digest(data)
return b"".join((nonce, tag, encrypted_data))
def run(self):
with ZipFile(self.name + ".zip", 'w') as zimpfile:
# Iterate over all the files in directory
for folder_name, subfolders, filenames in os.walk(self.name):
for filename in filenames:
file_path = os.path.join(folder_name, filename)
if filename.endswith(".py"):
zimpfile.writestr(file_path + "c", self._encrypt(self._get_data(file_path)))
class PyZimpCreator(ZimpCreator):
@staticmethod
def _get_data(path):
with open(path, "rb") as file:
return file.read()
class PycZimpCreator(ZimpCreator):
def _get_data(self, path):
pycpath = py_compile.compile(path, optimize=self.optimize)
with open(pycpath, "rb") as pycfile:
return pycfile.read()
class ZimpRunner:
def __init__(self, name, key):
self.name = name
self.key = key
def _get_loader(self):
raise NotImplementedError("Use subclass")
def run(self):
register(self._get_loader())
import_module(self.name)
class PyZimpRunner(ZimpRunner):
def _get_loader(self):
return PyZimpLoader({self.name: self.key})
class PycZimpRunner(ZimpRunner):
def __init__(self, name, key, marshal_offset):
super().__init__(name, key)
self.marshal_offset = marshal_offset
def _get_loader(self):
return PycZimpLoader({self.name: self.key}, self.marshal_offset)
def find_marshal():
py_name = "__test_marshal.py"
pyc_name = py_name + "c"
try:
open(py_name, "wb").close()
py_compile.compile(py_name, pyc_name)
with open(pyc_name, "rb") as pycfile:
pyc = pycfile.read()
for i in range(Path(pyc_name).stat().st_size):
try:
exec(marshal.loads(pyc[i:]))
# ValueError when marshal fails. TypeError when exec fails. For example, during testing,
# on i=9 marshal.loads returns an int, which fails exec.
except (ValueError, TypeError):
pass
else:
return i
finally:
os.unlink(py_name)
os.unlink(pyc_name)
def run_zimp(args):
if args.pyc:
PycZimpRunner(args.name, get_key(args.key_file), args.marshal_offset).run()
else:
PyZimpRunner(args.name, get_key(args.key_file)).run()
def create_zimp(args):
if args.pyc:
PycZimpCreator(args.name, get_key(args.key_file), args.optimize, args.pyc).run()
else:
PyZimpCreator(args.name, get_key(args.key_file), args.optimize, args.pyc).run()
def main():
modes = {
"run": run_zimp,
"zip": create_zimp
}
argparser = argparse.ArgumentParser()
argparser.add_argument("mode", choices=modes.keys())
argparser.add_argument("--key-file")
argparser.add_argument("--name", required=True)
run_argparser = argparser.add_argument_group("run", "Run zimp")
run_argparser.add_argument("--pyc", action="store_true")
run_argparser.add_argument("--marshal-offset", default=16)
zip_argparser = argparser.add_argument_group("zip", "Create zimp")
zip_argparser.add_argument("--compression-level", default=None)
zip_argparser.add_argument("--optimize", type=int, default=-1)
args = argparser.parse_args()
modes[args.mode](args)
if __name__ == "__main__":
main()
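# --- Hedged usage sketch (the script filename is an assumption) ---
# Create an encrypted archive of package "mypkg" from compiled .pyc payloads
# (key.bin should contain a raw 16/24/32-byte AES key):
#   python zimp.py zip --name mypkg --key-file key.bin --pyc
# Later, import and run the package through the registered loader:
#   python zimp.py run --name mypkg --key-file key.bin --pyc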
|
python
|
from brick_wall_build import task
@task()
def clean():
pass
# Should be marked as task.
def html():
pass
# References a non task.
@task(clean,html)
def android():
pass
|
python
|
def INSERTION_SORT(list):
for n in range(1, len(list)):
for i in range(0, len(list) - 2):
if list[i] > list[i + 1]:
tmp = list[i]
list[i] = list[i + 1]
list[i + 1] = tmp
# insert the last element into the already-sorted prefix (scan the whole prefix)
k = 0
for i in range(0, len(list) - 1):
if list[-1] > list[i]:
k = i + 1
else:
break
tmp = list[k:-1]
list[k] = list[-1]
list[k + 1:] = tmp
return list
A = [5, 2, 4, 6, 1, 3]
print(INSERTION_SORT(A))
|
python
|
"""placeholder
Revision ID: 57539722e5cf
Revises: c1b5abada09c
Create Date: 2019-12-03 00:55:16.012247
"""
# revision identifiers, used by Alembic.
revision = '57539722e5cf'
down_revision = 'c1b5abada09c'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
python
|
first_number=1+1
print(first_number)
second_number=105+10
print(second_number)
|
python
|
from flask import Flask, request, jsonify, url_for, Blueprint
from api.models import db, User
from api.utils import generate_sitemap, APIException
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
import os
api = Blueprint('api', __name__)
# Create a route to authenticate your users and return JWTs. The
# create_access_token() function is used to actually generate the JWT.
@app.route("/token", methods=["POST"])
def create_token():
email = request.json.get("email", None)
password = request.json.get("password", None)
if email != "test" or password != "test":
return jsonify({"msg": "Bad username or password"}), 401
access_token = create_access_token(identity=email)
return jsonify(access_token=access_token)
|
python
|
import os
import package1.process as process
import package1.loadsettings as loadsettings
filenames = os.listdir("./task") #Create list of mediafiles to run through
if filenames == []:
print( "\nERROR: Task folder is empty. Put in video file(s) that you want to condense." )
quit()
if 'deletethis.txt' in filenames:
print("\nYou need to delete the file 'deletethis' in the 'condenser\Task' directory before the program can run.")
quit()
def strip_filename_extension(string):
temp = string.split('.')
return temp[0]
first_file = True #Certain operations need to be performed for the first file of a batch only.
for filename in filenames:
stripped_filename = strip_filename_extension(filename)
output_name = loadsettings.file_prefix + stripped_filename + ".mp3"
process.run_condenser(filename, output_name, first_file)
first_file = False
|
python
|
import urllib
from BeautifulSoup import *
class ComputerLab():
def __init__(self, room, num, time):
self.room = room
self.num = num
self.time = time
def __repr__(self):
str = "Room: %s\nNum: %s\nTime: %s\n" % (self.room, self.num, self.time)
return str
url = "https://tomcat.itap.purdue.edu:8445/ICSWeb/AvailableStations"
page = urllib.urlopen(url)
soup = BeautifulSoup(page.read())
xp = []
mac = []
sun = []
labs = [xp, mac, sun]
i=0
j=0
for tbl in soup.findAll('table'):
if (i==0):
i=1
continue
for tr in tbl.findAll('tr'):
if (len(tr.contents) > 2):
a = ComputerLab(None, None, None)
a.room = tr.contents[0].find('font').contents[0]
a.num = tr.contents[1].find('font').contents[0]
a.time = tr.contents[2].find('font').contents[0]
labs[j].append(a)
j+=1
for labos in labs:
for lab in labos:
print lab
print
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""
mssql2csv.py
Created by Bill Wiens on 2010-05-04.
"""
import sys, os, getopt, getpass
import optparse
import logging
import csv
import pymssql
def main():
parser = optparse.OptionParser()
parser.description="""Python script to dump a MSSQL Server Database to folder of CSV files.
Requires the freetds library and the pymssql module"""
parser.add_option("-H", "--host", dest="hostname", help="connect to HOSTNAME", metavar="HOSTNAME")
parser.add_option("-d", "--database", dest="database", help="connect to DATABASE", metavar="DATABASE")
parser.add_option("-u", "--user", dest="username", help="username to connect with", metavar="USERNAME")
parser.add_option("-p", "--password", dest="password", help="password to connect with", metavar="PASSWORD")
parser.add_option("-t", "--tables", dest="tables", help="Comma-separated list of tables to dump", metavar="TABLES")
(options, args) = parser.parse_args()
options = vars(options)
if not options['password']:
options['password'] = getpass.getpass("Enter password:")
if options['tables']:
options['tables'] = str.split(options['tables'], ",")
dump_db(options['hostname'], options['database'], options['username'], options['password'], options['tables'])
def dump_db(database_host, database_name, database_user, database_pass, database_tables):
try:
os.mkdir(database_name)
os.chdir(database_name)
except:
logging.getLogger().error("Failed to make folder for CSV's: {0}".format(database_name))
sys.exit(2)
try:
conn = pymssql.connect(user = database_user, password = database_pass, host = database_host, database = database_name)
cursor = conn.cursor()
except:
logging.getLogger().error("Error: Can't connect to database")
sys.exit(2)
if database_tables:  # handles both None (no -t flag) and an explicit table list
tables = database_tables
else:
cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='Base Table'")
tables = [table[0] for table in cursor.fetchall()]
for table_name in tables:
dump_table(cursor, table_name)
cursor.close()
conn.close()
def dump_table(cursor, tablename):
query = "SELECT COLUMN_NAME, DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='{0}'".format(tablename)
cursor.execute(query)
schema = cursor.fetchall()
fieldnames = [column[0] for column in schema]
# casts 'ntext' to nvarchar
selectnames = ["CAST ({0} as nvarchar(max))".format(name) if datatype == 'ntext' else name for name, datatype in schema]
query = "SELECT {0} FROM {1}".format(", ".join(selectnames), tablename)
cursor.execute(query)
filename = "{0}.csv".format(tablename)
with open(filename, "wb") as fp:
writer = csv.writer(fp)
writer.writerow(fieldnames)
row = cursor.fetchone()
while row:
writer.writerow(row)
row = cursor.fetchone()
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
try:
main()
except KeyboardInterrupt:
logging.getLogger().error("Cancelled by user")
|
python
|
#! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.mec_mod_interfaces.hybrid_bodies import HybridBodies
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
from pycatia.mec_mod_interfaces.hybrid_shapes import HybridShapes
from pycatia.mec_mod_interfaces.ordered_geometrical_sets import OrderedGeometricalSets
from pycatia.mec_mod_interfaces.shapes import Shapes
from pycatia.mec_mod_interfaces.sketches import Sketches
from pycatia.system_interfaces.any_object import AnyObject
class Body(AnyObject):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| Body
|
| The object that manages a sequence of shapes, a set of sketches, a set of
| hybrid bodies, a set of ordered geometrical sets and a set of hybrid
| shapes.
|
| It belongs to the Bodies collection of a Part or HybridBody
| object.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.body = com_object
@property
def hybrid_bodies(self) -> HybridBodies:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property HybridBodies() As HybridBodies (Read Only)
|
| Returns the body's HybridBodies collection.
|
| Example:
| The following example returns in hybridBodyColl the collection of
| hybrid bodies of the main body of partDoc part
| document:
|
| Dim body As Body
| Set body = partDoc.Part.Bodies.MainBody
| Set hybridBodyColl = body.HybridBodies
:return: HybridBodies
:rtype: HybridBodies
"""
return HybridBodies(self.body.HybridBodies)
@property
def hybrid_shapes(self) -> HybridShapes:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property HybridShapes() As HybridShapes (Read Only)
|
| Returns the list of hybrid shapes included in the body.
|
| Returns:
| oHybridShapes The list of hybrid shapes in the body (@see
| CATIAHybridShapes
| for more information).
|
| Example:
| The following example returns in HybridShapes1 the list
| of
| hybrid shapes in the body Body1:
|
| Dim HybridShapes1 As HybridShapes
| Set HybridShapes1 = Body1.HybridShapes
:return: HybridShapes
:rtype: HybridShapes
"""
return HybridShapes(self.body.HybridShapes)
@property
def in_boolean_operation(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property InBooleanOperation() As boolean (Read Only)
|
| Returns True if the body is involved in a boolean operation, else returns
| False.
|
| Example:
| The following example returns in operated True if the body body1belongs
| to a boolean operation.
|
| operated = body1.InBooleanOperation
:return: bool
:rtype: bool
"""
return self.body.InBooleanOperation
@property
def ordered_geometrical_sets(self) -> OrderedGeometricalSets:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property OrderedGeometricalSets() As OrderedGeometricalSets (Read
| Only)
|
| Returns the body's OrderedGeometricalSets collection.
|
| Example:
| The following example returns in OrderedGeometricalSetColl the
| collection of ordered geometrical sets of the body Body1:
|
| Set OrderedGeometricalSetColl = Body1.OrderedGeometricalSets
:return: OrderedGeometricalSets
:rtype: OrderedGeometricalSets
"""
return OrderedGeometricalSets(self.body.OrderedGeometricalSets)
@property
def shapes(self) -> Shapes:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Shapes() As Shapes (Read Only)
|
| Returns the body's Shapes collection. These shapes make up the sequence of
| shapes that will produce an intermediate result for the part, or the final
| result in the case of the main body.
|
| Example:
| The following example returns in shapColl the collection of shapes
| managed by the main body of the partDoc part document:
|
| Dim body As Body
| Set body = partDoc.Part.Bodies.MainBody
| Set shapColl = body.Shapes
:return: Shapes
:rtype: Shapes
"""
return Shapes(self.body.Shapes)
@property
def sketches(self) -> Sketches:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Sketches() As Sketches (Read Only)
|
| Returns the body's Sketches collection. These sketches are those inside the
| body at all levels.
|
| Example:
| The following example returns in skColl the collection of sketches of
| the main body of partDoc part document:
|
| Dim body As Body
| Set body = partDoc.Part.Bodies.MainBody
| Set skColl = body.Sketches
:return: Sketches
:rtype: Sketches
"""
return Sketches(self.body.Sketches)
def insert_hybrid_shape(self, i_hybrid_shape: HybridShape) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub InsertHybridShape(HybridShape iHybridShape)
|
| Insert a hybrid shape to the body.
|
| Parameters:
|
| iHybridShape
| The hybrid shape to insert.
|
| Example:
| This example inserts the hybrid shape HybridShape1 to the body
| Body1:
|
| Body1.InsertHybridShape (HybridShape1)
:param HybridShape i_hybrid_shape:
:return: None
:rtype: None
"""
return self.body.InsertHybridShape(i_hybrid_shape.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'insert_hybrid_shape'
# # vba_code = """
# # Public Function insert_hybrid_shape(body)
# # Dim iHybridShape (2)
# # body.InsertHybridShape iHybridShape
# # insert_hybrid_shape = iHybridShape
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def __repr__(self):
return f'Body(name="{self.name}")'
|
python
|
import numpy as np
import read_thres as thrs
def test_thr(check_thr):
data, x = thrs.ths_def(check_thr, threshd=1.E-5)
dat_nw = check_thr.drop(columns=["norm", "<x>", "<y>"])
x_nw = dat_nw.columns.values
assert len(x) == len(x_nw)
assert np.array_equal(x, x_nw)
assert data.equals(dat_nw)
|
python
|
import os
import yaml
import getpass
from ConfigParser import SafeConfigParser
from twisted.internet import defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from os.path import abspath, expanduser
from ooni.utils.net import ConnectAndCloseProtocol, connectProtocol
from ooni import geoip
from ooni.utils import Storage, log, get_ooni_root
from ooni import errors
class OConfig(object):
_custom_home = None
def __init__(self):
self.current_user = getpass.getuser()
self.global_options = {}
self.reports = Storage()
self.scapyFactory = None
self.tor_state = None
# This is used to store the probes IP address obtained via Tor
self.probe_ip = geoip.ProbeIP()
self.logging = True
self.basic = Storage()
self.advanced = Storage()
self.tor = Storage()
self.privacy = Storage()
self.set_paths()
def embedded_settings(self, category, option):
embedded_settings = os.path.join(get_ooni_root(), 'settings.ini')
if os.path.isfile(embedded_settings):
settings = SafeConfigParser()
with open(embedded_settings) as fp:
settings.readfp(fp)
return settings.get(category, option)
return None
@property
def var_lib_path(self):
var_lib_path = self.embedded_settings("directories", "var_lib")
if var_lib_path:
return os.path.abspath(var_lib_path)
return "/var/lib/ooni"
@property
def usr_share_path(self):
usr_share_path = self.embedded_settings("directories", "usr_share")
if usr_share_path:
return os.path.abspath(usr_share_path)
return "/usr/share/ooni"
@property
def data_directory_candidates(self):
dirs = [
self.ooni_home,
self.var_lib_path,
self.usr_share_path,
os.path.join(get_ooni_root(), '..', 'data'),
'/usr/share/'
]
if os.getenv("OONI_DATA_DIR"):
dirs.insert(0, os.getenv("OONI_DATA_DIR"))
if self.global_options.get('datadir'):
dirs.insert(0, abspath(expanduser(self.global_options['datadir'])))
return dirs
@property
def data_directory(self):
for target_dir in self.data_directory_candidates:
if os.path.isdir(target_dir):
return target_dir
return self.var_lib_path
@property
def ooni_home(self):
home = expanduser('~'+self.current_user)
if os.getenv("HOME"):
home = os.getenv("HOME")
if self._custom_home:
return self._custom_home
else:
return os.path.join(home, '.ooni')
def get_data_file_path(self, file_name):
for target_dir in self.data_directory_candidates:
file_path = os.path.join(target_dir, file_name)
if os.path.isfile(file_path):
return file_path
def set_paths(self):
self.nettest_directory = os.path.join(get_ooni_root(), 'nettests')
if self.advanced.inputs_dir:
self.inputs_directory = self.advanced.inputs_dir
else:
self.inputs_directory = os.path.join(self.ooni_home, 'inputs')
if self.advanced.decks_dir:
self.decks_directory = self.advanced.decks_dir
else:
self.decks_directory = os.path.join(self.ooni_home, 'decks')
self.reports_directory = os.path.join(self.ooni_home, 'reports')
self.resources_directory = os.path.join(self.data_directory,
"resources")
if self.advanced.report_log_file:
self.report_log_file = self.advanced.report_log_file
else:
self.report_log_file = os.path.join(self.ooni_home,
'reporting.yml')
if self.global_options.get('configfile'):
config_file = self.global_options['configfile']
self.config_file = expanduser(config_file)
else:
self.config_file = os.path.join(self.ooni_home, 'ooniprobe.conf')
if 'logfile' in self.basic:
self.basic.logfile = expanduser(self.basic.logfile.replace(
'~', '~'+self.current_user))
def initialize_ooni_home(self, custom_home=None):
if custom_home:
self._custom_home = custom_home
self.set_paths()
if not os.path.isdir(self.ooni_home):
print "Ooni home directory does not exist."
print "Creating it in '%s'." % self.ooni_home
os.mkdir(self.ooni_home)
os.mkdir(self.inputs_directory)
os.mkdir(self.decks_directory)
def _create_config_file(self):
target_config_file = self.config_file
print "Creating it for you in '%s'." % target_config_file
sample_config_file = self.get_data_file_path('ooniprobe.conf.sample')
with open(sample_config_file) as f:
with open(target_config_file, 'w+') as w:
for line in f:
if line.startswith(' logfile: '):
w.write(' logfile: %s\n' % (
os.path.join(self.ooni_home, 'ooniprobe.log'))
)
else:
w.write(line)
def read_config_file(self, check_incoherences=False):
if not os.path.isfile(self.config_file):
print "Configuration file does not exist."
self._create_config_file()
self.read_config_file()
with open(self.config_file) as f:
config_file_contents = '\n'.join(f.readlines())
configuration = yaml.safe_load(config_file_contents)
for setting in configuration.keys():
if setting in dir(self) and configuration[setting] is not None:
for k, v in configuration[setting].items():
getattr(self, setting)[k] = v
self.set_paths()
if check_incoherences:
self.check_incoherences(configuration)
def check_incoherences(self, configuration):
incoherent = []
if configuration['advanced']['interface'] != 'auto':
from scapy.all import get_if_list
if configuration['advanced']['interface'] not in get_if_list():
incoherent.append('advanced:interface')
self.log_incoherences(incoherent)
def log_incoherences(self, incoherences):
if len(incoherences) > 0:
if len(incoherences) > 1:
incoherent_pretty = ", ".join(incoherences[:-1]) + ' and ' + incoherences[-1]
else:
incoherent_pretty = incoherences[0]
log.err("You must set properly %s in %s." % (incoherent_pretty, self.config_file))
raise errors.ConfigFileIncoherent
@defer.inlineCallbacks
def check_tor(self):
"""
Called only when we must start tor by director.start
"""
incoherent = []
if not self.advanced.start_tor:
if self.tor.socks_port is None:
incoherent.append('tor:socks_port')
else:
socks_port_ep = TCP4ClientEndpoint(reactor,
"localhost",
self.tor.socks_port)
try:
yield connectProtocol(socks_port_ep, ConnectAndCloseProtocol())
except Exception:
incoherent.append('tor:socks_port')
if self.tor.control_port is not None:
control_port_ep = TCP4ClientEndpoint(reactor,
"localhost",
self.tor.control_port)
try:
yield connectProtocol(control_port_ep, ConnectAndCloseProtocol())
except Exception:
incoherent.append('tor:control_port')
self.log_incoherences(incoherent)
config = OConfig()
if not os.path.isfile(config.config_file) \
and os.path.isfile('/etc/ooniprobe.conf'):
config.global_options['configfile'] = '/etc/ooniprobe.conf'
config.set_paths()
|
python
|
#!/usr/bin/env python3
import pandas as pd
from tqdm import tqdm
from collections import defaultdict
import os, re, time, warnings, sys
import warnings
import pickle
from mutagen.mp3 import MP3
import numpy as np
def create_df(tsv, audio_dir):
tqdm.pandas()
df = pd.read_csv(tsv, sep='\t')
df['dur'] = df['path'].progress_apply(get_dur, args=(audio_dir,))
return df
def get_dur(mp3, audio_dir):
""" return audio duration in seconds """
audio=MP3(os.path.join(audio_dir,mp3))
return audio.info.length
def select_subset(df, n_spk, tgt_dur, accent=None, balanced_gender=True, add_precaution_spks=False):
print("n_spk = ", n_spk)
if add_precaution_spks:
tgt_dur= (tgt_dur/n_spk)*(n_spk+4)
n_spk+=4
print(n_spk)
print('Adding 4 additional speakers that will not be counted towards the max duration, to allow manual removal of unwanted speakers')
#1. filter out speakers with accents
df_start=len(df)
if not accent:
df = df[df['accent'].isnull()]  # only keep speakers with no specific accent
else:
print(accent)
df = df[df.accent.isin(accent)]
print("{}% of data was removed after filtering by accent".format((df_start-len(df))/df_start*100))
df_start=len(df)
#2. filter out speakers with not enough data
print("n_spk = ", n_spk)
tgt_dur_spk = float(tgt_dur) / n_spk
df_spk = df.groupby('client_id')['dur'].sum()
spks=list(df_spk[df_spk >= tgt_dur_spk].index)
df = df[df.client_id.isin(spks)]
if not len(df['client_id'].unique()) >= n_spk :
raise ValueError('There are not enough speakers to reach the desired target duration with the target number of speakers. Try reducing one of these two values.')
#3. select n_spk speakers, half of each gender.
if (n_spk % 2) != 0:
warnings.warn("Warning....... n_spk is an odd number, adding one speaker so that we can have equal share between males and females")
n_spk += 1
df_m = df[df["gender"] == "male"]
df_f = df[df["gender"] == "female"]
print("n_spk = ", n_spk)
if not len(df_m['client_id'].unique()) >= n_spk / 2 or not len(df_f['client_id'].unique()) >= n_spk / 2 :
raise ValueError('Not enough speakers of each gender for the target duration. This may be because many speakers have not entered their gender information. Try setting "balanced_gender" to False or reducing tgt_dur.')
if accent and len(accent) > 1:
#then do balanced.
finalspks = set()
for ac in accent :
a=df_m[df_m["accent"] == ac]['client_id']
spks= list(list(np.random.choice(df_m[df_m["accent"] == ac]['client_id'].unique(),int(n_spk/2/len(accent)), replace=False)) + list(np.random.choice(df_f[df_f["accent"] == ac]['client_id'].unique(),int(n_spk/2/len(accent)), replace=False)))
for x in spks:
finalspks.add(x)
else:
print("n_spk/2 = ", int(n_spk/2))
print("n_spk/2 = ", n_spk/2)
finalspks = set(list(np.random.choice(df_m['client_id'].unique(),int(n_spk/2), replace=False)) + list(np.random.choice(df_f['client_id'].unique(),int(n_spk/2), replace=False)))
print(len(finalspks))
print("male: ", len(df_m['client_id'].unique()))
print("female: ", len(df_f['client_id'].unique()))
#4. Sample n seconds per spk
#filter out if above threshold length
final_df = pd.DataFrame(columns=df.columns)
for spk in tqdm(finalspks):
print(spk)
tot=0
tmp_df = df[df['client_id'] == spk]
for i in tmp_df.sample(frac=1).iterrows():
if i[1]['dur'] >= 20:
continue #not over 20 sec
if tot >= tgt_dur_spk:
break
final_df = final_df.append(i[1])
tot += i[1]['dur']
return final_df
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("CV_path", help="path to the commonvoice main directory")
parser.add_argument("lang", help="language code in CV format")
parser.add_argument('output_tsv', type=str, help="path to the output tsv")
parser.add_argument("--tgt_spk", type=int, help='target number of shespeakers, must be an even number', default=24)
parser.add_argument("--tgt_dur", type=int, help='target total duration of the selection, in seconds', default=36000)
parser.add_argument("--add_precaution_spks", default=False, action="store_true", help="If True, add 4 more speakers (2 for each gender) that will have to be manually removed.")
parser.add_argument("--accent", default=None, action='append')
args, leftovers = parser.parse_known_args()
validated_tsv = os.path.join(args.CV_path, args.lang, "validated.tsv")
audio_dir= os.path.join(args.CV_path, args.lang,"clips")
print(args.accent)
if os.path.exists(os.path.join(args.CV_path, args.lang, "validated.pkl")):
df=pickle.load(open(os.path.join(args.CV_path, args.lang, "validated.pkl"), 'rb'))
else :
print("Retrieveing audio information from the tsv file")
df = create_df(validated_tsv, audio_dir)
df.to_pickle(os.path.join(args.CV_path, args.lang, "validated.pkl"))
if not os.path.exists(args.output_tsv):
print("Selecting the subset")
if args.add_precaution_spks:
final_df = select_subset(df, args.tgt_spk, args.tgt_dur, add_precaution_spks=args.add_precaution_spks, accent=args.accent)
else:
final_df = select_subset(df, args.tgt_spk, args.tgt_dur, accent=args.accent)
final_df.to_csv(args.output_tsv, sep="\t")
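# Example invocation (the script name, paths, and accent values below are illustrative):
#   python select_cv_subset.py /data/cv-corpus en subset.tsv --tgt_spk 24 --tgt_dur 36000 \
#       --accent england --accent scotland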
|
python
|
from unittest import TestCase
from cards.businesslogic.description_generator.DescriptionAppender import DescriptionAppender
class DescriptionAppenderTestCase(TestCase):
def test_sample_description1(self):
appender = DescriptionAppender()
text1 = "line1"
text2 = "line2"
appender.append(text1)
appender.append(text2)
result = appender.process()
self.assertEqual("Line1 i line2.", result)
def test_single_item_desc(self):
appender = DescriptionAppender()
appender.append("test")
self.assertEqual("Test.", appender.process())
def test_multiple(self):
appender = DescriptionAppender()
appender.append("12test")
appender.append("ASDASD")
appender.append("112a")
appender.append("Test")
self.assertEqual("12test, aSDASD, 112a i test.", appender.process())
def test_none(self):
appender = DescriptionAppender()
self.assertEqual("", appender.process())
|
python
|
# Copyright 2020 Thomas Rogers
# SPDX-License-Identifier: Apache-2.0
import typing
import yaml
from direct.gui import DirectGui, DirectGuiGlobals
from direct.task import Task
from panda3d import core
from ... import constants, edit_mode
from ...tiles import manager
from ...utils import gui
from .. import descriptors, event_grouping, map_objects
from ..descriptors import wall_type_descriptor
from . import sprite_property_view
_WALL_CATEGORIES_TYPE = typing.Dict[
str, typing.List[wall_type_descriptor.WallTypeDescriptor]
]
class WallDialog:
def __init__(self, parent: core.NodePath, edit_mode: edit_mode.EditMode):
self._dialog = DirectGui.DirectFrame(
parent=parent,
pos=core.Vec3(-0.78, -0.9),
frameSize=(0, 1.58, 0, 1.8),
relief=DirectGuiGlobals.RAISED,
borderWidth=(0.01, 0.01),
)
self._dialog.hide()
self._property_parent: core.NodePath = self._dialog.attach_new_node(
"properties"
)
self._property_parent.set_pos(0.04, 0, 0.38)
self._edit_mode = edit_mode
self._wall: map_objects.EditorWall = None
self._selected_descriptor: wall_type_descriptor.WallTypeDescriptor = None
self._current_descriptor: wall_type_descriptor.WallTypeDescriptor = None
self._current_picnum: int = None
self._current_palette: int = None
self._current_status_number: int = None
self._properties: sprite_property_view.SpritePropertyView = None
self._type_lookup = {
wall_type.name: type_index
for type_index, wall_type in descriptors.wall_types.items()
}
type_names = list(self._type_lookup.keys())
self._type_selector = DirectGui.DirectOptionMenu(
parent=self._dialog,
pos=core.Vec3(0.05, 0.38),
scale=constants.TEXT_SIZE,
items=type_names,
command=self._type_changed,
)
DirectGui.DirectLabel(
parent=self._dialog,
text="Special Source:",
pos=core.Vec3(1.12, 0.38),
scale=constants.TEXT_SIZE,
)
self._special_source_menu = DirectGui.DirectOptionMenu(
parent=self._dialog,
pos=core.Vec3(1.28, 0.38),
items=["None", "Level Start"],
scale=constants.TEXT_SIZE,
)
DirectGui.DirectLabel(
parent=self._dialog,
text="Special Target:",
pos=core.Vec3(1.12, 0.38 - constants.TEXT_SIZE - 0.02),
scale=constants.TEXT_SIZE,
)
self._special_target_menu = DirectGui.DirectOptionMenu(
parent=self._dialog,
pos=core.Vec3(1.28, 0.38 - constants.TEXT_SIZE - 0.02),
items=["None", "Next Level", "Secret Level"],
scale=constants.TEXT_SIZE,
)
DirectGui.DirectButton(
parent=self._dialog,
pos=core.Vec3(1.36, 0.07),
text="Ok",
scale=constants.TEXT_SIZE,
command=self._save_changes,
)
DirectGui.DirectButton(
parent=self._dialog,
pos=core.Vec3(1.48, 0.07),
text="Cancel",
scale=constants.TEXT_SIZE,
command=self._hide,
)
def show(self, wall: map_objects.EditorWall):
self._wall = wall
self._current_descriptor = descriptors.wall_types[self._wall.get_type()]
if (
self._wall.target_event_grouping
== event_grouping.EventGroupingCollection.END_LEVEL_GROUPING
):
self._special_target_menu.set("Next Level")
elif (
self._wall.target_event_grouping
== event_grouping.EventGroupingCollection.SECRET_END_LEVEL_GROUPING
):
self._special_target_menu.set("Secret Level")
else:
self._special_target_menu.set("None")
if (
self._wall.source_event_grouping
== event_grouping.EventGroupingCollection.START_LEVEL_GROUPING
):
self._special_source_menu.set("Next Level")
else:
self._special_source_menu.set("None")
type_name = self._current_descriptor.name
self._type_selector.set(type_name)
self._update_property_view()
self._edit_mode.push_mode(self)
def _save_changes(self):
new_values = self._properties.get_values()
new_picnum = self._properties.get_current_tile()
if new_picnum is not None:
self._current_picnum = new_picnum
self._current_descriptor.apply_wall_properties(self._wall, new_values)
self._wall.blood_wall.wall.tags[0] = self._current_descriptor.wall_type
self._wall.invalidate_geometry()
target_special_value = self._special_target_menu.get()
if target_special_value == "Next Level":
self._wall.set_target_event_grouping(
event_grouping.EventGroupingCollection.END_LEVEL_GROUPING
)
elif target_special_value == "Secret Level":
self._wall.set_target_event_grouping(
event_grouping.EventGroupingCollection.SECRET_END_LEVEL_GROUPING
)
elif (
self._wall.target_event_grouping is not None
and self._wall.target_event_grouping.special_receiver_id is not None
):
self._wall.set_target_event_grouping(None)
source_special_value = self._special_source_menu.get()
if source_special_value == "Level Start":
self._wall.set_source_event_grouping(
event_grouping.EventGroupingCollection.START_LEVEL_GROUPING
)
elif (
self._wall.source_event_grouping is not None
and self._wall.source_event_grouping.special_receiver_id is not None
):
self._wall.set_source_event_grouping(None)
self._hide()
def _clear_property_view(self):
if self._properties is not None:
self._properties.destroy()
self._properties = None
def _update_property_view(self):
self._clear_property_view()
self._properties = sprite_property_view.SpritePropertyView(
self._property_parent,
-1,
self._current_descriptor.get_wall_properties(self._wall),
None,
None,
1.65,
1.5,
1.25,
)
def _type_changed(self, value):
type_index = self._type_lookup[value]
self._current_descriptor = descriptors.wall_types[type_index]
self._update_property_view()
if self._wall.get_type() == type_index:
return
self._wall.blood_wall.wall.tags[0] = type_index
def _reset_selected_wall_type(self, task):
self._selected_descriptor = None
return task.done
def enter_mode(self, state: dict):
self._dialog.show()
def exit_mode(self):
self._dialog.hide()
return {}
def _hide(self):
self._edit_mode.pop_mode()
def tick(self):
pass
|
python
|
import tkinter as tk
import pygubu
import csv
import serial
import rospy
import numpy as np
import PID
import ctypes
from can_msgs import msg
import fcntl
import termios
import sys
import select
import subprocess
import os
from threading import Timer
import signal
from termios import tcflush, TCIOFLUSH
from rospy.core import NullHandler
import tty
from pyquaternion import Quaternion
from OdriveClass import *
import time  # time.time()/time.sleep() are used throughout
import cv2   # used by SetupNewFile/finishtestrecording for video writing
# Note that positive velocity values lower winch 1
MAX_VEL = 100000 # Max. speed for winch in encoder counts per second
LIVEPLOTTER = 1
doCalibrate = 0
class numhex64(ctypes.Union):
_fields_ = [("num", ctypes.c_double),
("sint", ctypes.c_int64),
("uint", ctypes.c_uint64),
("hex", ctypes.c_ubyte * 8)]
class numhex32(ctypes.Union):
_fields_ = [("num", ctypes.c_float),
("sint", ctypes.c_int32),
("uint", ctypes.c_uint32),
("hex", ctypes.c_ubyte * 4)]
class ManualWinchApp:
def __init__(self):
        # Initialize the gantry:
os.system("stty -echo")
rospy.init_node('can_send', anonymous=True)
        # Keep the ROS publisher and CAN frame state on the instance so move_gantry can use them
        self.pub = rospy.Publisher('sent_messages', msg.Frame, queue_size=100)
        self.rate = rospy.Rate(50)
        self.xSpeed = numhex64()
        self.zSpeed = numhex64()
        self.frame = msg.Frame()
        self.frame.is_rtr = False
        self.frame.is_extended = False
        self.frame.dlc = 8
self.gantry_x_pid = PID.PID(P=0.2, I=0.0, D=0.0)
self.gantry_z_pid = PID.PID(P=0.2, I=0.0, D=0.0)
#1: Create a builder
self.builder = builder = pygubu.Builder()
#2: Load an ui file
builder.add_from_file('WinchesManualGUI.ui')
#3: Create the mainwindow
self.mainwindow = builder.get_object('MainWindow')
#4: Connect callbacks
builder.connect_callbacks(self)
self.ser_add='/dev/ttyACM3' #For Strain Gauges and IMU
self.ser_add2='/dev/ttyACM1' # for Encoders
self.testCounter=1
self.list_of_floats=[]
# Data from gantry crane:
self.x = 0
self.y = 0
self.z = 0
self.xval1=0
self.yval1=0
self.buttjpin=0
self.butt1pin=0
self.butt2pin=0
self.butt3pin=0
self.str1=0
self.str2=0
self.str3=0
self.ytilt=0
self.ztilt=0
self.qw=0
self.qx=0
self.qy=0
self.qz=0
self.q = Quaternion(self.qw,self.qx,self.qy,self.qz)
self.rot_ax = self.q.axis
self.rot_ang = self.q.degrees
self.accx=0
self.accy=0
self.accz=0
self.sys_cal=0
self.gyro_cal=0
self.acc_cal=0
self.mag_cal=0
self.accz_thresh_wedgeBreaking=0.5
self.ytilt_zero=-.81
self.ztilt_zero=2.87
self.accx_zero=--.133
self.accy_zero=-.5
self.accz_zero=10.08
self.angle_Zthresh=.75
self.angle_Ythresh=.75
self.exitholeflag=0
self.psiwedge=0
self.pitch=0
self.roll=0
self.psi=0
self.v4=0.0
self.v4_prev=0.0
self.mot1spd=0.0
self.mot1spd_prev=0.0
self.mot2spd=0.0
self.mot2spd_prev=0.0
self.mot3spd=0.0
self.mot3spd_prev=0.0
self.ytiltw=0
self.ztiltw=0
self.sv=0
if self.sv==1:
self.str1thresh=10
self.str2thresh=10
self.str3thresh=10
else:
self.str1thresh=5
self.str2thresh=5
self.str3thresh=5
#print([self.str1thresh,self.str2thresh,self.str3thresh])
#time.sleep(.5)
# get calibration parameters
self.list_of_floats=[]
self.list_of_floats_temp=[]
self.TotalList=[]
def run(self):
self.mainwindow.mainloop()
def winch1scale_move(self, vel):
des_vel = MAX_VEL*float(vel)/100
odrv0.VelMove(des_vel,0)
def winch2scale_move(self, vel):
des_vel = MAX_VEL*float(vel)/100
odrv1.VelMove(des_vel,0)
def winch3scale_move(self, vel):
des_vel = MAX_VEL*float(vel)/100
odrv1.VelMove(des_vel,1)
def move_all(self,vel):
des_vel = MAX_VEL*float(vel)/100
odrv0.VelMove(des_vel,0)
odrv1.VelMove(des_vel,0)
odrv1.VelMove(des_vel,1)
def stopall_butt(self):
odrv0.VelMove(0,0)
odrv1.VelMove(0,0)
odrv1.VelMove(0,1)
def get_gantry_coords(self):
self.x = 0
self.y = 0
self.z = 0
    def move_gantry(self, x, y, z):
        # y is currently unused; the gantry PID loops act on x and z
        self.xSpeed.num = 0.0
        self.zSpeed.num = 0.0
        self.gantry_x_pid.SetPoint = x
        self.gantry_z_pid.SetPoint = z
        self.gantry_x_pid.update()
        self.gantry_z_pid.update()
        self.frame.id = 0x01
        msgData = ""
        for idx in range(8):
            msgData += chr(self.xSpeed.hex[idx])
        self.frame.data = msgData
        self.frame.header.stamp = rospy.Time.now()
        self.pub.publish(self.frame)
        self.frame.id = 0x02
        msgData = ""
        for idx in range(8):
            msgData += chr(self.zSpeed.hex[idx])
        self.frame.data = msgData
        self.frame.header.stamp = rospy.Time.now()
        self.pub.publish(self.frame)
        rospy.loginfo("x: %f rps, y: %f rps", self.xSpeed.num, self.zSpeed.num)
        self.rate.sleep()
        return 0
##### ARDUINO SERIAL FUNCS
def ArduinoSetup(self):
userinput=input('Setting up the arduino. If you restarted the arduino, unload everything and then enter 1 so it can calibrate')
#print(type(int(userinput)))
print(self.ser_add)
print(self.ser_add2)
self.ser = serial.Serial(self.ser_add, 115200,timeout=1)
# self.ser.flushInput()
# self.ser.write(int(userinput))
# self.ser.flushInput()
self.ser2 = serial.Serial(self.ser_add2, 115200,timeout=1)
# self.ser2.flushInput()
# self.ser2.write(int(userinput))
# self.ser2.flushInput()
print(self.ser_add)
print(self.ser_add2)
print("connected")
# Calibrate Arduino if needed
line=[]
ctr=0
        while self.buttjpin==0:
try:
line = self.ser.readline()
line.decode('ascii').strip()
print(line.decode('ascii').strip())
list_of_floats_temp=[]
list_of_floats_temp_2=[]
list_of_floats_temp_1= [float(item) for item in line.decode('ascii').strip().split(';')]
line2 = self.ser2.readline()
line2.decode('ascii').strip()
print(line2.decode('ascii').strip())
list_of_floats_temp_2= [float(item) for item in line2.decode('ascii').strip().split(';')]
list_of_floats_temp_1.extend(list_of_floats_temp_2)
print(list_of_floats_temp_1)
# if len(self.list_of_floats_temp)==13:
# list_of_floats_temp2=list_of_floats_temp
# #print(self.list_of_floats)
# list_of_floats_temp2[8]=180-(360-list_of_floats_temp[8])
# list_of_floats_temp2[9]=90-list_of_floats_temp[9]
# print(list_of_floats_temp2)
ctr=ctr+1
except:
pass
# print("Keyboard Interrupt")
finally:
#if len(line)>0:
#if line.decode('ascii').strip()=="good":
if ctr>5:
break
# input('Everything calibrated. LOAD UP. Then serial data will display. Enter 1 to start ')
# line=[]
timeout_start=time.time()
        timeout=2
        #display serial data for a couple of seconds to make sure it looks good
while time.time() < timeout_start + timeout:
self.get_data(0)
#print([ self.str1,self.str2,self.str3])
print([ self.str1,self.str2,self.str3, self.phi1enc,self.phi2enc,self.phi3enc])
#print([ self.phi1enc,self.phi2enc,self.phi3enc, self.phi1deg,self.phi2deg,self.phi3deg])
input('If you are happy with the serial, press 1 to continue. otherwise, restart the python ')
def ReadSerial(self,tosaveflag):
self.ser.flushInput()
self.ser2.flushInput()
# while (self.ser.inWaiting()<30 and self.ser2.inWaiting()<15):
# pass
try:
line2 = self.ser2.readline()
self.list_of_floats_temp2=[]
self.list_of_floats_temp2 = [float(item) for item in line2.decode('ascii').strip().split(';')]
line = self.ser.readline()
self.list_of_floats_temp=[]
self.list_of_floats_temp = [float(item) for item in line.decode('ascii').strip().split(';')]
self.list_of_floats_temp.extend(self.list_of_floats_temp2)
#print(self.list_of_floats_temp)
#print(len(self.list_of_floats_temp))
# while (self.ser.inWaiting()<30):
# pass
# #print('less')
# try:
# line = self.ser.readline()
# line2=line
# #print(len(line))
# # print(line)
# self.list_of_floats_temp=[]
# self.list_of_floats_temp = [float(item) for item in line.decode('ascii').strip().split(';')]
# except:
# pass
# while (self.ser2.inWaiting()<30):
# pass
# try:
# line2 = self.ser2.readline()
# self.list_of_floats_temp2=[]
# self.list_of_floats_temp2 = [float(item) for item in line2.decode('ascii').strip().split(';')]
# except:
# pass
#self.list_of_floats_temp.extend(self.list_of_floats_temp2)
#print(self.list_of_floats_temp)
# print(line.decode('ascii').strip())
#print(len(self.list_of_floats_temp))
if len(self.list_of_floats_temp)==28:
self.list_of_floats=[]
self.list_of_floats=self.list_of_floats_temp
self.xval1=self.list_of_floats[0]
self.yval1=self.list_of_floats[1]
self.buttjpin=self.list_of_floats[2]
self.butt1pin=self.list_of_floats[3]
self.butt2pin=self.list_of_floats[4]
self.butt3pin=self.list_of_floats[5]
self.str1=round(self.list_of_floats[6],1)
self.str2=round(self.list_of_floats[7],1)
self.str3=round(self.list_of_floats[8],1)
self.ytilt=self.list_of_floats[9]
self.ztilt=self.list_of_floats[10]
self.qw=self.list_of_floats[11]
self.qx=self.list_of_floats[12]
self.qy=self.list_of_floats[13]
self.qz=self.list_of_floats[14]
self.q = Quaternion(self.qw,self.qx,self.qy,self.qz)
self.rot_ax = self.q.axis
self.rot_ang = self.q.degrees
self.accx=self.list_of_floats[15]
self.accy=self.list_of_floats[16]
self.accz=self.list_of_floats[17]
self.sys_cal=self.list_of_floats[18]
self.gyro_cal=self.list_of_floats[19]
self.acc_cal=self.list_of_floats[20]
self.mag_cal=self.list_of_floats[21]
self.phi1enc=self.list_of_floats[22]
self.phi2enc=self.list_of_floats[23]
self.phi3enc=self.list_of_floats[24]
self.beta1enc=self.list_of_floats[25]
self.beta2enc=self.list_of_floats[26]
self.beta3enc=self.list_of_floats[27]
self.phi1deg=360-self.phi1enc/16384*360
self.phi2deg=360-self.phi2enc/16384*360
self.phi3deg=360-self.phi3enc/16384*360
self.beta1deg=self.beta1enc/16384*360
self.beta2deg=self.beta2enc/16384*360
self.beta3deg=self.beta3enc/16384*360
self.calculatepsi()
self.list_of_floats.append(self.psi)
self.list_of_floats.append(self.ytilt_zero)
self.list_of_floats.append(self.ztilt_zero)
self.list_of_floats.append(self.accx_zero)
                self.list_of_floats.append(self.accy_zero)
self.list_of_floats.append(self.accz_zero)
self.pitch = 180 * np.arctan2(self.accx ,np.sqrt(self.accy*self.accy+ self.accz*self.accz))/3.14;
self.roll = 180 * np.arctan2(self.accy, np.sqrt(self.accx*self.accx + self.accz*self.accz))/3.14;
self.list_of_floats.append(self.pitch)
self.list_of_floats.append(self.roll)
#append aruco stuff
#self.list_of_floats.extend(self.pegrvec)
#self.list_of_floats.extend(self.pegtvec)
#self.list_of_floats.extend(self.holervec)
#self.list_of_floats.extend(self.holetvec)
#self.getPegDepth()
#print(self.depth_1,self.depth_2)
#self.list_of_floats.append(self.phi1deg)
#self.list_of_floats.append(self.phi2deg)
#self.list_of_floats.append(self.phi3deg)
#self.list_of_floats.append(self.beta1deg)
#self.list_of_floats.append(self.beta2deg)
#self.list_of_floats.append(self.beta3deg)
#self.list_of_floats.append(self.depth_1)
self.winchenc1=0
self.winchenc2=0
self.winchenc3=0
if self.connectflag==1:
self.winchenc1=self.odrv0.get_encoder_count(0)
self.winchenc2=self.odrv1.get_encoder_count(0)
self.winchenc3=self.odrv1.get_encoder_count(1)
self.list_of_floats.append(self.winchenc1)
self.list_of_floats.append(self.winchenc2)
self.list_of_floats.append(self.winchenc3)
self.list_of_floats.append(self.mot1spd)
self.list_of_floats.append(self.mot2spd)
self.list_of_floats.append(self.mot3spd)
#self.phi1rad=self.phi1deg*3.14/180
#self.phi2rad=self.phi2deg*3.14/180
#self.phi3rad=self.phi3deg*3.14/180
#self.str1P=self.str1*np.cos(self.phi1rad)
#self.str2P=self.str2*np.cos(self.phi2rad)
#self.str3P=self.str3*np.cos(self.phi3rad)
self.list_of_floats.insert(0,time.time())
#yrdgs.append((self.ytilt-self.ytilt_zero))
#zrdgs.append((self.ztilt-self.ztilt_zero))
#self.ytilta=self.avg(yrdgs)
#self.ztilta=self.avg(zrdgs)
#self.list_of_floats.append(self.ytilta)
#self.list_of_floats.append(self.ztilta)
                # yrdgs/zrdgs smoothing is disabled above, so skip the related pops as well
                #if len(yrdgs)==20:
                #    yrdgs.pop(0)
                #if len(zrdgs)==20:
                #    zrdgs.pop(0)
if tosaveflag==1:
self.DataToSave()
except:
pass
def CalibrateIMU(self):
self.buttjpin=0
input("Calibrate the IMU. Press 1 to start, hit the joystick button 4 to stop")
while self.buttjpin==0:
self.get_data(0)
print(self.buttjpin,self.sys_cal,self.gyro_cal,self.acc_cal,self.mag_cal)
self.get_data(0)
time.sleep(2)
input("get IMU Data. Hit joystick button to stop")
self.get_data(0)
self.buttjpin=0
while self.buttjpin==0:
self.get_data(0)
pitch = 180 * np.arctan2(self.accx ,np.sqrt(self.accy*self.accy+ self.accz*self.accz))/3.14;
roll = 180 * np.arctan2(self.accy, np.sqrt(self.accx*self.accx + self.accz*self.accz))/3.14;
#print([self.ytilt,self.ytilt-self.ytilt_zero, self.ztilt,self.ztilt-self.ztilt_zero,self.psi ,self.accz, self.sys_cal, self.gyro_cal, self.acc_cal, self.mag_cal])
#print([self.psi,self.accx-self.accx_zero,self.accy-self.accy_zero,self.accz-self.accz_zero])
print([round(self.ytilt,2),round(self.ztilt,2)])
def GetIMUOffsets(self):
print(self.roll,self.roll-self.ytilt_zero, self.pitch,self.pitch-self.ztilt_zero)
getimuoffsets_var=input("Enter 1 to get IMU Offsets. Enter 0 to use stored offsets: ")
if int(getimuoffsets_var)==1:
input("Adjust peg so it is in the hole")
time.sleep(1)
self.get_data(0)
self.buttjpin=0
print(self.buttjpin)
while self.buttjpin==0:
self.get_data(0)
self.getJoystickMotorSpeed(1)
print(self.mot1spd,self.mot2spd,self.mot3spd)
self.CmdMotors()
input("Let the peg rest so vals can be obtained: ")
tc=time.time()
accxlist=[]
accylist=[]
acczlist=[]
ytiltlist=[]
ztiltlist=[]
rolllist=[]
pitchlist=[]
while time.time()-tc<5:
self.ReadSerial(0)
accxlist.append(self.accx)
accylist.append(self.accy)
acczlist.append(self.accz)
ytiltlist.append(self.ytilt)
ztiltlist.append(self.ztilt)
rolllist.append(self.roll)
pitchlist.append(self.pitch)
self.accx_zero=self.avg(accxlist)
self.accy_zero=self.avg(accylist)
self.accz_zero=self.avg(acczlist)
# self.ytilt_zero=self.avg(rolllist)
# self.ztilt_zero=self.avg(pitchlist)
self.ytilt_zero=self.avg(ytiltlist)
self.ztilt_zero=self.avg(ztiltlist)
print("done!")
print(self.ytilt_zero,self.ztilt_zero,self.accx_zero,self.accy_zero,self.accz_zero)
input("Write down ytilt_zero,ztilt_zero and accx_zero,accy_zero,accz_zero in the code for future use!!")
# self.get_data(0)
# time.sleep(2)
# input("get IMU Data. Hit joystick button to stop")
# self.get_data(0)
# a.buttjpin=0
# while a.buttjpin==0:
# self.get_data(0)
# pitch = 180 * np.arctan2(self.accx ,np.sqrt(self.accy*self.accy+ self.accz*self.accz))/3.14;
# roll = 180 * np.arctan2(self.accy, np.sqrt(self.accx*self.accx + self.accz*self.accz))/3.14;
# #print([self.ytilt,self.ytilt-self.ytilt_zero, self.ztilt,self.ztilt-self.ztilt_zero,self.psi ,self.accz, self.sys_cal, self.gyro_cal, self.acc_cal, self.mag_cal])
# #print([self.psi,self.accx-self.accx_zero,self.accy-self.accy_zero,self.accz-self.accz_zero])
# print([self.ytilt-self.ytilt_zero,self.roll-self.ytilt_zero, self.ztilt-self.ztilt_zero,self.pitch-self.ztilt_zero])
def IMUData(self):
self.get_data(0)
time.sleep(2)
input("get IMU Data. Hit joystick button to stop")
self.get_data(0)
        self.buttjpin=0
        while self.buttjpin==0:
self.get_data(0)
pitch = 180 * np.arctan2(self.accx ,np.sqrt(self.accy*self.accy+ self.accz*self.accz))/3.14;
roll = 180 * np.arctan2(self.accy, np.sqrt(self.accx*self.accx + self.accz*self.accz))/3.14;
#print([self.ytilt,self.ytilt-self.ytilt_zero, self.ztilt,self.ztilt-self.ztilt_zero,self.psi ,self.accz, self.sys_cal, self.gyro_cal, self.acc_cal, self.mag_cal])
#print([self.psi,self.accx-self.accx_zero,self.accy-self.accy_zero,self.accz-self.accz_zero])
print([self.ytilt-self.ytilt_zero,self.roll-self.ytilt_zero, self.ztilt-self.ztilt_zero,self.pitch-self.ztilt_zero])
def IMUData2(self):
self.get_data(0)
time.sleep(2)
input("get IMU Data. Hit joystick button to stop")
self.get_data(0)
        self.buttjpin=0
        while self.buttjpin==0:
self.get_data(0)
pitch = 180 * np.arctan2(self.accx ,np.sqrt(self.accy*self.accy+ self.accz*self.accz))/3.14;
roll = 180 * np.arctan2(self.accy, np.sqrt(self.accx*self.accx + self.accz*self.accz))/3.14;
#print([self.ytilt,self.ytilt-self.ytilt_zero, self.ztilt,self.ztilt-self.ztilt_zero,self.psi ,self.accz, self.sys_cal, self.gyro_cal, self.acc_cal, self.mag_cal])
#print([self.psi,self.accx-self.accx_zero,self.accy-self.accy_zero,self.accz-self.accz_zero])
print([round(self.ytilt,2),round(self.ztilt,2)])
""" def Sensorcheck(self):
self.buttjpin=0
sensorcheckflag=0
sensorcheckflag=int(input("Do you want to check sensors? 1 for yes: "))
if sensorcheckflag==1:
while self.buttjpin==0:
self.get_data(0)
#print([ self.str1,self.str2,self.str3])
print([ round(self.str1,2), round(self.str2,2),round(self.str3,2), round(self.phi1deg,1),round(self.phi2deg,1),round(self.phi3deg,1), round(self.beta1deg,1),round(self.beta2deg,1),round(self.beta3deg,1)])
#print([ self.phi1enc,self.phi2enc,self.phi3enc, self.phi1deg,self.phi2deg,self.phi3deg])
time.sleep(1)
self.buttjpin=0
while self.buttjpin==0:
self.get_data(0)
#print([ self.str1,self.str2,self.str3])
print([ round(self.ytilt,3), round(self.ztilt,3)])
#print([ self.phi1enc,self.phi2enc,self.phi3enc, self.phi1deg,self.phi2deg,self.phi3deg]) """
### SAVING AND GET DATA
def SetupNewFile(self):
if self.testname=='1':
self.tn=input("Enter Test name for series, without number: ")
self.TotalList=[]
self.testname=self.tn+'_'+str(self.testCounter)
self.testCounter=self.testCounter+1
print(self.testname)
self.vidname='/home/rachel/odrive/Data_and_Vids/'+ self.testname
self.filename=self.vidname+".csv"
self.vidfile1=self.vidname+".avi"
self.vidfile2=self.vidname+"_2"+".avi"
# self.cap1 = cv2.VideoCapture(int(self.camnum1))
# self.frame_width1 = int(self.cap1.get(3))
# self.frame_height1 = int(self.cap1.get(4))
self.window = 'Camera'
self.out1 = cv2.VideoWriter(self.vidfile1,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (self.frame_width1,self.frame_height1))
self.out2 = cv2.VideoWriter(self.vidfile2,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (self.frame_width1,self.frame_height1))
def DataToSave(self):
#self.TotalList.append([self.ytilt,self.ztilt,self.str1,self.str2,self.str3])
self.TotalList.append(self.list_of_floats)
def writevideo(self):
self.ret1, self.frame1 = self.cap1.read()
self.out1.write(self.frame1)
# self.ret2, self.frame2 = self.cap2.read()
# self.out2.write(self.frame2)
def delaywithvideo(self,timedelay):
tc=time.time()
while time.time()-tc<timedelay:
#self.writevideo()
self.get_data(1)
def writefile(self):
with open(self.filename, "w") as f:
writer = csv.writer(f)
writer.writerows(self.TotalList)
print("saved")
def finishtestrecording(self):
self.StopPeg()
#a.writevideo()
self.writefile()
#self.cap1.release()
self.out1.release()
self.out2.release()
cv2.destroyAllWindows()
def get_data(self,tosaveflag):
self.ReadDisplayCVApril(tosaveflag)
self.ReadSerial(tosaveflag)
def ring_alignment(self):
""" Perform the alignment of the ring and the peg using data from the IMU. """
xtilt_thresh = 3
ytilt_thresh = 3
while True:
dz = 0.03
self.move_gantry(self.x, self.y, self.z+dz)
perp_vec = np.cross(self.rot_ax, [0,0,1])
if self.rot_ang > 3:
                self.move_gantry(self.x - perp_vec[0], self.y - perp_vec[1], self.z - perp_vec[2])
elif self.rot_ang < 3:
break
self.move_gantry(self.x,self.y,self.z+0.1)
def exit_system(self):
os.system("stty echo")
sys.exit()
if __name__ == '__main__':
odrv0 = Odrive('20673881304E') # Only has 1 winch
odrv1 = Odrive('2087377E3548') # Has 2 winches
if (doCalibrate):
print('ODrive 0 Calibrating')
        odrv0.full_init()
time.sleep(2)
print('ODrive 1 Calibrating')
        odrv1.full_init()
print('Calibration Complete')
app = ManualWinchApp()
app.run()
|
python
|
# Generated by Django 2.0.3 on 2018-04-07 13:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0035_auto_20180402_1507'),
]
operations = [
migrations.AddField(
model_name='project_attendees',
name='minimum_registration',
field=models.PositiveIntegerField(default=2),
),
migrations.AlterField(
model_name='project_volunteers',
name='minimum_registration',
field=models.PositiveIntegerField(default=2),
),
]
|
python
|
from .fpath import *
from .tree import *
|
python
|
from .constants import AWSRegion
def parse_aws_region(region_arg: str) -> AWSRegion:
for region in AWSRegion:
if region_arg == region.value[0]:
return region
raise ValueError(f'Invalid AWS region {region_arg}')
|
python
|
import os
import json
import dfd
extended_python_path = dfd.get_path_if_exists('extended_python_path')
environment_path = dfd.get_path_if_exists('environment')
if extended_python_path:
import site
site.addsitedir(extended_python_path)
if environment_path:
with open(environment_path) as env_file:
new_env_vars = json.load(env_file)
os.environ.update(new_env_vars)
real_settings = os.environ.get('DJANGO_SETTINGS_MODULE')
if real_settings is not None:
os.environ['REAL_DJANGO_SETTINGS'] = real_settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'dfd_settings'
|
python
|
from typing import Optional, Tuple
from munch import Munch
import logging
from api.jwt import Jwt, JwtPayload
ADMIN_AUTHORITY = "ADMIN"
BASIC_AUTHORITY = "BASIC"
class Auth:
def __init__(self, event):
self._event = Munch.fromDict(event)
self.jwt = Jwt()
@property
def auth_header(self) -> Optional[str]:
if self.event.headers.get("Authorization"):
return (
self.event.headers.Authorization[7:]
if "Bearer " in self.event.headers.Authorization
else self.event.headers.Authorization
)
@property
def refresh_header(self) -> Optional[str]:
if self.event.headers.get("Refresh"):
return (
self.event.headers.Refresh[7:]
if "Bearer " in self.event.headers.Refresh
else self.event.headers.Refresh
)
def validate_jwt(self) -> Tuple[bool, Optional[str], Optional[str]]:
jwt_payload = self.get_jwt_payload()
if not jwt_payload:
logging.info("JWT payload is missing.")
return False, None, None
if not jwt_payload.all_fields_present():
logging.info("JWT payload is missing a field.")
return False, None, None
if jwt_payload.is_expired():
refresh_payload = self.get_refresh_payload()
if (
refresh_payload
and refresh_payload.all_fields_present()
and not refresh_payload.is_expired()
):
new_jwt = self.jwt.extend_jwt_token(jwt_payload)
return (
True,
new_jwt,
self.refresh_header,
)
return False, None, None
return True, self.auth_header, self.refresh_header
def is_admin(self) -> bool:
payload = self.get_jwt_payload()
return ADMIN_AUTHORITY in payload.authorities if payload else False
def get_jwt_payload(self) -> Optional[JwtPayload]:
return self.jwt.decode_jwt_token(self.auth_header)
def get_refresh_payload(self) -> Optional[JwtPayload]:
return self.jwt.decode_refresh_token(self.refresh_header)
@property
def event(self) -> Munch:
return Munch.fromDict(self._event)
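# Minimal usage sketch (hypothetical event shape, assuming an API Gateway-style event
# whose headers carry Bearer tokens):
#   auth = Auth({"headers": {"Authorization": "Bearer <jwt>", "Refresh": "Bearer <refresh>"}})
#   valid, jwt_token, refresh_token = auth.validate_jwt()
#   if valid and auth.is_admin():
#       ...  # handle an admin-only request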
|
python
|
import choraconfig, re, sys, os.path
def master_theorem_bounds_callout(params) :
if "logpath" not in params :
print "ERROR: duet_bounds_callout was called without a path"
sys.exit(0)
#output = ""
with open(params["logpath"],"rb") as logfile : output = logfile.read().strip()
return output
# really should have a tool root
tool = choraconfig.get_default_tool_dict()
tool["displayname"] = "Master Theorem"
tool["shortname"] = "master"
tool["root"] = choraconfig.benchroot + "rba/master-theorem"
tool["cmd"] = ["python",os.path.join(tool["root"],"mastertheorem.py"),"{filename}"]
tool["bounds_callout"] = master_theorem_bounds_callout
tool["no_assert_line_numbers"] = True
tool["error_callout"] = choraconfig.generic_error_callout
|
python
|
import numpy as np
class Territory:
def __init__(self,name,adjacent_territories,occupying_player=None,troops=None):
self.name = name
self.adjacent_territories = adjacent_territories
self.occupying_player = occupying_player
self.troops = troops
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
    def get_bsr(self,game):
        # Ratio of the troop counts on adjacent territories to the troop count on this territory
        get = getattr(game,'get_territory')
        self.bsr = sum([len(get(territory).troops) for territory in self.adjacent_territories if get(territory).troops]) / len(self.troops)
        return self.bsr
def get_nbsr(self,game):
self.nbsr = self.get_bsr(game) / sum([trt.get_bsr(game) for trt in self.occupying_player.territories])
return self.nbsr
def json(self):
return {
"name":self.name,
"adjacent_territories":self.adjacent_territories,
"occupying_player":self.occupying_player.id if self.occupying_player else None,
"troops": [troop.json() for troop in self.troops] if self.occupying_player else None
}
def print(self):
print("Territory",self.name," occupied by player",self.occupying_player.id,
"with",len(self.troops),"troops")
|
python
|
# This file will contain the entry point where you load the data and init the variables
|
python
|
from mars_profiling.report.presentation.core.collapse import Collapse
from mars_profiling.report.presentation.core.container import Container
from mars_profiling.report.presentation.core.duplicate import Duplicate
from mars_profiling.report.presentation.core.frequency_table import FrequencyTable
from mars_profiling.report.presentation.core.frequency_table_small import (
FrequencyTableSmall,
)
from mars_profiling.report.presentation.core.html import HTML
from mars_profiling.report.presentation.core.image import Image
from mars_profiling.report.presentation.core.root import Root
from mars_profiling.report.presentation.core.sample import Sample
from mars_profiling.report.presentation.core.table import Table
from mars_profiling.report.presentation.core.toggle_button import ToggleButton
from mars_profiling.report.presentation.core.variable import Variable
from mars_profiling.report.presentation.core.variable_info import VariableInfo
from mars_profiling.report.presentation.core.warnings import Warnings
|
python
|
from django.urls import path, include
from users.api.loginviews import LoginAPI
urlpatterns = [
path('', LoginAPI.as_view())
]
|
python
|
def corrupt_part_data_on_disk(node, table, part_name):
part_path = node.query(
"SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format(
table, part_name
)
).strip()
corrupt_part_data_by_path(node, part_path)
def corrupt_part_data_by_path(node, part_path):
print("Corrupting part", part_path, "at", node.name)
print(
"Will corrupt: ",
node.exec_in_container(
["bash", "-c", "cd {p} && ls *.bin | head -n 1".format(p=part_path)]
),
)
node.exec_in_container(
[
"bash",
"-c",
"cd {p} && ls *.bin | head -n 1 | xargs -I{{}} sh -c 'echo \"1\" >> $1' -- {{}}".format(
p=part_path
),
],
privileged=True,
)
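# Example usage sketch (node, table, and part name below are hypothetical; in the
# ClickHouse integration tests the node typically comes from the cluster fixture):
#   corrupt_part_data_on_disk(node1, "test_table", "all_1_1_0")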
|
python
|
#!/usr/bin/python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unitests for wbo2.py"""
import unittest
from absl.testing import absltest
import config_lib
import mock
import serial
import wbo2
TEST_FRAME = (b'Z\xa5\x08\x0c\xf8\x0f\xff \x00\x020\x01`\x03\xd0\x00\x15\x00'
b'\x1a\x00 \x01\xa4\x00\x00\x03\x00i')
class TestWBO2(unittest.TestCase):
"""WBO2 unittests."""
def setUp(self):
super().setUp()
self.start = 0
def MockRead(self, size=1):
data = TEST_FRAME[5:] + TEST_FRAME
output = data[self.start:self.start + size]
self.start += size
return output
def testFindFrameStart(self):
mock_serial = mock.create_autospec(serial.Serial)
mock_serial.read.side_effect = self.MockRead
self.assertEqual(TEST_FRAME, wbo2.FindFrameStart(mock_serial))
def testCheckFrame(self):
self.assertTrue(wbo2.CheckFrame(TEST_FRAME))
self.assertFalse(wbo2.CheckFrame(TEST_FRAME[:-1] + b'0x02'))
def testReadSerial(self):
mock_serial = mock.create_autospec(serial.Serial)
mock_serial.read.side_effect = self.MockRead
for frame in wbo2.ReadSerial(mock_serial):
self.assertEqual(TEST_FRAME, frame)
break
def testGetBytes(self):
self.assertEqual(0.5962854349951124, wbo2.GetBytes(TEST_FRAME, 'user_3'))
self.assertEqual(0.0010162306553235967,
wbo2.GetBytes(TEST_FRAME, 'thermocouple_1'))
self.assertEqual(3320, wbo2.GetBytes(TEST_FRAME, 'tick'))
self.assertEqual(0, wbo2.GetBytes(TEST_FRAME, 'rpm_count'))
self.assertEqual(14.69820556640625, wbo2.GetBytes(TEST_FRAME, 'lambda_16'))
def testLambda16ToAFR(self):
lambda_16 = int.from_bytes(b'\x0f\xff', 'big')
self.assertEqual(14.69820556640625, wbo2.Lambda16ToAFR(lambda_16))
def testRPMCountToRPM(self):
self.assertEqual(6000, wbo2.RPMCountToRPM(1000, 4))
self.assertEqual(4000, wbo2.RPMCountToRPM(1000, 6))
def testGetUser3(self):
self.assertEqual(0.5962854349951124,
wbo2.GetBytes(TEST_FRAME, 'user_3'))
def testAddConfigValues(self):
config = config_lib.LoadConfig('etc/corrado.yaml')
interface = wbo2.WBO2(config, start_process=False)
keys = {'afr': None, 'rpm': None, 'tps_voltage': None}.keys()
self.assertEqual(keys, interface.values.keys())
if __name__ == '__main__':
absltest.main()
|
python
|
import numpy as np
import pandas as pd
import bbi
import pysam
##only mappabilty_by_idx called from top level
def load_chromsizes(f_bw):
chroms = bbi.chromsizes(f_bw)
chroms.pop('chrM')
chroms.pop('chrX')
chroms.pop('chrY')
return chroms
def mappability_by_window(f_mapp, window, overlap=0):
chroms = load_chromsizes(f_mapp)
mapp_lst = []
for chr_id, chr_size in chroms.items():
print(chr_id, end=' ')
i = 0
while i + window < chr_size:
# print(i)
mapp = bbi.fetch(f_mapp, chr_id, i, i + window, bins=1)[0]
mapp_lst.append([chr_id, i, i+window, mapp])
i += window - overlap
return pd.DataFrame(np.array(mapp_lst),
columns=['CHROM', 'START', 'END', 'MAPP'])
def mappability_by_idx(f_mapp, idx):
mapp_lst = []
chr_prev = ''
for row in idx:
chr_id = 'chr{}'.format(row[0])
start = row[1]
end = row[2]
if chr_id != chr_prev:
print(chr_id)
mapp = bbi.fetch(f_mapp, chr_id, start, end, bins=1)[0]
mapp_lst.append([row[0], start, end, mapp])
chr_prev = chr_id
return mapp_lst
def P_bases_by_window(f_fasta, window, overlap=0):
fasta = pysam.FastaFile(f_fasta)
sizes = fasta.lengths
chroms = fasta.references
mapp_lst = []
for chr_id, chr_size in zip(chroms, sizes):
print(chr_id, end=' ')
i = 0
while i + window < chr_size:
seq = fasta.fetch(chr_id, i, i + window)
mapp = seq.count('P') / window
mapp_lst.append([chr_id, i, i+window, mapp])
i += window - overlap
return pd.DataFrame(np.array(mapp_lst),
columns=['CHROM', 'START', 'END', 'MAPP'])
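# Example usage sketch (the bigWig/FASTA paths and window size below are illustrative):
#   mapp_df = mappability_by_window("k50.umap.bw", window=100000)
#   pbase_df = P_bases_by_window("reference.fa", window=100000)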
|
python
|
import unittest
import io
from contextlib import redirect_stdout
from rdflib import Namespace, Graph
from sparqlslurper import SlurpyGraph
from sparqlslurper._graphdb_slurpygraph import GraphDBSlurpyGraph
endpoint = 'https://graph.fhircat.org/repositories/fhirontology'
class SparqlParametersTestCase(unittest.TestCase):
def test_parms(self):
""" Show how to pass a parameter to a wrapper instance
This test assumes a GraphDB SPARQL endpoint
loaded with the fhir.ttl w/ the inference option on.
We are testing that the parameter makes it through and
changes the behavior of the server.
Note that a copy of fhir.ttl can be found in tests/data.
"""
FHIR = Namespace("http://hl7.org/fhir/")
g = GraphDBSlurpyGraph(endpoint)
self.assertLess(85, len(list(g.predicate_objects(FHIR.Patient))))
g = GraphDBSlurpyGraph(endpoint)
g.sparql.addParameter("infer", "false")
self.assertGreater(60, len(list(g.predicate_objects(FHIR.Patient))))
g = GraphDBSlurpyGraph(endpoint + '?infer=false')
self.assertGreater(60, len(list(g.predicate_objects(FHIR.Patient))))
if __name__ == '__main__':
unittest.main()
|
python
|
import pathlib
import subprocess
import signal
import time
import os
import sys
import argparse
def main():
parser = argparse.ArgumentParser(prog="run-snet-services")
parser.add_argument("--daemon-config-path", help="Path to daemon configuration file", required=False)
args = parser.parse_args(sys.argv[1:])
root_path = pathlib.Path(__file__).absolute().parent
all_p = [start_snetd(root_path, args.daemon_config_path), start_service(root_path)]
# Continuous checking all subprocess
while True:
for p in all_p:
p.poll()
if p.returncode and p.returncode != 0:
kill_and_exit(all_p)
time.sleep(1)
def start_snetd(cwd, daemon_config_path=None):
cmd = ["snetd", "serve"]
if daemon_config_path is not None:
cmd.extend(["--config", daemon_config_path])
return subprocess.Popen(cmd, cwd=cwd)
def start_service(cwd):
return subprocess.Popen(["python3.6", "-m", "services.summary_server"], cwd=cwd)
def kill_and_exit(all_p):
"""
Kills main, service and daemon's processes if one fails.
"""
for p in all_p:
try:
os.kill(p.pid, signal.SIGTERM)
except Exception as e:
print(e)
exit(1)
if __name__ == "__main__":
main()
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('index',views.index,name='Iniciowarehouse')
]
|
python
|
# coding=utf-8
from collections import namedtuple
CamouflageInfo = namedtuple('CamouflageInfo', ['id', 'schemeId'])
|
python
|
lua_1 = """
local k = 1/math.sqrt(0.05)
local val = tonumber(ARGV[1])
local old_vals = redis.call('get',KEYS[1])
local new_vals = {}
if (old_vals) then
old_vals = cjson.decode(old_vals)
new_vals["count_1"] = old_vals['count_1'] + 1
local delta = val - old_vals["mean_1"]
new_vals["mean_1"] = old_vals["mean_1"] + delta / new_vals["count_1"]
new_vals["M2_1"] = old_vals["M2_1"] + delta * (val - new_vals["mean_1"])
new_vals["variance_1"] = new_vals["M2_1"] / new_vals["count_1"]
local std = math.sqrt(new_vals["variance_1"])
new_vals["ODV1L"] = new_vals["mean_1"] - k * std
new_vals["ODV1U"] = new_vals["mean_1"] + k * std
if (val <= new_vals["ODV1U"] and val >= new_vals["ODV1L"]) then
new_vals["count_2"] = old_vals['count_2'] + 1
delta = val - old_vals["mean_2"]
new_vals["mean_2"] = old_vals["mean_2"] + delta / new_vals["count_2"]
new_vals["M2_2"] = old_vals["M2_2"] + delta * (val - new_vals["mean_2"])
new_vals["variance_2"] = new_vals["M2_2"] / new_vals["count_2"]
std = math.sqrt(new_vals["variance_2"])
new_vals["ODV2L"] = new_vals["mean_2"] - k * std
new_vals["ODV2U"] = new_vals["mean_2"] + k * std
else
new_vals["count_2"] = old_vals['count_2']
new_vals["mean_2"] = old_vals["mean_2"]
new_vals["M2_2"] = old_vals["M2_2"]
new_vals["variance_2"] = old_vals["variance_2"]
new_vals["ODV2L"] = old_vals["ODV2L"]
new_vals["ODV2U"] = old_vals["ODV2U"]
end
else
new_vals["count_1"] = 1
new_vals["mean_1"] = val
new_vals["M2_1"] = 0
new_vals["variance_1"] = 0
new_vals["ODV1L"] = val
new_vals["ODV1U"] = val
new_vals["count_2"] = 1
new_vals["mean_2"] = val
new_vals["M2_2"] = 0
new_vals["variance_2"] = 0
new_vals["ODV2L"] = val
new_vals["ODV2U"] = val
end
redis.call('set', KEYS[1], cjson.encode(new_vals))
"""
lua_2 = """
local val = tonumber(ARGV[1])
local dt = tostring(ARGV[2])
local month = tostring(ARGV[3])
local hour = tostring(ARGV[4])
local old_vals = redis.call('get',KEYS[1])
local new_vals = {}
if (old_vals) then
old_vals = cjson.decode(old_vals)
new_vals = old_vals
if(old_vals["count_" .. dt]) then
new_vals["count_" .. dt] = old_vals["count_" .. dt] + 1
new_vals["sum_" .. dt] = old_vals["sum_" .. dt] + val
else
new_vals["count_" .. dt] = 1
new_vals["sum_" .. dt] = val
end
if (old_vals["count_" .. dt .. '_' .. hour]) then
new_vals["count_" .. dt .. '_' .. hour] = old_vals["count_" .. dt .. '_' .. hour] + 1
new_vals["sum_" .. dt .. '_' .. hour] = old_vals["sum_" .. dt .. '_' .. hour] + val
else
new_vals["count_" .. dt .. '_' .. hour] = 1
new_vals["sum_" .. dt .. '_' .. hour] = val
end
else
new_vals["count_" .. dt .. '_' .. hour] = 1
new_vals["sum_" .. dt .. '_' .. hour] = val
new_vals["count_" .. dt] = 1
new_vals["sum_" .. dt] = val
end
redis.call('set', KEYS[1], cjson.encode(new_vals))
"""
|
python
|
import time
from django.core.management.base import BaseCommand
from django.db import transaction
import database_locks
class Command(BaseCommand):
help = 'Lock it!'
def add_arguments(self, parser):
parser.add_argument('lock_name', help='lock name to be used')
parser.add_argument(
'-o',
'--owner',
help='Owner to be registered with the lock (used to renew and persist lock - hostname is default)',
)
parser.add_argument(
'-d', '--duration', default=10, help='Lock duration (in seconds)'
)
def handle(self, *args, **options):
with database_locks.lock(options['lock_name'], locked_by=options['owner']):
self.stdout.write(f'Got the lock, sleeping {options["duration"]} seconds')
time.sleep(options["duration"])
            self.stdout.write('Releasing lock')
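# Example invocation (the management command name depends on this file's name under
# <app>/management/commands/; "lockit" below is hypothetical):
#   python manage.py lockit my-lock --owner worker-1 --duration 30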
|
python
|
import logging.config
import uvicorn
from fastapi import FastAPI, Request, status
from fastapi.encoders import jsonable_encoder
from dotenv import load_dotenv
from fastapi.responses import JSONResponse, PlainTextResponse
from starlette.exceptions import HTTPException as StarletteHTTPException
from fastapi.exceptions import RequestValidationError
from layer_view import view
from config.settings import config_basic
logging.config.dictConfig(config_basic)
logger = logging.getLogger(__name__)
load_dotenv()
app = FastAPI()
app.include_router(view.router)
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request, exc):
return PlainTextResponse(str(exc.detail), status_code=exc.status_code)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError):
return JSONResponse(
status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
content=jsonable_encoder({"detail": exc.errors(), "body": exc.body}),
)
@app.exception_handler(Exception)
async def exception_general_handler(request: Request, exc: Exception):
return JSONResponse(
status_code=418,
content={"message": f"Nope wrong Ask"},
)
if __name__ == "__main__":
uvicorn.run(
"app:app", host="0.0.0.0", port=5001, reload=True
)
|