content (string, 0–894k chars) | type (string, 2 values)
---|---|
__all__ = ['auth', 'constants', 'controllers', 'forms']
|
python
|
#!/bin/python
import sys
S = raw_input().strip()
try:
    r = int(S)
    print r
except ValueError:
    print "Bad String"
|
python
|
#!/usr/bin/env python3
# coding: utf-8
"""Automatic EcoFlex sequences annotation pipeline.
Edits:
- Recolor all AmpR with the same color as YTK parts
- Add AmpR terminator feature with standard color
"""
import copy
import io
import itertools
import json
import re
import os
import warnings
import sys
import bs4 as bs
import fs.path
import six
import tqdm
import requests
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq, translate
from Bio.SeqFeature import (
SeqFeature,
FeatureLocation,
CompoundLocation,
Reference,
)
from Bio.SeqIO import read, write
from Bio.SeqRecord import SeqRecord
from Bio.Restriction import BsaI
from fs.zipfs import ReadZipFS
from moclo.record import CircularRecord
from moclo.regex import DNARegex
ZIP_URL = "https://media.addgene.org/cms/filer_public/1a/00/1a00a9f1-608f-453a-937a-7f46cf872dfc/ecoflex-kit-genbank-files.zip"
URL = "https://www.addgene.org/cloning/moclo/freemont-ecoflex/"
UA = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
# Part sequence for automatic annotation / annotation relocation
AMPR_TERM = DNARegex("gattatcaaaaaggatctt") # Reverse 3' of AmpR terminator
BB_PREFIX = DNARegex("gaattcgcggccgcttctag")
CMR_PROMOTER = DNARegex(
'tttagcttccttagctcctgaaaatctcgataactcaaaaaatacgcccggtagtgatcttatttcattatggtgaaagttggaacctcttacgtgcccgatcaa')
CMR_TERMINATOR = DNARegex(
'accaataaaaaacgcccggcggcaaccgagcgttctgaacaaatccagatggagttctgaggtcattactggatctatcaacaggagtccaagcgagctcgatatcaaa')
AMPR_PROMOTER = DNARegex(
'actcttcctttttcaatattattgaagcatttatcagggttattgtctcatgagcggatacatatttgaatgtatttagaaaaataaacaaataggggttccgcgcacatttccccgaaaagtgccacctg')
AMPR_TERMINATOR = DNARegex(
'gattatcaaaaaggatcttcacctagatccttttaaattaaaaatgaagttttaaatcaatctaaagtatatatgagtaaacttggtctgacag')
NAME_REGEX = re.compile(r"([^ ]*) \(([^\)]*)\)(_[A-Z]{2})")
COLOR_REGEX = re.compile(r"color: (#[0-9a-fA-F]{6})")
FULL_SEQUENCES = {
"pBP-BBa_B0034": "https://www.addgene.org/72980/sequences/",
"pBP-SJM901": "https://www.addgene.org/72966/sequences/",
}
# Partial sequences from the reference EcoFlex paper
PROMOTERS = {
"pBP-SJM901": "CTATTTTACAGCTAGCTCAGTCCTAGGTATAATGCTAGCGTAC",
"pBP-SJM902": "CTATTTTACAGCTAGCTCAGTCCTAGGGATTATGCTAGCGTAC",
"pBP-SJM903": "CTATCTTATAGCTAGCTCAGTCCTTGGGATTATGCTAGCGTAC",
"pBP-SJM905": "CTATTTTATAGCTAGCTCAGTCCTTGGGATTATGCTAGCGTAC",
"pBP-SJM906": "CTATTTGATGGCTAGCTCAGTCCTAGGGATTGTGCTAGCGTAC",
"pBP-SJM908": "CTATTTTATAGCTAGCTCAGCCCTTGGTATTATGCTAGCGTAC",
"pBP-SJM910": "CTATTTGATGGCTAGCTCAGTCCTTGGTATTATGCTAGCGTAC",
"pBP-SJM911": "CTATTTGACAGCTAGCTCAGTCCTTGGTACTGTGCTAGCGTAC",
"pBP-SJM912": "CTATTTGATAGCTAGCTCAGTCCTAGGTACTATGCTAGCGTAC",
"pBP-SJM914": "CTATTTGATGGCTAGCTCAGTCCTAGGGATTGTGCTAGCGTAC",
"pBP-SJM915": "CTATTTTATGGCTAGCTCAGTCCTTGGTATTATGCTAGCGTAC",
}
def translate_color(feature):
notes = feature.qualifiers.get("note", [])
color_note = next((n for n in notes if n.startswith("color: #")), None)
if color_note is None:
return
hex_color = COLOR_REGEX.match(color_note).group(1).lower()
feature.qualifiers["note"].remove(color_note)
feature.qualifiers.update(
{
"ApEinfo_fwdcolor": [hex_color],
"ApEinfo_revcolor": [hex_color],
"ApEinfo_graphicformat": [
"arrow_data {{0 1 2 0 0 -1} {} 0} width 5 offset 0"
],
}
)
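# Illustrative example (not part of the original pipeline): a feature entering
# translate_color() with qualifiers {"label": ["AmpR"], "note": ["color: #9F4240"]}
# leaves with the colour note removed and ApEinfo_fwdcolor/ApEinfo_revcolor both
# set to ["#9f4240"], plus the standard ApEinfo_graphicformat entry, so ApE-style
# viewers render the feature in that colour; features without a "color: #..." note
# are returned unchanged.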
if __name__ == "__main__":
warnings.simplefilter("ignore")
session = requests.Session()
# load the kit inventory page
with session.get(URL) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
# load the zip archive
with session.get(ZIP_URL) as res:
archive = ReadZipFS(six.BytesIO(res.content)).opendir(
"/EcoFlex - GenBank/"
)
# load inventory
inventory = soup.find("table", class_="kit-inventory-table")
it = tqdm.tqdm(inventory.find_all("tr")[1:])
for row in it:
# extract each row
row_text = row.find("a").text
# get antibiotics resistances
resistance = row.find("span", class_="resistance-spacing").text.strip()
name = id_ = row_text.strip()
# Update the progress bar
it.set_description(id_)
# TODO: entry vector not supported
if id_ in ('pBP', 'pBP-ORF', 'pBP-lacZ'):
continue
elif id_ == "pBP-T7_RBS-His6-Thrombin":
name = id_ = "pBP-T7-RBS-His6"
elif id_.startswith("pBP-T7_"):
name = id_ = id_.replace("_", "-")
elif id_.startswith("pBP-ORF-"):
name = id_ = id_.replace("pBP-ORF-", "pBP-")
elif id_ == "pBP-HexHis":
name = id_ = "pBP-His6"
elif id_.startswith("pBP_BBa"):
name = id_ = id_.replace("pBP_BBa", "pBP-BBa")
# extract info
info = {
"resistance": resistance,
# "name": id_,
"id": id_,
# "type": type_,
"location": row.find("b").text.strip().replace(" / ", ""),
"addgene_id": row.find("a").get("href").strip("/"),
}
# get the online full sequence
if id_ in FULL_SEQUENCES:
# Load the AddGene sequences page and get the full sequence
with requests.get(FULL_SEQUENCES[id_]) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
section = soup.find("section", id="depositor-full")
gb_url = soup.find("a", class_="genbank-file-download").get('href')
# Get the Genbank file
with requests.get(gb_url) as res:
gb = CircularRecord(read(io.StringIO(res.text), "gb"))
# get the pBP-SJM901 sequence and patch it
elif id_.startswith("pBP-SJM"):
# get pBP-SJM
# Load the AddGene sequences page and get the full sequence
with requests.get(FULL_SEQUENCES["pBP-SJM901"]) as res:
soup = bs.BeautifulSoup(res.text, "html.parser")
section = soup.find("section", id="depositor-full")
gb_url = soup.find("a", class_="genbank-file-download").get('href')
# Get the Genbank file
with requests.get(gb_url) as res:
gb = CircularRecord(read(io.StringIO(res.text), "gb"))
# replace the target sequence
gb.seq = Seq(
str(gb.seq.upper()).replace(PROMOTERS["pBP-SJM901"], PROMOTERS[id_])
)
gb.description = gb.description.replace("SJM901", id_[4:])
gb.keywords = [id_[4:]]
# get the ZIP sequence
else:
path = next(
(
f
for f in archive.walk.files('/')
if fs.path.basename(f).lower() == '{}.gb'.format(id_).lower()
),
None,
)
if id_ == "pBP-His6":
path = "/Level 0/Tags/pBP-His6_tag.gb"
elif id_ == "pBP-T7-RBS-His6":
path = "/Level 0/T7 parts/pBP-T7_RBS_His6.gb"
elif id_ == "pBP-T7-RBS":
path = "/Level 0/T7 parts/pBP-T7_RBS.gb"
elif id_ == "pBP-Strep(II)":
path = "/Level 0/Tags/pBP-StrepII_tag.gb"
elif id_ == "pBP-pET-RBS":
path = "/Level 0/RBS/pBP-PET_RBS.gb"
elif id_ == "pBP-BBa_B0034":
path = "/Level 0/Promoters/pBP_BBa_B0034.gb"
if path is None:
print("COULD NOT FIND", id_)
continue
with archive.open(path) as f:
gb = CircularRecord(read(f, "gb"))
# Copy well documented information from one record to the other
gb.seq = gb.seq.upper()
gb.seq.alphabet = IUPAC.unambiguous_dna
gb.id = id_
gb.name = name
gb.annotations['references'].clear() # FIXME ?
# quick feature accessor
def get_features_from_label(label):
return (
f for f in gb.features if label in f.qualifiers.get("label", [])
)
def get_features_from_note(note):
return (
f for f in gb.features if note in f.qualifiers.get("note", [])
)
def get_features(name):
return itertools.chain(
get_features_from_label(name),
get_features_from_note(name),
)
# Correct overlapping features by setting the origin just before the
# biobrick prefix
pref = next(itertools.chain(
get_features("BioBrick prefix"),
get_features_from_note("BioBrick prefix")
))
if pref.location is None:
match = BB_PREFIX.search(gb)
pref.location = FeatureLocation(
start=match.start(),
end=match.end(),
strand=1,
)
gb <<= pref.location.start - 1
# AmpR recolor and annotations
ampr = next(get_features("AmpR"), None)
if ampr is not None:
ampr.qualifiers = {
"label": "AmpR",
"codon_start": 1,
"gene": "bla",
"product": "beta-lactamase",
"function": "ampicilin and caribenicillin resistance",
"translation": ampr.extract(gb.seq).translate(),
"note": ["color: #9F4240"],
"db_xref": [
"GO:0005515",
"GO:0008800",
"GO:0016787",
"GO:0030655",
"GO:0046677",
"InterPro:IPR000871",
"InterPro:IPR023650",
"InterPro:IPR012338",
"PDB:1ZG4",
"UniProtKB/Swiss-Prot:P62593",
],
"EC_number": "3.5.2.6",
}
old_prom = next(get_features_from_note('AmpR promoter'), None)
if old_prom is not None:
gb.features.remove(old_prom)
ampr_prom = next(get_features_from_label("AmpR promoter"), None)
if ampr_prom is None:
start, end = AMPR_PROMOTER.search(gb.seq).span()
ampr_prom = SeqFeature(FeatureLocation(start, end, -1))
gb.features.append(ampr_prom)
ampr_prom.type = "promoter"
ampr_prom.qualifiers["label"] = ["AmpR Promoter"]
ampr_prom.qualifiers["note"] = ["color: #ff6666"]
ampr_term = next(get_features_from_label("AmpR terminator"), None)
if ampr_term is None:
start, end = AMPR_TERMINATOR.search(gb.seq).span()
ampr_term = SeqFeature(FeatureLocation(start, end, -1))
gb.features.append(ampr_term)
ampr_term.type = 'terminator'
ampr_term.qualifiers['label'] = 'AmpR Terminator'
ampr_term.qualifiers['note'] = ['color: #ff6666']
# CmR recolor and annotations
cmr = next(get_features('CmR'), None)
if cmr is not None:
cmr.qualifiers.update(
{
"codon_start": [1],
"gene": ["cat"],
"product": ["chloramphenicol acetyltransferase"],
"label": ["CmR"],
"function": ["chloramphenicol resistance"],
"note": ["color: #0000ff; direction: LEFT"],
"EC_number": ["2.3.1.28"],
"db_xref": [
"UniProtKB/Swiss-Prot:P62577",
"GO:0008811",
"GO:0016740",
"GO:0016746",
"GO:0046677",
"PFAM:PF00302",
],
}
)
cmr_prom = next(get_features("CamR Promoter"), None)
if cmr_prom is None:
start, end = CMR_PROMOTER.search(gb.seq).span()
cmr_prom = SeqFeature(location=FeatureLocation(start, end, -1))
gb.features.append(cmr_prom)
cmr_prom.type = "promoter"
cmr_prom.qualifiers.update(
{
"label": ["CmR Promoter"],
"note": ["color: #66ccff; direction: LEFT"],
}
)
cmr_term = next(get_features_from_label("CamR Terminator"), None)
if cmr_term is None:
start, end = CMR_TERMINATOR.search(gb.seq).span()
cmr_term = SeqFeature(location=FeatureLocation(start, end, -1))
gb.features.append(cmr_term)
cmr_term.type = "terminator"
cmr_term.qualifiers.update(
{
"label": ["CmR Terminator"],
"note": ["color: #66ccff; direction: LEFT"],
}
)
old_term = next(get_features_from_note('lambda t0 terminator'), None)
if old_term is not None:
gb.features.remove(old_term)
# GFP recolor and annotations
gfp = next(get_features_from_label("GFP"), None)
if gfp is not None:
gfp.qualifiers.update(
{
"label": "GFP",
"note": ["color: #34ff03"],
"product": ["green fluorescent protein"],
"gene": ["GFP"],
"db_xref": [
"PDB:1H6R",
"InterPro:IPR009017",
"InterPro:IPR011584",
"InterPro:IPR000786",
"PFAM:PF01353",
"GO:0008218",
"GO:0006091",
"GO:0018298",
"UniProtKB/Swiss-Prot:P42212",
],
"inference": [
"DESCRIPTION:alignment:blastx:UniProtKB/Swiss-Prot:P42212"
],
}
)
# mRFP1 recolor and annotations
rfp = next(get_features_from_label("mRFP1"), None)
if rfp is not None:
rfp.qualifiers.update(
{
"label": "mRFP",
"product": "mRFP1",
"note": [
"monomeric derivative of DsRed (Campbell et al., 2002)",
"iGEM Part: BBa_E1010",
"color: #c16969",
],
"db_xref": [
"UniProtKB/Swiss-Prot:Q9U6Y8",
"GO:0008218",
"GO:0006091",
"GO:0018298",
"PDB:2H5R",
],
}
)
# patch pBP-SJM promoters
if id_.startswith("pBP-SJM"):
promoter = next(get_features_from_label("J23119 promoter"))
promoter.type = "promoter"
promoter.qualifiers.update({
"function": ["strong constitutive promoter"],
"note": ["color: #00a1ee; direction: RIGHT"],
})
if id_ == "pBP-SJM901":
promoter.qualifiers['label'] = "J23119 Promoter"
promoter.qualifiers['note'].insert(0, "Anderson series consensus promoter")
else:
promoter.qualifiers['label'] = "{} Promoter".format(id_[4:])
promoter.qualifiers['note'].insert(0, "derived from pBP-SJM901 (BBa_J23119)")
# if any(f.location is None for f in gb.features):
# continue
for f in gb.features:
if f.location is None:
print(gb, f)
# sort features by start location, source always first
gb.features.sort(
key=lambda f: (-len(gb.seq)) * (f.type == "source")
+ f.location.start
)
# translate color from notes to ApEinfo
for feature in gb.features:
translate_color(feature)
# Add an EcoFlex article reference
ref = Reference()
ref.authors = 'Moore SJ, Lai HE, Kelwick RJ, Chee SM, Bell DJ, Polizzi KM, Freemont PS.'
ref.title = 'EcoFlex: A Multifunctional MoClo Kit for E. coli Synthetic Biology.'
ref.journal = 'ACS Synth Biol 2016;5:1059-1069.'
ref.pubmed_id = '27096716'
gb.annotations['references'].append(ref)
# Fix the direct submission reference
ref = Reference()
# ref = gb.annotations["references"][-1]
ref.authors = "Larralde M"
ref.title = "Direct Submission"
ref.journal = "Distributed with the MoClo Python library\nhttps://github.com/althonos/moclo"
gb.annotations['references'].append(ref)
# write the final record
dst_dir = os.path.abspath(
os.path.join(
__file__, "..", "..", "moclo-ecoflex", "registry", "ecoflex"
)
)
dst_file = os.path.join(dst_dir, "{}.gb").format(info["id"])
write(gb, dst_file, "gb")
|
python
|
# SRP parameters (3072-bit MODP group)
ng_order = (3072,)
_ng_const = (
# 3072
(
"""\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
"5",
),
)
def get_srp_context(ng_group_len, hashfunc, salt_len=16, secret_len=32):
    group = _ng_const[ng_order.index(ng_group_len)]
    ctx = {
        "hashfunc": hashfunc,
        "N": int(group[0], 16),
        "g": int(group[1], 16),
        "N_len": ng_group_len,
        "salt_len": salt_len,
        "secret_len": secret_len,
    }
    return ctx
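# Minimal usage sketch (hashlib.sha256 here is an assumed caller choice, not part
# of this module); the 3072-bit group above appears to be the standard MODP group
# with generator 5:
#   import hashlib
#   ctx = get_srp_context(3072, hashlib.sha256)
#   assert ctx["g"] == 5 and ctx["N"].bit_length() == 3072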
|
python
|
import random
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
kivy.require('1.9.0')
class MyRoot(BoxLayout):
def __init__(self):
super(MyRoot, self).__init__()
def generate_affirmation(self):
affirmations = ["I am the architect of my life; \nI build its foundation and choose its contents.",
"I am brimming with energy and \noverflowing with joy.",
"My body is healthy; my mind is brilliant; \nmy soul is tranquil.",
"I forgive those who have harmed me in my past and \npeacefully detach from them.",
"A river of compassion washes away my anger \nand replaces it with love.",
"Creative energy surges through me and leads \nme to new and brilliant ideas.",
"The only thing to fear is fear itself.",
"My ability to exceed my goals is limitless; \nmy potential to succeed is infinite.",
"I acknowledge my own self-worth; \nmy confidence is soaring.",
"Everything that is happening now is \nhappening for my ultimate good.",
"I woke up today with strength in my \nheart and clarity in my mind."]
rand_num = random.randint(0, len(affirmations)-1)
self.rand_aff.text = affirmations[rand_num]
class RandAffirmations(App):
def build(self):
return MyRoot()
randAffirmations = RandAffirmations()
randAffirmations.run()
|
python
|
# Copyright 2020 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from mtap import GenericLabel, Location, Document
from mtap.data._label_indices import presorted_label_index
document = Document('plaintext', text='blah')
@pytest.fixture
def tested():
return presorted_label_index([
GenericLabel(0, 5, document=document, i=7),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(6, 8, document=document, i=3),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(9, 13, document=document, i=1),
GenericLabel(9, 13, document=document, i=0),
]).descending()
def test_getitem(tested):
assert tested[3] == GenericLabel(6, 8, document=document, i=3)
def test_getitem_first(tested):
assert tested[0] == GenericLabel(9, 13, document=document, i=0)
def test_getitem_last(tested):
assert tested[7] == GenericLabel(0, 5, document=document, i=7)
def test_getitem_negative(tested):
assert tested[-4] == GenericLabel(6, 7, document=document, i=4)
def test_getitem_last_negative(tested):
assert tested[-1] == GenericLabel(0, 5, document=document, i=7)
def test_getitem_slice(tested):
sliced = tested[2:4]
assert sliced == [
GenericLabel(9, 10, document=document, i=2),
GenericLabel(6, 8, document=document, i=3),
]
def test_getitem_slice_end(tested):
assert tested[4:8] == [
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(0, 5, document=document, i=7),
]
def test_getitem_slice_open_left(tested):
assert tested[:4] == [
GenericLabel(9, 13, document=document, i=0),
GenericLabel(9, 13, document=document, i=1),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(6, 8, document=document, i=3),
]
def test_getitem_slice_open_right(tested):
assert tested[4:] == [
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(0, 5, document=document, i=7),
]
def test_getitem_slice_neg_right(tested):
assert tested[4:-1] == [
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
]
def test_getitem_slice_neg_left(tested):
assert tested[-4:-1] == [
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
]
def test_getitem_not_idx_slice(tested):
with pytest.raises(TypeError):
tested['foo']
def test_getitem_slice_step_not_one(tested):
slice = tested[1:4:2]
assert slice == ([
GenericLabel(9, 13, document=document, i=1),
GenericLabel(6, 8, document=document, i=3),
])
def test_at(tested):
assert tested.at(GenericLabel(2, 6, document=document))[0] == GenericLabel(2, 6, document=document, i=5)
def test_at_location(tested):
assert tested.at(Location(2, 6))[0] == GenericLabel(2, 6, document=document, i=5)
def test_at_location_multiple(tested):
assert tested.at(Location(9, 13)) == [
GenericLabel(9, 13, document=document, i=0),
GenericLabel(9, 13, document=document, i=1),
]
def test_at_location_not_found(tested):
assert tested.at(Location(10, 10)) == []
def test_len(tested):
assert len(tested) == 8
def test_covering(tested):
covering = tested.covering(2, 4)
assert list(covering) == [
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(0, 5, document=document, i=7),
]
def test_covering_empty(tested):
assert tested.covering(4, 10) == []
def test_empty_covering(tested):
covering = tested.covering(4, 10)
assert list(covering) == []
def test_inside(tested):
inside = tested.inside(1, 8)
assert list(inside) == [
GenericLabel(6, 8, document=document, i=3),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
]
def test_inside_before(tested):
inside = tested.inside(0, 3)
assert list(inside) == []
def test_inside_after(tested):
inside = tested.inside(15, 20)
assert list(inside) == []
def test_inside_many(tested):
tested = presorted_label_index([
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(0, 3, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(2, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 6, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
GenericLabel(6, 10, document=document),
])
inside = tested.inside(3, 6)
assert inside == [
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(3, 5, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
]
inside = inside.inside(5, 6)
assert inside == [
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
GenericLabel(5, 6, document=document),
]
def test_begins_inside(tested):
inside = tested.beginning_inside(1, 9)
assert list(inside) == [
GenericLabel(6, 8, document=document, i=3),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
]
def test_begins_inside_empty(tested):
inside = tested.beginning_inside(3, 5)
assert inside == []
def test_ascending(tested):
ascending = tested.ascending()
assert ascending == [
GenericLabel(0, 5, document=document, i=7),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(6, 8, document=document, i=3),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(9, 13, document=document, i=1),
GenericLabel(9, 13, document=document, i=0),
]
def test_descending(tested):
descending = tested.descending()
assert descending == tested
def test_before(tested):
before = tested.before(8)
assert before == [
GenericLabel(6, 8, document=document, i=3),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(0, 5, document=document, i=7),
]
def test_before_start(tested):
before = tested.before(3)
assert before == []
def test_after(tested):
after = tested.after(2)
assert after == [
GenericLabel(9, 13, document=document, i=0),
GenericLabel(9, 13, document=document, i=1),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(6, 8, document=document, i=3),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(2, 6, document=document, i=5),
]
def test_contains_true(tested):
assert GenericLabel(9, 13, document=document, i=0) in tested
def test_contains_false_location_in(tested):
assert GenericLabel(9, 13, document=document) not in tested
def test_contains_false_location_not_in(tested):
assert GenericLabel(0, 4, document=document) not in tested
def test_contains_false_not_label(tested):
assert "blub" not in tested
def test_reversed(tested):
l = list(reversed(tested))
assert l == [
GenericLabel(0, 5, document=document, i=7),
GenericLabel(0, 7, document=document, i=6),
GenericLabel(2, 6, document=document, i=5),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(6, 8, document=document, i=3),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(9, 13, document=document, i=1),
GenericLabel(9, 13, document=document, i=0),
]
def test_count_in(tested):
assert tested.count(GenericLabel(2, 6, document=document, i=5)) == 1
def test_count_multiple(tested):
index = presorted_label_index([
GenericLabel(2, 6, document=document, i=2),
GenericLabel(6, 7, document=document, i=3),
GenericLabel(6, 8, document=document, i=4),
GenericLabel(9, 10, document=document, i=5),
GenericLabel(9, 13, document=document, i=6),
GenericLabel(9, 13, document=document, i=7),
GenericLabel(9, 13, document=document, i=6)
]).descending()
assert index.count(GenericLabel(9, 13, document=document, i=6)) == 2
def test_count_different_label(tested):
assert tested.count(GenericLabel(9, 13, document=document, x=2)) == 0
def test_count_not_label(tested):
assert tested.count("blub") == 0
def test_count_location_not_in(tested):
assert tested.count(GenericLabel(4, 5, document=document)) == 0
def test_filter(tested):
assert tested.filter(lambda x: x.i % 2 == 0) == [
GenericLabel(9, 13, document=document, i=0),
GenericLabel(9, 10, document=document, i=2),
GenericLabel(6, 7, document=document, i=4),
GenericLabel(0, 7, document=document, i=6),
]
|
python
|
from rpcb.message_dispatch import MessageDispatch
from rpcb.service import Service
import threading
import time
import random
import pika
import queue
import logging
import pika.adapters.blocking_connection
"""
消息调度器实现,用于批处理数据,多个数据来了会打包为一个batch输入到模型中,从而提高整体的吞吐量
"""
class BatchMessageDispatcher(MessageDispatch):
def __init__(self, callback, max_queue_size:int=32, max_waiting_time:float=0.1,
service:Service=None, max_batch_size=10):
self.safe_queue = queue.Queue(maxsize=max_queue_size) # thread-safe queue holding incoming messages
self.message_send_and_ack = callback # callback used to send the reply and ack the message
self.service = service
guard = GuardThread(max_waiting_time=max_waiting_time,safe_queue=self.safe_queue,
compute=self.compute, max_batch_size=max_batch_size)
guard.start()
def compute(self, status):
"""
Pull a batch off the queue, run the service on it, and send back the replies.
"""
status = True
batch = list()
batch_size = min(10, self.safe_queue.qsize()) # hard-coded cap; matches the default max_batch_size
for i in range(batch_size):
batch.append(self.safe_queue.get())
# extract the body of each queued message
service_needed_batch = [item['body'] for item in batch]
pre_time = time.time()
result = self.service(service_needed_batch) # [body1,body2....]
logging.info("service time: " + str(time.time()-pre_time))
# send each result back to its reply queue and ack the original message
for i in range(len(batch)):
correlation_id = batch[i]['correlation_id']
reply_to = batch[i]['reply_to']
delivery_tag = batch[i]['delivery_tag']
self.message_send_and_ack(result[i], correlation_id, reply_to, delivery_tag)
status = False
logging.info("batch size: " + str(len(result)))
return result
def deliver_message(self, body:bytes, properties:pika.BasicProperties, delivery_tag:str) -> None:
"""
Hand a message over to the dispatcher.
:params
body: incoming payload
properties: properties of the incoming message, used to route the reply
delivery_tag: tag used to ack the message
:returns
None
"""
message = {'body': body, 'correlation_id':properties.correlation_id,
'reply_to':properties.reply_to, 'delivery_tag':delivery_tag}
self.safe_append_queue(message=message)
logging.debug("enqueued one message")
def safe_append_queue(self, message):
"""
Enqueue a message safely.
"""
if self.safe_queue is None:
raise Exception("message queue is not initialized")
if self.safe_queue.full():
logging.warning(self.safe_queue.qsize())
# TODO wait until queue is not full
logging.error("queue length exceeded its limit")
self.safe_queue.put(message)
def check_channel(self):
"""
Check whether the channel is available.
"""
if self.channel is None:
return False
return True
class GuardThread(threading.Thread):
"""
Guard thread that watches how long queued messages have been waiting and
dispatches them to the service once a deadline or batch-size threshold is hit.
"""
def __init__(self, max_waiting_time:float=0.1, safe_queue=None, compute=None,max_batch_size=10):
threading.Thread.__init__(self, daemon=True)
self.safe_queue = safe_queue
self.activate = False # set when a message arrives and the wait timer is started
self.is_running = False # whether the service is currently processing a batch
self.max_waiting_time = max_waiting_time # maximum waiting time, in seconds
self.max_batch_size = max_batch_size # maximum batch size for the service
self.start_time = time.time() # arrival time of the first pending message
self.compute = compute
def run(self):
"""
Process queued messages.
"""
# polling loop
while(True):
# if the queue is empty, keep looping; otherwise start timing so a batch is processed within the deadline
if self.safe_queue.qsize() == 0:
self.activate = False
if (self.safe_queue.qsize() > 0) and not self.is_running:
# start the timer when a message arrives while idle, or when a run finishes and the queue is still non-empty
if(not self.activate):
self.start_time = time.time()
self.activate = True
# if the elapsed time exceeds the maximum waiting time, dispatch the batch
if self.activate and time.time()-self.start_time > self.max_waiting_time:
self.activate = False
logging.info(" waiting time: " + str(time.time()-self.start_time))
# TODO self.is_running is currently unused: compute() blocks, so a new run cannot start before the previous one finishes; kept for a future multi-threaded extension
self.compute(self.is_running)
# if the queue holds at least max_batch_size items and nothing is running, compute immediately
if self.safe_queue.qsize() >= self.max_batch_size and not self.is_running:
self.activate = False
logging.info(" waiting time: " + str(time.time()-self.start_time))
self.compute(self.is_running)
# logging.debug("queue size " + str(self.safe_queue.qsize()))
time.sleep(0.01) # poll every 10 ms
if __name__ == '__main__':
from base.service import AsrService
logging.basicConfig(level=logging.DEBUG, format='%(levelname) -10s %(asctime)s %(name) -20s %(funcName) -25s %(lineno) -5d: %(message)s')
message_dispatcher = BatchMessageDispatcher(None, max_queue_size=32, max_waiting_time=0.1, service=AsrService())
class Prop:
correlation_id = "correlation_id"
reply_to = "reply_to"
pre_time = time.time()
for i in range(200):
message_dispatcher.deliver_message(bytes("hello"+str(i), encoding='utf-8'), properties=Prop(), delivery_tag="delivery_tag")
sleep_time = float(random.randint(2, 25))/1000
time.sleep(sleep_time)
logging.info("total elapsed time: " + str(time.time()-pre_time))
|
python
|
import vcs
import cdms2
import os
x = vcs.init()
f = cdms2.open(os.path.join(vcs.sample_data, 'clt.nc'))
u = f("u")
v = f("v")
V = x.createvector()
V.linecolor = 242
V.scale = 5.
V.type = "arrows"
V.reference = 6.
V.list()
x.plot(u[::2], v[::2], V)
x.png("vectors")
x.interact()
|
python
|
# OpenWeatherMap API Key
api_key = "Goes here if needed"
|
python
|
# Note that only the currently used fields are shown unless show_all is set to True.
import os
import pandas as pd
import anytree
from anytree.search import find
from anytree.exporter import DotExporter
import collections
PolicyTuple = collections.namedtuple('PolicyTuple','layer_id agg_id calc_rules')
CalcRuleTuple = collections.namedtuple('CalcRuleTuple', 'policytc_id calcrule_id is_step trig_start trig_end')
def load_df(path, required_file=None):
if path:
return pd.read_csv(path)
else:
if required_file:
raise FileNotFoundError(f"Required File does not exist: {required_file}")
else:
return None
def create_fm_tree(fm_programme_df, fm_policytc_df, fm_profile_df, fm_summary_df):
missing_node_link = False
def get_policy_tc(agg_id, level_id):
policytc = fm_policytc_df.loc[
(fm_policytc_df['agg_id'] == agg_id) & (fm_policytc_df['level_id'] == level_id)
]
policy_list = []
for _, policy in policytc.iterrows():
# Find calc_rule
profile = fm_profile_df.loc[fm_profile_df.policytc_id == policy.policytc_id]
calc_rules = []
for _, step in profile.iterrows():
trig_start = step.trigger_start if hasattr(step, 'trigger_start') else 0
trig_end = step.trigger_end if hasattr(step, 'trigger_end') else 0
is_step_rule = (trig_end > 0 or trig_start > 0)
calc_rules.append(CalcRuleTuple(
policytc_id=int(policy.policytc_id),
calcrule_id=int(step.calcrule_id),
is_step=is_step_rule,
trig_start=trig_start,
trig_end=trig_end,
))
policy_list.append(
PolicyTuple(
layer_id=int(policy.layer_id),
agg_id=int(policy.agg_id),
calc_rules=calc_rules,
)
)
return len(policytc), policy_list
level_ids = sorted(list(fm_programme_df.level_id.unique()), reverse=True)
root = anytree.Node('Insured Loss', agg_id=1, level_id=max(level_ids)+1, policy_tc=None)
for level in level_ids:
agg_id_idxs = list(fm_programme_df[fm_programme_df.level_id == level].drop_duplicates(subset=['level_id','to_agg_id'], keep="first").index)
for node_idx in agg_id_idxs:
node_info = fm_programme_df.iloc[node_idx]
layer_max, policy_list = get_policy_tc(node_info.to_agg_id, node_info.level_id)
# Set parent node as root or find based on level/agg ids
if level == max(level_ids):
parent_node = root
else:
try:
matched_id = fm_programme_df.loc[(fm_programme_df.level_id == level+1) & (fm_programme_df.from_agg_id == node_info.to_agg_id)].to_agg_id.item()
parent_node = find(root, filter_=lambda node: node.level_id == level+1 and node.agg_id == matched_id)
except ValueError:
missing_node_link = True
print('Missing node link: agg_id={}, level_id={}'.format(node_info.to_agg_id,level+1))
# Set node names based on attrs in FM files
if level >= 3:
node_name = "policy term {} \nlevel: {}".format(
node_info.to_agg_id,
node_info.level_id
)
elif level == 2:
node_name = "loc term {} ".format(node_info.to_agg_id)
else:
node_name = "cov term {}".format(node_info.to_agg_id)
for policy in policy_list:
node_name += "\n\nlayer_id: {}".format(policy.layer_id)
for rule in policy.calc_rules:
if rule.is_step:
node_name += "\n policytc_id {}: step_rule:{}, start:{} end:{}".format(
rule.policytc_id,
rule.calcrule_id,
rule.trig_start,
rule.trig_end
)
else:
node_name += "\npolicytc_id: {} \ncalc_rule: {}".format(
rule.policytc_id,
rule.calcrule_id,
)
# Create Node in FM tree
node = anytree.Node(
node_name,
agg_id=node_info.to_agg_id,
level_id=level,
parent=parent_node,
layer_max=layer_max,
policy_tc=policy_list,
)
# Add item level data
item_agg_idx = list(fm_summary_df[['agg_id']].drop_duplicates().index)
for item in item_agg_idx:
item_info = fm_summary_df.iloc[item]
matched_id = fm_programme_df.loc[(fm_programme_df.level_id == 1) & (fm_programme_df.from_agg_id == item_info.agg_id)].to_agg_id.item()
parent_node = find(root, filter_=lambda node: node.level_id == 1 and node.agg_id == matched_id)
node_name = "\n".join([
"item {}\n".format(int(item_info.agg_id)),
"locnumber: {}".format(item_info.locnumber),
"accnumber: {}".format(item_info.accnumber),
"polnumber: {}".format(item_info.polnumber),
"portnumber: {}".format(item_info.portnumber),
"cov_type: {}".format(item_info.coverage_type_id),
"peril_id: {}".format(item_info.peril_id),
"tiv: {}".format(item_info.tiv),
])
node = anytree.Node(
node_name,
agg_id=item_info.agg_id,
level_id=0,
parent=parent_node,
locnumber=item_info.locnumber,
accnumber=item_info.accnumber,
polnumber=item_info.polnumber,
portnumber=item_info.portnumber,
tiv=item_info.tiv,
coverage_id=item_info.coverage_id,
coverage_type=item_info.coverage_type_id,
peril_id=item_info.peril_id,
)
return root, missing_node_link
def render_fm_tree(root_node, filename='tree.png'):
# Function to format nodes in FM tree
def format_box(node):
# https://graphviz.org/doc/info/shapes.html
if node.level_id == 0:
# Item Level Node
return "fixedsize=false, shape=rect, fillcolor=lightgrey, style=filled"
else:
if not node.policy_tc:
# Error? missing policy_tc entry for this Node
return "fixedsize=false, shape=ellipse, fillcolor=pink, style=filled"
elif len(node.policy_tc) > 1:
# Node with multiple layers
return "fixedsize=false, shape=rect, fillcolor=orange, style=filled"
else:
# Cov or loc nodes
return "fixedsize=false, shape=ellipse, fillcolor=lightblue, style=filled"
# Function to add weighted 'by layer number' edges
def layered_edge(node, child):
# https://anytree.readthedocs.io/en/latest/tricks/weightededges.html
if hasattr(child, 'layer_max'):
if child.layer_max > 1:
return 'dir=back, style=bold, label=" {} Layers"'.format(child.layer_max)
return "dir=back"
# Render tree to png
dot_data = DotExporter(
root_node,
edgeattrfunc=layered_edge,
nodeattrfunc=format_box)
dot_data.to_picture(filename)
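# Illustrative usage sketch (the CSV file names below are hypothetical, not part of
# this module): load the FM input files, build the tree, then render it.
#   fm_programme_df = load_df('fm_programme.csv', required_file='fm_programme.csv')
#   fm_policytc_df = load_df('fm_policytc.csv', required_file='fm_policytc.csv')
#   fm_profile_df = load_df('fm_profile.csv', required_file='fm_profile.csv')
#   fm_summary_df = load_df('fm_summary_map.csv', required_file='fm_summary_map.csv')
#   root, missing_links = create_fm_tree(fm_programme_df, fm_policytc_df, fm_profile_df, fm_summary_df)
#   render_fm_tree(root, filename='fm_tree.png')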
|
python
|
# -*- coding: utf-8 -*-
import os
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from common import xJiPZbUzlGCIdemowYnQNONypdeudgmd, ckAjUaLEXnferbefRGpQeOZRysoqlffQ
FFVGFOvcuiKjdGKFcTRNoKJcuBaGjGEf = 'b14ce95fa4c33ac2803782d18341869f'
class LVPFsEGShJELnCwtpptaZvXDbVmShyns(Exception):
pass
def NmtIKYiMrjhKpKqWnTKDAJlAKWDTPVIy(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp, FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs=AES.block_size):
kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh = (FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs - (len(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp) % FRIUnJhVUpQceKKKwrGdGufEFeSRdAAs))
return OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp + (chr(kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh)*kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh)
def GwAonSsUlHwhDnYTFlqQhOKBVLcSheYV(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp):
kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh = OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[-1]
if OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp.endswith(kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh*ord(kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh)):
return OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[:-ord(kWFtuPcTzhXQsxkZcspaKNvhNllUXCxh)]
raise LVPFsEGShJELnCwtpptaZvXDbVmShyns("PKCS7 improper padding {}".format(repr(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp[-32:])))
def ZHmaXjmaptcjOuQWzIYmNcRFyCaggAdR(sock, server=True, bits=2048):
gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF;
adZcabjFPtBsWTrudsVIVsDshBtgiUsQ = 2
adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(os.urandom(32))
DPSXmPpMUYVABlOJsKMoPltwubSXAvTt = pow(adZcabjFPtBsWTrudsVIVsDshBtgiUsQ, adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF, gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi)
if server:
sock.send(xJiPZbUzlGCIdemowYnQNONypdeudgmd(DPSXmPpMUYVABlOJsKMoPltwubSXAvTt))
b = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(sock.recv(4096))
else:
b = ckAjUaLEXnferbefRGpQeOZRysoqlffQ(sock.recv(4096))
sock.send(xJiPZbUzlGCIdemowYnQNONypdeudgmd(DPSXmPpMUYVABlOJsKMoPltwubSXAvTt))
OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp = pow(b, adrCPWbaRPWXrGfZTxfXAtVEFHttGCMF, gFsSukpmrcgWJfrmLhgayqqAVmsbyWUi)
return SHA256.new(xJiPZbUzlGCIdemowYnQNONypdeudgmd(OPGQtyHoGVjbssyoAyqrdJvFlGoiAQYp)).digest()
def gcbCoqAgZztElhuzHlCRVsaXiDmrxjeQ(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx, KEY):
vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx = NmtIKYiMrjhKpKqWnTKDAJlAKWDTPVIy(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU = Random.new().read(AES.block_size)
omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK = AES.new(KEY, AES.MODE_CBC, VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU)
return VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU + omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK.encrypt(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
def zcqgzrNhhMMrepGKrXzOYcYeaRymVspf(ciphertext, KEY):
VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU = ciphertext[:AES.block_size]
omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK = AES.new(KEY, AES.MODE_CBC, VMJrviwEuDZCQAKsDTVcZeWibgUxuOuU)
vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx = omtoDzoHpDngJrwwtkYkmrGaWwwGAHVK.decrypt(ciphertext[AES.block_size:])
return GwAonSsUlHwhDnYTFlqQhOKBVLcSheYV(vKpdSqEzIxjyGuerXAIFZMYvfWbEQyYx)
|
python
|
#!/usr/bin/env python3
import os
import re
import sys
LOWP_SEARCH = "lowp"
MEDIUMP_SEARCH = "mediump"
HIGHP_SEARCH = "highp"
VERTEX_SHADER_EXT = ".vsh.glsl"
FRAG_SHADER_EXT = ".fsh.glsl"
GLES3_PREFIX = "GLES3_"
GLES3_SHADER_PREFIX = "gles3_"
SHADERS_LIB_COMMON_PATTERN = "// Common"
SHADERS_LIB_VS_PATTERN = "// VS"
SHADERS_LIB_FS_PATTERN = "// FS"
SHADERS_LIB_COMMON_INDEX = 0
SHADERS_LIB_VS_INDEX = 1
SHADERS_LIB_FS_INDEX = 2
def format_shader_source_name(shader_file_name):
shader_source_name = shader_file_name
return shader_source_name.replace(".glsl", "").replace(".", "_").upper()
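# Worked example (hypothetical file name): "area.vsh.glsl" -> strip ".glsl" -> "area.vsh"
# -> dots to underscores -> "area_vsh" -> upper-case -> "AREA_VSH".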
def read_index_file(file_path, programs_order):
gpu_programs = dict()
with open(file_path, 'r') as f:
index = 0
for line in f:
line_parts = line.strip().split()
if len(line_parts) != 3:
print("Incorrect GPU program definition : " + line)
exit(10)
if line_parts[0] != programs_order[index]:
print("Incorrect GPU program order or name : " + line)
exit(11)
vertex_shader = next((f for f in line_parts if f.endswith(VERTEX_SHADER_EXT)), None)
fragment_shader = next((f for f in line_parts if f.endswith(FRAG_SHADER_EXT)), None)
if not vertex_shader:
print("Vertex shader not found in GPU program definition : " + line)
exit(12)
if not fragment_shader:
print("Fragment shader not found in GPU program definition : " + line)
exit(13)
if line_parts[0] in gpu_programs.keys():
print("More than one definition of %s gpu program" % line_parts[0])
exit(14)
gpu_programs[index] = (vertex_shader, fragment_shader, line_parts[0])
index += 1
return gpu_programs
def read_programs_file(file_path):
gpu_programs = []
with open(file_path, 'r') as f:
found = False
for line in f:
if not found and line.find('enum class Program') >= 0:
found = True
continue
if found and line.find('}') >= 0:
break
if found and line.find('{') == -1:
line_parts = re.split(',|=', line)
name = line_parts[0].strip()
if name and name != 'ProgramsCount':
gpu_programs.append(name)
return gpu_programs
def read_shaders_lib_file(file_path):
shaders_library = ['', '', '']
with open(file_path, 'r') as f:
shaders_lib_content = f.read()
if len(shaders_lib_content) == 0:
return shaders_library
common_index = shaders_lib_content.find(SHADERS_LIB_COMMON_PATTERN)
if common_index < 0:
print("Common functions block is not found in " + file_path)
exit(14)
vs_index = shaders_lib_content.find(SHADERS_LIB_VS_PATTERN)
if vs_index < 0:
print("Vertex shaders functions block is not found in " + file_path)
exit(15)
fs_index = shaders_lib_content.find(SHADERS_LIB_FS_PATTERN)
if fs_index < 0:
print("Vertex shaders functions block is not found in " + file_path)
exit(16)
if not (common_index < vs_index < fs_index):
print("Order of functions block is incorrect in " + file_path)
exit(17)
shaders_library[SHADERS_LIB_COMMON_INDEX] = shaders_lib_content[common_index:vs_index - 1]
shaders_library[SHADERS_LIB_VS_INDEX] = shaders_lib_content[vs_index:fs_index - 1]
shaders_library[SHADERS_LIB_FS_INDEX] = shaders_lib_content[fs_index:]
return shaders_library
def generate_shader_indexes(shaders):
return dict((v, k) for k, v in enumerate(shaders))
def write_definition_file(defines_file, generation_dir):
with open(os.path.join(generation_dir, defines_file), 'w') as output_file:
output_file.write("#pragma once\n\n")
output_file.write("#include \"shaders/programs.hpp\"\n")
output_file.write("#include \"shaders/gl_program_info.hpp\"\n\n")
output_file.write("#include \"drape/drape_global.hpp\"\n\n")
output_file.write("namespace gpu\n")
output_file.write("{\n")
output_file.write("extern char const * GL3_SHADER_VERSION;\n")
output_file.write("extern char const * GLES3_SHADER_VERSION;\n\n")
output_file.write("extern GLProgramInfo GetProgramInfo(dp::ApiVersion apiVersion, Program program);\n")
output_file.write("} // namespace gpu\n")
def write_shader_gles_header(output_file):
output_file.write(" #ifdef GL_ES \\n\\\n")
output_file.write(" #ifdef GL_FRAGMENT_PRECISION_HIGH \\n\\\n")
output_file.write(" #define MAXPREC highp \\n\\\n")
output_file.write(" #else \\n\\\n")
output_file.write(" #define MAXPREC mediump \\n\\\n")
output_file.write(" #endif \\n\\\n")
output_file.write(" precision MAXPREC float; \\n\\\n")
output_file.write(" #define LOW_P lowp \\n\\\n")
output_file.write(" #define MEDIUM_P mediump \\n\\\n")
output_file.write(" #define HIGH_P highp \\n\\\n")
output_file.write(" #else \\n\\\n")
output_file.write(" #define LOW_P \\n\\\n")
output_file.write(" #define MEDIUM_P \\n\\\n")
output_file.write(" #define HIGH_P \\n\\\n")
output_file.write(" #endif \\n\\\n")
def get_shaders_lib_content(shader_file, shaders_library):
lib_content = shaders_library[SHADERS_LIB_COMMON_INDEX]
if shader_file.find(VERTEX_SHADER_EXT) >= 0:
lib_content += shaders_library[SHADERS_LIB_VS_INDEX]
elif shader_file.find(FRAG_SHADER_EXT) >= 0:
lib_content += shaders_library[SHADERS_LIB_FS_INDEX]
return lib_content
def write_shader_line(output_file, line, convert_to_gles3, is_fragment_shader):
if line.lstrip().startswith("//") or line == '\n' or len(line) == 0:
return
if line.find(LOWP_SEARCH) >= 0:
print("Incorrect shader. Do not use lowp in shader, use LOW_P instead.")
exit(2)
if line.find(MEDIUMP_SEARCH) >= 0:
print("Incorrect shader. Do not use mediump in shader, use MEDIUM_P instead.")
exit(2)
if line.find(HIGHP_SEARCH) >= 0:
print("Incorrect shader. Do not use highp in shader, use HIGH_P instead.")
exit(2)
output_line = line.rstrip()
if convert_to_gles3:
output_line = output_line.replace("attribute", "in")
if is_fragment_shader:
output_line = output_line.replace("varying", "in")
else:
output_line = output_line.replace("varying", "out")
output_line = output_line.replace("texture2D", "texture")
output_line = output_line.replace("gl_FragColor", "v_FragColor")
output_file.write(" %s \\n\\\n" % output_line)
def write_shader_body(output_file, shader_file, shader_dir, shaders_library, convert_to_gles3):
is_fragment_shader = shader_file.find(FRAG_SHADER_EXT) >= 0
lib_content = get_shaders_lib_content(shader_file, shaders_library)
for line in open(os.path.join(shader_dir, shader_file)):
if line.lstrip().startswith("void main"):
for lib_line in lib_content.splitlines():
write_shader_line(output_file, lib_line, convert_to_gles3, is_fragment_shader)
if convert_to_gles3 and is_fragment_shader:
output_file.write(" out vec4 v_FragColor; \\n\\\n")
write_shader_line(output_file, line, convert_to_gles3, is_fragment_shader)
output_file.write("\";\n\n")
def write_shader(output_file, shader_file, shader_dir, shaders_library):
output_file.write("char const %s[] = \" \\\n" % (format_shader_source_name(shader_file)))
write_shader_gles_header(output_file)
write_shader_body(output_file, shader_file, shader_dir, shaders_library, False)
def write_gles3_shader(output_file, shader_file, shader_dir, shaders_library):
output_file.write("char const %s[] = \" \\\n" % (GLES3_PREFIX + format_shader_source_name(shader_file)))
write_shader_gles_header(output_file)
if os.path.exists(os.path.join(shader_dir, GLES3_SHADER_PREFIX + shader_file)):
write_shader_body(output_file, GLES3_SHADER_PREFIX + shader_file, shader_dir, shaders_library, False)
else:
write_shader_body(output_file, shader_file, shader_dir, shaders_library, True)
def write_gpu_programs_map(file, programs_def, source_prefix):
for program in programs_def.keys():
vertex_shader = programs_def[program][0]
vertex_source_name = source_prefix + format_shader_source_name(vertex_shader)
fragment_shader = programs_def[program][1]
fragment_source_name = source_prefix + format_shader_source_name(fragment_shader)
file.write(" GLProgramInfo(\"%s\", \"%s\", %s, %s),\n" % (
vertex_source_name, fragment_source_name, vertex_source_name, fragment_source_name))
def write_implementation_file(programs_def, shader_index, shader_dir, impl_file, def_file, generation_dir,
shaders_library):
with open(os.path.join(generation_dir, impl_file), 'w') as file:
file.write("#include \"shaders/%s\"\n\n" % (def_file))
file.write("#include \"base/assert.hpp\"\n\n")
file.write("#include \"std/target_os.hpp\"\n\n")
file.write("#include <array>\n\n")
file.write("namespace gpu\n")
file.write("{\n")
file.write("char const * GL3_SHADER_VERSION = \"#version 150 core \\n\";\n")
file.write("char const * GLES3_SHADER_VERSION = \"#version 300 es \\n\";\n\n")
for shader in shader_index.keys():
write_shader(file, shader, shader_dir, shaders_library)
write_gles3_shader(file, shader, shader_dir, shaders_library)
file.write("GLProgramInfo GetProgramInfo(dp::ApiVersion apiVersion, Program program)\n")
file.write("{\n")
file.write(" if (apiVersion == dp::ApiVersion::OpenGLES2)\n")
file.write(" {\n")
file.write(" static std::array<GLProgramInfo, static_cast<size_t>(Program::ProgramsCount)> gpuIndex = {{\n")
write_gpu_programs_map(file, programs_def, '')
file.write(" }};\n")
file.write(" return gpuIndex[static_cast<size_t>(program)];\n")
file.write(" }\n")
file.write(" else if (apiVersion == dp::ApiVersion::OpenGLES3)\n")
file.write(" {\n")
file.write(" static std::array<GLProgramInfo, static_cast<size_t>(Program::ProgramsCount)> gpuIndex = {{\n")
write_gpu_programs_map(file, programs_def, GLES3_PREFIX)
file.write(" }};\n")
file.write(" return gpuIndex[static_cast<size_t>(program)];\n")
file.write(" }\n")
file.write(" CHECK(false, (\"Unsupported API version.\"));\n")
file.write(" return {};\n")
file.write("}\n")
file.write("} // namespace gpu\n")
if __name__ == '__main__':
if len(sys.argv) < 7:
print("Usage : " + sys.argv[0] + " <shader_dir> <index_file> <programs_file> <shaders_lib> <generation_dir> <generated_file>")
exit(1)
shader_dir = sys.argv[1]
index_file_name = sys.argv[2]
programs_file_name = sys.argv[3]
shaders_lib_file = sys.argv[4]
generation_dir = sys.argv[5]
defines_file = sys.argv[6] + ".hpp"
impl_file = sys.argv[6] + ".cpp"
shaders = [file for file in os.listdir(shader_dir) if
os.path.isfile(os.path.join(shader_dir, file)) and (
file.endswith(VERTEX_SHADER_EXT) or file.endswith(FRAG_SHADER_EXT))]
shaderIndex = generate_shader_indexes(shaders)
programs_order = read_programs_file(os.path.join(shader_dir, '..', programs_file_name))
programDefinition = read_index_file(os.path.join(shader_dir, index_file_name), programs_order)
shaders_library = read_shaders_lib_file(os.path.join(shader_dir, shaders_lib_file))
write_definition_file(defines_file, generation_dir)
write_implementation_file(programDefinition, shaderIndex, shader_dir, impl_file, defines_file, generation_dir,
shaders_library)
|
python
|
#!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import platform
import sys
def main():
parser = argparse.ArgumentParser()
# TODO(fxbug.dev/5535): make this argument required.
parser.add_argument(
'--reference', help='Path to the golden API file', required=False)
parser.add_argument(
'--manifest', help='Path to the SDK manifest', required=True)
parser.add_argument(
'--updated', help='Path to the API file to compute', required=True)
parser.add_argument(
'--warn',
help='Whether API changes should only cause warnings',
action='store_true')
args = parser.parse_args()
if not args.reference:
# Nothing to do.
with open(args.updated, 'w') as updated_file:
updated_file.write('No API verification for this SDK :/')
return 0
with open(args.manifest, 'r') as manifest_file:
manifest = json.load(manifest_file)
ids = [a['id'] for a in manifest['atoms']]
# Ignore images which are very architecture-dependent.
# TODO(fxbug.dev/5824): remove this exception when obsolete.
ids = [i for i in ids if not (i.startswith('sdk://images'))]
with open(args.updated, 'w') as updated_file:
updated_file.write('\n'.join(ids))
with open(args.reference, 'r') as reference_file:
old_ids = [l.strip() for l in reference_file.readlines()]
# tools/arm64 should not exist on mac hosts
# TODO(fxbug.dev/42999): remove when SDK transition is complete.
if platform.mac_ver()[0]:
old_ids = [i for i in old_ids if not i.startswith('sdk://tools/arm64')]
ids = filter(lambda i: not i.startswith('sdk://fidl/zx'), ids)
old_ids = filter(lambda i: not i.startswith('sdk://fidl/zx'), old_ids)
new_id_set = set(ids)
old_id_set = set(old_ids)
added_ids = new_id_set - old_id_set
removed_ids = old_id_set - new_id_set
if added_ids:
print('Elements added to SDK:')
for id in sorted(added_ids):
print(' - %s' % id)
if removed_ids:
print('Elements removed from SDK:')
for id in sorted(removed_ids):
print(' - %s' % id)
if removed_ids or added_ids:
type = 'Warning' if args.warn else 'Error'
print('%s: SDK contents have changed!' % type)
print('Please acknowledge this change by running:')
print(
' cp ' + os.path.abspath(args.updated) + ' ' +
os.path.abspath(args.reference))
if not args.warn:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
python
|
import filecmp
import os
def exists(path):
return os.path.isfile(path)
class FileApprover(object):
def verify(self, namer, writer, reporter):
base = namer.get_basename()
approved = namer.get_approved_filename(base)
received = namer.get_received_filename(base)
writer.write_received_file(received)
ok = self.verify_files(approved, received, reporter)
if not ok:
return "Approval Mismatch"
return None
def verify_files(self, approved_file, received_file, reporter):
if self.are_files_the_same(approved_file, received_file):
os.remove(received_file)
return True
reporter.report(received_file, approved_file)
return False
@staticmethod
def are_files_the_same(approved_file, received_file):
if not exists(approved_file) or not exists(received_file):
return False
if os.stat(approved_file).st_size != os.stat(received_file).st_size:
return False
else:
return filecmp.cmp(approved_file, received_file)
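# Minimal usage sketch (namer, writer and reporter are assumed collaborators, not
# defined in this module): namer supplies file names, writer writes the received
# file, and reporter is told about mismatches.
#   approver = FileApprover()
#   failure = approver.verify(namer, writer, reporter)
#   if failure:
#       raise AssertionError(failure)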
|
python
|
import socket
import serial
from config import DEFAULT_VELOCITY
from config import TIME_INTERVAL
import time
import math
import os
class Robot:
def __init__(self, mac: str, color: str, com: str):
if os.name == 'posix':
self.socket = socket.socket(
socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
print("Input 1234 code in system bluetooth window")
self.socket.connect((mac, 1))
else:
self.serial = serial.Serial(com)
self.last_update = time.time()
self.color = color
self.user_time = time.time()
self.user_age = 15
self.user_id = 0
def send_speed_command(self, left, right):
cmd = '[={},{}]'.format(left, right)
if os.name == 'posix':
self.socket.send(bytes(cmd, 'UTF-8'))
else:
self.serial.write(bytes(cmd, 'UTF-8'))
self.last_update = time.time()
print(cmd)
def forward(self):
self.send_speed_command(self.velocity(), self.velocity())
def reverse(self):
self.send_speed_command(-self.velocity(), -self.velocity())
def left(self):
self.send_speed_command(0, math.ceil(self.velocity()/2))
def right(self):
self.send_speed_command(math.ceil(self.velocity()/2), 0)
def stop(self):
self.send_speed_command(0, 0)
def is_time_exceeded(self):
return (time.time() - self.last_update) > TIME_INTERVAL
def age(self):
return (time.time() - self.user_time)
def velocity(self):
if self.user_age < 10:
return math.ceil(DEFAULT_VELOCITY/2)
elif self.user_age > 20:
return DEFAULT_VELOCITY
else:
return math.ceil(self.user_age / 20 * DEFAULT_VELOCITY)
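# Worked example (assumes the default self.user_age of 15 and a hypothetical
# DEFAULT_VELOCITY of 100 from config): velocity() returns ceil(15 / 20 * 100) = 75,
# while ages under 10 get ceil(100 / 2) = 50 and ages over 20 get the full 100.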
|
python
|
# THIS FILE IS GENERATED FROM KIVY SETUP.PY
__version__ = '1.11.0.dev0'
__hash__ = '9b90467ec9efea3891e07be92c9bb4ba638a7ca0'
__date__ = '20190329'
|
python
|
# Tara O'Kelly - G00322214
# Emerging Technologies, Year 4, Software Development, GMIT.
# Problem set: Python fundamentals
# 6. Write a function that returns the largest and smallest elements in a list.
user_list = []
# get user input
n = int(input('How many numbers: '))
for x in range(n):
numbers = int(input('Enter number: \n'))
user_list.append(numbers)
# use min and max functions
# https://docs.python.org/3/library/functions.html#max
# https://docs.python.org/3/library/functions.html#min
print("Largest element in the list is :", max(user_list), "\nSmallest element in the list is :", min(user_list))
|
python
|
#%%
import numpy as np
import pandas as pd
# Load the data
data = pd.read_csv('./input/2021-02-11_REL606_NCM3722_diauxie.csv')
# Do some serious tidying
melted = data.melt('Cycle Nr.')
# Get the time indices
time = melted[melted['Cycle Nr.']=='Time [s]']
time.sort_values(by='variable', inplace=True)
time = time['value'].values
# Get the temperature indices
temp = melted[melted['Cycle Nr.']=='Temp. [°C]']
temp.sort_values(by='variable', inplace=True)
temp = temp['value'].values
# get the well info
dfs = []
_melted = melted[(melted['Cycle Nr.'] != 'Time [s]') &
(melted['Cycle Nr.'] != 'Temp. [°C]')]
for g, d in _melted.groupby(['Cycle Nr.']):
d.sort_values(by='variable', inplace=True)
d['time_s'] = time
d['temp_C'] = temp
d.rename(columns={'Cycle Nr.': 'well',
'value':'od_600nm'}, inplace=True)
d.drop(columns=['variable'], inplace=True)
dfs.append(d)
tidy = pd.concat(dfs, sort=False)
# Add identifier for the strain
tidy['strain'] = 'blank'
tidy['medium'] = 'blank'
for n in range(4, 10):
if n <= 6:
medium = '10 mM glucose + 30 mM acetate'
else:
medium = '0.61 mM glucose + 30 mM acetate'
for letter, strain in zip(['D', 'E'], ['NCM3722', 'REL606']):
tidy.loc[tidy['well'] == f'{letter}{n}', 'strain'] = strain
tidy.loc[tidy['well'] == f'{letter}{n}', 'medium'] = medium
# Add replicate information.
for g, d in tidy.groupby(['strain', 'medium']):
mapper = {w:r + 1 for r, w in enumerate(d['well'].unique())}
for k, v in mapper.items():
tidy.loc[tidy['well']==k, 'replicate'] = v
tidy['replicate'] = tidy['replicate'].astype(int)
# Save the tidy dataframe to disk for further processing
tidy.to_csv('./output/2021-02-11_NCM_REL_diauxie_tidy.csv', index=False)
# %%
|
python
|
"""Library for CIM sparql queries"""
__version__ = "1.9.0"
|
python
|
# -*- coding: utf-8 -*-
"""
Script to make test
"""
from indeed import params
def test_indeed_params():
assert params('my_username', 'my_password') == ('my_username', 'my_password')
assert params('your_username', 'your_password') == ('your_username', 'your_password')
|
python
|
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
import mount_efs
import os
import pytest
from datetime import datetime
from mock import MagicMock
try:
import ConfigParser
except ImportError:
from configparser import ConfigParser
FS_ID = 'fs-deadbeef'
AP_ID = 'fsap-fedcba9876543210'
REGION = 'us-east-1'
COMMON_NAME = 'fs-deadbeef.efs.us-east-1.amazonaws.com'
MOUNT_NAME = 'fs-deadbeef.mount.dir.12345'
ACCESS_KEY_ID_VAL = 'FAKE_AWS_ACCESS_KEY_ID'
SECRET_ACCESS_KEY_VAL = 'FAKE_AWS_SECRET_ACCESS_KEY'
SESSION_TOKEN_VAL = 'FAKE_SESSION_TOKEN'
CREDENTIALS = {
'AccessKeyId': ACCESS_KEY_ID_VAL,
'SecretAccessKey': SECRET_ACCESS_KEY_VAL,
'Token': SESSION_TOKEN_VAL
}
FIXED_DT = datetime(2000, 1, 1, 12, 0, 0)
@pytest.fixture(autouse=True)
def setup_method(mocker):
mocker.patch('mount_efs.get_region', return_value=REGION)
mocker.patch('mount_efs.get_region_helper', return_value=REGION)
mocker.patch('mount_efs.get_aws_security_credentials', return_value=CREDENTIALS)
mocker.patch('mount_efs.get_utc_now', return_value=FIXED_DT)
mocker.patch('socket.gethostbyname')
def _get_config():
try:
config = ConfigParser.SafeConfigParser()
except AttributeError:
config = ConfigParser()
config.add_section(mount_efs.CONFIG_SECTION)
config.set(mount_efs.CONFIG_SECTION, 'state_file_dir_mode', '750')
config.set(mount_efs.CONFIG_SECTION, 'dns_name_format', '{fs_id}.efs.{region}.amazonaws.com')
return config
def _get_ca_conf_body(config_path, common_name, directory, private_key, date, region, fs_id, iam, ap_id):
ca_conf_str = mount_efs.create_ca_conf(config_path, common_name, directory, private_key, date, region, fs_id, iam, ap_id)
return ca_conf_str
def _get_mock_config(dns_name_format='{fs_id}.efs.{region}.amazonaws.com'):
def config_get_side_effect(section, field):
if section == mount_efs.CONFIG_SECTION and field == 'state_file_dir_mode':
return '0755'
elif section == mount_efs.CONFIG_SECTION and field == 'dns_name_format':
return dns_name_format
else:
raise ValueError('Unexpected arguments')
mock_config = MagicMock()
mock_config.get.side_effect = config_get_side_effect
return mock_config
def _get_mock_private_key_path(mocker, tmpdir):
pk_path = os.path.join(str(tmpdir), 'privateKey.pem')
mocker.patch('mount_efs.get_private_key_path', return_value=pk_path)
return pk_path
def test_certificate_without_iam_with_ap_id(mocker, tmpdir):
config = _get_mock_config()
pk_path = _get_mock_private_key_path(mocker, tmpdir)
tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, False, ap_id=AP_ID, base_path=str(tmpdir))
with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
conf_body = f.read()
assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
FS_ID, False, AP_ID)
assert os.path.exists(pk_path)
assert not os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))
def test_certificate_with_iam_with_ap_id(mocker, tmpdir):
config = _get_mock_config()
pk_path = _get_mock_private_key_path(mocker, tmpdir)
tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, True, ap_id=AP_ID, base_path=str(tmpdir))
with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
conf_body = f.read()
assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
FS_ID, True, AP_ID)
assert os.path.exists(pk_path)
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))
def test_certificate_with_iam_without_ap_id(mocker, tmpdir):
config = _get_mock_config()
pk_path = _get_mock_private_key_path(mocker, tmpdir)
tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
tmp_config_path = os.path.join(str(tmpdir), MOUNT_NAME, 'tmpConfig')
mount_efs.create_certificate(config, MOUNT_NAME, COMMON_NAME, REGION, FS_ID, True, ap_id=None, base_path=str(tmpdir))
with open(os.path.join(tls_dict['mount_dir'], 'config.conf')) as f:
conf_body = f.read()
assert conf_body == _get_ca_conf_body(tmp_config_path, COMMON_NAME, tls_dict['mount_dir'], pk_path, FIXED_DT, REGION,
FS_ID, True, None)
assert os.path.exists(pk_path)
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'publicKey.pem'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'request.csr'))
assert os.path.exists(os.path.join(tls_dict['mount_dir'], 'certificate.pem'))
def test_create_ca_supporting_dirs(tmpdir):
config = _get_config()
tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
mount_efs.ca_dirs_check(config, tls_dict['database_dir'], tls_dict['certs_dir'])
assert os.path.exists(tls_dict['database_dir'])
assert os.path.exists(tls_dict['certs_dir'])
def test_create_ca_supporting_files(tmpdir):
config = _get_config()
tls_dict = mount_efs.tls_paths_dictionary(MOUNT_NAME, str(tmpdir))
index = tls_dict['index']
index_attr = tls_dict['index_attr']
serial = tls_dict['serial']
rand = tls_dict['rand']
mount_efs.ca_dirs_check(config, tls_dict['database_dir'], tls_dict['certs_dir'])
mount_efs.ca_supporting_files_check(index, index_attr, serial, rand)
with open(index_attr, 'r') as index_attr_file:
index_attr_content = index_attr_file.read()
with open(serial, 'r') as serial_file:
serial_content = serial_file.read()
assert os.path.exists(index)
assert os.path.exists(index_attr)
assert os.path.exists(serial)
assert os.path.exists(rand)
assert 'unique_subject = no' == index_attr_content
assert '00' == serial_content
def test_create_canonical_request_without_token():
public_key_hash = 'fake_public_key_hash'
canonical_request_out = mount_efs.create_canonical_request(public_key_hash, FIXED_DT, ACCESS_KEY_ID_VAL, REGION, FS_ID)
assert 'GET\n/\nAction=Connect&PublicKeyHash=fake_public_key_hash&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=' \
'FAKE_AWS_ACCESS_KEY_ID%2F20000101%2Fus-east-1%2Felasticfilesystem%2Faws4_request&X-Amz-Date=20000101T120000Z&' \
'X-Amz-Expires=86400&X-Amz-SignedHeaders=host\nhost:fs-deadbeef\nhost\n' \
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == canonical_request_out
def test_create_canonical_request_with_token(mocker):
mocker.patch('mount_efs.get_utc_now', return_value=FIXED_DT)
public_key_hash = 'fake_public_key_hash'
canonical_request_out = mount_efs.create_canonical_request(public_key_hash, FIXED_DT, ACCESS_KEY_ID_VAL, REGION, FS_ID,
SESSION_TOKEN_VAL)
assert 'GET\n/\nAction=Connect&PublicKeyHash=fake_public_key_hash&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=' \
'FAKE_AWS_ACCESS_KEY_ID%2F20000101%2Fus-east-1%2Felasticfilesystem%2Faws4_request&X-Amz-Date=20000101T120000Z&' \
'X-Amz-Expires=86400&X-Amz-Security-Token=FAKE_SESSION_TOKEN&X-Amz-SignedHeaders=host\nhost:fs-deadbeef\nhost' \
'\ne3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' == canonical_request_out
def test_get_public_key_sha1(tmpdir):
fake_public_key_filename = 'fake_public_key.pem'
fake_public_key_path = os.path.join(str(tmpdir), fake_public_key_filename)
public_key_body = '-----BEGIN PUBLIC KEY-----\nMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEArGJgJTTwefL+jHV8A9EM\npX56n3Z' \
'JczM+4iPPSnledJzBcUO1VF+j6TOzy39BWBtvRjSs0nqd5wqw+1xHawhh\ndJF5KsqMNGcP/y9fLi9Bm1vInHfQVan4NhXWh8S' \
'NbRZM1tNZV5/k+VnFur6ACHwq\neWppGXkGBASL0zG0MiCbOVMkwfv/E69APVC6ljnPXBWaDuggAClYheTv5RIU4wD1\nc1nohR' \
'b0ZHyfZjELjnqLfY0eOqY+msQXzP0eUmZXCMvUkGxi5DJnNVKhw5y96QbB\nRFO5ImQXpNsQmp8F9Ih1RIxNsl4csaEuK+/Zo' \
'J68vR47oQNtPp1PjdIwcnQ3cOvO\nHMxulMX21Fd/e9TsnqISOTOyebmYFgaHczg4JVu5lV699+7QWJm1a7M4ab0WgVVR\nz27J0' \
'Lx/691MZB4TbGoEIFza30/sk6uTPxAzebzCaroXzT7uA6TIRtRpxt4X9a+4\n6GhfgR5RJfFMb8rPGmaKWqA2YkTsZzRGHhbAzs' \
'J/nEstAgMBAAE=\n-----END PUBLIC KEY-----'
tmpdir.join(fake_public_key_filename).write(public_key_body)
sha1_result = mount_efs.get_public_key_sha1(fake_public_key_path)
assert sha1_result == 'd9c2a68f2c4de49982e310d95e539a89abd6bc13'
def test_create_string_to_sign():
canonical_request = 'canonical_request'
string_to_sign_output = mount_efs.create_string_to_sign(canonical_request, FIXED_DT, REGION)
assert 'AWS4-HMAC-SHA256\n20000101T120000Z\n20000101/us-east-1/elasticfilesystem/aws4_request\n' \
'572b1e335109068b81e4def81524c5fe5d0e385143b5656cbf2f7c88e5c1a51e' == string_to_sign_output
def test_calculate_signature():
string_to_sign = 'string_to_sign'
signature_output = mount_efs.calculate_signature(string_to_sign, FIXED_DT, SECRET_ACCESS_KEY_VAL, REGION)
assert '6aa643803d4a1b07c5ac87bff96347ef28dab1cb5a5c5d63969c90ca11454c4a' == signature_output
|
python
|
from chibi.units.base import Unit
from unittest import TestCase
class Test_unit( TestCase ):
def setUp( self ):
self.unit = Unit( 10 )
def test_should_print_the_value_when_is_str( self ):
self.assertIn( '10', str( self.unit ) )
def test_when_add_a_int_should_work( self ):
r = 10 + self.unit
self.assertEqual( r.value, 20 )
r = self.unit + 10
self.assertEqual( r.value, 20 )
def test_when_add_a_float_should_work( self ):
r = 10.10 + self.unit
self.assertEqual( r.value, 20.1 )
r = self.unit + 10.1
self.assertEqual( r.value, 20.1 )
def test_when_sub_a_int_should_work( self ):
r = 10 - self.unit
self.assertEqual( r.value, 0 )
r = self.unit - 10
self.assertEqual( r.value, 0 )
def test_when_sub_a_float_should_work( self ):
r = 10.10 - self.unit
self.assertAlmostEqual( r.value, -0.1, delta=0.01 )
r = self.unit - 10.10
self.assertAlmostEqual( r.value, -0.1, delta=0.01 )
def test_when_mul_a_int_should_work( self ):
r = 10 * self.unit
self.assertEqual( r.value, 100 )
r = self.unit * 10
self.assertEqual( r.value, 100 )
def test_when_mul_a_float_should_work( self ):
r = 10.1 * self.unit
self.assertEqual( r.value, 101.0 )
r = self.unit * 10.1
self.assertEqual( r.value, 101.0 )
def test_when_div_a_int_should_work( self ):
r = 10 / self.unit
self.assertEqual( r.value, 1 )
r = self.unit / 10
self.assertEqual( r.value, 1 )
def test_when_div_a_float_should_work( self ):
r = 10.10 / self.unit
self.assertAlmostEqual( r.value, 0.99, delta=0.001 )
r = self.unit / 10.10
self.assertAlmostEqual( r.value, 0.99, delta=0.001 )
def test_when_div_int_a_int_should_work( self ):
r = 10 // self.unit
self.assertEqual( r.value, 1 )
r = self.unit // 10
self.assertEqual( r.value, 1 )
def test_when_div_int_a_float_should_work( self ):
r = 10.10 // self.unit
self.assertEqual( r.value, 0 )
r = self.unit // 10.10
self.assertEqual( r.value, 0 )
def test_when_pow_a_int_should_work( self ):
r = 10 ** self.unit
self.assertEqual( r.value, 10000000000 )
r = self.unit ** 10
self.assertEqual( r.value, 10000000000 )
def test_when_pow_float_a_float_should_work( self ):
r = 10.10 ** self.unit
self.assertEqual( r.value, 12589254117.941662 )
r = self.unit ** 10.10
self.assertEqual( r.value, 12589254117.941662 )
|
python
|
###################################################################################################################
# Uses a trained network to predict the class for an input image
# Notes - Run train.py first before this script
# Basic usage: python predict.py /path/to/image checkpoint
# Options:
# Return top K most likely classes: python predict.py input checkpoint --top_k 3
# Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json
# Use GPU for inference: python predict.py input checkpoint --gpu
# Typical run: python predict.py --gpu --category_names cat_to_name.json --top_k 3 check_point.pt
#####################################################################################################################
###########################################
# Get the arguments from the command line
###########################################
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('image_checkpoint', metavar='image_checkpoint',
help='/path/to/image_checkpoint')
parser.add_argument('--category_names', action="store",
dest="category_names", default='cat_to_name.json',
help='a mapping of categories to real names ')
parser.add_argument('--top_k', metavar='top_k',
default=3, type=int,
help='top K most likely classes (default: 3)')
parser.add_argument('--gpu', dest='use_gpu', action="store_true",
default=False,
help='Use GPU for inference (default: False)')
parser.add_argument('--version', action='version', version='%(prog)s 1.0 There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.') # Decided to pull some wording from GCC
parser.add_argument('--load_dir', action="store",
dest="load_dir", default='./',
help='directory_to_saved_checkpoints')
parser.add_argument('--test_image_dir', action="store",
dest="test_image_dir", default='./flowers/test/10',
help='directory location to image used to test prediction')
parser.add_argument('--test_image', action="store",
dest="test_image", default='image_07104.jpg',
help='Image file used to test prediction')
args = parser.parse_args()
### DEBUG ###
print(vars(args))
print(args.use_gpu)
#########################
# Various Python imports
#########################
import os
import sys
import numpy as np
import torch
import time
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
#%matplotlib inline
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
import json
from matplotlib.ticker import FormatStrFormatter
from collections import OrderedDict
image_checkpoint = args.image_checkpoint
category_names = args.category_names
top_k = args.top_k
use_gpu = args.use_gpu
load_dir = args.load_dir
test_image_dir = args.test_image_dir
test_image = args.test_image
device = torch.device('cuda' if torch.cuda.is_available() and use_gpu else 'cpu')
if use_gpu:
#############################
# Check if CUDA is available
#############################
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Exiting ...')
sys.exit()
else:
print('CUDA is available! Training on GPU ...')
### DEBUG ###
#print('Passed GPU Check')
##########################
# Possible models to use
##########################
structures = {"densenet121" : 1024,
"alexnet" : 9216}
def model_setup(structure='densenet121',dropout=0.5, hidden_layer1 = 120,lr = 0.001):
#def model_setup(structure='densenet121',dropout=0.5, hidden_layer1 = 512,lr = 0.01):
### DEBUG ###
#print('Model Setup Function...')
if structure == 'densenet121':
model = models.densenet121(pretrained=True)
elif structure == 'alexnet':
model = models.alexnet(pretrained = True)
else:
print("Im sorry but {} is not a valid model. Did you mean densenet121 or alexnet?".format(structure))
sys.exit()
classifier = nn.Sequential(OrderedDict([
('dropout',nn.Dropout(dropout)),
('inputs', nn.Linear(structures[structure], hidden_layer1)),
('relu1', nn.ReLU()),
('hidden_layer1', nn.Linear(hidden_layer1, 90)),
('relu2',nn.ReLU()),
('hidden_layer2',nn.Linear(90,80)),
('relu3',nn.ReLU()),
('hidden_layer3',nn.Linear(80,102)),
('output', nn.LogSoftmax(dim=1))
]))
for param in model.parameters():
param.requires_grad = False
model.classifier = classifier
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr )# Observe that all parameters are being optimized
if use_gpu:
model.cuda()
return model, optimizer, criterion, structure
####################################################################
# Loads a checkpoint and rebuilds the model
####################################################################
def load_model(path='./',file_name='check_point.pt'):
### DEBUG ###
#print('Load Model Function...')
checkpoint = torch.load((path + file_name))
structure = checkpoint['structure']
hidden_layer1 = checkpoint['hidden_layer1']
model,_,_,_ = model_setup(structure , 0.5,hidden_layer1)
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
### DEBUG ###
#print('Exiting Load Model Function...')
return model
#############
# Load Model
#############
model2 = load_model(path=load_dir,file_name=image_checkpoint)
### DEBUG ###
#print(model2)
#print(model2.state_dict())
###########################
# Label mapping for DEBUG
###########################
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
#######################
# Image Preprocessing
#######################
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
### DEBUG ###
#print('Image Preprocessing Function...')
img_pil = Image.open(image)
adjustments = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
img_tensor = adjustments(img_pil)
return img_tensor
####################
# Class Prediction
####################
#model.class_to_idx =train_data.class_to_idx
### DEBUG ###
#print('Pre Class Prediction')
ctx = model2.class_to_idx
#use_gpu = True
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# if use_gpu:
# model.to('cuda:0')
# else:
# model.to('cpu')
model.to(device)
img_torch = process_image(image_path)
img_torch = img_torch.unsqueeze_(0)
img_torch = img_torch.float()
with torch.no_grad():
if use_gpu:
output = model.forward(img_torch.cuda())
else:
output = model.forward(img_torch)
probability = F.softmax(output.data,dim=1)
############################
# Pulled from check_sanity()
############################
probabilities = probability.topk(topk)
#b = [cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])]
idx_to_class = {val: key for key, val in model.class_to_idx.items()}
b = [cat_to_name[idx_to_class[index]] for index in np.array(probabilities[1][0])]
print(b)
return probability.topk(topk)
# Implement the code to predict the class from an image file
####################
# Get an test image
####################
#data_dir = 'flowers'
#img = (data_dir + '/test' + '/10/' + 'image_07104.jpg')
img = os.path.join(test_image_dir,test_image)
val1, val2 = predict(img, model2, top_k)
print(val1)
print(val2)
|
python
|
from datetime import datetime
import hashlib
import uuid
from google.cloud import firestore
SONG_TEMPLATE = '{verse}\n\n{pre_chorus}\n\n{chorus}\n\n{pre_chorus}\n\n{chorus}\n\n{bridge}'
class Song:
collection_name = 'songs'
def __init__(self, id, chorus_id=None, pre_chorus_id=None, verse_id=None,
bridge_id=None, created=None, modified=None):
now = datetime.utcnow()
self.db = firestore.Client()
self.id = id
self.chorus_id = chorus_id
self.pre_chorus_id = pre_chorus_id
self.verse_id = verse_id
self.bridge_id = bridge_id
self.created = created or now
self.modified = modified or now
@classmethod
def get_id(cls, chorus, pre_chorus, verse, bridge):
id_base = f'{chorus}|{pre_chorus}|{verse}|{bridge}'
hasher = hashlib.sha1()
hasher.update(id_base.encode('utf8'))
song_id = hasher.hexdigest()[:7]
return song_id
@classmethod
def get(cls, song_id):
db = firestore.Client()
results = [
item
for item in db.collection(cls.collection_name).where('id', '==', song_id).stream()
]
if results:
return cls(song_id).populate(**(results[0].to_dict()))
else:
return None
@classmethod
def get_all(cls):
db = firestore.Client()
return [
cls(item.id).populate(**item.to_dict())
for item in db.collection(cls.collection_name).stream()
]
def populate(self, **kwargs):
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
return self
def save(self):
doc_ref = self.db.collection(self.collection_name).document(self.id)
doc_ref.set({
'id': self.id,
'chorus_id': self.chorus_id,
'pre_chorus_id': self.pre_chorus_id,
'verse_id': self.verse_id,
'bridge_id': self.bridge_id,
'created': self.created,
'modified': datetime.utcnow(),
})
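# Hedged illustration (the part ids below are hypothetical): get_id derives a
# stable 7-character document id from the four part ids, so the same
# chorus/pre-chorus/verse/bridge combination always maps to the same document:
# Song.get_id('c1', 'p1', 'v1', 'b1') == hashlib.sha1(b'c1|p1|v1|b1').hexdigest()[:7]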
|
python
|
"""This module contains various decorators.
There are two kinds of decorators defined in this module which consists of either two or
three nested functions. The former are decorators without and the latter with arguments.
For more information on decorators, see this `guide`_ on https://realpython.com which
provides a comprehensive overview.
.. _guide:
https://realpython.com/primer-on-python-decorators/
"""
import functools
import warnings
from typing import NamedTuple
import numpy as np
import pandas as pd
from estimagic.exceptions import get_traceback
from estimagic.parameters.process_constraints import process_constraints
from estimagic.parameters.reparametrize import reparametrize_from_internal
def numpy_interface(func=None, *, params=None, constraints=None, numpy_output=False):
"""Convert x to params.
This decorated function receives a NumPy array of parameters and converts it to a
:class:`pandas.DataFrame` which can be handled by the user's criterion function.
For convenience, the decorated function can also be called directly with a
params DataFrame. In that case, the decorator does nothing.
Args:
func (callable): The function to which the decorator is applied.
params (pandas.DataFrame): See :ref:`params`.
constraints (list of dict): Contains constraints.
numpy_output (bool): Whether pandas objects in the output should also be
converted to numpy arrays.
Returns:
callable
"""
constraints = [] if constraints is None else constraints
pc, pp = process_constraints(constraints, params)
fixed_values = pp["_internal_fixed_value"].to_numpy()
pre_replacements = pp["_pre_replacements"].to_numpy().astype(int)
post_replacements = pp["_post_replacements"].to_numpy().astype(int)
def decorator_numpy_interface(func):
@functools.wraps(func)
def wrapper_numpy_interface(x, *args, **kwargs):
if isinstance(x, pd.DataFrame):
p = x
elif isinstance(x, np.ndarray):
p = reparametrize_from_internal(
internal=x,
fixed_values=fixed_values,
pre_replacements=pre_replacements,
processed_constraints=pc,
post_replacements=post_replacements,
params=params,
return_numpy=False,
)
else:
raise ValueError(
"x must be a numpy array or DataFrame with 'value' column."
)
criterion_value = func(p, *args, **kwargs)
if isinstance(criterion_value, (pd.DataFrame, pd.Series)) and numpy_output:
criterion_value = criterion_value.to_numpy()
return criterion_value
return wrapper_numpy_interface
if callable(func):
return decorator_numpy_interface(func)
else:
return decorator_numpy_interface
def catch(
func=None,
*,
exception=Exception,
exclude=(KeyboardInterrupt, SystemExit),
onerror=None,
default=None,
warn=True,
reraise=False,
):
"""Catch and handle exceptions.
This decorator can be used with and without additional arguments.
Args:
exception (Exception or tuple): One or several exceptions that
are caught and handled. By default all Exceptions are
caught and handled.
exclude (Exception or tuple): One or several exceptions that
are not caught. By default those are KeyboardInterrupt and
SystemExit.
onerror (None or Callable): Callable that takes an Exception
as only argument. This is called when an exception occurs.
default: Value that is returned as the output of func when
an exception occurs. Can be one of the following:
- a constant
- "__traceback__", in this case a string with a traceback is returned.
- callable with the same signature as func.
warn (bool): If True, the exception is converted to a warning.
reraise (bool): If True, the exception is raised after handling it.
"""
def decorator_catch(func):
@functools.wraps(func)
def wrapper_catch(*args, **kwargs):
try:
res = func(*args, **kwargs)
except exclude:
raise
except exception as e:
if onerror is not None:
onerror(e)
if reraise:
raise e
tb = get_traceback()
if warn:
msg = f"The following exception was caught:\n\n{tb}"
warnings.warn(msg)
if default == "__traceback__":
res = tb
elif callable(default):
res = default(*args, **kwargs)
else:
res = default
return res
return wrapper_catch
if callable(func):
return decorator_catch(func)
else:
return decorator_catch
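# Hedged usage sketch (the function name and chosen default are illustrative
# only): with ``warn=False`` and a constant ``default``, exceptions raised by
# the wrapped function are swallowed and the default is returned instead.
# @catch(default=float("nan"), warn=False)
# def fragile(x):
#     return 1 / x
#
# fragile(0)  # -> nan instead of raising ZeroDivisionError
# fragile(2)  # -> 0.5, successful calls are passed through unchanged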
def unpack(func=None, symbol=None):
def decorator_unpack(func):
if symbol is None:
@functools.wraps(func)
def wrapper_unpack(arg):
return func(arg)
elif symbol == "*":
@functools.wraps(func)
def wrapper_unpack(arg):
return func(*arg)
elif symbol == "**":
@functools.wraps(func)
def wrapper_unpack(arg):
return func(**arg)
return wrapper_unpack
if callable(func):
return decorator_unpack(func)
else:
return decorator_unpack
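# Hedged usage sketch (names are illustrative): with ``symbol="**"`` the wrapped
# function receives a dict and calls the original with keyword arguments.
# @unpack(symbol="**")
# def add(a, b):
#     return a + b
#
# add({"a": 1, "b": 2})  # -> 3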
def switch_sign(func):
"""Switch sign of all outputs of a function."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
unswitched = func(*args, **kwargs)
if isinstance(unswitched, dict):
switched = {key: -val for key, val in unswitched.items()}
elif isinstance(unswitched, (tuple, list)):
switched = []
for entry in unswitched:
if isinstance(entry, dict):
switched.append({key: -val for key, val in entry.items()})
else:
switched.append(-entry)
if isinstance(unswitched, tuple):
switched = tuple(switched)
else:
switched = -unswitched
return switched
return wrapper
class AlgoInfo(NamedTuple):
primary_criterion_entry: str
name: str
parallelizes: bool
disable_cache: bool
needs_scaling: bool
is_available: bool
def mark_minimizer(
func=None,
*,
primary_criterion_entry="value",
name=None,
parallelizes=False,
disable_cache=False,
needs_scaling=False,
is_available=True,
):
"""Decorator to mark a function as internal estimagic minimizer and add information.
Args:
func (callable): The function to be decorated
primary_criterion_entry (str): One of "value", "contributions",
"root_contributions" or "dict". Default: "value". This decides
which part of the output of the user provided criterion function
is needed by the internal optimizer.
name (str): The name of the internal algorithm.
parallelizes (bool): Must be True if an algorithm evaluates the criterion,
derivative or criterion_and_derivative in parallel.
disable_cache (bool): If True, no caching for the criterion function
or its derivatives is used.
needs_scaling (bool): Must be True if the algorithm is not reasonably
independent of the scaling of the parameters.
is_available (bool): Whether the algorithm is available. This is needed for
algorithms that require optional dependencies.
"""
if name is None:
raise TypeError(
"mark_minimizer() missing 1 required keyword-only argument: 'name'"
)
elif not isinstance(name, str):
raise TypeError("name must be a string.")
valid_entries = ["value", "dict", "contributions", "root_contributions"]
if primary_criterion_entry not in valid_entries:
raise ValueError(
f"primary_criterion_entry must be one of {valid_entries} not "
f"{primary_criterion_entry}."
)
if not isinstance(parallelizes, bool):
raise TypeError("parallelizes must be a bool.")
if not isinstance(disable_cache, bool):
raise TypeError("disable_cache must be a bool.")
if not isinstance(needs_scaling, bool):
raise TypeError("needs_scaling must be a bool.")
if not isinstance(is_available, bool):
raise TypeError("is_available must be a bool.")
algo_info = AlgoInfo(
primary_criterion_entry=primary_criterion_entry,
name=name,
parallelizes=parallelizes,
disable_cache=disable_cache,
needs_scaling=needs_scaling,
is_available=is_available,
)
def decorator_mark_minimizer(func):
@functools.wraps(func)
def wrapper_mark_minimizer(*args, **kwargs):
return func(*args, **kwargs)
wrapper_mark_minimizer._algorithm_info = algo_info
return wrapper_mark_minimizer
if callable(func):
return decorator_mark_minimizer(func)
else:
return decorator_mark_minimizer
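# Hedged usage sketch (the algorithm below is illustrative, not a real
# estimagic optimizer): the decorator attaches an ``AlgoInfo`` instance to the
# wrapper as ``_algorithm_info`` so the metadata can be read later.
# @mark_minimizer(name="toy_random_search", needs_scaling=True)
# def toy_random_search(criterion, x):
#     ...
#
# toy_random_search._algorithm_info.name                     # "toy_random_search"
# toy_random_search._algorithm_info.primary_criterion_entry  # "value"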
|
python
|
from typing import Tuple, List
import pytest
from predicates.state import State
from predicates import guards, actions
from predicates.guards import AlwaysTrue, AlwaysFalse
from model.model import the_model, Model
from model.operation import Operation, Transition
from planner.plan import plan
# ---------------------------------------------------------------------------
# ...
# ---------------------------------------------------------------------------
g = guards.from_str
a = actions.from_str
def test_simple_planner_1():
"""
This test checks the implementation of the planner with a simple model
"""
initial_state = State(
v1 = False,
v2 = 0
)
o1 = Operation(
name=f"o1",
# enabled when v1 is false
precondition=Transition("pre", g("!v1"), ()),
# the guard of the postcondition is only used when running the operation, not when planning
postcondition=Transition("post", AlwaysTrue, a(f"v1")),
# the effects are only used when planning to simulate changes of sensors
effects=(),
)
o2 = Operation(
name=f"o2",
precondition=Transition("pre", g("v1 && v2 == 0"), ()),
postcondition=Transition("post", AlwaysTrue, a(f"v2 += 1")),
effects=(),
)
o3 = Operation(
name=f"o3",
precondition=Transition("pre", g("v1 && v2 == 0"), ()),
postcondition=Transition("post", AlwaysTrue, a(f"v2 += 2")),
effects=(),
)
o4 = Operation(
name=f"o4",
precondition=Transition("pre", g("v1 && v2 == 2"), ()),
postcondition=Transition("post", AlwaysTrue, a(f"v2 += 1")),
effects=(),
)
o5 = Operation(
name=f"o5",
precondition=Transition("pre", g("v1"), ()),
postcondition=Transition("post", AlwaysTrue, a(f"v2 <- 0")),
effects=(),
)
simple_model = Model(initial_state, {
o1.name: o1,
o2.name: o2,
o3.name: o3,
o4.name: o4,
o5.name: o5,
})
goal = g("v2 == 3")
p = plan(initial_state, goal, simple_model)
assert p != None
assert len(p) != 0
assert p == [o1.name, o3.name, o4.name]
goal = g("v2 == 1")
p = plan(initial_state, goal, simple_model)
assert p == [o1.name, o2.name]
def test_simple_planner_2():
"""
This test checks the implementation of the planner with a simple model
"""
initial_state = State(
v1 = False,
v2 = 0
)
ops = {}
for i in range(100):
ops[f"o{i}"] = Operation(
name=f"o{i}",
# enabled when v1 is false
precondition=Transition("pre", g("!v1"), ()),
# the guard of the postcondition is only used when running the operation, not when planning
postcondition=Transition("post", AlwaysTrue, a(f"v1")),
# the effects are only used when planning to simulate changes of sensors
effects=(),
)
ops["final"] = Operation(
name=f"final",
precondition=Transition("pre", g("v1 && v2 == 0"), ()),
postcondition=Transition("post", AlwaysTrue, a(f"v2 += 1")),
effects=(),
)
model = Model(initial_state, ops)
goal = g("v2 == 1")
p = plan(initial_state, goal, model)
print(p)
assert p != None
assert len(p) == 2
assert p[1] == "final"
def test_simple_planner_3():
"""
This test checks the implementation of the planner with a simple model
"""
initial_state = State(
v1 = False,
v2 = 0
)
ops = {}
for i in range(100):
ops[f"o{i}"] = Operation(
name=f"o{i}",
# enabled when v1 is false
precondition=Transition("pre", g(f"v2 == {i}"), ()),
# the guard of the postcondition is only used when running the operation, not when planning
postcondition=Transition("post", AlwaysTrue, a(f"v2 +=1")),
# the effects are only used when planning to simulate changes of sensors
effects=(),
)
model = Model(initial_state, ops)
goal = g("v2 == 100")
p = plan(initial_state, goal, model, 120)
print(p)
assert p != None
assert len(p) == 100
# Use this test when you are working with the model
def test_planner_real_model_1():
"""This method creates the test the planner that you will use for just a simple case"""
m = the_model()
goal = g("in_pos1 == empty")
assert plan(m.initial_state, goal, m) == []
goal = g("in_pos1 != empty")
p = plan(m.initial_state, goal, m)
print(f"plan: {p}")
assert p == ['add_cube', 'to_input', 'pick_at_input', 'to_pos1', 'place_at_pos1']
goal = g("in_pos1 != empty && in_pos2 != empty && in_pos3 != empty")
p = plan(m.initial_state, goal, m)
print(f"plan long: {p}")
assert p != None
assert len(p) == 15
# here you should create more tests to check your model ...
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_review'),
]
operations = [
migrations.AddField(
model_name='location',
name='alcohol',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
),
migrations.AddField(
model_name='location',
name='bathrooms',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
),
migrations.AddField(
model_name='location',
name='coffee',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Truck Stop'), (2, b'Good'), (3, b'Really Good'), (4, b'Great')]),
),
migrations.AddField(
model_name='location',
name='food',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
),
migrations.AddField(
model_name='location',
name='outdoor',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'No'), (1, b'Yes')]),
),
migrations.AddField(
model_name='location',
name='outlets',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Minimal'), (2, b'Some'), (3, b'Ample')]),
),
migrations.AddField(
model_name='location',
name='seating',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Minimal'), (2, b'Some'), (3, b'Ample')]),
),
migrations.AddField(
model_name='location',
name='wifi',
field=models.IntegerField(blank=True, null=True, choices=[(0, b'None'), (1, b'Spotty'), (2, b'Strong')]),
),
]
|
python
|
# Import the requests module
import requests
import urllib.parse
class Xiaoniu(object):
def __init__(self):
self.headers={
'Accept': 'application/json, text/plain, */*',
'Content-Type': 'application/x-www-form-urlencoded',
'Origin': 'https://niutrans.vip',
'Referer': 'https://niutrans.vip/console/textTrans',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36'
}
self.url = 'https://test.niutrans.vip/NiuTransServer/testtrans'
def translate(self, from_lan, to_lan, text):
data = {
'from' : from_lan,
'to' : to_lan,
'src_text': text
}
url = self.url
url+='?from={}&to={}&src_text='.format(from_lan, to_lan)
url+=urllib.parse.quote(data['src_text'])
#print(url)
result = requests.get(url=url,headers=self.headers)
#print(result.text)
if result != None:
return result.json()['tgt_text']
def xiaoniuTrans(word,from_language='zh',to_language='en'):
niu = Xiaoniu()
return niu.translate(from_language,to_language,word)
if __name__ == '__main__':
print(xiaoniuTrans("hello",from_language='en',to_language='zh'))
|
python
|
import warnings
from pyro import params
from pyro.distributions.distribution import Distribution
from pyro.poutine.util import is_validation_enabled
from .messenger import Messenger
class LiftMessenger(Messenger):
"""
Messenger which "lifts" parameters to random samples.
Given a stochastic function with param calls and a prior,
creates a stochastic function where all param calls are
replaced by sampling from prior.
Prior should be a callable or a dict of names to callables.
"""
def __init__(self, prior):
"""
Constructor.
:param prior: prior used to lift parameters. Prior can be of type
dict, pyro.distributions, or a python stochastic fn.
"""
super(LiftMessenger, self).__init__()
self.prior = prior
self._samples_cache = {}
def __enter__(self):
self._samples_cache = {}
if is_validation_enabled() and isinstance(self.prior, dict):
self._param_hits = set()
self._param_misses = set()
return super(LiftMessenger, self).__enter__()
def __exit__(self, *args, **kwargs):
self._samples_cache = {}
if is_validation_enabled() and isinstance(self.prior, dict):
extra = set(self.prior) - self._param_hits
if extra:
warnings.warn(
"pyro.module prior did not find params ['{}']. "
"Did you instead mean one of ['{}']?"
.format("', '".join(extra), "', '".join(self._param_misses)))
return super(LiftMessenger, self).__exit__(*args, **kwargs)
def _pyro_sample(self, msg):
return None
def _pyro_param(self, msg):
"""
Overrides the `pyro.param` call with samples sampled from the
distribution specified in the prior. The prior can be a
pyro.distributions object or a dict of distributions keyed
on the param names. If the param name does not match the
name the keys in the prior, that param name is unchanged.
"""
name = msg["name"]
param_name = params.user_param_name(name)
if isinstance(self.prior, dict):
# prior is a dict of distributions
if param_name in self.prior.keys():
msg["fn"] = self.prior[param_name]
msg["args"] = msg["args"][1:]
if isinstance(msg['fn'], Distribution):
msg["args"] = ()
msg["kwargs"] = {}
msg["infer"] = {}
if is_validation_enabled():
self._param_hits.add(param_name)
else:
if is_validation_enabled():
self._param_misses.add(param_name)
return None
elif isinstance(self.prior, Distribution):
# prior is a distribution
msg["fn"] = self.prior
msg["args"] = ()
msg["kwargs"] = {}
msg["infer"] = {}
elif callable(self.prior):
if not isinstance(self.prior, Distribution):
# prior is a stochastic fn. block sample
msg["stop"] = True
msg["fn"] = self.prior
msg["args"] = msg["args"][1:]
else:
# otherwise leave as is
return None
msg["type"] = "sample"
if name in self._samples_cache:
# Multiple pyro.param statements with the same
# name. Block the site and fix the value.
msg['value'] = self._samples_cache[name]['value']
msg["is_observed"] = True
msg["stop"] = True
else:
self._samples_cache[name] = msg
msg["is_observed"] = False
return self._pyro_sample(msg)
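# Hedged usage note (the model and parameter names are illustrative): this
# messenger is normally reached through ``pyro.poutine.lift``, e.g.
#   lifted_model = pyro.poutine.lift(model, prior={"weight": dist.Normal(0., 1.)})
# so that every ``pyro.param("weight", ...)`` statement inside ``model`` is
# replaced by a sample drawn from the given prior.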
|
python
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.dialogue.routers import dialogues_router
from app.message.middleware import WebSocketStateMiddleware
from app.message.routers import message_router
from app.notification.routers import notification_router
from config import PROJECT_NAME, API, VERSION, CLIENT_NAME
from db import engine, Base
app = FastAPI(
title=PROJECT_NAME,
version=VERSION,
description='Messenger Service Anti-Freelancer by Counter',
root_path=f'/{CLIENT_NAME}',
)
app.add_middleware(
CORSMiddleware,
allow_origins=['*'],
allow_credentials=True,
allow_methods=['*'],
allow_headers=['*'],
)
app.add_middleware(WebSocketStateMiddleware)
@app.on_event('startup')
async def startup():
""" Startup """
async with engine.begin() as connection:
await connection.run_sync(Base.metadata.create_all)
app.include_router(message_router, prefix=f'/{API}/messages')
app.include_router(notification_router, prefix=f'/{API}/notifications')
app.include_router(dialogues_router, prefix=f'/{API}/dialogues')
|
python
|
import numpy as np
from glob import glob as glob
#This will probably change
astrotable = '/Users/Arthur/Documents/School/MetaPak/GradPak_code/extras/gradpak_w_sky_astrometry_table.txt'
basedir = '/Users/Arthur/Documents/School/891_paper/GP_data'
#astrotable = '/usr/users/eigenbrot/research/Pak/gradpak_w_sky_astrometry_table.txt'
def write_header(f):
f.write(r"""\renewcommand{\thefootnote}{\alph{footnote}}
\begin{center}
\begin{longtable}{crrccccc}
\caption{\GP Fiber Locations and Lab Data} \label{GPtesting:tab:GP_cal_full} \\
\hline \hline \\[-2ex]
Fiber &
$\Delta\alpha$\tablenotemark{a} &
$\Delta\delta$\tablenotemark{a} &
diameter &
$T_\mathrm{tot}$ &
$T_4$\tablenotemark{b} &
$T_{4.4}$ &
$T_5$ \\
number &
('') &
('') &
('') &
&
&
& \\[0.5ex] \hline
\\[-1.8ex]
\endfirsthead
\multicolumn{7}{c}{{\tablename} \thetable{} -- Continued} \\[0.5ex]
\hline \hline \\[-2ex]
Fiber &
$\Delta\alpha$\tablenotemark{a} &
$\Delta\delta$\tablenotemark{a} &
diameter &
$T_\mathrm{tot}$ &
$T_4$\tablenotemark{b} &
$T_{4.4}$ &
$T_5$ \\
number &
('') &
('') &
('') &
&
&
& \\[0.5ex] \hline
\\[-1.8ex]
\endhead
\endfoot
\\[-1.8ex] \hline \hline
\endlastfoot
""")
return
def write_end(f):
f.write(r"""\footnotetext[1]{Distance from fiber 105.}
\footnotetext[2]{An estimate of on-bench performance. See Equation \ref{GPtesting:eq:T_FRD}.}
\end{longtable}
\end{center}
\renewcommand{\thefootnote}{\arabic{footnote}}
""")
def do_single(folder):
print "Looking for " + '{}/*metrics.txt'.format(folder)
mfile = glob('{}/*metrics.txt'.format(folder))[0]
print "found ", mfile
fibnum, tput, w4, w44, w5 = np.loadtxt(mfile, usecols=(1,5,6,7,8), unpack=True)
return fibnum, tput, w44, w4, w5
def convert_arcsec(val):
return float(val.split('"')[0])
def main(output='Appendix/gradpak_cal_table_long.tex'):
conv = {2: convert_arcsec, 3: convert_arcsec, 4:convert_arcsec}
fibnum, r_arc, z_arc, rad_arc = np.loadtxt(astrotable, delimiter=';',unpack=True,
usecols=(0,2,3,4), converters=conv)
sidx = np.argsort(fibnum)
fibnum = fibnum[sidx]
r_arc = r_arc[sidx]
z_arc = z_arc[sidx]
rad_arc = rad_arc[sidx]
sublist = glob('{}/GradPak*micron'.format(basedir))
ttfibnum = np.array([])
tput = np.array([])
w44 = np.array([])
w4 = np.array([])
w5 = np.array([])
for sub in sublist:
print sub
tf, tt, t44, t4, t5 = do_single(sub)
ttfibnum = np.r_[ttfibnum,tf]
tput = np.r_[tput,tt]
w44 = np.r_[w44,t44]
w4 = np.r_[w4,t4]
w5 = np.r_[w5,t5]
tidx = np.where(ttfibnum > 58)[0]
ttfibnum[tidx] -= 1
sidx = np.argsort(ttfibnum)
ttfibnum = ttfibnum[sidx]
tput = tput[sidx]
w44 = w44[sidx]
w4 = w4[sidx]
w5 = w5[sidx]
idx = np.where(fibnum > 58)
fibnum[idx] -= 1 #We don't like to number the broken fiber
#103 because fibernums start at 1
ref_r = r_arc[104]
ref_z = z_arc[104]
r_arc -= ref_r
z_arc -= ref_z
fmt = '{:n} & '+'{:5.2f} & '*6 + '{:5.2f}'
with open(output,'w') as f:
write_header(f)
for i in range(fibnum.size):
if fibnum[i] != ttfibnum[i]:
print '!!!!!!WARNING!!!!!!!!'
print fibnum[i], ttfibnum[i]
f.write(fmt.format(fibnum[i],r_arc[i],z_arc[i],rad_arc[i]*2,tput[i],w4[i],w44[i],w5[i]))
if i != fibnum.size - 1:
f.write(r'\\')
f.write('\n')
write_end(f)
return
|
python
|
def bisection(fun, y, xl, xr, tol, maxiter):
"""
The program uses the bisection method to solve the equation
f(x)-y = 0
input:
fun:the function(x)
y : y=f(x)
xl: lower bound
xr: upper bound
tol: tolerance
maxiter: max iter
return:
x; solution
f : residual
num_iters: the count of iters
"""
fl = fun(xl)-y # residual for left bound
fr = fun(xr)-y # residual for right bound
num_iters = 0
for i in range(maxiter):
num_iters += 1
# get midpoint
x = 0.5*(xl + xr)
# evaluate residual at midpoint
f = fun(x)-y
# check for convergence
if (abs(f) < tol):
break
# reset the bounds
if (f*fl < 0.0):
# move right bound info to mid
xr = x
fr = f
else:
# move left bound info to mid
xl = x
fl = f
return x, f, num_iters
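# Minimal worked example (values chosen purely for illustration): approximate
# sqrt(2) by solving f(x) - y = 0 with f(x) = x**2 and y = 2 on [1, 2].
if __name__ == "__main__":
    root, residual, num_iters = bisection(lambda x: x ** 2, 2.0, 1.0, 2.0, 1e-8, 100)
    print(root, residual, num_iters)  # root ~= 1.41421356, |residual| < 1e-8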
|
python
|
import sqlite3
conn = sqlite3.connect('northwind_small.sqlite3')
cur = conn.cursor()
top_products = cur.execute('SELECT ProductName, UnitPrice FROM Product \
ORDER BY UnitPrice DESC LIMIT 10').fetchall()
print(top_products)
"""[('Côte de Blaye',), ('Thüringer Rostbratwurst',), ('Mishi Kobe Niku
("Sir Rodney's Marmalade",), ('Carnarvon Tigers',),
('Raclette Courdavault',), ('Manjimup Dried Apples',),
('Tarte au sucre',), ('Ipoh Coffee',), ('Rössle Sauerkraut',)]
"""
avg_age = cur.execute("SELECT avg(HireDate -BirthDate) \
FROM Employee").fetchall()
print(avg_age[0][0])
"""37.22222222222222"""
supply = cur.execute("SELECT ProductName, UnitPrice, CompanyName \
FROM Product \
INNER JOIN Supplier on Supplier.Id = Product.SupplierID \
ORDER BY UnitPrice DESC LIMIT 10").fetchall()
print(supply)
cat = cur.execute("SELECT CategoryName, COUNT(DISTINCT Product.Id) \
FROM Product \
INNER JOIN Category on Category.Id = Product.CategoryID \
GROUP BY CategoryName \
ORDER BY COUNT(DISTINCT Product.Id) DESC \
LIMIT 1 \
").fetchall()
print(cat[0][0])
"""Confections"""
conn.close()
# No changes so no need to commit
|
python
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [
Extension("NVEnc", ["NVEnc.py"]),
Extension("QSVEnc", ["QSVEnc.py"]),
Extension("StaxRip", ["StaxRip.py"]),
]
install_requires=[
'requests',
'tqdm',
'beautifulsoup4',
'cython',
'win32api',
'psutil'
]
setup(
name = 'Update',
version = '0.2',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
platforms = 'Windows_x86_x64',
requires = install_requires,
)
|
python
|
# coding=utf-8
import logging
import os
import scrapy
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline
from .folder_path import get_file_size
import settings as project_settings
from items import AppDetail
from utils import cal_file_hash
from database import Database
from pipelines.folder_path import get_app_folder
class ApkDownloadPipeline(FilesPipeline):
logger = logging.getLogger("ApkDownloadPipeline")
def __init__(self, store_uri, download_func=None, settings=None):
super(ApkDownloadPipeline, self).__init__(store_uri, download_func, settings)
self.db_handler = Database()
def get_media_requests(self, item: AppDetail, info):
app_folder = get_app_folder(item)
download_link = item['download_link']
apk_name = item['apk_name']
file_path = os.path.join(app_folder, apk_name)
if item['market'] == "github_opensource":
file_path += ".zip"
elif not file_path.endswith('.apk'):
file_path += '.apk'
file_path = os.path.relpath(file_path, project_settings.FILES_STORE)
if not self.db_handler.get_update_status(item['update_id']):
yield scrapy.Request(download_link, meta={'file_path': file_path})
else:
raise DropItem("Apk File {} exists.".format(download_link))
def file_path(self, request, response=None, info=None, *, item=None):
return request.meta['file_path']
def item_completed(self, results, item: AppDetail, info):
if results[0][0]:
# download successfully
self.logger.info("Download app '{}' version '{}' from market '{}' successfully.".format(item['app_title'], item['version'], item['market']))
apk_path = results[0][1]['path']
apk_path = os.path.join(project_settings.FILES_STORE, apk_path)
apk_size = get_file_size(apk_path)
apk_hash = cal_file_hash(apk_path)
self.db_handler.set_update_available(item['update_id'], apk_size, apk_hash)
return item
else:
# download fail
self.logger.error("Fail to Download app '{}' version '{}' from market '{}'.".format(item['app_title'], item['version'], item['market']))
return item
|
python
|
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint, Callback
from keras import optimizers
from skimage import exposure
import numpy as np
from matplotlib import pyplot as plt
from IPython.display import clear_output
#CONFIG=======
batch_size= 10
image_width = 213
image_height = 180
#=============
class PlotLearning(Callback):
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.acc = []
self.val_acc = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.acc.append(logs.get('acc'))
self.val_acc.append(logs.get('val_acc'))
self.i += 1
if (epoch % 10 == 0):
f, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
clear_output(wait=True)
ax1.set_yscale('log')
ax1.plot(self.x, self.losses, label="loss")
ax1.plot(self.x, self.val_losses, label="val_loss")
ax1.legend()
ax2.plot(self.x, self.acc, label="accuracy")
ax2.plot(self.x, self.val_acc, label="validation accuracy")
ax2.legend()
plt.show();
plot = PlotLearning()
checkpoint = ModelCheckpoint('./model.h5', monitor='val_acc', verbose=1, save_best_only=False, mode='max')
callbacks_list = [checkpoint, plot]
train_data_dir = './data/train/'
val_data_dir = './data/val/'
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
activation ='relu', input_shape = (image_width,image_height,1)))
model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size = (3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation = "sigmoid"))
adam = optimizers.Adam(lr=0.00009, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(loss='binary_crossentropy',
optimizer=adam,
metrics=['accuracy'])
#Train and val data augmentors
train_datagen = ImageDataGenerator (
rescale=1./255,
fill_mode='nearest'
)
val_datagen = ImageDataGenerator(
rescale=1./255,
fill_mode='nearest'
)
#Generators for TRAIN and val
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(image_width, image_height),
batch_size=batch_size,
shuffle=True,
color_mode='grayscale',
class_mode='binary'
)
val_generator = val_datagen.flow_from_directory(
val_data_dir,
target_size=(image_width, image_height),
batch_size=batch_size,
shuffle=True,
color_mode='grayscale',
class_mode='binary'
)
model.fit_generator(
train_generator,
epochs=150,
shuffle=True,
callbacks=callbacks_list,
validation_data=val_generator,
)
|
python
|
'''
Created on Mar 26, 2014
@author: Simon
'''
from datahandler.abstract_statistics import AbstractStatistics
class ImageStats(AbstractStatistics):
'''
Image statistics
'''
def __init__(self):
pass
def encode(self):
return []
def decode(self, encoded_stats):
return []
|
python
|
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Save images to output files.
**Plugin Type: Global**
``SaveImage`` is a global plugin. Only one instance can be opened.
**Usage**
This global plugin is used to save any changes made in Ginga back to output
images. For example, a mosaic image that was created by the ``Mosaic``
plugin. Currently, only FITS images (single or multiple extensions) are
supported.
Given the output directory (e.g., ``/mypath/outputs/``), a suffix
(e.g., ``ginga``), an image channel (``Image``), and a selected image
(e.g., ``image1.fits``), the output file will be
``/mypath/outputs/image1_ginga_Image.fits``. Inclusion of the channel name is
optional and can be omitted using plugin configuration file,
``plugin_SaveImage.cfg``.
The modified extension(s) will have new header or data extracted from
Ginga, while those not modified will remain untouched. Relevant change
log entries from the ``ChangeHistory`` global plugin will be inserted into
the history of its ``PRIMARY`` header.
.. note:: This plugin uses the module ``astropy.io.fits`` to write the output
images, regardless of what is chosen for ``FITSpkg`` in the
``general.cfg`` configuration file.
"""
# STDLIB
import os
import shutil
# THIRD-PARTY
import astropy
from astropy.io import fits
from astropy.utils.introspection import minversion
# GINGA
from ginga.GingaPlugin import GlobalPlugin
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga.util.iohelper import shorten_name
try:
from ginga.gw.GwHelp import DirectorySelection
except ImportError: # This is needed for RTD to build
pass
__all__ = ['SaveImage']
class SaveImage(GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(SaveImage, self).__init__(fv)
# Image listing
self.columns = [('Image', 'IMAGE'), ('Mod. Ext.', 'MODEXT')]
# User preferences. Some are just default values and can also be
# changed by GUI.
prefs = self.fv.get_preferences()
self.settings = prefs.create_category('plugin_SaveImage')
self.settings.add_defaults(output_directory='.',
output_suffix='ginga',
include_chname=True,
clobber=False,
modified_only=True,
max_mosaic_size=1e8,
max_rows_for_col_resize=5000)
self.settings.load(onError='silent')
self.outdir = os.path.abspath(
self.settings.get('output_directory', '.'))
self.suffix = self.settings.get('output_suffix', 'ginga')
self.fv.add_callback('add-image', lambda *args: self.redo())
self.fv.add_callback('remove-image', lambda *args: self.redo())
self.fv.add_callback('add-channel',
lambda *args: self.update_channels())
self.fv.add_callback('delete-channel',
lambda *args: self.update_channels())
self.chnames = []
self.chname = None
self.gui_up = False
def build_gui(self, container):
"""Build GUI such that image list area is maximized."""
vbox, sw, orientation = Widgets.get_oriented_box(container)
captions = (('Channel:', 'label', 'Channel Name', 'combobox',
'Modified only', 'checkbutton'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.channel_name.set_tooltip('Channel for locating images to save')
b.channel_name.add_callback('activated', self.select_channel_cb)
mod_only = self.settings.get('modified_only', True)
b.modified_only.set_state(mod_only)
b.modified_only.add_callback('activated', lambda *args: self.redo())
b.modified_only.set_tooltip("Show only locally modified images")
container.add_widget(w, stretch=0)
captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
('Suffix:', 'llabel', 'Suffix', 'entry'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.outdir.set_text(self.outdir)
b.outdir.set_tooltip('Output directory')
b.outdir.add_callback('activated', lambda w: self.set_outdir())
b.browse.set_tooltip('Browse for output directory')
b.browse.add_callback('activated', lambda w: self.browse_outdir())
b.suffix.set_text(self.suffix)
b.suffix.set_tooltip('Suffix to append to filename')
b.suffix.add_callback('activated', lambda w: self.set_suffix())
container.add_widget(w, stretch=0)
self.treeview = Widgets.TreeView(auto_expand=True,
sortable=True,
selection='multiple',
use_alt_row_color=True)
self.treeview.setup_table(self.columns, 1, 'IMAGE')
self.treeview.add_callback('selected', self.toggle_save_cb)
container.add_widget(self.treeview, stretch=1)
captions = (('Status', 'llabel'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.status.set_text('')
b.status.set_tooltip('Status message')
container.add_widget(w, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button('Save')
btn.set_tooltip('Save selected image(s)')
btn.add_callback('activated', lambda w: self.save_images())
btn.set_enabled(False)
btns.add_widget(btn, stretch=0)
self.w.save = btn
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btn = Widgets.Button("Help")
btn.add_callback('activated', lambda w: self.help())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
container.add_widget(btns, stretch=0)
self.gui_up = True
# Initialize directory selection dialog
self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
# Generate initial listing
self.update_channels()
def instructions(self):
self.tw.set_text("""Enter output directory and suffix, if different than default. Left click to select image name to save. Multiple images can be selected using click with Shift or CTRL key. Click Save to save the selected image(s).
Output image will have the filename of <inputname>_<suffix>.fits.""")
def redo(self, *args):
"""Generate listing of images that user can save."""
if not self.gui_up:
return
mod_only = self.w.modified_only.get_state()
treedict = Bunch.caselessDict()
self.treeview.clear()
self.w.status.set_text('')
channel = self.fv.get_channel(self.chname)
if channel is None:
return
# Only list modified images for saving. Scanning Datasrc is enough.
if mod_only:
all_keys = channel.datasrc.keys(sort='alpha')
# List all images in the channel.
else:
all_keys = channel.get_image_names()
# Extract info for listing and saving
for key in all_keys:
iminfo = channel.get_image_info(key)
path = iminfo.get('path')
idx = iminfo.get('idx')
t = iminfo.get('time_modified')
if path is None: # Special handling for generated buffer, eg mosaic
infile = key
is_fits = True
else:
infile = os.path.basename(path)
infile_ext = os.path.splitext(path)[1]
infile_ext = infile_ext.lower()
is_fits = False
if 'fit' in infile_ext:
is_fits = True
# Only list FITS files unless it is Ginga generated buffer
if not is_fits:
continue
# Only list modified buffers
if mod_only and t is None:
continue
# More than one ext modified, append to existing entry
if infile in treedict:
if t is not None:
treedict[infile].extlist.add(idx)
elist = sorted(treedict[infile].extlist)
treedict[infile].MODEXT = ';'.join(
map(self._format_extname, elist))
# Add new entry
else:
if t is None:
s = ''
extlist = set()
else:
s = self._format_extname(idx)
extlist = set([idx])
treedict[infile] = Bunch.Bunch(
IMAGE=infile, MODEXT=s, extlist=extlist, path=path)
self.treeview.set_tree(treedict)
# Resize column widths
n_rows = len(treedict)
if n_rows == 0:
self.w.status.set_text('Nothing available for saving')
elif n_rows < self.settings.get('max_rows_for_col_resize', 5000):
self.treeview.set_optimal_column_widths()
self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
def update_channels(self):
"""Update the GUI to reflect channels and image listing.
"""
if not self.gui_up:
return
self.logger.debug("channel configuration has changed--updating gui")
try:
channel = self.fv.get_channel(self.chname)
except KeyError:
channel = self.fv.get_channel_info()
if channel is None:
raise ValueError('No channel available')
self.chname = channel.name
w = self.w.channel_name
w.clear()
self.chnames = list(self.fv.get_channel_names())
#self.chnames.sort()
for chname in self.chnames:
w.append_text(chname)
# select the channel that is the current one
try:
i = self.chnames.index(channel.name)
        except ValueError:
i = 0
self.w.channel_name.set_index(i)
# update the image listing
self.redo()
def select_channel_cb(self, w, idx):
self.chname = self.chnames[idx]
self.logger.debug("channel name changed to '%s'" % (self.chname))
self.redo()
def _format_extname(self, ext):
"""Pretty print given extension name and number tuple."""
if ext is None:
outs = ext
else:
outs = '{0},{1}'.format(ext[0], ext[1])
return outs
def browse_outdir(self):
"""Browse for output directory."""
self.dirsel.popup(
'Select directory', self.w.outdir.set_text, initialdir=self.outdir)
self.set_outdir()
def set_outdir(self):
"""Set output directory."""
dirname = self.w.outdir.get_text()
if os.path.isdir(dirname):
self.outdir = dirname
self.logger.debug('Output directory set to {0}'.format(self.outdir))
else:
self.w.outdir.set_text(self.outdir)
self.logger.error('{0} is not a directory'.format(dirname))
def set_suffix(self):
"""Set output suffix."""
self.suffix = self.w.suffix.get_text()
self.logger.debug('Output suffix set to {0}'.format(self.suffix))
def _write_history(self, pfx, hdu, linechar=60, indentchar=2):
"""Write change history to given HDU header.
Limit each HISTORY line to given number of characters.
Subsequent lines of the same history will be indented.
"""
channel = self.fv.get_channel(self.chname)
if channel is None:
return
history_plgname = 'ChangeHistory'
try:
history_obj = self.fv.gpmon.getPlugin(history_plgname)
except Exception:
self.logger.error(
'{0} plugin is not loaded. No HISTORY will be written to '
'{1}.'.format(history_plgname, pfx))
return
if channel.name not in history_obj.name_dict:
self.logger.error(
'{0} channel not found in {1}. No HISTORY will be written to '
'{2}.'.format(channel.name, history_plgname, pfx))
return
file_dict = history_obj.name_dict[channel.name]
chistory = []
ind = ' ' * indentchar
# NOTE: List comprehension too slow!
for key in file_dict:
if not key.startswith(pfx):
continue
for bnch in file_dict[key].values():
chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP))
# Add each HISTORY prettily into header, sorted by timestamp
for s in sorted(chistory):
for i in range(0, len(s), linechar):
subs = s[i:i + linechar]
if i > 0:
subs = ind + subs.lstrip()
hdu.header.add_history(subs)
def _write_header(self, image, hdu):
"""Write header from image object to given HDU."""
hduhdr = hdu.header
# Ginga image header object for the given extension only.
# Cannot use get_header() because that might also return PRI hdr.
ghdr = image.metadata['header']
for key in ghdr:
# Need this to avoid duplication because COMMENT is a weird field
if key.upper() == 'COMMENT':
continue
bnch = ghdr.get_card(key)
# Insert new keyword
if key not in hduhdr:
hduhdr[key] = (bnch.value, bnch.comment)
# Update existing keyword
elif hduhdr[key] != bnch.value:
hduhdr[key] = bnch.value
def _write_mosaic(self, key, outfile):
"""Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
"""
maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k
channel = self.fv.get_channel(self.chname)
image = channel.datasrc[key]
# Prevent writing very large mosaic
if (image.width * image.height) > maxsize:
s = 'Mosaic too large to be written {0}'.format(image.shape)
self.w.status.set_text(s)
self.logger.error(s)
return
# Insert mosaic data and header into output HDU
hdu = fits.PrimaryHDU(image.get_data())
self._write_header(image, hdu)
# Write history to PRIMARY
self._write_history(key, hdu)
# Write to file
if minversion(astropy, '1.3'):
hdu.writeto(outfile, overwrite=True)
else:
hdu.writeto(outfile, clobber=True)
def _write_mef(self, key, extlist, outfile):
"""Write out regular multi-extension FITS data."""
channel = self.fv.get_channel(self.chname)
with fits.open(outfile, mode='update') as pf:
# Process each modified data extension
for idx in extlist:
k = '{0}[{1}]'.format(key, self._format_extname(idx))
image = channel.datasrc[k]
# Insert data and header into output HDU
pf[idx].data = image.get_data()
self._write_header(image, pf[idx])
# Write history to PRIMARY
self._write_history(key, pf['PRIMARY'])
def toggle_save_cb(self, w, res_dict):
"""Only enable saving if something is selected."""
if len(res_dict) > 0:
self.w.save.set_enabled(True)
else:
self.w.save.set_enabled(False)
def save_images(self):
"""Save selected images.
        This uses the Astropy FITS package to save the outputs, regardless
        of which I/O package was used to load the images.
"""
res_dict = self.treeview.get_selected()
clobber = self.settings.get('clobber', False)
self.treeview.clear_selection() # Automatically disables Save button
# If user gives empty string, no suffix.
if self.suffix:
sfx = '_' + self.suffix
else:
sfx = ''
# Also include channel name in suffix. This is useful if user likes to
# open the same image in multiple channels.
if self.settings.get('include_chname', True):
sfx += '_' + self.chname
# Process each selected file. Each can have multiple edited extensions.
for infile in res_dict:
f_pfx = os.path.splitext(infile)[0] # prefix
f_ext = '.fits' # Only FITS supported
oname = f_pfx + sfx + f_ext
outfile = os.path.join(self.outdir, oname)
self.w.status.set_text(
'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
shorten_name(oname, 10)))
self.logger.debug(
'Writing out {0} to {1} ...'.format(infile, oname))
if os.path.exists(outfile) and not clobber:
self.logger.error('{0} already exists'.format(outfile))
continue
bnch = res_dict[infile]
if bnch.path is None or not os.path.isfile(bnch.path):
self._write_mosaic(f_pfx, outfile)
else:
shutil.copyfile(bnch.path, outfile)
self._write_mef(f_pfx, bnch.extlist, outfile)
self.logger.info('{0} written'.format(outfile))
self.w.status.set_text('Saving done, see log')
def close(self):
self.fv.stop_global_plugin(str(self))
def start(self):
self.resume()
def resume(self):
# turn off any mode user may be in
try:
self.modes_off()
except AttributeError:
pass
self.fv.show_status('Press "Help" for instructions')
def stop(self):
self.gui_up = False
self.fv.show_status('')
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'saveimage'
# Append module docstring with config doc for auto insert by Sphinx.
from ginga.util.toolbox import generate_cfg_example # noqa
if __doc__ is not None:
__doc__ += generate_cfg_example('plugin_SaveImage', package='ginga')
# END
|
python
|
from math import inf,nan
from ursina import *
from numpy import dot,cross
from hit_info import HitInfo
#fix bug where ray starts right from face boundary
class voxelcaster():
def __init__(self,chunks,size=16):
self.chunks=chunks
self.size=size
        self.cubeTemplate=[
            [[0,0,0],[0,0,1],[1,0,0]],
            [[0,0,0],[1,0,0],[1,1,0]],
            [[0,0,0],[0,0,1],[0,1,1]],
            [[1,0,0],[1,0,1],[1,1,1]],
            [[0,0,1],[0,1,1],[1,1,1]],
            [[0,1,0],[1,1,0],[1,1,1]],
        ]
self.faceNormals=[[0,-1,0],[0,0,-1],[-1,0,0],[1,0,0],[0,0,1],[0,1,0]]
def voxelcast(self,origin,direction,maxDistance=inf,debug=False):
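        # Walk the ray voxel by voxel: intersect it with the planes of the
        # current cube's six faces, take the first valid exit face (not the
        # face we just entered through), and step into the neighbouring cube
        # along that face's normal. Stop when a non-air ("a") cube is found
        # or maxDistance is exceeded.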
origin=Vec3(*origin)
direction=Vec3(*direction)
#position=Vec3(*origin)
point=origin
normal=Vec3(0,1,0)
oldNormal=None
currentDistance=0
currentWorldCube=Vec3(origin[0]//1,origin[1]//1,origin[2]//1)
#print(direction)
while currentDistance < maxDistance:
cubeType,currentChunk,currentCube=self.getCube(currentWorldCube)
#print(cubeType)
            if cubeType != "a" and cubeType is not None:
return self.createHitInfo(hit=True,point=point,normal=-normal,currentChunk=currentChunk,currentCube=currentCube,cubeType=cubeType,distance=currentDistance)###
else:
error=True
for i in range(6):
start=Vec3(self.cubeTemplate[i][0][0],self.cubeTemplate[i][0][1],self.cubeTemplate[i][0][2])+currentWorldCube
normal=Vec3(self.faceNormals[i][0],self.faceNormals[i][1],self.faceNormals[i][2])
divider=dot(direction,self.faceNormals[i])
if divider != 0:
scalar=(dot(start,self.faceNormals[i])-dot(origin,self.faceNormals[i]))/divider
#print(scalar)
if scalar != nan and scalar != inf and scalar >=0:
point=Vec3(origin+scalar*direction)
if debug:
e=Entity(model="cube", scale=0.1,position=point)
destroy(e,delay=1)
e.fade_out(duration=1)
relPoint=point-currentWorldCube
#print(relPoint)
##print(oldPoint,point)
######switch to basing it off face rather than old point/new point to reduce issues with floating point arithmetic
if relPoint[0] >=0 and relPoint[0] <=1 and relPoint[1] >=0 and relPoint[1] <=1 and relPoint[2] >=0 and relPoint[2] <=1 and oldNormal != -normal and scalar >=0:
##print(oldPoint,point)
oldNormal=normal
currentWorldCube=currentWorldCube+normal
currentDistance=distance(origin,point)
##print(currentDistance)
error=False
break
if error:
print("breaking")
#print(0/0)
break
return self.createHitInfo()###
def createHitInfo(self,hit=False,point=None,normal=None,currentChunk=None,currentCube=None,cubeType=None,distance=None):
hit=HitInfo(hit=hit)
hit.point=point
hit.normal=normal
hit.currentChunk=currentChunk
hit.currentCube=currentCube
hit.cubeType=cubeType
hit.distance=distance
return hit
def getCube(self,position):
currentChunk=Vec3(0,0,0)
currentCube=Vec3(0,0,0)
for i in range(3):
currentChunk[i]=round(position[i]//self.size * self.size)
currentCube[i]=round(position[i] % self.size)
try:
chunkArray=self.getChunkArray(currentChunk)
return chunkArray[round(currentCube[0])][round(currentCube[1])][round(currentCube[2])],currentChunk,currentCube
except Exception as e: ##
#print(e)
return "b",None,None
def getChunkArray(self,chunk):##
return self.chunks[str(round(chunk[0]))+":"+str(round(chunk[1]))+":"+str(round(chunk[2]))].chunkArray
if __name__ == "__main__":
from worldGeneration import chunkGenerator
from chunks import voxelChunk
import random
app=Ursina()
Texture.default_filtering = None
Sky()
generator=chunkGenerator(seed=21)
count=0
chunksDict={}
caster=voxelcaster(chunks=chunksDict)
for i in range(1):
for j in range(4):
for k in range(1):
count+=1
print("\n"*10+"▓"*round(count/2)+"░"*round((256-count)/2))
x,y,z=i*16,j*16,k*16
chunk=generator.generateChunkArrayNew(position=Vec3(x,y,z))
chunk=voxelChunk(position=Vec3(x,y,z),chunkArray=chunk)
chunk.buildChunk()
chunksDict[str(x)+":"+str(y)+":"+str(z)]=chunk
#print("hit start")
for i in range(100):
##print(i)
hitTest=caster.voxelcast(origin=Vec3(random.randint(0,1599)/100,50,random.randint(0,1599)/100),direction=Vec3(0,-1,0),maxDistance=50)
#print(hitTest.currentChunk)
#print(hitTest.currentCube)
#print(hitTest.normal)
Entity(model="cube",scale=0.1,position=hitTest.point,color=color.black)
#print("hit end")
EditorCamera()
"""
pivot=Entity(rotation_z=0,rotation_x=30,rotation_y=0,y=32)
s=DirectionalLight(scale=-30, shadows=False)
s._light.show_frustum()
"""
sun = DirectionalLight(y=10, rotation=(90+40,45,0))
#sun._light.show_frustum()
sun._light.set_shadow_caster(True, 4096, 4096)
#sun._light.show_frustum()
# sun._light.set_shadow_caster(True, 4096, 4096)
#bmin, bmax = scene.get_tight_bounds(chunk)
lens = sun._light.get_lens()
lens.set_near_far(0, 10)
# lens.set_film_offset((bmin.xy + bmax.xy) * .5)
lens.set_film_size(0)
window.fullscreen=True
app.run()
|
python
|
#
# Copyright (C) 2015, Stanislaw Adaszewski
# [email protected]
# http://algoholic.eu
#
# License: 2-clause BSD
#
from markdown import Extension
from markdown.blockprocessors import BlockProcessor
from markdown.treeprocessors import Treeprocessor
from markdown.util import etree, AtomicString
import numpy as np
from collections import defaultdict
import re
from markdown.inlinepatterns import Pattern
from markdown.preprocessors import Preprocessor
_captions = {}
class FigureExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.inlinePatterns.add('figref', FigRefPattern(r'\[([A-Za-z]+ [0-9]+)\]', md), '<emphasis')
# md.inlinePatterns.add('fig', FigPattern(r'^((Figure|Table|Listing) ([0-9]+))\. (.+)', md), '<emphasis')
md.parser.blockprocessors.add('figure',
FigureProcessor(md.parser),
'<hashheader')
md.treeprocessors.add('figure', FigureTreeProcessor(md), '<prettify')
# raise ValueError(md.preprocessors)
# md.preprocessors.add('figure', FigPreproc(md), '<html_block')
def makeExtension(configs={}):
return FigureExtension(configs=configs)
class FigPreproc(Preprocessor):
def run(self, lines):
new_lines = []
in_caption = False
for line in lines:
m = re.match(r'((Table|Figure|Listing) ([0-9]+))\.', line)
if m is not None:
new_lines.append(u'<div class="figcaption">')
new_lines.append(u'')
in_caption = True
if line == '' and in_caption:
# raise ValueError('Here')
new_lines.append('')
new_lines.append(u'</div>')
in_caption = False
new_lines.append(line)
# raise ValueError(new_lines)
return new_lines
class FigRefPattern(Pattern):
def handleMatch(self, m):
hash = m.group(2).lower().replace(' ', '_')
a = etree.Element('a')
a.set('href', '#figref_%s' % hash)
a.text = AtomicString('[%s]' % m.group(2))
return a
class FigPattern(Pattern):
def handleMatch(self, m):
caption = m.group(5).strip() # block[m.span()[1]:].strip()
# raise ValueError(caption)
# p = etree.Element('p')
a = etree.Element('a')
# raise ValueError(m.group(0))
hash = m.group(2).lower().replace('.','').replace(' ', '_')
a.set('name', 'figref_%s' % hash)
a.set('class', 'figcaption')
a.text = '%s. %s' % (m.group(2), caption)
# raise ValueError(a.text)
return a
class FigureProcessor(BlockProcessor):
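    # Turns blocks that start like "Figure 3." or "Table 1." into caption
    # paragraphs carrying an <a name="figref_..."> anchor that FigRefPattern
    # references link to.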
def test(self, parent, block):
return re.match(r'^[A-Za-z]+ [0-9]+\.', block) is not None
def run(self, parent, blocks):
block = blocks.pop(0)
m = re.match(r'[A-Za-z]+ [0-9]+\.', block)
caption = block[m.span()[1]:].strip()
p = etree.SubElement(parent, 'p')
a = etree.SubElement(p, 'a')
hash = m.group(0).lower().replace('.','').replace(' ', '_')
a.set('name', 'figref_%s' % hash)
# a.set('class', 'figcaption')
# bold = etree.SubElement(a, 'b')
# bold.text = m.group(0)
# regular = etree.SubElement(a, 'span')
# regular.text = caption
a.text = '%s %s' % (m.group(0), caption)
_captions[a.get('name')] = caption
# import sys
def stringify(el):
Q = [el]
ret = ''
# raise ValueError(el[0][0].text)
while len(Q) > 0:
el = Q.pop(0)
for ch in el:
Q.append(ch)
if el.text is not None: ret += el.text
if el.tail is not None: ret += el.tail
return ret
class FigureTreeProcessor(Treeprocessor):
def run(self, root, M={}):
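        # Two passes over the element tree: first number every figure/table/
        # listing caption (prefixed with the current section number while
        # between the CONTENT-START and CONTENT-END marker paragraphs), then
        # rewrite "[Figure N]" style references and expand LIST-OF-<TYPE>
        # placeholder paragraphs into linked lists of captions.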
# print 'Running...', dir(self)
hdrtags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
cnt = defaultdict(lambda: 0)
# M = {}
'''Q = [root]
test = u''
while len(Q) > 0:
el = Q.pop(0)
for ch in el: Q.append(ch)
#if el.tail is not None:
# dummy = etree.Element('dummy')
# dummy.text = el.tail
# Q.append(dummy)
if el.text is not None: test += el.text
if el.tail is not None: test += el.tail
# print (el)
print test.encode('utf-8')
# raise ValueError(test[:50])'''
Q = [root]
hdrcnt = []
active = False
nmbrs = ''
L = defaultdict(lambda: [])
while len(Q) > 0:
el = Q.pop(0)
for ch in el: Q.append(ch)
if el.tag == 'p' and el.text == 'CONTENT-START':
active = True
elif el.tag == 'p' and el.text == 'CONTENT-END':
active = False
nmbrs = ''
elif active and el.tag in hdrtags:
lvl = int(el.tag[1])
# lvl = min(lvl, 3)
if lvl <= 3:
hdrcnt = hdrcnt[0:lvl]
if len(hdrcnt) == lvl:
hdrcnt[-1] += 1
else:
hdrcnt += [1]
nmbrs = '.'.join(map(str, hdrcnt)) + '.'
# name = el.get('name')
name = None
if el.tag == 'p' and len(el)>0 and el[0].tag == 'a':
name = el[0].get('name')
# raise ValueError(name)
if name is not None and name.startswith('figref_'):
# raise ValueError(dir(el))
# raise ValueError(name)
type_ = name.split('_')[1]
if type_ == 'figure':
el.set('class', 'figcaption_img')
elif type_ == 'algorithm':
el.set('class', 'figcaption_algo')
else:
el.set('class', 'figcaption')
title = '.'.join(el[0].text.split('.')[1:])
# raise ValueError(title)
if name not in M:
cnt[nmbrs + type_] += 1
M[name] = nmbrs + str(cnt[nmbrs + type_])
L[type_].append({'href': '#' + name, 'el': el, 'text': type_[0].upper() + type_[1:] + ' ' + M[name] + '.' + ''.join(stringify(el).split('.')[1])}) # + _captions[name]})
# raise ValueError(len(type_))
span = etree.Element('span')
el[0].insert(0, span)
span2 = etree.SubElement(span, 'span')
span2.text = type_[0].upper() + type_[1:] + ' ' + M[name] + '. '
span2.tail = title
el[0].text = ''
md = self.markdown
this = self
def rewrite_self_references(txt):
fr = md.inlinePatterns['figref']
rx = fr.getCompiledRegExp()
# matches = rx.findall(txt)
while True:
match = rx.match(txt)
if match is not None:
pos = match.start(2)
endpos = match.end(2)
# raise ValueError(match.end(2))
a = fr.handleMatch(match)
this.run(a, M)
txt = txt[0:pos] + a.text[1:-1] + txt[endpos:]
else:
break
return txt
Q = [(-1, None, root)]
insert_cnt = defaultdict(lambda : 0)
while len(Q) > 0:
(idx, parent, el) = Q.pop(0)
# print 'Here', el
cnt = 0
for ch in el:
Q.append((cnt, el, ch))
cnt += 1
href = el.get('href')
if el.tag == 'a' and href is not None and href.startswith('#figref_'):
type_ = href.split('_')[1]
#if href in M:
# pass
#else:
# cnt[type_] += 1
# M[href] = str(cnt[type_])
el.text = '%s %s' % (type_[0].upper() + type_[1:], M[href[1:]])
elif el.tag == 'p' and el.text is not None and el.text.startswith('LIST-OF-'):
type_ = el.text[8:-1].lower()
# raise ValueError(L['figure'])
for fig in L[type_]:
p = etree.Element('p')
p.set('style', 'text-align: left; width: 75%;')
# p.text = fig['text']
a = etree.SubElement(p, 'a')
a.set('href', fig['href'])
# a.set('style', 'color: white; font-size: 1px; height: 1el; display: block-inline;')
a.text = rewrite_self_references(fig['text']) # ' + fig['text'][:20]
self.markdown.treeprocessors['myreferences'].run(p)
parent.insert(insert_cnt[parent] + idx, p)
insert_cnt[parent] += 1
# Q.append((-1, parent, p))
parent.remove(el)
|
python
|
"""
zoom.snippets
"""
import zoom
import zoom.html as h
class SystemSnippet(zoom.utils.Record):
"""SystemSnippet
A chunk of text (usually HTML) that can be rendered by
placing the {{snippet}} tag in a document or template.
>>> db = zoom.database.setup_test()
>>> snippets = get_snippets(db)
>>> snippets.delete(name='test')
>>> snippets.find(name='test')
[]
>>> t = snippets.put(Snippet(name='test', body='some text'))
>>> snippets.find(name='test')
[<SystemSnippet {'key': 'test', 'name': 'test', 'url': '/content/snippets/test', 'body': 'some text', 'link': '<a href="/content/snippets/test">test</a>'}>]
"""
@property
def link(self):
"""Return a link"""
return h.a(self.name, href=self.url)
@property
def url(self):
return '/content/snippets/' + self.key
@property
def key(self):
return zoom.utils.id_for(self.name)
def allows(self, user, action):
"""Item level policy"""
return True
Snippet = SystemSnippet
def snippet(name, default='', variant=None):
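    """Return the body of the named snippet, counting an impression.
    Looks up the first snippet matching ``name`` (and ``variant``), increments
    its ``impressions`` counter, and returns its body; returns ``default``
    when no matching snippet exists.
    """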
snippets = get_snippets()
snippet = snippets.first(name=name, variant=variant)
if snippet:
snippet['impressions'] = snippet.get('impressions', 0) + 1
snippets.put(snippet)
result = snippet.body
else:
result = default
return result
def get_snippets(db=None):
return zoom.store_of(Snippet, db=db)
|
python
|
# -*- coding: utf-8 -*-
#
from flask import Flask, Blueprint, make_response, jsonify, request
from flask_bcrypt import check_password_hash
from app import db, app, return_response
# # Import module models (i.e. User)
from app.mod_user.models import User
# Define the blueprint: 'auth', set its url prefix: app.url/auth
mod_index = Blueprint('index', __name__, url_prefix='/')
@mod_index.route("login/", methods=['POST'])
def login():
print request.json
username = request.json.get("username")
password = request.json.get("password")
user = User.query.filter_by(username = username).first()
if user is None or not check_password_hash(user.password, password):
return return_response(400, "Wrong input")
#return jsonify({'error':'wronginput'}), 400
# Return token key to user
return return_response(200, "OK", {'token':user.generate_token_key()})
#return jsonify({'toke':user.generate_token_key()})
# Register new user
@mod_index.route("register/", methods=['POST'])
def new_user():
username = request.json.get('username')
email = request.json.get('email')
password = request.json.get('password')
print username,email,password
    if not username or not email or not password:
        return return_response(400, "Missing properties")
    already_registered = User.query.filter_by(username=username).first()
    if already_registered is not None:
        return return_response(400, "User exists")
newUser = User(username, email, password)
db.session.add(newUser)
db.session.commit()
    # Return that the user was created
return return_response(201, "OK", {'result':'User created'})
|
python
|
import os
from unittest import mock
import pytest
import requests_mock
from ewtwitterbot.imagery import get_quote_image
from ewtwitterbot.mastodon_bot import (
MastodonConfigurationError,
MastodonMediaError,
get_credentials_from_environ,
get_last_toot_id,
respond_to_toots,
save_last_toot_id,
upload_image_and_description,
)
@pytest.fixture
def save_a_toot_id():
with open("test_last_toot.txt", "w") as f:
f.write(str(14))
def test_retrieve_last_toot_id_saved(save_a_toot_id):
assert get_last_toot_id("test_last_toot.txt") == 14
def test_save_toot_id():
if os.path.exists("test_last_toot.txt"):
os.remove("test_last_toot.txt")
save_last_toot_id(40, "test_last_toot.txt")
assert os.path.exists("test_last_toot.txt")
assert get_last_toot_id("test_last_toot.txt") == 40
def test_retrieve_nonexistent_toot_id():
if os.path.exists("test_last_toot.txt"):
os.remove("test_last_toot.txt")
assert get_last_toot_id("test_last_toot.txt") == 1
def test_mastodon_configuration_checks():
names_to_remove = [
"MASTODON_CLIENT_SECRET_FILE",
"MASTODON_USER_SECRET_FILE",
"MASTODON_API_BASE_URL",
]
modified_environ = {k: v for k, v in os.environ.items() if k not in names_to_remove}
with mock.patch.dict(os.environ, modified_environ, clear=True):
with pytest.raises(MastodonConfigurationError):
get_credentials_from_environ()
@pytest.fixture
def mastodon_environ_patch():
return {
"MASTODON_API_BASE_URL": "https://botsin.space",
"MASTODON_CLIENT_SECRET_FILE": "test_ewbot_clientcred.secret",
"MASTODON_USER_SECRET_FILE": "test_ewbot_usercred.secret",
}
def test_mastodon_media_upload_success(mastodon_environ_patch):
with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False):
with requests_mock.Mocker() as m:
m.post(
"https://botsin.space/api/v1/media",
status_code=200,
json={
"id": "234567",
"type": "image",
"url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png",
"preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501
"remote_url": None,
"text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ",
"meta": {
"focus": {"x": -0.69, "y": 0.42},
"original": {
"width": 640,
"height": 480,
"size": "640x480",
"aspect": 1.3333333333333333,
},
"small": {
"width": 461,
"height": 346,
"size": "461x346",
"aspect": 1.3323699421965318,
},
},
"description": "test uploaded via api",
"blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}",
},
)
get_quote_image("Hi There")
assert (
upload_image_and_description(
get_credentials_from_environ(),
"quote_image.png",
alt_text="Hi there",
)
== 234567
)
def test_media_upload_error(mastodon_environ_patch):
with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False):
with requests_mock.Mocker() as m:
m.post(
"https://botsin.space/api/v1/media",
status_code=200,
json={
"id": "234567",
"type": "unknown",
"url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png",
"preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501
"remote_url": None,
"text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ",
"meta": {
"focus": {"x": -0.69, "y": 0.42},
"original": {
"width": 640,
"height": 480,
"size": "640x480",
"aspect": 1.3333333333333333,
},
"small": {
"width": 461,
"height": 346,
"size": "461x346",
"aspect": 1.3323699421965318,
},
},
"description": "test uploaded via api",
"blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}",
},
)
get_quote_image("Hi There")
with pytest.raises(MastodonMediaError):
upload_image_and_description(
get_credentials_from_environ(),
"quote_image.png",
alt_text="Hi there",
)
def test_mastodon_mention_cycle(mastodon_environ_patch):
with mock.patch.dict(os.environ, mastodon_environ_patch, clear=False):
with requests_mock.Mocker() as m:
m.post(
"https://botsin.space/api/v1/media",
status_code=200,
json={
"id": "234567",
"type": "image",
"url": "https://files.botsin.space/media_attachments/files/022/033/641/original/quote_image.png",
"preview_url": "https://files.botsin.space/media_attachments/files/022/033/641/small/quote_image.png", # noqa: E501
"remote_url": None,
"text_url": "https://botsin.space/media/4Zj6ewxzzzDi0g8JnZQ",
"meta": {
"focus": {"x": -0.69, "y": 0.42},
"original": {
"width": 640,
"height": 480,
"size": "640x480",
"aspect": 1.3333333333333333,
},
"small": {
"width": 461,
"height": 346,
"size": "461x346",
"aspect": 1.3323699421965318,
},
},
"description": "test uploaded via api",
"blurhash": "UFBWY:8_0Jxv4mx]t8t64.%M-:IUWGWAt6M}",
},
)
m.get(
"https://botsin.space/api/v1/notifications",
status_code=200,
json=[
{
"id": 4772149,
"type": "mention",
"created_at": "2019-11-23T07:29:18.903Z",
"account": {
"id": 18639,
"username": "andrlik",
"acct": "[email protected]",
"display_name": "Daniel Andrlik",
"locked": True,
"bot": False,
"discoverable": True,
"group": False,
"created_at": "2019-11-23T07:29:18.903Z",
"note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501
"url": "https://wandering.shop/@andrlik",
"avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"followers_count": 81,
"following_count": 148,
"statuses_count": 869,
"last_status_at": "2019-11-23T07:29:18.903Z",
"emojis": [],
"fields": [
{
"name": "Website",
"value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": "2022-04-29T14:58:32.014+00:00",
},
{
"name": "Twitter",
"value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Github",
"value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Podcast",
"value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
],
},
"status": {
"id": 108216032166128570,
"created_at": "2019-11-23T07:29:18.903Z",
"in_reply_to_id": None,
"in_reply_to_account_id": None,
"sensitive": False,
"spoiler_text": "",
"visibility": "public",
"language": "en",
"uri": "https://wandering.shop/users/andrlik/statuses/108216031335496737",
"url": "https://wandering.shop/@andrlik/108216031335496737",
"replies_count": 0,
"reblogs_count": 0,
"favourites_count": 0,
"favourited": False,
"reblogged": False,
"muted": False,
"bookmarked": False,
"content": '<p><span class="h-card"><a href="https://botsin.space/@ewbot" class="u-url mention" rel="nofollow noopener noreferrer" target="_blank">@<span>ewbot</span></a></span> Quote please</p>', # noqa: E501
"reblog": None,
"account": {
"id": 18639,
"username": "andrlik",
"acct": "[email protected]",
"display_name": "Daniel Andrlik",
"locked": True,
"bot": False,
"discoverable": True,
"group": False,
"created_at": "2019-11-23T07:29:18.903Z",
"note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501
"url": "https://wandering.shop/@andrlik",
"avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"followers_count": 81,
"following_count": 148,
"statuses_count": 869,
"last_status_at": "2019-11-23T07:29:18.903Z",
"emojis": [],
"fields": [
{
"name": "Website",
"value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": "2022-04-29T14:58:32.014+00:00",
},
{
"name": "Twitter",
"value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Github",
"value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Podcast",
"value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
],
},
"media_attachments": [],
"mentions": [
{
"id": 108215876835523723,
"username": "ewbot",
"url": "https://botsin.space/@ewbot",
"acct": "ewbot",
}
],
"tags": [],
"emojis": [],
"card": None,
"poll": None,
},
}
],
)
m.post(
"https://botsin.space/api/v1/statuses",
status_code=200,
json={
"id": 108216032166128570,
"created_at": "2019-11-23T07:29:18.903Z",
"in_reply_to_id": None,
"in_reply_to_account_id": None,
"sensitive": False,
"spoiler_text": "",
"visibility": "public",
"language": "en",
"uri": "https://wandering.shop/users/andrlik/statuses/108216031335496737",
"url": "https://wandering.shop/@andrlik/108216031335496737",
"replies_count": 0,
"reblogs_count": 0,
"favourites_count": 0,
"favourited": False,
"reblogged": False,
"muted": False,
"bookmarked": False,
"content": '<p><span class="h-card"><a href="https://botsin.space/@ewbot" class="u-url mention" rel="nofollow noopener noreferrer" target="_blank">@<span>ewbot</span></a></span> Quote please</p>', # noqa: E501
"reblog": None,
"account": {
"id": 18639,
"username": "andrlik",
"acct": "[email protected]",
"display_name": "Daniel Andrlik",
"locked": True,
"bot": False,
"discoverable": True,
"group": False,
"created_at": "2019-11-23T07:29:18.903Z",
"note": '<p>Product exec, SFF Writer, Producer and GM of the Explorers Wanted actual play podcast. </p><p><a href="https://wandering.shop/tags/ActuallyAutistic" class="mention hashtag" rel="nofollow noopener noreferrer" target="_blank">#<span>ActuallyAutistic</span></a>/ADHD, with a dash of GAD for spice.</p><p>He/him</p><p>Your mom loves me.</p><p>Location: secluded in a blanket fort</p>', # noqa: E501
"url": "https://wandering.shop/@andrlik",
"avatar": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"avatar_static": "https://files.botsin.space/cache/accounts/avatars/000/018/639/original/91b7036b36a321fe.jpeg", # noqa: E501
"header": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"header_static": "https://files.botsin.space/cache/accounts/headers/000/018/639/original/08dfb894386d40d0.jpeg", # noqa: E501
"followers_count": 81,
"following_count": 148,
"statuses_count": 869,
"last_status_at": "2019-11-23T07:29:18.903Z",
"emojis": [],
"fields": [
{
"name": "Website",
"value": '<a href="https://www.andrlik.org" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">andrlik.org</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": "2022-04-29T14:58:32.014+00:00",
},
{
"name": "Twitter",
"value": '<a href="https://twitter.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">twitter.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Github",
"value": '<a href="https://github.com/andrlik" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://</span><span class="">github.com/andrlik</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
{
"name": "Podcast",
"value": '<a href="https://www.explorerswanted.fm" rel="nofollow noopener noreferrer" target="_blank"><span class="invisible">https://www.</span><span class="">explorerswanted.fm</span><span class="invisible"></span></a>', # noqa: E501
"verified_at": None,
},
],
},
"media_attachments": [],
"mentions": [
{
"id": 108215876835523723,
"username": "ewbot",
"url": "https://botsin.space/@ewbot",
"acct": "ewbot",
}
],
"tags": [],
"emojis": [],
"card": None,
"poll": None,
},
)
m.get(
"https://quoteservice.andrlik.org/api/groups/ew/get_random_quote/",
json={
"quote": "We always go right.",
"quote_rendered": "<p>We always go right.</p>",
"citation": "Episode 3",
"citation_url": "https://www.explorerswanted.fm/3",
"source": {
"name": "Nix",
"slug": "ew-nix",
"description": "Glaive",
"description_rendered": "<p>Glaive</p>",
},
},
)
m.get(
"https://quoteservice.andrlik.org/api/sources/",
json=[{"name": "Nix", "slug": "ew-nix"}],
)
m.get(
"https://quoteservice.andrlik.org/api/sources/ew-nix/generate_sentence/",
json={"sentence": "fear the snek"},
)
respond_to_toots("test_last_toot.txt")
assert get_last_toot_id("test_last_toot.txt") == 4772149
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 24 19:04:13 2018
@author: kyungdoehan
"""
import numpy as np
#%% Making square arrays of x, y, z of the overall topography
class XYZ_data:
def __init__(self, a, x, y, z):
self.X = np.zeros((a, a))
self.Y = np.zeros((a, a))
self.Z = np.zeros((a, a))
for i in range(a):
for j in range(a):
self.X[j, i] = x[i + j * a]
self.Y[j, i] = y[i + j * a]
self.Z[j, i] = z[i + j * a]
def XYZ(grid, x, y, z):
return XYZ_data(grid, x, y, z)
#%%
class delz_ratio:
def __init__(self, i):
self.dzratio = np.exp(np.arange(1, i + 1) / 10)
self.dzratio = self.dzratio / np.sum(self.dzratio)
def dzratio(i):
return delz_ratio(i)
#%%
class bottom:
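    # Bottom elevations for each model layer: the thickness between the top
    # surface and dat_var is split over the inz variable layers using the
    # cumulative dzratio, then ifixed additional layers are stacked on, each
    # shifted by dat_new / ifixed from the previous bottom.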
def __init__(self, inz, ifixed, j, top, dat_var, dat_new, dzratio):
self.tot_b = top - dat_var
self.bot = np.zeros((inz, j, j))
for irow in range(j):
for icol in range(j):
self.bot[:, irow, icol] = top[irow, icol] - \
np.cumsum(self.tot_b[irow, icol] * dzratio)
self.bot_fixed = np.zeros((ifixed, j, j))
self.bot_fixed[0, :, :] = self.bot[inz - 1, :, :] + dat_new / ifixed
for i in range(ifixed - 1):
self.bot_fixed[i+1, :, :] = self.bot_fixed[i, :, :]+dat_new/ifixed
self.bot = np.vstack((self.bot, self.bot_fixed))
def bot(inz, ifixed, j, top, dat_var, dat_new, dzratio):
return bottom(inz, ifixed, j, top, dat_var, dat_new, dzratio)
#%%
class delz:
def __init__(self, top, bot, nz, ny, nx):
self.dzs = np.zeros((nz, ny, nx), dtype=np.float32)
self.dzs[0, :, :] = top - bot[0, :, :]
for ilay in range(nz-1):
self.dzs[ilay+1, :, :] = bot[ilay, :, :] - bot[ilay+1, :, :]
def dzs(top, bot, nz, ny, nx):
return delz(top, bot, nz, ny, nx)
#%%
class nodes:
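    # Node (cell-centre) elevations: each layer's bottom plus half of that
    # layer's thickness.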
def __init__(self, bot, dzs, nz, ny, nx):
self.node = np.zeros((nz, ny, nx), dtype=np.float32)
for irow in range(ny):
for icol in range(nx):
self.node[:, irow, icol] = bot[:, irow, icol] + 0.5 * dzs[:, irow, icol]
def node(bot, dzs, nz, ny, nx):
return nodes(bot, dzs, nz, ny, nx)
|
python
|
# -*- coding: utf-8 -*-
import itertools
import os
import plistlib
import unicodedata
import sys
from xml.etree.ElementTree import Element, SubElement, tostring
"""
You should run your script via /bin/bash with all escape options ticked.
The command line should be
python yourscript.py "{query}" arg2 arg3 ...
"""
UNESCAPE_CHARACTERS = u""" ;()"""
_MAX_RESULTS_DEFAULT = 9
preferences = plistlib.readPlist('info.plist')
bundleid = preferences['bundleid']
class Item(object):
@classmethod
def unicode(cls, value):
try:
items = value.iteritems()
except AttributeError:
return unicode(value)
else:
return dict(map(unicode, item) for item in items)
def __init__(self, attributes, title, subtitle, icon=None):
self.attributes = attributes
self.title = title
self.subtitle = subtitle
self.icon = icon
def __str__(self):
return tostring(self.xml(), encoding='utf-8')
def xml(self):
item = Element(u'item', self.unicode(self.attributes))
for attribute in (u'title', u'subtitle', u'icon'):
value = getattr(self, attribute)
if value is None:
continue
try:
(value, attributes) = value
            except (TypeError, ValueError):
attributes = {}
elem = SubElement(item, attribute, self.unicode(attributes))
elem.text = unicode(value)
return item
def args(characters=None):
return tuple(unescape(decode(arg), characters) for arg in sys.argv[1:])
def config():
return _create('config')
def decode(s):
return unicodedata.normalize('NFC', s.decode('utf-8'))
def get_uid(uid):
return u'-'.join(map(unicode, (bundleid, uid)))
def unescape(query, characters=None):
if not characters:
characters = UNESCAPE_CHARACTERS
for character in characters:
query = query.replace('\\%s' % character, character)
return query
def write(text):
sys.stdout.write(text)
def xml(items, maxresults=_MAX_RESULTS_DEFAULT):
root = Element('items')
for item in itertools.islice(items, maxresults):
root.append(item.xml())
return tostring(root, encoding='utf-8')
def _create(path):
if not os.path.isdir(path):
os.mkdir(path)
if not os.access(path, os.W_OK):
raise IOError('No write access: %s' % path)
return path
def work(volatile):
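    """Return this workflow's data directory, creating it if needed.
    ``volatile=True`` selects Alfred's cache location, ``False`` the
    persistent Workflow Data location.
    """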
path = {
True: '~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data',
False: '~/Library/Application Support/Alfred 2/Workflow Data'
}[bool(volatile)]
return _create(os.path.join(os.path.expanduser(path), bundleid))
def config_set(key, value, volatile=True):
filepath = os.path.join(work(volatile), 'config.plist')
try:
conf = plistlib.readPlist(filepath)
except IOError:
conf = {}
conf[key] = value
plistlib.writePlist(conf, filepath)
def config_get(key, default=None, volatile=True):
filepath = os.path.join(work(volatile), 'config.plist')
try:
conf = plistlib.readPlist(filepath)
except IOError:
conf = {}
if key in conf:
return conf[key]
return default
class AlfredWorkflow(object):
_reserved_words = []
def write_text(self, text):
print(text)
def write_item(self, item):
return self.write_items([item])
def write_items(self, items):
return write(xml(items, maxresults=self.max_results))
def message_item(self, title, message, icon=None, uid=0):
return Item({u'uid': get_uid(uid), u'arg': '',
u'ignore': 'yes'}, title, message, icon)
def warning_item(self, title, message, uid=0):
return self.message_item(title=title, message=message, uid=uid,
icon='warning.png')
def error_item(self, title, message, uid=0):
return self.message_item(title=title, message=message, uid=uid,
icon='error.png')
def exception_item(self, title, exception, uid=0):
message = str(exception).replace('\n', ' ')
return self.error_item(title=title, message=message, uid=uid)
def route_action(self, action, query=None):
method_name = 'do_{}'.format(action)
if not hasattr(self, method_name):
raise RuntimeError('Unknown action {}'.format(action))
method = getattr(self, method_name)
return method(query)
def is_command(self, query):
try:
command, rest = query.split(' ', 1)
except ValueError:
command = query
command = command.strip()
return command in self._reserved_words or \
hasattr(self, 'do_{}'.format(command))
|
python
|
"""This module serves as a container to hold the global
:class:`~.ShowBase.ShowBase` instance, as an alternative to using the builtin
scope.
Note that you cannot directly import `base` from this module since ShowBase
may not have been created yet; instead, ShowBase dynamically adds itself to
this module's scope when instantiated."""
__all__ = []
from .ShowBase import ShowBase, WindowControls
from direct.directnotify.DirectNotifyGlobal import directNotify, giveNotify
from panda3d.core import VirtualFileSystem, Notify, ClockObject, PandaSystem
from panda3d.core import ConfigPageManager, ConfigVariableManager
from panda3d.core import NodePath, PGTop
from . import DConfig as config
__dev__ = config.GetBool('want-dev', __debug__)
#: The global instance of the :class:`panda3d.core.VirtualFileSystem`.
vfs = VirtualFileSystem.getGlobalPtr()
ostream = Notify.out()
globalClock = ClockObject.getGlobalClock()
cpMgr = ConfigPageManager.getGlobalPtr()
cvMgr = ConfigVariableManager.getGlobalPtr()
pandaSystem = PandaSystem.getGlobalPtr()
# This is defined here so GUI elements can be instantiated before ShowBase.
render2d = NodePath("render2d")
aspect2d = render2d.attachNewNode(PGTop("aspect2d"))
hidden = NodePath("hidden")
# Set direct notify categories now that we have config
directNotify.setDconfigLevels()
def run():
"""Deprecated alias for :meth:`base.run() <.ShowBase.run>`."""
assert ShowBase.notify.warning("run() is deprecated, use base.run() instead")
base.run()
def inspect(anObject):
"""Opens up a :mod:`direct.tkpanels.Inspector` GUI panel for inspecting an
object."""
# Don't use a regular import, to prevent ModuleFinder from picking
# it up as a dependency when building a .p3d package.
import importlib
Inspector = importlib.import_module('direct.tkpanels.Inspector')
return Inspector.inspect(anObject)
import sys
if sys.version_info >= (3, 0):
import builtins
else:
import __builtin__ as builtins
builtins.inspect = inspect
del sys
# this also appears in AIBaseGlobal
if (not __debug__) and __dev__:
ShowBase.notify.error("You must set 'want-dev' to false in non-debug mode.")
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019
@author: sercangul
"""
a, b = map(float, input().split())
n = int(float(input()))
p = a / b
# Probability that the first defect occurs within the first n inspections
# (geometric distribution with success probability p = a / b).
print(round(sum((1 - p) ** (k - 1) * p for k in range(1, n + 1)), 3))
|
python
|
# -*- coding: utf-8 -*-
from pygraph.fillpolygon_edge import fillPolygonEdge
from pygraph.util import mkGraph, saveG, enLarge
g = mkGraph((80, 60))
points = [
(10, 40),
(20, 10),
(30, 10),
(40, 5),
(60, 10),
(75, 25),
(30, 50)
]
fillPolygonEdge(g, points)
saveG("polygon_edge.png", g)
saveG("polygon_edge_large.png", enLarge(g, 10))
|
python
|
from .fasta import is_fasta
from .fasta import read_fasta
|
python
|
import streamlit as st
st.title('Streamlit custom theme tutorial')
st.subheader('Powered by @dataprojectswithMJ')
st.multiselect('Choose your favourite coding language(s)',
options=['Python','Java','Golang','C++'])
st.radio('Choose your favourite operation system:',
['Windows','Linux','MacOS'])
st.date_input('Enter your date of birth')
st.text_area('About you:')
|
python
|
import json
import requests
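# NOTE: UnknownError is raised below but not defined or imported in this
# module; it is assumed to be provided elsewhere in the package (e.g. a
# shared errors module).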
class Answer:
def __init__(self, client, input):
self.__client = client
self.id = input['id']
self.answer = input['answer']
self.likes_count= input['likesCount']
self.created_at = input['createdAt']
self.tell = input['tell']
self.sender_status = input['senderStatus']
self.sender = input['sender']
self.recipient_id = input['userId']
self.is_current_user_tell_sender = input['isCurrentUserTellSender']
        self.likes = input['likes'] # to-do: put this in a separate class (?)
def is_anonymous_tell(self):
"""
        Checks whether or not the tell was sent anonymously.
        Sender status values:
        0: Anonymous
        1: Unknown
        2: Public Sender
        Returns:
            True: The tell was sent anonymously
            False: The tell was sent by a public sender
"""
if self.sender_status == 0:
return True
return False
def like(self):
"""
Likes the answer on the user's profile
Returns:
            True (bool): Answer has been liked
UnknownError (exception): UnknownError has occurred
"""
body = {
"answerId": self.id,
"userId": self.recipient_id,
"limit": 13
}
r = requests.post(self.__client.create_like_url, json=body, headers=self.__client.auth_header)
if r.status_code == 200:
return True
raise UnknownError
def delete(self):
"""
Deletes the answer on the user's profile
"""
body = {
'answerId': self.id,
'userId': self.recipient_id,
'limit': 13
}
r = requests.post(self.__client.delete_answer_url, json=body, headers=self.__client.auth_header)
if r.status_code == 200:
return True
raise UnknownError
|
python
|
import mysql.connector
from mysql.connector import errorcode
try:
    con = mysql.connector.connect(user='niminimda', password='123456', host='127.0.0.1', database='test')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("something is wrong with user or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("db doesn't exists")
else:
print(err)
else:
query = "SELECT * FROM employee; "
cursor = con.cursor()
cursor.execute(query)
myData = cursor.fetchall()
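    # Sort rows by the third column, then make one pass over adjacent rows
    # with equal third-column values, swapping so the larger second column
    # comes first; finally print the rows in reverse (descending) order.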
myData.sort(key=lambda x: x[2])
for item in range(0, len(myData) - 1):
if myData[item][2] == myData[item + 1][2]:
if myData[item][1] < myData[item + 1][1]:
x = myData[item]
myData[item] = myData[item + 1]
myData[item + 1] = x
for y in range(len(myData) - 1, -1, -1):
q = myData[y]
print(q[0], q[1], q[2])
cursor.close()
con.close()
|
python
|
import os, sys
import json, requests
# TODO: NEED TO UPDATE TO HAVE FILES RIGHT OUT AS THE TEAM ID NUMBER
# TODO: NOT THE TEAM NAME.
TEAM_ID = {
'fuel' : 4523,
'fusion' : 4524,
'outlaws' : 4525,
'uprising' : 4402,
'excelsior' : 4403,
'shock' : 4404,
'valiant' : 4405,
'gladiators': 4406,
'mayhem' : 4407,
'dragons' : 4408,
'dynasty' : 4409,
'spitfire' : 4410
}
OWLURL = 'https://api.overwatchleague.com'
STANDINGS = '/standings'
RANKING = '/ranking'
SCHEDULE = '/schedule'
save_path = './data/'
standings_file = open(save_path+'standings.json', 'w+')
ranking_file = open(save_path+'ranking.json', 'w+')
schedule_file = open(save_path+'schedule.json', 'w+')
standings_request = requests.get(OWLURL+STANDINGS)
standings_json_data = standings_request.json()
standings_data_str = json.dump(standings_json_data, standings_file)
ranking_request = requests.get(OWLURL+RANKING)
ranking_json_data = ranking_request.json()
ranking_data_str = json.dump(ranking_json_data, ranking_file)
schedule_request = requests.get(OWLURL+SCHEDULE)
schedule_json_data = schedule_request.json()
schedule_data_str = json.dump(schedule_json_data, schedule_file)
save_path = './data/teams/'
for team, id in TEAM_ID.items():
file = open(save_path+'{:s}'.format(team)+'.json', 'w+')
request = requests.get(OWLURL+'/teams/{:d}'.format(id))
json_data = request.json()
data_str = json.dump(json_data,file)
|
python
|
from ._Session import Session
from ._User import User
from ._UserAffiliation import UserAffiliation
from ._UserEntityPermission import UserEntityPermission
from ._UserRoles import UserRoles
|
python
|
"""
Get reaction forces at the support nodes of a form diagram.
"""
from ghpythonlib.componentbase import executingcomponent as component
import rhinoscriptsyntax as rs
class SupportNodeResultsComponent(component):
def RunScript(self, form, support_node_keys):
if form:
support_node_keys = support_node_keys or list(form.support_nodes())
reaction_forces = [rs.AddPoint(*form.reaction_force(nd)) for nd in support_node_keys]
return reaction_forces
|
python
|
from carts.models import Cart
from django.http import HttpRequest
from products.models import Product
from products.api.serializers import ProductSerializer
from rest_framework import serializers
class CartSerializer(serializers.ModelSerializer):
products = serializers.SerializerMethodField()
class Meta:
model = Cart
fields = (
'user',
'products',
'subtotal',
'total',
'updated',
'timestamp',
)
def get_products(self, obj):
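        # Serialize the products attached to the requesting user's cart
        # (the first Cart found for that user); return an empty list when
        # the user has no cart.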
l = []
user = self.context.get('request').user
results = Cart.objects.filter(user=user)
if results.exists():
for product in results:
l.append(product.products.all())
product_list = l[0]
products = product_list
response = ProductSerializer(products, many=True).data
else:
response = []
return response
|
python
|
import os
import re
import json
from setuptools import setup
with open('Setup.lock') as f:
c = json.loads(f.read())
with open(os.path.join(c['name'], '__init__.py')) as f:
version = re.findall("^__version__ = '(.*)'", f.read())[0]
with open('Pipfile.lock') as f:
p = json.loads(f.read())
def _install_requires():
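    # Yield "package<spec>" requirement strings from the Pipfile.lock
    # "default" section, where each value is either a version string or a
    # dict containing a "version" key.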
for k, v in p['default'].items():
if isinstance(v, str):
yield k + v
else:
yield k + v['version']
install_requires = list(_install_requires())
kwargs = {
'name': c['name'],
'version': version,
'description': c['description'],
'url': c['url'],
'author': c['author'],
'author_email': c['author_email'],
'license': c['license'],
'packages': c.get('packages', []),
'zip_safe': False,
'scripts': c.get('scripts',[]),
'package_data': c.get('package_data',{}),
'install_requires': install_requires,
'classifiers': c.get('classifiers', [])
}
setup(**kwargs)
|
python
|
""" openconfig_local_routing
This module describes configuration and operational state data
for routes that are locally generated, i.e., not created by
dynamic routing protocols. These include static routes, locally
created aggregate routes for reducing the number of constituent
routes that must be advertised, summary routes for IGPs, etc.
This model expresses locally generated routes as generically as
possible, avoiding configuration of protocol\-specific attributes
at the time of route creation. This is primarily to avoid
assumptions about how underlying router implementations handle
route attributes in various routing table data structures they
maintain. Hence, the definition of locally generated routes
essentially creates 'bare' routes that do not have any protocol\-
specific attributes.
When protocol\-specific attributes must be attached to a route
(e.g., communities on a locally defined route meant to be
advertised via BGP), the attributes should be attached via a
protocol\-specific policy after importing the route into the
protocol for distribution (again via routing policy).
"""
import sys
from collections import OrderedDict
from ydk.types import Entity as _Entity_
from ydk.types import EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class LOCALDEFINEDNEXTHOP(Identity):
"""
A base identity type of local defined next\-hops
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:LOCAL_DEFINED_NEXT_HOP"):
if sys.version_info > (3,):
super().__init__(ns, pref, tag)
else:
super(LOCALDEFINEDNEXTHOP, self).__init__(ns, pref, tag)
class LocalRoutes(_Entity_):
"""
Top\-level container for local routes
.. attribute:: config
Configuration data for locally defined routes
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.Config>`
.. attribute:: state
Operational state data for locally defined routes
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.State>`
**config**\: False
.. attribute:: static_routes
Enclosing container for the list of static routes
**type**\: :py:class:`StaticRoutes <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes>`
.. attribute:: local_aggregates
Enclosing container for locally\-defined aggregate routes
**type**\: :py:class:`LocalAggregates <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes, self).__init__()
self._top_entity = None
self.yang_name = "local-routes"
self.yang_parent_name = "openconfig-local-routing"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("config", ("config", LocalRoutes.Config)), ("state", ("state", LocalRoutes.State)), ("static-routes", ("static_routes", LocalRoutes.StaticRoutes)), ("local-aggregates", ("local_aggregates", LocalRoutes.LocalAggregates))])
self._leafs = OrderedDict()
self.config = LocalRoutes.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = LocalRoutes.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self.static_routes = LocalRoutes.StaticRoutes()
self.static_routes.parent = self
self._children_name_map["static_routes"] = "static-routes"
self.local_aggregates = LocalRoutes.LocalAggregates()
self.local_aggregates.parent = self
self._children_name_map["local_aggregates"] = "local-aggregates"
self._segment_path = lambda: "openconfig-local-routing:local-routes"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes, [], name, value)
class Config(_Entity_):
"""
Configuration data for locally defined routes
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "local-routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict()
self._segment_path = lambda: "config"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path()
self._is_frozen = True
class State(_Entity_):
"""
Operational state data for locally defined routes
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "local-routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict()
self._segment_path = lambda: "state"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path()
self._is_frozen = True
class StaticRoutes(_Entity_):
"""
Enclosing container for the list of static routes
.. attribute:: static
List of locally configured static routes
**type**\: list of :py:class:`Static <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes, self).__init__()
self.yang_name = "static-routes"
self.yang_parent_name = "local-routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("static", ("static", LocalRoutes.StaticRoutes.Static))])
self._leafs = OrderedDict()
self.static = YList(self)
self._segment_path = lambda: "static-routes"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes, [], name, value)
class Static(_Entity_):
"""
List of locally configured static routes
.. attribute:: prefix (key)
Reference to the destination prefix list key
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
**refers to**\: :py:class:`prefix <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.Config>`
.. attribute:: config
Configuration data for static routes
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.Config>`
.. attribute:: state
Operational state data for static routes
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.State>`
**config**\: False
.. attribute:: next_hops
Configuration and state parameters relating to the next\-hops that are to be utilised for the static route being specified
**type**\: :py:class:`NextHops <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static, self).__init__()
self.yang_name = "static"
self.yang_parent_name = "static-routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['prefix']
self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.State)), ("next-hops", ("next_hops", LocalRoutes.StaticRoutes.Static.NextHops))])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
])
self.prefix = None
self.config = LocalRoutes.StaticRoutes.Static.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = LocalRoutes.StaticRoutes.Static.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self.next_hops = LocalRoutes.StaticRoutes.Static.NextHops()
self.next_hops.parent = self
self._children_name_map["next_hops"] = "next-hops"
self._segment_path = lambda: "static" + "[prefix='" + str(self.prefix) + "']"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/static-routes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static, ['prefix'], name, value)
class Config(_Entity_):
"""
Configuration data for static routes
.. attribute:: prefix
Destination prefix for the static route, either IPv4 or IPv6
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
.. attribute:: set_tag
Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols
**type**\: union of the below types:
**type**\: int
**range:** 0..4294967295
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "static"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])),
('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])),
])
self.prefix = None
self.set_tag = None
self._segment_path = lambda: "config"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.Config, ['prefix', 'set_tag'], name, value)
class State(_Entity_):
"""
Operational state data for static routes
.. attribute:: prefix
Destination prefix for the static route, either IPv4 or IPv6
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
**config**\: False
.. attribute:: set_tag
Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols
**type**\: union of the below types:
**type**\: int
**range:** 0..4294967295
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "static"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])),
('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])),
])
self.prefix = None
self.set_tag = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.State, ['prefix', 'set_tag'], name, value)
class NextHops(_Entity_):
"""
Configuration and state parameters relating to the
next\-hops that are to be utilised for the static
route being specified
.. attribute:: next_hop
A list of next\-hops to be utilised for the static route being specified
**type**\: list of :py:class:`NextHop <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops, self).__init__()
self.yang_name = "next-hops"
self.yang_parent_name = "static"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("next-hop", ("next_hop", LocalRoutes.StaticRoutes.Static.NextHops.NextHop))])
self._leafs = OrderedDict()
self.next_hop = YList(self)
self._segment_path = lambda: "next-hops"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops, [], name, value)
class NextHop(_Entity_):
"""
A list of next\-hops to be utilised for the static
route being specified.
.. attribute:: index (key)
A reference to the index of the current next\-hop. The index is intended to be a user\-specified value which can be used to reference the next\-hop in question, without any other semantics being assigned to it
**type**\: str
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config>`
.. attribute:: config
Configuration parameters relating to the next\-hop entry
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config>`
.. attribute:: state
Operational state parameters relating to the next\-hop entry
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State>`
**config**\: False
.. attribute:: interface_ref
Reference to an interface or subinterface
**type**\: :py:class:`InterfaceRef <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop, self).__init__()
self.yang_name = "next-hop"
self.yang_parent_name = "next-hops"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['index']
self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State)), ("interface-ref", ("interface_ref", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef))])
self._leafs = OrderedDict([
('index', (YLeaf(YType.str, 'index'), ['str'])),
])
self.index = None
self.config = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self.interface_ref = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef()
self.interface_ref.parent = self
self._children_name_map["interface_ref"] = "interface-ref"
self._segment_path = lambda: "next-hop" + "[index='" + str(self.index) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop, ['index'], name, value)
class Config(_Entity_):
"""
Configuration parameters relating to the next\-hop
entry
.. attribute:: index
A user\-specified identifier utilised to uniquely reference the next\-hop entry in the next\-hop list. The value of this index has no semantic meaning other than for referencing the entry
**type**\: str
.. attribute:: next_hop
The next\-hop that is to be used for the static route \- this may be specified as an IP address, an interface or a pre\-defined next\-hop type \- for instance, DROP or LOCAL\_LINK. When this leaf is not set, and the interface\-ref value is specified for the next\-hop, then the system should treat the prefix as though it is directly connected to the interface
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$
**type**\: :py:class:`LOCALDEFINEDNEXTHOP <ydk.models.openconfig.openconfig_local_routing.LOCALDEFINEDNEXTHOP>`
.. attribute:: metric
A metric which is utilised to specify the preference of the next\-hop entry when it is injected into the RIB. The lower the metric, the more preferable the prefix is. When this value is not specified the metric is inherited from the default metric utilised for static routes within the network instance that the static routes are being instantiated. When multiple next\-hops are specified for a static route, the metric is utilised to determine which of the next\-hops is to be installed in the RIB. When multiple next\-hops have the same metric (be it specified, or simply the default) then these next\-hops should all be installed in the RIB
**type**\: int
**range:** 0..4294967295
.. attribute:: recurse
Determines whether the next\-hop should be allowed to be looked up recursively \- i.e., via a RIB entry which has been installed by a routing protocol, or another static route \- rather than needing to be connected directly to an interface of the local system within the current network instance. When the interface reference specified within the next\-hop entry is set (i.e., is not null) then forwarding is restricted to being via the interface specified \- and recursion is hence disabled
**type**\: bool
**default value**\: false
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "next-hop"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('index', (YLeaf(YType.str, 'index'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str','str',('ydk.models.openconfig.openconfig_local_routing', 'LOCALDEFINEDNEXTHOP')])),
('metric', (YLeaf(YType.uint32, 'metric'), ['int'])),
('recurse', (YLeaf(YType.boolean, 'recurse'), ['bool'])),
])
self.index = None
self.next_hop = None
self.metric = None
self.recurse = None
self._segment_path = lambda: "config"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.Config, ['index', 'next_hop', 'metric', 'recurse'], name, value)
class State(_Entity_):
"""
Operational state parameters relating to the
next\-hop entry
.. attribute:: index
A user\-specified identifier utilised to uniquely reference the next\-hop entry in the next\-hop list. The value of this index has no semantic meaning other than for referencing the entry
**type**\: str
**config**\: False
.. attribute:: next_hop
The next\-hop that is to be used for the static route \- this may be specified as an IP address, an interface or a pre\-defined next\-hop type \- for instance, DROP or LOCAL\_LINK. When this leaf is not set, and the interface\-ref value is specified for the next\-hop, then the system should treat the prefix as though it is directly connected to the interface
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$
**type**\: :py:class:`LOCALDEFINEDNEXTHOP <ydk.models.openconfig.openconfig_local_routing.LOCALDEFINEDNEXTHOP>`
**config**\: False
.. attribute:: metric
A metric which is utilised to specify the preference of the next\-hop entry when it is injected into the RIB. The lower the metric, the more preferable the prefix is. When this value is not specified the metric is inherited from the default metric utilised for static routes within the network instance that the static routes are being instantiated. When multiple next\-hops are specified for a static route, the metric is utilised to determine which of the next\-hops is to be installed in the RIB. When multiple next\-hops have the same metric (be it specified, or simply the default) then these next\-hops should all be installed in the RIB
**type**\: int
**range:** 0..4294967295
**config**\: False
.. attribute:: recurse
Determines whether the next\-hop should be allowed to be looked up recursively \- i.e., via a RIB entry which has been installed by a routing protocol, or another static route \- rather than needing to be connected directly to an interface of the local system within the current network instance. When the interface reference specified within the next\-hop entry is set (i.e., is not null) then forwarding is restricted to being via the interface specified \- and recursion is hence disabled
**type**\: bool
**config**\: False
**default value**\: false
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "next-hop"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('index', (YLeaf(YType.str, 'index'), ['str'])),
('next_hop', (YLeaf(YType.str, 'next-hop'), ['str','str',('ydk.models.openconfig.openconfig_local_routing', 'LOCALDEFINEDNEXTHOP')])),
('metric', (YLeaf(YType.uint32, 'metric'), ['int'])),
('recurse', (YLeaf(YType.boolean, 'recurse'), ['bool'])),
])
self.index = None
self.next_hop = None
self.metric = None
self.recurse = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.State, ['index', 'next_hop', 'metric', 'recurse'], name, value)
class InterfaceRef(_Entity_):
"""
Reference to an interface or subinterface
.. attribute:: config
Configured reference to interface / subinterface
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config>`
.. attribute:: state
Operational state for interface\-ref
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State>`
**config**\: False
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef, self).__init__()
self.yang_name = "interface-ref"
self.yang_parent_name = "next-hop"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("config", ("config", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config)), ("state", ("state", LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State))])
self._leafs = OrderedDict()
self.config = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self._segment_path = lambda: "interface-ref"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef, [], name, value)
class Config(_Entity_):
"""
Configured reference to interface / subinterface
.. attribute:: interface
Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
.. attribute:: subinterface
Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. If only a reference to a base interface is required, this leaf should not be set
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "interface-ref"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
])
self.interface = None
self.subinterface = None
self._segment_path = lambda: "config"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.Config, ['interface', 'subinterface'], name, value)
class State(_Entity_):
"""
Operational state for interface\-ref
.. attribute:: interface
Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
**config**\: False
.. attribute:: subinterface
Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. If only a reference to a base interface is required, this leaf should not be set
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`
**config**\: False
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "interface-ref"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
])
self.interface = None
self.subinterface = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.StaticRoutes.Static.NextHops.NextHop.InterfaceRef.State, ['interface', 'subinterface'], name, value)
class LocalAggregates(_Entity_):
"""
Enclosing container for locally\-defined aggregate
routes
.. attribute:: aggregate
List of aggregates
**type**\: list of :py:class:`Aggregate <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate>`
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.LocalAggregates, self).__init__()
self.yang_name = "local-aggregates"
self.yang_parent_name = "local-routes"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("aggregate", ("aggregate", LocalRoutes.LocalAggregates.Aggregate))])
self._leafs = OrderedDict()
self.aggregate = YList(self)
self._segment_path = lambda: "local-aggregates"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.LocalAggregates, [], name, value)
class Aggregate(_Entity_):
"""
List of aggregates
.. attribute:: prefix (key)
Reference to the configured prefix for this aggregate
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
**refers to**\: :py:class:`prefix <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.Config>`
.. attribute:: config
Configuration data for aggregate advertisements
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.Config>`
.. attribute:: state
Operational state data for aggregate advertisements
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_local_routing.LocalRoutes.LocalAggregates.Aggregate.State>`
**config**\: False
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.LocalAggregates.Aggregate, self).__init__()
self.yang_name = "aggregate"
self.yang_parent_name = "local-aggregates"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['prefix']
self._child_classes = OrderedDict([("config", ("config", LocalRoutes.LocalAggregates.Aggregate.Config)), ("state", ("state", LocalRoutes.LocalAggregates.Aggregate.State))])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str'])),
])
self.prefix = None
self.config = LocalRoutes.LocalAggregates.Aggregate.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = LocalRoutes.LocalAggregates.Aggregate.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self._segment_path = lambda: "aggregate" + "[prefix='" + str(self.prefix) + "']"
self._absolute_path = lambda: "openconfig-local-routing:local-routes/local-aggregates/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate, ['prefix'], name, value)
class Config(_Entity_):
"""
Configuration data for aggregate advertisements
.. attribute:: prefix
Aggregate prefix to be advertised
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
.. attribute:: discard
When true, install the aggregate route with a discard next\-hop \-\- traffic destined to the aggregate will be discarded with no ICMP message generated. When false, traffic destined to an aggregate address when no constituent routes are present will generate an ICMP unreachable message
**type**\: bool
**default value**\: false
.. attribute:: set_tag
Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols
**type**\: union of the below types:
**type**\: int
**range:** 0..4294967295
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.LocalAggregates.Aggregate.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "aggregate"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])),
('discard', (YLeaf(YType.boolean, 'discard'), ['bool'])),
('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])),
])
self.prefix = None
self.discard = None
self.set_tag = None
self._segment_path = lambda: "config"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate.Config, ['prefix', 'discard', 'set_tag'], name, value)
class State(_Entity_):
"""
Operational state data for aggregate
advertisements
.. attribute:: prefix
Aggregate prefix to be advertised
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])/(([0\-9])\|([1\-2][0\-9])\|(3[0\-2]))$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))/(12[0\-8]\|1[0\-1][0\-9]\|[1\-9][0\-9]\|[0\-9])$
**config**\: False
.. attribute:: discard
When true, install the aggregate route with a discard next\-hop \-\- traffic destined to the aggregate will be discarded with no ICMP message generated. When false, traffic destined to an aggregate address when no constituent routes are present will generate an ICMP unreachable message
**type**\: bool
**config**\: False
**default value**\: false
.. attribute:: set_tag
Set a generic tag value on the route. This tag can be used for filtering routes that are distributed to other routing protocols
**type**\: union of the below types:
**type**\: int
**range:** 0..4294967295
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
**config**\: False
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(LocalRoutes.LocalAggregates.Aggregate.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "aggregate"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('prefix', (YLeaf(YType.str, 'prefix'), ['str','str'])),
('discard', (YLeaf(YType.boolean, 'discard'), ['bool'])),
('set_tag', (YLeaf(YType.str, 'set-tag'), ['int','str'])),
])
self.prefix = None
self.discard = None
self.set_tag = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(LocalRoutes.LocalAggregates.Aggregate.State, ['prefix', 'discard', 'set_tag'], name, value)
def clone_ptr(self):
self._top_entity = LocalRoutes()
return self._top_entity
class DROP(LOCALDEFINEDNEXTHOP):
"""
Discard traffic for the corresponding destination
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:DROP"):
if sys.version_info > (3,):
super().__init__(ns, pref, tag)
else:
super(DROP, self).__init__(ns, pref, tag)
class LOCALLINK(LOCALDEFINEDNEXTHOP):
"""
Treat traffic towards addresses within the specified
next\-hop prefix as though they are connected to a local
link. When the LOCAL\_LINK next\-hop type is specified,
an interface must also be specified such that
the local system can determine which link to trigger
link\-layer address discovery against
"""
_prefix = 'oc-loc-rt'
_revision = '2017-05-15'
def __init__(self, ns="http://openconfig.net/yang/local-routing", pref="openconfig-local-routing", tag="openconfig-local-routing:LOCAL_LINK"):
if sys.version_info > (3,):
super().__init__(ns, pref, tag)
else:
super(LOCALLINK, self).__init__(ns, pref, tag)
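# --- Illustrative sketch (not part of the generated bindings). It shows how the
# classes above might be populated to describe one static route with a single
# next-hop. Only the in-memory object tree is built here; pushing it to a device
# would additionally require a ydk service provider, which this module does not cover.
def _example_static_route():
    local_routes = LocalRoutes()
    static = LocalRoutes.StaticRoutes.Static()
    static.prefix = "0.0.0.0/0"
    static.config.prefix = "0.0.0.0/0"
    next_hop = LocalRoutes.StaticRoutes.Static.NextHops.NextHop()
    next_hop.index = "NH1"
    next_hop.config.index = "NH1"
    next_hop.config.next_hop = "192.0.2.1"
    next_hop.config.metric = 10
    static.next_hops.next_hop.append(next_hop)
    local_routes.static_routes.static.append(static)
    return local_routes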
|
python
|
import time
import json
from pathlib import Path
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from radam import RAdam
from model import GPT, GPTLMHead, GPTClsHead
def timeit(method):
def timed(*args, **kw):
_args = args[0].args
ts = time.time()
result = method(*args, **kw)
te = time.time()
if _args.distributed:
if _args.local_rank == 0:
print('Function Time: {}\t>\t{:.0f} min {:.0f} sec'.format(method.__name__, (te-ts)//60, (te-ts)%60))
else:
print('Function Time: {}\t>\t{:.0f} min {:.0f} sec'.format(method.__name__, (te-ts)//60, (te-ts)%60))
return result
return timed
class Trainer:
def __init__(self, args, train_loader, test_loader, tokenizer):
self.args = args
self.train_loader = train_loader
self.test_loader = test_loader
self.tokenizer = tokenizer
self.vocab_size = tokenizer.vocab_size
self.pad_id = tokenizer.pad_token_id
self.eos_id = tokenizer.eos_token_id
self.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu', args.local_rank)
self.writer = SummaryWriter() if args.local_rank in [-1, 0] else None
self.n_gpus = torch.distributed.get_world_size() if args.distributed else torch.cuda.device_count()
assert args.pretrain != args.finetune  # exactly one of the pretrain / finetune flags must be set
if args.pretrained_model:
self.gpt = torch.load(args.pretrained_model)
else:
self.gpt = GPT(vocab_size=self.vocab_size,
seq_len=args.max_seq_len,
d_model=args.hidden,
n_layers=args.n_layers,
n_heads=args.n_attn_heads,
d_ff=args.ffn_hidden,
embd_pdrop=args.embd_dropout,
attn_pdrop=args.attn_dropout,
resid_pdrop=args.resid_dropout,
pad_id=self.pad_id)
if args.pretrain:
self.model = GPTLMHead(self.gpt)
self.model.to(self.device)
if args.finetune:
with open(args.cached_label_dict, 'r') as file:
label_dict = json.load(file)
self.model = GPTClsHead(self.gpt, n_class=len(label_dict), cls_token_id=self.eos_id)
self.model.to(self.device)
if args.distributed:
self.model = DistributedDataParallel(self.model, device_ids=[args.local_rank], output_device=args.local_rank)
self.optimizer = RAdam(self.model.parameters(), args.lr)
self.criterion = nn.CrossEntropyLoss(ignore_index = self.pad_id).to(self.device)
self.cls_criterion = nn.CrossEntropyLoss().to(self.device)
@timeit
def train(self, epoch):
if self.args.pretrain:
self.pretrain(epoch)
if self.args.finetune:
self.finetune(epoch)
def pretrain(self, epoch):
losses = 0
n_batches, n_samples = len(self.train_loader), len(self.train_loader.dataset)
self.model.train()
for i, batch in enumerate(self.train_loader):
inputs = batch[0].to(self.device)
targets = inputs[:, 1:].contiguous()
# |inputs| : (batch_size, seq_len), |targets| : (batch_size, seq_len-1)
lm_logits = self.model(inputs)
lm_logits = lm_logits[:, :-1].contiguous()
# |lm_logits| : (batch_size, seq_len-1, vocab_size)
loss = self.criterion(lm_logits.view(-1, self.vocab_size), targets.view(-1))
losses += loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.args.local_rank in [-1, 0]:
self.writer.add_scalar('Loss/pre-train', loss.item(), ((epoch-1)*n_batches)+i)
if i % (n_batches//5) == 0 and i != 0:
print('Iteration {} ({}/{})\tLoss: {:.4f}'.format(i, i, n_batches, losses/i))
print('Train Epoch {} [rank: {}]\t>\tLoss: {:.4f}'.format(epoch, self.args.local_rank, losses/n_batches))
def finetune(self, epoch):
losses, accs = 0, 0
n_batches, n_samples = len(self.train_loader), len(self.train_loader.dataset)  # n_batches = number of batches per process
self.model.train()
for i, batch in enumerate(self.train_loader):
inputs, labels = map(lambda x: x.to(self.device), batch)
# |inputs| : (batch_size, seq_len), |labels| : (batch_size)
lm_logits, cls_logits = self.model(inputs)
lm_logits = lm_logits[:, :-1].contiguous()
# |lm_logits| : (batch_size, seq_len-1, vocab_size), |cls_logits| : (batch_size, n_class)
lm_loss = self.criterion(lm_logits.view(-1, self.vocab_size), inputs[:, 1:].contiguous().view(-1))
cls_loss = self.cls_criterion(cls_logits, labels)
loss = cls_loss + (self.args.auxiliary_ratio * lm_loss)
losses += loss.item()
acc = (cls_logits.argmax(dim=-1) == labels).to(dtype=cls_logits.dtype).mean()
accs += acc
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.args.local_rank in [-1, 0]:
self.writer.add_scalar('Loss/fine-tune', loss.item(), ((epoch-1)*n_batches)+i)
self.writer.add_scalar('Accuracy/fine-tune', acc, ((epoch-1)*n_batches)+i)
if i % (n_batches//5) == 0 and i != 0:
print('Iteration {} ({}/{})\tLoss: {:.4f} Acc: {:.1f}%'.format(i, i, n_batches, losses/i, accs/i*100.))
print('Train Epoch {} [rank: {}]\t>\tLoss: {:.4f} / Acc: {:.1f}%'.format(epoch, self.args.local_rank, losses/n_batches, accs/n_batches*100.))
def evaluate(self, epoch):
losses, accs = 0, 0
n_batches, n_samples = len(self.test_loader), len(self.test_loader.dataset)
self.model.eval()
with torch.no_grad():
for i, batch in enumerate(self.test_loader):
if self.args.pretrain:
inputs = batch.to(self.device)
targets = inputs[:, 1:].contiguous()
lm_logits = self.model(inputs)
lm_logits = lm_logits[:, :-1].contiguous()
loss = self.criterion(lm_logits.view(-1, self.vocab_size), targets.view(-1))
losses += loss.item()
if self.args.local_rank in [-1, 0]:
self.writer.add_scalar('Loss/pre-train(eval)', loss.item(), ((epoch-1)*n_batches)+i)
elif self.args.finetune:
inputs, labels = map(lambda x: x.to(self.device), batch)
lm_logits, cls_logits = self.model(inputs)
lm_logits = lm_logits[:, :-1].contiguous()
lm_loss = self.criterion(lm_logits.view(-1, self.vocab_size), inputs[:, 1:].contiguous().view(-1))
cls_loss = self.cls_criterion(cls_logits, labels)
loss = cls_loss + (self.args.auxiliary_ratio * lm_loss)
losses += loss.item()
acc = (cls_logits.argmax(dim=-1) == labels).to(dtype=cls_logits.dtype).mean()
accs += acc
if self.args.local_rank in [-1, 0]:
self.writer.add_scalar('Loss/fine-tune(eval)', loss.item(), ((epoch-1)*n_batches)+i)
self.writer.add_scalar('Accuracy/fine-tune(eval)', acc, ((epoch-1)*n_batches)+i)
print('Eval Epoch {} [rank: {}]\t>\tLoss: {:.4f} / Acc: {:.1f}%'.format(epoch, self.args.local_rank, losses/n_batches, accs/n_batches*100.))
def save(self, epoch, model_prefix='model', root='.model'):
path = Path(root) / (model_prefix + '.ep%d' % epoch)
if not path.parent.exists():
path.parent.mkdir()
if self.args.distributed:
if self.args.local_rank == 0:
torch.save(self.gpt, path)
else:
torch.save(self.gpt, path)
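def run_training(args, train_loader, test_loader, tokenizer):
    # Hypothetical driver (not part of the original file): it assumes `args`
    # carries an `epochs` attribute in addition to the attributes the Trainer
    # already reads (lr, pretrain/finetune flags, local_rank, ...).
    trainer = Trainer(args, train_loader, test_loader, tokenizer)
    for epoch in range(1, args.epochs + 1):
        trainer.train(epoch)
        trainer.evaluate(epoch)
        trainer.save(epoch)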
|
python
|
import os
import subprocess
files = [
"001",
"001a",
"001b",
"002",
"002a",
"002b",
"003",
"003a",
"003b",
"004",
"004a",
"004b",
"005",
"005a",
"005b",
"006",
"006a",
"006b",
"007",
"007a",
"007b",
"008",
"008a",
"008b",
"009",
"009a",
"009b",
"010",
"010a",
"010b",
"011",
"011a",
"011b",
"012",
"012a",
"012b",
"013",
"013a",
"013b",
"014",
"014a",
"014b",
"015",
"015a",
"015b",
"016",
"016a",
"016b",
"017",
"017a",
"017b",
"017c",
"018",
"018a",
"018b",
"018c",
"018d",
"018e",
"018f",
"018g",
"019",
"019a",
"019b",
"019c",
"019d",
"019e",
"020",
"021",
"021a",
"021b",
"021c",
"021d",
"021e",
"022",
"022a",
"022b",
"022c",
"022d",
"022e",
"023",
"023a",
"023b",
"023c",
"023d",
"023e",
"024",
"024a",
"024b",
"024c",
"024d",
"024e",
"025",
"025a",
"025b",
"025c",
"025d",
"025e",
"026",
"026a",
"026b",
"026c",
"026d",
"026e",
"027",
"027a",
"027b",
"027c",
"027d",
"027e",
"028",
"028a",
"028b",
"028c",
"028d",
"028e",
"029",
"029a",
"029b",
"029c",
"029d",
"029e",
"030",
"030a",
"030b",
"030c",
"030d",
"030e",
]
for n in files:
in_path = os.path.join("public", "img", "map", "pipo-charachip" + n + ".png")
for i, direction in enumerate(["down", "left", "right", "up"]):
out_path = os.path.join("public", "img", "avatar", n + "-" + direction + ".png")
offset = i * 32
subprocess.call(
["magick", "convert", in_path, "-crop", "32x32+0+" + str(offset), out_path]
)
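def crop_with_pillow(in_path, out_path, offset):
    # Alternative sketch (not used above): the same 32x32 crop done with Pillow
    # instead of shelling out to ImageMagick. Assumes Pillow is installed and the
    # sprite sheet keeps the down/left/right/up rows stacked at x offset 0.
    from PIL import Image
    with Image.open(in_path) as img:
        img.crop((0, offset, 32, offset + 32)).save(out_path)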
|
python
|
#! /usr/bin/env python
import io
import os
from setuptools import setup
mydir = os.path.dirname(__file__)
def read_project_version():
# Version-trick to have version-info in a single place.
# http://stackoverflow.com/questions/2058802/how-can-i-get-the-version-defined-in-setup-py-setuptools-in-my-package
fglobals = {}
with io.open(os.path.join(mydir, '_version.py')) as fd:
exec(fd.read(), fglobals) # To read __version__
return fglobals['__version__']
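# For reference, the _version.py file read above is expected to contain a single
# assignment such as (the version number here is only an example):
#
#     __version__ = '0.1.0'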
setup(name='doit-graphx',
description="doit command plugin to generate task dependency-graphs using networkx",
version=read_project_version(),
license='MIT',
author='Kostis Anagnostopoulos',
author_email='[email protected]',
url='https://github.com/pydoit/doit-graphx',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Scientific/Engineering',
],
py_modules=['cmd_graphx', '_version'],
# TODO: Factor out matplotlib into an extras_require.
install_requires=['networkx', 'matplotlib'],
# doit>=0.28.0] # doit 0.28 unreleased
long_description="",
)
|
python
|
import os
from unittest.mock import patch
from util.job import get_job_id
def test_create_job_id():
assert get_job_id() == os.getenv('JOB_ID'), 'job id is created'
@patch.dict('os.environ', {'JOB_ID': 'job_123'})
def test_retrieve_job_id():
assert get_job_id() == 'job_123', 'job id is retrieved'
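def _reference_get_job_id():
    # Hedged sketch, inferred only from the two tests above, of what
    # util.job.get_job_id is expected to do: reuse JOB_ID when it is already set,
    # otherwise create one and cache it in the environment. The real
    # implementation may differ.
    import uuid
    if 'JOB_ID' not in os.environ:
        os.environ['JOB_ID'] = 'job_' + uuid.uuid4().hex[:8]
    return os.environ['JOB_ID']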
|
python
|
from kelvin.tests.test_cc_utils import *
from kelvin.tests.test_ccsd import *
from kelvin.tests.test_ft_cc_2rdm import *
from kelvin.tests.test_ft_cc_ampl import *
from kelvin.tests.test_ft_cc_relden import *
from kelvin.tests.test_ft_ccsd import *
from kelvin.tests.test_ft_ccsd_rdm import *
from kelvin.tests.test_ft_deriv import *
from kelvin.tests.test_ft_lambda import *
from kelvin.tests.test_ft_lambda_equations import *
from kelvin.tests.test_ft_mp2 import *
from kelvin.tests.test_hubbard import *
from kelvin.tests.test_hubbard_field import *
from kelvin.tests.test_kel_ccsd import *
from kelvin.tests.test_lambda import *
from kelvin.tests.test_mp2 import *
from kelvin.tests.test_neq_ccsd import *
from kelvin.tests.test_neq_density import *
from kelvin.tests.test_neq_lambda import *
from kelvin.tests.test_neq_lambda_equation import *
from kelvin.tests.test_neq_prop import *
from kelvin.tests.test_quadrature import *
from kelvin.tests.test_td_ccsd import *
from kelvin.tests.test_td_ccsd_ESN import *
from kelvin.tests.test_td_ccsd_lambda import *
from kelvin.tests.test_td_ccsd_1rdm import *
from kelvin.tests.test_td_ccsd_2rdm import *
from kelvin.tests.test_td_ccsd_relden import *
from kelvin.tests.test_scf import *
from kelvin.tests.test_test import *
from kelvin.tests.test_ueg import *
from kelvin.tests.test_ueg_utils import *
|
python
|
from person import Person
from bounding_box import BoundingBox
from typing import List
from video_frame import VideoFrame
from sort import Sort
import numpy as np
class Tracker:
"""
Tracks detected people and groups those with close trajectories.
Attributes
----------
minDist: float
People are considered to be in the same group if they are less than minDist meters from each other for enough video frames
"""
def __init__(self, analyzer, minDist = 100) -> None:
self._sort=Sort(max_age=10)
self._analyzer=analyzer
self._minDist = minDist
pass
def addBoundingBoxForPerson(self, person:Person, box:BoundingBox):
"""
Appends the bounding box to the person's bounding boxes. It also calculates and appends the person's coordinates.
Parameters
----------
person : Person
The owner of the bounding box
box : BoundingBox
The bounding box to append
"""
person.bounding_boxes.append(box)
if box is None:
person.coordinates.append(None)
else:
x, y = self._analyzer.transformation.transformPoint(box.left+box.width/2, box.top+box.height)
person.addCoordinates(x, y)
def updateTrajectories(self,current:VideoFrame,bounding_boxes:List[BoundingBox],scores:List[float])->None:
"""
Identifies new people in the video frame and tracks already identified people.
Deletes people if they have been missing for at least 10 video frames.
Parameters
----------
current : VideoFrame
New video frame
bounding_boxes: BoundingBox[]
Detected boundingboxes on current frame
scores: float[]
Certainty score of each bounding box
"""
lenBB = len(bounding_boxes)
if (lenBB != 0):
npbb=np.array([[bb.left, bb.top, bb.left+bb.width,bb.top+bb.height] for bb in bounding_boxes])
npscores=np.array(scores)
npscores=np.resize(npscores,(lenBB,1))
bbs=np.hstack((npbb,npscores))
objs=self._sort.update(bbs)
activePeople:List[Person]=self._analyzer.activePeople
to_delete=[]
for person in activePeople:
found=False
for obj in objs:
if obj[4]==person.id:
self.addBoundingBoxForPerson(person, BoundingBox(int(obj[0]),int(obj[1]),int(obj[2]-obj[0]),int(obj[3]-obj[1])))
found=True
obj[4]=-1
break
if not found:
countNone=0
for bbid in range(1,min(len(person.bounding_boxes),6)):
if person.bounding_boxes[-bbid] is None:
countNone+=1
if(countNone==5):
to_delete.append(person)
self.addBoundingBoxForPerson(person,None)
for obj in objs:
if obj[4]!=-1:
newPerson=Person()
newPerson.id=obj[4]
self.addBoundingBoxForPerson(newPerson, BoundingBox(int(obj[0]),int(obj[1]),int(obj[2]-obj[0]),int(obj[3]-obj[1])))
self._analyzer.activePeople.append(newPerson)
for d in to_delete:
self._analyzer.activePeople.remove(d)
def groupTrajectories(self, dt=100) -> None:  # e.g. dt = 4 * 30 for 4 seconds at 30 fps
"""
Considers two individuals to be in the same group if they are less than minDist meters apart for at least dt video frames.
Parameters
----------
dt : int
minimum number of video frames (seconds * fps)
"""
for i, p1 in enumerate(self._analyzer.activePeople):
for j, p2 in enumerate(self._analyzer.activePeople):
if (i > j) and (p1 not in p2.inGroupWith):
if ((len(p1.coordinates) >= dt) and (len(p2.coordinates) >= dt)):
in_group = True
for k in range(1, dt + 1):
    if ((p1.coordinates[-k] is not None) and (p2.coordinates[-k] is not None) and (p1.coordinates[-k].DistanceFrom(p2.coordinates[-k]) > self._minDist)):
in_group = False
if in_group:
p1.inGroupWith.append(p2)
p2.inGroupWith.append(p1)
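def run_tracking(analyzer, detector, video_frames):
    # Hypothetical driver (not part of the class). It assumes `analyzer` exposes
    # `activePeople` and `transformation.transformPoint(x, y)` as used above, and
    # that `detector.detect(frame)` returns (List[BoundingBox], List[float]).
    tracker = Tracker(analyzer, minDist=100)
    for frame in video_frames:
        boxes, scores = detector.detect(frame)
        tracker.updateTrajectories(frame, boxes, scores)
        tracker.groupTrajectories(dt=100)
    return analyzer.activePeople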
|
python
|
__version__ = "0.2.8"
from . import utils
from . import common
from . import manager
from .common import Module
from .common import Sequential
from .common import Linear
from .common import Identity
from .common import ModuleList
from .common import MultiModule
from .common import Parameter
from .manager import register_packages
from .manager import get_module_dict
from .manager import get_module_classes
from .manager import get_module_names
def create_model_cls(package=None, model_path=None, name=None, modargs=None):
"""
Create a model-initializing function that accepts positional arguments.
:param package:
the package to search for the model. If none given, all
known packages will be searched.
:param model_path:
yaml file path that contains keyword-only arguments.
:param name:
model name to search for. If no model path is specified, this
option will be used.
:param modargs:
keyword-only module arguments to initialize the function.
:return:
function
"""
if model_path is None:
if name is None:
classes = manager.get_module_classes(package)
assert len(classes) > 0, \
f"no modules found in package " \
f"'{package if package is not None else 'all'}"
name = classes[0].name
modargs = get_optarg_template(classes[0])
if modargs is None:
modargs = dict()
else:
opts = utils.load_yaml(model_path)
name, modargs = opts.get("type"), opts.get("vargs")
namemap = manager.get_module_dict(package)
assert name in namemap, \
f"module name '{name}' does not exist. available names: " \
f"{list(namemap.keys())}"
model_cls = namemap[name]
caster = common.get_caster(model_cls)
return caster({
"type": model_cls.name,
"vargs": modargs
})
def get_optarg_template(cls: common.Module):
def get_value_template(optarg: common.OptionalArgument):
if optarg.islist:
sample = optarg.default[0]
else:
sample = optarg.default
if common.is_module_cls(sample):
pkg = sample.get_package()
classes = manager.get_module_classes(pkg)
assert classes, \
f"no available modules found for package '{pkg}'"
cls = classes[0]
val = {"type": cls.name}
args = get_optarg_template(cls)
if args:
val["vargs"] = args
else:
val = sample
if optarg.islist:
val = [val]
return val
return {
name: get_value_template(optarg)
for name, optarg in cls.get_optargs().items()
}
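def _example_create_model(my_package, vocab_size):
    # Hypothetical usage sketch: `my_package` and the "my-model" name are
    # placeholders rather than modules shipped with this library. Per the
    # docstring above, the returned callable accepts positional arguments.
    register_packages(my_package)
    model_cls = create_model_cls(package=my_package, name="my-model")
    return model_cls(vocab_size)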
|
python
|
from src.abstract_load_balancer import AbstractLoadBalancer, LoadBalancerQueue
class UtilisationAwareLoadBalancer(AbstractLoadBalancer):
def __init__(self, APISERVER, DEPLOYMENT):
self.apiServer = APISERVER
self.deployment = DEPLOYMENT
self.internalQueue = []
def UpdatePodList(self):
self.internalQueue.clear()
endPoints = self.apiServer.GetEndPointsByLabel(self.deployment.deploymentLabel)
for endPoint in endPoints:
if endPoint.pod and endPoint.pod.isRunning():
queueItem = LoadBalancerQueue(endPoint.pod, len(endPoint.pod.requests))
self.internalQueue.append(queueItem)
def FindPriorityQueueItem(self):
priorityQueueItem = self.internalQueue[0]
for queueItem in self.internalQueue:
if queueItem.priority < priorityQueueItem.priority:
priorityQueueItem = queueItem
return priorityQueueItem
def FindPod(self):
self.UpdatePodList()
if len(self.internalQueue) > 0:
queueItem = self.FindPriorityQueueItem()
if queueItem is not None:
return queueItem.pod
return None
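def dispatch_to_least_loaded(api_server, deployment, handle):
    # Hypothetical helper showing the intended use of FindPod(); `handle` is a
    # caller-supplied callable because this snippet does not define how a request
    # is actually forwarded to a pod.
    balancer = UtilisationAwareLoadBalancer(api_server, deployment)
    pod = balancer.FindPod()  # pod with the fewest queued requests, or None
    if pod is not None:
        handle(pod)
    return pod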
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric that tests models against snow variations."""
import numpy as np
from tqdm import tqdm
from collections.abc import Iterable
from .base import Metric
from .base import call_decorator
from PIL import Image
import warnings
from perceptron.benchmarks.motion_blur import MotionBlurMetric
import pdb
class SnowMetric(Metric):
"""Metric that tests models against snow variations."""
@call_decorator
def __call__(self, adv, angle=45, annotation=None, unpack=True,
abort_early=True, verify=False, epsilons=1000):
"""Change the snow of the image until it is misclassified.
Parameters
----------
adv : `numpy.ndarray`
The original, unperturbed input as a `numpy.ndarray`.
angle : float
Angle of snowfall.
annotation : int
The reference label of the original input. Must be passed
if `a` is a `numpy.ndarray`.
unpack : bool
If true, returns the adversarial input, otherwise returns
the Adversarial object.
abort_early : bool
If true, returns when got first adversarial, otherwise
returns when all the iterations are finished.
verify : bool
If True, return verifiable bound.
epsilons : int or Iterable[float]
Either an Iterable of snow intensity levels or the number of
levels between 0 and 1 that should be tried. Epsilons are not
used if verify = True.
"""
import cv2
if verify is True:
warnings.warn('epsilon is not used in verification mode '
'and abort_early is set to True.')
a = adv
del adv
del annotation
del unpack
image = a.original_image
min_, max_ = a.bounds()
axis = a.channel_axis(batch=False)
hw = [image.shape[i] for i in range(image.ndim) if i != axis]
img_height, img_width = hw
if not isinstance(epsilons, Iterable):
    epsilons = np.linspace(0, 1, num=epsilons)[1:]
snow_mask_np = np.zeros((img_height // 10, img_height // 10, 3))
ch = snow_mask_np.shape[0] // 2
cw = snow_mask_np.shape[1] // 2
cr = min(img_height, img_width) * 0.1
for i in range(snow_mask_np.shape[0]):
for j in range(snow_mask_np.shape[1]):
if (i - ch) ** 2 + (j - cw) ** 2 <= cr:
snow_mask_np[i, j] = np.ones(3)
kernel = MotionBlurMetric.motion_Kernel((int(ch * 0.9),
int(cw * 0.9)),
angle)
blured = cv2.filter2D(snow_mask_np, -1, kernel)
blured = np.clip(blured, min_, max_).astype(np.float32)
blured = blured * max_
blured_h, blured_w = blured.shape[:2]
if axis == 0:
blured = np.transpose(blured, (2, 0, 1))
cc0 = [1, 100]
for _, epsilon in enumerate(tqdm(epsilons)):
p0 = int(cc0[0] + epsilon * (cc0[1] - cc0[0]))
positions_h = np.random.randint(img_height - blured_h, size=p0)
positions_w = np.random.randint(img_width - blured_w, size=p0)
perturbed = np.copy(image)
for temp_h, temp_w in zip(positions_h, positions_w):
if axis == 0:
perturbed[:, temp_h: temp_h + blured_h, temp_w: temp_w + blured_w] += blured
else:
perturbed[temp_h: temp_h + blured_h, temp_w: temp_w + blured_w, :] += blured
perturbed = np.clip(perturbed, min_, max_)
_, is_adversarial = a.predictions(perturbed)
if is_adversarial:
if abort_early or verify:
break
else:
bound = epsilon
a.verifiable_bounds = (bound, None)
return
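# Illustrative usage sketch (an assumption, not taken from the perceptron docs):
# given an Adversarial-style object `adv` wrapping a model and an input image,
#
#     metric = SnowMetric()
#     metric(adv, angle=45, epsilons=100)
#
# the metric pastes an increasing number of motion-blurred snowflakes onto the
# image (p0 grows with epsilon) until the model's prediction flips.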
|
python
|
"""Utilities for reading configuration from settings."""
from collections import namedtuple
from functools import partial
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.text import slugify
import six
import logging
logger = logging.getLogger(__name__)
# Decorators that can be composed.
PIPES = []
# Placeholder decorators
PIPELINES = []
class Pipe:
"""Configuration class."""
def __init__(self, function, name, slug, meta, enabled):
"""Initialize Pipe."""
self.function = function
self.name = name
self.slug = slug
self.meta = meta
self.enabled = enabled
# Decorators used in codebase.
Pipeline = namedtuple('Pipeline', ['slug', 'name', 'meta'])
def conf_to_pipe(conf):
"""Create Pipe object out of configuration."""
    # if conf is a string type, convert it to a dict holding the function path
if isinstance(conf, six.string_types):
conf = {'function': conf}
if not isinstance(conf, dict):
raise ImproperlyConfigured(
            'Dynamic decorator configuration should be a string or dictionary:'
'%s' % conf)
    # Default enabled value.
    if 'enabled' not in conf:
        conf['enabled'] = False
# Only mandatory field is function:
if 'function' not in conf:
raise ImproperlyConfigured(
            'Configuration does not have a function item: %s' % conf)
# If name is not defined use function name as name
if 'name' not in conf:
conf['name'] = conf['function']
if 'slug' not in conf:
conf['slug'] = conf['name']
# Ensure that slug is slugified
conf['slug'] = slugify(conf['slug'])
# Group will be used in interface
if 'meta' not in conf:
conf['meta'] = {}
return Pipe(**conf)
def get_pipes():
"""Get pipes from settings."""
# TODO: If settings does not have PROVIDED_DECORATORS assign it.
# we should return default decorators in this case.
# TODO: PROVIDED_DECORATORS seems to be not used right now.
if PIPES:
return PIPES
for c in settings.DYNAMIC_DECORATORS:
        # Set default values.
p = conf_to_pipe(c)
if any(e for e in PIPES
if p.slug == e.slug):
raise ImproperlyConfigured(
'Duplicate name in decorator configuration: %s' % p)
PIPES.append(p)
return PIPES
def get_pipelines():
"""Get pipelines."""
return PIPELINES
def register_pipeline(slug, name, meta):
"""Register given pipeline."""
if not isinstance(meta, dict):
raise ImproperlyConfigured(
            'Meta value of a decorator must be a dictionary:'
'%s' % meta)
pipeline = Pipeline(slug, name, meta)
if not any(p.slug == slug for p in PIPELINES):
PIPELINES.append(pipeline)
return pipeline
else:
logger.info('[DYNAMIC_DECORATORS] %s is already registered. Ignoring.'
% slug)
return next(p for p in PIPELINES if p.slug == slug)
def get_pipeline_by_slug(slug):
"""Search pipeline by slug value."""
return next(p for p in PIPELINES if p.slug == slug)
def is_match(pipeline, pipe):
"""Check pipe against pipeline.
Check if there is any meta property on pipeline that matches with
pipe.
"""
    # If the pipe does not have any meta attributes it automatically matches.
    # If the pipe has meta attributes it only matches if all meta attributes
    # that exist on both the pipe and the pipeline have the same values.
    # This relationship is not symmetric.
    return not pipe.meta or all(pipe.meta[k] == v
                                for k, v in six.iteritems(pipeline.meta)
                                if k in pipe.meta)
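# Example (illustrative): a pipeline with meta {'group': 'performance'} matches
# a pipe with empty meta, and matches a pipe whose meta also contains
# {'group': 'performance'}, but not one whose meta says {'group': 'logging'}.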
def filter_pipes(pipeline, pipes):
"""Filter given pipes by meta values of current pipeline."""
return filter(partial(is_match, pipeline), pipes)
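# Illustrative sketch (not part of the original module): an example of what a
# settings.DYNAMIC_DECORATORS entry could look like. The dotted paths and the
# 'group' meta key below are hypothetical placeholders.
#
# DYNAMIC_DECORATORS = [
#     # a bare string is normalised by conf_to_pipe() into {'function': ...}
#     'myapp.decorators.log_calls',
#     # a dict may override name/slug and attach meta used for pipeline matching
#     {
#         'function': 'myapp.decorators.cache_result',
#         'name': 'Cache result',
#         'meta': {'group': 'performance'},
#     },
# ]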
|
python
|
import pytest
from karp.domain.models.resource import create_resource
from karp.domain.models.entry import EntryRepository, create_entry
from karp.infrastructure.sql import sql_entry_repository
from karp.infrastructure.unit_of_work import unit_of_work
@pytest.fixture
def resource_blam():
resource = create_resource(
{
"resource_id": "blam",
"resource_name": "Blam",
"sort": ["baseform"],
"fields": {"baseform": {"type": "string", "required": True}},
"id": "baseform",
}
)
yield resource
resource.entry_repository.teardown()
def test_resource_has_entry_repository(resource_blam):
assert isinstance(resource_blam.entry_repository, EntryRepository)
with unit_of_work(using=resource_blam.entry_repository) as uw:
assert len(uw.entry_ids()) == 0
def test_resource_put_entry(resource_blam):
assert isinstance(resource_blam.entry_repository, EntryRepository)
with unit_of_work(using=resource_blam.entry_repository) as uw:
entry = create_entry("hubba", {})
uw.put(entry)
uw.commit()
entry_ids = uw.entry_ids()
assert len(entry_ids) == 1
assert "hubba" in entry_ids
|
python
|
# coding=utf-8
from __future__ import unicode_literals, print_function
import re
import datetime
from ..models import RawLog, DummyLogger, MacAddress, UserAction
CODE_WLAN_JOIN = "WLAN-Gerät angemeldet"
CODE_WLAN_LEAVE = "WLAN-Gerät hat sich abgemeldet"
CODE_WLAN_REMOVED = "WLAN-Gerät wurde abgemeldet"
def parse_logs(log=None):
if log is None:
log = DummyLogger()
report = {
"scanned": 0,
"new_macs": 0,
"failed": 0,
"new_actions": 0,
}
for rawlog in RawLog.objects.all():
# replace comma in things like '(2,4 Ghz)'
def _repl(match):
return "(%s.%s)" % match.groups()
text = re.sub(r"\(([^\).]+),([^\).]+)\)", _repl, rawlog.text)
if text.endswith("."):
text = text[:-1]
info = [x.strip() for x in text.split(",")]
print(info)
if len(info) < 1:
continue
def _parse_address(info, report):
name, ip, mac = info[0:3]
if not ip.startswith("IP") or not mac.startswith("MAC"):
log.error("Could not parse IP/MAC for log entry pk=%s" % rawlog.pk)
report["failed"] += 1
return None
ip = ip.split()[1]
mac = mac.split()[1][:17]
obj, created = MacAddress.objects.get_or_create(mac=mac[:20], name=name[:100])
if created:
report["new_macs"] += 1
return { "name": name, "ip": ip, "mac": mac }
def _add_action(address, action):
obj, created = UserAction.objects.get_or_create(
date=rawlog.date,
mac=address["mac"],
ip=address["ip"],
action=action,
)
if created:
report["new_actions"] += 1
code = info[0]
if code.startswith(CODE_WLAN_JOIN):
if len(info) >= 4:
address = _parse_address(info[2:5], report)
if address is None:
continue
_add_action(address, UserAction.ACT_WLAN_CONNECT)
elif code.startswith(CODE_WLAN_LEAVE) or code.startswith(CODE_WLAN_REMOVED):
if len(info) >= 4:
address = _parse_address(info[1:4], report)
if address is None:
continue
_add_action(address, UserAction.ACT_WLAN_DISCONNECT)
log.log("%(new_macs)s new MACs, %(new_actions)s new user-actions" % report)
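# Note (an assumption inferred from the parsing above, not from external docs):
# each raw log text is expected to be a comma-separated Fritz!Box-style event,
# roughly "WLAN-Gerät angemeldet (2,4 GHz), <name>, IP <addr>, MAC <addr>.",
# which is why the comma inside "(2,4 GHz)" is rewritten to a dot before the
# text is split on commas.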
|
python
|
# Load library
import numpy as np
# Create matrix
matrix = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
# View number of rows and columns
matrix.shape
# (3, 4)
# View number of elements (rows * columns)
matrix.size
# 12
# View number of dimensions
matrix.ndim
# 2
|
python
|
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.urls import reverse
from posts.models import User, Post, Group, Follow
class TestPostCreation(TestCase):
"""Test for proper post creation and protection from anons"""
def setUp(self):
self.text = 'test_text'
self.user = User.objects.create_user(username='testuser',
password=12345)
def test_auth_user_post_creation(self):
# Login into our user and check for redirection.
self.client.login(username=self.user.username,
password=12345)
response = self.client.post(reverse('new_post'), {'text': self.text})
self.assertEqual(response.status_code, 302)
# Test that the text is equal
post = Post.objects.first()
self.assertEqual(post.text, self.text)
def test_anon_post_creation_redirect(self):
# Test, if anon is able to retrieve the new_post page
response = self.client.get(reverse('new_post'))
self.assertRedirects(response=response,
expected_url='/auth/login?next=/new/',
target_status_code=301)
def test_anon_post_creation_post_request(self):
# Test, if anon is able to create a post through a POST request.
self.client.post(reverse('new_post'), {'text': self.text})
post_count = Post.objects.filter(text=self.text).count()
self.assertEqual(post_count, 0)
class TestPostRender(TestCase):
"""Test for proper post's rendering."""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.text = 'test_text'
self.post = Post.objects.create(text=self.text, author=self.user)
def test_profile(self):
# Profile test
response = self.client.get(
reverse('profile', kwargs={'username': self.user.username}))
self.assertContains(response, self.text)
def test_index(self):
cache.clear()
# Index page test
response = self.client.get(reverse('index'))
self.assertContains(response, self.text)
def test_direct_post_view(self):
# Direct post's page test
response = self.client.get(
reverse('post_view',
kwargs={'username': 'testuser', 'post_id': self.post.pk}))
self.assertContains(response, self.text)
class TestPostEdit(TestCase):
"""Test for proper post editing."""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.text = 'test_text'
self.post = Post.objects.create(text=self.text, author=self.user)
self.text_edited = 'test_text_edit'
def test_post_edit(self):
self.client.login(username=self.user.username, password=12345)
# Post editing
self.client.post(reverse('post_edit',
kwargs={'username': self.user.username,
'post_id': self.post.pk}),
{'text': self.text_edited})
# Test that no unwanted entities got created and contents are ok
post_edited = Post.objects.first()
post_count = Post.objects.all().count()
self.assertEqual(self.post, post_edited)
self.assertEqual(post_edited.text, self.text_edited)
self.assertEqual(post_count, 1)
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
class TestEditedPostRender(TestCase):
"""Test for rendering edited posts."""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.text = 'test_text'
self.post = Post.objects.create(text=self.text, author=self.user)
self.text_edited = 'test_text_edit'
def test_post_render_all_pages(self):
# Post editing
self.client.login(username=self.user.username, password=12345)
self.client.post(reverse('post_edit',
kwargs={'username': self.user.username,
'post_id': self.post.pk}),
{'text': self.text_edited})
# Test for rendering
response = self.client.get(
reverse('profile', kwargs={'username': self.user.username}))
self.assertContains(response, self.text_edited)
response = self.client.get(reverse('index'))
self.assertContains(response, self.text_edited)
response = self.client.get(reverse(
'post_view',
kwargs={
'username': self.user.username,
'post_id': Post.objects.first().pk})
)
self.assertContains(response, self.text_edited)
class TestHandlers(TestCase):
"""Test for custom error handlers"""
def test_404(self):
response = self.client.get('/test_non_existing_url_qweqwe/')
self.assertEqual(response.status_code, 404)
class TestImageRender(TestCase):
"""Test for image handling,
and rendering looking for <img tag in a response."""
def setUp(self):
self.tag = '<img'
self.user = User.objects.create_user(username='testuser',
password=12345)
self.text = 'test_text'
self.post = Post.objects.create(
text=self.text, author=self.user,
image='posts/test_image/Test_image.jpg'
)
def test_direct_post_image_render(self):
response = self.client.get(
reverse('post_view', kwargs={'username': self.user.username,
'post_id': self.post.pk}))
self.assertContains(response, self.tag)
def test_profile_post_image_render(self):
response = self.client.get(
reverse('profile', kwargs={'username': self.user.username}))
self.assertContains(response, self.tag)
    def test_group_post_image_render(self):
# Creating a new group and assigning it to the existing test post
self.group = Group.objects.create(title='Test group',
slug='test-group',
description='Test group description')
self.post.group_id = self.group.pk
self.post.save()
response = self.client.get(
reverse('group_posts', kwargs={'slug': self.group.slug}))
self.assertContains(response, self.tag)
class TestImageFormProtection(TestCase):
"""Test for image form protection"""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.client.force_login(self.user)
self.post = Post.objects.create(text='test_text', author=self.user)
self.image_path = 'media/posts/test_image/Test_image.jpg'
self.non_image_path = 'posts/tests.py'
        self.error_message = ('Загрузите правильное изображение. Файл, '
                              'который вы загрузили, поврежден или не '
                              'является изображением.')
def test_correct_image_form_protection(self):
with open(self.image_path, 'rb') as img:
self.client.post(reverse('post_edit',
kwargs={
'username': self.user.username,
'post_id': self.post.pk}),
{'image': img,
'text': 'edited text with an image'})
post = Post.objects.first()
self.assertIsNotNone(post.image)
def test_incorrect_image_form_protection(self):
with open(self.non_image_path, 'rb') as non_img:
response = self.client.post(reverse(
'post_edit',
kwargs={
'username': self.user.username,
'post_id': self.post.pk}),
{'image': non_img,
'text': 'edited text with wrong file '}
)
self.assertFormError(response, 'form', 'image', self.error_message)
class TestCache(TestCase):
"""Test for caching"""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.client.force_login(self.user)
self.text = 'test_text'
def test_index_cache(self):
# Create a cached page and check that there's no new post yet.
self.client.get(reverse('index'))
self.client.post(reverse('new_post'), {'text': self.text})
response = self.client.get(reverse('index'))
self.assertNotContains(response, self.text)
class TestFollowerSystem(TestCase):
"""Test for follower system. Test for follow and unfollow,
and proper construction of follower-index page"""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.user_to_follow = User.objects.create_user(
username='test_user_to_follow',
password=12345)
self.client.force_login(self.user)
self.text = 'test_text'
self.post = Post.objects.create(
text=self.text, author=self.user_to_follow)
def test_auth_user_follow_follow(self):
response = self.client.get(
reverse('profile_follow',
kwargs={'username': self.user_to_follow.username}))
self.assertIsNotNone(Follow.objects.first())
def test_auth_user_follow_unfollow(self):
response = self.client.get(
reverse('profile_unfollow',
kwargs={'username': self.user_to_follow.username}))
self.assertIsNone(Follow.objects.first())
def test_follower_index(self):
self.client.get(reverse('profile_follow',
kwargs={
'username': self.user_to_follow.username}))
response = self.client.get(reverse('follow_index'))
self.assertContains(response, self.text)
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
def test_not_follower_index(self):
response = self.client.get(reverse('follow_index'))
self.assertNotContains(response, self.text)
class TestCommentSystem(TestCase):
"""Test for proper commenting. Test if anon and
non-anon can or cannot comment"""
def setUp(self):
self.user = User.objects.create_user(username='testuser',
password=12345)
self.text = 'test_text'
self.post = Post.objects.create(
text=self.text, author=self.user)
self.commenting_user = User.objects.create_user(
username='commenting_user',
password=12345)
self.comment_text = 'test_comment'
def test_auth_user_commenting(self):
self.client.force_login(self.commenting_user)
response = self.client.post(
reverse('add_comment', kwargs={'username': self.user.username,
'post_id': self.post.pk}),
{'text': self.comment_text}, follow=True)
self.assertContains(response, self.comment_text)
def test_anon_user_commenting(self):
"""Anons should not be able to comment,
make a POST request without logging"""
response = self.client.post(
reverse('add_comment', kwargs={'username': self.user.username,
'post_id': self.post.pk}),
{'text': self.comment_text}, follow=True)
self.assertNotContains(response, self.comment_text)
|
python
|
import unittest
from entity_embeddings.util import processor_utils
class TestProcessorUtils(unittest.TestCase):
def test_get_invalid_target_processor(self):
self.assertRaises(ValueError, processor_utils.get_target_processor, 1000)
|
python
|
import torch
import numpy as np
import os
from datasets.base_dataset import BaseDataset
from models.base_model import Model
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from utils.metrics import compute_chamfer_l1
from utils.util import quantize, downsample
class AutoencoderDataset(BaseDataset):
def __init__(self, config: dict, mode: str):
BaseDataset.__init__(self, config, mode)
self.z_dim = config['z_dim']
self.implicit_rep = config['implicit_rep']
self.voxel_size = config['voxel_size']
self.implicit_input_cnt = config['implicit_input_cnt']
self.query_cnt = config['query_cnt']
self.max_dist = config['max_dist']
def convert_rep(self, signed_rep: torch.Tensor):
"""
:param signed_rep: torch.tensor of N
Signed representation of the implicit field
:return: rep: torch.tensor of N
Converted representation
"""
if self.implicit_rep == 'sdf':
return signed_rep
elif self.implicit_rep == 'udf':
return torch.abs(signed_rep)
elif self.implicit_rep == 'occ':
return (signed_rep > 0.).float()
else:
raise ValueError('representation {} not allowed'.format(self.implicit_rep))
class AutoencoderShapenetDataset(AutoencoderDataset):
name = 'cgca_autoencoder_shapenet'
def __init__(self, config: dict, mode: str):
AutoencoderDataset.__init__(self, config, mode)
self.obj_class = config['obj_class']
self.summary_name = self.obj_class
self.surface_cnt = config['surface_cnt']
self.query_dist_filter = config['query_dist_filter_rate'] * self.max_dist
if mode == 'train':
self.data_root = os.path.join(
config['data_root'], self.obj_class, 'train'
)
data_list_file_name = 'train.txt'
elif mode == 'val' or mode == 'test':
self.data_root = os.path.join(
config['data_root'], self.obj_class, 'test'
)
data_list_file_name = 'test.txt'
else:
raise ValueError()
data_list_file_path = os.path.join(
config['data_root'], self.obj_class,
data_list_file_name
)
with open(data_list_file_path, 'r') as f:
self.data_list = f.read().splitlines()
self.data_list = sorted([
x[:-1] if x[-1] == '\n' else x
for x in self.data_list
])
if (mode == 'val') and (config['eval_size'] is not None):
# fix vis_indices
eval_size = config['eval_size']
if isinstance(eval_size, int):
val_indices = torch.linspace(0, len(self.data_list) - 1, eval_size).int().tolist()
self.data_list = [self.data_list[i] for i in val_indices]
def __getitem__(self, idx):
if self.config['overfit_one_ex'] is not None:
idx = self.config['overfit_one_ex']
data_name = self.data_list[idx]
data_path = os.path.join(self.data_root, data_name + '.npz')
data = np.load(data_path)
surface = downsample(torch.tensor(data['surface']), self.surface_cnt)
sdf_pos = data['sdf_pos']
sdf_pos = torch.tensor(sdf_pos[~np.isnan(sdf_pos).any(axis=1)])
sdf_neg = data['sdf_neg']
sdf_neg = torch.tensor(sdf_neg[~np.isnan(sdf_neg).any(axis=1)])
sdf = torch.cat([sdf_pos, sdf_neg], dim=0)
sdf = sdf[torch.randperm(sdf.shape[0]), :]
implicit_field = sdf[torch.abs(sdf[:, 3]) < self.voxel_size]
implicit_field = downsample(implicit_field, self.implicit_input_cnt)
query = sdf[torch.abs(sdf[:, 3]) < self.query_dist_filter]
query = downsample(query, self.query_cnt)
# translate
if self.mode == 'train':
translation = 4 * torch.rand([1, 4]) * self.voxel_size
translation[0, 3] = 0.
else:
translation = torch.zeros([1, 4])
surface = surface + translation[:, :3]
query = query + translation
implicit_field = implicit_field + translation
# normalize
surface = quantize(surface, self.voxel_size)
query = query / self.voxel_size
query_coord, query_val = query.split(3, 1)
implicit_field = implicit_field / self.voxel_size
query_val = query_val.view(-1)
query_val = self.convert_rep(query_val)
implicit_field[:, 3] = self.convert_rep(implicit_field[:, 3])
return {
'surface_voxel': surface, # torch tensor of N1 x 3
'implicit_field': implicit_field, # torch tensor of N2 x 4
'query_coord': query_coord, # torch tensor of N3 x 3
'query_val': query_val, # torch tensor of N3
'translation': translation, # torch tensor of 1 x 4
'file_name': data_name,
'path': data_path,
}
def __len__(self):
return len(self.data_list)
def test(self, model: Model, writer: SummaryWriter, step):
training = model.training
model.eval()
# collect testset
test_sample_num = self.config['test_sample_num']
surfaces = {}
for file_name in self.data_list:
data_path = os.path.join(self.data_root, file_name + '.npz')
data = np.load(data_path)
surfaces[file_name] = torch.tensor(
data['surface'][:test_sample_num]
).float()
print('Collected {} complete shapes'.format(len(surfaces)))
data_loader = DataLoader(
self,
batch_size=self.config['test_batch_size'],
num_workers=self.config['num_workers'],
collate_fn=self.collate_fn,
drop_last=False,
shuffle=False
)
test_chamfer_l1 = []
for test_step, data in tqdm(enumerate(data_loader)):
file_names = data['file_name']
gts = [surfaces[file_name].to(self.device) for file_name in file_names]
pred_pcs = model.get_pointcloud(data, step)
for batch_idx, pred_pc in enumerate(pred_pcs):
pred_coords_down = torch.stack(pred_pc, dim=0).to(self.device)
chamfer_l1s = compute_chamfer_l1(pred_coords_down, gts[batch_idx])
test_chamfer_l1.append(chamfer_l1s[0])
chamfer_l1 = np.array(test_chamfer_l1).mean()
print('chamfer_l1: {}'.format(chamfer_l1))
# write to tensorboard
model.scalar_summaries['metrics/chamfer_l1'] += [chamfer_l1]
model.write_dict_summaries(step)
model.train(training)
|
python
|
"""
factoidbot.py - A plugin for remembering facts.
Copyright (C) 2007 Kevin Smith
SleekBot is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
SleekBot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import logging
import pickle
from sleekbot.commandbot import botcmd
from sleekbot.plugbot import BotPlugin
class FactStore(object):
""" Storage for facts """
def __init__(self):
self.null = None
self.data = {}
self.loaddefault()
def list_terms(self):
return self.data.keys()
def add(self, term, fact):
self.data[term.lower()] = fact
self.savedefault()
def get(self, term):
if term.lower() in self.data:
return self.data[term.lower()]
return "No facts known about " + term
def delete(self, term):
if term.lower() in self.data:
del self.data[term.lower()]
self.savedefault()
def loaddefault(self):
self.load("factoids.dat")
def savedefault(self):
self.save("factoids.dat")
def load(self, filename):
try:
f = open(filename, 'rb')
        except IOError:
logging.warning("Error loading factoids. Cannot open fact file: %s",
filename)
return
self.data = pickle.load(f)
f.close()
def save(self, filename):
try:
f = open(filename, 'wb')
except IOError:
logging.warning("Error saving factoids. Cannot open fact file: %s",
filename)
return
pickle.dump(self.data, f)
f.close()
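# Illustrative usage sketch (not part of the original plugin):
#
#     store = FactStore()                      # loads factoids.dat if present
#     store.add('python', 'An interpreted language named after Monty Python.')
#     store.get('python')                      # -> the stored fact
#     store.delete('python')                   # persists the change to disk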
class Factoid(BotPlugin):
"""A plugin to remember facts."""
def _on_register(self):
self.factstore = FactStore()
@botcmd(name='fact', usage='fact [topic]')
def handle_fact(self, command, args, msg):
"""Returns a fact"""
subcommand = None
term = None
fact = None
if args.count(" ") > 1:
[subcommand, term, fact] = args.split(" ", 2)
elif args.count(" ") > 0:
[subcommand, term] = args.split(" ", 1)
else:
subcommand = args
admin_commands = ['list', 'add', 'delete']
#non-admin commands
if subcommand not in admin_commands:
response = "facts for " + args + "\n" + args + ": " + \
self.factstore.get(args)
return response
#admin commands
if "list" == subcommand:
if not self.bot.msg_from_admin(msg):
return "You do not have access to this function"
terms = self.factstore.list_terms()
response = "I know about the following topics:\n"
for term in terms:
response = response + "\t" + term
response = response + "."
elif "add" == subcommand:
if not self.bot.msg_from_admin(msg):
response = "You do not have access to this function"
            elif term is not None and fact is not None:
self.factstore.add(term, fact)
response = "Fact added"
else:
response = "To add a fact, both a topic and " + \
"description are needed."
elif "delete" == subcommand:
if not self.bot.msg_from_admin(msg):
response = "You do not have access to this function"
else:
self.factstore.delete(term)
response = "Deleted (if found)"
logging.debug("handle_fact done: %s" % response)
return response
|
python
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class FilesConfig(AppConfig):
"""Application config for files."""
name = "apps.files"
verbose_name = _("Files")
label = "files"
|
python
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import copy
import argparse
import os
def histogram(image):
# determine the normalized histogram
m, n = image.shape
hist = [0.0] * 256
for i in range(m):
for j in range(n):
#for every intensity add the count
hist[image[i, j]] += 1
return np.array(hist)/(m*n)
def cumulativeSum(hist):
# calculate the cumulative sum
return [sum(hist[:i+1]) for i in range(len(hist))]
def histogramEqualization(image):
#calculate Histogram
hist = histogram(image)
#find the cdf function
cdf = np.array(cumulativeSum(hist))
#multiply cdf with 255
transfer = np.uint8(255 * cdf)
k, l = image.shape
final = np.zeros_like(image)
# construct the final histogram equalization image
for i in range(0, k):
for j in range(0, l):
final[i, j] = transfer[image[i, j]]
return final
def gamma_correction(img,gamma):
gamma = 1/gamma
lT =[]
for i in np.arange(0,256).astype(np.uint8):
lT.append(np.uint8(((i/255)**gamma)*255))
lookup = np.array(lT)
#Creating the lookup table to find values
corrected = cv2.LUT(img,lookup)
return corrected
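# Worked example (illustrative): with gamma = 1.8 the lookup table maps a pixel
# value of 128 to roughly 255 * (128 / 255) ** (1 / 1.8) ≈ 174, i.e. mid-tones
# are brightened, which is the intent for the night-drive footage below.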
def main(args):
video = cv2.VideoWriter('Night_Drive_Correction.avi',cv2.VideoWriter_fourcc(*'XVID'), 20,(1024,600))
cap = cv2.VideoCapture(args['file'])
method = args['method']
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
frame = cv2.resize(frame, (1024,600))
#split in b,g,r
b,g,r= cv2.split(frame)
if (method == 'histogram'):
#compute histogram equalization for each channel
b1 = histogramEqualization(b)
g1 = histogramEqualization(g)
r1 = histogramEqualization(r)
#merge the channels
final = cv2.merge((b1,g1,r1))
elif (method == 'gamma'):
final = gamma_correction(frame, 1.8)
else:
            print('Invalid method; exiting')
return
cv2.imshow('Final', final)
video.write(final)
if cv2.waitKey(25) & 0XFF == ord('q'):
break
    cap.release()
    cv2.destroyAllWindows()
    video.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-method", "--method", required=True, help="Input: histogram or gamma", type=str)
parser.add_argument("-path", "--file", required=False, help="video path", default='Night Drive - 2689.mp4', type=str)
args = vars(parser.parse_args())
if (not os.path.exists(args['file'])):
        print('File does not exist. Re-run with the correct path, or place the file in the current directory and run again.')
exit()
main(args)
|
python
|
from json import JSONDecodeError
from typing import Dict
import pytest
from common.serializers.serialization import node_status_db_serializer
from plenum.common.constants import LAST_SENT_PRE_PREPARE
from plenum.common.util import getNoInstances
from plenum.test.test_node import ensureElectionsDone, getPrimaryReplica
from plenum.test.view_change.helper import ensure_view_change
nodeCount = 7
def pack_pp_key(value: Dict) -> bytes:
return node_status_db_serializer.serialize(value)
def unpack_pp_key(value: bytes) -> Dict:
return node_status_db_serializer.deserialize(value)
@pytest.fixture(scope="module")
def view_no_set(looper, txnPoolNodeSet):
for _ in range(2):
ensure_view_change(looper, txnPoolNodeSet)
ensureElectionsDone(looper, txnPoolNodeSet)
assert txnPoolNodeSet[0].viewNo == 2
@pytest.fixture(scope="function")
def setup(txnPoolNodeSet):
for node in txnPoolNodeSet:
if LAST_SENT_PRE_PREPARE in node.nodeStatusDB:
node.nodeStatusDB.remove(LAST_SENT_PRE_PREPARE)
for replica in node.replicas.values():
replica.h = 0
replica._lastPrePrepareSeqNo = 0
replica.last_ordered_3pc = (replica.viewNo, 0)
@pytest.fixture(scope="function")
def replica_with_unknown_primary_status(txnPoolNodeSet, setup):
replica = txnPoolNodeSet[0].replicas[1]
old_primary_name = replica._primaryName
replica._primaryName = None
yield replica
replica._primaryName = old_primary_name
def test_store_last_sent_pp_seq_no_if_some_stored(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
pack_pp_key({1: [2, 5]}))
node.last_sent_pp_store_helper.store_last_sent_pp_seq_no(inst_id=1,
pp_seq_no=6)
assert unpack_pp_key(node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)) == \
{'1': [2, 6]}
def test_store_last_sent_pp_seq_no_if_none_stored(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.last_sent_pp_store_helper.store_last_sent_pp_seq_no(inst_id=1,
pp_seq_no=6)
assert unpack_pp_key(node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)) == \
{'1': [2, 6]}
def test_erase_last_sent_pp_seq_no_if_some_stored(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
pack_pp_key({'1': [2, 5]}))
node.last_sent_pp_store_helper.erase_last_sent_pp_seq_no()
assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB
def test_erase_last_sent_pp_seq_no_if_none_stored(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.last_sent_pp_store_helper.erase_last_sent_pp_seq_no()
assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB
def test_try_restore_last_sent_pp_seq_no_if_relevant_stored(
tconf, txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
pack_pp_key({1: [2, 5]}))
node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no()
assert replica.lastPrePrepareSeqNo == 5
assert replica.last_ordered_3pc == (2, 5)
assert replica.h == 5
assert replica.H == 5 + tconf.LOG_SIZE
def test_try_restore_last_sent_pp_seq_no_if_irrelevant_stored(
tconf, txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
pack_pp_key({2: [1, 9]}))
node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no()
assert replica.lastPrePrepareSeqNo == 0
assert replica.last_ordered_3pc == (2, 0)
assert replica.h == 0
assert replica.H == 0 + tconf.LOG_SIZE
def test_try_restore_last_sent_pp_seq_no_if_none_stored(
tconf, txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no()
assert replica.lastPrePrepareSeqNo == 0
assert replica.last_ordered_3pc == (2, 0)
assert replica.h == 0
assert replica.H == 0 + tconf.LOG_SIZE
def test_try_restore_last_sent_pp_seq_no_if_invalid_stored(
tconf, txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
pack_pp_key({1: [2, 5]})[:-1])
node.last_sent_pp_store_helper.try_restore_last_sent_pp_seq_no()
assert replica.lastPrePrepareSeqNo == 0
assert replica.last_ordered_3pc == (2, 0)
assert replica.h == 0
assert replica.H == 0 + tconf.LOG_SIZE
def test_cannot_restore_last_sent_pp_seq_no_if_another_view(
txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no(
1, [1, 5])
assert can is False
def test_cannot_restore_last_sent_pp_seq_no_if_replica_absent(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
assert node.viewNo == 2
absent_replica_index = getNoInstances(nodeCount)
can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no(
absent_replica_index, [2, 5])
assert can is False
def test_cannot_restore_last_sent_pp_seq_no_if_replica_status_unknown(
view_no_set, setup, replica_with_unknown_primary_status):
replica = replica_with_unknown_primary_status
assert replica.instId == 1
node = replica.node
assert node.viewNo == 2
can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no(
1, [2, 5])
assert can is False
def test_cannot_restore_last_sent_pp_seq_no_if_replica_is_master(
txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=0)
node = replica.node
assert node.viewNo == 2
can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no(
0, [2, 5])
assert can is False
def test_can_restore_last_sent_pp_seq_no_if_relevant(
txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
can = node.last_sent_pp_store_helper._can_restore_last_sent_pp_seq_no(
1, [2, 5])
assert can is True
def test_restore_last_sent_pp_seq_no(
tconf, txnPoolNodeSet, view_no_set, setup):
replica = getPrimaryReplica(txnPoolNodeSet, instId=1)
node = replica.node
assert node.viewNo == 2
node.last_sent_pp_store_helper._restore_last_stored(
1, [2, 5])
for replica in node.replicas.values():
if replica.instId == 1:
assert replica.lastPrePrepareSeqNo == 5
assert replica.last_ordered_3pc == (2, 5)
assert replica.h == 5
assert replica.H == 5 + tconf.LOG_SIZE
else:
assert replica.lastPrePrepareSeqNo == 0
assert replica.last_ordered_3pc == (2, 0)
assert replica.h == 0
assert replica.H == tconf.LOG_SIZE
def test_can_load_absent_last_sent_pre_preapre_key(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
assert pp_key is None
def test_cannot_load_last_sent_pre_preapre_key_if_empty_value(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE, b'')
with pytest.raises(JSONDecodeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_not_valid_dict(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({1: [2, 5]})[:-1])
with pytest.raises(JSONDecodeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_none(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize(None))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_dict_has_no_entries(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({}))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_inst_id_missed(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize([2, 5]))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_view_no_missed(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize([1, 5]))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_pp_seq_no_missed(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize([1, 2]))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_json_has_extra_fields(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({'1': [2, 5, 1]}))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_inst_id_is_not_int(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({None: [2, 5]}))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_view_no_is_not_int(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({1: ['', 5]}))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_cannot_load_last_sent_pre_preapre_key_if_pp_seq_not_int(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({'1': [2, 5.0]}))
with pytest.raises(TypeError):
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
def test_can_load_valid_last_sent_pre_preapre_key_if_valid(
txnPoolNodeSet, view_no_set, setup):
node = txnPoolNodeSet[0]
node.nodeStatusDB.put(LAST_SENT_PRE_PREPARE,
node_status_db_serializer.serialize({'1': [2, 5]}))
pp_key = node.last_sent_pp_store_helper._load_last_sent_pp_key()
assert pp_key == {'1': [2, 5]}
|
python
|
__author__ = 'etuka'
__date__ = '22 March 2019'
import os
import csv
import ntpath
import pandas as pd
from django.conf import settings
from dal.copo_da import Sample, Description
from django.core.files.storage import FileSystemStorage
from web.apps.web_copo.lookup.copo_enums import Loglvl, Logtype
lg = settings.LOGGER
"""
class handles the ingestion of csv data to supply metadata for description
"""
class IngestData:
def __init__(self, description_token=str(), profile_id=str()):
self.description_token = description_token
self.profile_id = self.set_profile_id(profile_id)
self.schema = Sample().get_schema().get("schema_dict")
def set_profile_id(self, profile_id):
p_id = profile_id
if not p_id and self.description_token:
description = Description().GET(self.description_token)
p_id = description.get("profile_id", str())
return p_id
def get_object_path(self):
"""
function returns directory to description data
:return:
"""
object_path = os.path.join(settings.MEDIA_ROOT, 'description_data', self.description_token)
return object_path
def get_object_file_path(self):
"""
function returns file path to description data
:return:
"""
file_path = os.path.join(self.get_object_path(), 'uploaded.csv')
return file_path
def save_uploaded_csv(self, csv_file):
"""
function saves the passed file to the file system
:param csv_file:
:return: boolean - indicating success or otherwise of file save
"""
result = dict(status='success', message='')
if csv_file:
csv_file.name = ntpath.basename(self.get_object_file_path())
            # remove the previous file, if any
if os.path.exists(self.get_object_file_path()):
os.remove(self.get_object_file_path())
fs = FileSystemStorage(location=self.get_object_path())
try:
fs.save(csv_file.name, csv_file)
except Exception as e:
message = 'Error Ingesting data: ' + str(e)
print(message)
lg.log(message, level=Loglvl.ERROR, type=Logtype.FILE)
raise
return result
def align_columns(self):
"""
function compares ingested columns to generated columns - they should align
:return:
"""
result = dict(status='success', message='')
if not os.path.exists(self.get_object_file_path()):
result["status"] = "error"
result["message"] = "Couldn't locate uploaded CSV. Try re-uploading."
return result
with open(self.get_object_file_path(), 'r') as fobject:
ingested_columns = (next(csv.reader(fobject)))
description = Description().GET(self.description_token)
stored_columns = description.get("meta", dict()).get("generated_columns", list())
ingested_columns = [x.strip().lower() for x in ingested_columns if x.strip()]
stored_columns = [x['title'].strip().lower() for x in stored_columns if x['title'].strip()]
if not ingested_columns == stored_columns:
result["status"] = "error"
result["message"] = "Headers from uploaded CSV do not match displayed columns."
return result
return result
def align_rows(self):
"""
function compares ingested sample names to generated names - they should align
:return:
"""
result = dict(status='success', message='')
ingested_df = pd.read_csv(self.get_object_file_path())
ingested_df.columns = [x.lower() for x in list(ingested_df.columns)]
ingested_names = list(ingested_df.name)
description = Description().GET(self.description_token)
stored_names = description.get("meta", dict()).get("generated_names", str()).split(",")
ingested_names.sort()
stored_names.sort()
if not ingested_names == stored_names:
result["status"] = "error"
result["message"] = "Sample names from uploaded CSV do not match displayed names."
return result
return result
def manage_process(self, csv_file):
"""
function orchestrates the ingestion of metadata to description metadata
:param csv_file: metadata file to be ingested
:return: returns updated dataset
"""
# save uploaded csv
result = self.save_uploaded_csv(csv_file=csv_file)
if result["status"] == "error":
return result
# match ingested columns to rendered columns
result = self.align_columns()
if result["status"] == "error":
return result
# match ingested sample names to rendered names
result = self.align_rows()
if result["status"] == "error":
return result
# process data
result = self.align_rows()
if result["status"] == "error":
return result
return result
def process_data(self):
"""
having passed preliminary tests, function processes ingested data
:return:
"""
|
python
|
from FeatureModel import pointPillarFeatureNet
from ModelBackbone import pointPillarModel
from ModelBackbone import model
class TrainingPipeline:
def __init__(self, trainPillars, trainLabels, testPillars, testLabels):
self.trainPillars = trainPillars
self.trainLabels = trainLabels
self.testPillars = testPillars
self.testLabels = testLabels
def trainModel(self):
'''ppFeatureNet = pointPillarFeatureNet.PointPillarFeatureNet()
ppFeatures, input_pillars, input_indices = ppFeatureNet.feedForward()
ppModel = pointPillarModel.PointPillarModel("./myModel.h5py")
ppModel.createModelBackbone(ppFeatures, self.trainPillars, self.trainLabels, self.testPillars, self.testLabels, input_pillars, input_indices) '''
mod = model.Model()
return mod.train(self.trainPillars, self.trainLabels, self.testPillars, self.testLabels)
|
python
|
import markdown
from atomicpress.app import app
from atomicpress.models import Post, PostStatus, PostType
from flask import send_from_directory
from sqlalchemy import desc
from werkzeug.contrib.atom import AtomFeed
from flask import request
@app.route("/uploads/<filename>")
def uploaded_file(filename):
return send_from_directory(app.config["UPLOADS_PATH"], filename)
@app.route("/feed/atom/")
def feed_latest_posts():
feed_url = request.url
url_root = request.url_root.strip("/")
if "SITE_URL" in app.config:
url_root = app.config["SITE_URL"]
feed_url = "%s%s" % (url_root, request.path)
feed = AtomFeed("Recent posts", feed_url=feed_url, url=url_root)
posts = Post.query.order_by(desc(Post.date)).\
filter(Post.status == PostStatus.PUBLISH).\
filter(Post.type == PostType.POST)
for post in posts:
content = post.content
if post.markdown:
content = markdown.markdown(content)
if post.author:
author_name = post.author.nicename
else:
author_name = "Empty"
feed.add(post.title, unicode(content),
content_type='html',
author=author_name,
url="%s/%s" % (url_root, post.name),
updated=post.date,
published=post.modified)
return feed.get_response()
|
python
|
#!/usr/bin/python
import csv
import os.path
from collections import namedtuple
import sn
import os
import sys,string
import numpy as np
import math
import vcf
import fnmatch
#try:
# file_map = sys.argv[1];dir_files_phenotype1 = sys.argv[2];dir_files_phenotype2 = sys.argv[3];outfilename = sys.argv[4]
#except:
# print "Usage:",sys.argv[0], "file.map dir_files_phenotype1 dir_files_phenotype2 outfile";sys.exit(1)
file_map="/home/cristovao/Desktop/AUS_project/public_datas/opensnp_datadump.201303070733/phenotypes_201303070733.csv"
folder="/home/cristovao/Desktop/AUS_project/public_datas/opensnp_datadump.201303070733"
def get_dataset():
"""return """
handle = csv.DictReader(open(file_map, "r"),
#fieldnames=["user_id","date_of_birth","chrom_sex","Jewish Ancestry","Subjective dream intensity","Webbed toes","Dyslexia","Artistic ability","lips size","ethnicity","Acrophobia","Myers-Briggs Type Indicator","Irritable Bowel Syndrome","Diego Blood Group","Cholesterol","Moles raised","Autism","Interest in Spirituality and Mysticism","Physician-diagnosed celiac/coeliac disease","Hypertriglyceridemia","SAT Writing","Panic Disorder","Bone Mineral Density","Sexual Preferences","Energy Level","Faktor 5 Leiden (F5)","Age learned to read","ear proximity to head ","Atheism","Earwax type","ring finger longer than index finger","Eye with Blue Halo ","Beard Color","Birth year","Migraine frequency","Serotonin transporter","Sport interest","Number of toes","Number of wisdom teeth","Widow's Peak","natural skinny","Wake up preference","Lisp","Do you like the taste of hops?","Wanting to be immortal","Purposefulness ","Ambition","Do hops taste like soap?","ABH Blood Group (antigens) ","Fish Preference","Smell of coffee in urine","hair on fingers","Neanderthal","Are You The Advertising Phenotype?","(male) penis releases pre-cum when sexually aroused.","Morton's Toe","Sports interest","Does cilantro taste like soap to you?","Tongue roller","Enjoy watching TV","Aspirin Allergy","libido ","Blood type","First word","Enjoy using the Internet","mtDNA Haplogroup (PhyloTree)","Like the taste of Stevia","Negative reaction to fluoroquinolone antibiotics","white skin","Fat-pad knee syndrome","Ability to Tan","Strabismus","Amblyopia","Autoimmune disorder","Y-DNA Haplogroup (ISOGG)","Asthma","Freckling","form of the nose","Ancestry","Metabolic Syndrome [MetS]","Enjoy riding a motorbike","Hair Color","Tea consumption","Height","Sex","Motion sickness","Cystic Fibrosis Like Disease","mouth size","Peanut butter preference","Sneezing induced by sexual ideation or orgasm?","Woolnerian Tip (Darwin's Tubercle)","SAT Math","prognathism","Taste of broccoli","Jogger","Phobia","Kell Blood Group (K/k antigens) ","Desmoid Tumor","SAT Verbal","Astigmatism","excessive daytime sleepiness","Enjoy driving a car","ABO Rh ","Kidd Blood Group","Sense of smell","apthous in mouth tendency","Allergic/bad reaction to fish oil supplements","Interested in news from real newspaper / news from the Internet","erectil disfunction ","Index Toe Longer than Big Toe","Hair Type","Penis Circumference at Glans","Penis Length","Intolerance: gluten, casein, soy","Weight","Short-sightedness (Myopia)","brown hair colour","SAT - when taken","Anorgasmia","Nicotine dependence","CMV serostatus","Musical Perfect Pitch","Rheumatoid Arthritis","(Male) Nipple's size","ADHD","Insect bites and stings","Colour Blindness","Lactose intolerance","Have ME/CFS","Atypical Sulfonomide Antibiotic Reaction","Cramps","Political Ideology","Handedness","cluster headache","Eye color","Social Level","Earlobe: Free or attached","Photic Sneeze Reflex (Photoptarmis)","Coffee consumption","Penicillin reaction","Do you have a parent who was diagnosed with Alzheimer's disease?","R1b1a2a1a1b","Good / poor eater as child","Abnormal Blood Pressure","Type II Diabetes","Migraine","Colon cancer ONLY FOR (rs3219489 GG)!","Ability to find a bug in openSNP","Eurogenes","head form","Cleverness","ENTP","Can you smell cut-grass?","Asparagus Metabolite Detection"],
delimiter=";")
return handle
def get_user(pheno, variation):
"""Return list of the user with a specific variation """
dataset = get_dataset()
user_list = []
for i in dataset:
if i[pheno] == variation:
user_list.append(i["user_id"])
dataset=[]
return user_list
def create_dir(user_list,variation):
"""Create a folder from a list of the user"""
user_list=list(set(user_list))
print "total of the user", len(user_list), user_list
files= os.listdir(folder)
#variation="_".join(variation.split())
os.system("mkdir "+variation)
n=0
for j in user_list:
for i in files:
if fnmatch.fnmatch(i, '*.txt'):
u="user"+j+"_"
if u in i:
print i
os.system("cp "+folder+"/"+i +" " +variation+"/")
n=1+n
print "total of the files copied", n
#------------------ execution ------------------- "Eye color"
fieldnames=open(file_map).readline().split(';')
fieldnames.sort()
print "\n\n--------------------------- fieldnames (Phenotypes)\n"
for i in fieldnames:
print i
p=raw_input("\n--------------------------- Phenotype: ")
variations_list=[]
for i in get_dataset():
if not i[p] in variations_list:
variations_list.append(i[p])
print i[p]
v=raw_input("\n--------------------------- Variations: ")
v=v.split(";")
print "\n"
os.system("mkdir "+"_".join(p.split()))
for i in v:
print "Variations: ", i
l=get_user( p, i)
variation="_".join(i.split())
create_dir(l,variation)
os.system("mv "+ variation+" "+"_".join(p.split()))
print "\n"
|
python
|
"""
This is a utility script for updating the spacy meta.json
Sample call
python --meta meta.json --augment metrics/dane_augmented_best_dacy_small_trf-0.1.0.json --
"""
import json
def main(meta_json, meta_augment_json, size, decimals=3):
with open(meta_json) as f:
meta = json.load(f)
with open(meta_augment_json) as f:
meta_augment = json.load(f)
meta["email"] = "[email protected]"
meta["author"] = "Centre for Humanities Computing Aarhus"
meta["url"] = "https://chcaa.io/#/"
meta["license"] = "Apache-2.0 License"
mdl_used = {
"small": {
"name": "Maltehb/-l-ctra-danish-electra-small-cased",
"author": "Malte Højmark-Bertelsen",
"url": "https://huggingface.co/Maltehb/-l-ctra-danish-electra-small-cased",
"license": "CC BY 4.0",
},
"medium": {
"name": "Maltehb/danish-bert-botxo",
"author": "BotXO.ai",
"url": "https://huggingface.co/Maltehb/danish-bert-botxo",
"license": "CC BY 4.0",
},
"large": {
"name": "xlm-roberta-large",
"author": "Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, Veselin Stoyanov",
"url": "https://huggingface.co/xlm-roberta-large",
"license": "CC BY 4.0",
},
}
model = mdl_used[size]
meta["sources"] = [
{
"name": "UD Danish DDT v2.5",
"url": "https://github.com/UniversalDependencies/UD_Danish-DDT",
"license": "CC BY-SA 4.0",
"author": "Johannsen, Anders; Mart\u00ednez Alonso, H\u00e9ctor; Plank, Barbara",
},
{
"name": "DaNE",
"url": "https://github.com/alexandrainst/danlp/blob/master/docs/datasets.md#danish-dependency-treebank-dane",
"license": "CC BY-SA 4.0",
"author": "Rasmus Hvingelby, Amalie B. Pauli, Maria Barrett, Christina Rosted, Lasse M. Lidegaard, Anders S\u00f8gaard",
},
model,
]
meta["requirements"] = ["spacy-transformers>=1.0.3,<1.1.0"]
meta[
"description"
] = f"""
<a href="https://github.com/centre-for-humanities-computing/Dacy"><img src="https://centre-for-humanities-computing.github.io/DaCy/_static/icon.png" width="175" height="175" align="right" /></a>
# DaCy {size} transformer
DaCy is a Danish language processing framework with state-of-the-art pipelines as well as functionality for analysing Danish pipelines.
DaCy's largest pipeline has achieved State-of-the-Art performance on Named entity recognition, part-of-speech tagging and dependency
parsing for Danish on the DaNE dataset. Check out the [DaCy repository](https://github.com/centre-for-humanities-computing/DaCy) for material on how to use DaCy and reproduce the results.
DaCy also contains guides on usage of the package as well as behavioural test for biases and robustness of Danish NLP pipelines.
"""
meta[
"notes"
] = """
## Bias and Robustness
Besides the validation done by SpaCy on the DaNE testset, DaCy also provides a series of augmentations to the DaNE test set to see how well the models deal with these types of augmentations.
These can be seen as behavioural probes akin to the NLP checklist.
### Deterministic Augmentations
Deterministic augmentations are augmentations which always yield the same result.
| Augmentation | Part-of-speech tagging (Accuracy) | Morphological tagging (Accuracy) | Dependency Parsing (UAS) | Dependency Parsing (LAS) | Sentence segmentation (F1) | Lemmatization (Accuracy) | Named entity recognition (F1) |
| --- | --- | --- | --- | --- | --- | --- | --- |
"""
for aug, metrics in meta_augment.items():
if metrics["k"] == 1:
pos = f'{round(metrics["mean"]["pos_acc"], decimals)}'
morph = f'{round(metrics["mean"]["morph_acc"], decimals)}'
dep_uas = f'{round(metrics["mean"]["dep_uas"], decimals)}'
dep_las = f'{round(metrics["mean"]["dep_las"], decimals)}'
sent_f = f'{round(metrics["mean"]["sents_f"], decimals)}'
lemma = f'{round(metrics["mean"]["lemma_acc"], decimals)}'
ents_f = f'{round(metrics["mean"]["ents_f"], decimals)}'
meta[
"notes"
] += f"| {aug} | {pos} | {morph} | {dep_uas} | {dep_las} | {sent_f} | {lemma} | {ents_f} |\n"
meta[
"notes"
] += """
### Stochastic Augmentations
Stochastic augmentations are augmentations which are repeated multiple times to estimate the effect of the augmentation.
| Augmentation | Part-of-speech tagging (Accuracy) | Morphological tagging (Accuracy) | Dependency Parsing (UAS) | Dependency Parsing (LAS) | Sentence segmentation (F1) | Lemmatization (Accuracy) | Named entity recognition (F1) |
| --- | --- | --- | --- | --- | --- | --- | --- |
"""
for aug, metrics in meta_augment.items():
if metrics["k"] > 1:
            pos = f'{round(metrics["mean"]["pos_acc"], decimals)} ({round(metrics["std"]["pos_acc"], decimals)})'
            morph = f'{round(metrics["mean"]["morph_acc"], decimals)} ({round(metrics["std"]["morph_acc"], decimals)})'
            dep_uas = f'{round(metrics["mean"]["dep_uas"], decimals)} ({round(metrics["std"]["dep_uas"], decimals)})'
            dep_las = f'{round(metrics["mean"]["dep_las"], decimals)} ({round(metrics["std"]["dep_las"], decimals)})'
            sent_f = f'{round(metrics["mean"]["sents_f"], decimals)} ({round(metrics["std"]["sents_f"], decimals)})'
            lemma = f'{round(metrics["mean"]["lemma_acc"], decimals)} ({round(metrics["std"]["lemma_acc"], decimals)})'
            ents_f = f'{round(metrics["mean"]["ents_f"], decimals)} ({round(metrics["std"]["ents_f"], decimals)})'
meta[
"notes"
] += f"| {aug} | {pos} | {morph} | {dep_uas} | {dep_las} | {sent_f} | {lemma} | {ents_f} |\n"
meta["notes"] += create_description()
meta[
"notes"
] += "\n\n### Hardware\nThis was run an trained on a Quadro RTX 8000 GPU."
with open(f"template_meta_{size}.json", "w") as f:
json.dump(meta, f)
def create_description():
    from augment import augmenters
    description = """
<details>
<summary> Description of Augmenters </summary>
"""
    for aug, nam, k, desc in augmenters:
        description += f"\n\n**{nam}:**\n{desc}"
    description += "\n </details> \n <br /> \n"
    return description
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--meta", type=str, help="the meta file you wish to update", required=True
)
parser.add_argument(
"--augment",
type=str,
help="the json file of the augmented resutls",
required=True,
)
parser.add_argument("--size", type=str, help="the model size", required=True)
args = parser.parse_args()
main(args.meta, args.augment, args.size)
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import kaiming_init, normal_init
from mmdet.ops import ConvModule
from ..builder import build_loss
from ..registry import HEADS
@HEADS.register_module
class GridHead(nn.Module):
def __init__(self,
grid_points=9,
num_convs=8,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
point_feat_channels=64,
deconv_kernel_size=4,
class_agnostic=False,
loss_grid=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=15),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=36)):
super(GridHead, self).__init__()
self.grid_points = grid_points
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.point_feat_channels = point_feat_channels
self.conv_out_channels = self.point_feat_channels * self.grid_points
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
assert self.conv_out_channels % norm_cfg['num_groups'] == 0
assert self.grid_points >= 4
self.grid_size = int(np.sqrt(self.grid_points))
if self.grid_size * self.grid_size != self.grid_points:
raise ValueError('grid_points must be a square number')
# the predicted heatmap is half of whole_map_size
if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supported in Grid R-CNN')
self.whole_map_size = self.roi_feat_size * 4
# compute point-wise sub-regions
self.sub_regions = self.calc_sub_regions()
self.convs = []
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
stride = 2 if i == 0 else 1
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=True))
self.convs = nn.Sequential(*self.convs)
self.deconv1 = nn.ConvTranspose2d(
self.conv_out_channels,
self.conv_out_channels,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
self.deconv2 = nn.ConvTranspose2d(
self.conv_out_channels,
grid_points,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
# find the 4-neighbor of each grid point
self.neighbor_points = []
grid_size = self.grid_size
for i in range(grid_size): # i-th column
for j in range(grid_size): # j-th row
neighbors = []
if i > 0: # left: (i - 1, j)
neighbors.append((i - 1) * grid_size + j)
if j > 0: # up: (i, j - 1)
neighbors.append(i * grid_size + j - 1)
if j < grid_size - 1: # down: (i, j + 1)
neighbors.append(i * grid_size + j + 1)
if i < grid_size - 1: # right: (i + 1, j)
neighbors.append((i + 1) * grid_size + j)
self.neighbor_points.append(tuple(neighbors))
# total edges in the grid
self.num_edges = sum([len(p) for p in self.neighbor_points])
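        # Illustrative note (not part of the original logic): for the default
        # 3x3 grid (grid_points=9) the indices are laid out column-major as
        #     0 3 6
        #     1 4 7
        #     2 5 8
        # so the centre point (index 4) has neighbors (1, 3, 5, 7) and
        # num_edges sums to 24.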
self.forder_trans = nn.ModuleList() # first-order feature transition
self.sorder_trans = nn.ModuleList() # second-order feature transition
for neighbors in self.neighbor_points:
fo_trans = nn.ModuleList()
so_trans = nn.ModuleList()
for _ in range(len(neighbors)):
# each transition module consists of a 5x5 depth-wise conv and
# 1x1 conv.
fo_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
stride=1,
padding=2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
so_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
1,
2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
self.forder_trans.append(fo_trans)
self.sorder_trans.append(so_trans)
self.loss_grid = build_loss(loss_grid)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
# TODO: compare mode = "fan_in" or "fan_out"
kaiming_init(m)
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
normal_init(m, std=0.001)
nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01))
def forward(self, x):
assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
# RoI feature transformation, downsample 2x
x = self.convs(x)
c = self.point_feat_channels
# first-order fusion
x_fo = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_fo[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_fo[i] = x_fo[i] + self.forder_trans[i][j](
x[:, point_idx * c:(point_idx + 1) * c])
# second-order fusion
x_so = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_so[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
# predicted heatmap with fused features
x2 = torch.cat(x_so, dim=1)
x2 = self.deconv1(x2)
x2 = F.relu(self.norm1(x2), inplace=True)
heatmap = self.deconv2(x2)
# predicted heatmap with original features (applicable during training)
if self.training:
x1 = x
x1 = self.deconv1(x1)
x1 = F.relu(self.norm1(x1), inplace=True)
heatmap_unfused = self.deconv2(x1)
else:
heatmap_unfused = heatmap
return dict(fused=heatmap, unfused=heatmap_unfused)
def calc_sub_regions(self):
"""Compute point specific representation regions.
See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
"""
# to make it consistent with the original implementation, half_size
# is computed as 2 * quarter_size, which is smaller
half_size = self.whole_map_size // 4 * 2
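        # Illustrative computation, assuming the default config above
        # (grid_points=9, roi_feat_size=14): whole_map_size is 56, so
        # half_size is 28 and every sub-region below is a 28x28 window.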
sub_regions = []
for i in range(self.grid_points):
x_idx = i // self.grid_size
y_idx = i % self.grid_size
if x_idx == 0:
sub_x1 = 0
elif x_idx == self.grid_size - 1:
sub_x1 = half_size
else:
ratio = x_idx / (self.grid_size - 1) - 0.25
sub_x1 = max(int(ratio * self.whole_map_size), 0)
if y_idx == 0:
sub_y1 = 0
elif y_idx == self.grid_size - 1:
sub_y1 = half_size
else:
ratio = y_idx / (self.grid_size - 1) - 0.25
sub_y1 = max(int(ratio * self.whole_map_size), 0)
sub_regions.append(
(sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
return sub_regions
def get_target(self, sampling_results, rcnn_train_cfg):
# mix all samples (across images) together.
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
dim=0).cpu()
pos_gt_bboxes = torch.cat(
[res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
assert pos_bboxes.shape == pos_gt_bboxes.shape
# expand pos_bboxes to 2x of original size
x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
num_rois = pos_bboxes.shape[0]
map_size = self.whole_map_size
# this is not the final target shape
targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
dtype=torch.float)
# pre-compute interpolation factors for all grid points.
# the first item is the factor of x-dim, and the second is y-dim.
# for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
factors = []
for j in range(self.grid_points):
x_idx = j // self.grid_size
y_idx = j % self.grid_size
factors.append((1 - x_idx / (self.grid_size - 1),
1 - y_idx / (self.grid_size - 1)))
radius = rcnn_train_cfg.pos_radius
radius2 = radius**2
for i in range(num_rois):
# ignore small bboxes
if (pos_bbox_ws[i] <= self.grid_size
or pos_bbox_hs[i] <= self.grid_size):
continue
# for each grid point, mark a small circle as positive
for j in range(self.grid_points):
factor_x, factor_y = factors[j]
gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
1 - factor_x) * pos_gt_bboxes[i, 2]
gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
1 - factor_y) * pos_gt_bboxes[i, 3]
cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
map_size)
cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
map_size)
for x in range(cx - radius, cx + radius + 1):
for y in range(cy - radius, cy + radius + 1):
if x >= 0 and x < map_size and y >= 0 and y < map_size:
if (x - cx)**2 + (y - cy)**2 <= radius2:
targets[i, j, y, x] = 1
# reduce the target heatmap size by a half
# proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
sub_targets = []
for i in range(self.grid_points):
sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
sub_targets = torch.cat(sub_targets, dim=1)
sub_targets = sub_targets.cuda()
return sub_targets
def loss(self, grid_pred, grid_targets):
loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
loss_grid = loss_fused + loss_unfused
return dict(loss_grid=loss_grid)
def get_bboxes(self, det_bboxes, grid_pred, img_meta):
# TODO: refactoring
assert det_bboxes.shape[0] == grid_pred.shape[0]
det_bboxes = det_bboxes.cpu()
cls_scores = det_bboxes[:, [4]]
det_bboxes = det_bboxes[:, :4]
grid_pred = grid_pred.sigmoid().cpu()
R, c, h, w = grid_pred.shape
half_size = self.whole_map_size // 4 * 2
assert h == w == half_size
assert c == self.grid_points
# find the point with max scores in the half-sized heatmap
grid_pred = grid_pred.view(R * c, h * w)
pred_scores, pred_position = grid_pred.max(dim=1)
xs = pred_position % w
ys = pred_position // w
# get the position in the whole heatmap instead of half-sized heatmap
for i in range(self.grid_points):
xs[i::self.grid_points] += self.sub_regions[i][0]
ys[i::self.grid_points] += self.sub_regions[i][1]
# reshape to (num_rois, grid_points)
pred_scores, xs, ys = tuple(
map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
# get expanded pos_bboxes
widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
x1 = (det_bboxes[:, 0, None] - widths / 2)
y1 = (det_bboxes[:, 1, None] - heights / 2)
# map the grid point to the absolute coordinates
abs_xs = (xs.float() + 0.5) / w * widths + x1
abs_ys = (ys.float() + 0.5) / h * heights + y1
# get the grid points indices that fall on the bbox boundaries
x1_inds = [i for i in range(self.grid_size)]
y1_inds = [i * self.grid_size for i in range(self.grid_size)]
x2_inds = [
self.grid_points - self.grid_size + i
for i in range(self.grid_size)
]
y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
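        # Illustrative note: for the default 3x3 grid these index sets are
        # [0, 1, 2] (left edge), [0, 3, 6] (top edge), [6, 7, 8] (right edge)
        # and [2, 5, 8] (bottom edge), respectively.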
# voting of all grid points on some boundary
bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
bbox_res = torch.cat(
[bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
bbox_res[:, [0, 2]].clamp_(min=0, max=img_meta[0]['img_shape'][1] - 1)
bbox_res[:, [1, 3]].clamp_(min=0, max=img_meta[0]['img_shape'][0] - 1)
return bbox_res
|
python
|
#!/usr/bin/env python
import os
rootdir = "."
for subdir, dirs, files in os.walk(rootdir):
    for file in files:
        path = os.path.join(subdir, file)
        if "RLBin" in path:
            os.remove(path)
            print(path + " removed")
|
python
|
#!/usr/bin/env python2
# Copyright (c) 2019 Erik Schilling
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from emuvim.api.osm.pre_configured_osm import PreConfiguredOSM
from mininet.log import setLogLevel
setLogLevel('debug')
with PreConfiguredOSM() as osm:
osm.onboard_vnfd('vnfs/ping_vnf')
osm.onboard_vnfd('vnfs/pong_vnf')
nsd_id = osm.onboard_nsd('services/pingpong_ns')
ns_id = osm.ns_create('pingpong-test', nsd_id)
osm.ns_wait_until_all_in_status('running')
osm.ns_delete(ns_id)
osm.ns_wait_until_all_in_status('terminated')
|
python
|
from .db_api import DbApi
from .meta import Db
from .schema import *
class Impl(DbApi):
def __init__(self, db):
assert isinstance(db, Db)
DbApi.__init__(self)
self.__db = db
def __del__(self):
self.close()
def close(self):
if self.__db is not None:
self.__db.close()
self.__db = None
def add_source_file(self, filename):
"""
Returns the ID for the source file. Raises exception if it
already exists.
"""
return self.__db.table('SOURCE_FILE').insert(filename)
def get_source_file_id(self, filename):
"""
Returns the ID for the source file, or None if not present.
"""
c = self.__db.query(
"SELECT source_file_id FROM SOURCE_FILE WHERE source_location = ?",
filename
)
ret = None
for r in c:
ret = r[0]
c.close()
break
return ret
def get_source_file_for_id(self, source_file_id):
c = self.__db.query(
"SELECT source_location FROM SOURCE_FILE WHERE source_file_id = ?",
source_file_id
)
ret = None
for r in c:
ret = r[0]
c.close()
break
return ret
def add_tag(self, file_id, tag_name, tag_value):
"""
Returns the ID of the tag.
"""
return self.__db.table('TAG').insert(
file_id, tag_name, tag_value
)
def add_keyword(self, file_id, keyword):
"""
Returns the ID of the keyword.
"""
return self.__db.table('FILE_KEYWORD').insert(
file_id, keyword
)
def delete_keywords_for_source_id(self, file_id):
return self.__db.table('FILE_KEYWORD').delete_where(
'source_file_id = ?',
file_id
)
def get_keywords_for_id(self, file_id):
ret = set()
c = self.__db.query(
'SELECT keyword FROM FILE_KEYWORD WHERE source_file_id = ?',
file_id
)
for r in c:
ret.add(r[0])
return ret
def get_tags_for_id(self, file_id):
ret = {}
c = self.__db.query(
'SELECT tag_name, tag_value FROM TAG WHERE source_file_id = ?',
file_id
)
for r in c:
ret[r[0]] = r[1]
return ret
def add_target_file(self, source_file_id, target_filename):
"""
Returns the ID of the target file.
"""
return self.__db.table('TARGET_FILE').insert(
source_file_id, target_filename
)
def get_target_file(self, source_file_id):
ret = None
c = self.__db.query(
'SELECT target_location FROM TARGET_FILE WHERE source_file_id = ?',
source_file_id
)
for r in c:
ret = r[0]
c.close()
break
return ret
def get_source_id_for_target_file(self, target_filename):
ret = None
c = self.__db.query(
'SELECT source_file_id FROM TARGET_FILE WHERE target_location = ?',
target_filename
)
for r in c:
ret = r[0]
c.close()
break
return ret
def get_source_file_for_target_file(self, target_filename):
ret = None
c = self.__db.query(
"""
SELECT source_location FROM SOURCE_FILE sf
INNER JOIN TARGET_FILE tf ON sf.source_file_id = tf.source_file_id
WHERE target_location = ?
""",
target_filename
)
for r in c:
ret = r[0]
c.close()
break
return ret
def find_target_files(self, target_match=None):
ret = set()
if target_match is None:
c = self.__db.query('SELECT target_location FROM TARGET_FILE')
else:
c = self.__db.query(
'SELECT target_location FROM TARGET_FILE WHERE target_location LIKE ?',
target_match
)
for r in c:
ret.add(r[0])
return ret
def get_source_files_with_tags(self, tags, exact=True):
"""
Returns the source file names that has the matching tag keys to tag values.
If none are found, then an empty list is returned.
"""
# This is a messy query that really doesn't work with sqlite.
# So instead we'll do multiple queries and shrink the result
# down in code.
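        # For instance (a hypothetical call, shown only to illustrate the
        # narrowing strategy): tags={'artist': 'X', 'album': 'Y'} first
        # selects the ids whose 'artist' tag matches 'X', then keeps only
        # those ids that also have an 'album' tag matching 'Y'.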
tag_keys = []
tag_values = []
for k, v in tags.items():
tag_keys.append(k)
tag_values.append(v)
if len(tag_keys) <= 0:
return []
matching_file_ids = set()
if exact:
value_match_sql = "tag_value = ?"
else:
value_match_sql = "tag_value LIKE ?"
c = self.__db.query(
'SELECT source_file_id FROM TAG WHERE tag_name = ? and {0}'.format(
value_match_sql),
tag_keys[0], tag_values[0]
)
for r in c:
matching_file_ids.add(str(r[0]))
if len(matching_file_ids) <= 0:
return []
for i in range(1, len(tag_keys)):
c = self.__db.query(
'SELECT source_file_id FROM TAG WHERE tag_name = ? AND {0} AND source_file_id in ({1})'.format(
value_match_sql, ','.join('?' * len(matching_file_ids))),
tag_keys[i], tag_values[i], *matching_file_ids
)
matching_file_ids = set()
for r in c:
matching_file_ids.add(str(r[0]))
c.close()
if len(matching_file_ids) <= 0:
return []
c = self.__db.query(
'SELECT source_location FROM SOURCE_FILE WHERE source_file_id in ({0})'.format(
','.join('?' * len(matching_file_ids))),
*matching_file_ids
)
ret = []
for r in c:
ret.append(r[0])
return ret
def get_source_files_with_matching_keywords(self, keywords):
"""
Returns a list of [source file name, keyword],
        possibly with duplicate source files, for any keyword.
"""
ksql = []
for k in keywords:
ksql.append('?')
c = self.__db.query(
'''SELECT source_location, keyword FROM FILE_KEYWORD fk
INNER JOIN SOURCE_FILE sf
ON fk.source_file_id = sf.source_file_id
WHERE keyword IN ({0})'''.format(','.join(ksql)),
*keywords
)
ret = []
for r in c:
ret.append((r[0], r[1]))
return ret
def add_duplicate(self, source_id, duplicate_of_id):
return self.__db.table('DUPLICATE_FILE').insert(
source_id, duplicate_of_id
)
def get_duplicate_of_id(self, source_id):
"""
Returns the source file ID of the file marked as a duplicate of the
source file.
"""
c = self.__db.query(
'SELECT duplicate_of_source_file_id FROM DUPLICATE_FILE WHERE source_file_id = ?',
source_id
)
ret = None
for r in c:
ret = r[0]
c.close()
break
return ret
def get_duplicate_ids_for_id(self, duplicate_of_id):
"""
Get the source id for the duplicate_of_id.
"""
ret = set()
c = self.__db.query(
'SELECT source_file_id FROM DUPLICATE_FILE WHERE duplicate_of_source_file_id = ?',
duplicate_of_id
)
for r in c:
ret.add(r[0])
return ret
def get_duplicate_filenames_for_id(self, source_id):
"""
Get the filenames for any duplicate of the source id. Does not
look for duplicates of duplicates.
"""
ret = []
for d in self.get_duplicate_data_for_id(source_id):
ret.append(d['location'])
return ret
def get_duplicate_data_for_id(self, source_id):
"""
Returns any duplicate of the source id as get_duplicate_filenames_for_id.
Each value in the returned collection is a dictionary.
Does not look for duplicates of duplicates.
"""
dup_ids = set()
ret = []
c = self.__db.query(
"""SELECT
sf.source_file_id, sf.source_location, d.duplicate_id, d.duplicate_of_source_file_id
FROM SOURCE_FILE sf
INNER JOIN DUPLICATE_FILE d
ON sf.source_file_id = d.source_file_id
WHERE d.duplicate_of_source_file_id = ?
""",
source_id
)
for r in c:
if r[0] not in dup_ids and r[0] != source_id:
dup_ids.add(r[0])
ret.append({
'source_file_id': r[0],
'source_location': r[1],
'duplicate_id': r[2],
'duplicate_of_source_file_id': r[3],
# User meaningful data
'filename': r[1]
})
c = self.__db.query(
"""SELECT
sf.source_file_id, sf.source_location, d.duplicate_id, d.duplicate_of_source_file_id
FROM SOURCE_FILE sf
INNER JOIN DUPLICATE_FILE d
ON sf.source_file_id = d.duplicate_of_source_file_id
WHERE d.source_file_id = ?
""",
source_id
)
for r in c:
            if r[0] not in dup_ids and r[0] != source_id:
                dup_ids.add(r[0])
ret.append({
'source_file_id': r[0],
'source_location': r[1],
'duplicate_id': r[2],
'duplicate_of_source_file_id': r[3],
# User meaningful data
'filename': r[1]
})
return ret
def delete_duplicate_id(self, duplicate_id):
return self.__db.table('DUPLICATE_FILE').delete_by_id(duplicate_id)
def get_source_files_like(self, name_like=None):
ret = set()
if name_like is None:
c = self.__db.query('SELECT source_location FROM SOURCE_FILE')
else:
c = self.__db.query(
'SELECT source_location FROM SOURCE_FILE WHERE source_location LIKE ?',
name_like
)
for r in c:
ret.add(r[0])
return ret
def remove_tags_for_source_id(self, source_id):
return self.__db.table('TAG').delete_where(
"source_file_id = ?",
source_id
)
def delete_source_graph(self, source_id):
self.__db.table('DUPLICATE_FILE').delete_where(
"duplicate_of_source_file_id = ? OR source_file_id = ?",
source_id, source_id
)
self.__db.table('FILE_KEYWORD').delete_where(
"source_file_id = ?",
source_id
)
self.__db.table('TAG').delete_where(
"source_file_id = ?",
source_id
)
self.__db.table('TARGET_FILE').delete_where(
"source_file_id = ?",
source_id
)
return self.__db.table('SOURCE_FILE').delete_by_id(source_id)
def delete_transcoded_file_for_source_id(self, source_id):
return self.__db.table('TARGET_FILE').delete_where(
"source_file_id = ?",
source_id
)
def get_source_files_without_tag_names(self, tag_names):
ret = set()
# Need to perform the query for every tag name, individually.
for tag_name in tag_names:
c = self.__db.query("""
SELECT source_location FROM SOURCE_FILE
WHERE source_file_id NOT IN (
SELECT source_file_id FROM TAG WHERE tag_name = ?
)
""", tag_name)
for r in c:
ret.add(r[0])
return ret
# TODO temporary to get past a weird encoding.
def get_source_file_ids_like(self, like):
c = self.__db.query("""
SELECT source_file_id FROM SOURCE_FILE
WHERE source_location LIKE ?
""", like)
for r in c:
yield r[0]
|
python
|
# vpe6080 Analog Input Thermistor Module 8 Channel
# Demo Program reads 8 channels
# Thermistor 10K Ohm 3380 Beta installed in Channel 1 to read room temperature
import asyncio
from pywlmio import *
NodeID = 7  # NodeID is determined by the backplane ID (jumpers) and the power supply slot location
async def main():
init()
th = VPE6080(NodeID)
try:
await asyncio.gather(
th.ch1.configure(1), # Channel Enabled, default 3380 Beta, 25°C Room Value
th.ch2.configure(0), # Channel Disabled
th.ch3.configure(0), # Channel Disabled
th.ch4.configure(0), # Channel Disabled
th.ch5.configure(0), # Channel Disabled
th.ch6.configure(0), # Channel Disabled
th.ch7.configure(0), # Channel Disabled
th.ch8.configure(0) # Channel Disabled
)
except WlmioWrongNodeError:
print("Error NodeID = %d Wrong module installed" % NodeID) # Error Check if wrong type of module installed
except WlmioInternalError:
print("Error NodeID = %d Timed out" % NodeID) # Error Check - Typically module not installed
while True:
try:
a = await asyncio.gather(
th.ch1.read(), # Read Channel 1
th.ch2.read(), # Read Channel 2
th.ch3.read(), # Read Channel 3
th.ch4.read(), # Read Channel 4
th.ch5.read(), # Read Channel 5
th.ch6.read(), # Read Channel 6
th.ch7.read(), # Read Channel 7
th.ch8.read() # Read Channel 8
)
print("Module VPE6080 NodeID = %d" % NodeID)
print("Reading Array = ", a) # Array holds all input channel readings
            # Readings are scaled x10 and are in kelvin; divide by 10 and subtract 273.15 to convert to °C
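            # For example (illustrative numbers only): a raw reading of 2981
            # corresponds to 298.1 K, i.e. roughly 25.0 °C.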
print("Channel 1 Thermistor = %0.1f Deg C" % (a[0] / 10 - 273.15)) # Print channel 1
print("")
except WlmioWrongNodeError:
print("Error NodeID = %d Wrong module installed" % NodeID) # Error Check if wrong type of module installed
except WlmioInternalError:
print("Error NodeID = %d Timed out" % NodeID) # Error Check - Typically module not installed
await asyncio.sleep(1)
asyncio.run(main(), debug=True)
|
python
|
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
tests.sabnews - Fake newsserver to use in end-to-end testing
Run sabnews.py -h for parameters!
"""
import argparse
import asyncio
import logging
import os
import re
import time
from random import randint
import sabyenc3
logging.getLogger().setLevel(logging.INFO)
# Expecting the following message-id:
# ARTICLE <file=folder/filename.mkv|part=4|start=5000|size=5000>\r\n
ARTICLE_INFO = re.compile(
b"^(ARTICLE|BODY) (?P<message_id><file=(?P<file>.*)\\|part=(?P<part>\\d+)\\|start=(?P<start>\\d+)\\|size=(?P<size>\\d+)>)\\r\\n$",
re.MULTILINE,
)
YENC_ESCAPE = [0x00, 0x0A, 0x0D, ord("="), ord(".")]
class NewsServerProtocol(asyncio.Protocol):
def __init__(self):
self.transport = None
self.connected = False
self.in_article = False
super().__init__()
def connection_made(self, transport):
logging.info("Connection from %s", transport.get_extra_info("peername"))
self.transport = transport
self.connected = True
self.transport.write(b"200 Welcome (SABNews)\r\n")
def data_received(self, message):
logging.debug("Data received: %s", message.strip())
# Handle basic commands
if message.startswith(b"QUIT"):
self.close_connection()
elif message.startswith((b"ARTICLE", b"BODY")):
parsed_message = ARTICLE_INFO.search(message)
self.serve_article(parsed_message)
# self.transport.write(data)
def serve_article(self, parsed_message):
# Check if we parsed everything
try:
message_id = parsed_message.group("message_id")
file = parsed_message.group("file").decode("utf-8")
file_base = os.path.basename(file)
part = int(parsed_message.group("part"))
start = int(parsed_message.group("start"))
size = int(parsed_message.group("size"))
except (AttributeError, ValueError):
logging.warning("Can't parse article information")
self.transport.write(b"430 No Such Article Found (bad message-id)\r\n")
return
# Check if file exists
if not os.path.exists(file):
logging.warning("File not found: %s", file)
self.transport.write(b"430 No Such Article Found (no file on disk)\r\n")
return
# Check if sizes are valid
file_size = os.path.getsize(file)
if start + size > file_size:
logging.warning("Invalid start/size attributes")
self.transport.write(b"430 No Such Article Found (invalid start/size attributes)\r\n")
return
logging.debug("Serving %s" % message_id)
# File is found, send headers
self.transport.write(b"222 0 %s\r\n" % message_id)
self.transport.write(b"Message-ID: %s\r\n" % message_id)
self.transport.write(b'Subject: "%s"\r\n\r\n' % file_base.encode("utf-8"))
# Write yEnc headers
self.transport.write(
b"=ybegin part=%d line=128 size=%d name=%s\r\n" % (part, file_size, file_base.encode("utf-8"))
)
self.transport.write(b"=ypart begin=%d end=%d\r\n" % (start + 1, start + size))
with open(file, "rb") as inp_file:
inp_file.seek(start)
inp_buffer = inp_file.read(size)
# Encode data
output_string, crc = sabyenc3.encode(inp_buffer)
self.transport.write(output_string)
# Write footer
self.transport.write(b"\r\n=yend size=%d part=%d pcrc32=%08x\r\n" % (size, part, crc))
self.transport.write(b".\r\n")
def close_connection(self):
logging.debug("Closing connection")
self.transport.write(b"205 Connection closing\r\n")
self.transport.close()
async def serve_sabnews(hostname, port):
# Start server
logging.info("Starting SABNews on %s:%d", hostname, port)
# Needed for Python 3.5 support!
loop = asyncio.get_event_loop()
server = await loop.create_server(lambda: NewsServerProtocol(), hostname, port)
return server
def create_nzb(nzb_file=None, nzb_dir=None, metadata=None):
article_size = 500000
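    # Illustrative example: a 1,200,000-byte file would be split by the loop
    # below into three segments of 500,000, 500,000 and 200,000 bytes.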
files_for_nzb = []
output_file = ""
# Either use directory or single file
if nzb_dir:
if not os.path.exists(nzb_dir) or not os.path.isdir(nzb_dir):
raise NotADirectoryError("%s is not a valid directory" % nzb_dir)
# List all files
files_for_nzb = [os.path.join(nzb_dir, fl) for fl in os.listdir(nzb_dir)]
files_for_nzb = [fl for fl in files_for_nzb if os.path.isfile(fl)]
output_file = os.path.join(nzb_dir, os.path.basename(os.path.normpath(nzb_dir)) + ".nzb")
if nzb_file:
if not os.path.exists(nzb_file) or not os.path.isfile(nzb_file):
raise FileNotFoundError("Cannot find %s or it is not a file" % nzb_file)
files_for_nzb = [nzb_file]
output_file = os.path.splitext(nzb_file)[0] + ".nzb"
if not files_for_nzb:
raise RuntimeError("No files found to include in NZB")
# Let's write a file!
with open(output_file, "w", encoding="utf-8") as nzb:
nzb.write('<?xml version="1.0" encoding="UTF-8"?>\n')
nzb.write('<!DOCTYPE nzb PUBLIC "-//newzBin//DTD NZB 1.0//EN" "http://www.newzbin.com/DTD/nzb/nzb-1.0.dtd">\n')
nzb.write('<nzb xmlns="http://www.newzbin.com/DTD/2003/nzb">\n')
if metadata:
nzb.write("<head>\n")
for meta_name, meta_value in metadata.items():
nzb.write('<meta type="%s">%s</meta>\n' % (meta_name, meta_value))
nzb.write("</head>\n")
nzb_time = time.time() - randint(0, int(time.time() - 746863566))
for fl in files_for_nzb:
            nzb.write('<file poster="SABNews" date="%d" subject="&quot;%s&quot;">\n' % (nzb_time, os.path.basename(fl)))
nzb.write("<groups><group>alt.binaries.test</group></groups>\n")
nzb.write("<segments>\n")
# Create segments
file_size = os.path.getsize(fl)
for seg_nr, seg_start in enumerate(range(0, file_size, article_size), 1):
                segment_size = min(article_size, file_size - seg_start)
                nzb.write(
                    '<segment number="%d" bytes="%d">file=%s|part=%s|start=%d|size=%d</segment>\n'
                    % (seg_nr, segment_size, fl, seg_nr, seg_start, segment_size)
                )
nzb.write("</segments>\n")
nzb.write("</file>\n")
nzb.write("</nzb>\n")
logging.info("NZB saved to %s" % output_file)
return output_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", help="Hostname", dest="hostname", default="127.0.0.1")
parser.add_argument("-p", help="Port", dest="port", type=int, default=8888)
parser.add_argument("--nzbfile", help="Create NZB of specified file", dest="nzb_file", metavar="FILE")
parser.add_argument("--nzbdir", help="Create NZB for files in specified directory", dest="nzb_dir", metavar="DIR")
args = parser.parse_args()
# Serve if we are not creating NZB's
if not args.nzb_file and not args.nzb_dir:
loop = asyncio.get_event_loop()
loop.run_until_complete(serve_sabnews(args.hostname, args.port))
loop.run_forever()
else:
create_nzb(args.nzb_file, args.nzb_dir)
if __name__ == "__main__":
main()
|
python
|
from setuptools import find_packages, setup
setup(
name = 'upbit_wrapper',
version = '0.0.9',
description = 'Python wrapper for upbit',
long_description = open('README.md','rt').read(),
long_description_content_type='text/markdown',
author = 'BS LEE',
author_email = '[email protected]',
url = 'https://github.com/beomsu317/upbit_wrapper',
install_requires = ['websocket','websocket-client','requests'],
    keywords = ['upbit'],
python_requires = '>=3',
license = 'MIT',
packages = find_packages(),
classifiers = [
'Programming Language :: Python :: 3.8'
],
zip_safe = False
)
|
python
|
from fqf_iqn_qrdqn.agent.base_agent import BaseAgent
from DMoGDiscrete.DMoGQ import DMoGQ
from fqf_iqn_qrdqn.utils import disable_gradients, update_params
from torch.optim import Adam
import torch
from DMoGDiscrete.utils import calculate_dmog_loss, evaluate_mog_at_action
class DMoGQAgent(BaseAgent):
def __init__(self, env, test_env, log_dir, num_steps=5 * (10 ** 7),
batch_size=32,
num_gaussians=5, eta=0.5, beta=3, delta=10,
lr=5e-5, memory_size=10 ** 6,
gamma=0.99, multi_step=1, update_interval=4,
target_update_interval=10000, start_steps=50000,
epsilon_train=0.01, epsilon_eval=0.001,
epsilon_decay_steps=250000, double_q_learning=False,
dueling_net=False, noisy_net=False, use_per=False,
log_interval=100, eval_interval=250000, num_eval_steps=125000,
max_episode_steps=27000, grad_cliping=None, cuda=True,
seed=0):
super(DMoGQAgent, self).__init__(env, test_env, log_dir, num_steps, batch_size, memory_size,
gamma, multi_step, update_interval, target_update_interval,
start_steps, epsilon_train, epsilon_eval, epsilon_decay_steps,
double_q_learning, dueling_net, noisy_net, use_per, log_interval,
eval_interval, num_eval_steps, max_episode_steps, grad_cliping,
cuda, seed)
self.num_gaussians = num_gaussians
self.eta = eta
self.beta = beta
self.delta = delta
# Online network.
self.online_net = DMoGQ(
num_channels=env.observation_space.shape[0],
num_actions=self.num_actions,
num_gaussians=num_gaussians,
dueling_net=dueling_net,
noisy_net=noisy_net).to(self.device)
# Target network.
self.target_net = DMoGQ(
num_channels=env.observation_space.shape[0],
num_actions=self.num_actions,
num_gaussians=num_gaussians,
dueling_net=dueling_net,
            noisy_net=noisy_net).to(self.device)
# Copy parameters of the learning network to the target network.
self.update_target()
# Disable calculations of gradients of the target network.
disable_gradients(self.target_net)
self.optim = Adam(
self.online_net.parameters(),
lr=lr, eps=1e-2 / batch_size)
def learn(self):
self.learning_steps += 1
self.online_net.sample_noise()
self.target_net.sample_noise()
if self.use_per:
(states, actions, rewards, next_states, dones), weights = \
self.memory.sample(self.batch_size)
else:
states, actions, rewards, next_states, dones = \
self.memory.sample(self.batch_size)
weights = None
dmog_loss = self.calculate_loss(
states, actions, rewards, next_states, dones, weights)
update_params(
self.optim, dmog_loss,
networks=[self.online_net],
retain_graph=False, grad_cliping=self.grad_cliping)
def calculate_loss(self, states, actions, rewards, next_states, dones,
weights):
mog_pi, mog_mu, mog_sigma = self.online_net(states=states)
mog_pi_sa, mog_mu_sa, mog_sigma_sa = evaluate_mog_at_action(mog_pi=mog_pi, mog_mu=mog_mu, mog_sigma=mog_sigma,
actions=actions)
assert mog_pi_sa.shape == (self.batch_size, self.num_gaussians, 1)
with torch.no_grad():
next_mog_pi, next_mog_mu, next_mog_sigma = self.target_net(states=next_states)
mog_q_value = torch.sum(next_mog_pi * next_mog_mu, dim=1)
next_actions = torch.argmax(mog_q_value, dim=1, keepdim=True)
assert next_actions.shape == (self.batch_size, 1)
next_mog_pi_sa, next_mog_mu_sa, next_mog_sigma_sa = \
evaluate_mog_at_action(mog_pi=next_mog_pi, mog_mu=next_mog_mu, mog_sigma=next_mog_sigma,
actions=next_actions)
assert next_mog_pi_sa.shape == (self.batch_size, 1, self.num_gaussians)
# Calculate target mog values.
target_mog_mu_sa = rewards[..., None] + (1.0 - dones[..., None]) * self.gamma_n * next_mog_mu_sa
target_mog_pi_sa = torch.tensor(1.0 / self.num_gaussians) * dones[..., None] + (
1.0 - dones[..., None]) * next_mog_pi_sa
target_mog_sigma_sa = torch.tensor(1.0) * dones[..., None] + (
1.0 - dones[..., None]) * self.gamma_n * next_mog_sigma_sa
assert target_mog_mu_sa.shape == (self.batch_size, self.num_gaussians, 1)
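        # Descriptive note on the targets above: the mixture means are shifted
        # by the reward and scaled by gamma^n, the sigmas are scaled by
        # gamma^n, and at terminal transitions the mixture collapses to
        # uniform weights with unit sigma around the reward.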
dmog_loss = calculate_dmog_loss(mog_pi_sa, mog_mu_sa, mog_sigma_sa,
target_mog_mu_sa, target_mog_pi_sa, target_mog_sigma_sa,
eta=self.eta, beta=self.beta, delta=self.delta,
weight=weights)
return dmog_loss
|
python
|
# Use random.randint(1, 10) to generate a list of 100 random elements, then sort the elements by how often they occur, from highest to lowest, and print the result
import random
numbers = [random.randint(1, 10) for i in range(100)]
numbers_info = {}
def sorted_by_freq(numbers):
    for number in numbers:  # iterate over the list of random numbers
        if number not in numbers_info:  # if this element has not been counted yet
            numbers_info[number] = numbers.count(number)  # use the element as key and its occurrence count as value
    return sorted(numbers_info.items(), key=lambda item: item[1], reverse=True)  # sort in descending order and return
ans = sorted_by_freq(numbers)
print(ans)
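# A more idiomatic sketch using only the standard library (shown for
# comparison; it yields the same (value, count) pairs in descending order):
# from collections import Counter
# print(Counter(numbers).most_common())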
|
python
|
# Copyright 2018, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reformulation of function statements.
Consult the developer manual for information. TODO: Add ability to sync
source code comments with developer manual sections.
"""
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementAssignmentVariableName,
StatementReleaseVariable
)
from nuitka.nodes.AsyncgenNodes import (
ExpressionAsyncgenObjectBody,
ExpressionMakeAsyncgenObject
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
StatementSpecialUnpackCheck
)
from nuitka.nodes.BuiltinNextNodes import ExpressionSpecialUnpack
from nuitka.nodes.BuiltinRefNodes import makeExpressionBuiltinRef
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.CoroutineNodes import (
ExpressionCoroutineObjectBody,
ExpressionMakeCoroutineObject
)
from nuitka.nodes.FunctionNodes import (
ExpressionFunctionBody,
ExpressionFunctionCreation,
ExpressionFunctionRef
)
from nuitka.nodes.GeneratorNodes import (
ExpressionGeneratorObjectBody,
ExpressionMakeGeneratorObject,
StatementGeneratorReturnNone
)
from nuitka.nodes.LocalsDictNodes import StatementSetLocalsDictionary
from nuitka.nodes.OutlineNodes import ExpressionOutlineFunction
from nuitka.nodes.ReturnNodes import StatementReturn, StatementReturnNone
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableNameRef,
ExpressionVariableRef
)
from nuitka.PythonVersions import python_version
from nuitka.specs.ParameterSpecs import ParameterSpec
from .ReformulationTryFinallyStatements import makeTryFinallyStatement
from .SyntaxErrors import raiseSyntaxError
from .TreeHelpers import (
buildAnnotationNode,
buildFrameNode,
buildNode,
buildNodeList,
detectFunctionBodyKind,
extractDocFromBody,
getKind,
makeCallNode,
makeDictCreationOrConstant2,
makeStatementsSequenceFromStatement,
mangleName
)
def _insertFinalReturnStatement(function_statements_body, return_statement):
if function_statements_body is None:
function_statements_body = makeStatementsSequenceFromStatement(
statement = return_statement
)
elif not function_statements_body.isStatementAborting():
function_statements_body.setStatements(
function_statements_body.getStatements() +
(
return_statement,
)
)
return function_statements_body
def _insertInitialSetLocalsDictStatement(function_body, function_statements_body):
locals_statement = StatementSetLocalsDictionary(
locals_scope = function_body.getFunctionLocalsScope(),
source_ref = function_body.source_ref
)
if function_statements_body is None:
function_statements_body = makeStatementsSequenceFromStatement(
statement = locals_statement
)
else:
function_statements_body.setStatements(
(
locals_statement,
) + function_statements_body.getStatements()
)
return function_statements_body
def _injectDecorator(decorators, inject, acceptable, source_ref):
assert type(inject) is str
assert type(acceptable) is tuple
for decorator in decorators:
if decorator.isExpressionVariableNameRef() and \
decorator.getVariableName() in acceptable:
break
else:
decorators.append(
makeExpressionBuiltinRef(
builtin_name = inject,
source_ref = source_ref
)
)
def buildFunctionNode(provider, node, source_ref):
# Functions have way too many details, pylint: disable=too-many-locals
assert getKind(node) == "FunctionDef"
function_statement_nodes, function_doc = extractDocFromBody(node)
function_kind, flags = detectFunctionBodyKind(
nodes = function_statement_nodes
)
function_body, code_body, code_object = buildFunctionWithParsing(
provider = provider,
function_kind = function_kind,
name = node.name,
function_doc = function_doc,
flags = flags,
node = node,
source_ref = source_ref
)
if function_kind in ("Generator", "Coroutine"):
if function_kind == "Coroutine":
code_body = ExpressionCoroutineObjectBody(
provider = function_body,
name = node.name,
code_object = code_object,
flags = flags,
source_ref = source_ref
)
maker_class = ExpressionMakeCoroutineObject
else:
code_body = ExpressionGeneratorObjectBody(
provider = function_body,
name = node.name,
code_object = code_object,
flags = flags,
source_ref = source_ref
)
maker_class = ExpressionMakeGeneratorObject
code_body.qualname_provider = provider
for variable in function_body.getVariables():
code_body.getVariableForReference(variable.getName())
function_body.setBody(
makeStatementsSequenceFromStatement(
statement = StatementReturn(
expression = maker_class(
ExpressionFunctionRef(
function_body = code_body,
source_ref = source_ref
),
source_ref = source_ref
),
source_ref = source_ref
)
)
)
decorators = buildNodeList(
provider = provider,
nodes = reversed(node.decorator_list),
source_ref = source_ref
)
defaults = buildNodeList(
provider = provider,
nodes = node.args.defaults,
source_ref = source_ref
)
kw_defaults = buildParameterKwDefaults(
provider = provider,
node = node,
function_body = function_body,
source_ref = source_ref
)
function_statements_body = buildFrameNode(
provider = code_body,
nodes = function_statement_nodes,
code_object = code_object,
source_ref = source_ref
)
if function_kind == "Function":
# TODO: Generators might have to raise GeneratorExit instead.
function_statements_body = _insertFinalReturnStatement(
function_statements_body = function_statements_body,
return_statement = StatementReturnNone(
source_ref = source_ref
)
)
if "has_exec" in flags:
function_statements_body = _insertInitialSetLocalsDictStatement(
function_body = code_body,
function_statements_body = function_statements_body,
)
if function_statements_body.isStatementsFrame():
function_statements_body = makeStatementsSequenceFromStatement(
statement = function_statements_body
)
code_body.setBody(
function_statements_body
)
annotations = buildParameterAnnotations(provider, node, source_ref)
function_creation = ExpressionFunctionCreation(
function_ref = ExpressionFunctionRef(
function_body = function_body,
source_ref = source_ref
),
defaults = defaults,
kw_defaults = kw_defaults,
annotations = annotations,
source_ref = source_ref
)
# Add the "staticmethod" decorator to __new__ methods if not provided.
# CPython made these optional, but secretly applies them when it does
# "class __new__". We add them earlier, so our optimization will see it.
if python_version < 300 and \
node.name == "__new__" and \
provider.isExpressionClassBody():
_injectDecorator(decorators, "staticmethod", ("staticmethod", "classmethod"), source_ref)
# Add the "classmethod" decorator to __init_subclass__ methods if not provided.
if python_version >= 360 and \
node.name == "__init_subclass__" and \
provider.isExpressionClassBody():
_injectDecorator(decorators, "classmethod", ("classmethod",), source_ref)
if python_version >= 370 and \
node.name == "__class_getitem__" and \
provider.isExpressionClassBody():
_injectDecorator(decorators, "classmethod", ("classmethod",), source_ref)
decorated_function = function_creation
for decorator in decorators:
decorated_function = makeCallNode(
decorator,
decorated_function,
decorator.getSourceReference()
)
result = StatementAssignmentVariableName(
provider = provider,
variable_name = mangleName(node.name, provider),
source = decorated_function,
source_ref = source_ref
)
if python_version >= 340:
function_body.qualname_setup = result.getVariableName()
return result
def buildAsyncFunctionNode(provider, node, source_ref):
# We are creating a function here that creates coroutine objects, with
# many details each, pylint: disable=too-many-locals
assert getKind(node) == "AsyncFunctionDef"
function_statement_nodes, function_doc = extractDocFromBody(node)
function_kind, flags = detectFunctionBodyKind(
nodes = function_statement_nodes,
start_value = "Coroutine"
)
creator_function_body, _, code_object = buildFunctionWithParsing(
provider = provider,
function_kind = function_kind,
name = node.name,
function_doc = function_doc,
flags = (),
node = node,
source_ref = source_ref
)
if function_kind == "Coroutine":
function_body = ExpressionCoroutineObjectBody(
provider = creator_function_body,
name = node.name,
code_object = code_object,
flags = flags,
source_ref = source_ref
)
else:
function_body = ExpressionAsyncgenObjectBody(
provider = creator_function_body,
name = node.name,
code_object = code_object,
flags = flags,
source_ref = source_ref
)
function_body.qualname_provider = provider
for variable in creator_function_body.getVariables():
function_body.getVariableForReference(variable.getName())
decorators = buildNodeList(
provider = provider,
nodes = reversed(node.decorator_list),
source_ref = source_ref
)
defaults = buildNodeList(
provider = provider,
nodes = node.args.defaults,
source_ref = source_ref
)
function_statements_body = buildFrameNode(
provider = function_body,
nodes = function_statement_nodes,
code_object = code_object,
source_ref = source_ref
)
function_statements_body = _insertFinalReturnStatement(
function_statements_body = function_statements_body,
return_statement = StatementGeneratorReturnNone(
source_ref = source_ref
)
)
if function_statements_body.isStatementsFrame():
function_statements_body = makeStatementsSequenceFromStatement(
statement = function_statements_body
)
function_body.setBody(
function_statements_body
)
annotations = buildParameterAnnotations(provider, node, source_ref)
kw_defaults = buildParameterKwDefaults(
provider = provider,
node = node,
function_body = creator_function_body,
source_ref = source_ref
)
if function_kind == "Coroutine":
creation_node = ExpressionMakeCoroutineObject(
coroutine_ref = ExpressionFunctionRef(
function_body = function_body,
source_ref = source_ref
),
source_ref = source_ref
)
else:
creation_node = ExpressionMakeAsyncgenObject(
asyncgen_ref = ExpressionFunctionRef(
function_body = function_body,
source_ref = source_ref
),
source_ref = source_ref
)
creator_function_body.setBody(
makeStatementsSequenceFromStatement(
statement = StatementReturn(
expression = creation_node,
source_ref = source_ref
)
)
)
function_creation = ExpressionFunctionCreation(
function_ref = ExpressionFunctionRef(
function_body = creator_function_body,
source_ref = source_ref
),
defaults = defaults,
kw_defaults = kw_defaults,
annotations = annotations,
source_ref = source_ref
)
decorated_function = function_creation
for decorator in decorators:
decorated_function = makeCallNode(
decorator,
decorated_function,
decorator.getSourceReference()
)
result = StatementAssignmentVariableName(
provider = provider,
variable_name = mangleName(node.name, provider),
source = decorated_function,
source_ref = source_ref
)
function_body.qualname_setup = result.getVariableName()
# Share the non-local declarations. TODO: This may also apply to generators
# and async generators.
creator_function_body.non_local_declarations = function_body.non_local_declarations
return result
def buildParameterKwDefaults(provider, node, function_body, source_ref):
    # Build keyword-only argument default values. We hide here that this is a
    # Python3-only feature.
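    # For example (illustrative only): for "def f(*, a=1, b)" this builds a
    # dictionary creation for {"a": 1}, while "b", having no default, is left out.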
if python_version >= 300:
kw_only_names = function_body.getParameters().getKwOnlyParameterNames()
if kw_only_names:
keys = []
values = []
for kw_only_name, kw_default in \
zip(kw_only_names, node.args.kw_defaults):
if kw_default is not None:
keys.append(kw_only_name)
values.append(
buildNode(provider, kw_default, source_ref)
)
kw_defaults = makeDictCreationOrConstant2(
keys = keys,
values = values,
source_ref = source_ref
)
else:
kw_defaults = None
else:
kw_defaults = None
return kw_defaults
def buildParameterAnnotations(provider, node, source_ref):
    # Too many branches, because there are too many cases, pylint: disable=too-many-branches
    # Build annotations. We hide here that this is a Python3-only feature.
if python_version < 300:
return None
# Starting with Python 3.4, the names of parameters are mangled in
# annotations as well.
if python_version < 340:
mangle = lambda variable_name: variable_name
else:
mangle = lambda variable_name: mangleName(variable_name, provider)
keys = []
values = []
def addAnnotation(key, value):
keys.append(mangle(key))
values.append(value)
def extractArg(arg):
if getKind(arg) == "Name":
assert arg.annotation is None
elif getKind(arg) == "arg":
if arg.annotation is not None:
addAnnotation(
key = arg.arg,
value = buildAnnotationNode(provider, arg.annotation, source_ref)
)
elif getKind(arg) == "Tuple":
for sub_arg in arg.elts:
extractArg(sub_arg)
else:
assert False, getKind(arg)
for arg in node.args.args:
extractArg(arg)
for arg in node.args.kwonlyargs:
extractArg(arg)
if python_version < 340:
if node.args.varargannotation is not None:
addAnnotation(
key = node.args.vararg,
value = buildNode(
provider, node.args.varargannotation, source_ref
)
)
if node.args.kwargannotation is not None:
addAnnotation(
key = node.args.kwarg,
value = buildNode(
provider, node.args.kwargannotation, source_ref
)
)
else:
if node.args.vararg is not None:
extractArg(node.args.vararg)
if node.args.kwarg is not None:
extractArg(node.args.kwarg)
# Return value annotation (not there for lambdas)
if hasattr(node, "returns") and node.returns is not None:
addAnnotation(
key = "return",
value = buildAnnotationNode(
provider, node.returns, source_ref
)
)
if keys:
return makeDictCreationOrConstant2(
keys = keys,
values = values,
source_ref = source_ref
)
else:
return None
def _wrapFunctionWithSpecialNestedArgs(name, outer_body, parameters, special_args, source_ref):
inner_name = name.strip("<>") + "$inner"
iter_vars = []
values = []
statements = []
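    # Illustrative note: this handles Python2-style nested parameters such as
    # "def f(a, (b, c))"; the tuple receives a synthetic parameter named ".1"
    # (see buildFunctionWithParsing below), which is unpacked into b and c here.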
def unpackFrom(source, arg_names):
accesses = []
sub_special_index = 0
iter_var = outer_body.allocateTempVariable(None, "arg_iter_%d" % len(iter_vars))
iter_vars.append(iter_var)
statements.append(
StatementAssignmentVariable(
variable = iter_var,
source = ExpressionBuiltinIter1(
value = source,
source_ref = source_ref
),
source_ref = source_ref
)
)
for element_index, arg_name in enumerate(arg_names):
if getKind(arg_name) == "Name":
arg_var = outer_body.createProvidedVariable(arg_name.id)
outer_body.registerProvidedVariable(arg_var)
statements.append(
StatementAssignmentVariable(
variable = arg_var,
source = ExpressionSpecialUnpack(
value = ExpressionTempVariableRef(
variable = iter_var,
source_ref = source_ref
),
count = element_index + 1,
expected = len(arg_names),
starred = False,
source_ref = source_ref
),
source_ref = source_ref
)
)
accesses.append(
ExpressionVariableRef(
variable = arg_var,
source_ref = source_ref
)
)
elif getKind(arg_name) == "Tuple":
accesses.extend(
unpackFrom(
source = ExpressionSpecialUnpack(
value = ExpressionTempVariableRef(
variable = iter_var,
source_ref = source_ref
),
count = element_index + 1,
expected = len(arg_names),
starred = False,
source_ref = source_ref
),
arg_names = arg_name.elts
)
)
sub_special_index += 1
else:
assert False, arg_name
statements.append(
StatementSpecialUnpackCheck(
iterator = ExpressionTempVariableRef(
variable = iter_var,
source_ref = source_ref
),
count = len(arg_names),
source_ref = source_ref
)
)
return accesses
for arg_name in parameters.getParameterNames():
if arg_name.startswith('.'):
source = ExpressionVariableNameRef(
provider = outer_body,
variable_name = arg_name,
source_ref = source_ref
)
values.extend(
unpackFrom(source, special_args[arg_name])
)
else:
values.append(
ExpressionVariableNameRef(
provider = outer_body,
variable_name = arg_name,
source_ref = source_ref
)
)
code_body = ExpressionOutlineFunction(
provider = outer_body,
name = inner_name,
source_ref = source_ref
)
statements.append(
StatementReturn(
expression = code_body,
source_ref = source_ref
)
)
outer_body.setBody(
makeStatementsSequenceFromStatement(
statement = makeTryFinallyStatement(
provider = outer_body,
tried = statements,
final = [
StatementReleaseVariable(
variable = variable,
source_ref = source_ref
)
for variable in
sorted(
outer_body.getTempVariables(),
key = lambda variable: variable.getName()
)
],
source_ref = source_ref,
public_exc = False
)
)
)
return code_body
def buildFunctionWithParsing(provider, function_kind, name, function_doc, flags,
node, source_ref):
# This contains a complex re-formulation for nested parameter functions.
# pylint: disable=too-many-locals
kind = getKind(node)
assert kind in ("FunctionDef", "Lambda", "AsyncFunctionDef"), "unsupported for kind " + kind
def extractArg(arg):
if arg is None:
return None
elif type(arg) is str:
return mangleName(arg, provider)
elif getKind(arg) == "Name":
return mangleName(arg.id, provider)
elif getKind(arg) == "arg":
return mangleName(arg.arg, provider)
elif getKind(arg) == "Tuple":
# These are to be re-formulated on the outside.
assert False
else:
assert False, getKind(arg)
special_args = {}
def extractNormalArgs(args):
normal_args = []
for arg in args:
if type(arg) is not str and getKind(arg) == "Tuple":
special_arg_name = ".%d" % (len(special_args) + 1)
special_args[special_arg_name] = arg.elts
normal_args.append(special_arg_name)
else:
normal_args.append(extractArg(arg))
return normal_args
normal_args = extractNormalArgs(node.args.args)
parameters = ParameterSpec(
ps_name = name,
ps_normal_args = normal_args,
ps_kw_only_args = [
extractArg(arg)
for arg in
node.args.kwonlyargs
]
if python_version >= 300 else
[],
ps_list_star_arg = extractArg(node.args.vararg),
ps_dict_star_arg = extractArg(node.args.kwarg),
ps_default_count = len(node.args.defaults)
)
message = parameters.checkParametersValid()
if message is not None:
raiseSyntaxError(
message,
source_ref.atColumnNumber(node.col_offset),
)
parent_module = provider.getParentModule()
code_object = CodeObjectSpec(
co_name = name,
co_kind = function_kind,
co_varnames = parameters.getParameterNames(),
co_argcount = parameters.getArgumentCount(),
co_kwonlyargcount = parameters.getKwOnlyParameterCount(),
co_has_starlist = parameters.getStarListArgumentName() is not None,
co_has_stardict = parameters.getStarDictArgumentName() is not None,
co_filename = parent_module.getRunTimeFilename(),
co_lineno = source_ref.getLineNumber(),
future_spec = parent_module.getFutureSpec()
)
outer_body = ExpressionFunctionBody(
provider = provider,
name = name,
code_object = code_object,
flags = flags,
doc = function_doc,
parameters = parameters,
source_ref = source_ref
)
# Wrap if necessary for special nested arguments.
if special_args:
code_body = _wrapFunctionWithSpecialNestedArgs(
name = name,
outer_body = outer_body,
parameters = parameters,
special_args = special_args,
source_ref = source_ref
)
else:
code_body = outer_body
return outer_body, code_body, code_object
def addFunctionVariableReleases(function):
assert function.isExpressionFunctionBodyBase()
releases = []
# We attach everything to the function definition source location.
source_ref = function.getSourceReference()
for variable in function.getLocalVariables():
# Shared variables are freed by function object attachment.
if variable.getOwner() is not function:
continue
releases.append(
StatementReleaseVariable(
variable = variable,
source_ref = source_ref
)
)
if releases:
body = function.getBody()
if body.isStatementsFrame():
body = makeStatementsSequenceFromStatement(
statement = body
)
body = makeTryFinallyStatement(
provider = function,
tried = body,
final = releases,
source_ref = source_ref
)
function.setBody(
makeStatementsSequenceFromStatement(
statement = body
)
)
# assert body.isStatementAborting(), body.asXmlText()
|
python
|
# 5 Write a program that converts meters to centimeters.
distancia = int(input('Enter a distance in meters: '))
converção = distancia * 100
print(f'For the given distance of {distancia} m, the conversion to centimeters is: {converção} cm ')
|
python
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
from io import StringIO
import os
import subprocess
import sys
import types
from .exceptions import (DeliveryTransportError, DeliveryPackingError)
from .pickle import pickle, unpickle, ModulePickle
class DeliveryBox(object):
"""Container for data exchange"""
# OUTPUT VALUES
stdout = None
stderr = None
return_value = None
exception = None
# INPUT VALUES
instance = None
func = None
args = None
kwargs = None
modules = set()
pickled_modules = set()
def __str__(self):
return "\n".join(["{:15s}: {}".format(key, value)
for (key, value) in self.__dict__.items()])
def __eq__(self, other):
return self.__dict__ == other.__dict__
class DeliveryBoy(object):
"""Operator for call the new process and handle input/output
When called the decorated function and non-standard modules stored in its
`__globals__` attribute are pickled and passed via the transport command
to the newly started python process.
If an exception is raised during execution of the decorated function, this
exception is pickled and reraised.
If `async` is `False`, STDOUT, STDERR and the return value of the decorated
function are returned upon calling the decorated function. Otherwise only
the process ID is returned; if a transport is defined, it is the process ID
of the transport, otherwise the process ID of the interpreter.
    After execution, anything the callable wrote to STDOUT and STDERR is
    written to STDOUT and STDERR of the main process. This applies only to
    synchronous execution!
:param func: Function object that is called in the new process
:type func: callable
:param transport: Transport command
:type transport: str
:param transport_params: Additional arguments for the transport command.
:type transport_params: list
:param executable: The python executable to be called.
Default: `sys.executable`.
:type executable: Absolute path of python interpreter
:param async: If set to `True`, this process will not wait for the process
called via the transport command to finish. Default: `False`
:type async: bool
:param discard_excess: If set to `False`, all output written to STDOUT by
the new process that is not redirected gets pre- or
appended accordingly to the delivery box.
Default: `True`
:type discard_excess: bool
:return: Return value of the decorated callable
:raises deliveryboy.exceptions.DeliveryPackingError: if decorated callable
is not supported, if a module cannot be added to the delivery box
:raises deliveryboy.exceptions.DeliveryTransportError: if calling the
transport or executable fail (e.g. command not found, exit code not
        equal zero).
"""
    def __init__(self, func, transport=None, transport_params=[],
                 executable=sys.executable, async_=False, discard_excess=True,
                 **params):
self.func = func
self.params = params
        self.async_ = async_
        self.discard_excess = discard_excess
self.executable = executable
self.transport = transport
self.transport_params = transport_params
self.inbox = DeliveryBox()
self.outbox = None
def __call__(self, *args, **kwargs):
self._pack_box(args, kwargs)
response = self._run_delivery()
if self.transport:
self.outbox, prefix, suffix = unpickle(response[0],
self.discard_excess)
if prefix or suffix:
self.outbox.stdout = prefix + self.outbox.stdout + suffix
self._pipe_stdout_err()
self._reraise()
return self.outbox.return_value
def __get__(self, obj, classobj=None):
if obj is not None:
self.inbox.instance = obj
return self
def _pack_box(self, args, kwargs):
"""Pack callable, arguments and modules
:param args: Arguments to be passed to the callable
:type args: list
:param kwargs: Arguments to be passed to the callable
:type kwargs: dict
"""
self.inbox.args = args
self.inbox.kwargs = kwargs
if isinstance(self.func, types.FunctionType):
self.inbox.func = self.func.__code__
self._pack_box_modules()
# myglobals = self.func.__globals__
else:
raise DeliveryPackingError(
"This type of callable is not supported"
)
def _pack_box_modules(self):
"""Add modules to box for pickling"""
allmodules = [(k, v) for (k, v) in self.func.__globals__.items()
if isinstance(v, types.ModuleType)
and not k.startswith("__")]
venv = os.environ.get("VIRTUAL_ENV", None)
path = sys.path[1:]
if venv:
path = [p for p in path if p and not p.startswith(venv)]
path.append(venv)
try:
# Handle builtins and modules from virtual env
# Start with those that have no __file__ attribute
self.inbox.modules |= set([k for (k, v) in allmodules
if getattr(v, '__file__', None) is None])
# Then add those from the system paths
for sitepath in path:
self.inbox.modules |= {
k for (k, v) in allmodules
if getattr(v, '__file__', '').startswith(sitepath)
}
except Exception as error:
raise DeliveryPackingError(
"Cannot pack built-in/venv modules",
real_exception=error
)
# TODO: This breaks availability of imported submodules
mod_pickle = ModulePickle(modules=[v for (k, v) in allmodules
if k not in self.inbox.modules])
self.inbox.pickled_modules = mod_pickle.pickle()
self.inbox.modules |= set([k for (k, v) in allmodules
if k not in self.inbox.modules])
def _run_delivery(self):
"""Executes the actual transport/executable
If `transport` is `None`, it and `transport_params` will be omitted
from the command line. In this case the callable is run directly.
        Also, in this case the `async_` option is ignored.
"""
if self.transport:
cmd = [self.transport, ] + self.transport_params + [
self.executable, "-m", "deliveryboy", pickle(self.inbox)
]
try:
child_process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
except Exception as error:
raise DeliveryTransportError(real_exception=error)
            if not self.async_:
response = child_process.communicate()
self._handle_call_error(response, child_process.returncode)
return response
else:
return child_process.pid
else:
self.outbox = execute(self.inbox)
def _handle_call_error(self, response, returncode):
if returncode:
raise DeliveryTransportError(
"Child process exited with {}: {}".format(
returncode, response[1].decode("utf8")
))
def _pipe_stdout_err(self):
"""Redirect STDOUT and STDERR from delivered callable"""
for stream in ["stdout", "stderr"]:
if isinstance(self.outbox, DeliveryBox) \
and getattr(self.outbox, stream, None):
print(
getattr(self.outbox, stream),
file=getattr(sys, stream)
)
def _reraise(self):
"""Re-raises an exception originating from the callable"""
if self.outbox and isinstance(self.outbox.exception, Exception):
raise self.outbox.exception
class DeliveryBoyDecorator(object):
"""Decorator for functions
Decorated functions are pickled and passed to a newly started python process
that is called via a transport command (e.g. sudo)
:param transport: Transport command
:type transport: str
:param executable: The python executable to be called.
Default: `sys.executable`.
:type executable: Absolute path of python interpreter
    :param async_: If set to `True`, this process will not wait for the process
        called via the transport command to finish. Default: `False`
    :type async_: bool
"""
def __init__(self, **params):
self.params = params
def __call__(self, func, *args, **kwargs):
return DeliveryBoy(func, **self.params)
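# A minimal usage sketch (illustrative only; the function body and the use of
# "sudo" as the transport command are assumptions, not part of this module):
#
#     @DeliveryBoyDecorator(transport="sudo")
#     def read_shadow():
#         with open("/etc/shadow") as fp:
#             return fp.read()
#
#     content = read_shadow()  # runs in a new interpreter started via sudo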
def execute(inbox):
"""Setup the environment and execute the decorated callable
:param inbox: Pickled :py:obj:`DeliveryBox` instance
:return: :py:obj:`DeliveryBox`
:raises deliveryboy.exception.DeliveryPackingError: If callable is missing
"""
# Load pickled modules
mod_pickle = ModulePickle(pickled=inbox.pickled_modules)
mod_pickle.unpickle()
# Import modules
globals().update({x: __import__(x) for x in inbox.modules})
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = StringIO()
sys.stderr = StringIO()
if inbox.func is not None and isinstance(inbox.func, types.CodeType):
func = types.FunctionType(inbox.func, globals())
else:
del mod_pickle
raise DeliveryPackingError("No callable to run in delivery box")
box = DeliveryBox()
try:
if inbox.instance is not None:
box.return_value = func(inbox.instance, *inbox.args, **inbox.kwargs)
else:
box.return_value = func(*inbox.args, **inbox.kwargs)
except Exception as error:
box.exception = error
box.stdout = sys.stdout.getvalue()
box.stderr = sys.stderr.getvalue()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
del mod_pickle
return box
def main():
"""Entry function for new process
This method unpickles data from the command line, redirects STDOUT + STDERR
and pickles the return value and exception
Input and output of this function are base64 encoded strings representing
pickled :py:obj:`deliveryboy.core.DeliveryBox` objects.
"""
try:
inbox = unpickle(bytes(sys.argv[1], "utf8"))[0]
except Exception as error:
box = DeliveryBox()
box.exception = error
else:
box = execute(inbox)
print(pickle(box))
|
python
|
# Kirillov Aleksey, IU7-22
from math import sqrt
from tkinter import *
root = Tk()
draw_pole = Canvas(root, width = 800, height = 600, bg = "white")
def dist(x, y, x1, y1, x2, y2):
    # Distance from the point (x, y) to the line through (x1, y1) and (x2, y2).
    length = abs((x-x1) * (y2-y1) - (y-y1) * (x2-x1)) /\
        sqrt((x2-x1)**2 + (y2-y1)**2)
    #print(length)
    return length
def uline(a1, b1, a2, b2, b):
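    # Linear interpolation: returns the 'a' coordinate of the point on the line
    # through (a1, b1) and (a2, b2) whose 'b' coordinate equals b.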
if b1 == b2:
return 0
else:
return (a2 - a1)*(b - b1)/(b2 - b1) + a1
R = float(input("Enter the radius of the circles: "))
points = []
print("\nВведите x и y точки через пробел; пустая строка завершает ввод:")
s = input()
while s != "":
points.append(list(map(float, s.split())))
s = input()
circles = []
print("Введите x и y центра окружности через пробел; \
пустая строка завершает ввод:")
s = input()
while s != "":
circles.append(list(map(float, s.split())))
s = input()
max_k = 0
point_a = 0
point_b = 0
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
k = 0
a = 0
b = 0
c = 0
for circle in circles:
if dist(circle[0], circle[1],
points[i][0], points[i][1],
points[j][0], points[j][1]) < R:
k += 1
if k > max_k:
point_a = i
point_b = j
max_k = k
print("{:} пересечений с окружностями у линии, проходящей\nчерез точки \
({:}; {:}) и ({:}; {:})".format(max_k, points[point_a][0], points[point_a][1],
points[point_b][0], points[point_b][1]))
min_x = max_x = points[0][0]
min_y = max_y = points[0][1]
for point in points:
max_x = max(max_x, point[0])
min_x = min(min_x, point[0])
max_y = max(max_y, point[1])
min_y = min(min_y, point[1])
for circle in circles:
max_x = max(max_x, circle[0])
min_x = min(min_x, circle[0])
max_y = max(max_y, circle[1])
min_y = min(min_y, circle[1])
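# Fit the drawing into the 800x600 canvas: 'scale' maps world units to pixels
# (700x500 usable area with 50 px margins), disp_x/disp_y center the picture,
# and the y axis is flipped below because canvas y grows downwards.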
scale = min(500/(max_y - min_y), 700/(max_x - min_x))
disp_x = 50 + round((700 - (max_x - min_x)*scale)/2)
disp_y = 550 - round((500 - (max_y - min_y)*scale)/2)
x = round(disp_x - min_x*scale)
y = round(disp_y + min_y*scale)
draw_pole.create_line(0, y, 800, y, width=2, fill="grey", arrow=LAST)
draw_pole.create_line(x, 600, x, 0, width=2, fill="grey", arrow=LAST)
draw_pole.create_text(x + 8, 9, text = "y",\
font="Arial 8", justify=CENTER, fill="green")
draw_pole.create_text(790, y - 9, text = "x",\
font="Arial 8", justify=CENTER, fill="green")
x1 = uline(points[point_a][0], points[point_a][1],
points[point_b][0], points[point_b][1], (max_y-min_y)*2)
x2 = uline(points[point_a][0], points[point_a][1],
points[point_b][0], points[point_b][1], -(max_y-min_y)*2)
y1 = uline(points[point_a][1], points[point_a][0],
           points[point_b][1], points[point_b][0], x1)
y2 = uline(points[point_a][1], points[point_a][0],
           points[point_b][1], points[point_b][0], x2)
x1 = round(disp_x + (x1 - min_x)*scale)
y1 = round(disp_y - (y1 - min_y)*scale)
x2 = round(disp_x + (x2 - min_x)*scale)
y2 = round(disp_y - (y2 - min_y)*scale)
draw_pole.create_line(x1, y1, x2, y2, width=2, fill="magenta")
R = round(R * scale)
for point in points:
x = round(disp_x + (point[0] - min_x)*scale)
y = round(disp_y - (point[1] - min_y)*scale)
draw_pole.create_oval(x-2, y-2, x+2, y+2, fill="black")
draw_pole.create_text(x, y - 13,text="({:};{:})".format(point[0], point[1]),\
font="Arial 8", justify=CENTER, fill="blue")
for circle in circles:
x = round(disp_x + (circle[0] - min_x)*scale)
y = round(disp_y - (circle[1] - min_y)*scale)
draw_pole.create_oval(x - R, y - R, x + R, y + R, outline = "red")
draw_pole.create_oval(x - 1, y - 1, x + 1, y + 1, fill = "red")
draw_pole.create_text(x, y - 13, text="({:};{:})".format(circle[0],\
circle[1]), font="Arial 8", justify=CENTER, \
fill="green")
draw_pole.pack()
root.mainloop()
|
python
|
# --------------------------------------
#! /usr/bin/python
# File: 7. Reverse Integer.py
# Author: Kimberly Gao
# My solution: (Run time: 28ms)
# Memory Usage: 14.4 MB
class Solution:
    def __init__(self, name=None):
        self.name = name
def reverse1(self, x: int) -> int:
string = str(x)
list1 = list(string)
if list1[0] == '-':
list_no_sign = list1[1:] # remove the sign
list_reverse = list_no_sign[::-1] # reverse the numbers
list_reverse.insert(0, '-')
else:
list_reverse = list1[::-1]
num_reverse_str = ''.join(list_reverse) # ['3','2','1'] -> ['321']
num_reverse = int(num_reverse_str)
        if -pow(2, 31) <= num_reverse <= pow(2, 31) - 1:
return num_reverse
else:
return 0
# Best solution: (Run time: 20ms)
def reverse2(self, x: int):
rev, flg = 0, 1
if x < 0:
flg = -1
x = abs(x)
while (x):
unit = x % 10
rev = rev * 10 + unit
x = x // 10
if rev > 2 ** 31 - 1 or rev < -2 ** 31:
return 0
return rev * flg
if __name__ == '__main__':
x = 1534236469
# x = 15346
my_solution = Solution().reverse1(x)
print(my_solution)
best_solution = Solution().reverse2(x)
print(best_solution)
|
python
|
from abaqusConstants import *
from .Section import Section
from ..Connector.ConnectorBehaviorOptionArray import ConnectorBehaviorOptionArray
class ConnectorSection(Section):
"""A ConnectorSection object describes the connection type and the behavior of a connector.
The ConnectorSection object is derived from the Section object.
Notes
-----
This object can be accessed by:
.. code-block:: python
import section
mdb.models[name].sections[name]
import odbSection
session.odbs[name].sections[name]
The corresponding analysis keywords are:
- CONNECTOR SECTION
- CONNECTOR BEHAVIOR
- CONNECTOR CONSTITUTIVE REFERENCE
"""
def __init__(self, name: str, assembledType: SymbolicConstant = NONE,
rotationalType: SymbolicConstant = NONE, translationalType: SymbolicConstant = NONE,
integration: SymbolicConstant = UNSPECIFIED, u1ReferenceLength: float = None,
u2ReferenceLength: float = None, u3ReferenceLength: float = None,
ur1ReferenceAngle: float = None, ur2ReferenceAngle: float = None,
ur3ReferenceAngle: float = None, massPerLength: float = None,
contactAngle: float = None, materialFlowFactor: float = 1, regularize: Boolean = ON,
defaultTolerance: Boolean = ON, regularization: float = 0,
extrapolation: SymbolicConstant = CONSTANT,
behaviorOptions: ConnectorBehaviorOptionArray = None):
"""This method creates a ConnectorSection object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].ConnectorSection
session.odbs[name].ConnectorSection
Parameters
----------
name
A String specifying the repository key.
        assembledType
            A SymbolicConstant specifying the assembled connection type. Possible values are:
            NONE, BEAM, BUSHING, CVJOINT, CYLINDRICAL, HINGE, PLANAR, RETRACTOR, SLIPRING,
            TRANSLATOR, UJOINT, and WELD. The default value is NONE. You cannot include the
            *assembledType* argument if *translationalType* or *rotationalType* are given a
            value other than NONE. At least one of the arguments *assembledType*,
            *translationalType*, or *rotationalType* must be given a value other than NONE.
        rotationalType
            A SymbolicConstant specifying the basic rotational connection type. Possible values
            are: NONE, ALIGN, CARDAN, CONSTANT_VELOCITY, EULER, FLEXION_TORSION, FLOW_CONVERTER,
            PROJECTION_FLEXION_TORSION, REVOLUTE, ROTATION, ROTATION_ACCELEROMETER, and
            UNIVERSAL. The default value is NONE. You cannot include the *rotationalType*
            argument if *assembledType* is given a value other than NONE. At least one of the
            arguments *assembledType*, *translationalType*, or *rotationalType* must be given
            a value other than NONE.
        translationalType
            A SymbolicConstant specifying the basic translational connection type. Possible
            values are: NONE, ACCELEROMETER, AXIAL, CARTESIAN, JOIN, LINK, PROJECTION_CARTESIAN,
            RADIAL_THRUST, SLIDE_PLANE, and SLOT. The default value is NONE. You cannot include
            the *translationalType* argument if *assembledType* is given a value other than
            NONE. At least one of the arguments *assembledType*, *translationalType*, or
            *rotationalType* must be given a value other than NONE.
integration
A SymbolicConstant specifying the time integration scheme to use for analysis. This
argument is applicable only to an Abaqus/Explicit analysis. Possible values are
UNSPECIFIED, IMPLICIT, and EXPLICIT. The default value is UNSPECIFIED.
u1ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the first component of relative motion. The default value is None.
u2ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the second component of relative motion. The default value is None.
u3ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the third component of relative motion. The default value is None.
ur1ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fourth component of relative motion. The default value is None.
ur2ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fifth component of relative motion. The default value is None.
ur3ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the sixth component of relative motion. The default value is None.
massPerLength
None or a Float specifying the mass per unit reference length of belt material. This
argument is applicable only when *assembledType*=SLIPRING, and must be specified in that
case. The default value is None.
contactAngle
None or a Float specifying the contact angle made by the belt wrapping around node b.
This argument is applicable only to an Abaqus/Explicit analysis, and only when
*assembledType*=SLIPRING. The default value is None.
materialFlowFactor
A Float specifying the scaling factor for material flow at node b. This argument is
applicable only when *assembledType*=RETRACTOR or *rotationalType*=FLOW_CONVERTER. The
default value is 1.0.
regularize
A Boolean specifying whether or not all tabular data associated with the
*behaviorOptions* will be regularized. This argument is applicable only for an
Abaqus/Explicit analysis. The default value is ON.
defaultTolerance
A Boolean specifying whether or not the default regularization tolerance will be used
for all tabular data associated with the *behaviorOptions*. This argument is applicable
only for an Abaqus/Explicit analysis and only if *regularize*=ON. The default value is
ON.
regularization
A Float specifying the regularization increment to be used for all tabular data
associated with the *behaviorOptions*. This argument is applicable only for an
Abaqus/Explicit analysis and only if *regularize*=ON and *defaultTolerance*=OFF. The
default value is 0.03.
extrapolation
A SymbolicConstant specifying the extrapolation technique to be used for all tabular
data associated with the *behaviorOptions*. Possible values are CONSTANT and LINEAR. The
default value is CONSTANT.
behaviorOptions
A ConnectorBehaviorOptionArray object.
Returns
-------
A ConnectorSection object.
Raises
------
InvalidNameError
RangeError
"""
super().__init__()
pass
def setValues(self, assembledType: SymbolicConstant = NONE, rotationalType: SymbolicConstant = NONE,
translationalType: SymbolicConstant = NONE, integration: SymbolicConstant = UNSPECIFIED,
u1ReferenceLength: float = None, u2ReferenceLength: float = None,
u3ReferenceLength: float = None, ur1ReferenceAngle: float = None,
ur2ReferenceAngle: float = None, ur3ReferenceAngle: float = None,
massPerLength: float = None, contactAngle: float = None, materialFlowFactor: float = 1,
regularize: Boolean = ON, defaultTolerance: Boolean = ON, regularization: float = 0,
extrapolation: SymbolicConstant = CONSTANT,
behaviorOptions: ConnectorBehaviorOptionArray = None):
"""This method modifies the ConnectorSection object.
Parameters
----------
        assembledType
            A SymbolicConstant specifying the assembled connection type. Possible values are:
            NONE, BEAM, BUSHING, CVJOINT, CYLINDRICAL, HINGE, PLANAR, RETRACTOR, SLIPRING,
            TRANSLATOR, UJOINT, and WELD. The default value is NONE. You cannot include the
            *assembledType* argument if *translationalType* or *rotationalType* are given a
            value other than NONE. At least one of the arguments *assembledType*,
            *translationalType*, or *rotationalType* must be given a value other than NONE.
        rotationalType
            A SymbolicConstant specifying the basic rotational connection type. Possible values
            are: NONE, ALIGN, CARDAN, CONSTANT_VELOCITY, EULER, FLEXION_TORSION, FLOW_CONVERTER,
            PROJECTION_FLEXION_TORSION, REVOLUTE, ROTATION, ROTATION_ACCELEROMETER, and
            UNIVERSAL. The default value is NONE. You cannot include the *rotationalType*
            argument if *assembledType* is given a value other than NONE. At least one of the
            arguments *assembledType*, *translationalType*, or *rotationalType* must be given
            a value other than NONE.
        translationalType
            A SymbolicConstant specifying the basic translational connection type. Possible
            values are: NONE, ACCELEROMETER, AXIAL, CARTESIAN, JOIN, LINK, PROJECTION_CARTESIAN,
            RADIAL_THRUST, SLIDE_PLANE, and SLOT. The default value is NONE. You cannot include
            the *translationalType* argument if *assembledType* is given a value other than
            NONE. At least one of the arguments *assembledType*, *translationalType*, or
            *rotationalType* must be given a value other than NONE.
integration
A SymbolicConstant specifying the time integration scheme to use for analysis. This
argument is applicable only to an Abaqus/Explicit analysis. Possible values are
UNSPECIFIED, IMPLICIT, and EXPLICIT. The default value is UNSPECIFIED.
u1ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the first component of relative motion. The default value is None.
u2ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the second component of relative motion. The default value is None.
u3ReferenceLength
None or a Float specifying the reference length associated with constitutive response
for the third component of relative motion. The default value is None.
ur1ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fourth component of relative motion. The default value is None.
ur2ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the fifth component of relative motion. The default value is None.
ur3ReferenceAngle
None or a Float specifying the reference angle in degrees associated with constitutive
response for the sixth component of relative motion. The default value is None.
massPerLength
None or a Float specifying the mass per unit reference length of belt material. This
argument is applicable only when *assembledType*=SLIPRING, and must be specified in that
case. The default value is None.
contactAngle
None or a Float specifying the contact angle made by the belt wrapping around node b.
This argument is applicable only to an Abaqus/Explicit analysis, and only when
*assembledType*=SLIPRING. The default value is None.
materialFlowFactor
A Float specifying the scaling factor for material flow at node b. This argument is
applicable only when *assembledType*=RETRACTOR or *rotationalType*=FLOW_CONVERTER. The
default value is 1.0.
regularize
A Boolean specifying whether or not all tabular data associated with the
*behaviorOptions* will be regularized. This argument is applicable only for an
Abaqus/Explicit analysis. The default value is ON.
defaultTolerance
A Boolean specifying whether or not the default regularization tolerance will be used
for all tabular data associated with the *behaviorOptions*. This argument is applicable
only for an Abaqus/Explicit analysis and only if *regularize*=ON. The default value is
ON.
regularization
A Float specifying the regularization increment to be used for all tabular data
associated with the *behaviorOptions*. This argument is applicable only for an
Abaqus/Explicit analysis and only if *regularize*=ON and *defaultTolerance*=OFF. The
default value is 0.03.
extrapolation
A SymbolicConstant specifying the extrapolation technique to be used for all tabular
data associated with the *behaviorOptions*. Possible values are CONSTANT and LINEAR. The
default value is CONSTANT.
behaviorOptions
A ConnectorBehaviorOptionArray object.
Raises
------
RangeError
"""
pass
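# A minimal usage sketch (illustrative only; the model name 'Model-1' and the
# section name are assumptions, and HINGE is one of the documented assembled types):
#
#     section = mdb.models['Model-1'].ConnectorSection(name='HingeConn',
#                                                      assembledType=HINGE)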
|
python
|
#!/usr/bin/env python
# encoding: utf-8
#use nc -u 127.0.0.1 8888 to communicate with the server 1-way
"""A non-blocking, single-threaded TCP server."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import socket
import ssl
import stat
import sys
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import ssl_wrap_socket
from tornado import process
#from tornado.netutil import set_close_exec
#web socket support
import tornado.web
import tornado.httpserver
import tornado.ioloop
import tornado.websocket
import tornado.options
PIPE = None
class UDPServer(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop
self._sockets = {} # fd -> socket object
self._pending_sockets = []
self._started = False
def add_sockets(self, sockets):
if self.io_loop is None:
self.io_loop = IOLoop.instance()
for sock in sockets:
self._sockets[sock.fileno()] = sock
            add_accept_handler(sock, self._on_receive,
                               io_loop=self.io_loop)
def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=25):
sockets = bind_sockets(port, address=address, family=family,
backlog=backlog)
if self._started:
self.add_sockets(sockets)
else:
self._pending_sockets.extend(sockets)
def start(self, num_processes=1):
assert not self._started
self._started = True
if num_processes != 1:
process.fork_processes(num_processes)
sockets = self._pending_sockets
self._pending_sockets = []
self.add_sockets(sockets)
def stop(self):
        for fd, sock in self._sockets.items():
self.io_loop.remove_handler(fd)
sock.close()
    def _on_receive(self, data, address):
print(data)
host = address[0]
port = address[1]
print(host)
print(port)
        if PIPE:
PIPE.write_message(data)
#sock = socket.socket(
#socket.AF_INET, socket.SOCK_STREAM)
#sock.connect((host, port))
#sock.send("abcde\r\n\r\n")
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=25):
sockets = []
if address == "":
address = None
flags = socket.AI_PASSIVE
if hasattr(socket, "AI_ADDRCONFIG"):
flags |= socket.AI_ADDRCONFIG
for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_DGRAM,
0, flags)):
af, socktype, proto, canonname, sockaddr = res
sock = socket.socket(af, socktype, proto)
#set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sockets.append(sock)
return sockets
if hasattr(socket, 'AF_UNIX'):
def bind_unix_socket(file, mode=0o0600, backlog=128):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
#set_close_exec(sock.fileno())
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
except (OSError) as err:
if err.errno != errno.ENOENT:
raise
else:
            if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
raise ValueError("File %s exists and is not a socket", file)
sock.bind(file)
os.chmod(file, mode)
        # No listen() here: SOCK_DGRAM sockets are connectionless.
return sock
def add_accept_handler(sock, callback, io_loop=None):
if io_loop is None:
io_loop = IOLoop.instance()
def accept_handler(fd, events):
while True:
try:
data, address = sock.recvfrom(2500)
except (socket.error) as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
callback(data, address)
io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)
LISTEN_PORT = 8000
LISTEN_ADDRESS = '127.0.0.1'
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
global PIPE
PIPE = self
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
global PIPE
PIPE = None
def check_origin(self, origin):
"""
Override the origin check if needed
"""
return True
class ChannelHandler(tornado.websocket.WebSocketHandler):
"""
Handler that handles a websocket channel
"""
@classmethod
def urls(cls):
return [
(r'/web-socket/', cls, {}), # Route/Handler/kwargs
]
def initialize(self):
self.channel = None
def open(self, channel):
"""
Client opens a websocket
"""
self.channel = channel
def on_message(self, message):
"""
Message received on channel
"""
print("Received",message)
def on_close(self):
"""
Channel is closed
"""
def check_origin(self, origin):
"""
Override the origin check if needed
"""
return True
server = UDPServer()
server.bind(8888)
server.start(1)
print("Start UDP Server on Port:8888")
app = tornado.web.Application([
(r'/web-socket/', EchoWebSocket, {}), # Route/Handler/kwargs
])#ChannelHandler.urls())
# Setup HTTP Server
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(8013)
print("Start websocket server on port 8013")
IOLoop.instance().start()
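# A rough way to exercise the UDP-to-websocket pipe (sketch only; assumes the
# third-party 'websocket-client' package is installed):
#
#     import websocket
#     ws = websocket.create_connection("ws://127.0.0.1:8013/web-socket/")
#     # in another shell:  echo "hello" | nc -u 127.0.0.1 8888
#     print(ws.recv())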
|
python
|
from django.contrib.gis import admin
from leaflet.admin import LeafletGeoAdmin
from world.models import Border, School, Facility, Busstop
class BorderAdmin(LeafletGeoAdmin):
search_fields = ['n03_001','n03_003','n03_004']
    list_filter = ('n03_003',)
admin.site.register(Border, BorderAdmin)
admin.site.register(School, LeafletGeoAdmin)
admin.site.register(Facility, LeafletGeoAdmin)
admin.site.register(Busstop, LeafletGeoAdmin)
admin.site.site_title = 'GeoDjango Login'
admin.site.site_header = 'GeoDjango Hands-on'
admin.site.index_title = 'GeoDjango Menu'
|