import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
class pneumonia:
def __init__(self):
pass
def main(self):
# re-size all the images to this
IMAGE_SIZE = [224, 224]
train_path = r'/content/drive/MyDrive/chest_xray/train'
valid_path = '/content/drive/MyDrive/chest_xray/val'
test_path = r'/content/drive/MyDrive/chest_xray/test'
        # Load VGG16 with ImageNet weights as a frozen convolutional base (no top classifier)
        vgg16 = VGG16(input_shape=(224, 224, 3), weights='imagenet', include_top=False)  # 3 = RGB channels
# don't train existing weights
for layer in vgg16.layers:
layer.trainable = False
# our layers - you can add more if you want
x = Flatten()(vgg16.output)
prediction = Dense(1, activation='sigmoid')(x)
# create a model object
model = Model(inputs=vgg16.input, outputs=prediction)
# tell the model what cost and optimization method to use
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
# Use the Image Data Generator to import the images from the dataset
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
rotation_range=30,
width_shift_range=0.2,
vertical_flip = True,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
        # Use the same target size as the IMAGE_SIZE initialized above
        training_set = train_datagen.flow_from_directory(train_path,
target_size = (224, 224),
batch_size = 8,
class_mode = 'binary')
        validation_set = test_datagen.flow_from_directory(valid_path,
target_size = (224, 224),
batch_size = 8,
class_mode = 'binary')
        test_set = test_datagen.flow_from_directory(test_path,
target_size = (224, 224),
batch_size = 8,
class_mode = 'binary')
# fit the model
# Run the cell. It will take some time to execute
from tensorflow.keras.callbacks import EarlyStopping,ReduceLROnPlateau,ModelCheckpoint
        mc = ModelCheckpoint(monitor='val_accuracy', save_best_only=True,
                             filepath='/content/drive/MyDrive/chest_xray/pneumonia.h5',  # checkpoint to a file, not a directory
                             mode='max')
        es = EarlyStopping(patience=10)
        rlp = ReduceLROnPlateau(patience=5, monitor='val_accuracy', min_lr=0.001)
        # validate on the validation split (the original passed the test set here)
        r = model.fit(training_set, validation_data=validation_set, epochs=50,
                      validation_steps=len(validation_set), callbacks=[es, rlp, mc])
def prediction(self, images):
        model = keras.models.load_model('./models/pneumonia.h5')
        # match the 224x224 input size and the 1/255 rescaling used in training
        test_image = image.load_img(images, target_size=(224, 224))
        test_image = image.img_to_array(test_image) / 255.0
        test_image = np.expand_dims(test_image, axis=0)
        result = model.predict(test_image)
        # single sigmoid unit: flow_from_directory assigns classes alphabetically,
        # so NORMAL -> 0 and PNEUMONIA -> 1 for the standard chest_xray folders
        if result[0][0] > 0.5:
            prediction = 'Positive'
        else:
            prediction = 'Normal'
        print(prediction)
p= pneumonia()
p.prediction("IM-0119-0001.jpeg")
|
python
|
from django.test import TestCase
from ..models import Tag, Medium
from ..management.commands.add_tag_to_media import AssignTag
class AssignTagTest(TestCase):
fixtures = ['test_basic_data.yaml']
def setUp(self):
pass
def tearDown(self):
pass
def test_assign_tag_to_one_file(self):
"""Assign a new tag to one file using its object storage key, where tag already exists. """
object_storage_key = 'disk1/test01.jpg'
tagname = 'Aerial'
assigner = AssignTag()
assigner.add_tag(object_storage_key, tagname, False)
# Check tag exists or has been created
        tag_count = Tag.objects.filter(name__name=tagname, importer=Tag.MANUAL).count()
self.assertEqual(tag_count, 1)
# Get tag's id
tag = Tag.objects.get(name__name=tagname, importer=Tag.MANUAL)
tag_id = tag.id
# Get the medium from the object storage
medium = Medium.objects.get(file__object_storage_key=object_storage_key)
medium_id = medium.id
# Check that the tag has been assigned
self.assertTrue(tag in medium.tags.all())
medium_tag_count = medium.tags.filter(id=tag_id).count()
self.assertEqual(medium_tag_count, 1)
|
python
|
r"""
``$ mwtext preprocess_text -h``
::
Converts MediaWiki XML dumps to plaintext. One line per text chunk with
wiki markup and punctuation cleaned up. This utility is designed with word
embeddings in mind. Generally, you can expect one line per paragraph.
Usage:
preprocess_text (-h|--help)
preprocess_text [<input-file>...]
[--namespace=<id>]... [--wiki-host=<url>]
[--labels=<path>] [--label-field=<k>]
                    [--min-line=<words>]
[--threads=<num>] [--output=<path>]
[--compress=<type>] [--verbose] [--debug]
Options:
-h|--help Print this documentation
<input-file> The path to a MediaWiki XML Dump file
[default: <stdin>]
--namespace=<id> Limit processing to this namespace. Can be
repeated to select for multiple namespaces.
--wiki-host=<url> The hostname of the MediaWiki install to query
for metadata from.
--labels=<path> The path to a file containing label data for
associating with text. If not set, no labels will
be included.
--label-field=<k> The field to examine within the labels file
[default: taxo_labels]
--min-line=<words> Do not output lines that have fewer than this many
words. [default: 10]
--threads=<num> If a collection of files are provided, how many
processor threads? [default: <cpu_count>]
--output=<path> Write output to a directory with one output file
per input path. [default: <stdout>]
--compress=<type> If set, output written to the output-dir will be
compressed in this format. [default: bz2]
--verbose Print progress information to stderr. Kind of a
mess when running multi-threaded.
--debug Print debug logs.
"""
import json
import logging
import re
import sys
import mwapi
import mwcli
import mwcli.files
from ..wikitext_preprocessor import WikitextPreprocessor
logger = logging.getLogger(__name__)
REDIRECT_RE = re.compile("#redirect", re.I)
def preprocess_text(dump, forbidden_namespaces, title2labels=None,
namespaces=None, min_line=10, verbose=False):
wikitext_preprocessor = WikitextPreprocessor(forbidden_namespaces)
for page in dump:
if namespaces and page.namespace not in namespaces:
continue
if title2labels is not None:
if page.title not in title2labels:
continue
else:
labels = title2labels[page.title]
else:
labels = []
if verbose:
sys.stderr.write(page.title + ": ")
sys.stderr.flush()
for revision in page:
if not is_article(revision.text):
continue
for line in wikitext_preprocessor.process(revision.text):
if len(line) >= min_line:
yield format_labels(labels) + " ".join(line)
if verbose:
sys.stderr.write(".")
sys.stderr.flush()
else:
if verbose:
sys.stderr.write("-")
sys.stderr.flush()
if verbose:
sys.stderr.write("\n")
sys.stderr.flush()
def format_labels(label_ids):
if len(label_ids) > 0:
return " ".join("__label__{0}".format(id) for id in label_ids) + " "
else:
return ""
def process_args(args):
session = mwapi.Session(
args['--wiki-host'], user_agent="mwtext preprocess_text")
lang, forbidden_namespaces = get_wiki_info(session)
logger.info(
"Gathered details from site_info: lang={0}, forbidden_namespaces={1}"
.format(lang, forbidden_namespaces))
if args['--labels'] is not None:
label_field = args['--label-field']
logger.info("Reading label file {0}...".format(args['--labels']))
title2labels, label2ids = create_label_map(
mwcli.files.reader(args['--labels']), lang, label_field)
logger.info("Label2ids: {0}".format(label2ids))
else:
title2labels = None
if len(args['--namespace']) == 0:
namespaces = None
else:
namespaces = [int(v) for v in args['--namespace']]
min_line = int(args['--min-line'])
return {
'forbidden_namespaces': forbidden_namespaces,
'title2labels': title2labels,
'namespaces': namespaces,
'min_line': min_line}
def create_label_map(f, lang, label_field):
label2ids = {}
title2labels = {}
for line in f:
ob = json.loads(line)
# Get title
if lang not in ob['sitelinks']:
continue
else:
title = ob['sitelinks'][lang]
# Get labels
label_ids = []
for label in ob[label_field]:
if label not in label2ids:
label2ids[label] = len(label2ids)
label_ids.append(label2ids[label])
title2labels[title] = set(label_ids)
return title2labels, label2ids
def get_wiki_info(session):
doc = session.get(action="query", meta="siteinfo",
siprop=["namespaces", "namespacealiases", "general"],
formatversion=2)
forbidden_namespaces = set()
for namespace in doc['query']['namespaces'].values():
if namespace['id'] in WikitextPreprocessor.FORBIDDEN_NAMESPACE_IDS:
forbidden_namespaces.add(namespace['name'].lower())
forbidden_namespaces.add(namespace['canonical'].lower())
for namespace in doc['query']['namespacealiases']:
if namespace['id'] in WikitextPreprocessor.FORBIDDEN_NAMESPACE_IDS:
forbidden_namespaces.add(namespace['alias'].lower())
return doc['query']['general']['lang'], forbidden_namespaces
def is_article(text):
return not (text is None or
len(text) < 50 or
REDIRECT_RE.match(text))
streamer = mwcli.Streamer(
__doc__,
__name__,
preprocess_text,
process_args=process_args,
file_reader=mwcli.Streamer.read_xml,
line_writer=mwcli.Streamer.write_line
)
main = streamer.main
|
python
|
# -*- coding: utf-8 -*-
# Transformers installation
import numpy as np
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
from qtorch.quant import posit_quantize, float_quantize, configurable_table_quantize
device = 'cuda'
model_id = 'gpt2-large'
tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
from datasets import load_dataset
#test = load_dataset("lambada", split='test')
test = load_dataset('wikitext', 'wikitext-103-v1', split='test')
#test = load_dataset("ptb_text_only", split='test')
encodings = tokenizer('\n\n'.join(test['text']), return_tensors='pt')
def run(weight_table, act_table):
    # NOTE: weight_table / act_table are accepted but unused below; the qtorch
    # imports suggest they were meant to drive a quantization pass over the model.
    import torch
    from tqdm import tqdm
    model = GPT2LMHeadModel.from_pretrained(model_id)
    model = model.to(device)
max_length = model.config.n_positions
stride = 1024
#stride = 32
lls = []
for i in tqdm(range(0, encodings.input_ids.size(1), stride)):
begin_loc = max(i + stride - max_length, 0)
end_loc = min(i + stride, encodings.input_ids.size(1))
trg_len = end_loc - i # may be different from stride on last loop
input_ids = encodings.input_ids[:,begin_loc:end_loc].to(device)
target_ids = input_ids.clone()
target_ids[:,:-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * trg_len
lls.append(log_likelihood)
ppl = torch.exp(torch.stack(lls).sum() / end_loc)
return ppl.item()
print (run ([],[]))
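# A minimal, hypothetical sketch of the quantization step the qtorch imports point
# at (the helper name and bit-widths are assumptions, not part of the original run):
# round every nn.Linear weight to a low-precision float in place before measuring
# perplexity. GPT-2 keeps most of its weights in transformers' Conv1D modules,
# which would be handled the same way.
def quantize_linear_weights(model, exp=5, man=2):
    import torch
    with torch.no_grad():
        for module in model.modules():
            if isinstance(module, torch.nn.Linear):
                module.weight.copy_(
                    float_quantize(module.weight, exp=exp, man=man,
                                   rounding="nearest"))
    return model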
|
python
|
# coding=utf-8
import tensorflow as tf
from tensorflow.python.ops import variable_scope
import numpy as np
import sys
import os
sys.path.append('..')
from tensor2tensor.models import transformer
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import registry
from tensor2tensor import problems
import functools
from tensor2tensor.models.research import universal_transformer_util
from test_input_generator import positional_encoding_generator
data_dir = '/home/murphyhuang/dev/mldata/en_ch_translate_output_ut_analy/recurret_conduct'
def input_generator():
return tf.random.normal(shape=(1, 26, 1, 1024), mean=50)
class Args(object):
def __init__(self):
self.problem = 'translate_enzh_wmt32k'
self.data_dir = '/home/murphyhuang/dev/mldata/en_ch_translate_vocab'
self.model_dir = '/home/murphyhuang/dev/mldata/en_ch_translate_output_ut_20190509'
self.model = 'universal_transformer'
self.hparams_set = 'adaptive_universal_transformer_base'
self.ut_type = 'act'
self.step_num = 1
self.save_dir = os.path.join(data_dir, 'ut_0509_recurrent_1024_randomonly')
def main():
FLAGS = Args()
# Enable TF Eager execution
tfe = tf.contrib.eager
tfe.enable_eager_execution()
batch_inputs = input_generator()
# initialize translation model
hparams_set = FLAGS.hparams_set
Modes = tf.estimator.ModeKeys
hparams = trainer_lib.create_hparams(hparams_set, data_dir=FLAGS.data_dir, problem_name=FLAGS.problem)
translate_model = registry.model(FLAGS.model)(hparams, Modes.EVAL)
    # restore checkpointed parameters and run the recurrent encoder steps
ckpt_dir = tf.train.latest_checkpoint(FLAGS.model_dir)
with tfe.restore_variables_on_create(ckpt_dir):
with variable_scope.EagerVariableStore().as_default():
features = {'inputs': batch_inputs}
with tf.variable_scope('universal_transformer/body'):
input_tensor = tf.convert_to_tensor(features['inputs'])
input_tensor = common_layers.flatten4d3d(input_tensor)
encoder_input, self_attention_bias, _ = (
transformer.transformer_prepare_encoder(
input_tensor, tf.convert_to_tensor([0]), translate_model.hparams, features=None))
with tf.variable_scope('universal_transformer/body/encoder'):
ffn_unit = functools.partial(
universal_transformer_util.transformer_encoder_ffn_unit,
hparams=translate_model.hparams)
attention_unit = functools.partial(
universal_transformer_util.transformer_encoder_attention_unit,
hparams=translate_model.hparams,
encoder_self_attention_bias=None,
attention_dropout_broadcast_dims=[],
save_weights_to={},
make_image_summary=True)
storing_list = []
transformed_state = encoder_input
for step_index in range(1024):
storing_list.append(transformed_state.numpy())
with tf.variable_scope('universal_transformer/body/encoder/universal_transformer_{}'.format(FLAGS.ut_type)):
transformed_state = universal_transformer_util.step_preprocess(
transformed_state,
tf.convert_to_tensor(step_index % FLAGS.step_num),
translate_model.hparams
)
with tf.variable_scope('universal_transformer/body/encoder/universal_transformer_{}/rec_layer_0'.format(FLAGS.ut_type)):
transformed_new_state = ffn_unit(attention_unit(transformed_state))
with tf.variable_scope('universal_transformer/body/encoder'):
if (step_index + 1) % FLAGS.step_num == 0:
transformed_new_state = common_layers.layer_preprocess(transformed_new_state, translate_model.hparams)
if step_index == 5:
print(transformed_new_state)
transformed_state = transformed_new_state
storing_list = np.asarray(storing_list)
np.save(FLAGS.save_dir, storing_list)
if __name__ == '__main__':
main()
|
python
|
#!/bin/python
import listify_circuits
listify_circuits.optimize_circuits(48, 'forward')
|
python
|
"""
Register Me Silly
Repeatedly checks Drexel's Term Master Schedule for availability of class sections
Author: Anshul Kharbanda
Created: 9 - 21 - 2018
"""
from . import LabeledInput
from tkinter import *
class LabeledSpinbox(LabeledInput):
"""
    A LabeledInput whose input widget is a Spinbox accepting values from 0 to 1000
"""
def __init__(self, master=None, title='Label', value=''):
"""
Initializes instance
"""
super(LabeledSpinbox, self).__init__(master, title)
# Value for entry
self.entry = Spinbox(self, from_=0, to=1000)
self.entry.pack(side=TOP, anchor=W, fill=X)
self.entry.delete(0, END)
self.entry.insert(0, value)
|
python
|
#Detector Functions
# ------------------ Importing Libraries ------------------ #
import cv2
import os
import time
# ------------------ Importing Functions ------------------ #
from utils import open_thresholds, get_audio_list, reshape_image, input_output_details, make_prediction, get_dist_values, play_audio_recording
from debug import draw_keypoints, draw_connections, get_edge_dictionary
# ------------------ Detector Function ------------------ #
def detector(model, interpretor, debug):
"""
detector:
Main function that operates the detection and event trigger for the application.
"""
capture_front = cv2.VideoCapture(0)
time_threshold = 15
dist_thresholds = open_thresholds()
basepath = os.getcwd()
    audio_filepath = os.path.join(basepath, '..', '..', 'Database', 'Audio Recordings', 'Converted')
audio_list = get_audio_list(audio_filepath)
playing_audio = False
time_count = 0
start_time = time.time()
while capture_front.isOpened():
#Read Camera Input
ret_front, frame_front = capture_front.read()
#Image Reshape
input_image_front = reshape_image(frame=frame_front, model=model)
#Setup Tensor Input and Output
input_details, output_details = input_output_details(interpreter=interpretor)
#Make Prediction
keypoint_score_front = make_prediction(interpreter=interpretor, input_details=input_details, output_details=output_details, input_image=input_image_front)
if debug:
#Rendering Points
            confidence_threshold = 0.4
            draw_keypoints(frame=frame_front, keypoints=keypoint_score_front, confidence_threshold=confidence_threshold)
#Rendering Edges
EDGES = get_edge_dictionary()
draw_connections(frame=frame_front, keypoints=keypoint_score_front, edges=EDGES, confidence_threshold=confidence_threshold)
#Determine Distances
current_distances = get_dist_values(frame=frame_front, keypoints=keypoint_score_front)
#If all distances are above threshold, then posture is correct. Else, posture is not correct.
if all([current_distances[i] > dist_thresholds[threshold] for i, threshold in enumerate(dist_thresholds)]):
time_count = 0
else:
if not playing_audio:
current_time = time.time()
time_count += current_time - start_time
start_time = current_time
else:
time_count = 0
if (time_count > time_threshold):
playing_audio = True
time_count = 0
play_audio_recording(audio_list)
playing_audio = False
time_count = 0
if debug:
print(int(time_count))
cv2.imshow("Front", frame_front)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture_front.release()
cv2.destroyAllWindows()
|
python
|
"""
Xero Accounts API
"""
from .api_base import ApiBase
class Accounts(ApiBase):
"""
Class for Accounts API
"""
GET_ACCOUNTS = '/api.xro/2.0/accounts'
def get_all(self):
"""
Get all accounts
Returns:
List of all accounts
"""
return self._get_request(Accounts.GET_ACCOUNTS)
|
python
|
"""Defines a Tornado Server that consumes Kafka Event data for display"""
import logging
import logging.config
from pathlib import Path
import tornado.ioloop
import tornado.template
import tornado.web
# Import logging before models to ensure configuration is picked up
logging.config.fileConfig(f"{Path(__file__).parents[0]}/logging.ini")
from consumer import KafkaConsumer
from models import Lines, Weather
import topic_check
logger = logging.getLogger(__name__)
class MainHandler(tornado.web.RequestHandler):
"""Defines a web request handler class"""
template_dir = tornado.template.Loader(f"{Path(__file__).parents[0]}/templates")
template = template_dir.load("status.html")
def initialize(self, weather, lines):
"""Initializes the handler with required configuration"""
self.weather = weather
self.lines = lines
def get(self):
"""Responds to get requests"""
        logger.debug("rendering and writing handler template")
self.write(
MainHandler.template.generate(weather=self.weather, lines=self.lines)
)
def run_server():
"""Runs the Tornado Server and begins Kafka consumption"""
if topic_check.topic_exists("TURNSTILE_SUMMARY") is False:
logger.fatal(
"Ensure that the KSQL Command has run successfully before running the web server!"
)
exit(1)
if topic_check.topic_exists("org.chicago.cta.stations.table.v1") is False:
logger.fatal(
"Ensure that Faust Streaming is running successfully before running the web server!"
)
exit(1)
weather_model = Weather()
lines = Lines()
application = tornado.web.Application(
[(r"/", MainHandler, {"weather": weather_model, "lines": lines})]
)
application.listen(8888)
# Build kafka consumers
consumers = [
KafkaConsumer(
topic_name_pattern="^mh_weather_channel", # "org.chicago.cta.weather.v1",
message_handler=weather_model.process_message,
offset_earliest=True,
),
KafkaConsumer(
"mh_station_db_stations", # "org.chicago.cta.stations.table.v1",
lines.process_message,
offset_earliest=True,
is_avro=False,
),
KafkaConsumer(
"^mh_station_arrival_", # "^org.chicago.cta.station.arrivals.",
lines.process_message,
offset_earliest=True,
),
KafkaConsumer(
"TURNSTILE_SUMMARY",
lines.process_message,
offset_earliest=True,
is_avro=False,
),
]
try:
logger.info(
"Open a web browser to http://localhost:8888 to see the Transit Status Page"
)
for consumer in consumers:
tornado.ioloop.IOLoop.current().spawn_callback(consumer.consume)
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt as e:
logger.info("shutting down server")
tornado.ioloop.IOLoop.current().stop()
for consumer in consumers:
consumer.close()
if __name__ == "__main__":
run_server()
|
python
|
link_template="""\
<ul>
<li><a href="{langfam_code:}/overview/introduction.html">Introduction</a></li>
<li><a href="{langfam_code:}/overview/tokenization.html">Tokenization</a></li>
<li>Morphology
<ul>
<li><a href="{langfam_code:}/overview/morphology.html">General principles</a></li>
<li><a href="{langfam_code:}/pos/index.html">{langfam_name:} POS tags</a> (<a href="{langfam_code:}/pos/all.html">single document</a>)</li>
<li><a href="{langfam_code:}/feat/index.html">{langfam_name:} features</a> (<a href="{langfam_code:}/feat/all.html">single document</a>)</li>
</ul>
</li>
<li>Syntax
<ul>
<li><a href="{langfam_code:}/overview/syntax.html">General principles</a></li>
<li><a href="{langfam_code:}/overview/specific-syntax.html">Specific constructions</a></li>
<li><a href="{langfam_code:}/dep/index.html">{langfam_name:} relations</a> (<a href="{langfam_code:}/dep/all.html">single document</a>)</li>
</ul>
</li>
</ul>
"""
import sys
import glob
import os.path
import json
import re
import codecs
import StringIO
SUBSET_ALL, SUBSET_NONEMPTY, SUBSET_EMPTY = range(3)
no_data_token_count_span="""<span class="widespan" style="color:gray"><span class="hint--top hint--info" data-hint="No corpus data">-</span></span>"""
token_count_span="""<span class="widespan"><span class="hint--top hint--info" data-hint="{token_count:,} tokens {word_count:,} words {tree_count:,} sentences">{tcountk}K</span></span>"""
def get_token_count_span(corpus_data):
token_count=corpus_data.get("token_count",0)
if token_count==0: #No data
return no_data_token_count_span
else:
if token_count<1000:
tcountk="<1"
else:
tcountk="{:,}".format(token_count//1000)
return token_count_span.format(tcountk=tcountk,**corpus_data)
def get_column_icons(corpus_data):
r="""<span class="widespan">"""
if corpus_data.get("words_with_lemma_count",0)>int(corpus_data.get("word_count")*0.9):
#r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Lemmas">Ⓛ</span></span>"""
r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Lemmas"><img class="propertylogo" src="logos/L.svg" /></span></span>"""
else:
r+="""<span class="tagspan"></span>"""
if corpus_data.get("catvals",0)>0:
#r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Features">Ⓕ</span></span>"""
r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Features"><img class="propertylogo" src="logos/F.svg" /></span></span>"""
else:
r+="""<span class="tagspan"></span>"""
if corpus_data.get("words_with_deps_count",0)>0:
#r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Secondary dependencies">Ⓓ</span></span>"""
r+="""<span class="tagspan"><span class="hint--top hint--info" data-hint="Secondary dependencies"><img class="propertylogo" src="logos/D.svg" /></span></span>"""
else:
r+="""<span class="tagspan"></span>"""
r+="""</span>"""
return r
categories={(u"Documentation status",u"stub"):"""<span class="widespan" style="color:gray"><span class="hint--top hint--info" data-hint="No documentation">-</span></span>""",
(u"Documentation status",u"partial"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Partial documentation"><i class="fa fa-file-o"></i></span></span>""",
(u"Documentation status",u"complete"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Complete documentation"><i class="fa fa-file-text-o"></i></span></span>""",
(u"Data source",u"unknown"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Data source not known">-</span></span>""",
(u"Data source",u"automatic"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Automatic conversion"><i class="fa fa-cogs"></i></span></span>""",
(u"Data source",u"semi-automatic"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Automatic conversion with manual corrections"><i class="fa fa-cogs"></i><!--<i class="fa fa-plus" style="font-size: 0.75em; line-height: 1.33em; vertical-align: +10%;">--><i class="fa fa-check"></i></span></span>""",
(u"Data source",u"manual"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Full manual check of the data"><i class="fa fa-user"></i></span></span>""",
(u"License",u"none"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="License not known">-</span></span>""",
(u"Data available since",u"UD v1.0"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.0 (Jan 2015)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v1.1"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.1 (May 2015)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v1.2"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.2 (November 2015)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v1.3"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.3 (May 2016)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v1.4"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 1.4 (November 2016)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v2.0"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="First released in UD version 2.0 (March 2017)"><i class="fa fa-check"></i></span></span>""",
(u"Data available since",u"UD v2.1"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="Scheduled for release in UD version 2.1 (November 2017)"><i class="fa fa-hourglass-end"></i></span></span>""",
(u"Data available since",u"none"):"""<span class="widespan"><span class="hint--top hint--info" data-hint="No firm schedule for data release">-</span></span>"""}
empty_wide_span="""<span class="widespan"><span class="hint--top hint--info" data-hint="{hint:}">?</span></span>"""
license_span="""<span class="widespan"><span class="hint--top hint--info" data-hint="{license:}">{licenseshort:}</span></span>"""
def get_license_span(lic):
if "CC BY-NC-SA" in lic:
return license_span.format(license=lic,licenseshort="""<img class="license" src="logos/by-nc-sa.svg"/>""")
elif "CC BY-SA" in lic:
return license_span.format(license=lic,licenseshort="""<img class="license" src="logos/by-sa.svg"/>""")
elif "CC BY" in lic:
return license_span.format(license=lic,licenseshort="""<img class="license" src="logos/by.svg"/>""")
elif "LGPLLR" in lic:
return license_span.format(license=lic,licenseshort="""<img class="license" src="logos/LGPLLR.svg"/>""")
elif "GPL" in lic:
return license_span.format(license=lic,licenseshort="""<img class="license" src="logos/gpl.svg"/>""")
else:
return license_span.format(license=lic,licenseshort=""" """)
valueRe=re.compile(u"^([a-zA-Z ]+): (.+)$")
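# valueRe picks up README metadata lines such as
# "Data available since: UD v2.0" -> ("Data available since", "UD v2.0")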
#known_cats=set(cat for cat,val in categories)
def analyze_readme(dir_name):
readme_data={u"Documentation status":u"stub",u"Data source":u"automatic conversion",u"License":u"none",u"Data available since":u"none", u"Genre":u"none",u"Contributors":u""}
readmes=sorted(x for x in glob.glob(os.path.join(dir_name,"*")) if "readme" in x.lower())
if not readmes: #No readme file!
return readme_data
with codecs.open(readmes[0],"r","utf-8") as f:
for line in f:
match=valueRe.match(line)
if match: #Maybe one of our values?
cat,val=match.group(1).strip(),match.group(2).strip()
if (cat,val) in categories:
#Yes! this is a known category, we have a perfect match
readme_data[cat]=val
elif cat in readme_data: #Known category but unknown value, I guess it's okay
#Known cat, but weird val --- should we warn?
readme_data[cat]=val
return readme_data
def get_language_span(l):
return """<span class="doublewidespan">{}</span>""".format(l.replace(u"_",u" "))
flags=json.loads(open("flags.json").read())
def get_flag_span(lang_name):
ccode=flags.get(lang_name)
if ccode:
return """<span class="flagspan"><img class="flag" src="flags/svg/{}.svg"></span>""".format(ccode)
else:
return """<span class="flagspan"> </span>"""
lcodes=json.loads(open("lcodes.json").read())
genres_map=json.loads(open("genre_symbols.json","r").read())
for k,v in genres_map.iteritems():
genres_map[k]=v.replace("_","-")
def get_genre_span(genres):
spans=""
for g in genres.split():
if g in genres_map:
spans+="""<span class="genreicon"><i class="fa fa-%s"></i></span>"""%genres_map[g]
else:
spans+="""<span class="genreicon"><i class="fa fa-%s"></i></span>"""%genres_map["none"]
return """<span class="doublewidespan"><span class="hint--top hint--info" data-hint="%s">%s</span></span>"""%(genres,spans)
def is_empty(args, lang, corpus_data):
# readme_data = analyze_readme(os.path.join(args.ud_data, "UD_"+lang))
# no_docs = "No documentation" in readme_data["Documentation status"]
# no_data = corpus_data.get("token_count", 0) == 0
#return no_data and no_docs
return corpus_data.get("token_count", 0) == 0
def gen_table(args, subset=SUBSET_NONEMPTY):
    jekyll_data=[] # this will later be handed to Jekyll as data
a_data=StringIO.StringIO()
print >> a_data, "<!-- content of _includes/at_glance.html -->"
print >> a_data, "<!-- do NOT edit by hand, that file is autogenerated using gen_index/index_page.py -->"
# Will create a line for every language which has a repository
langs=sorted(os.path.basename(x).replace(".json","") for x in glob.glob("_corpus_data/*.json"))
for l in langs:
with open(os.path.join("_corpus_data",l+".json"),"r") as f:
corpus_data=json.load(f)
corpus_data[u"lang_code"]=lcodes[l]
corpus_data[u"lang_name"]=l
corpus_data[u"langfam_code"]=lcodes[l].split("_")[0]
corpus_data[u"langfam_name"]=l.split("-")[0]
readme_data=analyze_readme(os.path.join(args.ud_data,"UD_"+l))
empty = is_empty(args, l, corpus_data)
if ((empty and subset == SUBSET_NONEMPTY) or
(not empty and subset == SUBSET_EMPTY)):
pass # Don't write table for this dataset
else:
print >> a_data, '<div data-lc="%s">' % lcodes[l]
print >> a_data, get_flag_span(l)
print >> a_data, get_language_span(l)
print >> a_data, get_token_count_span(corpus_data)
print >> a_data, get_column_icons(corpus_data)
print >> sys.stderr, l
for c in (u"Documentation status", u"Data source", u"Data available since"):
print >> a_data, categories.get((c,readme_data[c]),empty_wide_span.format(hint=readme_data[c]))
print >> a_data, get_license_span(readme_data[u"License"])
print >> a_data, get_genre_span(readme_data["Genre"])
print >> a_data, "</div>"
print >> a_data, "<div>"
print >> a_data, link_template.format(**corpus_data)
print >> a_data, "</div>"
ldict={}
ldict[u"lang_name"]=corpus_data[u"lang_name"]
ldict[u"lang_code"]=corpus_data[u"lang_code"]
ldict[u"contributors"]=[]
if readme_data["Contributors"].strip():
for c in readme_data["Contributors"].strip().split(u";"):
c=c.strip()
lf=c.split(u",",1)
if len(lf)==2:
ldict[u"contributors"].append({u"last":lf[0].strip(),u"first":lf[1].strip(), u"full":lf[1].strip()+u" "+lf[0].strip()})
else:
ldict[u"contributors"].append({u"last":c,u"first":u"?",u"full":c})
jekyll_data.append(ldict)
return a_data,jekyll_data
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='generates the index page')
parser.add_argument('--ud-data', required=True, help='Where is the UD data, so I can grab the readmes? (DIRECTORY)')
parser.add_argument('--ldict', default="../_data/ldata.json", help='Where to write the language dict file? (Default %(default)s)')
parser.add_argument('--empty', default=False, action='store_true', help='Generate for empty treebanks')
args = parser.parse_args()
subset = SUBSET_NONEMPTY if not args.empty else SUBSET_EMPTY
a_data,ldict=gen_table(args, subset)
print a_data.getvalue()
if args.ldict:
with open(args.ldict,"w") as out:
json.dump(ldict,out,indent=2)
|
python
|
import tensorflow as tf
hello = tf.constant('hello')
sess = tf.Session()
print(sess.run(hello))
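# TF 1.x graph/session API; the eager TensorFlow 2.x equivalent is simply:
#   print(tf.constant('hello').numpy())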
|
python
|
import os, numpy, sys
from numpy.linalg import norm
import isambard_dev
def get_SHparams(protein):
sequence = protein[0].sequence
residue_ids = [protein[0][i].id for i in range(len(protein[0].sequence))]
reference_axis = isambard_dev.analyse_protein.reference_axis_from_chains(protein)
residue_code = [sequence[n]+str(n+1) for n in range(len(sequence))]
N_chains = len(protein.sequences)
# Calculate mean radius just for residues in range
data = []
for n in range(N_chains):
chain = protein[n]
radius_list = isambard_dev.analyse_protein.polymer_to_reference_axis_distances(chain, reference_axis)
data.append(radius_list)
data = numpy.array(data).T
mean_radii = list(numpy.mean(data, axis=1))
std_radii = list(numpy.std(data, axis=1))
ca_radii = [[residue_code[n], mean_radii[n], std_radii[n]] for n in range(len(residue_code))]
    # Get cylindrical azimuthal phi-angle (degrees)
data = []
ra_xyz = reference_axis.coordinates
for n in range(N_chains):
chain = protein[n]
protein_xyz = chain.primitive.coordinates
r0 = protein_xyz[0] - ra_xyz[0]
data_per_chain = []
data_per_chain.append(0)
for j in range(1,len(protein_xyz)):
distance2axis = protein_xyz[j] - ra_xyz[j]
            cos_angle = numpy.dot(r0, distance2axis)/(norm(r0)*norm(distance2axis))
            # clip to [-1, 1] to guard against floating-point overshoot before arccos
            phi_angle = numpy.rad2deg(numpy.arccos(numpy.clip(cos_angle, -1.0, 1.0)))
data_per_chain.append(phi_angle)
data.append(data_per_chain)
data = numpy.array(data).T
mean_phis = list(numpy.mean(data, axis=1))
std_phis = list(numpy.std(data, axis=1))
azimuthal_angles = [[residue_code[n], mean_phis[n], std_phis[n]] for n in range(len(residue_code))]
# Get CA positions relative to reference axis
ra_xyz = reference_axis.coordinates
L = [norm(r - ra_xyz[0]) for r in ra_xyz]
# Get interface angles
data = []
for n in range(N_chains):
chain = protein[n]
protein_xyz = chain.primitive.coordinates
crangles = isambard_dev.analyse_protein.crick_angles(chain, reference_axis)
crangles = [x for x in crangles if x is not None]
data.append(crangles)
data = numpy.array(data).T
mean_crangles = list(numpy.mean(data, axis=1))
mean_crangles.append('None')
std_crangles = list(numpy.std(data, axis=1))
std_crangles.append('None')
interface_angles = [[residue_code[n], mean_crangles[n], std_crangles[n]] for n in range(len(residue_code))]
return ca_radii, azimuthal_angles, L, interface_angles
if __name__ == "__main__":
infile = sys.argv[1] # input file
protein = isambard_dev.ampal.ampal.convert_pdb_to_ampal(infile)
ca_radii, azimuthal_angles, axial_positions, interface_angles = get_SHparams(protein)
print("Z-axis positions: ", axial_positions, "\n")
print("Mean and Std values of CA radius wrt reference axis per aa, per chain:\n", ca_radii, "\n")
print("Mean and Std values of Azimuthal Angle per aa, per chain; wrt reference axis:\n" , azimuthal_angles, "\n")
print("Mean and Std values of Inerface Angle wrt reference axis per aa, per chain:\n", interface_angles, "\n")
|
python
|
# coding=utf-8
import toml
class Config(object):
def __init__(self, config_file_name):
dictionary = toml.load(config_file_name)
mqtt = Config.get_value_or_default(dictionary, "mqtt")
self.broker_address = Config.get_value_or_default(mqtt, "broker_address")
csv = Config.get_value_or_default(dictionary, "csv")
self.base_directory_path = Config.get_value_or_default(csv, "base_directory_path")
@staticmethod
def get_value_or_default(dictionary, field):
        # tolerate a missing section (dictionary is None) as well as a missing key
        try:
            return dictionary[field]
        except (KeyError, TypeError):
            return None
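# Shape of the config file this class expects (hypothetical values):
#
#   [mqtt]
#   broker_address = "localhost"
#
#   [csv]
#   base_directory_path = "/var/data/csv"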
|
python
|
#! /usr/bin/env python
import argparse
import itertools
import cv2
import numpy as np
if __name__=='__main__':
parser = argparse.ArgumentParser(
description='Arrange a number of images as a matrix.')
parser.add_argument('f', help='Output filename.')
parser.add_argument('w', type=int,
help='Width of the matrix (number of images).')
parser.add_argument('h', type=int,
help='Height of the matrix (number of images).')
parser.add_argument('img', nargs='+', help='Images (w x h files).')
parser.add_argument('--margin', metavar='m', nargs=1,
help='Margin between images: integers are '
'interpreted as pixels, floats as proportions.')
args = parser.parse_args()
w = args.w
h = args.h
n = w*h
    if len(args.img) != n:
        raise ValueError('Number of images ({}) does not match '
                         'matrix size {}x{}'.format(len(args.img), w, h))
imgs = [cv2.imread(i) for i in args.img]
if any(i.shape != imgs[0].shape for i in imgs[1:]):
raise ValueError('Not all images have the same shape.')
img_h, img_w, img_c = imgs[0].shape
m_x = 0
m_y = 0
if args.margin is not None:
margin = args.margin[0]
if '.' in margin:
m = float(margin)
m_x = int(m*img_w)
m_y = int(m*img_h)
else:
m_x = int(margin)
m_y = m_x
imgmatrix = np.zeros((img_h * h + m_y * (h - 1),
img_w * w + m_x * (w - 1),
img_c),
np.uint8)
imgmatrix.fill(255)
positions = itertools.product(range(w), range(h))
    for (x_i, y_i), img in zip(positions, imgs):
x = x_i * (img_w + m_x)
y = y_i * (img_h + m_y)
imgmatrix[y:y+img_h, x:x+img_w, :] = img
cv2.imwrite(args.f, imgmatrix)
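# Example invocation (script and file names are hypothetical):
#   python arrange_matrix.py out.png 3 2 a.png b.png c.png d.png e.png f.png --margin 0.05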
|
python
|
#!/usr/bin/env python
import rospy
import numpy as np
from sensor_msgs.msg import Image, CompressedImage
import cv2
from cv_bridge import CvBridge, CvBridgeError
bridge = CvBridge()
image_pub = rospy.Publisher("output",Image,queue_size=1)
def callback(original_image):
    np_arr = np.frombuffer(original_image.data, np.uint8)  # np.fromstring is deprecated
    image_in = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)  # CV_LOAD_IMAGE_COLOR was removed in OpenCV 3+
new_image = cv2.flip(image_in,1)
image_pub.publish(bridge.cv2_to_imgmsg(new_image,"bgr8"))
def listener():
rospy.init_node('virtual_mirror_node', anonymous=False)
rospy.Subscriber("input", CompressedImage, callback)
rospy.spin()
if __name__ == '__main__':
listener()
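# The "input"/"output" topics are meant to be remapped at launch, e.g.
# (package name is hypothetical):
#   rosrun my_pkg virtual_mirror.py input:=/camera/image/compressed output:=/mirror/image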
|
python
|
import sys
filename=sys.argv[1]
def tableHTMLtoMatrix(filename):
file=open(filename,"r")
    for i in range(2):  # discard the first two lines
file.readline()
all_lines=file.readline().split("</td></tr></tbody></table></td></tr></thead>")
entries=all_lines[1].split('<tr align="center">')
dict,Enodes={},[]
for entry in entries[1:]:
entry=entry.strip('<th>').split('Highlight')[:-1]
for sets in entry:
sets=sets.split('nowrap')[1:]
A,E=sets[0].split("id=")[1:],sets[1].split("id=")[1:-1]
classA=A[0].split()[0][1:-2] #E should have the same class
resA=A[1].split('"> ')[1].split(' <span style="background-color:')[0].split()
Elist=E[1].split('<td align="center">')[:-1]
resE=Elist[0].split('"> ')[1].split(' <span style="background-color:')[0].split()
resA,resE=resA[0]+resA[1].split(":")[-1],resE[0]+resE[1].split(":")[-1]
resA,resE=resA.split("@"),resE.split("@")
if resE[0] not in Enodes:
Enodes.append(resE[0])
Elist=[s.replace('</td>', '') for s in Elist[1:]]
Elist.append(resA[1]+"-"+resE[1])
if len(Elist) < 4:
bond=classA.split("_")[1][:-1]
Elist.append(bond)
if resA[0] not in dict:
dict[resA[0]]={resE[0]:[Elist]}
else:
if resE[0] not in dict[resA[0]]:
dict[resA[0]][resE[0]]=[Elist]
else:
dict[resA[0]][resE[0]].append(Elist)
struct=filename.split("-")[0]
ofile1=open(struct+"_NumContact.csv","w")
ofile2=open(struct+"_MinDistance.csv","w")
ofile3=open(struct+"_C-alphaDistance.csv","w")
ofile4=open(struct+"_atom-pairs.csv","w")
ofile1.write(","+','.join(Enodes)+ '\r\n')
ofile2.write(","+','.join(Enodes)+ '\r\n')
ofile3.write(","+','.join(Enodes)+ '\r\n')
ofile4.write(","+','.join(Enodes)+ '\r\n')
for A in dict:
row1,row2,row3,row4=A,A,A,A
for E in Enodes:
if E not in dict[A]:
row1+=",0"
row2+=",0"
row3+=",0"
row4+=",0"
else:
if len(dict[A][E]) == 1 and len(dict[A][E][0]) == 4:
row1+=","+dict[A][E][0][0]
row2+=","+dict[A][E][0][1]
row3+=","+dict[A][E][0][2]
row4+=","+dict[A][E][0][3]
elif len(dict[A][E]) == 1 and len(dict[A][E][0]) < 4:
row1+=",0"
row2+=","+dict[A][E][0][0]
row3+=",0"
row4+=","+dict[A][E][0][1]
elif len(dict[A][E]) > 1:
row4+=","
for grp in dict[A][E]:
if len(grp) == 4:
row1+=","+grp[0]
row2+=","+grp[1]
row3+=","+grp[2]
row4+=grp[3]+"_"
else:
row4+=grp[1]+"_"
row4=row4.strip("_")
ofile1.write(row1+"\r\n")
ofile2.write(row2+"\r\n")
ofile3.write(row3+"\r\n")
ofile4.write(row4+"\r\n")
ofile1.close()
ofile2.close()
ofile3.close()
ofile4.close()
tableHTMLtoMatrix(filename)
def linegraphtoMatrix(filename):
file=open(filename,"r")
    for i in range(2):  # discard the first two lines
file.readline()
all_lines=file.readline().split("<title>")
colors=all_lines[0].split("<span")
interactions={'#888':'contact'}
for color in colors[1:]:
if color.startswith(" style") and "Grey" not in color:
color=color.split("</div>")[0].split('; font-weight:bold">')
fullcode=color[0].split("color:")[-1]
codeused=fullcode[:2]+fullcode[4]+fullcode[6]
interaction=color[-1].split(":")[-1].strip().strip(";")
interactions[codeused]=interaction
dict,Enodes={},{}
for ln in all_lines[1:]:
struct=ln.split("resid")[-1].split("_")[0].split('="')[1]
if struct not in dict:
dict[struct]={}
Enodes[struct]=[]
if not ln.startswith("Interaction"):
ln=ln.split("fill=")
node=ln[0].split("</title>")[0]
struct=node.split(".")[-1]
color=ln[1].split()[0].strip('"') ##color for conservation info
if node.split(".")[1] == "E":
Enodes[struct].append(node)
else:
ln=ln.split('stroke="')
color=ln[1].split('"')[0]
interaction=interactions[color]
ln=ln[0].split("residue")
resid1=ln[1].split("with")[0].strip().split("_")[0]
resid2=ln[2].split("</title>")[0].strip().split("_")[0]
struct=resid1.split(".")[-1]
if resid1 not in dict[struct]:
dict[struct][resid1]={resid2:[interaction]}
else:
if resid2 not in dict[struct][resid1]:
dict[struct][resid1][resid2]=[interaction]
elif resid2 in dict[struct][resid1]:
dict[struct][resid1][resid2].append(interaction)
for struct in dict:
outfile=open(struct+"_matrix.csv","w")
outfile.write(","+','.join(Enodes[struct])+ '\r\n')
for A in dict[struct]:
row=A
for i in Enodes[struct]:
if i not in dict[struct][A]:
row+=",0"
else:
interaction=""
for j in dict[struct][A][i]:
interaction+=j+"_"
row+=","+interaction.strip("_")
outfile.write(row+"\r\n")
outfile.close()
#linegraphtoMatrix(filename) #using 6M0J-2AJF-linegraph.html from Link 10
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as col
import math
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
#import string
INTP_METHODS = ["bilinear", "bicubic"]
COLOR_SPECTRUMS = ["rainbow", "gray", "BuGn"]
FILE_NAMES = [ "aug_6_temp","Aug-2016-meridional-current-181x189", "Aug-2016-potential-temperature-180x188", "Aug-2016-salinity-180x188", "Aug-2016-tropical-heat-potential-180x188", "Aug-2016-zonal-current-181x189" ]
cdict_gray = {'red': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0))
}
cdict_BuGn = {'green': ((0.0, 0.0, 1.0),
(1.0, 0.0, 0.0)),
'red': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0),
(1.0, 1.0, 1.0))
}
cdict_rainbow = {'red': ((0.0, 0.0, 1.0),
(0.2, 1.0, 1.0),
(0.4, 0.0, 0.0),
(0.6, 0.0, 0.0),
(0.8, 0.0, 0.0),
(1.0, 1.0, 0.0)),
'green': ((0.0, 0.0, 0.0),
(0.2, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.6, 1.0, 1.0),
(0.8, 0.0, 0.0),
(1.0, 0.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.2, 0.0, 0.0),
(0.4, 0.0, 0.0),
(0.6, 1.0, 1.0),
(0.8, 1.0, 1.0),
(1.0, 1.0, 1.0)),
}
BAD_FLAG_KEY = "BAD FLAG"
folder_path = '/home/shyam8/Sem2/DV/a1-dataset-indian-ocean-consolidated/'
file_name = FILE_NAMES[2]
file_path = folder_path + "/" + file_name + ".txt"
def get_bad_flag( BAD_FLAG_KEY, file_path ):
bad_flag = "NA"
file = open( file_path )
for line in file:
if BAD_FLAG_KEY in line:
flag = line.split(":")[1]
bad_flag = flag.strip()
break
file.close()
return bad_flag
def get_lines_to_skip( file_path ):
bad_flag = "NA"
i=0
file = open( file_path )
for line in file:
split = line.split(":")
if len(split) < 2 :
break
i = i + 1
file.close()
return i
def read_file( file_path, lines_to_skip, bad_flag ) :
return pd.read_csv (file_path, skiprows=lines_to_skip, sep='\t', na_values=[bad_flag])
def mask_array( array, mask_value ):
    # mask entries equal to mask_value (the original masked the global `data` by mistake)
    return np.ma.masked_equal(array, mask_value)
def format_latitudes( latitudes ):
for i in range(len(latitudes)):
if 'N' in latitudes[i]:
latitudes[i] = float( str(latitudes[i]).replace("N", "" ) )
elif 'S' in latitudes[i] :
latitudes[i] = float( "-" + str(latitudes[i]).replace("S","") )
def format_longitudes( longitudes ):
for i in range(len(longitudes)):
if 'E' in longitudes[i]:
longitudes[i] = float( str(longitudes[i]).replace("E","") )
elif 'W' in longitudes[i]:
longitudes[i] = float( "-" + str(longitudes[i]).replace("W","") )
def perform_task( latitudes, longitudes, array, cmap, func_type ):
X= latitudes
Y= longitudes
X, Y = np.meshgrid(X, Y)
Z = np.array(array)
if func_type == 'exp' :
Z = np.exp( Z )
Z = np.transpose(Z)
Z = np.ma.masked_invalid(Z)
fig = plt.figure()
ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, Z, cmap=cmap)  # use the colormap passed in
plt.xlabel('LATITUDE')
plt.ylabel('LONGITUDE')
plt.show()
'''
X = []
Y = []
Z = []
for i in range( len(latitudes) ):
for j in range(len(longitudes)):
X.append( latitudes[i] )
Y.append( longitudes[j] )
Z.append(array[i][j] )
df = pd.DataFrame({'x': X, 'y': Y, 'z': np.ma.masked_invalid(Z) })
fig = plt.figure()
ax = Axes3D(fig)
surf = ax.plot_trisurf(df.x, df.y, df.z, cmap=cm.jet, linewidth=0.001)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
'''
def normalize_values( array ):
min = np.nanmin(array)
max = np.nanmax(array)
if max == min :
return
for i in range( len(array) ):
for j in range( len(array[0]) ):
array[i][j] = (array[i][j] - min) / ( max - min )
def normalize_values_1d( array ):
min = np.nanmin(array)
max = np.nanmax(array)
if max == min :
return
for i in range( len(array) ):
array[i] = (array[i] - min) / ( max - min )
def custom_color_map( c_name,c_dict ):
#https://matplotlib.org/gallery/images_contours_and_fields/custom_cmap.html
return col.LinearSegmentedColormap( c_name, c_dict)
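# e.g. custom_color_map("BlueGreen", cdict_BuGn) builds the blue-green colormap
# handed to perform_task below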
bad_flag = get_bad_flag( BAD_FLAG_KEY, file_path )
num_lines_to_skip = get_lines_to_skip( file_path )
data = read_file( file_path, num_lines_to_skip, bad_flag )
#extract longitudes
longitudes = np.array(data.columns.values)
#get firts column key to extract latitudes
first_cloumn_key = longitudes[0]
#remove first element
longitudes = longitudes[1:]
#extract latitudes
latitudes = np.array( data[first_cloumn_key] )
#delete first clumn
data = data.drop(columns=first_cloumn_key)
#convert data to numpy 2D array
data = np.array(data)
#normalize data(all values between 0-1)
normalize_values( data )
#mask bad values
data = np.ma.masked_invalid( data )
#format_latitudes
format_latitudes(latitudes)
format_longitudes(longitudes)
normalize_values_1d(latitudes)
normalize_values_1d(longitudes)
perform_task( latitudes, longitudes, data, custom_color_map( "BlueGreen" ,cdict_BuGn), 'exp' )
with open('your_file.txt', 'w') as f:
for item in data:
f.write("%s\n" % item)
|
python
|
from brownie import accounts, web3, Wei, reverts, chain
from brownie.network.transaction import TransactionReceipt
from brownie.convert import to_address
import pytest
from brownie import Contract
from settings import *
# reset the chain after every test case
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
pass
def _create_token(token_factory, token_template, name, symbol, total_supply, template_id, integrator_account, owner):
    data = token_template.getInitData(name, symbol, owner, total_supply)
tx = token_factory.createToken(template_id, integrator_account, data, {"from": owner})
assert "TokenCreated" in tx.events
return tx.return_value
########## TEST CREATE TOKEN ###################
def test_create_token(token_factory, fixed_token_template):
name = "Fixed Token"
symbol = "FXT"
template_id = 1 # Fixed Token Template
total_supply = 100 * TENPOW18
integrator_account = accounts[1]
_create_token(token_factory, fixed_token_template, name, symbol, total_supply, template_id, integrator_account, accounts[0])
def test_add_token_template_wrong_operator(token_factory, fixed_token_template):
with reverts("MISOTokenFactory: Sender must be operator"):
token_factory.addTokenTemplate(fixed_token_template, {"from": accounts[2]})
def test_add_token_template_already_exist(token_factory,fixed_token_template):
with reverts():
token_factory.addTokenTemplate(fixed_token_template, {"from": accounts[0]})
def test_number_of_tokens(token_factory, fixed_token_template):
name = "Fixed Token"
symbol = "FXT"
template_id = 1 # Fixed Token Template
test_tokens = 100 * TENPOW18
integrator_account = accounts[1]
number_of_tokens_before = token_factory.numberOfTokens()
data = fixed_token_template.getInitData(name, symbol, accounts[0], test_tokens)
tx = token_factory.createToken(template_id, integrator_account, data)
assert "TokenCreated" in tx.events
name = "Mintable Token"
symbol = "MNT"
template_id = 2 # Mintable Token Template
test_tokens = 0
integrator_account = accounts[1]
data = fixed_token_template.getInitData(name, symbol, accounts[0], test_tokens)
tx = token_factory.createToken(template_id, integrator_account, data)
assert "TokenCreated" in tx.events
assert number_of_tokens_before + 2 == token_factory.numberOfTokens()
def test_remove_token_template(token_factory):
template_id = 1 # Fixed Token Template
tx = token_factory.removeTokenTemplate(template_id,{"from": accounts[0]})
assert "TokenTemplateRemoved" in tx.events
assert token_factory.getTokenTemplate(template_id) == ZERO_ADDRESS
def test_token_factory_integrator_fee_accounts(token_factory,fixed_token_template):
integrator_fee_account = accounts[6]
miso_dev = accounts[5]
minimum_fee = 0.1 * TENPOW18
integrator_fee_percent = 10
ETH_FOR_FEE = 1 * TENPOW18
template_id = token_factory.getTemplateId(fixed_token_template)
token_factory.setMinimumFee(minimum_fee,{"from":accounts[0]})
token_factory.setIntegratorFeePct(integrator_fee_percent, {"from":accounts[0]})
token_factory.setDividends(miso_dev, {"from":accounts[0]})
before_deploy_balance_miso_dev = miso_dev.balance()
before_deploy_balance_integrator = integrator_fee_account.balance()
tx = token_factory.deployToken(template_id,integrator_fee_account,{"from":accounts[0],"value":ETH_FOR_FEE})
assert "TokenCreated" in tx.events
after_deploy_balance_miso_dev = miso_dev.balance()
after_deploy_balance_integrator = integrator_fee_account.balance()
assert after_deploy_balance_miso_dev > before_deploy_balance_miso_dev
assert after_deploy_balance_integrator - before_deploy_balance_integrator == 0.01 * TENPOW18
assert token_factory.integratorFeePct() == integrator_fee_percent
######## Fail cases ########################
with reverts("MISOTokenFactory: Failed to transfer minimumFee"):
tx = token_factory.deployToken(template_id,integrator_fee_account,{"from":accounts[0],"value":0})
template_id = 100
with reverts():
token_factory.deployToken(template_id,integrator_fee_account)
########### Checking ZEROADDRESS Integrator account#######
integrator_fee_account = ZERO_ADDRESS
template_id = token_factory.getTemplateId(fixed_token_template)
before_deploy_balance_miso_dev = miso_dev.balance()
tx = token_factory.deployToken(template_id,integrator_fee_account,{"from":accounts[0],"value":ETH_FOR_FEE})
assert "TokenCreated" in tx.events
after_deploy_balance_miso_dev = miso_dev.balance()
assert after_deploy_balance_miso_dev - before_deploy_balance_miso_dev == 1 * TENPOW18
########## Checking Miso Dev Integrator account ###############
integrator_fee_account = miso_dev
template_id = token_factory.getTemplateId(fixed_token_template)
before_deploy_balance_miso_dev = miso_dev.balance()
tx = token_factory.deployToken(template_id,integrator_fee_account,{"from":accounts[0],"value":ETH_FOR_FEE})
assert "TokenCreated" in tx.events
after_deploy_balance_miso_dev = miso_dev.balance()
assert after_deploy_balance_miso_dev - before_deploy_balance_miso_dev == 1 * TENPOW18
def test_token_factory_set_minimum_fee_with_not_operator(token_factory):
minimum_fee = 0.1 * TENPOW18
with reverts():
token_factory.setMinimumFee(minimum_fee, {"from":accounts[9]})
def test_token_factory_set_integrator_pct_not_operator(token_factory):
integrator_fee_percent = 10
with reverts("MISOTokenFactory: Sender must be operator"):
token_factory.setIntegratorFeePct(integrator_fee_percent, {"from":accounts[9]})
def test_token_factory_set_integrator_pct_not_in_range(token_factory):
integrator_fee_percent = 2000
with reverts("MISOTokenFactory: Range is from 0 to 1000"):
token_factory.setIntegratorFeePct(integrator_fee_percent, {"from":accounts[0]})
def test_token_factory_remove_template_not_operator(token_factory):
with reverts():
token_factory.removeTokenTemplate(1, {"from":accounts[5]})
def test_token_factory_set_dividends_not_operator(token_factory):
miso_dev = accounts[5]
with reverts():
token_factory.setDividends(miso_dev,{"from":accounts[5]})
def test_token_factory_init_again(token_factory,miso_access_controls):
with reverts():
token_factory.initMISOTokenFactory(miso_access_controls, {'from': accounts[0]})
|
python
|
from __future__ import annotations
import re
from librespot.common import Utils
from librespot.metadata import SpotifyId
from librespot.metadata.PlayableId import PlayableId
class TrackId(PlayableId, SpotifyId):
_PATTERN = re.compile("spotify:track:(.{22})")
_hexId: str
def __init__(self, hex_id: str):
self._hexId = hex_id.lower()
@staticmethod
def from_uri(uri: str) -> TrackId:
search = TrackId._PATTERN.search(uri)
if search is not None:
track_id = search.group(1)
return TrackId(
Utils.bytes_to_hex(PlayableId.BASE62.decode(track_id, 16)))
raise RuntimeError("Not a Spotify track ID: {}".format(uri))
@staticmethod
def from_base62(base62: str) -> TrackId:
return TrackId(Utils.bytes_to_hex(PlayableId.BASE62.decode(base62,
16)))
@staticmethod
def from_hex(hex_str: str) -> TrackId:
return TrackId(hex_str)
def to_spotify_uri(self) -> str:
return "spotify:track:{}".format(self._hexId)
def hex_id(self) -> str:
return self._hexId
def get_gid(self) -> bytes:
return Utils.hex_to_bytes(self._hexId)
|
python
|
import torch
import numpy as np
from models_repo.massive_resnets import *
from models_repo.tiny_resnets import *
from models_repo.Middle_Logit_Generator import *
import argparse
from train_funcs import train_regular_ce,\
train_regular_middle_logits,\
train_kd_or_fitnets_2,\
stage_1_fitnet_train,\
dml_train_regular
from train_dih import train_via_dih
from dih_utils import load_trained_intermediate_heads
parser = argparse.ArgumentParser()
#General training setting
parser.add_argument('--training_type', default='dih', type=str,
help='The mode for training, could be either "ce" (regular cross-entropy)'
' "kd" (canonical knowledge distillation) "fine_tune" (fine_tuning the intermediate heads) "fitnets", "dml" (deep mutual learning) or dih. default = "dih"')
parser.add_argument('--epochs', default=240, type=int, help='Input the number of epochs: default(240)')
parser.add_argument('--momentum', default=0.9, type=float, help='Input the momentum: default(0.9)')
parser.add_argument('--nesterov', default=True)
parser.add_argument('--no-nesterov', action='store_false', dest='nesterov', help='Disable Nesterov: default(True)')
parser.add_argument('--batch_size', default=128, type=int, help='Input the batch size: default(128)')
parser.add_argument('--lr', default=0.05, type=float, help='Input the learning rate: default(0.05)')
parser.add_argument('--wd', default=5e-4, type=float, help='Input the weight decay rate: default(5e-4)')
parser.add_argument('--schedule', nargs='+', type=int, default=[150, 180, 210],
help='Decrease learning rate at these epochs.')
parser.add_argument('--schedule_gamma', type=float, default=0.2,
help='multiply the learning rate to this factor at pre-defined epochs in schedule (default : 0.2)')
parser.add_argument('--dataset', default='CIFAR100', type=str, help='Input the name of dataset: default(CIFAR100)')
parser.add_argument('--student', default='res8', type=str, help='The student model. default: ResNet 8')
parser.add_argument('--teacher', default=None, type=str, help='The teacher model. default: ResNet 110')
parser.add_argument('--path_to_save', default='./model.pth', type=str,
help='the path to save the model and/or headers after training')
parser.add_argument('--saved_path', default='/model.pth', type=str,
help='the path of the saved model')
parser.add_argument('--saved_intermediates_directory', default='./saved_headers/', type=str,
                    help='the directory of the fine-tuned mounted intermediate heads')
parser.add_argument('--gpu_id', default='cuda:0', type=str, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--kd_alpha', default=0.1, type=float, help='alpha weight in the knowledge distillation loss function')
parser.add_argument('--kd_temperature', default=5, type=int, help='Temperature in the knowledge distillation loss function')
parser.add_argument('--seed', default=3, type=int, help='seed value for reproducibility')
#FitNets stage 1
parser.add_argument('--student_stage_1_saved', default='/model.pth', type=str,
help='the path of the saved partial student upto the guided layer (stage 1 of FitNets)')
parser.add_argument('--epochs_fitnets_1', default=40, type=int, help='Input the number of epochs: default(40) FitNets stage 1')
parser.add_argument('--momentum_fitnets_1', default=0.9, type=float, help='Input the momentum: default(0.9) FitNets stage 1')
parser.add_argument('--nesterov_fitnets_1', default=True, type=bool, help='Input the status of nesterov: default(True) FitNets stage 1')  # note: argparse's type=bool treats any non-empty string as True
parser.add_argument('--lr_fitnets_1', default=0.1, type=float, help='Input the learning rate: default(0.1) FitNets stage 1')
parser.add_argument('--wd_fitnets_1', default=5e-4, type=float, help='Input the weight decay rate: default(5e-4) FitNets stage 1')
parser.add_argument('--schedule_fitnets_1', type=int, nargs='+', default=[60, 120, 180],
help='Decrease learning rate at these epochs. FitNets stage 1')
parser.add_argument('--schedule_gamma_fitnets_1', type=float, default=0.2,
help='multiply the learning rate to this factor at pre-defined epochs in schedule (default : 0.2) FitNets stage 1')
# Create a dictionary (key,value) pair with the arguments
# state = {'batch_size': 64, 'dataset': 'cifar100', 'epochs': 200, 'epochs_fitnets_1': 40, 'gpu_id': 'cuda:0', 'kd_alpha': 0.1, 'kd_temperature': 5, 'lr': 0.1, 'lr_fitnets_1': 0.1, 'momentum': 0.9, 'momentum_fitnets_1': 0.9, 'nesterov': True, 'nesterov_fitnets_1': True, 'path_to_save': './teacher_res8_cifar100_seed_3_epochs_200.th', 'saved_intermediates_directory': './saved_headers/', 'saved_path': '/model.pth', 'schedule': [60, 120, 180], 'schedule_fitnets_1': [60, 120, 180], 'schedule_gamma': 0.2, 'schedule_gamma_fitnets_1': 0.2, 'seed': 3, 'student': 'res8', 'student_stage_1_saved': '/model.pth', 'teacher': 'res8', 'training_type': 'ce', 'wd': 0.0005, 'wd_fitnets_1': 0.0005}
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Print the arguments
for (arg,value) in state.items():
print(arg+" : "+str(value)+"\n"+"*"*30)
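# For reference: kd_alpha and kd_temperature parameterize the canonical
# Hinton-style KD objective. A minimal illustrative sketch is shown below;
# the actual loss used in this repo lives in train_funcs.
import torch.nn.functional as F
def kd_loss_sketch(student_logits, teacher_logits, labels, alpha, T):
    # hard-label cross-entropy on the student's own predictions
    ce = F.cross_entropy(student_logits, labels)
    # soft-label KL between temperature-scaled teacher and student distributions,
    # rescaled by T^2 so gradients keep a comparable magnitude
    kl = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                  F.softmax(teacher_logits / T, dim=1),
                  reduction="batchmean") * (T * T)
    return (1.0 - alpha) * ce + alpha * kl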
# Models for this experiment
models_dict = {"res8": resnet8_cifar,
"res14": resnet14_cifar,
"res20": resnet20_cifar,
"res110": resnet110_cifar,
"res34": ResNet34,
"res18": ResNet18}
# The number of mounted intermediate heads based on the model architecture in this paper.
intermediate_heads_quantity = {"res8": 3,
"res14": 3,
"res20": 3,
"res110": 3,
"res34": 4,
"res18": 4}
# Output classes
if args.dataset == "cifar10":
num_classes = 10
else:
num_classes = 100
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Regular cross-entropy (no distillation)
# args.teacher = teacher architecture (res8, res110...)
if args.teacher is not None:
# teacher = resnet8_cifar (PyTorch nn.module)
teacher = models_dict[args.teacher](num_classes=num_classes)
# regular cross_entropy for the teacher
if args.training_type == "ce":
        optimizer = torch.optim.SGD(teacher.parameters(),
                                    lr=args.lr,
                                    weight_decay=args.wd,
                                    momentum=args.momentum,
                                    nesterov=args.nesterov)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                         milestones=args.schedule,
                                                         gamma=args.schedule_gamma,
                                                         last_epoch=-1)
        # the return value is currently unused
# train_funcs::train_regular_ce
trained_model = train_regular_ce(model=teacher,
optimizer=optimizer,
epochs=args.epochs,
dataset=args.dataset,
train_on=args.gpu_id,
batch_size=args.batch_size,
scheduler=scheduler,
seed=args.seed,
path_to_save=args.path_to_save)
elif args.training_type == "fine_tune": #Fine_Tuning the mounted intermedeiate headers
saved_state_dict = torch.load(args.saved_path)
teacher.to(args.gpu_id)
testing_state_dict = {}
for (key, value), (key_saved, value_saved) in zip(teacher.state_dict().items(), saved_state_dict.items()):
testing_state_dict[key] = value_saved
teacher.load_state_dict(testing_state_dict)
teacher.eval()
        # use a random dummy input with the dataset's image shape (3, 32, 32) to probe the teacher's intermediate output shapes
virtual_input = torch.rand((1, 3, 32, 32),device=args.gpu_id)
outputs = teacher(virtual_input)
intermediate_classifier_models = {}
for mounted_head_index in range(intermediate_heads_quantity[args.teacher]):
# create intermediate classifier modules which can be mounted in different depth of the core teacher model.
intermediate_classifier_models[mounted_head_index+1] = Middle_Logit_Generator(outputs[mounted_head_index+1], num_classes=num_classes)
        total_internal_heads_params = []  # gather the trainable parameters of all mounted intermediate heads
for classifier in intermediate_classifier_models.values():
total_internal_heads_params += (list(classifier.parameters()))
optimizer_combined = torch.optim.SGD(total_internal_heads_params,
lr=args.lr,
weight_decay=args.wd,
momentum=args.momentum,
nesterov=args.nesterov)
scheduler_combined = torch.optim.lr_scheduler.MultiStepLR(optimizer_combined,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
        # fine-tune the added intermediate heads
train_regular_middle_logits(model=teacher,
optimizer=optimizer_combined,
path_to_save=args.path_to_save,
middle_logits_model_dict=intermediate_classifier_models,
epochs=args.epochs,
train_on=args.gpu_id,
scheduler=scheduler_combined,
batch_size=args.batch_size,
dataset=args.dataset)
elif args.training_type == "dih": #DIH Distillation
        if args.student is not None:
student = models_dict[args.student](num_classes=num_classes)
optimizer = torch.optim.SGD(student.parameters(),
lr=args.lr,
weight_decay=args.wd,
momentum = args.momentum,
nesterov = args.nesterov)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
            # load the fine-tuned intermediate heads
trained_intermediate_heads = load_trained_intermediate_heads(core_model=teacher,
core_model_saved_path=args.saved_path,
heads_directory=args.saved_intermediates_directory,
num_classes=num_classes)
#DIH distillation
train_via_dih(student=student,
trained_core_teacher=teacher,
traind_intermediate_classifers_dict=trained_intermediate_heads,
optimizer=optimizer,
dataset=args.dataset,
path_to_save=args.path_to_save,
epochs=args.epochs,
device_to_train_on= args.gpu_id,
input_sample_size=(args.batch_size, 32, 32),
multiple_gpu=None,
scheduler=scheduler,
kd_alpha=args.kd_alpha,
kd_temperature=args.kd_temperature,
seed=args.seed)
elif args.training_type == "kd":
        if args.student is not None:
student = models_dict[args.student](num_classes=num_classes)
            if args.saved_path is not None:
temp_dict = {}
teacher_saved_state_dict = torch.load(args.saved_path)
for (key,_),(key_saved,value_saved) in zip(teacher.state_dict().items(),teacher_saved_state_dict.items()):
if "module."+ key == key_saved:
temp_dict[key] = value_saved
teacher.load_state_dict(temp_dict)
teacher.eval()
optimizer = torch.optim.SGD(student.parameters(),
lr=args.lr,
weight_decay=args.wd,
momentum = args.momentum,
nesterov = args.nesterov)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
train_kd_or_fitnets_2(student=student,
trained_teacher=teacher,
optimizer=optimizer,
scheduler=scheduler,
input_data_size=(args.batch_size, 32, 32),
kd_alpha=args.kd_alpha,
kd_temperature=args.kd_temperature,
seed=args.seed,
epochs=args.epochs,
train_on=args.gpu_id,
dataset=args.dataset,
path_to_save=args.path_to_save)
elif args.training_type == "fitnets":
        if args.student is not None:
student = models_dict[args.student](num_classes=num_classes)
            if args.saved_path is not None:
temp_dict = {}
teacher_saved_state_dict = torch.load(args.saved_path)
for (key, _), (key_saved, value_saved) in zip(teacher.state_dict().items(),
teacher_saved_state_dict.items()):
if "module." + key == key_saved:
temp_dict[key] = value_saved
teacher.load_state_dict(temp_dict)
teacher.eval()
optimizer = torch.optim.SGD(student.parameters(),
lr=args.lr_fitnets_1,
weight_decay=args.wd_fitnets_1,
momentum=args.momentum_fitnets_1,
nesterov=args.nesterov_fitnets_1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=args.schedule_fitnets_1,
gamma=args.schedule_gamma_fitnets_1,
last_epoch=-1)
teacher_path = args.saved_path
saved_state_dict = torch.load(teacher_path)
testing_state_dict = {}
for (key, value), (key_saved, value_saved) in zip(teacher.state_dict().items(),
saved_state_dict.items()):
testing_state_dict[key] = value_saved
teacher.load_state_dict(testing_state_dict)
teacher.eval()
frozen_student_modules = [student.avgpool, student.fc, student.layer3]
            partial_student_state_dict = stage_1_fitnet_train(partial_student=student,
frozen_student_modules=frozen_student_modules,
partail_teacher=teacher,
guided_layer=None,
optimizer=optimizer,
path_to_save=args.path_to_save,
dataset=args.dataset,
epochs=args.epochs_fitnets_1,
train_on=args.gpu_id,
scheduler=scheduler,
input_data_size=(args.batch_size, 32, 32))
student_temp_weight = {}
            for (key, value), (key_saved, value_saved) in zip(student.state_dict().items(),
                                                              partial_student_state_dict.items()):
student_temp_weight[key] = value_saved
student.load_state_dict(student_temp_weight)
optimizer = torch.optim.SGD(student.parameters(),
lr=args.lr,
weight_decay=args.wd,
momentum=args.momentum,
nesterov=args.nesterov)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
train_kd_or_fitnets_2(student=student,
trained_teacher=teacher,
optimizer=optimizer,
scheduler=scheduler,
input_data_size=(args.batch_size, 32, 32),
kd_alpha=args.kd_alpha,
kd_temperature=args.kd_temperature,
seed=args.seed,
epochs=args.epochs,
train_on=args.gpu_id,
dataset=args.dataset,
path_to_save=args.path_to_save)
elif args.training_type == "dml":
        if args.student is not None:
student = models_dict[args.student](num_classes=num_classes)
peer1 = student
peer2 = teacher
optimizer_peer1 = torch.optim.SGD(student.parameters(),
lr=args.lr,
weight_decay=args.wd,
momentum=args.momentum,
nesterov=args.nesterov)
peer1_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer_peer1,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
optimizer_peer2 = torch.optim.SGD(teacher.parameters(),
lr=args.lr,
weight_decay=args.wd,
momentum=args.momentum,
nesterov=args.nesterov)
peer2_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer_peer2,
milestones=args.schedule,
gamma=args.schedule_gamma,
last_epoch=-1)
peers = {}
peers["peer1"] = peer1
peers["peer2"] = peer2
optimizers = {}
optimizers["peer1"] = optimizer_peer1
optimizers["peer2"] = optimizer_peer2
schedulers = {}
schedulers["peer1"] = peer1_scheduler
schedulers["peer2"] = peer2_scheduler
kd_temperature_dict = {}
kd_temperature_dict["peer1"] = 1.0
kd_temperature_dict["peer2"] = 1.0
kd_alpha_dict = {}
kd_alpha_dict["peer1"] = 0.1
kd_alpha_dict["peer2"] = 0.1
dml_train_regular(peers=peers,
optimizers=optimizers,
train_on=args.gpu_id,
dataset=args.dataset,
scheduler=schedulers,
alpha_dict=kd_alpha_dict,
temperature_dict=kd_temperature_dict,
path_directory_to_save=args.path_to_save,data_input_size=(args.batch_size, 32, 32),
seed = args.seed)
|
python
|
from nlgen.cfg import read_cfg
EXAMPLE_NO_FEATURE = """
# example.nlcfg
SENTENCE -> PRONOUN VI NOUN;
VI -> VERB;
PRONOUN -> "I";
VERB -> "play";
NOUN -> "blackjack" | "poker";
""".strip()
def test_no_feature_example():
cfg = read_cfg(EXAMPLE_NO_FEATURE)
# permutations returns a generator.
# we use sets for comparisons as there's
# no guaranteed order for generated
# values.
assert set(cfg.permutation_values("SENTENCE")) == set([
("I", "play", "blackjack"),
("I", "play", "poker"),
])
EXAMPLE_WITH_FEATURE = """
# example.nlcfg
SENTENCE -> PRONOUN VI NOUN;
VI -> VERB;
PRONOUN -> "I" {"person": 1} |
"you" {"person": "2"} |
"she" {"person": 3};
VERB -> "play" {"person": ["1", "2"]} |
"plays" {"person": 3};
NOUN -> "blackjack" | "poker";
"""
def test_with_feature_example():
cfg = read_cfg(EXAMPLE_WITH_FEATURE)
# permutations returns a generator.
# we use sets for comparisons as there's
# no guaranteed order for generated
# values.
assert set(cfg.permutation_values("SENTENCE")) == set([
("I", "play", "blackjack"),
("you", "play", "blackjack"),
("she", "plays", "blackjack"),
("I", "play", "poker"),
("you", "play", "poker"),
("she", "plays", "poker"),
])
|
python
|
from typing import Optional
from pydantic import BaseModel, Field
class UserModel(BaseModel):
name: str = Field(...)
user_id: str = Field(...)
password: str = Field(...)
email: str = Field(...)
is_admin: bool = Field(...)
en_date: str = Field(...)
de_date: str = Field(...)
birth_date: str = Field(...)
now_class: str = Field(...)
unit_company: str = Field(...)
unit_platoon: str = Field(...)
unit_squad: str = Field(...)
position: str = Field(...)
work_list: list = Field(...)
vacation: list = Field(...)
total_worked_time: dict = Field(...)
this_month_worked_time: dict = Field(...)
this_month_work_time_left: dict = Field(...)
prev_month_worked_time: dict = Field(...)
prev_day_worktime: int = Field(...)
prev_night_worktime: int = Field(...)
prev_free_worktime: int = Field(...)
new_day_worktime: int = Field(...)
new_night_worktime: int = Field(...)
new_free_worktime: int = Field(...)
fatigue: int = Field(...)
class Config:
schema_extra = {
"example": {
"name": "홍길동",
"user_id": "gildong21",
"password": "gildongpasswd21",
"email": "[email protected]",
"is_admin": False,
"en_date": "2020-11-09",
"de_date": "2022-05-08",
"birth_date": "1995-05-26",
"now_class": "상병",
"unit_company": "종합정비창",
"unit_platoon": "본부소대",
"unit_squad": "통신분대",
"position": "군사과학기술병",
"work_list": [ 1, 2 ],
"vacation": [
{
"start_date": "2021-05-01",
"end_date": "2021-05-04",
"description": "신병위로휴가",
},
{
"start_date": "2021-08-04",
"end_date": "2021-08-10",
"description": "청원휴가",
}
],
"total_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"this_month_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"this_month_work_time_left": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"prev_month_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"prev_day_worktime": 0,
"prev_night_worktime": 0,
"prev_free_worktime": 0,
"new_day_worktime": 0,
"new_night_worktime": 0,
"new_free_worktime": 0,
"fatigue": 0
}
}
class UpdateUserModel(BaseModel):
name: Optional[str]
user_id: Optional[str]
password: Optional[str]
email: Optional[str]
is_admin: Optional[bool]
en_date: Optional[str]
de_date: Optional[str]
birth_date: Optional[str]
now_class: Optional[str]
unit_company: Optional[str]
unit_platoon: Optional[str]
unit_squad: Optional[str]
position: Optional[str]
work_list: Optional[list]
vacation: Optional[list]
total_worked_time: Optional[dict]
this_month_worked_time: Optional[dict]
this_month_work_time_left: Optional[dict]
prev_month_worked_time: Optional[dict]
prev_day_worktime: Optional[int]
prev_night_worktime: Optional[int]
prev_free_worktime: Optional[int]
new_day_worktime: Optional[int]
new_night_worktime: Optional[int]
new_free_worktime: Optional[int]
fatigue: Optional[int]
class Config:
schema_extra = {
"example": {
"name": "홍길동",
"user_id": "gildong21",
"password": "gildongpasswd21",
"email": "[email protected]",
"is_admin": False,
"en_date": "2020-11-09",
"de_date": "2022-05-08",
"birth_date": "1995-05-26",
"now_class": "상병",
"unit_company": "종합정비창",
"unit_platoon": "본부소대",
"unit_squad": "통신분대",
"position": "군사과학기술병",
"work_list": [ 1, 2 ],
"vacation": [
{
"start_date": "2021-05-01",
"end_date": "2021-05-04",
"description": "신병위로휴가",
},
{
"start_date": "2021-08-04",
"end_date": "2021-08-10",
"description": "청원휴가",
}
],
"total_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"this_month_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"this_month_work_time_left": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"prev_month_worked_time": {
"day_worktime": 0,
"night_worktime": 0,
"free_worktime": 0
},
"prev_day_worktime": 0,
"prev_night_worktime": 0,
"prev_free_worktime": 0,
"new_day_worktime": 0,
"new_night_worktime": 0,
"new_free_worktime": 0,
"fatigue": 0
}
}
def ResponseModel(data, message):
return {
"data": [
data
],
"code": 200,
"message": message,
}
def ErrorResponseModel(error, code, message):
return {
"error": error,
"code": code,
"message": message
}
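# Usage sketch (a hypothetical FastAPI route; retrieve_user is an assumed
# helper that returns a validated user document):
# @app.get("/user/{user_id}")
# async def get_user(user_id: str):
#     user = await retrieve_user(user_id)
#     return ResponseModel(user, "User data retrieved successfully")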
|
python
|
"""
Misc lr helper
"""
from torch.optim import Adam, Adamax
from .adamw import AdamW
def build_optimizer(model, opts):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': opts.weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
    # Adam-family optimizers only (Adam, Adamax, AdamW)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
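# Usage sketch ('opts' is assumed to be a simple namespace exposing optim,
# learning_rate, betas and weight_decay; shown here with argparse.Namespace):
# import argparse
# opts = argparse.Namespace(optim='adamw', learning_rate=5e-5,
#                           betas=(0.9, 0.999), weight_decay=0.01)
# optimizer = build_optimizer(model, opts)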
|
python
|
# Generated by Django 3.0.5 on 2020-04-07 16:08
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('django_products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('product_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_products.Product')),
('sn', models.CharField(blank=True, max_length=150, null=True, verbose_name='serial number')),
('stock_on_hand', models.IntegerField(default=0, verbose_name='stock on hand')),
('stock_on_delivery', models.IntegerField(default=0, verbose_name='stock on delivery')),
('stock_on_request', models.IntegerField(default=0, verbose_name='stock on request')),
('min_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='min stock')),
('max_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='max stock')),
],
options={
'verbose_name': 'Book',
},
bases=('django_products.product', models.Model),
),
migrations.CreateModel(
name='Music',
fields=[
('product_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_products.Product')),
('sn', models.CharField(blank=True, max_length=150, null=True, verbose_name='serial number')),
('stock_on_hand', models.IntegerField(default=0, verbose_name='stock on hand')),
('stock_on_delivery', models.IntegerField(default=0, verbose_name='stock on delivery')),
('stock_on_request', models.IntegerField(default=0, verbose_name='stock on request')),
('min_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='min stock')),
('max_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='max stock')),
],
options={
'verbose_name': 'Music',
},
bases=('django_products.product', models.Model),
),
migrations.CreateModel(
name='Video',
fields=[
('product_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='django_products.Product')),
('sn', models.CharField(blank=True, max_length=150, null=True, verbose_name='serial number')),
('stock_on_hand', models.IntegerField(default=0, verbose_name='stock on hand')),
('stock_on_delivery', models.IntegerField(default=0, verbose_name='stock on delivery')),
('stock_on_request', models.IntegerField(default=0, verbose_name='stock on request')),
('min_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='min stock')),
('max_stock', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='max stock')),
],
options={
'verbose_name': 'Video',
},
bases=('django_products.product', models.Model),
),
]
|
python
|
import aiodocker
async def get_docker_images():
    docker = aiodocker.Docker()
    try:
        print("== Images ==")
        for image in await docker.images.list():
            tags = image["RepoTags"][0] if image["RepoTags"] else ""
            print(image["Id"], tags)
    finally:
        # close the client session to avoid leaking the underlying connection
        await docker.close()
async def pull(name: str):
    docker = aiodocker.Docker()
    try:
        await docker.pull(name)
    finally:
        await docker.close()
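# Usage sketch (coroutines must be driven by an event loop):
# import asyncio
# asyncio.run(get_docker_images())
# asyncio.run(pull("alpine:latest"))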
|
python
|
from train import d_conv_dim, g_conv_dim, z_size, model_name, img_size
import torch
import pickle as pkl
import matplotlib.pyplot as plt
import numpy as np
from train_class.load_model import Discriminator, Generator
from train_class.load_model import build_network
import torch.nn.functional as F
# load pretrained models (D -> discriminator, G -> generator)
D, G = build_network(d_conv_dim, g_conv_dim, z_size)
D = torch.load(f"models/trained_D{model_name}.pt")
G = torch.load(f"models/trained_G{model_name}.pt")
# show sample generated images from specific epoch
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(16,4), nrows=2, ncols=8, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
img = img.detach().cpu().numpy()
img = np.transpose(img, (1, 2, 0))
img = ((img + 1)*255 / 2).astype(np.uint8)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
        ax.imshow(img.reshape((img_size, img_size, 3)))
# generates new faces
def generate():
sample_size = 16
fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
fixed_z = torch.from_numpy(fixed_z).float()
fixed_z = fixed_z.cuda()
G.eval() # for generating samples
samples_z = G(fixed_z)
G.train() # back to training mode
view_samples(0, [samples_z])
# generates new faces and picks the one that is assessed as the most real by the discriminator
def intelligent_generate():
sample_size = 16
G.eval() # for generating samples
max_val = 0
while max_val < 0.3:
fixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))
fixed_z = torch.from_numpy(fixed_z).float()
fixed_z = fixed_z.cuda()
samples_z = G(fixed_z)
        predictions = torch.sigmoid(D(samples_z)).cpu()  # F.sigmoid is deprecated in favor of torch.sigmoid
predictions = predictions.detach().numpy()
predictions = predictions.flatten()
max_val = np.max(predictions)
        best_idx = np.where(predictions == max_val)[0][0]  # avoid shadowing the built-in id()
G.train() # back to training mode
fig, axes = plt.subplots(figsize=(1, 1), nrows=1, ncols=1, sharey=True, sharex=True)
    img = samples_z[best_idx].cpu().detach().numpy()
img = np.transpose(img, (1, 2, 0))
img = ((img + 1) * 255 / 2).astype(np.uint8)
axes.imshow(img.reshape((img_size, img_size, 3)))
view_samples(0, [samples_z])
# opens samples saved during training for each epoch
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
#view_samples(0, samples)
#generate()
intelligent_generate()
|
python
|
import numpy as np
from core import Cache
from core import scipy_diffev
from core import notification
from core import preset
from core import track_and_update_metric
from visualization import display_iterations
def diffev_function_test(w, *args):
cache = args[0]
cache.w = w
residual = 0
for x, y in zip(cache.test_x, cache.test_y):
poly = cache.w[0] + cache.w[1]*x + cache.w[2]*x**2 + cache.w[3]*x**3
residual += (poly - y) ** 2
cache.output = np.sqrt(residual/len(cache.test_x))
cache = track_and_update_metric(cache)
return cache.output
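# Note: the loop above is equivalent to the vectorized form
#   poly = np.polyval(cache.w[::-1], cache.test_x)
#   cache.output = np.sqrt(np.mean((poly - cache.test_y) ** 2))
# (np.polyval expects coefficients ordered from highest degree to lowest,
# hence the reversal of w).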
def main():
cache = Cache()
# scipy differential evolution parameters
cache.diffev_function = diffev_function_test
cache.diffev_bounds = [(-2, 2), (-2, 2), (-2, 2), (-2, 2)]
    cache.diffev_args = (cache,)  # extra args are conventionally passed as a tuple
cache.diffev_strategy = 'best1bin'
cache.diffev_maxiter = 1000
cache.diffev_popsize = 15
cache.diffev_tol = 1e-16
cache.diffev_atol = 1e-16
cache.diffev_mutation = (0.5, 1)
cache.diffev_recombination = 0.9
cache.diffev_seed = None
cache.diffev_callback = None
cache.diffev_disp = False
cache.diffev_polish = True
cache.diffev_init = 'latinhypercube'
cache.diffev_updating = 'immediate'
cache.diffev_workers = 1
# fileio parameters
cache.dirname = 'test'
cache.filename = 'cache.pkl'
# test parameter
w = [-1.2, 0.4, -0.6, -0.8]
x = np.linspace(0.1, 2.0, 100)
y = w[0] + w[1] * x + w[2] * x ** 2 + w[3] * x ** 3
cache.test_x = x
cache.test_y = y
cache.parameter_names = ['w0', 'w1', 'w2', 'w3']
# evaluation reset parameters
cache.iteration = 0
cache.reset_interval = 10
# initialization of tracking and fileio variables
cache = preset(cache)
    # actual differential evolution step
cache = scipy_diffev(cache)
# notification step
cache = notification(cache)
# fileio step
cache.save()
# instant visualization step
cache = display_iterations(cache)
return cache
if __name__ == '__main__':
main()
|
python
|
from stockpyle._base import BaseDictionaryStore
class ShoveStore(BaseDictionaryStore):
"""Represents a store that places all objects in a Shove (see http://pypi.python.org/pypi/shove)"""
def __init__(self, shove=None, shoveuri=None, polymorphic=False, lifetime_cb=None):
# TODO: deprecate 'shoveuri' in favor of 'uri'
if shove is not None and shoveuri is not None:
raise ValueError("you can only provide either a Shove object or a Shove URI, not both")
elif shove is not None:
self.__shove = shove
elif shoveuri is not None:
from shove import Shove
self.__shove = Shove(shoveuri)
else:
raise ValueError("you must provide either a Shove object or a Shove URI to create a ShoveStore")
super(ShoveStore, self).__init__(dictionary=self.__shove, polymorphic=polymorphic, lifetime_cb=lifetime_cb)
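# Usage sketch (requires the 'shove' package; "simple://" is its in-memory
# backend URI):
# store = ShoveStore(shoveuri="simple://")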
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from typing import Optional, List, Dict, Set
from pydolphinscheduler.constants import ProcessDefinitionReleaseState, ProcessDefinitionDefault
from pydolphinscheduler.core.base import Base
from pydolphinscheduler.java_gateway import launch_gateway
from pydolphinscheduler.side import Tenant, Project, User
class ProcessDefinitionContext:
_context_managed_process_definition: Optional["ProcessDefinition"] = None
@classmethod
def set(cls, pd: "ProcessDefinition") -> None:
cls._context_managed_process_definition = pd
@classmethod
def get(cls) -> Optional["ProcessDefinition"]:
return cls._context_managed_process_definition
@classmethod
def delete(cls) -> None:
cls._context_managed_process_definition = None
class ProcessDefinition(Base):
"""
ProcessDefinition
    TODO: the :ref: targets in comments may not be correct refs
TODO: maybe we should rename this class, currently use DS object name
"""
# key attribute for identify ProcessDefinition object
_KEY_ATTR = {
"name",
"project",
"tenant",
"release_state",
"param",
}
_TO_DICT_ATTR = {
"name",
"description",
"_project",
"_tenant",
"timeout",
"release_state",
"param",
"tasks",
"task_definition_json",
"task_relation_json",
}
def __init__(
self,
name: str,
description: Optional[str] = None,
user: Optional[str] = ProcessDefinitionDefault.USER,
project: Optional[str] = ProcessDefinitionDefault.PROJECT,
tenant: Optional[str] = ProcessDefinitionDefault.TENANT,
queue: Optional[str] = ProcessDefinitionDefault.QUEUE,
timeout: Optional[int] = 0,
release_state: Optional[str] = ProcessDefinitionReleaseState.ONLINE,
param: Optional[List] = None
):
super().__init__(name, description)
self._user = user
self._project = project
self._tenant = tenant
self._queue = queue
self.timeout = timeout
self.release_state = release_state
self.param = param
self.tasks: dict = {}
# TODO how to fix circle import
self._task_relations: set["TaskRelation"] = set()
self._process_definition_code = None
def __enter__(self) -> "ProcessDefinition":
ProcessDefinitionContext.set(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
ProcessDefinitionContext.delete()
@property
def tenant(self) -> Tenant:
return Tenant(self._tenant)
@tenant.setter
def tenant(self, tenant: Tenant) -> None:
self._tenant = tenant.name
@property
def project(self) -> Project:
return Project(self._project)
@project.setter
def project(self, project: Project) -> None:
self._project = project.name
@property
def user(self) -> User:
return User(self._user,
ProcessDefinitionDefault.USER_PWD,
ProcessDefinitionDefault.USER_EMAIL,
ProcessDefinitionDefault.USER_PHONE,
ProcessDefinitionDefault.TENANT,
ProcessDefinitionDefault.QUEUE,
ProcessDefinitionDefault.USER_STATE)
@property
def task_definition_json(self) -> List[Dict]:
if not self.tasks:
return [self.tasks]
else:
return [task.to_dict() for task in self.tasks.values()]
@property
def task_relation_json(self) -> List[Dict]:
if not self.tasks:
return [self.tasks]
else:
self._handle_root_relation()
return [tr.to_dict() for tr in self._task_relations]
    # TODO: initially, all of the DAG's tasks are placed at the same location
@property
def task_location(self) -> List[Dict]:
if not self.tasks:
return [self.tasks]
else:
return [{"taskCode": task_code, "x": 0, "y": 0} for task_code in self.tasks]
@property
def task_list(self) -> List["Task"]:
return list(self.tasks.values())
def _handle_root_relation(self):
from pydolphinscheduler.core.task import TaskRelation
post_relation_code = set()
for relation in self._task_relations:
post_relation_code.add(relation.post_task_code)
for task in self.task_list:
if task.code not in post_relation_code:
root_relation = TaskRelation(pre_task_code=0, post_task_code=task.code)
self._task_relations.add(root_relation)
def add_task(self, task: "Task") -> None:
self.tasks[task.code] = task
task._process_definition = self
def add_tasks(self, tasks: List["Task"]) -> None:
for task in tasks:
self.add_task(task)
def get_task(self, code: str) -> "Task":
if code not in self.tasks:
raise ValueError("Task with code %s can not found in process definition %", (code, self.name))
return self.tasks[code]
    # TODO: decide which typing should be returned in this case
def get_tasks_by_name(self, name: str) -> Set["Task"]:
find = set()
for task in self.tasks.values():
if task.name == name:
find.add(task)
return find
def get_one_task_by_name(self, name: str) -> "Task":
tasks = self.get_tasks_by_name(name)
if not tasks:
raise ValueError(f"Can not find task with name {name}.")
return tasks.pop()
def run(self):
"""
Run ProcessDefinition instance, a shortcut for :ref: submit and :ref: start
        Only manual runs are supported for now; scheduled runs are coming soon.
:return:
"""
self.submit()
self.start()
def _ensure_side_model_exists(self):
"""
        Ensure the side models exist, including :ref: Project, Tenant, User.
        If a model does not exist, create it with the default values in :ref: ProcessDefinitionDefault.
"""
        # TODO: use a metaclass to make this more Pythonic
self.tenant.create_if_not_exists(self._queue)
# model User have to create after Tenant created
self.user.create_if_not_exists()
# Project model need User object exists
self.project.create_if_not_exists(self._user)
def submit(self) -> int:
"""
Submit ProcessDefinition instance to java gateway
:return:
"""
self._ensure_side_model_exists()
gateway = launch_gateway()
self._process_definition_code = gateway.entry_point.createOrUpdateProcessDefinition(
self._user,
self._project,
self.name,
str(self.description) if self.description else "",
str(self.param) if self.param else None,
json.dumps(self.task_location),
self.timeout,
self._tenant,
# TODO add serialization function
json.dumps(self.task_relation_json),
json.dumps(self.task_definition_json),
)
return self._process_definition_code
def start(self) -> None:
"""
Start ProcessDefinition instance which post to `start-process-instance` to java gateway
:return:
"""
gateway = launch_gateway()
gateway.entry_point.execProcessInstance(
self._user,
self._project,
self.name,
"",
"default",
24 * 3600,
)
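# Usage sketch (the task class lives elsewhere in pydolphinscheduler, e.g.
# pydolphinscheduler.tasks.shell.Shell; its import is assumed here):
# with ProcessDefinition(name="tutorial", tenant="tenant_exists") as pd:
#     task = Shell(name="say-hello", command="echo hello pydolphinscheduler")
#     pd.run()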
|
python
|
##########################################################
### Import Necessary Modules
import argparse #provides options at the command line
import sys #take command line arguments and uses it in the script
import gzip #allows gzipped files to be read
import re #allows regular expressions to be used
import subprocess #allows external programs to be called
##########################################################
### Command-line Arguments
parser = argparse.ArgumentParser(description="A script to read in a vcf file and return a fasta file. Requires samtools to be installed and in path.")
parser.add_argument("-vcf", help = "The location of the VCF file (accepts .gz format)", default=sys.stdin, required=True)
parser.add_argument("-fasta", help = "The location of the fasta file. Can be indexed (which will speed up the script).", default=sys.stdin, required=True)
parser.add_argument("-fai", help = "The location of the index of a fasta file (.fai format), optional, default = NA", default="NA")
parser.add_argument("-flanking", help = "The distance of flanking sequence around the SNP variant, if the sequence is too close to an edge it will not be returned, default = 50", default=50)
args = parser.parse_args()
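# Example invocation (file names are hypothetical):
#   python vcf_to_fasta.py -vcf variants.vcf.gz -fasta genome.fa -flanking 50 > snp_flanks.fasta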
#########################################################
### Open file (object-oriented programming)
class OpenFile():
sequences = {}
def __init__ (self, v, t):
if re.search(".gz$", v):
self.file = gzip.open(v, 'r')
else:
self.file = open(v, 'r')
if t == "vcf":
sys.stderr.write("Opened vcf file\n")
self.readLinesVCF(self.file)
elif t == "fai":
sys.stderr.write("Opened index of fasta file\n")
self.readLinesFastaFai(self.file)
else:
sys.stderr.write("Opened fasta file\n")
self.readLinesFasta(self.file)
def readLinesVCF(self, vc):
self.open_vcf = vc
self.variants_found = 0
self.variants_used = 0
for line in self.open_vcf:
try:
line = line.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):  # already a decoded str
pass
line = line.rstrip('\n')
if not re.search("^#", line):
line_list = line.split("\t")
self.chrom = line_list[0]
self.pos = line_list[1]
self.ref = line_list[3]
self.alt = line_list[4]
self.start = int(self.pos) - int(args.flanking)
self.end = int(self.pos) + int(args.flanking)
self.snpPosInFlanking = int(args.flanking) + 1
self.variants_found += 1
if int(self.start) > 0 and int(OpenFile.sequences[self.chrom]) >= int(self.end):
###Affx-88959408:A:G:36
self.location = str(self.chrom) + ":" + str(self.start) + "-" + str(self.end)
self.header = str(self.chrom) + "_" + str(self.pos) + ":" + str(self.ref) + ":" + str(self.alt) + ":" + str(self.snpPosInFlanking)
self.sequence = "NA"
process = subprocess.Popen(["samtools", "faidx", args.fasta, self.location], stdout=subprocess.PIPE)
process.wait()
for line in process.stdout:
try:
line = line.decode('utf-8')
                    except (AttributeError, UnicodeDecodeError):
pass
line = line.rstrip("\n")
if not re.search("^>", line) and re.search("\w", line):
if self.sequence == "NA":
self.sequence = line
else:
self.sequence += line
if self.sequence != "NA":
print (">{}\n{}".format(self.header, self.sequence))
self.variants_used += 1
self.open_vcf.close()
sys.stderr.write("\tFinished reading vcf file: Found {} variants, used {}\n".format(self.variants_found, self.variants_used))
def readLinesFasta(self, f):
"""Measures the lengths of the scaffolds in the fasta file"""
self.filename = f
self.header = "NA"
self.seq = ""
self.number_scaffolds = 0
self.total_size = 0
for line in self.filename:
try:
line = line.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):
pass
line = line.rstrip('\n')
if re.search("^\>", line):
if self.header == "NA":
self.header = line.split(" ")[0][1:]
else:
if not (int(len(self.header)) > 0 and int(len(self.seq)) > 0):
sys.stderr.write("\tSequence Found not conforming to fasta: {}\n".format(self.header))
OpenFile.sequences[self.header] = int(len(self.seq))
self.total_size += int(len(self.seq))
self.seq = ""
self.header = line.split(" ")[0][1:]
self.number_scaffolds += 1
elif re.search("\w", line):
self.seq += line
if not (int(len(self.header)) > 0 and int(len(self.seq)) > 0):
sys.stderr.write("\tSequence Found not conforming to fasta: {}\n".format(self.header))
OpenFile.sequences[self.header] = int(len(self.seq))
self.total_size += int(len(self.seq))
self.seq = ""
sys.stderr.write("\tFinished reading scaffold fasta file: Found {} sequence(s)\n".format(self.number_scaffolds))
sys.stderr.write(" Total Nucleotide(s): {}\n\n".format(self.total_size))
self.filename.close()
def readLinesFastaFai(self, f):
"""Measures the lengths of the scaffolds in the fai file"""
self.filename = f
self.number_scaffolds = 0
self.total_size = 0
for line in self.filename:
try:
line = line.decode('utf-8')
            except (AttributeError, UnicodeDecodeError):
pass
line = line.rstrip('\n')
(self.header, self.nucleotideCount, self.offset, self.lineBases, self.lineWidth) = line.split("\t")
OpenFile.sequences[self.header] = int(self.nucleotideCount)
self.total_size += int(self.nucleotideCount)
self.number_scaffolds += 1
sys.stderr.write("\tFinished reading scaffold fasta fai file: Found {} sequence(s)\n".format(self.number_scaffolds))
sys.stderr.write(" Total Nucleotide(s): {}\n\n".format(self.total_size))
self.filename.close()
if __name__ == '__main__':
if args.fai == "NA":
open_fasta = OpenFile(args.fasta, "fasta")
else:
open_fai = OpenFile(args.fai, "fai")
open_vcf = OpenFile(args.vcf, "vcf")
|
python
|
# from .__S3File import s3_set_profile as set_profile
from .__S3File import s3_xlist as xlist
from .__S3File import s3_download as download
from .__S3File import s3_upload as upload
from .__S3File import s3_load as load
from .__S3File import s3_save as save
from .__S3File import s3_open as open
from .__S3File import s3_stream as stream
|
python
|
# Copyright 2017, 2018 Google, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The integrated LexNET model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import lexnet_common
import numpy as np
import tensorflow as tf
from six.moves import xrange
class LexNETModel(object):
"""The LexNET model for classifying relationships between noun compounds."""
@classmethod
def default_hparams(cls):
"""Returns the default hyper-parameters."""
return tf.contrib.training.HParams(
batch_size=10,
num_classes=37,
num_epochs=30,
input_keep_prob=0.9,
input='integrated', # dist/ dist-nc/ path/ integrated/ integrated-nc
learn_relata=False,
corpus='wiki_gigawords',
random_seed=133, # zero means no random seed
relata_embeddings_file='glove/glove.6B.300d.bin',
nc_embeddings_file='nc_glove/vecs.6B.300d.bin',
path_embeddings_file='path_embeddings/tratz/fine_grained/wiki',
hidden_layers=1,
path_dim=60)
def __init__(self, hparams, relata_embeddings, path_embeddings, nc_embeddings,
path_to_index):
"""Initialize the LexNET classifier.
Args:
hparams: the hyper-parameters.
relata_embeddings: word embeddings for the distributional component.
path_embeddings: embeddings for the paths.
nc_embeddings: noun compound embeddings.
path_to_index: a mapping from string path to an index in the path
embeddings matrix.
"""
self.hparams = hparams
self.path_embeddings = path_embeddings
self.relata_embeddings = relata_embeddings
self.nc_embeddings = nc_embeddings
self.vocab_size, self.relata_dim = 0, 0
self.path_to_index = None
self.path_dim = 0
# Set the random seed
if hparams.random_seed > 0:
tf.set_random_seed(hparams.random_seed)
# Get the vocabulary size and relata dim
if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']:
self.vocab_size, self.relata_dim = self.relata_embeddings.shape
# Create the mapping from string path to an index in the embeddings matrix
if self.hparams.input in ['path', 'integrated', 'integrated-nc']:
self.path_to_index = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(
              tf.constant(list(path_to_index.keys())),
              tf.constant(list(path_to_index.values())),
key_dtype=tf.string, value_dtype=tf.int32), 0)
self.path_dim = self.path_embeddings.shape[1]
# Create the network
self.__create_computation_graph__()
def __create_computation_graph__(self):
"""Initialize the model and define the graph."""
network_input = 0
# Define the network inputs
# Distributional x and y
if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']:
network_input += 2 * self.relata_dim
self.relata_lookup = tf.get_variable(
'relata_lookup',
initializer=self.relata_embeddings,
dtype=tf.float32,
trainable=self.hparams.learn_relata)
# Path-based
if self.hparams.input in ['path', 'integrated', 'integrated-nc']:
network_input += self.path_dim
self.path_initial_value_t = tf.placeholder(tf.float32, None)
self.path_lookup = tf.get_variable(
name='path_lookup',
dtype=tf.float32,
trainable=False,
shape=self.path_embeddings.shape)
self.initialize_path_op = tf.assign(
self.path_lookup, self.path_initial_value_t, validate_shape=False)
# Distributional noun compound
if self.hparams.input in ['dist-nc', 'integrated-nc']:
network_input += self.relata_dim
self.nc_initial_value_t = tf.placeholder(tf.float32, None)
self.nc_lookup = tf.get_variable(
name='nc_lookup',
dtype=tf.float32,
trainable=False,
shape=self.nc_embeddings.shape)
self.initialize_nc_op = tf.assign(
self.nc_lookup, self.nc_initial_value_t, validate_shape=False)
hidden_dim = network_input // 2
# Define the MLP
if self.hparams.hidden_layers == 0:
self.weights1 = tf.get_variable(
'W1',
shape=[network_input, self.hparams.num_classes],
dtype=tf.float32)
self.bias1 = tf.get_variable(
'b1',
shape=[self.hparams.num_classes],
dtype=tf.float32)
elif self.hparams.hidden_layers == 1:
self.weights1 = tf.get_variable(
'W1',
shape=[network_input, hidden_dim],
dtype=tf.float32)
self.bias1 = tf.get_variable(
'b1',
shape=[hidden_dim],
dtype=tf.float32)
self.weights2 = tf.get_variable(
'W2',
shape=[hidden_dim, self.hparams.num_classes],
dtype=tf.float32)
self.bias2 = tf.get_variable(
'b2',
shape=[self.hparams.num_classes],
dtype=tf.float32)
else:
raise ValueError('Only 0 or 1 hidden layers are supported')
# Define the variables
self.instances = tf.placeholder(dtype=tf.string,
shape=[self.hparams.batch_size])
(self.x_embedding_id,
self.y_embedding_id,
self.nc_embedding_id,
self.path_embedding_id,
self.path_counts,
self.labels) = parse_tensorflow_examples(
self.instances, self.hparams.batch_size, self.path_to_index)
# Create the MLP
self.__mlp__()
self.instances_to_load = tf.placeholder(dtype=tf.string, shape=[None])
self.labels_to_load = lexnet_common.load_all_labels(self.instances_to_load)
self.pairs_to_load = lexnet_common.load_all_pairs(self.instances_to_load)
def load_labels(self, session, instances):
"""Loads the labels for these instances.
Args:
session: The current TensorFlow session,
instances: The instances for which to load the labels.
Returns:
the labels of these instances.
"""
return session.run(self.labels_to_load,
feed_dict={self.instances_to_load: instances})
def load_pairs(self, session, instances):
"""Loads the word pairs for these instances.
Args:
session: The current TensorFlow session,
instances: The instances for which to load the labels.
Returns:
the word pairs of these instances.
"""
word_pairs = session.run(self.pairs_to_load,
feed_dict={self.instances_to_load: instances})
return [pair[0].split('::') for pair in word_pairs]
def __train_single_batch__(self, session, batch_instances):
"""Train a single batch.
Args:
session: The current TensorFlow session.
      batch_instances: TensorFlow examples containing the training instances.
Returns:
The cost for the current batch.
"""
cost, _ = session.run([self.cost, self.train_op],
feed_dict={self.instances: batch_instances})
return cost
def fit(self, session, inputs, on_epoch_completed, val_instances, val_labels,
save_path):
"""Train the model.
Args:
session: The current TensorFlow session.
      inputs: TensorFlow examples containing the training instances.
on_epoch_completed: A method to call after each epoch.
val_instances: The validation set instances (evaluation between epochs).
val_labels: The validation set labels (for evaluation between epochs).
save_path: Where to save the model.
"""
for epoch in range(self.hparams.num_epochs):
losses = []
epoch_indices = list(np.random.permutation(len(inputs)))
# If the number of instances doesn't divide by batch_size, enlarge it
# by duplicating training examples
mod = len(epoch_indices) % self.hparams.batch_size
if mod > 0:
epoch_indices.extend([np.random.randint(0, high=len(inputs))] * mod)
# Define the batches
n_batches = len(epoch_indices) // self.hparams.batch_size
for minibatch in range(n_batches):
batch_indices = epoch_indices[minibatch * self.hparams.batch_size:(
minibatch + 1) * self.hparams.batch_size]
batch_instances = [inputs[i] for i in batch_indices]
loss = self.__train_single_batch__(session, batch_instances)
losses.append(loss)
epoch_loss = np.nanmean(losses)
if on_epoch_completed:
should_stop = on_epoch_completed(self, session, epoch, epoch_loss,
val_instances, val_labels, save_path)
if should_stop:
print('Stopping training after %d epochs.' % epoch)
return
def predict(self, session, inputs):
"""Predict the classification of the test set.
Args:
session: The current TensorFlow session.
inputs: the train paths, x, y and/or nc vectors
Returns:
The test predictions.
"""
predictions, _ = zip(*self.predict_with_score(session, inputs))
return np.array(predictions)
def predict_with_score(self, session, inputs):
"""Predict the classification of the test set.
Args:
session: The current TensorFlow session.
inputs: the test paths, x, y and/or nc vectors
Returns:
The test predictions along with their scores.
"""
test_pred = [0] * len(inputs)
for chunk in xrange(0, len(test_pred), self.hparams.batch_size):
# Initialize the variables with the current batch data
batch_indices = list(
range(chunk, min(chunk + self.hparams.batch_size, len(test_pred))))
# If the batch is too small, add a few other examples
if len(batch_indices) < self.hparams.batch_size:
batch_indices += [0] * (self.hparams.batch_size - len(batch_indices))
batch_instances = [inputs[i] for i in batch_indices]
predictions, scores = session.run(
[self.predictions, self.scores],
feed_dict={self.instances: batch_instances})
for index_in_batch, index_in_dataset in enumerate(batch_indices):
prediction = predictions[index_in_batch]
score = scores[index_in_batch][prediction]
test_pred[index_in_dataset] = (prediction, score)
return test_pred
def __mlp__(self):
"""Performs the MLP operations.
Returns: the prediction object to be computed in a Session
"""
# Define the operations
# Network input
vec_inputs = []
# Distributional component
if self.hparams.input in ['dist', 'dist-nc', 'integrated', 'integrated-nc']:
for emb_id in [self.x_embedding_id, self.y_embedding_id]:
vec_inputs.append(tf.nn.embedding_lookup(self.relata_lookup, emb_id))
# Noun compound component
if self.hparams.input in ['dist-nc', 'integrated-nc']:
vec = tf.nn.embedding_lookup(self.nc_lookup, self.nc_embedding_id)
vec_inputs.append(vec)
# Path-based component
if self.hparams.input in ['path', 'integrated', 'integrated-nc']:
# Get the current paths for each batch instance
self.path_embeddings = tf.nn.embedding_lookup(self.path_lookup,
self.path_embedding_id)
# self.path_embeddings is of shape
# [batch_size, max_path_per_instance, output_dim]
# We need to multiply it by path counts
# ([batch_size, max_path_per_instance]).
# Start by duplicating path_counts along the output_dim axis.
self.path_freq = tf.tile(tf.expand_dims(self.path_counts, -1),
[1, 1, self.path_dim])
# Compute the averaged path vector for each instance.
# First, multiply the path embeddings and frequencies element-wise.
self.weighted = tf.multiply(self.path_freq, self.path_embeddings)
# Second, take the sum to get a tensor of shape [batch_size, output_dim].
self.pair_path_embeddings = tf.reduce_sum(self.weighted, 1)
# Finally, divide by the total number of paths.
# The number of paths for each pair has a shape [batch_size, 1],
# We duplicate it output_dim times along the second axis.
self.num_paths = tf.clip_by_value(
tf.reduce_sum(self.path_counts, 1), 1, np.inf)
self.num_paths = tf.tile(tf.expand_dims(self.num_paths, -1),
[1, self.path_dim])
# And finally, divide pair_path_embeddings by num_paths element-wise.
self.pair_path_embeddings = tf.div(
self.pair_path_embeddings, self.num_paths)
vec_inputs.append(self.pair_path_embeddings)
# Concatenate the inputs and feed to the MLP
self.input_vec = tf.nn.dropout(
tf.concat(vec_inputs, 1),
keep_prob=self.hparams.input_keep_prob)
h = tf.matmul(self.input_vec, self.weights1)
self.output = h
if self.hparams.hidden_layers == 1:
self.output = tf.matmul(tf.nn.tanh(h), self.weights2)
self.scores = self.output
self.predictions = tf.argmax(self.scores, axis=1)
# Define the loss function and the optimization algorithm
self.cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.scores, labels=self.labels)
self.cost = tf.reduce_sum(self.cross_entropies, name='cost')
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.optimizer = tf.train.AdamOptimizer()
self.train_op = self.optimizer.minimize(
self.cost, global_step=self.global_step)
def parse_tensorflow_examples(record, batch_size, path_to_index):
"""Reads TensorFlow examples from a RecordReader.
Args:
record: a record with TensorFlow examples.
batch_size: the number of instances in a minibatch
path_to_index: mapping from string path to index in the embeddings matrix.
Returns:
The word embeddings IDs, paths and counts
"""
features = tf.parse_example(
record, {
'x_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
'y_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
'nc_embedding_id': tf.FixedLenFeature([1], dtype=tf.int64),
'reprs': tf.FixedLenSequenceFeature(
shape=(), dtype=tf.string, allow_missing=True),
'counts': tf.FixedLenSequenceFeature(
shape=(), dtype=tf.int64, allow_missing=True),
'rel_id': tf.FixedLenFeature([1], dtype=tf.int64)
})
x_embedding_id = tf.squeeze(features['x_embedding_id'], [-1])
y_embedding_id = tf.squeeze(features['y_embedding_id'], [-1])
nc_embedding_id = tf.squeeze(features['nc_embedding_id'], [-1])
labels = tf.squeeze(features['rel_id'], [-1])
path_counts = tf.to_float(tf.reshape(features['counts'], [batch_size, -1]))
path_embedding_id = None
if path_to_index:
path_embedding_id = path_to_index.lookup(features['reprs'])
return (
x_embedding_id, y_embedding_id, nc_embedding_id,
path_embedding_id, path_counts, labels)
|
python
|
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
class SEIRModelAge:
"""
This class implements a SEIR-like compartmental epidemic model
consisting of SEIR states plus death, and hospitalizations and age
structure.
    In the diff eq modeling, these parameters are assumed to be exponentially
    distributed and modeling occurs in the thermodynamic limit, i.e. we do not
    perform Monte Carlo simulation of individual cases.
Parameters
----------
N: np.array
Total population per age group.
t_list: array-like
Array of timesteps. Usually these are spaced daily.
suppression_policy: callable
Suppression_policy(t) should return a scalar in [0, 1] which
represents the contact rate reduction from social distancing.
    A_initial: np.array
        Initial asymptomatic cases per age group.
    I_initial: np.array
        Initial infections per age group.
    R_initial: int
        Initial recovered (combining all age groups).
    E_initial: np.array
        Initial exposed per age group.
    HGen_initial: np.array
        Initial number of general hospital admissions per age group.
    HICU_initial: np.array
        Initial number of ICU cases per age group.
    HICUVent_initial: np.array
        Initial number of ICU cases on ventilators per age group.
    D_initial: int
        Initial number of deaths (combining all age groups).
n_days: int
Number of days to simulate.
birth_rate : float
Birth per capita per day
    natural_death_rate : float
        Fatality rate due to natural causes.
max_age: int
Age upper limit.
age_steps : np.array
Time people spend in each age group.
Last age bin edge is assumed to be 120 years old.
    age_groups : list
        Age groups as (lower, upper) bin-edge tuples, e.g. (0, 5), (5, 10), ..., (85, 120).
num_compartments_by_age: int
Number of compartments with age structure.
Default 7: S, E, A, I, HGen, HICU, HICUVent.
num_compartments_not_by_age: int
Number of compartments without age structure.
Default 7: R, D, D_no_hgen, D_no_icu, HAdmissions_general,
HAdmissions_ICU, TotalAllInfections.
R0: float
Basic Reproduction number
R0_hospital: float
Basic Reproduction number in the hospital.
    kappa: float
        Fractional contact rate for those with symptoms (who should be
        isolated) relative to asymptomatic cases, who are less isolated.
        A value of 1 implies the same rate; a value of 0 implies symptomatic
        people never infect others.
sigma: float
Latent decay scale is defined as 1 / incubation period.
1 / 4.8: https://www.imperial.ac.uk/media/imperial-college
/medicine/sph/ide/gida-fellowships/Imperial-College-COVID19
-Global-Impact-26-03-2020.pdf
1 / 5.2 [3, 8]: https://arxiv.org/pdf/2003.10047.pdf
    delta : float
        1 / infectious period.
    delta_hospital: float
        1 / infectious period for patients in the hospital, which is usually
        a bit longer.
gamma: float
Clinical outbreak rate (fraction of infected that show symptoms)
contact_matrix : np.array
With cell at ith row and jth column as contact rate made by ith
age group with jth age group.
    approximate_R0: bool
        If True, calculate R(t) as a function of the initial R0 and the suppression policy.
hospitalization_rate_general: np.array
Fraction of infected that are hospitalized generally (not in
ICU)
by age group.
hospitalization_rate_icu: np.array
Fraction of infected that are hospitalized in the ICU
by age group.
hospitalization_length_of_stay_icu_and_ventilator: float
Mean LOS for those requiring ventilators
fraction_icu_requiring_ventilator: float
Of the ICU cases, which require ventilators.
beds_general: int
General (non-ICU) hospital beds available.
beds_ICU: int
ICU beds available
ventilators: int
Ventilators available.
symptoms_to_hospital_days: float
Mean number of days elapsing between infection and
hospital admission.
    symptoms_to_mortality_days: float
        Mean number of days for an infected individual to die.
        (Hospitalization-to-death time needs to be added to the time to
        hospitalization.)
        15.16 [0, 42] - https://arxiv.org/pdf/2003.10047.pdf
    hospitalization_length_of_stay_general: float
        Mean number of days for a hospitalized individual to be
        discharged.
    hospitalization_length_of_stay_icu: float
        Mean number of days for an ICU hospitalized individual to be
        discharged.
mortality_rate_no_ICU_beds: float
The percentage of those requiring ICU that die if ICU beds
are not available.
mortality_rate_no_ventilator: float
The percentage of those requiring ventilators that die if
they are not available.
mortality_rate_no_general_beds: float
The percentage of those requiring general hospital beds that
die if they are not available.
mortality_rate_from_ICU: np.array
Mortality rate among patients admitted to ICU by age group.
mortality_rate_from_ICUVent: float
Mortality rate among patients admitted to ICU with ventilator.
initial_hospital_bed_utilization: float
Starting utilization fraction for hospital beds and ICU beds.
"""
def __init__(
self,
N,
t_list,
suppression_policy,
A_initial=np.array([1] * 18),
I_initial=np.array([1] * 18),
R_initial=0,
E_initial=np.array([0] * 18),
HGen_initial=np.array([0] * 18),
HICU_initial=np.array([0] * 18),
HICUVent_initial=np.array([0] * 18),
birth_rate=0.0003, # birth rate per capita per day
natural_death_rate=1 / (120 * 365),
age_bin_edges=np.array(
[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85]
),
num_compartments_by_age=7,
num_compartments_not_by_age=7,
max_age=120,
D_initial=0,
R0=3.75,
R0_hospital=0.6,
sigma=1 / 5.2,
delta=1 / 2.5,
delta_hospital=1 / 8.0,
kappa=1,
gamma=0.5,
contact_matrix=np.random.rand(18, 18),
approximate_R0=True,
# data source: https://www.cdc.gov/mmwr/volumes/69/wr/mm6912e2.htm#T1_down
# rates have been interpolated through centers of age_bin_edges
hospitalization_rate_general=np.array(
[
0.02,
0.02,
0.06,
0.11,
0.15,
0.16,
0.18,
0.19,
0.2,
0.23,
0.22,
0.23,
0.27,
0.33,
0.33,
0.38,
0.37,
0.51,
]
),
hospitalization_rate_icu=np.array(
[
0.0001,
0.0001,
0.01,
0.02,
0.02,
0.03,
0.03,
0.04,
0.05,
0.07,
0.06,
0.07,
0.08,
0.11,
0.12,
0.16,
0.13,
0.18,
]
),
fraction_icu_requiring_ventilator=0.75,
symptoms_to_hospital_days=5,
symptoms_to_mortality_days=13,
hospitalization_length_of_stay_general=7,
hospitalization_length_of_stay_icu=16,
hospitalization_length_of_stay_icu_and_ventilator=17,
beds_general=300,
beds_ICU=100,
ventilators=60,
# obtained by interpolating through age groups and shift to
# get average mortality rate 0.4
mortality_rate_from_ICU=np.array(
[
0.373,
0.373,
0.373,
0.374,
0.374,
0.374,
0.375,
0.376,
0.378,
0.379,
0.384,
0.391,
0.397,
0.406,
0.414,
0.433,
0.464,
0.562,
]
),
mortality_rate_from_hospital=0.0,
mortality_rate_no_ICU_beds=1.0,
mortality_rate_from_ICUVent=1.0,
mortality_rate_no_general_beds=0.0,
initial_hospital_bed_utilization=0.6,
):
self.N = np.array(N)
if suppression_policy is None:
suppression_policy = lambda x: 1
self.suppression_policy = suppression_policy
self.I_initial = np.array(I_initial)
self.A_initial = np.array(A_initial)
self.R_initial = R_initial
self.E_initial = np.array(E_initial)
self.D_initial = D_initial
self.HGen_initial = np.array(HGen_initial)
self.HICU_initial = np.array(HICU_initial)
self.HICUVent_initial = np.array(HICUVent_initial)
self.S_initial = (
self.N
- self.A_initial
- self.I_initial
- self.R_initial
- self.E_initial
- self.D_initial
- self.HGen_initial
- self.HICU_initial
- self.HICUVent_initial
)
self.birth_rate = birth_rate
self.natural_death_rate = natural_death_rate
# Create age steps and groups to define age compartments
self.age_steps = np.array(age_bin_edges)[1:] - np.array(age_bin_edges)[:-1]
self.age_steps *= 365 # the model is using day as time unit
        self.age_steps = np.append(self.age_steps, (max_age - age_bin_edges[-1]) * 365)  # width of the last, open-ended age bin in days
self.age_groups = list(zip(list(age_bin_edges[:-1]), list(age_bin_edges[1:])))
self.age_groups.append((age_bin_edges[-1], max_age))
self.num_compartments_by_age = num_compartments_by_age
self.num_compartments_not_by_age = num_compartments_not_by_age
# Epidemiological Parameters
self.R0 = R0 # Reproduction Number
self.R0_hospital = R0_hospital # Reproduction Number at hospital
self.delta = delta # 1 / infectious period
self.delta_hospital = delta_hospital
self.beta_hospital = self.R0_hospital * self.delta_hospital
self.sigma = sigma # Latent Period = 1 / incubation
self.gamma = gamma # Clinical outbreak rate
self.kappa = kappa # Discount fraction due to isolation of symptomatic cases.
self.contact_matrix = np.array(contact_matrix)
self.approximate_R0 = approximate_R0
self.symptoms_to_hospital_days = symptoms_to_hospital_days
self.symptoms_to_mortality_days = symptoms_to_mortality_days
self.hospitalization_rate_general = np.array(hospitalization_rate_general)
self.hospitalization_rate_icu = np.array(hospitalization_rate_icu)
self.hospitalization_length_of_stay_general = hospitalization_length_of_stay_general
self.hospitalization_length_of_stay_icu = hospitalization_length_of_stay_icu
self.hospitalization_length_of_stay_icu_and_ventilator = (
hospitalization_length_of_stay_icu_and_ventilator
)
self.fraction_icu_requiring_ventilator = fraction_icu_requiring_ventilator
# Capacity
self.beds_general = beds_general
self.beds_ICU = beds_ICU
self.ventilators = ventilators
self.mortality_rate_no_general_beds = mortality_rate_no_general_beds
self.mortality_rate_no_ICU_beds = mortality_rate_no_ICU_beds
self.mortality_rate_from_ICUVent = mortality_rate_from_ICUVent
self.initial_hospital_bed_utilization = initial_hospital_bed_utilization
self.mortality_rate_from_ICU = np.array(mortality_rate_from_ICU)
self.mortality_rate_from_hospital = mortality_rate_from_hospital
# beta as the transmission probability per contact times the rescale
# factor to rescale contact matrix data to match expected R0
self.beta = self._estimate_beta(self.R0)
# List of times to integrate.
self.t_list = t_list
self.results = None
def _aging_rate(self, v):
"""
Calculate rate of aging given compartments size.
Parameters
----------
v : np.array
age compartments that correspond to each age group.
Returns
-------
        age_in: np.array
            Rate of flow into each compartment in v as a result of aging.
        age_out: np.array
            Rate of flow out of each compartment in v as a result of aging.
"""
age_in = v[:-1] / self.age_steps[:-1]
age_in = np.insert(age_in, 0, 0)
age_out = v / self.age_steps
return age_in, age_out
def calculate_R0(self, beta=None, S_fracs=None):
"""
        Calculate R0 for a given beta using the Next Generation Matrix method.
        When beta is None, its default value of 1 is used.
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2871801/
        R0 is calculated as the dominant eigenvalue of the next generation
        matrix K, which is the product of the transmission matrix T and the
        negative inverse of the transition matrix Z.
        The transmission matrix T describes rates of immediate new infections
        in the row state caused by the column state. Since all new infections
        first become exposed, values are nonzero only in the corresponding
        rows.
        The transition matrix Z describes rates of flow from the column state
        to the row state, or out of a state (diagonal).
Parameters
----------
beta: float
Transmission probability per contact.
        S_fracs: np.array
            Fraction of the population that is susceptible, per age group.
Returns
-------
R0 : float
Basic reproduction number.
"""
# percentage of susceptible in each age group (assuming that initial
# condition is disease-free equilibrium)
if S_fracs is None:
S_fracs = self.N / self.N.sum()
beta = beta or 1
age_group_num = len(self.age_groups)
# contact with susceptible at disease-free equilibrium
# [C_11 * P_1, C_12 * P_1, ... C_1n * P_n]
# ...
# [C_n1 * P_n, C_n2 * P_n, ... C_nn * P_n]
contact_with_susceptible = self.contact_matrix * S_fracs.T
# transmission matrix with rates of immediate new infections into
# compartment of rows due to transmission from compartment from
# columns.
# All sub_matrix is named as T_<row compartment>_<column compartment>
T_E_E = np.zeros((age_group_num, age_group_num)) # E to E
T_E_A = contact_with_susceptible * beta # A to E
T_E_I = contact_with_susceptible * beta * self.kappa # I to E
T_E_nonICU = contact_with_susceptible * self.beta_hospital # nonICU to E
T_E_ICU = contact_with_susceptible * self.beta_hospital # ICU to E
# all immediate new infections flow into E so the rest values are
# zeros
T_A_E = np.zeros((age_group_num, age_group_num)) # E to A
T_A_A = np.zeros((age_group_num, age_group_num)) # A to A
T_A_I = np.zeros((age_group_num, age_group_num)) # I to A
T_A_nonICU = np.zeros((age_group_num, age_group_num)) # nonICU to A
T_A_ICU = np.zeros((age_group_num, age_group_num)) # ICU to A
T_I_E = np.zeros((age_group_num, age_group_num)) # E to I
T_I_A = np.zeros((age_group_num, age_group_num)) # A to I
T_I_I = np.zeros((age_group_num, age_group_num)) # I to I
T_I_nonICU = np.zeros((age_group_num, age_group_num)) # nonICU to I
T_I_ICU = np.zeros((age_group_num, age_group_num)) # ICU to I
T_nonICU_E = np.zeros((age_group_num, age_group_num)) # E to nonICU
T_nonICU_A = np.zeros((age_group_num, age_group_num)) # A to nonICU
T_nonICU_I = np.zeros((age_group_num, age_group_num)) # I to nonICU
        T_nonICU_nonICU = np.zeros((age_group_num, age_group_num))  # nonICU to nonICU
        T_nonICU_ICU = np.zeros((age_group_num, age_group_num))  # ICU to nonICU
        T_ICU_E = np.zeros((age_group_num, age_group_num))  # E to ICU
        T_ICU_A = np.zeros((age_group_num, age_group_num))  # A to ICU
        T_ICU_I = np.zeros((age_group_num, age_group_num))  # I to ICU
        T_ICU_nonICU = np.zeros((age_group_num, age_group_num))  # nonICU to ICU
        T_ICU_ICU = np.zeros((age_group_num, age_group_num))  # ICU to ICU
T_E = np.concatenate([T_E_E, T_E_A, T_E_I, T_E_nonICU, T_E_ICU], axis=1) # all rates to E
T_A = np.concatenate([T_A_E, T_A_A, T_A_I, T_A_nonICU, T_A_ICU], axis=1) # all rates to A
T_I = np.concatenate([T_I_E, T_I_A, T_I_I, T_I_nonICU, T_I_ICU], axis=1) # all rates to I
T_nonICU = np.concatenate(
[T_nonICU_E, T_nonICU_A, T_nonICU_I, T_nonICU_nonICU, T_nonICU_ICU], axis=1
) # all rates to nonICU
T_ICU = np.concatenate(
[T_ICU_E, T_ICU_A, T_ICU_I, T_ICU_nonICU, T_ICU_ICU], axis=1
) # all rates to ICU
T = np.concatenate([T_E, T_A, T_I, T_nonICU, T_ICU])
        # Matrix of rates of transitions from the compartment of the column to
        # the compartment of the row, or out of a compartment (when row = column)
# All sub_matrix is named as Z_<row compartment>_<column compartment>
# rates of transition out of E (incubation or aging) and into E (aging)
aging_rate_in = 1 / self.age_steps[:-1]
aging_rate_out = 1 / self.age_steps
Z_E_E = np.diag(-(aging_rate_out + self.sigma)) + np.diag(aging_rate_in, k=-1)
Z_E_A = np.zeros((age_group_num, age_group_num))
Z_E_I = np.zeros((age_group_num, age_group_num))
Z_E_nonICU = np.zeros((age_group_num, age_group_num))
Z_E_ICU = np.zeros((age_group_num, age_group_num))
Z_E = np.concatenate([Z_E_E, Z_E_A, Z_E_I, Z_E_nonICU, Z_E_ICU], axis=1)
# rates of transition out of A (recovery and aging) and into A (aging
# and from exposed)
Z_A_E = np.zeros((age_group_num, age_group_num))
np.fill_diagonal(Z_A_E, self.sigma * (1 - self.gamma)) # from E to A
Z_A_A = np.diag(-(aging_rate_out + self.delta)) + np.diag(aging_rate_in, k=-1)
Z_A_I = np.zeros((age_group_num, age_group_num))
Z_A_nonICU = np.zeros((age_group_num, age_group_num))
Z_A_ICU = np.zeros((age_group_num, age_group_num))
Z_A = np.concatenate([Z_A_E, Z_A_A, Z_A_I, Z_A_nonICU, Z_A_ICU], axis=1)
# rates of transition out of I (recovery, hospitalization and aging)
# and into I (aging and from exposed)
rate_recovered = self.delta
rate_in_hospital_general = (
self.hospitalization_rate_general - self.hospitalization_rate_icu
) / self.symptoms_to_hospital_days
rate_in_hospital_icu = self.hospitalization_rate_icu / self.symptoms_to_hospital_days
rate_out_of_I = (
aging_rate_out + rate_recovered + rate_in_hospital_general + rate_in_hospital_icu
)
Z_I_E = np.zeros((age_group_num, age_group_num))
np.fill_diagonal(Z_I_E, self.sigma * self.gamma) # from E to I
Z_I_A = np.zeros((age_group_num, age_group_num))
Z_I_I = np.diag(-rate_out_of_I) + np.diag(aging_rate_in, k=-1) # transition out of I
Z_I_nonICU = np.zeros((age_group_num, age_group_num))
Z_I_ICU = np.zeros((age_group_num, age_group_num))
Z_I = np.concatenate([Z_I_E, Z_I_A, Z_I_I, Z_I_nonICU, Z_I_ICU], axis=1)
# rates of transition out of nonICU (recovery, death and aging)
# and into nonICU (aging and from infected)
died_from_hosp = (
self.mortality_rate_from_hospital / self.hospitalization_length_of_stay_general
)
recovered_after_hospital_general = (
1 - self.mortality_rate_from_hospital
) / self.hospitalization_length_of_stay_general
rate_out_of_nonICU = aging_rate_out + died_from_hosp + recovered_after_hospital_general
Z_nonICU_E = np.zeros((age_group_num, age_group_num))
Z_nonICU_A = np.zeros((age_group_num, age_group_num))
Z_nonICU_I = np.diag(rate_in_hospital_general)
Z_nonICU_nonICU = np.diag(-(rate_out_of_nonICU)) + np.diag(aging_rate_in, k=-1)
Z_nonICU_ICU = np.zeros((age_group_num, age_group_num))
Z_nonICU = np.concatenate(
[Z_nonICU_E, Z_nonICU_A, Z_nonICU_I, Z_nonICU_nonICU, Z_nonICU_ICU], axis=1
)
# rates of transition out of ICU (recovery, death and aging) and into
# ICU (aging and from infected)
died_from_icu = (
(1 - self.fraction_icu_requiring_ventilator)
* self.mortality_rate_from_ICU
/ self.hospitalization_length_of_stay_icu
)
died_from_icu_vent = (
self.mortality_rate_from_ICUVent
/ self.hospitalization_length_of_stay_icu_and_ventilator
)
recovered_from_icu_no_vent = (
(1 - self.mortality_rate_from_ICU)
* (1 - self.fraction_icu_requiring_ventilator)
/ self.hospitalization_length_of_stay_icu
)
recovered_from_icu_vent = (
1 - np.maximum(self.mortality_rate_from_ICU, self.mortality_rate_from_ICUVent)
) / self.hospitalization_length_of_stay_icu_and_ventilator
rate_out_of_ICU = (
aging_rate_out
+ died_from_icu
+ died_from_icu_vent
+ recovered_from_icu_no_vent
+ recovered_from_icu_vent
)
Z_ICU_E = np.zeros((age_group_num, age_group_num))
Z_ICU_A = np.zeros((age_group_num, age_group_num))
Z_ICU_I = np.diag(rate_in_hospital_icu)
Z_ICU_nonICU = np.zeros((age_group_num, age_group_num))
Z_ICU_ICU = np.diag(-(rate_out_of_ICU)) + np.diag(aging_rate_in, k=-1)
Z_ICU = np.concatenate([Z_ICU_E, Z_ICU_A, Z_ICU_I, Z_ICU_nonICU, Z_ICU_ICU], axis=1)
Z = np.concatenate([Z_E, Z_A, Z_I, Z_nonICU, Z_ICU])
# Calculate R0 from transmission and transition matrix
Z_inverse = np.linalg.inv(Z)
K = T.dot(-Z_inverse)
eigen_values = np.linalg.eigvals(K)
        R0 = np.max(eigen_values.real)  # R0 is the dominant eigenvalue, which is real for this non-negative matrix
return R0
def _estimate_beta(self, expected_R0):
"""
        Estimate beta for a given R0. Note that beta can be greater than 1
        for some expected R0 given a contact matrix. This is because the
        contact matrix sometimes underestimates the overall contact rate.
        In this case, beta is the product of the transmission probability
        per contact and the factor that lifts the average contact rate to
        match the expected R0.
Parameters
----------
expected_R0 : float
R0 to solve for beta.
Returns
-------
        : float
            Beta that gives rise to the expected R0.
"""
        # R0 computed with the default beta of 1 gives the scale implied by
        # the contact matrix alone; beta is then the ratio to the target R0.
R0 = self.calculate_R0()
beta = expected_R0 / R0 # R0 linearly increases as beta increases
return float(beta.real)
def calculate_Rt(self, S_fracs, suppression_policy=None):
"""
Calculate R(t)
Parameters
----------
S_fracs: np.array
Fraction of each age group among susceptible population.
        suppression_policy: float or np.array
            Fraction of effective contacts remaining through time as a
            result of the suppression policy.
Returns
-------
        Rt: np.array
            Reproduction number through time.
"""
Rt = np.zeros(S_fracs.shape[1])
for n in range(S_fracs.shape[1]):
Rt[n] += self.calculate_R0(self.beta, S_fracs[:, n])
Rt *= suppression_policy
return Rt
def _time_step(self, t, y):
"""
        Compute derivatives of all compartments for a single time step.
        Parameters
        ----------
        t: float
            Current time (days).
        y: np.array
            Current compartment sizes, flattened into a single vector.
Returns
-------
: np.array
ODE derivatives.
"""
# np.split(y[:-7], 7) <--- This is 7x slower than the code below.
chunk_size = y[: -self.num_compartments_not_by_age].shape[0] // self.num_compartments_by_age
S, E, A, I, HNonICU, HICU, HICUVent = [
y[(i * chunk_size) : ((i + 1) * chunk_size)]
for i in range(self.num_compartments_by_age)
]
R = y[-7]
# TODO: County-by-county affinity matrix terms can be used to describe
# transmission network effects. ( also known as Multi-Region SEIR)
# https://arxiv.org/pdf/2003.09875.pdf
        # For those living in county i, exposure from an interacting county j
        # is given by a term dE_i/dt += N_i * Sum_j [ beta_j * mix_ij * I_j *
        # S_i + beta_i * mix_ji * I_j * S_i ]. mix_ij can be proxied by
        # Census-based commuting matrices, as workplace interactions are the
        # dominant term. See:
# https://www.census.gov/topics/employment/commuting/guidance/flows.html
# Effective contact rate * those that get exposed * those susceptible.
total_ppl = self.N.sum()
        # get matrix:
        # [C_11 * S_1 * I_1/N, ... C_1j * S_1 * I_j/N ...]
        # [C_21 * S_2 * I_1/N, ... C_2j * S_2 * I_j/N ...]
        # ...
        # [C_n1 * S_n * I_1/N, ... C_nj * S_n * I_j/N ...]
frac_infected = (self.kappa * I + A) / total_ppl
frac_hospt = (HICU + HNonICU) / total_ppl
S_and_I = S[:, np.newaxis].dot(frac_infected[:, np.newaxis].T)
S_and_hosp = S[:, np.newaxis].dot(frac_hospt[:, np.newaxis].T)
contacts_S_and_I = (S_and_I * self.contact_matrix).sum(axis=1)
contacts_S_and_hosp = (S_and_hosp * self.contact_matrix).sum(axis=1)
number_exposed = (
self.beta * self.suppression_policy(t) * contacts_S_and_I
+ self.beta_hospital * contacts_S_and_hosp
)
age_in_S, age_out_S = self._aging_rate(S)
age_in_S[0] = self.N.sum() * self.birth_rate
dSdt = age_in_S - number_exposed - age_out_S
exposed_and_symptomatic = (
self.gamma * self.sigma * E
) # latent period moving to infection = 1 / incubation
        exposed_and_asymptomatic = (
            (1 - self.gamma) * self.sigma * E
        )  # latent period moving to asymptomatic (but infected) = 1 / incubation
age_in_E, age_out_E = self._aging_rate(E)
dEdt = (
age_in_E
+ number_exposed
- exposed_and_symptomatic
- exposed_and_asymptomatic
- age_out_E
)
asymptomatic_and_recovered = self.delta * A
age_in_A, age_out_A = self._aging_rate(A)
dAdt = age_in_A + exposed_and_asymptomatic - asymptomatic_and_recovered - age_out_A
# Fraction that didn't die or go to hospital
infected_and_recovered_no_hospital = self.delta * I
infected_and_in_hospital_general = (
I
* (self.hospitalization_rate_general - self.hospitalization_rate_icu)
/ self.symptoms_to_hospital_days
)
infected_and_in_hospital_icu = (
I * self.hospitalization_rate_icu / self.symptoms_to_hospital_days
)
age_in_I, age_out_I = self._aging_rate(I)
dIdt = (
age_in_I
+ exposed_and_symptomatic
- infected_and_recovered_no_hospital
- infected_and_in_hospital_general
- infected_and_in_hospital_icu
- age_out_I
)
mortality_rate_ICU = (
self.mortality_rate_from_ICU
if sum(HICU) <= self.beds_ICU
else self.mortality_rate_no_ICU_beds
)
mortality_rate_NonICU = (
self.mortality_rate_from_hospital
if sum(HNonICU) <= self.beds_general
else self.mortality_rate_no_general_beds
)
died_from_hosp = (
HNonICU * mortality_rate_NonICU / self.hospitalization_length_of_stay_general
)
died_from_icu = (
HICU
* (1 - self.fraction_icu_requiring_ventilator)
* mortality_rate_ICU
/ self.hospitalization_length_of_stay_icu
)
died_from_icu_vent = (
HICUVent
* self.mortality_rate_from_ICUVent
/ self.hospitalization_length_of_stay_icu_and_ventilator
)
recovered_after_hospital_general = (
HNonICU * (1 - mortality_rate_NonICU) / self.hospitalization_length_of_stay_general
)
recovered_from_icu_no_vent = (
HICU
* (1 - mortality_rate_ICU)
* (1 - self.fraction_icu_requiring_ventilator)
/ self.hospitalization_length_of_stay_icu
)
recovered_from_icu_vent = (
HICUVent
* (1 - np.maximum(mortality_rate_ICU, self.mortality_rate_from_ICUVent))
/ self.hospitalization_length_of_stay_icu_and_ventilator
)
age_in_HNonICU, age_out_HNonICU = self._aging_rate(HNonICU)
dHNonICU_dt = (
age_in_HNonICU
+ infected_and_in_hospital_general
- recovered_after_hospital_general
- died_from_hosp
- age_out_HNonICU
)
age_in_HICU, age_out_HICU = self._aging_rate(HICU)
dHICU_dt = (
age_in_HICU
+ infected_and_in_hospital_icu
- recovered_from_icu_no_vent
- recovered_from_icu_vent
- died_from_icu
- died_from_icu_vent
- age_out_HICU
)
        # This compartment is for tracking ventilator count. The beds are
        # accounted for in the ICU cases.
age_in_HICUVent, age_out_HICUVent = self._aging_rate(HICUVent)
rate_ventilator_needed = (
infected_and_in_hospital_icu * self.fraction_icu_requiring_ventilator
)
rate_removing_ventilator = HICUVent / self.hospitalization_length_of_stay_icu_and_ventilator
dHICUVent_dt = (
age_in_HICUVent + rate_ventilator_needed - rate_removing_ventilator - age_out_HICUVent
)
# Tracking categories...
dTotalInfections = sum(exposed_and_symptomatic) + sum(exposed_and_asymptomatic)
dHAdmissions_general = sum(infected_and_in_hospital_general)
dHAdmissions_ICU = sum(infected_and_in_hospital_icu) # Ventilators also count as ICU beds.
# Fraction that recover
dRdt = (
sum(asymptomatic_and_recovered)
+ sum(infected_and_recovered_no_hospital)
+ sum(recovered_after_hospital_general)
+ sum(recovered_from_icu_vent)
+ sum(recovered_from_icu_no_vent)
- R * self.natural_death_rate
)
# Death among hospitalized.
dDdt = sum(died_from_icu) + sum(died_from_icu_vent) + sum(died_from_hosp)
died_from_hospital_bed_limits = (
max(sum(HNonICU) - self.beds_general, 0)
* self.mortality_rate_no_general_beds
/ self.hospitalization_length_of_stay_general
)
died_from_icu_bed_limits = (
max(sum(HICU) - self.beds_ICU, 0)
* self.mortality_rate_no_ICU_beds
/ self.hospitalization_length_of_stay_icu
)
# death due to hospital bed limitation
dD_no_hgendt = died_from_hospital_bed_limits
dD_no_icudt = died_from_icu_bed_limits
return np.concatenate(
[
dSdt,
dEdt,
dAdt,
dIdt,
dHNonICU_dt,
dHICU_dt,
dHICUVent_dt,
np.array(
[
dRdt,
dDdt,
dD_no_hgendt,
dD_no_icudt,
dHAdmissions_general,
dHAdmissions_ICU,
dTotalInfections,
]
),
]
)
def run(self):
"""
Integrate the ODE numerically.
Returns
-------
results: dict
{
't_list': self.t_list,
'S': susceptible population combining all age groups,
'E': exposed population combining all age groups,
'I': symptomatic population combining all age groups,
'A': asymptomatic population combining all age groups,
'R': recovered population,
'HGen': general hospitalized population combining all age groups,
'HICU': icu admitted population combining all age groups,
'HVent': population on ventilator combining all age groups,
'D': Deaths during hospitalization,
'deaths_from_hospital_bed_limits': deaths due to hospital bed
limitation
            'deaths_from_icu_bed_limits': deaths due to ICU bed limitation,
            'total_deaths': total deaths from all causes,
'by_age':
{
'S': susceptible population by age group
'E': exposed population by age group
'I': symptomatic population by age group
'A': asymptomatic population by age group
'HGen': general hospitalized population by age group
'HICU': icu admitted population by age group
'HVent': population on ventilator by age group
                }
            }
"""
# Initial conditions vector
D_no_hgen, D_no_icu, HAdmissions_general, HAdmissions_ICU, TotalAllInfections = (
0,
0,
0,
0,
0,
)
y0 = np.concatenate(
[
self.S_initial,
self.E_initial,
self.A_initial,
self.I_initial,
self.HGen_initial,
self.HICU_initial,
self.HICUVent_initial,
np.array(
[
self.R_initial,
self.D_initial,
D_no_hgen,
D_no_icu,
HAdmissions_general,
HAdmissions_ICU,
TotalAllInfections,
]
),
]
)
# Integrate the SEIR equations over the time grid, t.
result_time_series = solve_ivp(
fun=self._time_step,
t_span=[self.t_list.min(), self.t_list.max()],
y0=y0,
t_eval=self.t_list,
method="RK23",
rtol=1e-3,
atol=1e-3,
).y
S, E, A, I, HGen, HICU, HICUVent = np.split(
result_time_series[: -self.num_compartments_not_by_age], self.num_compartments_by_age
)
(
R,
D,
D_no_hgen,
D_no_icu,
HAdmissions_general,
HAdmissions_ICU,
TotalAllInfections,
) = result_time_series[-7:]
if self.approximate_R0:
Rt = np.zeros(len(self.t_list))
Rt += self.R0 * self.suppression_policy(self.t_list)
else:
S_fracs_within_age_group = S / S.sum(axis=0)
Rt = self.calculate_Rt(S_fracs_within_age_group, self.suppression_policy(self.t_list))
self.results = {
"t_list": self.t_list,
"S": S.sum(axis=0),
"E": E.sum(axis=0),
"A": A.sum(axis=0),
"I": I.sum(axis=0),
"R": R,
"HGen": HGen.sum(axis=0),
"HICU": HICU.sum(axis=0),
"HVent": HICUVent.sum(axis=0),
"D": D,
"Rt": Rt,
"direct_deaths_per_day": np.array([0] + list(np.diff(D))), # Derivative...
# Here we assume that the number of person days above the saturation
# divided by the mean length of stay approximates the number of
# deaths from each source.
# Ideally this is included in the dynamics, but this is left as a TODO.
"deaths_from_hospital_bed_limits": D_no_hgen,
# Here ICU = ICU + ICUVent, but we want to remove the ventilated fraction and account for that below.
"deaths_from_icu_bed_limits": D_no_icu,
"HGen_cumulative": np.cumsum(HGen.sum(axis=0))
/ self.hospitalization_length_of_stay_general,
"HICU_cumulative": np.cumsum(HICU.sum(axis=0))
/ self.hospitalization_length_of_stay_icu,
"HVent_cumulative": np.cumsum(HICUVent.sum(axis=0))
/ self.hospitalization_length_of_stay_icu_and_ventilator,
}
self.results["total_deaths"] = D + D_no_hgen + D_no_icu
# Derivatives of the cumulative give the "new" infections per day.
self.results["total_new_infections"] = np.append([0], np.diff(TotalAllInfections))
self.results["total_deaths_per_day"] = np.append([0], np.diff(self.results["total_deaths"]))
self.results["general_admissions_per_day"] = np.append([0], np.diff(HAdmissions_general))
self.results["icu_admissions_per_day"] = np.append(
[0], np.diff(HAdmissions_ICU)
) # Derivative of the
# cumulative.
self.results["by_age"] = dict()
self.results["by_age"]["S"] = S
self.results["by_age"]["E"] = E
self.results["by_age"]["A"] = A
self.results["by_age"]["I"] = I
self.results["by_age"]["HGen"] = HGen
self.results["by_age"]["HICU"] = HICU
self.results["by_age"]["HVent"] = HICUVent
def plot_results(self, y_scale="log", by_age_group=False, xlim=None):
"""
Generate a summary plot for the simulation.
Parameters
----------
y_scale: str
Matplotlib scale to use on y-axis. Typically 'log' or 'linear'
by_age_group: bool
Whether plot projections by age group.
xlim: float
Limits of x axis.
"""
if not by_age_group:
# Plot the data on three separate curves for S(t), I(t) and R(t)
fig = plt.figure(facecolor="w", figsize=(10, 8))
plt.subplot(221)
plt.plot(self.t_list, self.results["S"], alpha=1, lw=2, label="Susceptible")
plt.plot(self.t_list, self.results["E"], alpha=0.5, lw=2, label="Exposed")
plt.plot(self.t_list, self.results["A"], alpha=0.5, lw=2, label="Asymptomatic")
plt.plot(self.t_list, self.results["I"], alpha=0.5, lw=2, label="Infected")
plt.plot(
self.t_list,
self.results["R"],
alpha=1,
lw=2,
label="Recovered & Immune",
linestyle="--",
)
plt.plot(
self.t_list,
self.results["S"]
+ self.results["E"]
+ self.results["A"]
+ self.results["I"]
+ self.results["R"]
+ self.results["D"]
+ self.results["HGen"]
+ self.results["HICU"],
label="Total",
)
plt.xlabel("Time [days]", fontsize=12)
plt.yscale(y_scale)
# plt.ylim(1, plt.ylim(1))
plt.grid(True, which="both", alpha=0.35)
plt.legend(framealpha=0.5)
if xlim:
plt.xlim(*xlim)
else:
plt.xlim(0, self.t_list.max())
plt.ylim(1, self.N.sum(axis=0) * 1.1)
plt.subplot(222)
plt.plot(
self.t_list,
self.results["D"],
alpha=0.4,
c="k",
lw=1,
label="Direct Deaths",
linestyle="-",
)
plt.plot(
self.t_list,
self.results["HGen"],
alpha=1,
lw=2,
c="steelblue",
label="General Beds Required",
linestyle="-",
)
            plt.hlines(
                self.beds_general,
                self.t_list[0],
                self.t_list[-1],
                "steelblue",
                alpha=1,
                lw=2,
                label="General Bed Capacity",
                linestyle="--",
            )
plt.plot(
self.t_list,
self.results["HICU"],
alpha=1,
lw=2,
c="firebrick",
label="ICU Beds Required",
linestyle="-",
)
            plt.hlines(
                self.beds_ICU,
                self.t_list[0],
                self.t_list[-1],
                "firebrick",
                alpha=1,
                lw=2,
                label="ICU Bed Capacity",
                linestyle="--",
            )
plt.plot(
self.t_list,
self.results["HVent"],
alpha=1,
lw=2,
c="seagreen",
label="Ventilators Required",
linestyle="-",
)
plt.hlines(
self.ventilators,
self.t_list[0],
self.t_list[-1],
"seagreen",
alpha=1,
lw=2,
label="Ventilator Capacity",
linestyle="--",
)
plt.xlabel("Time [days]", fontsize=12)
plt.ylabel("")
plt.yscale(y_scale)
plt.ylim(1, plt.ylim()[1])
plt.grid(True, which="both", alpha=0.35)
plt.legend(framealpha=0.5)
if xlim:
plt.xlim(*xlim)
else:
plt.xlim(0, self.t_list.max())
plt.subplot(223)
plt.plot(self.t_list, [self.suppression_policy(t) for t in self.t_list], c="steelblue")
plt.ylabel("Contact Rate Reduction")
plt.xlabel("Time [days]", fontsize=12)
plt.grid(True, which="both")
# Reproduction number through time
plt.subplot(224)
plt.plot(self.t_list, self.results["Rt"], c="steelblue")
plt.ylabel("R(t)")
plt.xlabel("Time [days]", fontsize=12)
plt.grid(True, which="both")
plt.tight_layout()
else:
# Plot the data by age group
fig, axes = plt.subplots(len(self.age_groups), 2, figsize=(10, 50))
for ax, n in zip(axes, range(len(self.age_groups))):
ax1, ax2 = ax
ax1.plot(
self.t_list,
self.results["by_age"]["S"][n, :],
alpha=1,
lw=2,
label="Susceptible",
)
ax1.plot(
self.t_list, self.results["by_age"]["E"][n, :], alpha=0.5, lw=2, label="Exposed"
)
ax1.plot(
self.t_list,
self.results["by_age"]["A"][n, :],
alpha=0.5,
lw=2,
label="Asymptomatic",
)
ax1.plot(
self.t_list,
self.results["by_age"]["I"][n, :],
alpha=0.5,
lw=2,
label="Infected",
)
ax2.plot(
self.t_list,
self.results["by_age"]["HGen"][n, :],
alpha=1,
lw=2,
label="Hospital general",
linestyle="--",
)
ax2.plot(
self.t_list,
self.results["by_age"]["HICU"][n, :],
alpha=1,
lw=2,
label="ICU",
linestyle="--",
)
ax2.plot(
self.t_list,
self.results["by_age"]["HVent"][n, :],
alpha=1,
lw=2,
label="ICUVent",
linestyle="--",
)
ax1.legend()
ax2.legend()
ax1.set_xlabel("days")
ax2.set_xlabel("days")
ax1.set_title("age group %d-%d" % (self.age_groups[n][0], self.age_groups[n][1]))
ax1.set_yscale("log")
ax2.set_yscale("log")
ax1.set_ylim(ymin=1)
ax2.set_ylim(ymin=1)
plt.tight_layout()
return fig
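# A minimal usage sketch (not part of the original module; all values are
# illustrative, and `SEIRModelAge` stands in for the class name defined above
# this excerpt). `N` gives the population per age bin, and the suppression
# policy maps time to a contact-rate multiplier.
if __name__ == "__main__":
    example_N = np.full(18, 10000)  # 10k people in each of the 18 age bins
    example_t_list = np.linspace(0, 180, 181)  # simulate 180 days
    model = SEIRModelAge(
        N=example_N,
        t_list=example_t_list,
        suppression_policy=lambda t: 0.7,  # constant 30% contact reduction
    )
    model.run()
    print("Peak symptomatic infections:", model.results["I"].max())
    model.plot_results()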
|
python
|
#!/usr/bin/env python
from dissect.cstruct import cstruct, dumpstruct
import socket
import struct
protocol = cstruct()
protocol.load("""
enum AttackType : uint8 {
ATK_OPT_DPORT = 7,
ATK_OPT_DOMAIN = 8,
ATK_OPT_NUM_SOCKETS = 24,
};
struct AttackTarget {
DWORD ipv4;
BYTE netmask;
};
struct AttackOption {
AttackType type;
uint8 value_length;
char value[value_length];
};
struct MiraiAttack {
uint16 total_length;
uint32 duration;
uint8 attack_id;
uint8 target_count;
AttackTarget targets[target_count];
uint8 num_opts;
AttackOption attack_options[num_opts];
};
""")
protocol.endian = ">"
if __name__ == '__main__':
data = b"\x000\x00\x00\x00d\n\x01\x08\x08\x08\x08 \x03\x08\x16http://www.example.com\x07\x0280\x18\x045000"
record = protocol.MiraiAttack(data)
print(record)
print('--')
for t in record.targets:
print('TARGET: {}/{}'.format(socket.inet_ntoa(struct.pack('!L', t.ipv4)), t.netmask))
for o in record.attack_options:
print('OPTION: {} - {}'.format(o.type, o.value))
dumpstruct(protocol.MiraiAttack, data)
|
python
|
from celery import task
import re
from django.utils import timezone
from backups.models import Backup, BackupRun, BackupSetOfRun, BackupNotification
import os
import subprocess
from django.conf import settings
from app.utils import DjangoLock
@task(ignore_result=True, queue="backups")
def run_backup(id, mode='hourly', backupsetpk=None):
"""Run a backup"""
backup = Backup.objects.get(pk=id)
# Create the run
backuprun = BackupRun(backup=backup, start_date=timezone.now(), type=mode)
backuprun.save()
def _notify_set_if_needed():
if backupsetpk:
check_end_of_backupset.delay(backupsetpk, backuprun.pk)
# Backup
if not backup.server_to.ssh_connection_string_from_gestion:
print("Error: No connection from gestion")
_notify_set_if_needed()
return
if not backup.server_from.ssh_connection_string_from_backup:
print("Error: No connection from backup")
_notify_set_if_needed()
return
os.system('ssh ' + backup.server_to.ssh_connection_string_from_gestion + ' wget ' + settings.GESTION_URL + 'backups/get_conf/' + str(backup.pk) + '/ -O /tmp/azimut-gestion-backup-config-' + str(backup.pk))
to_do_string = ['rsnapshot -c /tmp/azimut-gestion-backup-config-' + str(backup.pk) + ' -v ' + mode]
    p = subprocess.Popen(['ssh'] + backup.server_to.ssh_connection_string_from_gestion.split(' ') + [' '.join(to_do_string)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
backuprun.end_date = timezone.now()
backuprun.stdout = out
backuprun.stderr = err
    try:
        backuprun.nb_files = re.search(r'Number of files: (\d*)', out).group(1) or 0
    except Exception:
        print("Error getting nb files")
    try:
        backuprun.size = re.search(r'Total file size: (\d*)', out).group(1) or 0
    except Exception:
        print("Error getting total file size")
backuprun.save()
if mode == 'hourly':
if not backuprun.nb_files or not backuprun.size:
bn = BackupNotification(type='bkpfailled')
bn.message = "The backuprun for the backup %s started at %s, ended at %s, type %s has failled:" % (backuprun.backup.name, str(backuprun.start_date), str(backuprun.end_date), mode,)
else:
bn = BackupNotification(type='bkpdone')
bn.message = "The backuprun for the backup %s started at %s, ended at %s, type %s was succesfull:" % (backuprun.backup.name, str(backuprun.start_date), str(backuprun.end_date), mode,)
bn.message += "\n\n%s files where copied for a total size of %s." % (str(backuprun.nb_files), str(backuprun.size),)
else:
bn = BackupNotification(type='bkpdone')
bn.message = "The backuprun for the backup %s started at %s, ended at %s, type %s was probably succesfull." % (backuprun.backup.name, str(backuprun.start_date), str(backuprun.end_date), mode,)
bn.save()
bn.send_notifications()
_notify_set_if_needed()
@task(ignore_result=True, queue="backups")
def run_active_backups(mode):
"""Run all actives backups"""
    # If we're in hourly mode, check that the previous backup set is done. (In
    # other cases, it's ok to run more than one at a time.)
if mode == 'hourly':
oldbackupruns = BackupSetOfRun.objects.filter(type='hourly', status='running').all()
if oldbackupruns:
bn = BackupNotification(type='bkpsetnotstarted')
bn.message = "The backupset of type %s whould should have been started at %s was not executed. A previous backupset was still running. If it's not the case, you can, after carefully checked what happened, use the interface to manually ignore the set." % (mode, str(timezone.now()),)
bn.save()
bn.send_notifications()
print("Aborting run as there is still backup runnning !")
return
backups_to_run = Backup.objects.filter(active=True).all()
backupset = BackupSetOfRun(type=mode)
backupset.save()
for bpk in backups_to_run:
backupset.backups.add(bpk)
backupset.save()
for bkp in backups_to_run:
run_backup.delay(bkp.pk, mode, backupset.pk)
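# Illustration (hypothetical module path and schedule): with old-style
# Celery, this task would typically be triggered periodically from the
# project settings, e.g.:
#
# from celery.schedules import crontab
# CELERYBEAT_SCHEDULE = {
#     'run-hourly-backups': {
#         'task': 'backups.tasks.run_active_backups',
#         'schedule': crontab(minute=0),
#         'args': ('hourly',),
#     },
# }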
@task(ignore_result=True, queue="backups")
def check_end_of_backupset(backupsetpk, backuprunpk):
l = DjangoLock(settings.BACKUP_SET_LOCK)
l.acquire()
backupset = BackupSetOfRun.objects.get(pk=backupsetpk)
backuprun = BackupRun.objects.get(pk=backuprunpk)
try:
backupset.backups.remove(backuprun.backup)
backupset.backupruns.add(backuprun)
backupset.total_size += backuprun.size
backupset.total_files += backuprun.nb_files
if not backupset.backups.all(): # End :)
backupset.end_date = timezone.now()
backupset.status = 'done'
bn = BackupNotification(type='bkpsetdone')
bn.message = "The backupset of type %s, started at %s, ended at %s (runned for %s hours) is now Done. %s files where copied for a total size of %s." % (backupset.type, str(backupset.start_date), str(backupset.end_date), str(backupset.get_total_time()), str(backupset.total_files), str(backupset.total_size))
bn.save()
bn.send_notifications()
backupset.save()
except Exception as e:
print("Error during check end of backup set !" + str(e))
l.release()
|
python
|
import logging
from dicewars.client.ai_driver import BattleCommand, EndTurnCommand
class AI:
def __init__(self, player_name, board, players_order):
self.player_name = player_name
self.logger = logging.getLogger('AI')
def ai_turn(self, board, nb_moves_this_turn, nb_turns_this_game, time_left):
return EndTurnCommand()
|
python
|
import random
from typing import List
from project.conf import get_configuration
from project.utils import ITEMS_SECTION
from project.effect import SideEffect, instantiate_side_effects
from project.interface import IItem, IHealingItem, IRecoveryItem, IEquipmentItem, ISideEffect
class Item(IItem):
def __init__(self, name: str, tier: str, description: str, weight: float) -> None:
"""
Constructor of Item (Primitive Class)
:param str name: Name of the item.
        :param str tier: The tier of the item (common, uncommon, rare, legendary).
:param str description: Description of the item.
:param float weight: Weight of the item.
:rtype: None
"""
self.name = name
self.tier = tier
self.description = description
self.weight = weight
class HealingItem(IHealingItem, Item):
def __init__(self, name: str, tier: str, description: str, weight: float, attribute: str, base: int) -> None:
"""
Constructor of HealingItem
:param str name: Name of the item.
        :param str tier: The tier of the item (common, uncommon, rare, legendary).
:param str description: Description of the item.
:param float weight: Weight of the item.
:param str attribute: Attribute to be healed.
:param int base: How much the attribute will be healed.
:rtype: None
"""
self.attribute = attribute
self.base = base
super().__init__(name, tier, description, weight)
class RecoveryItem(IRecoveryItem, Item):
def __init__(self, name: str, tier: str, description: str, weight: float, status: str) -> None:
"""
Constructor of RecoveryItem
:param str name: Name of the item.
        :param str tier: The tier of the item (common, uncommon, rare, legendary).
:param str description: Description of the item.
:param float weight: Weight of the item.
        :param str status: The status (side effect) to be recovered.
:rtype: None
"""
self.status = status
super().__init__(name, tier, description, weight)
class EquipmentItem(IEquipmentItem, Item):
def __init__(self, name: str, tier: str, description: str, weight: float, attribute: str, base: int,
side_effects: List[ISideEffect], category: str, usage: str, wielding: int = 1) -> None:
"""
Constructor of EquipmentItem
:param str name: Name of the equipment.
        :param str tier: The tier of the equipment (common, uncommon, rare, legendary).
:param str description: Description of the item.
:param float weight: Weight of the item.
:param str attribute: Attribute that the equip improves.
:param int base: The value of that attribute that this equip will improve.
:param List[ISideEffect] side_effects: The side effects of that equipment.
:param str category: The category of the equipment.
        :param str usage: Whether the equipment attributes apply to all cases, to melee attacks only, or to ranged attacks only.
        :param int wielding: Wielding requirement of the equipment (defaults to 1).
        :rtype: None
"""
self.attribute = attribute
self.base = base
self.side_effects = side_effects
self.category = category
self.usage = usage
self.wielding = wielding
super().__init__(name, tier, description, weight)
def get_random_item(tier: str, item_type: str) -> Item:
"""
Function to get a random item along all the items from the items.yaml file,
considering a specific tier and type.
:param str tier: The tier of the item.
    :param str item_type: The type of the item (healing, recovery, equipment).
:rtype: Item
"""
items_dicts = get_configuration(ITEMS_SECTION)
item_key = random.choice(
list({k: v for (k, v) in items_dicts.items() if v.get('tier') == tier and v.get('type') == item_type}.keys()))
item_dict = items_dicts.get(item_key)
if item_dict.get('type') == 'healing':
return HealingItem(name=item_dict.get('name'),
tier=item_dict.get('tier'),
description=item_dict.get('description'),
weight=item_dict.get('weight'),
attribute=item_dict.get('attribute'),
base=item_dict.get('base'))
elif item_dict.get('type') == 'recovery':
return RecoveryItem(name=item_dict.get('name'),
tier=item_dict.get('tier'),
description=item_dict.get('description'),
weight=item_dict.get('weight'),
status=item_dict.get('status'))
elif item_dict.get('type') == 'equipment':
        '''
        Side effects are instantiated here so that, when the user equips the
        item, the side effect is already instantiated and is appended to the
        user's side-effect list as the same instance; changes to the duration
        attribute on each new turn are therefore reflected both in the
        side-effect list and in the bag.
        '''
side_effects = instantiate_side_effects(item_dict.get('side_effects'))
return EquipmentItem(name=item_dict.get('name'),
tier=item_dict.get('tier'),
description=item_dict.get('description'),
weight=item_dict.get('weight'),
attribute=item_dict.get('attribute'),
base=item_dict.get('base'),
side_effects=side_effects,
category=item_dict.get('category'),
usage=item_dict.get('usage'),
                             wielding=item_dict.get('wielding', 0))
    else:
        raise ValueError("Unknown item type: %s" % item_dict.get('type'))
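# A minimal usage sketch (hypothetical tier/type values): draws a random
# common healing item, assuming items.yaml defines at least one entry with
# tier 'common' and type 'healing'.
if __name__ == "__main__":
    potion = get_random_item(tier="common", item_type="healing")
    print("%s (%s): %s" % (potion.name, potion.tier, potion.description))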
|
python
|
import RPi.GPIO as GPIO
import time
from libdw import pyrebase
#Database Set-Up
projectid = "cleanbean-9e2f5"
dburl = "https://" + projectid + ".firebaseio.com"
authdomain = projectid + ".firebaseapp.com"
apikey = "AIzaSyA6H-rDpfGJZcTqFhf69t3VYbbOzfUW0EM"
email = "[email protected]"
password = "123456"
config = {
"apiKey": apikey,
"authDomain": authdomain,
"databaseURL": dburl,
}
firebase = pyrebase.initialize_app(config)
auth = firebase.auth()
user = auth.sign_in_with_email_and_password(email, password)
db = firebase.database()
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 6
GPIO_ECHO = 5
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
binHeight = 18.6
def trashHeight(distance):
    # Equivalent to distance / binHeight: the fraction of the bin that is
    # empty, given the sensor-to-trash distance in cm.
    return 1 - (binHeight - distance) / binHeight
def distance():
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
return distance
if __name__ == '__main__':
try:
        distance_list = []
while True:
dist = distance()
distance_list.append(dist)
print ("Measured Distance = %.1f cm" % dist)
time.sleep(1)
print(trashHeight(dist))
trash_height = trashHeight(dist)
db.child("Bin_1").set(trash_height, user['idToken'])
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Measurement stopped by User")
GPIO.cleanup()
|
python
|
"""Metaclass that overrides creating a new pipeline by wrapping methods with validators and setters."""
from functools import wraps
from rayml.exceptions import PipelineNotYetFittedError
from rayml.utils.base_meta import BaseMeta
class PipelineBaseMeta(BaseMeta):
"""Metaclass that overrides creating a new pipeline by wrapping methods with validators and setters."""
@classmethod
def check_for_fit(cls, method):
"""`check_for_fit` wraps a method that validates if `self._is_fitted` is `True`.
Args:
method (callable): Method to wrap.
Returns:
The wrapped method.
Raises:
PipelineNotYetFittedError: If pipeline is not yet fitted.
"""
@wraps(method)
def _check_for_fit(self, *args, **kwargs):
klass = type(self).__name__
if not self._is_fitted:
raise PipelineNotYetFittedError(
f"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}."
)
return method(self, *args, **kwargs)
return _check_for_fit
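# A minimal sketch (hypothetical toy class, not part of rayml) showing the
# wrapper's effect: a guarded method raises PipelineNotYetFittedError until
# `fit` sets `_is_fitted`. In rayml the metaclass applies the wrapper
# automatically; here we apply it by hand for illustration.
class _ToyPipeline:
    def __init__(self):
        self._is_fitted = False

    def fit(self):
        self._is_fitted = True
        return self

    def predict(self):
        return "predictions"

    # Wrap by hand, as the metaclass would do for registered methods.
    predict = PipelineBaseMeta.check_for_fit(predict)


if __name__ == "__main__":
    pipeline = _ToyPipeline()
    try:
        pipeline.predict()
    except PipelineNotYetFittedError as err:
        print(err)
    print(pipeline.fit().predict())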
|
python
|
from setuptools import setup
import os
def package_files(package_dir, subdirectory):
# walk the input package_dir/subdirectory
# return a package_data list
paths = []
directory = os.path.join(package_dir, subdirectory)
    for (path, directories, filenames) in os.walk(directory):
        relative_path = path.replace(package_dir + '/', '')
        for filename in filenames:
            paths.append(os.path.join(relative_path, filename))
return paths
data_files = package_files('casa_imaging', 'data') + package_files('casa_imaging', '../scripts')
setup_args = {
'name': 'casa_imaging',
'author': 'HERA Team',
'url': 'https://github.com/HERA-Team/casa_imaging',
'license': 'BSD',
'description': 'Collection of scripts for calibration, imaging and data processing in CASA and Python.',
'package_dir': {'casa_imaging': 'casa_imaging'},
'packages': ['casa_imaging'],
'include_package_data': True,
'scripts': ['scripts/pbcorr.py', 'scripts/source2file.py', 'scripts/make_model_cube.py',
'scripts/skynpz2calfits.py', 'scripts/source_extract.py',
'scripts/find_sources.py', 'scripts/calfits_to_Bcal.py',
'pipelines/skycal_pipe.py', 'scripts/get_model_vis.py', 'scripts/plot_fits.py'],
'version': '0.1',
'package_data': {'casa_imaging': data_files},
'zip_safe': False,
}
if __name__ == '__main__':
    setup(**setup_args)
|
python
|
""" Advent of Code Day 2 - Bathroom Security"""
def get_code(start_pos, keypad, valid_pos, lines):
    """Return the code generated by following the move instructions on the given keypad."""
    pos = start_pos
    code = ''
    for line in lines:
for move in line:
if move == 'R':
next_pos = [pos[0], pos[1] + 1]
elif move == 'L':
next_pos = [pos[0], pos[1] - 1]
elif move == 'D':
next_pos = [pos[0] + 1, pos[1]]
elif move == 'U':
next_pos = [pos[0] - 1, pos[1]]
if next_pos in valid_pos:
pos = next_pos
code += keypad[pos[0]][pos[1]]
return code
basic = [
['1', '2', '3'],
['4', '5', '6'],
['7', '8', '9'],
]
advanced = [
['', '', '1', '', ''],
['', '2', '3', '4', ''],
['5', '6', '7', '8', '9'],
['', 'A', 'B', 'C', ''],
['', '', 'D', '', ''],
]
with open('inputs/day_02.txt') as f:
lines = [line.strip() for line in f.readlines()]
# Answer Part One
buttons = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
print("Bathroom Code =", get_code([1, 1], basic, buttons))
# Answer Part Two
adv_but = [[0, 2], [1, 1], [1, 2], [1, 3], [2, 0], [2, 1], [2, 2], [2, 3],
[2, 4], [3, 1], [3, 2], [3, 3],[4, 2]]
print("Advanced Bathroom Code =", get_code([1, 1], advanced, adv_but))
|
python
|
import chanutils.reddit
from chanutils import get_json
from playitem import PlayItem, PlayItemList
_SEARCH_URL = "https://www.googleapis.com/youtube/v3/search"
_FEEDLIST = [
{'title':'Trending', 'url':'http://www.reddit.com/domain/youtube.com/top/.json'},
{'title':'Popular', 'url':'https://www.googleapis.com/youtube/v3/videos?maxResults=50&key=AIzaSyAzkfoVmKXf3520e5WLBMnOMXXbyjIMJLk&part=snippet&chart=mostPopular'},
]
def name():
return 'Youtube'
def image():
return "icon.png"
def description():
return "Youtube Channel (<a target='_blank' href='https://www.youtube.com/'>https://www.youtube.com/</a>)."
def feedlist():
return _FEEDLIST
def feed(idx):
url = _FEEDLIST[idx]['url']
if url.endswith('.json'):
return chanutils.reddit.get_feed(_FEEDLIST[idx])
else:
data = get_json(url)
return _extract(data)
def search(q):
query = {'part':'snippet', 'q':q, 'maxResults': 50,
'key':'AIzaSyAzkfoVmKXf3520e5WLBMnOMXXbyjIMJLk'}
data = get_json(_SEARCH_URL, params=query)
return _extract(data)
def _extract(data):
results = PlayItemList()
rtree = data['items']
for r in rtree:
        title = r['snippet']['title']
        subtitle = r['snippet']['publishedAt'][:10]
        synopsis = r['snippet']['description']
if len(synopsis) > 200:
synopsis = synopsis[:200] + "..."
img = r['snippet']['thumbnails']['default']['url']
if isinstance(r['id'], basestring):
vid = r['id']
elif 'videoId' in r['id']:
vid = r['id']['videoId']
else:
continue
url = 'https://www.youtube.com/watch?v=' + vid
results.add(PlayItem(title, img, url, subtitle, synopsis))
return results
|
python
|
"""
Demo for chunk extraction in pyClarion.
Prerequisite: Understanding of the basics of pyClarion as discussed in the demo
`free_association.py`.
"""
from pyClarion import (
Structure, Construct,
agent, subsystem, buffer, flow_bt, flow_tb, features, chunks, terminus,
feature, updater,
Chunks, Assets,
Stimulus, MaxNodes, TopDown, BottomUp,
BoltzmannSelector, ChunkExtractor, ChunkDBUpdater,
pprint
)
#############
### Setup ###
#############
# This simulation demonstrates a basic recipe for chunk extraction in
# pyClarion.
# The basic premise of the recipe is to create a special terminus construct
# for chunk extraction. On each cycle, the chunk extractor recommends chunks
# based on the state of the bottom level at propagation time. If the bottom
# level state matches an existing chunk, that chunk is recommended. Otherwise,
# a new chunk is recommended. These recommendations are then placed in the
# corresponding chunk database by an updater object.
# Here is the scenario:
# We are teaching Alice about fruits by showing her pictures of fruits and
# simultaneously speaking out their names. Afterwards, we quiz alice by either
# showing her more pictures or naming fruits.
### Knowledge Setup ###
# For this simulation, we develop a simple feature domain containing visual and
# auditory features. The visual features include color, shape, size, and
# texture. The only auditory dimension is that of words.
fspecs = [
feature("word", "/banana/"),
feature("word", "/apple/"),
feature("word", "/orange/"),
feature("word", "/plum/"),
feature("color", "red"),
feature("color", "green"),
feature("color", "yellow"),
feature("color", "orange"),
feature("color", "purple"),
feature("shape", "round"),
feature("shape", "oblong"),
feature("size", "small"),
feature("size", "medium"),
feature("texture", "smooth"),
feature("texture", "grainy"),
feature("texture", "spotty")
]
# As in `free_association.py`, we construct a chunk database to store chunks.
# However, instead of populating the database manually, we will have the agent
# create chunks based on its interactions with audio-visual stimuli.
chunk_db = Chunks()
### Agent Assembly ###
# The assembly process should be familiar from the free association example,
# with only a couple of mild novelties.
alice = Structure(
name=agent("alice")
)
with alice:
stimulus = Construct(
name=buffer("stimulus"),
process=Stimulus()
)
nacs = Structure(
name=subsystem("nacs"),
assets=Assets(chunk_db=chunk_db)
)
with nacs:
# Although the entry point for the NACS are chunks, in this example we
# start with features as there are no constructs that initially
# activate chunks in the NACS activation cycle.
Construct(
name=features("main"),
process=MaxNodes(
sources=[buffer("stimulus")]
)
)
Construct(
name=flow_bt("main"),
process=BottomUp(
source=features("main"),
chunks=nacs.assets.chunk_db
)
)
Construct(
name=chunks("main"),
process=MaxNodes(
sources=[flow_bt("main")]
)
)
# Termini
        # In addition to introducing chunk extraction, this example
        # demonstrates the use of two termini in a single subsystem. We
        # include one terminus for the output of the top level and one for the
        # bottom level.
# The top level terminus is basically the same as the one used in the
# free association example. It randomly selects a chunk through a
        # competitive process which involves sampling chunks from a Boltzmann
# distribution constructed from their respective strength values. This
# terminus is relevant for the quizzing/querying section of the
# simulation.
Construct(
name=terminus("tl"),
process=BoltzmannSelector(
source=chunks("main"),
temperature=0.01,
threshold=0.0
)
)
# The bottom level ('bl') terminus introduces the new emitter, the
# `ChunkExtractor`. As suggested by the name, this emitter extracts
# chunks capturing the state of the bottom level. More precisely, it
# first applies a thresholding function to feature activations in the
# bottom level. Then, it looks for a chunk whose form matches exactly
# the features above threshold. If a match is found, the corresponding
# chunk is emitted as output (fully activated). If no match is found,
# a new chunk is named and emitted, and a request is sent to the chunk
# database to add the new chunk.
Construct(
name=terminus("bl"),
process=ChunkExtractor(
source=features("main"),
threshold=0.9,
chunks=nacs.assets.chunk_db,
prefix="bl"
)
)
# The chunk database updater is responsible for applying any requested
# updates to the chunk database. This pattern gives control over the
# timing of updates to the chunk database.
Construct(
name=updater("cdb"),
process=ChunkDBUpdater(chunks=nacs.assets.chunk_db)
)
# Agent setup is now complete!
##################
### Simulation ###
##################
# In the learning stage of the simulation, let us imagine that we present the
# agent with a sequence of fruit pictures paired with the words that name the
# respective fruits.
# In this demo, we will present four instances of such stimuli, as defined
# below.
stimuli = [
{
feature("word", "/apple/"): 1.0,
feature("color", "red"): 1.0,
feature("shape", "round"): 1.0,
feature("size", "medium"): 1.0,
feature("texture", "smooth"): 1.0
},
{
feature("word", "/orange/"): 1.0,
feature("color", "orange"): 1.0,
feature("shape", "round"): 1.0,
feature("size", "medium"): 1.0,
feature("texture", "grainy"): 1.0
},
{
feature("word", "/banana/"): 1.0,
feature("color", "yellow"): 1.0,
feature("shape", "oblong"): 1.0,
feature("size", "medium"): 1.0,
feature("texture", "spotty"): 1.0
},
{
feature("word", "/plum/"): 1.0,
feature("color", "purple"): 1.0,
feature("shape", "round"): 1.0,
feature("size", "small"): 1.0,
feature("texture", "smooth"): 1.0
}
]
# In the loop below, we present each stimulus in turn and print the state of
# the agent at each step. The agent will automatically extract chunks. The
# final chunk database is printed on loop termination.
for i, s in enumerate(stimuli):
print("Presentation {}".format(i + 1))
stimulus.process.input(s)
alice.step()
pprint(alice.output)
print("Learned Chunks:")
pprint(nacs.assets.chunk_db)
# Visual and Auditory Queries
# Having formed some chunks, we can perform visual and auditory queries. In
# this simple setting, we represent queries as feature activations and take
# query responses to be the chunks selected by the top level terminus called
# 'terminus("tl")'.
# There are two queries defined below, one visual and one auditory.
queries = [
{
feature("color", "green"): .85,
feature("shape", "round"): .85,
feature("size", "medium"): .85,
feature("texture", "smooth"): .85
},
{
feature("word", "/orange/"): .85,
}
]
# To prevent formation of new chunks, we set the query strengths to be below
# the chunk inclusion threshold. This is a simple, but hacky, solution. A more
# elegant approach would be to use some kind of control mechanism (e.g. a
# `ControlledExtractor`).
# Once again we run a loop, presenting the stimuli and printing the agent
# state at each step.
for i, s in enumerate(queries):
print("Presentation {}".format(i + 1))
stimulus.process.input(s)
alice.step()
pprint(alice.output)
##################
### CONCLUSION ###
##################
# This simple simulation sought to demonstrate the following:
# - The basics of using updaters, and
# - A recipe for chunk extraction from the state of the bottom level.
|
python
|
import numpy
from pylab import *
from scipy.interpolate import interp1d
d1,e1,ee1,f1,ef1=numpy.loadtxt("full.txt",unpack=True)
f1=-f1*31.6e-15
inds=argsort(d1)
d1=d1[inds]
f1=f1[inds]
d2,e2,ee2,f2,ef2=numpy.loadtxt("PEC.txt",unpack=True)
f2=-f2*31.6e-15
inds=argsort(d2)
d2=d2[inds]
f2=f2[inds]
d3,e3,ee3,f3,ef3=numpy.loadtxt("temp.txt",unpack=True)
f3=-f3*31.6e-15
inds=argsort(d3)
d3=d3[inds]
f3=f3[inds]
datafile="../../Mathematica/calculated_vals.tsv"
dist,fpfa,fnaive,fright,ftemp=numpy.loadtxt(datafile,unpack=True)
dist=dist*1e6
PFA_datafile="../../Mathematica/calculated_pfa_vals.tsv"
dist2,fpfa2,fnaive2,fright2,ftemp2=numpy.loadtxt(PFA_datafile,unpack=True)
dist2=dist2*1e6
plot(d2,f2,label="PEC")
plot(d1,f1,label="SiO2/Au")
plot(d3,f3,label="PEC T=300")
plot(dist,fpfa,label="PFA",linestyle='dashed')
plot(dist,fright,label="SiO2/Au",linestyle='dashed')
plot(dist,ftemp,label="SiO2/Au T=300",linestyle='dashed')
xscale('log')
yscale('log')
xlabel('Distance (microns)')
ylabel('Force (N)')
title('Analytical (Dashed) v Numerical (Solid) Calculations')
legend()
show()
#savefig('analytic_v_numerical')
clf()
iPFA = interp1d(dist,fpfa)
rPFA=iPFA(d2)/f2
iPFA2 = interp1d(dist2,fpfa2)
rPFA2=iPFA2(d2)/f2
plot(d2,rPFA,label="Gradient Expansion PFA")
plot(d2,rPFA2,label="Normal PFA")
xscale('log')
yscale('log')
xlabel('Distance (Microns)')
ylabel('(PFA/BEM) Force Ratio')
title('Comparison between Calculations, grid=1 micron')
legend()
savefig("pfa_v_pec.png")
clf()
tag,xi,e,f=numpy.loadtxt("mesh_byXi.txt",unpack=True)
f=-f*31.6e-15
xis=numpy.unique(xi)
for ix in range(0,len(xis)):
inds = numpy.where(xi == xis[ix])
xplot=tag[inds]
yplot=f[inds]
plot(xplot,yplot,label=str(xis[ix]))
xscale('log')
yscale('log')
xlim(0.5,5.0)
legend(title='Int. Frequency',loc='lower left')
xlabel('Mesh length scale (microns)')
ylabel('Force Integrand Value')
title('Convergence Tests @ 1 micron')
savefig('convergence.png')
|
python
|
# _*_ coding:utf-8 _*_
# Practice calling functions.
def hello(name):
    print "hello", name

def manyHello():
    for i in ["a", "bobo", "green"]:
        hello(i)

manyHello()
|
python
|
from comex_stat.assets.models import (CGCE, CUCI, NCM, SH, AssetExportFacts,
AssetImportFacts, Country,
FederativeUnit, TradeBlocs,
Transportation, Urf)
from graphene_django.filter.fields import DjangoFilterConnectionField
from graphene_django.types import DjangoObjectType
from django_filters.utils import handle_timezone
from django_filters import FilterSet, CharFilter
from django_filters.filters import RangeFilter
from django.forms import DateField, Field
from datetime import datetime, time
from django.db.models import Sum
import graphene
import json
class DateRangeField(Field):
    '''
    Field that expects a JSON string like:
    "[\"yyyy-mm-dd\",\"yyyy-mm-dd\"]"
    where the first date is the beginning of the range and the second is
    the end. The field parses the two dates as DateFields and builds the
    range between them using datetime functions.
    '''
def compress(self, data_list):
if data_list:
start_date, stop_date = data_list
if start_date:
start_date = handle_timezone(
datetime.combine(start_date, time.min))
if stop_date:
stop_date = handle_timezone(
datetime.combine(stop_date, time.max))
return slice(start_date, stop_date)
return None
def clean(self, value):
if value:
clean_data = []
values = json.loads(value)
if isinstance(values, (list, tuple)):
for field_value in values:
clean_data.append(DateField().clean(field_value))
return self.compress(clean_data)
else:
return self.compress([])
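    # Illustrative round trip (dates arbitrary): cleaning the JSON string
    # '["2019-01-01","2019-12-31"]' yields slice(start, stop), where start is
    # midnight on the first date and stop is the last microsecond of the second.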
class DateFromToRangeFilter(RangeFilter):
'''
    Class defining a custom field filter that uses the DateRangeField above.
'''
field_class = DateRangeField
class AssetImportFilter(FilterSet):
"""
Custom filter-set class for Import facts
"""
# temporary field used to filter date fields by range
commercialized_between = DateFromToRangeFilter('date')
date = CharFilter(
field_name="date", lookup_expr="icontains")
registries = CharFilter(
field_name="registries", lookup_expr="iexact")
net_kilogram = CharFilter(
field_name="net_kilogram", lookup_expr="iexact")
fob_value = CharFilter(
field_name="fob_value", lookup_expr="iexact")
country_name_pt = CharFilter(
field_name="origin_country__country_name_pt",
lookup_expr="iexact")
country_name_en = CharFilter(
field_name="origin_country__country_name_en",
lookup_expr="iexact")
country_name_es = CharFilter(
field_name="origin_country__country_name_es",
lookup_expr="iexact")
country_code_iso3 = CharFilter(
field_name="origin_country__country_code_iso3",
lookup_expr="iexact")
trade_bloc_name_pt = CharFilter(
field_name="origin_country__trade_bloc__bloc_name_pt",
lookup_expr="iexact")
trade_bloc_name_en = CharFilter(
field_name="origin_country__trade_bloc__bloc_name_en",
lookup_expr="iexact")
trade_bloc_name_es = CharFilter(
field_name="origin_country__trade_bloc__bloc_name_es",
lookup_expr="iexact")
trade_bloc_code = CharFilter(
field_name="origin_country__trade_bloc__bloc_code",
lookup_expr="iexact")
federative_unity_name = CharFilter(
field_name="destination_fed_unit__uf_name",
lookup_expr="iexact")
federative_unity_code = CharFilter(
field_name="destination_fed_unit__uf_code",
lookup_expr="iexact")
federative_unity_initials = CharFilter(
field_name="destination_fed_unit__uf_initials",
lookup_expr="iexact")
urf_name = CharFilter(
field_name="urf__urf_name", lookup_expr="iexact")
urf_code = CharFilter(
field_name="urf__urf_code", lookup_expr="iexact")
transportation_name = CharFilter(
field_name="transportation__transportation_name",
lookup_expr="iexact")
transportation_code = CharFilter(
field_name="transportation__transportation_code",
lookup_expr="iexact")
ncm_code = CharFilter(
field_name="ncm__ncm_code",
lookup_expr="iexact")
statistic_unit_code = CharFilter(
field_name="ncm__statistic_unit_code",
lookup_expr="iexact")
ppe_code = CharFilter(
field_name="ncm__ppe_code",
lookup_expr="iexact")
ppi_code = CharFilter(
field_name="ncm__ppi_code",
lookup_expr="iexact")
aggregate_factor_code = CharFilter(
field_name="ncm__aggregate_factor_code",
lookup_expr="iexact")
isic4_code = CharFilter(
field_name="ncm__isic4_code",
lookup_expr="iexact")
exportation_subset = CharFilter(
field_name="ncm__exportation_subset",
lookup_expr="iexact")
siit_code = CharFilter(
field_name="ncm__siit_code",
lookup_expr="iexact")
cuci_item_code = CharFilter(
field_name="ncm__cuci__item_code",
lookup_expr="iexact")
cuci_subitem_code = CharFilter(
field_name="ncm__cuci__subitem_code",
lookup_expr="iexact")
cuci_position_code = CharFilter(
field_name="ncm__cuci__position_code",
lookup_expr="iexact")
cuci_chapter_code = CharFilter(
field_name="ncm__cuci__chapter_code",
lookup_expr="iexact")
cuci_section_code = CharFilter(
field_name="ncm__cuci__section_code",
lookup_expr="iexact")
cgce_level1_code = CharFilter(
field_name="ncm__cgce__level1_code",
lookup_expr="iexact"
)
cgce_level2_code = CharFilter(
field_name="ncm__cgce__level2_code",
lookup_expr="iexact"
)
cgce_level3_code = CharFilter(
field_name="ncm__cgce__level3_code",
lookup_expr="iexact"
)
sh_chapter_code = CharFilter(
field_name="ncm__sh__chapter_code",
lookup_expr="iexact"
)
sh_position_code = CharFilter(
field_name="ncm__sh__position_code",
lookup_expr="iexact"
)
sh_subposition_code = CharFilter(
field_name="ncm__sh__subposition_code",
lookup_expr="iexact"
)
sh_section_code = CharFilter(
field_name="ncm__sh__section_code",
lookup_expr="iexact"
)
class Meta:
model = AssetImportFacts
fields = ['commercialized_between',
'date', 'registries', 'net_kilogram', 'fob_value']
class AssetExportFilter(FilterSet):
'''
Custom filter-set class for Export facts
'''
# temporary field used to filter date fields by range
commercialized_between = DateFromToRangeFilter('date')
date = CharFilter(
field_name="date", lookup_expr="icontains")
registries = CharFilter(
field_name="registries", lookup_expr="iexact")
net_kilogram = CharFilter(
field_name="net_kilogram", lookup_expr="iexact")
fob_value = CharFilter(
field_name="fob_value", lookup_expr="iexact")
country_name_pt = CharFilter(
field_name="destination_country__country_name_pt",
lookup_expr="iexact")
country_name_en = CharFilter(
field_name="destination_country__country_name_en",
lookup_expr="iexact")
country_name_es = CharFilter(
field_name="destination_country__country_name_es",
lookup_expr="iexact")
country_code_iso3 = CharFilter(
field_name="destination_country__country_code_iso3",
lookup_expr="iexact")
trade_bloc_name_pt = CharFilter(
field_name="destination_country__trade_bloc__bloc_name_pt",
lookup_expr="iexact")
trade_bloc_name_en = CharFilter(
field_name="destination_country__trade_bloc__bloc_name_en",
lookup_expr="iexact")
trade_bloc_name_es = CharFilter(
field_name="destination_country__trade_bloc__bloc_name_es",
lookup_expr="iexact")
trade_bloc_code = CharFilter(
field_name="destination_country__trade_bloc__bloc_code",
lookup_expr="iexact")
federative_unity_name = CharFilter(
field_name="origin_fed_unit__uf_name", lookup_expr="iexact")
federative_unity_code = CharFilter(
field_name="origin_fed_unit__uf_code", lookup_expr="iexact")
federative_unity_initials = CharFilter(
field_name="origin_fed_unit__uf_initials", lookup_expr="iexact")
transportation_name = CharFilter(
field_name="transportation__transportation_name",
lookup_expr="iexact")
transportation_code = CharFilter(
field_name="transportation__transportation_code",
lookup_expr="iexact")
ncm_code = CharFilter(
field_name="ncm__ncm_code",
lookup_expr="iexact")
statistic_unit_code = CharFilter(
field_name="ncm__statistic_unit_code",
lookup_expr="iexact")
ppe_code = CharFilter(
field_name="ncm__ppe_code",
lookup_expr="iexact")
ppi_code = CharFilter(
field_name="ncm__ppi_code",
lookup_expr="iexact")
aggregate_factor_code = CharFilter(
field_name="ncm__aggregate_factor_code",
lookup_expr="iexact")
isic4_code = CharFilter(
field_name="ncm__isic4_code",
lookup_expr="iexact")
exportation_subset = CharFilter(
field_name="ncm__exportation_subset",
lookup_expr="iexact")
siit_code = CharFilter(
field_name="ncm__siit_code",
lookup_expr="iexact")
cuci_item_code = CharFilter(
field_name="ncm__cuci__item_code",
lookup_expr="iexact")
cuci_subitem_code = CharFilter(
field_name="ncm__cuci__subitem_code",
lookup_expr="iexact")
cuci_position_code = CharFilter(
field_name="ncm__cuci__position_code",
lookup_expr="iexact")
cuci_chapter_code = CharFilter(
field_name="ncm__cuci__chapter_code",
lookup_expr="iexact")
cuci_section_code = CharFilter(
field_name="ncm__cuci__section_code",
lookup_expr="iexact")
cgce_level1_code = CharFilter(
field_name="ncm__cgce__level1_code",
lookup_expr="iexact"
)
cgce_level2_code = CharFilter(
field_name="ncm__cgce__level2_code",
lookup_expr="iexact"
)
cgce_level3_code = CharFilter(
field_name="ncm__cgce__level3_code",
lookup_expr="iexact"
)
sh_chapter_code = CharFilter(
field_name="ncm__sh__chapter_code",
lookup_expr="iexact"
)
sh_position_code = CharFilter(
field_name="ncm__sh__position_code",
lookup_expr="iexact"
)
sh_subposition_code = CharFilter(
field_name="ncm__sh__subposition_code",
lookup_expr="iexact"
)
sh_section_code = CharFilter(
field_name="ncm__sh__section_code",
lookup_expr="iexact"
)
class Meta:
model = AssetExportFacts
fields = ['commercialized_between',
'date', 'registries', 'net_kilogram', 'fob_value']
class AssetImportFactsNode(DjangoObjectType):
total_fob_value = graphene.String()
class Meta:
model = AssetImportFacts
filter_fields = ['commercialized_between',
'date', 'registries', 'net_kilogram', 'fob_value']
interfaces = (graphene.Node, )
def resolve_total_fob_value(self, info):
a = AssetImportFacts.objects.filter(
date=self.date).aggregate(Sum('fob_value'))
return a['fob_value__sum']
class AssetExportFactsNode(DjangoObjectType):
class Meta:
model = AssetExportFacts
filter_fields = ['commercialized_between',
'date', 'registries', 'net_kilogram', 'fob_value']
interfaces = (graphene.Node, )
class AssetImportFactsType(DjangoObjectType):
class Meta:
model = AssetImportFacts
class AssetExportFactsType(DjangoObjectType):
class Meta:
model = AssetExportFacts
class NCMType(DjangoObjectType):
class Meta:
model = NCM
filter_fields = {
'ncm_code': ['iexact'],
'ncm_name_pt': ['icontains'],
'ncm_name_en': ['icontains'],
'ncm_name_es': ['icontains'],
'statistic_unit_code': ['iexact'],
'ppe_code': ['iexact'],
'ppi_code': ['iexact'],
'aggregate_factor_code': ['iexact'],
'cuci': [],
'cgce': [],
'sh': [],
'isic4_code': ['iexact'],
'exportation_subset': ['iexact'],
'siit_code': ['iexact']
}
        interfaces = (graphene.Node, )
class CUCIType(DjangoObjectType):
class Meta:
model = CUCI
filter_fields = {
'item_code': ['iexact'],
'item_name_pt': ['icontains'],
'item_name_en': ['icontains'],
'item_name_es': ['icontains'],
'subitem_code': ['iexact'],
'subitem_name_pt': ['icontains'],
'subitem_name_en': ['icontains'],
'subitem_name_es': ['icontains'],
'position_code': ['iexact'],
'position_name_pt': ['icontains'],
'position_name_en': ['icontains'],
'position_name_es': ['icontains'],
'chapter_code': ['iexact'],
'chapter_name_pt': ['icontains'],
'chapter_name_en': ['icontains'],
'chapter_name_es': ['icontains'],
'section_code': ['iexact'],
'section_name_pt': ['icontains'],
'section_name_en': ['icontains'],
'section_name_es': ['icontains']
}
        interfaces = (graphene.Node, )
class CGCEType(DjangoObjectType):
class Meta:
model = CGCE
filter_fields = {
'level1_code': ['iexact'],
'level1_name_pt': ['icontains'],
'level1_name_en': ['icontains'],
'level1_name_es': ['icontains'],
'level2_code': ['iexact'],
'level2_name_pt': ['icontains'],
'level2_name_en': ['icontains'],
'level2_name_es': ['icontains'],
'level3_code': ['iexact'],
'level3_name_pt': ['icontains'],
'level3_name_en': ['icontains'],
'level3_name_es': ['icontains']
}
        interfaces = (graphene.Node, )
class SHType(DjangoObjectType):
class Meta:
model = SH
filter_fields = {
'chapter_code': ['iexact'],
'chapter_name_pt': ['icontains'],
'chapter_name_en': ['icontains'],
'chapter_name_es': ['icontains'],
'position_code': ['iexact'],
'position_name_pt': ['icontains'],
'position_name_en': ['icontains'],
'position_name_es': ['icontains'],
'subposition_code': ['iexact'],
'subposition_name_pt': ['icontains'],
'subposition_name_en': ['icontains'],
'subposition_name_es': ['icontains'],
'section_code': ['iexact'],
'section_name_pt': ['icontains'],
'section_name_en': ['icontains'],
'section_name_es': ['icontains']
}
        interfaces = (graphene.Node, )
class TradeBlocsType(DjangoObjectType):
class Meta:
model = TradeBlocs
filter_fields = {
'bloc_name_pt': ['icontains'],
'bloc_name_en': ['icontains'],
'bloc_name_es': ['icontains'],
'bloc_code': ['iexact']
}
        interfaces = (graphene.Node, )
class CountryType(DjangoObjectType):
class Meta:
model = Country
filter_fields = {
'country_name_pt': ['icontains'],
'country_name_en': ['icontains'],
'country_name_es': ['icontains'],
'country_code_iso3': ['iexact']
}
        interfaces = (graphene.Node, )
class FederativeUnitType(DjangoObjectType):
class Meta:
model = FederativeUnit
filter_fields = {
'uf_code': ['iexact'],
'uf_name': ['iexact'],
'uf_initials': ['iexact']
}
        interfaces = (graphene.Node, )
class TransportationType(DjangoObjectType):
class Meta:
model = Transportation
filter_fields = {
'transportation_name': ['icontains'],
'transportation_code': ['iexact']
}
        interfaces = (graphene.Node, )
class UrfType(DjangoObjectType):
class Meta:
model = Urf
filter_fields = {
'urf_code': ['iexact'],
'urf_name': ['iexact'],
}
        interfaces = (graphene.Node, )
class Aggregated_Import(DjangoObjectType):
total_fob_value_country = graphene.String()
total_fob_value_transportation = graphene.String()
total_fob_value_date = graphene.String()
total_fob_value_urf = graphene.String()
total_fob_value_trade_bloc = graphene.String()
total_registries_country = graphene.String()
total_registries_transportation = graphene.String()
total_registries_date = graphene.String()
total_registries_urf = graphene.String()
total_registries_trade_bloc = graphene.String()
total_net_kilogram_country = graphene.String()
total_net_kilogram_transportation = graphene.String()
total_net_kilogram_date = graphene.String()
total_net_kilogram_urf = graphene.String()
total_net_kilogram_trade_bloc = graphene.String()
class Meta:
model = AssetImportFacts
filter_fields = ['date', 'registries', 'net_kilogram', 'fob_value']
interfaces = (graphene.Node, )
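    # Each resolver below re-queries the fact table for rows sharing this
    # row's date, country, transportation, URF, or trade bloc, and sums the
    # requested metric over the matching rows.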
def resolve_total_fob_value_date(self, info):
a = AssetImportFacts.objects.filter(date=self.date).aggregate(
Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_country(self, info):
a = AssetImportFacts.objects.filter(
origin_country__country_name_pt=self.origin_country.
country_name_pt).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_transportation(self, info):
a = AssetImportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_urf(self, info):
a = AssetImportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_trade_bloc(self, info):
a = AssetImportFacts.objects.filter(
origin_country__trade_bloc__bloc_name_pt=self.origin_country.
trade_bloc.bloc_name_pt).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_registries_date(self, info):
a = AssetImportFacts.objects.filter(date=self.date).aggregate(
Sum('registries'))
return a['registries__sum']
def resolve_total_registries_country(self, info):
a = AssetImportFacts.objects.filter(
origin_country__country_name_pt=self.origin_country.
country_name_pt).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_transportation(self, info):
a = AssetImportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_urf(self, info):
a = AssetImportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_trade_bloc(self, info):
a = AssetImportFacts.objects.filter(
origin_country__trade_bloc__bloc_name_pt=self.origin_country.
trade_bloc.bloc_name_pt).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_net_kilogram_date(self, info):
a = AssetImportFacts.objects.filter(date=self.date).aggregate(
Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_country(self, info):
a = AssetImportFacts.objects.filter(
origin_country__country_name_pt=self.origin_country.
country_name_pt).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_transportation(self, info):
a = AssetImportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_urf(self, info):
a = AssetImportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_trade_bloc(self, info):
a = AssetImportFacts.objects.filter(
origin_country__trade_bloc__bloc_name_pt=self.origin_country.
trade_bloc.bloc_name_pt).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
class Aggregated_Export(DjangoObjectType):
total_fob_value_country = graphene.String()
total_fob_value_transportation = graphene.String()
total_fob_value_date = graphene.String()
total_fob_value_urf = graphene.String()
total_fob_value_trade_bloc = graphene.String()
total_registries_country = graphene.String()
total_registries_transportation = graphene.String()
total_registries_date = graphene.String()
total_registries_urf = graphene.String()
total_registries_trade_bloc = graphene.String()
total_net_kilogram_country = graphene.String()
total_net_kilogram_transportation = graphene.String()
total_net_kilogram_date = graphene.String()
total_net_kilogram_urf = graphene.String()
total_net_kilogram_trade_bloc = graphene.String()
class Meta:
model = AssetExportFacts
filter_fields = ['date', 'registries', 'net_kilogram', 'fob_value']
interfaces = (graphene.Node, )
def resolve_total_fob_value_date(self, info):
a = AssetExportFacts.objects.filter(date=self.date).aggregate(
Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_country(self, info):
a = AssetExportFacts.objects.filter(
destination_country__country_name_pt=self.destination_country.
country_name_pt).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_transportation(self, info):
a = AssetExportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_urf(self, info):
a = AssetExportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_fob_value_trade_bloc(self, info):
a = AssetExportFacts.objects.filter(
destination_country__trade_bloc__bloc_name_pt=self.
destination_country.trade_bloc.bloc_name_pt).aggregate(
Sum('fob_value'))
return a['fob_value__sum']
def resolve_total_registries_date(self, info):
a = AssetExportFacts.objects.filter(date=self.date).aggregate(
Sum('registries'))
return a['registries__sum']
def resolve_total_registries_country(self, info):
a = AssetExportFacts.objects.filter(
destination_country__country_name_pt=self.destination_country.
country_name_pt).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_transportation(self, info):
a = AssetExportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_urf(self, info):
a = AssetExportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('registries'))
return a['registries__sum']
def resolve_total_registries_trade_bloc(self, info):
a = AssetExportFacts.objects.filter(
destination_country__trade_bloc__bloc_name_pt=self.
destination_country.trade_bloc.bloc_name_pt).aggregate(
Sum('registries'))
return a['registries__sum']
def resolve_total_net_kilogram_date(self, info):
a = AssetExportFacts.objects.filter(date=self.date).aggregate(
Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_country(self, info):
a = AssetExportFacts.objects.filter(
destination_country__country_name_pt=self.destination_country.
country_name_pt).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_transportation(self, info):
a = AssetExportFacts.objects.filter(
transportation__transportation_name=self.transportation.
transportation_name).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_urf(self, info):
a = AssetExportFacts.objects.filter(
urf__urf_name=self.urf.urf_name).aggregate(Sum('net_kilogram'))
return a['net_kilogram__sum']
def resolve_total_net_kilogram_trade_bloc(self, info):
a = AssetExportFacts.objects.filter(
destination_country__trade_bloc__bloc_name_pt=self.
destination_country.trade_bloc.bloc_name_pt).aggregate(
Sum('net_kilogram'))
return a['net_kilogram__sum']
class Query(graphene.ObjectType):
    all_import = DjangoFilterConnectionField(
        AssetImportFactsNode, filterset_class=AssetImportFilter,
        description="Method that returns import-type objects")
    all_export = DjangoFilterConnectionField(
        AssetExportFactsNode, filterset_class=AssetExportFilter,
        description="Method that returns export-type objects")
    all_tradeBlocs = DjangoFilterConnectionField(
        TradeBlocsType,
        description="Method that returns the registered trade blocs")
    all_country = DjangoFilterConnectionField(
        CountryType,
        description="Method that returns the registered countries")
    all_federativeUnit = DjangoFilterConnectionField(
        FederativeUnitType,
        description="Method that returns the registered federative units")
    all_transportation = DjangoFilterConnectionField(
        TransportationType,
        description="Method that returns the registered transportation modes")
    all_urf = DjangoFilterConnectionField(
        UrfType, description="Method that returns the registered URFs")
    all_ncm = DjangoFilterConnectionField(
        NCMType, description="Method that returns the NCMs of the records")
    all_cuci = DjangoFilterConnectionField(
        CUCIType,
        description="Method that returns the registered CUCI nomenclatures")
    all_cgce = DjangoFilterConnectionField(
        CGCEType,
        description="Method that returns the registered CGCE nomenclatures")
    all_sh = DjangoFilterConnectionField(
        SHType,
        description="Method that returns the registered SH nomenclatures")
aggregated_import_transportation = DjangoFilterConnectionField(
Aggregated_Import, filterset_class=AssetImportFilter)
aggregated_import_urf = DjangoFilterConnectionField(
Aggregated_Import, filterset_class=AssetImportFilter)
aggregated_import_date = DjangoFilterConnectionField(
Aggregated_Import, filterset_class=AssetImportFilter)
aggregated_import_country = DjangoFilterConnectionField(
Aggregated_Import, filterset_class=AssetImportFilter)
aggregated_import_trade_bloc = DjangoFilterConnectionField(
Aggregated_Import, filterset_class=AssetImportFilter)
aggregated_export_transportation = DjangoFilterConnectionField(
Aggregated_Export, filterset_class=AssetExportFilter)
aggregated_export_urf = DjangoFilterConnectionField(
Aggregated_Export, filterset_class=AssetExportFilter)
aggregated_export_date = DjangoFilterConnectionField(
Aggregated_Export, filterset_class=AssetExportFilter)
aggregated_export_country = DjangoFilterConnectionField(
Aggregated_Export, filterset_class=AssetExportFilter)
aggregated_export_trade_bloc = DjangoFilterConnectionField(
Aggregated_Export, filterset_class=AssetExportFilter)
def resolve_all_import(self, info, **kwargs):
return AssetImportFacts.objects.all()
def resolve_all_export(self, info, **kwargs):
return AssetExportFacts.objects.all()
def resolve_all_ncm(self, info, **kwargs):
return NCM.objects.all()
def resolve_all_tradeBlocs(self, info, **kwargs):
return TradeBlocs.objects.all()
def resolve_all_country(self, info, **kwargs):
return Country.objects.all()
def resolve_all_federativeUnit(self, info, **kwargs):
return FederativeUnit.objects.all()
def resolve_all_transportation(self, info, **kwargs):
return Transportation.objects.all()
def resolve_all_urf(self, info, **kwargs):
return Urf.objects.all()
def resolve_all_cuci(self, info, **kwargs):
return CUCI.objects.all()
def resolve_all_cgce(self, info, **kwargs):
return CGCE.objects.all()
def resolve_all_sh(self, info, **kwargs):
return SH.objects.all()
def resolve_aggregated_import_transportation(self, info, **kwargs):
return list(AssetImportFacts.objects.raw(
'''SELECT b.[id], a.[transportation_code], a.[transportation_name]
FROM assets_Transportation a INNER JOIN assets_AssetImportFacts b
ON a.[transportation_code]=b.[transportation_id]
GROUP BY a.[transportation_name]'''))
def resolve_aggregated_import_urf(self, info, **kwargs):
return list(AssetImportFacts.objects.raw(
'''SELECT b.[id], a.[urf_code], a.[urf_name]
FROM assets_Urf a INNER JOIN assets_AssetImportFacts b
ON a.[urf_code]=b.[urf_id]
GROUP BY a.[urf_name]'''))
def resolve_aggregated_import_date(self, info, **kwargs):
return list(AssetImportFacts.objects.raw('''Select id, COUNT(date)
FROM assets_AssetImportFacts
GROUP BY date'''))
def resolve_aggregated_import_country(self, info, **kwargs):
return list(AssetImportFacts.objects.raw(
'''SELECT b.[id], a.[id], a.[country_name_pt]
FROM assets_Country a INNER JOIN assets_AssetImportFacts b
ON a.[id]=b.[origin_country_id]
GROUP BY a.[country_name_pt]'''))
def resolve_aggregated_import_trade_bloc(self, info, **kwargs):
return list(AssetImportFacts.objects.raw(
'''SELECT c.[bloc_code], c.[bloc_name_pt], b.[origin_country_id],
a.[id], a.[trade_bloc_id]
FROM assets_AssetImportFacts b
INNER JOIN assets_Country a
ON a.[id]=b.[origin_country_id]
INNER JOIN assets_TradeBlocs c
ON c.[bloc_code]=a.[trade_bloc_id]
GROUP BY c.[bloc_name_pt]'''))
def resolve_aggregated_export_transportation(self, info, **kwargs):
return list(AssetExportFacts.objects.raw(
'''SELECT b.[id], a.[transportation_code], a.[transportation_name]
FROM assets_Transportation a INNER JOIN assets_AssetExportFacts b
ON a.[transportation_code]=b.[transportation_id]
GROUP BY a.[transportation_name]'''))
def resolve_aggregated_export_urf(self, info, **kwargs):
return list(AssetExportFacts.objects.raw(
'''SELECT b.[id], a.[urf_code], a.[urf_name]
FROM assets_Urf a INNER JOIN assets_AssetExportFacts b
ON a.[urf_code]=b.[urf_id]
GROUP BY a.[urf_name]'''))
def resolve_aggregated_export_date(self, info, **kwargs):
return list(AssetExportFacts.objects.raw('''Select id, COUNT(date)
FROM assets_AssetExportFacts
GROUP BY date'''))
def resolve_aggregated_export_country(self, info, **kwargs):
return list(AssetExportFacts.objects.raw(
'''SELECT b.[id], a.[id], a.[country_name_pt]
FROM assets_Country a INNER JOIN assets_AssetExportFacts b
ON a.[id]=b.[destination_country_id]
GROUP BY a.[country_name_pt]'''))
def resolve_aggregated_export_trade_bloc(self, info, **kwargs):
return list(AssetExportFacts.objects.raw(
'''SELECT c.[bloc_code], c.[bloc_name_pt], b.[destination_country_id],
a.[id], a.[trade_bloc_id]
FROM assets_AssetExportFacts b
INNER JOIN assets_Country a
ON a.[id]=b.[destination_country_id]
INNER JOIN assets_TradeBlocs c
ON c.[bloc_code]=a.[trade_bloc_id]
GROUP BY c.[bloc_name_pt]'''))
|
python
|
import numpy as np
import pandas as pd
np.random.seed(101)
df = pd.DataFrame(np.random.randn(5, 4), index='A B C D E'.split(), columns='W X Y Z'.split())
print(df)
print(df[['W', 'Z']])
df['new'] = df['W'] + df['X']
print(df)
print(df.loc['A', 'X'])
print(df.loc['A'])
|
python
|
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.waas_api_spec import WaasAPISpec # noqa: E501
from openapi_client.rest import ApiException
class TestWaasAPISpec(unittest.TestCase):
"""WaasAPISpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test WaasAPISpec
        include_optional is a boolean; when False only required
        params are included, when True both required and
        optional params are included """
# model = openapi_client.models.waas_api_spec.WaasAPISpec() # noqa: E501
        if include_optional:
return WaasAPISpec(
description = '',
effect = '[\"ban\",\"prevent\",\"alert\",\"allow\",\"disable\",\"reCAPTCHA\"]',
endpoints = [
                openapi_client.models.waas_endpoint.WaasEndpoint(  # assumed module path per the generator's snake_case convention
base_path = '',
exposed_port = 56,
host = '',
http2 = True,
internal_port = 56,
tls = True, )
],
fallback_effect = '[\"ban\",\"prevent\",\"alert\",\"allow\",\"disable\",\"reCAPTCHA\"]',
paths = [
                openapi_client.models.waas_path.WaasPath(
methods = [
                        openapi_client.models.waas_method.WaasMethod(
method = '',
parameters = [
                                openapi_client.models.waas_param.WaasParam(
allow_empty_value = True,
array = True,
explode = True,
location = '[\"path\",\"query\",\"cookie\",\"header\",\"body\",\"json\",\"formData\",\"multipart\"]',
max = 1.337,
min = 1.337,
name = '',
required = True,
style = '[\"simple\",\"spaceDelimited\",\"tabDelimited\",\"pipeDelimited\",\"form\",\"matrix\",\"label\"]',
type = '[\"integer\",\"number\",\"string\",\"boolean\",\"array\",\"object\"]', )
], )
],
path = '', )
],
skip_learning = True
)
        else:
return WaasAPISpec(
)
def testWaasAPISpec(self):
"""Test WaasAPISpec"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
python
|
import boto3
def post_handler(event, context):
    sns = boto3.client('sns')
response = sns.publish(
TopicArn='arn:aws:sns:eu-west-1:538353771716:mytopic',
Message='testinglambda'
)
    return {
        "message": "successful"
    }
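# Illustrative local invocation (the handler ignores event and context):
#   post_handler({}, None)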
|
python
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""Test scripts for `findinfiles` plugin."""
# Standard library imports
import re
import os
import os.path as osp
# 3rd party imports
import pytest
# Local imports
from spyder.config.main import EXCLUDE_PATTERNS
from spyder.plugins.findinfiles import FindInFiles
from spyder.widgets.findinfiles import SELECT_OTHER
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
NONASCII_DIR = osp.join(LOCATION, u"èáïü Øαôå 字分误")
if not osp.exists(NONASCII_DIR):
os.makedirs(NONASCII_DIR)
@pytest.fixture
def findinfiles_bot(qtbot):
"""Set up SearchInComboBox combobox."""
findinfiles_plugin = FindInFiles()
qtbot.addWidget(findinfiles_plugin)
return findinfiles_plugin, qtbot
def check_regex(patterns):
"""
    Check that the provided regular expression patterns are valid by
    compiling them. Return a list of booleans, one per pattern.
"""
checks = []
for pattern in patterns:
try:
re.compile(pattern)
is_valid = True
except re.error:
is_valid = False
checks.append(is_valid)
return checks
def test_exclude_patterns_are_valid_regex():
checks = check_regex(EXCLUDE_PATTERNS)
assert all(checks)
# ---- Tests for FindInFiles plugin
def test_closing_plugin(qtbot, mocker):
"""
Test that the external paths listed in the combobox are saved and loaded
correctly from the spyder config file.
"""
findinfiles_plugin, qtbot = findinfiles_bot(qtbot)
path_selection_combo = findinfiles_plugin.findinfiles.find_options.path_selection_combo
path_selection_combo.clear_external_paths()
assert path_selection_combo.get_external_paths() == []
# Add external paths to the path_selection_combo.
expected_results = [
LOCATION,
osp.dirname(LOCATION),
osp.dirname(osp.dirname(LOCATION)),
NONASCII_DIR
]
for external_path in expected_results:
mocker.patch('spyder.widgets.findinfiles.getexistingdirectory',
return_value=external_path)
path_selection_combo.setCurrentIndex(SELECT_OTHER)
assert path_selection_combo.get_external_paths() == expected_results
    # Force the options to be saved to the config file. Something needs to be
    # set in the search_text combobox first, or else the find-in-files options
    # won't be saved to the config file (see PR #6095).
findinfiles_plugin.findinfiles.find_options.search_text.set_current_text("test")
findinfiles_plugin.closing_plugin()
assert findinfiles_plugin.get_option('path_history') == expected_results
# Close and restart the plugin and assert that the external_path_history
# has been saved and loaded as expected.
findinfiles_plugin.close()
findinfiles_plugin, qtbot = findinfiles_bot(qtbot)
path_selection_combo = findinfiles_plugin.findinfiles.find_options.path_selection_combo
assert path_selection_combo.get_external_paths() == expected_results
if __name__ == "__main__":
pytest.main(['-x', osp.basename(__file__), '-v', '-rw'])
|
python
|
from __future__ import absolute_import
from django.core import mail
from sentry.utils.compat.mock import patch
from social_auth.models import UserSocialAuth
from sentry.exceptions import InvalidIdentity, PluginError
from sentry.models import (
Commit,
Deploy,
Integration,
LatestRepoReleaseEnvironment,
Release,
ReleaseHeadCommit,
Repository,
)
from sentry.tasks.commits import fetch_commits, handle_invalid_identity
from sentry.testutils import TestCase
class FetchCommitsTest(TestCase):
def test_simple(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
deploy = Deploy.objects.create(organization_id=org.id, release=release2, environment_id=5)
with self.tasks():
with patch.object(Deploy, "notify_if_ready") as mock_notify_if_ready:
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
commit_list = list(
Commit.objects.filter(releasecommit__release=release2).order_by("releasecommit__order")
)
# see DummyRepositoryProvider.compare_commits
assert len(commit_list) == 3
assert commit_list[0].repository_id == repo.id
assert commit_list[0].organization_id == org.id
assert commit_list[0].key == "62de626b7c7cfb8e77efb4273b1a3df4123e6216"
assert commit_list[1].repository_id == repo.id
assert commit_list[1].organization_id == org.id
assert commit_list[1].key == "58de626b7c7cfb8e77efb4273b1a3df4123e6345"
assert commit_list[2].repository_id == repo.id
assert commit_list[2].organization_id == org.id
assert commit_list[2].key == "b" * 40
mock_notify_if_ready.assert_called_with(deploy.id, fetch_complete=True)
latest_repo_release_environment = LatestRepoReleaseEnvironment.objects.get(
repository_id=repo.id, environment_id=5
)
assert latest_repo_release_environment.deploy_id == deploy.id
assert latest_repo_release_environment.release_id == release2.id
assert latest_repo_release_environment.commit_id == commit_list[0].id
@patch("sentry.tasks.commits.handle_invalid_identity")
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_invalid_identity(self, mock_compare_commits, mock_handle_invalid_identity):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
usa = UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = InvalidIdentity(identity=usa)
fetch_commits(
release_id=release2.id, user_id=self.user.id, refs=refs, previous_release_id=release.id
)
mock_handle_invalid_identity.assert_called_once_with(identity=usa, commit_failure=True)
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_plugin_error(self, mock_compare_commits):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = Exception("secrets")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "secrets" not in msg.body
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_plugin_error_for_sentry_app(self, mock_compare_commits):
org = self.create_organization(owner=self.user, name="baz")
sentry_app = self.create_sentry_app(
organization=org, published=True, verify_install=False, name="Super Awesome App"
)
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
mock_compare_commits.side_effect = Exception("secrets")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=sentry_app.proxy_user_id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "secrets" not in msg.body
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_random_exception(self, mock_compare_commits):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = PluginError("You can read me")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "You can read me" in msg.body
def test_fetch_error_random_exception_integration(self):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
integration = Integration.objects.create(provider="example", name="Example")
integration.add_organization(org)
repo = Repository.objects.create(
name="example",
provider="integrations:example",
organization_id=org.id,
integration_id=integration.id,
)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "Repository not found" in msg.body
class HandleInvalidIdentityTest(TestCase):
def test_simple(self):
usa = UserSocialAuth.objects.create(user=self.user, provider="dummy")
with self.tasks():
handle_invalid_identity(usa)
assert not UserSocialAuth.objects.filter(id=usa.id).exists()
msg = mail.outbox[-1]
assert msg.subject == "Action Required"
assert msg.to == [self.user.email]
def test_commit_failure(self):
usa = UserSocialAuth.objects.create(user=self.user, provider="dummy")
with self.tasks():
handle_invalid_identity(usa, commit_failure=True)
assert not UserSocialAuth.objects.filter(id=usa.id).exists()
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
|
python
|
from src.models import Movie
from src.api import Api
from flask import request
from os import remove, listdir
from os.path import exists
class Movies(Api):
def __init__(self):
super().__init__()
def create_model(self):
self.model = Movie()
def sanitize_data(self):
data = [
request.json['title'].lower().capitalize(),
            float(request.json['rating']),
request.json['image'].lower(),
request.json['category'].lower().capitalize()
]
return data
    def delete(self, id=None):
        # Never delete the placeholder image shared by movies without art.
        valid_file = lambda f: f != 'default.png'
        if id is None:
            # Bulk delete: clear the table and remove every uploaded image.
            self.model.delete()
            for file in listdir('img/'):
                if valid_file(file):
                    remove(f'img/{file}')
        else:
            filename = self.model.delete_by_id(id)
            if filename is not False:
                if exists(f'img/{filename}') and valid_file(filename):
                    remove(f'img/{filename}')
        return {}, 204
|
python
|
import unittest
from python_tuya_oittm.api_client import ApiClient
TEST_DEVICE_ID = '<enter a device id>'
TEST_ENCRYPTION_KEY = '<enter an encryption key>'
class TestTuyaApiClientIntegratedMethods(unittest.TestCase):
def test_get_metadata(self):
api_client = ApiClient()
data = api_client.get_metadata()
self.assertIsNotNone(data)
def test_get_metadata_with_device_id(self):
api_client = ApiClient()
data = api_client.get_metadata(TEST_DEVICE_ID)
self.assertIsNotNone(data)
def test_get_metadata_with_bad_device_id(self):
api_client = ApiClient()
data = api_client.get_metadata('gibberish', 5)
self.assertEqual(data, False)
def test_get_metadata_with_discovery(self):
api_client = ApiClient()
data = api_client.get_metadata(None, 5, discover=True)
self.assertIsNotNone(data)
def test_get_status(self):
api_client = ApiClient()
metaData = api_client.get_metadata()
status = api_client.get_status(metaData['ip'], metaData['gwId'])
self.assertIsNotNone(status)
def test_set_status(self):
dps = {'dps': '6', 'set': '2'}
api_client = ApiClient()
metaData = api_client.get_metadata()
result = api_client.set_status(
dps, metaData['ip'], metaData['gwId'], TEST_ENCRYPTION_KEY)
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
"""Sentinel Tools API Class
Sentinel Tools - API Class
============================
Simple wrapper class for the Sentinel API that takes a Config object.
This file is a part of Sentinel Tools
Copyright 2020 - MET Norway (Machine Ocean Project)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from sentinelsat import SentinelAPI
from .config import Config
logger = logging.getLogger(__name__)
class AutoAPI():
def __init__(self, config):
self._theConfig = config
self._apiUser = ""
self._apiPass = ""
self._apiURL = "https://scihub.copernicus.eu"
self._theAPI = None
if isinstance(config, Config):
self.apiUser = config.apiUser
self.apiPass = config.apiPass
self.apiURL = config.apiURL
else:
raise ValueError("Property 'config' is not an instance of sentinel.Config")
try:
self._theAPI = SentinelAPI(self._apiUser, self._apiPass, api_url=self._apiURL)
except Exception as e:
logger.error("Could not set up Sentinel API")
logger.error(str(e))
return
return
##
# Properties
##
@property
def apiUser(self):
return self._apiUser
@property
def apiPass(self):
return self._apiPass
@property
def apiURL(self):
return self._apiURL
##
# Setters
##
@apiUser.setter
def apiUser(self, username):
if isinstance(username, str):
self._apiUser = username
else:
raise ValueError("Attribute 'username' must be a string")
return
@apiPass.setter
def apiPass(self, password):
if isinstance(password, str):
self._apiPass = password
else:
raise ValueError("Attribute 'password' must be a string")
return
@apiURL.setter
def apiURL(self, url):
if isinstance(url, str):
self._apiURL = url
else:
raise ValueError("Attribute 'url' must be a string")
return
##
# Getters
##
def getAPI(self):
"""Returns the API object.
"""
if self._theAPI is None or not isinstance(self._theAPI, SentinelAPI):
raise ConnectionError("Not connected to the API")
return self._theAPI
def getConfig(self):
"""Return the config object.
"""
return self._theConfig
# END Class AutoAPI
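# Illustrative usage (assumes a populated sentinel Config; the query
# parameters are examples only):
#   api = AutoAPI(config).getAPI()
#   products = api.query(platformname="Sentinel-1", date=("NOW-1DAY", "NOW"))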
|
python
|
"""
plenum package metadata
"""
import os
import json
from typing import Tuple, List, Union
import collections.abc
from common.version import PlenumVersion, InvalidVersionError
VERSION_FILENAME = '__version__.json'
VERSION_FILE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), VERSION_FILENAME)
def load_version(version_file: str = VERSION_FILE) -> PlenumVersion:
with open(version_file, 'r') as _f:
version = json.load(_f)
if not isinstance(version, collections.abc.Iterable):
raise InvalidVersionError(
"Failed to load from {}: data '{}' is not iterable"
.format(version_file, version)
)
return PlenumVersion('.'.join([str(i) for i in version if str(i)]))
def set_version(version: str, version_file: str = VERSION_FILE):
version = PlenumVersion(version)
with open(version_file, 'w') as _f:
json.dump(['' if i is None else i for i in version.parts], _f)
_f.write('\n')
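# Round-trip sketch: set_version('1.6.0') writes the version parts to
# __version__.json, and load_version() reads them back as a PlenumVersion.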
__title__ = 'indy-plenum'
__version_info__ = (1, 6)
__version__ = '.'.join(map(str, __version_info__))
# TODO activate once new versioning scheme becomes implemented
# Note. double underscores
# _version_info__ = load_version()
# _version__ = __version_info__.full
__author__ = "Hyperledger"
__author_email__ = '[email protected]'
__maintainer__ = 'Hyperledger'
__maintainer_email__ = '[email protected]'
__url__ = 'https://github.com/hyperledger/indy-plenum'
__description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__long_description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__download_url__ = "https://github.com/hyperledger/indy-plenum/tarball/{}".format(__version__)
__license__ = "Apache 2.0"
__all__ = [
'__title__',
'__version_info__',
'__version__',
'__author__',
'__author_email__',
'__maintainer__',
'__maintainer_email__',
'__url__',
'__description__',
'__long_description__',
'__download_url__',
'__license__',
'load_version',
'set_version'
]
|
python
|
from utils.orm.db import BaseModel
import httpx
import json
from sanic.log import logger as _logger
class DemoModel(BaseModel):
__tablename__ = 'demo_model'
async def demo_method(self):
print('demo method')
|
python
|
import json
from operator import itemgetter
from orchard_watch import query_db
from orchard_watch import respond
announcement_schema = [
    ['announcementid', 'longValue'],
    ['title', 'stringValue'],
    ['description', 'stringValue'],
    ['dateTime', 'stringValue'],
    ['email', 'stringValue']
]
def announcement_entry_to_dict(entry):
    """Map a positional RDS Data API record onto the schema's field names."""
    announcement = {}
    for index, data in enumerate(entry):
        # Skip null columns; they carry an 'isNull' marker instead of a value.
        if 'isNull' in data.keys():
            continue
        announcement[announcement_schema[index][0]] = data[announcement_schema[index][1]]
    return announcement
def lambda_handler(event, context):
    print(str(event))
    query = "SELECT * FROM announcements"
    result = query_db(query)
    records = result['records']
    announcements = [announcement_entry_to_dict(record) for record in records]
    # Newest announcements first.
    sorted_announcements = sorted(announcements, key=itemgetter('dateTime'), reverse=True)
    print(sorted_announcements)
    return respond(statusCode="200", res=sorted_announcements)
|
python
|
# Copyright 2021 Adam Byerly. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import pandas as pd
from cycler import cycler
import matplotlib.pyplot as plt
def go(input_file, output_file):
df = pd.read_csv(input_file, header=None)
linestyle_cycler = (cycler("color", ["blue", "orange", "green"])
+ cycler("linestyle", ["solid", "dotted", "dashed"]))
plt.figure(figsize=(7.48, 5.92))
plt.rc("axes", prop_cycle=linestyle_cycler)
plt.plot(df[0])
plt.plot(df[1])
plt.plot(df[2])
plt.legend(["Branch 1", "Branch 2", "Branch 3"])
plt.xticks(ticks=range(len(df[0])),
labels=sum([["", i+2] for i in range(0, 32, 2)], []))
plt.xlabel("Trial #")
plt.savefig(output_file, bbox_inches="tight")
################################################################################
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--input_file",
default=r"../../logs_ms2/final_branch_weights.txt")
p.add_argument("--output_file",
default=r"../../logs_ms2/final_branch_weights.png")
a = p.parse_args()
go(input_file=a.input_file, output_file=a.output_file)
|
python
|
import copy
import logging
from random import uniform
import matplotlib.pyplot as plt
import numpy as np
from morpho import BrillouinZonePath as BZPath
from morpho import SymmetryPoint as SPoint
from scipy.signal import find_peaks
from fdtd import EFieldDetector, Grid, HFieldDetector, Material
from fdtd.boundaries import PeriodicBlochBoundary as PBBoundary
from fdtd.constants import SPEED_LIGHT as C
from fdtd.objects import Sphere
from fdtd.sources import ImpressedElectricCurrentSource as JSource
from fdtd.sources import ImpressedMagneticCurrentSource as MSource
from fdtd.waveforms import GaussianWaveform
logger = logging.getLogger("fdtd")
print("Loading materials...")
# Material.load("materials.json")
print("Defining initial parameters...")
a = 1e-6 # unit cell size
n = 64 # grid 64x64
r = 0.2 * a # cylinder radius
print("Creating grid...")
grid_tm = Grid(shape=(n, n, 1), spacing=(1/n) * a)
grid_te = Grid(shape=(n, n, 1), spacing=(1/n) * a)
print("Creating components...")
diel_mat = Material(name="diel", eps_r=8.9)
def rpoints():
px = uniform(0.1 * a, 0.9 * a)
py = uniform(0.1 * a, 0.9 * a)
return (px, py, 0, px, py, 0)
tau = 20 * (a/20) / (2*C)
t_0 = 4.5 * tau
g_wf = GaussianWaveform(t_0=t_0, tau=tau)
j_sources = [JSource(*rpoints(), waveform=g_wf) for _ in range(5)]
m_sources = [MSource(*rpoints(), waveform=g_wf) for _ in range(5)]
e_detectors = [EFieldDetector(*rpoints()) for _ in range(5)]
h_detectors = [HFieldDetector(*rpoints()) for _ in range(5)]
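# Sources and detectors are placed at random points so that, between them,
# the simulation excites and observes as many Bloch modes as possible.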
p_boundary_tm = PBBoundary(x_direction=True, y_direction=True)
p_boundary_te = copy.deepcopy(p_boundary_tm)
sphere_tm = Sphere(*(0.5 * a, 0.5 * a, 0), radius=r, material=diel_mat)
sphere_te = copy.deepcopy(sphere_tm)
print("Adding components to grid...")
for elm in [*j_sources, *e_detectors, sphere_tm, p_boundary_tm]:
grid_tm.add(elm)
for elm in [*m_sources, *h_detectors, sphere_te, p_boundary_te]:
grid_te.add(elm)
print("Running simulation...")
n_steps = 10000
frames = 10
G = SPoint((0, 0), "Γ")
X = SPoint((1 / 2, 0), "X")
M = SPoint((1 / 2, 1 / 2), "M")  # 2-D point, matching G and X
t1, t2 = (a, 0), (0, a)
bz_path = BZPath([G, X, M, G], t1, t2, n_points=50)
betas = [bz_path.beta_vec[:, col] for col in range(bz_path.beta_vec.shape[1])]
betas_len = bz_path.beta_vec_len
fig, ax = plt.subplots(figsize=(5, 4))
ax.set_xticklabels(bz_path.symmetry_names)
ax.set_xticks(bz_path.symmetry_locations)
ax.set_xlim(0, bz_path.symmetry_locations[-1])
ax.set_ylim(0, 0.8)
ax.set_xlabel(r"Bloch Wave Vector $\beta$")
ax.set_ylabel(r"Frequency $\omega a/2\pi c}$")
ax.grid(True)
fig.tight_layout()
plt.ion()
plt.show()
for beta, beta_len in zip(betas, betas_len):
p_boundary_tm.b_vec = (beta[0], beta[1], 0)
grid_tm.reset()
grid_tm.run(n_steps=n_steps)
print("Showing results...")
psd = np.zeros((n_steps // 2, ))
for e_detector in e_detectors:
e_detector.pos_processing()
psd += np.abs(e_detector.values_freq)**2
peaks, _ = find_peaks(np.abs(psd), threshold=1e-30)
fs = e_detectors[0].freq[peaks]
ax.scatter(beta_len * (1 + 0*fs),
fs * a / C,
color="b",
marker=".",
label="TM")
plt.draw()
plt.pause(0.001)
for beta, beta_len in zip(betas, betas_len):
p_boundary_te.b_vec = (beta[0], beta[1], 0)
grid_te.reset()
grid_te.run(n_steps=n_steps)
print("Showing results...")
psd = np.zeros((n_steps // 2, ))
for h_detector in h_detectors:
h_detector.pos_processing()
psd += np.abs(h_detector.values_freq)**2
peaks, _ = find_peaks(np.abs(psd), threshold=1e-30)
fs = h_detectors[0].freq[peaks]
ax.scatter(beta_len * (1 + 0*fs),
fs * a / C,
color="r",
marker=".",
label="TE")
plt.draw()
plt.pause(0.001)
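# Each scatter call in the loops above added a duplicate "TM"/"TE" label, so
# deduplicate the legend entries before drawing the legend.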
handles, labels = ax.get_legend_handles_labels()
labels, ids = np.unique(labels, return_index=True)
handles = [handles[i] for i in ids]
plt.legend(handles, labels, loc="best")
plt.ioff()
plt.show()
|
python
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# books/__init__.py
#
from pagebot.publications.books.literature import Literature
from pagebot.publications.books.photobook import PhotoBook
BOOK_CLASSES = {
'Literature': Literature, # Single column
#'Multilingual': Multilingual,
'Photo book': PhotoBook,
#'Poetry': Poetry
#'Children book', ChildrenBook
#'Study book': StudyBook,
}
if __name__ == "__main__":
import doctest
import sys
sys.exit(doctest.testmod()[0])
|
python
|
import sys
sys.path.append('objection_engine/')
import objection_engine
# You can also import the components like this
from objection_engine.renderer import render_comment_list
from objection_engine.beans.comment import Comment
foo = [Comment(), Comment(text_content='Second comment', user_name="Second user")]
render_comment_list(foo)
|
python
|
from bs4 import BeautifulSoup
from bs4 import NavigableString
import requests
import logging
import pandas as pd
import json
def get_cleaned_movie(movie_bs4):
"""Get a dictionary of a movie from its bs4 object"""
# parse directors and stars arrays
movie_directors_array = []
movie_stars_array = []
array = movie_directors_array
try:
for content in movie_bs4.find_all('p')[2].contents:
            # switch to the stars array after encountering the 'Stars' NavigableString element
if type(content) == NavigableString:
if 'Stars' in content:
array = movie_stars_array
else:
# check for ghost span if NOT a NavigableString
if 'class' in content.attrs and\
'ghost' in content.attrs['class']:
continue
# add to array if not a ghost span
array.append(content.text)
    except Exception as e:
logging.error(f'Error parsing directors and stars: {e=}, {type(e)=}')
# parse remaining movie fields
movie_name = None
movie_year = None
movie_genres = None
try:
movie_name = movie_bs4.h3.a.text
movie_year = int(movie_bs4.find(class_='lister-item-year').text[-5:-1])
movie_genres = movie_bs4.find(class_='genre')\
.text.replace('\n', '').replace(' ', '').split(',')
except Exception as e:
logging.error(f'Error parsing movie details: {e=}, {type(e)=}')
cleaned_movie = {
'name': movie_name,
'year': movie_year,
'directors': movie_directors_array,
'stars': movie_stars_array,
'genres': movie_genres
}
return cleaned_movie
def get_fake_top_1000_api_response():
"""Returns fake cleaned movie data"""
fake_top_5_movies = json.loads(
'[{"name": "Jai Bhim", "year": 2021, "directors": ["T.J. Gnanavel"], '
'"stars": ["Suriya", "Lijo Mol Jose", "Manikandan", "Rajisha '
'Vijayan"], "genres": ["Crime", "Drama"]}, {"name": "The Shawshank '
'Redemption", "year": 1994, "directors": ["Frank Darabont"], '
'"stars": ["Tim Robbins", "Morgan Freeman", "Bob Gunton", "William '
'Sadler"], "genres": ["Drama"]}, {"name": "The Godfather", "year": '
'1972, "directors": ["Francis Ford Coppola"], "stars": ["Marlon '
'Brando", "Al Pacino", "James Caan", "Diane Keaton"], "genres": ['
'"Crime", "Drama"]}, {"name": "Soorarai Pottru", "year": 2020, '
'"directors": ["Sudha Kongara"], "stars": ["Suriya", "Paresh Rawal", '
'"Urvashi", "Aparna Balamurali"], "genres": ["Drama"]}, {"name": '
'"The Dark Knight", "year": 2008, "directors": ["Christopher '
'Nolan"], "stars": ["Christian Bale", "Heath Ledger", "Aaron '
'Eckhart", "Michael Caine"], "genres": ["Action", "Crime", '
'"Drama"]}]')
return fake_top_5_movies
def get_movies_data(count=100, start=1):
"""Calls imdb (top 1000) api based on count and offset(start) """
url = "https://www.imdb.com/search/title/?groups=top_1000&sort" \
"=user_rating,desc&count={count}&start={" \
"start}&ref_=adv_nxt".format(count=count, start=start)
response = requests.get(url)
# parse response using bs4
soup = BeautifulSoup(response.content, "html.parser")
    movie_list_bs4 = soup.find_all("div", attrs={"class": "lister-item-content"})
# get cleaned movies (list of dictionaries)
cleaned_movies = [get_cleaned_movie(movie) for movie in movie_list_bs4]
return cleaned_movies
def get_top_1000_movies():
"""Returns list of top 1000 imdb movies"""
movies_list = []
count = 100
for start_ix in range(1, 1000, 100):
movies_list += get_movies_data(count, start_ix)
return movies_list
def get_movies_dataframe():
"""returns a dataframe containing the top 1000 imdb movies"""
top_1000_movies = get_top_1000_movies()
movie_df = pd.DataFrame.from_dict(top_1000_movies)
return movie_df
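# Illustrative usage (assumption: this block is not part of the original
# module; it needs network access to imdb.com and the page markup must still
# match the selectors above):
if __name__ == '__main__':
    movie_df = get_movies_dataframe()
    print(movie_df.head())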
|
python
|
from ...kernel import core
from ...kernel.core import VSkillModifier as V
from ...kernel.core import CharacterModifier as MDF
from ...character import characterKernel as ck
from functools import partial
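# Builds the "Aura Weapon" V-skill set: a timed buff, a cooldown dummy that
# throttles the wave proc, and the wave damage skill that fires off other
# skills while the buff is active.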
class AuraWeaponBuilder():
def __init__(self, enhancer, skill_importance, enhance_importance):
self.AuraWeaponBuff = core.BuffSkill(
"오라 웨폰(버프)", 0, (80 +2*enhancer.getV(skill_importance,enhance_importance)) * 1000,
cooltime = 180 * 1000, armor_ignore = 15, pdamage_indep = (enhancer.getV(skill_importance, enhance_importance) // 5)
        ).isV(enhancer, skill_importance, enhance_importance).wrap(core.BuffSkillWrapper)  # TODO: keep these two skills synchronized!
        self.AuraWeaponCooltimeDummy = core.BuffSkill("오라웨폰(딜레이 더미)", 0, 5000, cooltime = -1).wrap(core.BuffSkillWrapper)  # After one activation, blocks re-activation while this dummy buff lasts.
self.target_skill = core.DamageSkill("오라웨폰(파동)", 0, 500 + 20 * enhancer.getV(skill_importance,enhance_importance), 6).wrap(core.DamageSkillWrapper)
self.target_skill.onAfter(self.AuraWeaponCooltimeDummy)
self.optional_skill = core.OptionalElement(lambda : (self.AuraWeaponCooltimeDummy.is_not_active() and self.AuraWeaponBuff.is_active()), self.target_skill)
def add_aura_weapon(self, origin_skill):
origin_skill.onAfter(self.optional_skill)
def get_buff(self):
return self.AuraWeaponBuff, self.AuraWeaponCooltimeDummy
|
python
|
import datetime
import decimal
import functools
from sqlalchemy.ext.associationproxy import _AssociationList
from geojson import dumps as _dumps
from geojson.codec import PyGFPEncoder
class GeoJSONEncoder(PyGFPEncoder):
# SQLAlchemy's Reflecting Tables mechanism uses decimal.Decimal
# for numeric columns and datetime.date for dates. Python json
# doesn't deal with these types. This class provides a simple
# encoder to deal with objects of these types.
def default(self, obj):
if isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
return obj.isoformat()
if isinstance(obj, _AssociationList):
return list(obj)
if isinstance(obj, decimal.Decimal):
# The decimal is converted to a lossy float
return float(obj)
return PyGFPEncoder.default(self, obj)
dumps = functools.partial(_dumps, cls=GeoJSONEncoder)
"""
A partial function for ``geojson.dumps`` that sets ``cls`` to
:class:`GeoJSONEncoder`.
"""
|
python
|
# Generated by Django 3.1.7 on 2021-03-25 19:48
from django.db import migrations, models
import django.utils.timezone
import phonenumber_field.modelfields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='EmailConfirmation',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('email', models.EmailField(max_length=255, verbose_name='email')),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('short_code', models.CharField(max_length=8, null=True, unique=True, verbose_name='Short code')),
('category', models.CharField(max_length=200, verbose_name='Location category')),
('category_description', models.CharField(max_length=200, null=True, verbose_name='Location category description')),
('name', models.CharField(max_length=200, verbose_name='Location name')),
('address', models.CharField(max_length=200, verbose_name='Address line 1')),
('address_2', models.CharField(max_length=200, verbose_name='Address line 2')),
('city', models.CharField(max_length=100, verbose_name='City')),
('province', models.CharField(max_length=100, verbose_name='Province')),
('postal_code', models.CharField(max_length=10, verbose_name='Postal code')),
('contact_name', models.CharField(max_length=200, null=True, verbose_name='Name of contact')),
('contact_email', models.EmailField(max_length=255, verbose_name='Email address')),
('contact_phone', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Registrant',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('name', models.CharField(max_length=200, verbose_name='Full name')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
],
),
]
|
python
|
import logging
import allure
import pytest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from models.auth import AuthData
from pages.app import Application
logger = logging.getLogger("moodle")
@pytest.fixture(scope="session")
def app(request):
base_url = request.config.getoption("--base-url")
chrome_options = Options()
chrome_options.headless = request.config.getoption("--headless")
fixture = Application(
webdriver.Chrome(
ChromeDriverManager().install(), chrome_options=chrome_options
),
base_url,
)
yield fixture
fixture.quit()
def pytest_addoption(parser):
parser.addoption(
"--base-url",
action="store",
default="https://qacoursemoodle.innopolis.university",
help="enter base_url",
    )
parser.addoption(
"--login",
action="store",
default="admi-test",
help="enter userlogin",
    )
parser.addoption(
"--password",
action="store",
default="TestQa-1",
help="enter password",
    )
parser.addoption(
"--headless",
action="store_true",
default=False,
help="launching browser without gui",
    )
@pytest.fixture
def auth(app, request):
username = request.config.getoption("--login")
password = request.config.getoption("--password")
app.open_auth_page()
auth_data = AuthData(login=username, password=password)
app.login.auth(auth_data)
    assert app.login.is_auth(), "You are not authenticated"
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item):
outcome = yield
rep = outcome.get_result()
if rep.when == "call" and rep.failed:
try:
if "app" in item.fixturenames:
web_driver = item.funcargs["app"]
else:
logger.error("Fail to take screen-shot")
return
logger.info("Screen-shot done")
allure.attach(
web_driver.driver.get_screenshot_as_png(),
name="screenshot",
attachment_type=allure.attachment_type.PNG,
)
except Exception as e:
logger.error("Fail to take screen-shot: {}".format(e))
|
python
|
class RouteException(Exception):
"""Raise when there is a general problem in a route"""
def __init__(self, message, status_code=500, data=None, *args):
self.message = message
self.status_code = status_code
self.data = data
super().__init__(message, status_code, data, *args)
class BadRequestException(RouteException):
def __init__(self, message, status_code=400, data=None, *args):
super().__init__(message, status_code, data, *args)
class DuplicatePrimaryKeyException(RouteException):
def __init__(self, id, status_code=409, data=None, *args):
super().__init__('primaryKey already exists: \'%s\'' % id, status_code, data, *args)
class UnauthorizedException(RouteException):
def __init__(self, message, status_code=401, data=None, *args):
super().__init__(message, status_code, data, *args)
class NotFoundException(RouteException):
def __init__(self, message, status_code=404, data=None, *args):
super().__init__(message, status_code, data, *args)
class HasLinksException(RouteException):
def __init__(self, message, status_code=406, data=None, *args):
super().__init__(message, status_code, data, *args)
|
python
|
"""
class to extract domain-specific terms,
for example 'JavaScript' in CS and 'limit' in math
"""
import logging
logger = logging.getLogger(__name__)
from DST.utils.TermUtil import filterTerm
class DomainTerm(object):
"""
class to extract domain-specific terms
"""
def __init__(self, maxTermsCount=300000, thresholdScore=10.0, termFreqRange=(30, float("inf"))):
"""
:param maxTermsCount: the max number of domain terms
        :param thresholdScore: words scoring above thresholdScore will be recognized as domain terms
        :param termFreqRange: tuple-like object (minFreq, maxFreq); if a word's frequency is not in the range,
        it will not be considered a term
"""
self.maxTermsCount = maxTermsCount
self.thresholdScore = thresholdScore
self.termFreqRange = termFreqRange
def extract_term(self, domainSpecificVocab, generalVocab):
"""
        extract domain-specific terms
        :param domainSpecificVocab: all words in the domain corpus, dict object, {"word1": count1, "word2": count2, ...}
        :param generalVocab: all words in the general corpus, dict object, {"word1": count1, "word2": count2, ...}
        :return: list of terms, [term1, term2, term3, ...]
"""
# get word count in the two vocabulary
domainSpecificVocabCount, generalVocabCount = 0.0, 0.0
for _, v in domainSpecificVocab.items():
domainSpecificVocabCount += v
for _, v in generalVocab.items():
generalVocabCount += v
# extract domain specific terms
candidateTerms = []
for word, freq in domainSpecificVocab.items():
if freq < self.termFreqRange[0] or freq > self.termFreqRange[1] or not filterTerm(word):
continue
if word not in generalVocab:
candidateTerms.append((word, float("inf")))
else:
score = (freq / domainSpecificVocabCount) / (generalVocab[word] / generalVocabCount)
if score > self.thresholdScore:
candidateTerms.append((word, score))
candidateTerms.sort(key=lambda x: x[1], reverse=True)
terms = candidateTerms[0:self.maxTermsCount]
logging.info("extract %d terms in total" % len(terms))
return [term[0] for term in terms]
if __name__ == "__main__":
print(30, 1 < float("inf"))
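    # Illustrative toy run (assumption: these words pass filterTerm; the
    # expected output is ['javascript', 'closure']).
    domain_vocab = {"javascript": 120, "closure": 80, "the": 900}
    general_vocab = {"the": 100000, "closure": 40, "and": 90000}
    extractor = DomainTerm(thresholdScore=5.0)
    print(extractor.extract_term(domain_vocab, general_vocab))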
|
python
|
"""Bare wrapper around IPOPT using numpy arrays and exception handling.
This module has an intermediate level of abstraction in the wrapping. Exception
handling is done in the callbacks and the error is signalled back to IPOPT. The
parameters in the callbacks are converted to ndarrays and the function inputs
are validated more thoroughly. However, the functions and callbacks map almost
directly to the IPOPT c interface.
"""
import functools
import numpy as np
from numpy.ctypeslib import as_array
from . import bare
def default_handler(e):
"""Exception handler for IPOPT ctypes callbacks, prints the traceback."""
import traceback
traceback.print_exc()
class Problem:
def __init__(self, x_bounds, g_bounds, nele_jac, nele_hess, index_style,
f, g, grad_f, jac_g, h=None, *, handler=default_handler):
# Unpack and validate decision variable bounds
x_L, x_U = x_bounds
x_L = np.require(x_L, np.double, ['A', 'C'])
x_U = np.require(x_U, np.double, ['A', 'C'])
n = x_L.size
if x_U.size != n:
raise ValueError("Inconsistent sizes of 'x' lower and upper bounds")
# Unpack and validate constraint bounds
g_L, g_U = g_bounds
g_L = np.require(g_L, np.double, ['A', 'C'])
g_U = np.require(g_U, np.double, ['A', 'C'])
m = g_L.size
if g_U.size != m:
raise ValueError("Inconsistent sizes of 'g' lower and upper bounds")
# Wrap the callbacks
eval_f = wrap_f(f, handler)
eval_g = wrap_g(g, handler)
eval_grad_f = wrap_grad_f(grad_f, handler)
eval_jac_g = wrap_jac_g(jac_g, handler)
eval_h = wrap_h(h, handler) if h is not None else bare.Eval_H_CB()
problem = bare.CreateIpoptProblem(
n, data_ptr(x_L), data_ptr(x_U), m, data_ptr(g_L), data_ptr(g_U),
nele_jac, nele_hess, index_style,
eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h
)
if not problem:
raise RuntimeError('Error creating IPOPT problem')
# Save object data
self._problem = problem
"""Pointer to the underlying `IpoptProblemInfo` structure."""
self.n = n
"""Number of decision variables (length of `x`)."""
self.m = m
"""Number of constraints (length of `g`)."""
self._callbacks = dict(
eval_f=eval_f, eval_g=eval_g, eval_grad_f=eval_grad_f,
eval_jac_g=eval_jac_g, eval_h=eval_h
)
"""Reference to callbacks to ensure they aren't garbage collected."""
# Set options
if h is None:
self.add_str_option('hessian_approximation', 'limited-memory')
def free(self):
if not self._problem:
raise RuntimeError('Problem invalid or already freed')
bare.FreeIpoptProblem(self._problem)
del self._callbacks
self._problem = None
def add_str_option(self, keyword, val):
if not bare.AddIpoptStrOption(self._problem, keyword, val):
            raise ValueError(f'invalid option or value for {keyword!r}')
def add_int_option(self, keyword, val):
if not bare.AddIpoptIntOption(self._problem, keyword, val):
            raise ValueError(f'invalid option or value for {keyword!r}')
def add_num_option(self, keyword, val):
if not bare.AddIpoptNumOption(self._problem, keyword, val):
            raise ValueError(f'invalid option or value for {keyword!r}')
def open_output_file(self, file_name, print_level):
if not bare.OpenIpoptOutputFile(self._problem, file_name, print_level):
raise RuntimeError("error opening output file")
def set_scaling(self, obj_scaling, x_scaling, g_scaling):
x_scaling = np.require(x_scaling, np.double, 'A')
g_scaling = np.require(g_scaling, np.double, 'A')
if x_scaling.shape != (self.n,):
raise ValueError('invalid shape for the x scaling')
if g_scaling.shape != (self.m,):
raise ValueError('invalid shape for the g scaling')
obj_s = float(obj_scaling)
x_s = data_ptr(x_scaling)
g_s = data_ptr(g_scaling)
if not bare.SetIpoptProblemScaling(self._problem, obj_s, x_s, g_s):
raise RuntimeError("error setting problem scaling")
self.add_str_option('nlp_scaling_method', 'user-scaling')
def set_intermediate_callback(self, cb):
intermediate_cb = wrap_intermediate_cb(cb)
if not bare.SetIntermediateCallback(self._problem, intermediate_cb):
raise RuntimeError("error setting problem intermediate callback")
self._callbacks['intermediate_cb'] = intermediate_cb
def solve(self, x, g=None, obj_val=None,
mult_g=None, mult_x_L=None, mult_x_U=None):
exc = (validate_io_array(x, (self.n,), 'x', none_ok=False)
or validate_io_array(g, (self.m,), 'g')
or validate_io_array(obj_val, (), 'obj_val')
or validate_io_array(mult_g, (self.m,), 'mult_g')
or validate_io_array(mult_x_L, (self.n,), 'mult_x_L')
or validate_io_array(mult_x_U, (self.n,), 'mult_x_U'))
if exc:
raise exc
return bare.IpoptSolve(self._problem, data_ptr(x), data_ptr(g),
data_ptr(obj_val), data_ptr(mult_g),
data_ptr(mult_x_L), data_ptr(mult_x_U), None)
def __enter__(self):
if not self._problem:
raise RuntimeError('Invalid context or reentering context.')
return self
def __exit__(self, exc_type, exc_value, traceback):
self.free()
def wrap_f(f, handler=default_handler):
@functools.wraps(f)
@bare.Eval_F_CB
def wrapper(n, x, new_x, obj_value, user_data):
try:
x_array = as_array(x, (n,))
obj_value_array = as_array(obj_value, ())
return f(x_array, new_x, obj_value_array)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def wrap_grad_f(grad_f, handler=default_handler):
@functools.wraps(grad_f)
@bare.Eval_Grad_F_CB
def wrapper(n, x, new_x, grad_ptr, user_data):
try:
x_array = as_array(x, (n,))
grad_f_array = as_array(grad_ptr, (n,))
return grad_f(x_array, new_x, grad_f_array)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def wrap_g(g, handler=default_handler):
@functools.wraps(g)
@bare.Eval_G_CB
def wrapper(n, x, new_x, m, g_ptr, user_data):
try:
x_array = as_array(x, (n,))
g_array = as_array(g_ptr, (m,))
return g(x_array, new_x, g_array)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def wrap_jac_g(jac_g, handler=default_handler):
@functools.wraps(jac_g)
@bare.Eval_Jac_G_CB
def wrapper(n, x, new_x, m, nele_jac, iRow, jCol, values, user_data):
try:
x_array = as_array(x, (n,)) if x else None
i_array = as_array(iRow, (nele_jac,)) if iRow else None
j_array = as_array(jCol, (nele_jac,)) if jCol else None
values_array = as_array(values, (nele_jac,)) if values else None
return jac_g(x_array, new_x, i_array, j_array, values_array)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def wrap_h(h, handler=default_handler):
@functools.wraps(h)
@bare.Eval_H_CB
def wrapper(n, x, new_x, obj_factor, m, mult, new_mult, nele_hess,
iRow, jCol, values, user_data):
try:
x_array = as_array(x, (n,)) if x else None
mult_array = as_array(mult, (m,)) if mult else None
i_array = as_array(iRow, (nele_hess,)) if iRow else None
j_array = as_array(jCol, (nele_hess,)) if jCol else None
values_array = as_array(values, (nele_hess,)) if values else None
return h(x_array, new_x, obj_factor, mult_array, new_mult,
i_array, j_array, values_array)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def wrap_intermediate_cb(cb, handler=default_handler):
@functools.wraps(cb)
@bare.Intermediate_CB
def wrapper(alg_mod, iter_count, obj_value, inf_pr, inf_du, mu, d_norm,
regularization_size, alpha_du, alpha_pr, ls_trials, user_data):
try:
return cb(alg_mod, iter_count, obj_value, inf_pr, inf_du, mu,
d_norm, regularization_size, alpha_du, alpha_pr,ls_trials)
except BaseException as e:
if callable(handler):
handler(e)
return 0
return wrapper
def validate_io_array(a, shape, name, none_ok=True):
if none_ok and a is None:
return
if none_ok and not isinstance(a, np.ndarray):
return TypeError(f'{name} must be a numpy ndarray instance or None')
if not none_ok and not isinstance(a, np.ndarray):
return TypeError(f'{name} must be a numpy ndarray instance')
if a.dtype != np.double:
return TypeError(f'{name} must be an array of doubles')
if not (a.flags['A'] and a.flags['W']):
return ValueError(f'{name} must be an aligned writeable array')
if a.shape != shape:
return ValueError(f'invalid shape for {name}')
def data_ptr(arr):
if arr is None:
return arr
assert isinstance(arr, np.ndarray)
assert arr.dtype == np.double
return arr.ctypes.data_as(bare.c_double_p) if arr.size else None
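# --- Illustrative usage sketch (assumption: this block is not part of the
# original module and needs a working IPOPT install behind the `bare` ctypes
# layer). Minimizes x0**2 + x1**2 subject to x0 + x1 >= 1; solution (0.5, 0.5).
if __name__ == "__main__":
    def f(x, new_x, obj_value):
        obj_value[()] = x[0]**2 + x[1]**2
        return 1

    def grad_f(x, new_x, grad):
        grad[:] = 2 * x
        return 1

    def g(x, new_x, g_out):
        g_out[0] = x[0] + x[1]
        return 1

    def jac_g(x, new_x, i, j, values):
        if values is None:
            # Structure callback: dense 1x2 Jacobian, C-style (0-based) indices.
            i[:] = [0, 0]
            j[:] = [0, 1]
        else:
            values[:] = [1.0, 1.0]
        return 1

    x_bounds = ([-10.0, -10.0], [10.0, 10.0])
    g_bounds = ([1.0], [2e19])  # 2e19 is IPOPT's conventional "infinity"
    with Problem(x_bounds, g_bounds, nele_jac=2, nele_hess=0, index_style=0,
                 f=f, g=g, grad_f=grad_f, jac_g=jac_g) as problem:
        x = np.array([0.0, 0.0])
        status = problem.solve(x)
        print(status, x)  # x is expected to approach [0.5, 0.5]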
|
python
|
import omemo
from omemo_backend_signal import BACKEND as SignalBackend
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "tests")))
from deletingotpkpolicy import DeletingOTPKPolicy
from dr_chat import mainLoop
import example_data
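# Python 2 compatibility: alias raw_input to input (a NameError means we are
# already running under Python 3 and no alias is needed).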
try:
input = raw_input
except NameError:
pass
X3DHDoubleRatchet = omemo.make_X3DHDoubleRatchet(SignalBackend)
def main(who, use_wireformat=False):
alice_state = X3DHDoubleRatchet()
bob_state = X3DHDoubleRatchet()
alice_public_bundle = alice_state.getPublicBundle()
bob_public_bundle = bob_state.getPublicBundle()
if use_wireformat:
# Ask for the initial message to send
initial_message = input("Initial message: ")
if who == "a":
# Prepare the session init data and the DoubleRatchet from the active part
session_init_data = alice_state.getSharedSecretActive(bob_public_bundle)
alice_dr = session_init_data["dr"]
session_init_data = session_init_data["to_other"]
if use_wireformat:
# Encrypt the initial message
initial_message_encrypted = alice_dr.encryptMessage(
initial_message.encode("UTF-8")
)
# Prepare the message
initial_message_serialized = SignalBackend.WireFormat.messageToWire(
initial_message_encrypted["ciphertext"],
initial_message_encrypted["header"],
{
"DoubleRatchet": initial_message_encrypted["additional"]
}
)
# Bundle the session init data and the initial message into a pre_key packet
pre_key_message_serialized = SignalBackend.WireFormat.preKeyMessageToWire(
session_init_data,
initial_message_serialized,
{
"DoubleRatchet": initial_message_encrypted["additional"]
}
)
# Send to the receiver...
# Unpack the session init data into the initial message
pre_key_message = SignalBackend.WireFormat.preKeyMessageFromWire(
pre_key_message_serialized
)
initial_message_serialized = pre_key_message["message"]
# Unpack the contained message
initial_message_encrypted = SignalBackend.WireFormat.messageFromWire(
initial_message_serialized
)
# Create the session for the passive part
bob_dr = bob_state.getSharedSecretPassive(
pre_key_message["session_init_data"],
example_data.ALICE_BARE_JID,
example_data.ALICE_DEVICE_ID,
DeletingOTPKPolicy
)
# Decrypt the initial message
initial_message_plaintext = bob_dr.decryptMessage(
initial_message_encrypted["ciphertext"],
initial_message_encrypted["header"]
)
# Check the authentication
SignalBackend.WireFormat.finalizeMessageFromWire(
initial_message_serialized,
{
"DoubleRatchet": initial_message_plaintext["additional"],
"WireFormat": initial_message_encrypted["additional"]
}
)
initial_message_plaintext = initial_message_plaintext["plaintext"]
initial_message_plaintext = initial_message_plaintext.decode("UTF-8")
else:
# Otherwise, just initialize the passive session directly
bob_dr = bob_state.getSharedSecretPassive(
session_init_data,
example_data.ALICE_BARE_JID,
example_data.ALICE_DEVICE_ID,
DeletingOTPKPolicy
)
if who == "b":
session_init_data = bob_state.getSharedSecretActive(alice_public_bundle)
bob_dr = session_init_data["dr"]
session_init_data = session_init_data["to_other"]
if use_wireformat:
# Encrypt the initial message
initial_message_encrypted = bob_dr.encryptMessage(
initial_message.encode("UTF-8")
)
# Prepare the message
initial_message_serialized = SignalBackend.WireFormat.messageToWire(
initial_message_encrypted["ciphertext"],
initial_message_encrypted["header"],
{
"DoubleRatchet": initial_message_encrypted["additional"]
}
)
# Bundle the session init data and the initial message into a pre_key packet
pre_key_message_serialized = SignalBackend.WireFormat.preKeyMessageToWire(
session_init_data,
initial_message_serialized,
{
"DoubleRatchet": initial_message_encrypted["additional"]
}
)
# Send to the receiver...
# Unpack the session init data into the initial message
pre_key_message = SignalBackend.WireFormat.preKeyMessageFromWire(
pre_key_message_serialized
)
initial_message_serialized = pre_key_message["message"]
# Unpack the contained message
initial_message_encrypted = SignalBackend.WireFormat.messageFromWire(
initial_message_serialized
)
# Create the session for the passive part
alice_dr = alice_state.getSharedSecretPassive(
pre_key_message["session_init_data"],
example_data.BOB_BARE_JID,
example_data.BOB_DEVICE_ID,
DeletingOTPKPolicy
)
# Decrypt the initial message
initial_message_plaintext = alice_dr.decryptMessage(
initial_message_encrypted["ciphertext"],
initial_message_encrypted["header"]
)
# Check the authentication
SignalBackend.WireFormat.finalizeMessageFromWire(
initial_message_serialized,
{
"DoubleRatchet": initial_message_plaintext["additional"],
"WireFormat": initial_message_encrypted["additional"]
}
)
initial_message_plaintext = initial_message_plaintext["plaintext"]
initial_message_plaintext = initial_message_plaintext.decode("UTF-8")
else:
# Otherwise, just initialize the passive session directly
alice_dr = alice_state.getSharedSecretPassive(
session_init_data,
example_data.BOB_BARE_JID,
example_data.BOB_DEVICE_ID,
DeletingOTPKPolicy
)
if use_wireformat:
print("Initial message received: " + initial_message_plaintext)
mainLoop(alice_dr, bob_dr, use_wireformat)
if __name__ == "__main__":
if len(sys.argv) < 3:
while True:
who = input("Who should actively initialize the session? (a or b): ")
if who in ["a", "b"]:
break
while True:
use_wireformat = input("Use the wireformat? (y or n): ")
if use_wireformat in ["y", "n"]:
break
else:
who = sys.argv[1]
use_wireformat = sys.argv[2]
main(who, use_wireformat == "y")
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), #
# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import distutils.spawn
import logging
import os
import re
import shutil
import tempfile
from subprocess import Popen, PIPE
import cclib
from rmgpy.exceptions import DependencyError
from rmgpy.molecule.molecule import Molecule
from rmgpy.qm.molecule import QMMolecule
class Mopac(object):
"""
A base class for all QM calculations that use MOPAC.
Classes such as :class:`MopacMol` will inherit from this class.
"""
inputFileExtension = '.mop'
outputFileExtension = '.out'
executablesToTry = ('MOPAC2016.exe', 'MOPAC2012.exe', 'MOPAC2009.exe', 'mopac')
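    # Search the PATH for a MOPAC executable at class-definition time; the
    # for/else below falls through to the MOPAC_DIR environment variable when
    # nothing is found on the PATH.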
for exe in executablesToTry:
try:
executablePath = distutils.spawn.find_executable(exe)
        except Exception:
executablePath = None
if executablePath is not None:
break
else: # didn't break
logging.debug("Did not find MOPAC on path, checking if it exists in a declared MOPAC_DIR...")
mopacEnv = os.getenv('MOPAC_DIR', default="/opt/mopac")
for exe in executablesToTry:
executablePath = os.path.join(mopacEnv, exe)
if os.path.exists(executablePath):
break
else: # didn't break
executablePath = os.path.join(mopacEnv, '(MOPAC 2009 or 2012 or 2016)')
usePolar = False # use polar keyword in MOPAC
"Keywords for the multiplicity"
multiplicityKeywords = {
1: '',
2: 'uhf doublet',
3: 'uhf triplet',
4: 'uhf quartet',
5: 'uhf quintet',
6: 'uhf sextet',
7: 'uhf septet',
8: 'uhf octet',
9: 'uhf nonet',
}
#: List of phrases that indicate failure
    #: NONE of these must be present in a successful job.
failureKeys = [
'IMAGINARY FREQUENCIES',
'EXCESS NUMBER OF OPTIMIZATION CYCLES',
'NOT ENOUGH TIME FOR ANOTHER CYCLE',
]
#: List of phrases to indicate success.
#: ALL of these must be present in a successful job.
successKeys = [
'DESCRIPTION OF VIBRATIONS',
'MOPAC DONE'
]
def test_ready(self):
if not os.path.exists(self.executablePath):
raise DependencyError("Couldn't find MOPAC executable at {0}. Try setting your MOPAC_DIR "
"environment variable.".format(self.executablePath))
# Check if MOPAC executable works properly
process = Popen(self.executablePath,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
stdout, stderr = process.communicate()
self.expired = False
stderr = stderr.decode('utf-8')
if 'has expired' in stderr:
# The MOPAC executable is expired
logging.warning('\n'.join(stderr.split('\n')[2:7]))
self.expired = True
elif 'To install the MOPAC license' in stderr:
# The MOPAC executable exists, but the license has not been installed
raise DependencyError('\n'.join(stderr.split('\n')[0:9]))
elif 'MOPAC_LICENSE' in stderr:
# The MOPAC executable is in the wrong location on Windows; MOPAC_LICENSE must be set
raise DependencyError('\n'.join(stderr.split('\n')[0:11]))
def run(self):
self.test_ready()
# submits the input file to mopac
dirpath = tempfile.mkdtemp()
# copy input file to temp dir:
tempInpFile = os.path.join(dirpath, os.path.basename(self.input_file_path))
shutil.copy(self.input_file_path, dirpath)
process = Popen([self.executablePath, tempInpFile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
command = b'\n' if self.expired else None # press enter to pass expiration notice
stdout, stderr = process.communicate(input=command) # necessary to wait for executable termination!
if b"ended normally" not in stderr.strip():
logging.warning("Mopac error message:" + stderr.decode('utf-8'))
# copy output file from temp dir to output dir:
tempOutFile = os.path.join(dirpath, os.path.basename(self.output_file_path))
shutil.copy(tempOutFile, self.output_file_path)
# delete temp folder:
shutil.rmtree(dirpath)
return self.verify_output_file()
def verify_output_file(self):
"""
        Checks that an output file exists and was successful.
Returns a boolean flag that states whether a successful MOPAC simulation already exists for the molecule with the
given (augmented) InChI Key.
The definition of finding a successful simulation is based on these criteria:
1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that denote a calculation failure
3) finding all the keywords that denote a calculation success.
4) finding a match between the InChI of the given molecule and the InchI found in the calculation files
5) checking that the optimized geometry, when connected by single bonds, is isomorphic with self.molecule (converted to single bonds)
If any of the above criteria is not matched, False will be returned.
If all succeed, then it will return True.
"""
if not os.path.exists(self.output_file_path):
logging.debug("Output file {0} does not (yet) exist.".format(self.output_file_path))
return False
        inchi_found = False  # flag indicating whether an InChI was found in the log file
# Initialize dictionary with "False"s
success_keys_found = dict([(key, False) for key in self.successKeys])
with open(self.output_file_path) as outputFile:
for line in outputFile:
line = line.strip()
for element in self.failureKeys: # search for failure keywords
if element in line:
logging.error("MOPAC output file contains the following error: {0}".format(element))
return False
for element in self.successKeys: # search for success keywords
if element in line:
success_keys_found[element] = True
if "InChI=" in line:
log_file_inchi = line # output files should take up to 240 characters of the name in the input file
inchi_found = True
if self.unique_id_long in log_file_inchi:
pass
elif self.unique_id_long.startswith(log_file_inchi):
logging.info("InChI too long to check, but beginning matches so assuming OK.")
else:
logging.warning("InChI in log file ({0}) didn't match that in geometry "
"({1}).".format(log_file_inchi, self.unique_id_long))
# Use only up to first 80 characters to match due to MOPAC bug which deletes 81st character of InChI string
if self.unique_id_long.startswith(log_file_inchi[:80]):
logging.warning("but the beginning matches so it's probably just a truncation problem.")
# Check that ALL 'success' keywords were found in the file.
if not all(success_keys_found.values()):
logging.error('Not all of the required keywords for success were found in the output file!')
return False
if not inchi_found:
logging.error("No InChI was found in the MOPAC output file {0}".format(self.output_file_path))
return False
# Compare the optimized geometry to the original molecule
qm_data = self.parse()
cclib_mol = Molecule()
cclib_mol.from_xyz(qm_data.atomicNumbers, qm_data.atomCoords.value)
test_mol = self.molecule.to_single_bonds()
if not cclib_mol.is_isomorphic(test_mol):
logging.info("Incorrect connectivity for optimized geometry in file {0}".format(self.output_file_path))
return False
logging.info("Successful {1} quantum result in {0}".format(self.output_file_path, self.__class__.__name__))
return True
def get_parser(self, output_file):
"""
Returns the appropriate cclib parser.
"""
return cclib.parser.Mopac(output_file)
class MopacMol(QMMolecule, Mopac):
"""
A base Class for calculations of molecules using MOPAC.
Inherits from both :class:`QMMolecule` and :class:`Mopac`.
"""
#: Keywords that will be added at the top and bottom of the qm input file
keywords = [
{'top': "precise nosym THREADS=1", 'bottom': "oldgeo thermo nosym precise THREADS=1 "},
{'top': "precise nosym gnorm=0.0 nonr THREADS=1", 'bottom': "oldgeo thermo nosym precise THREADS=1 "},
{'top': "precise nosym gnorm=0.0 THREADS=1", 'bottom': "oldgeo thermo nosym precise THREADS=1 "},
{'top': "precise nosym gnorm=0.0 bfgs THREADS=1", 'bottom': "oldgeo thermo nosym precise THREADS=1 "},
{'top': "precise nosym recalc=10 dmax=0.10 nonr cycles=2000 t=2000 THREADS=1", 'bottom': "oldgeo thermo nosym precise THREADS=1 "},
]
def write_input_file(self, attempt):
"""
Using the :class:`Geometry` object, write the input file
for the `attempt`.
"""
molfile = self.get_mol_file_path_for_calculation(attempt)
        atomline = re.compile(r'\s*([\- ][0-9.]+)\s+([\- ][0-9.]+)+\s+([\- ][0-9.]+)\s+([A-Za-z]+)')
output = [self.geometry.unique_id_long, '']
atom_count = 0
with open(molfile) as molinput:
for line in molinput:
match = atomline.match(line)
if match:
output.append("{0:4s} {1} 1 {2} 1 {3} 1".format(match.group(4), match.group(1),
match.group(2), match.group(3)))
atom_count += 1
assert atom_count == len(self.molecule.atoms)
output.append('')
input_string = '\n'.join(output)
top_keys, bottom_keys, polar_keys = self.input_file_keywords(attempt)
with open(self.input_file_path, 'w') as mopac_file:
mopac_file.write(top_keys)
mopac_file.write('\n')
mopac_file.write(input_string)
mopac_file.write('\n')
mopac_file.write(bottom_keys)
if self.usePolar:
mopac_file.write('\n\n\n')
mopac_file.write(polar_keys)
def input_file_keywords(self, attempt):
"""
Return the top, bottom, and polar keywords.
"""
raise NotImplementedError("Should be defined by subclass, eg. MopacMolPM3")
def generate_qm_data(self):
"""
Calculate the QM data and return a QMData object, or None if it fails.
"""
for atom in self.molecule.vertices:
if atom.charge != 0:
return None
if self.verify_output_file():
logging.info("Found a successful output file already; using that.")
source = "QM {0} calculation found from previous run.".format(self.__class__.__name__)
else:
self.create_geometry()
success = False
for attempt in range(1, self.max_attempts + 1):
self.write_input_file(attempt)
logging.info('Trying {3} attempt {0} of {1} on molecule {2}.'.format(attempt, self.max_attempts,
self.molecule.to_smiles(),
self.__class__.__name__))
success = self.run()
if success:
logging.info('Attempt {0} of {1} on species {2} succeeded.'.format(attempt, self.max_attempts,
self.molecule.to_augmented_inchi()))
source = "QM {0} calculation attempt {1}".format(self.__class__.__name__, attempt)
break
else:
logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.to_augmented_inchi()))
return None
result = self.parse() # parsed in cclib
result.source = source
return result
class MopacMolPMn(MopacMol):
"""
Mopac PMn calculations for molecules (n undefined here)
This is a parent class for MOPAC PMn calculations.
Inherit it, and define the pm_method, then redefine
anything you wish to do differently.
"""
pm_method = '(should be defined by sub class)'
def input_file_keywords(self, attempt):
"""
Return the top, bottom, and polar keywords for attempt number `attempt`.
NB. `attempt` begins at 1, not 0.
"""
assert attempt <= self.max_attempts
if attempt > self.script_attempts:
attempt -= self.script_attempts
multiplicity_keys = self.multiplicityKeywords[self.geometry.molecule.multiplicity]
top_keys = "{method} {mult} {top}".format(
method=self.pm_method,
mult=multiplicity_keys,
top=self.keywords[attempt - 1]['top'],
)
bottom_keys = "{bottom} {method} {mult}".format(
method=self.pm_method,
bottom=self.keywords[attempt - 1]['bottom'],
mult=multiplicity_keys,
)
polar_keys = "oldgeo {polar} nosym precise {method} {mult}".format(
method=self.pm_method,
polar=('polar' if self.geometry.molecule.multiplicity == 1 else 'static'),
mult=multiplicity_keys,
)
return top_keys, bottom_keys, polar_keys
class MopacMolPM3(MopacMolPMn):
"""
Mopac PM3 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's the same as all the MOPAC PMn calculations, only pm3
"""
pm_method = 'pm3'
class MopacMolPM6(MopacMolPMn):
"""
Mopac PM6 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's the same as all the MOPAC PMn calculations, only pm6
"""
pm_method = 'pm6'
class MopacMolPM7(MopacMolPMn):
"""
Mopac PM7 calculations for molecules
This is a class of its own in case you wish to do anything differently,
but for now it's the same as all the MOPAC PMn calculations, only pm7
"""
pm_method = 'pm7'
|
python
|
import os
import numpy as np
from tqdm import tqdm
from stable_baselines import PPO2
from navrep.envs.navreptrainencodedenv import NavRepTrainEncodedEnv
from crowd_sim.envs.utils.info import Timeout, ReachGoal, Danger, Collision, CollisionOtherAgent
from navrep.tools.commonargs import parse_common_args
class NavRepCPolicy(object):
""" wrapper for gym policies """
def __init__(self, model=None):
if model is not None:
self.model = model
else:
self.model_path = os.path.expanduser(
"~/navrep/models/gym/navreptrainencodedenv_latest_PPO_ckpt")
self.model = PPO2.load(self.model_path)
print("Model '{}' loaded".format(self.model_path))
def act(self, obs):
action, _states = self.model.predict(obs, deterministic=True)
return action
def run_test_episodes(env, policy, render=False, print_failure=True, print_success=True, num_episodes=500):
success_times = []
success_cases = []
collision_times = []
collision_other_agent_times = []
timeout_times = []
success = 0
collision = 0
collision_other_agent = 0
total_reward = 0
timeout = 0
too_close = 0
min_dist = []
collision_cases = []
collision_other_agent_cases = []
timeout_cases = []
progress_bar = tqdm(range(num_episodes), total=num_episodes)
for i in progress_bar:
progress_bar.set_description("Case {}".format(i))
ob = env.reset()
done = False
env_time = 0
while not done:
action = policy.act(ob)
ob, reward, done, info = env.step(action)
event = info['event']
total_reward += reward
if render:
                env.render('human')  # optionally: robocentric=True, save_to_file=True
env_time += env._get_dt()
if isinstance(event, Danger):
too_close += 1
min_dist.append(event.min_dist)
if isinstance(event, ReachGoal):
success += 1
success_times.append(env_time)
success_cases.append(i)
elif isinstance(event, Collision):
collision += 1
collision_cases.append(i)
collision_times.append(env_time)
elif isinstance(event, CollisionOtherAgent):
collision_other_agent += 1
collision_other_agent_cases.append(i)
collision_other_agent_times.append(env_time)
elif isinstance(event, Timeout):
timeout += 1
timeout_cases.append(i)
timeout_times.append(env_time)
else:
raise ValueError('Invalid end signal from environment')
success_rate = success / float(num_episodes) * 100
timeout_rate = timeout / float(num_episodes) * 100
collision_rate = collision / float(num_episodes) * 100
collision_other_agent_rate = collision_other_agent / \
float(num_episodes) * 100
assert success + collision + timeout + collision_other_agent == num_episodes
avg_nav_time = sum(success_times) / float(len(success_times)
) if success_times else np.nan
print(
"""has success rate: {:.2f}%, collision rate: {:.2f}%, timeout_rate: {:.2f}%,
collision from other agents rate: {:.2f}%, nav time: {:.2f}, total reward: {:.4f}""".format(
success_rate,
collision_rate,
timeout_rate,
collision_other_agent_rate,
avg_nav_time,
total_reward / float(num_episodes)
)
)
total_time = sum(success_times + collision_times + collision_other_agent_times + timeout_times)
print(
'Frequency of being in danger: {:.2f} and average min separate distance in danger: {:.2f}'.format(
too_close / float(total_time),
np.mean(min_dist)
)
)
if print_success:
print('Success cases: ' + ' '.join([str(x) for x in success_cases]))
if print_failure:
print('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
print('Collision from other agent cases: ' + ' '.join([str(x) for x in collision_other_agent_cases]))
print('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))
return success_rate, avg_nav_time
if __name__ == '__main__':
args, _ = parse_common_args()
if args.environment is None or args.environment == "navreptrain":
env = NavRepTrainEncodedEnv(args.backend, args.encoding, silent=True, scenario='test')
policy = NavRepCPolicy()
else:
raise NotImplementedError
run_test_episodes(env, policy, render=args.render)
|
python
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class DbusGlib(AutotoolsPackage):
"""dbus-glib package provides GLib interface for D-Bus API."""
homepage = "https://dbus.freedesktop.org"
url = "https://dbus.freedesktop.org/releases/dbus-glib/dbus-glib-0.110.tar.gz"
version('0.110', sha256='7ce4760cf66c69148f6bd6c92feaabb8812dee30846b24cd0f7395c436d7e825')
depends_on('pkgconfig', type='build')
depends_on('expat')
depends_on('glib')
depends_on('dbus')
|
python
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
class Meeting(models.Model):
title = models.CharField(
max_length=256,
verbose_name=_('Title'),
help_text=_('Title of the meeting')
)
slug = models.SlugField(
max_length=32,
verbose_name=_('Slug'),
help_text=_('Slug for the URL of the meeting')
)
registration_message = models.TextField(
blank=True, null=True,
verbose_name=_('Registration message'),
help_text=_('Message on registration page, you can use Markdown here.')
)
registration_done_message = models.TextField(
blank=True, null=True,
verbose_name=_('Registration done message'),
help_text=_('Message on the page displayed after registration, you can use Markdown here.')
)
participants_message = models.TextField(
blank=True, null=True,
verbose_name=_('Participants message'),
help_text=_('Message on participants page, you can use Markdown here.')
)
contributions_message = models.TextField(
blank=True, null=True,
verbose_name=_('Contributions message'),
help_text=_('Message on contributions page, you can use Markdown here.')
)
registration_open = models.BooleanField(
default=False,
verbose_name=_('Registration open'),
help_text=_('Designates whether the registration page is publicly accessible.')
)
participants_open = models.BooleanField(
default=False,
verbose_name=_('Participants list open'),
help_text=_('Designates whether the participants page is publicly accessible.')
)
contributions_open = models.BooleanField(
default=False,
verbose_name=_('Contributions list open'),
help_text=_('Designates whether the contributions page is publicly accessible.')
)
class Meta:
ordering = ('title', )
verbose_name = _('Meeting')
verbose_name_plural = _('Meetings')
def __str__(self):
return self.title
class Participant(models.Model):
STATUS_ORGANIZER = 'ORGANIZER'
STATUS_DISCUSSION_LEADER = 'DISCUSSION_LEADER'
STATUS_INVITED = 'INVITED'
STATUS_REGISTERED = 'REGISTERED'
STATUS_ACCEPTED = 'ACCEPTED'
STATUS_REJECTED = 'REJECTED'
STATUS_CANCELED = 'CANCELED'
STATUS_CHOICES = (
(STATUS_ORGANIZER, _('organizer')),
(STATUS_DISCUSSION_LEADER, _('discussion leader')),
(STATUS_INVITED, _('invited')),
(STATUS_REGISTERED, _('registered')),
(STATUS_ACCEPTED, _('accepted')),
(STATUS_REJECTED, _('rejected')),
(STATUS_CANCELED, _('canceled')),
)
meeting = models.ForeignKey(
Meeting, related_name='participants', on_delete=models.CASCADE,
verbose_name=_('Meeting'),
help_text=_('Meeting this participant has registered for'),
)
first_name = models.CharField(
max_length=256,
verbose_name=_('First name'),
)
last_name = models.CharField(
max_length=256,
verbose_name=_('Last name'),
)
email = models.EmailField(
max_length=256,
verbose_name=_('Email'),
)
details = JSONField(
        null=True, blank=True, default=dict,
verbose_name=_('Details'),
help_text=_('Choices are given by settings.MEETINGS_PARTICIPANT_DETAIL_KEYS')
)
registered = models.DateTimeField(
verbose_name=_('Registered on'),
help_text=_('Datetime this participant has submitted his/her registration')
)
status = models.CharField(
max_length=32, choices=STATUS_CHOICES,
verbose_name=_('Status'),
help_text=_('Status of the participant.')
)
payment = models.CharField(
max_length=32, blank=True,
verbose_name=_('Payment'),
help_text=_('Type of payment for the participant.')
)
payment_complete = models.BooleanField(
default=False,
verbose_name=_('Payment complete'),
help_text=_('Designates whether the payment is completed.')
)
class Meta:
ordering = ('meeting', 'last_name', 'first_name')
verbose_name = _('Participant')
verbose_name_plural = _('Participants')
def __str__(self):
return '%s (%s)' % (self.full_name, self.meeting)
@property
def full_name(self):
return '%s %s' % (self.first_name, self.last_name)
@property
def as_text(self):
values = [
(_('Name'), self.full_name),
(_('Email'), self.email)
]
for detail_key in settings.MEETINGS_PARTICIPANT_DETAIL_KEYS:
            if self.details.get(detail_key['key']):
values.append((detail_key['label'], self.details[detail_key['key']]))
return '\n' + ''.join(['%s: %s\n' % value for value in values])
def get_payment_display(self):
try:
return dict(settings.MEETINGS_PAYMENT_CHOICES)[self.payment]
except KeyError:
return ''
class Contribution(models.Model):
participant = models.ForeignKey(
Participant, related_name='contributions', on_delete=models.CASCADE,
verbose_name=_('Participant'),
help_text=_('Participant who submitted this contribution')
)
title = models.CharField(
max_length=256,
verbose_name=_('Title')
)
abstract = models.TextField(
verbose_name=_('Abstract')
)
contribution_type = models.CharField(
max_length=8, blank=True,
verbose_name=_('Contribution type'),
help_text=_('Choices are given by settings.MEETINGS_CONTRIBUTION_TYPES')
)
accepted = models.BooleanField(
default=False,
verbose_name=_('Accepted'),
help_text=_('Designates whether the contribution is accepted.')
)
class Meta:
ordering = ('participant', 'title')
verbose_name = _('Contribution')
verbose_name_plural = _('Contributions')
def __str__(self):
return self.title
@property
def as_text(self):
values = [
(_('Type'), dict(settings.MEETINGS_CONTRIBUTION_TYPES)[self.contribution_type]),
(_('Title'), self.title),
(_('Abstract'), self.abstract)
]
return '\n' + ''.join(['%s: %s\n' % value for value in values])
def get_contribution_type_display(self):
try:
return dict(settings.MEETINGS_CONTRIBUTION_TYPES)[self.contribution_type]
except KeyError:
return ''
|
python
|
"""Test the sum_odd_numbers function."""
import pytest
def test_nth_even():
"""test the nth_even function."""
from get_nth_number import nth_even
test_value = nth_even(100)
assert test_value == 198
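# A minimal sketch of the function under test, consistent with the assertion
# above (assumption: the real get_nth_number module may differ):
#
# def nth_even(n):
#     """Return the nth even number, counting 0 as the first."""
#     return 2 * (n - 1)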
|
python
|
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Basic code for :py:mod:`solvcon.case`.
"""
from . import gendata
from . import anchor
from . import hook
class _ArrangementRegistry(gendata.SingleAssignDict, gendata.AttributeDict):
"""
Arrangement registry class. An "arrangement" is a callable that returns a
:py:class:`MeshCase` object.
"""
def __setitem__(self, key, value):
"""
>>> regy = _ArrangementRegistry()
>>> # assigning a key to a function is OK.
>>> regy['func1'] = lambda a: a
>>> # assigning a key to anything else isn't allowed.
>>> regy['func2'] = None # doctest: +ELLIPSIS
Traceback (most recent call last):
...
        ValueError: None should be a callable, but got a <... 'NoneType'>.
"""
if not callable(value):
raise ValueError("%s should be a callable, but a %s is got." % (
str(value), str(type(value))))
super(_ArrangementRegistry, self).__setitem__(key, value)
arrangements = _ArrangementRegistry() # overall registry singleton.
class CaseInfoMeta(type):
"""
Meta class for case information class.
"""
def __new__(cls, name, bases, namespace):
newcls = super(CaseInfoMeta, cls).__new__(cls, name, bases, namespace)
# incremental modification of defdict.
defdict = {}
for base in bases:
defdict.update(getattr(base, 'defdict', {}))
defdict.update(newcls.defdict)
newcls.defdict = defdict
# create different simulation registry objects for case classes.
newcls.arrangements = _ArrangementRegistry()
return newcls
class CaseInfo(dict, metaclass=CaseInfoMeta):
"""
Generic case information abstract class. It's the base class that all case
    information classes should subclass, to form a hierarchical information
object.
"""
defdict = {}
def __getattr__(self, name):
"""
Consult self dictionary for attribute. It's a shorthand.
"""
if name == '__setstate__':
raise AttributeError
return self[name]
def __setattr__(self, name, value):
"""
Save to self dictionary first, then self object table.
        @note: This method is overridden as a stupid-preventer. It makes
attribute setting consistent with attribute getting.
"""
if name in self:
self[name] = value
else:
super(CaseInfo, self).__setattr__(name, value)
def _set_through(self, key, val):
"""
Set to self with the dot-separated key.
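        For example, _set_through('solver.neq', 4) sets
        self['solver']['neq'] = 4.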
"""
tokens = key.split('.', 1)
fkey = tokens[0]
if len(tokens) == 2:
self[fkey]._set_through(tokens[1], val)
else:
self[fkey] = val
def __init__(self, _defdict=None, *args, **kw):
"""
Assign default values to self after initiated.
@keyword _defdict: customized defdict; internal use only.
@type _defdict: dict
"""
super(CaseInfo, self).__init__(*args, **kw)
# customize defdict.
if _defdict is None:
defdict = self.defdict
else:
defdict = dict(self.defdict)
defdict.update(_defdict)
# parse first hierarchy to form key groups.
keygrp = dict()
for key in defdict.keys():
if key is None or key == '':
continue
tokens = key.split('.', 1)
if len(tokens) == 2:
fkey, rkey = tokens
keygrp.setdefault(fkey, dict())[rkey] = defdict[key]
else:
fkey = tokens[0]
keygrp[fkey] = defdict[fkey]
# set up first layer keys recursively.
for fkey in keygrp.keys():
data = keygrp[fkey]
if isinstance(data, dict):
self[fkey] = CaseInfo(_defdict=data)
elif isinstance(data, type):
try:
self[fkey] = data()
except TypeError:
self[fkey] = data
else:
self[fkey] = data
class HookList(list):
"""
Hook container and invoker.
@ivar cse: case object.
@itype cse: solvcon.case.BaseCase
"""
def __init__(self, cse, *args, **kw):
self.cse = cse
super(HookList, self).__init__(*args, **kw)
def append(self, obj, **kw):
"""
The object to be appended (the first and only argument) should be a
Hook object, but this method actually accept either a Hook type or an
Anchor type. The method will automatically create the necessary Hook
        object when it detects an acceptable type passed as the first argument.
All the keywords go to the creation of the Hook object if the first
argument is a type. If the first argument is an instantiated Hook
object, the method accepts no keywords.
@param obj: the hook object to be appended.
@type obj: solvcon.hook.Hook
"""
if isinstance(obj, type):
if issubclass(obj, (anchor.MeshAnchor, anchor.Anchor)):
kw['ankcls'] = obj
obj = hook.Hook
obj = obj(self.cse, **kw)
else:
assert len(kw) == 0
super(HookList, self).append(obj)
def __call__(self, method):
"""
Invoke the specified method for each hook object.
@param method: name of the method to run.
@type method: str
"""
runhooks = self
if method == 'postloop':
runhooks = reversed(runhooks)
        for hok in runhooks:
            getattr(hok, method)()
def drop_anchor(self, svr):
for hok in self:
hok.drop_anchor(svr)
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
|
python
|
'''
HecTime and writing Irregular time-series safely
'''
import sys
from pydsstools.heclib.dss.HecDss import Open
from pydsstools.core import TimeSeriesContainer
from pydsstools.heclib.utils import HecTime, DssLastError
dss_file = "example.dss"
fid = Open(dss_file)
def decor(func):
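    """Decorator that runs the wrapped test and prints Passed/Failed instead of raising."""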
def f(*arg,**kwargs):
try:
result = func(*arg,**kwargs)
        except Exception:
print(sys.exc_info()[1])
print(func.__name__ + ' !!!!!Failed!!!!!\n')
else:
print(func.__name__ + ' *****Passed******\n')
return result
return f
@decor
def test1():
# Pass but dss "warning" level error
fid.read_ts('/a/b/c//e/f/')
err = DssLastError()
print("DSS Warning = " + err.errorMessage)
@decor
def test2():
# Pass
t = HecTime('02JAN2019 00:00',granularity=60)
print('dss times value = %r'%t.datetimeValue)
@decor
def test3():
# Fail
# time value overflows due to seconds granularity
t = HecTime('02JAN2019 00:00:10',granularity=1)
print('dss times value = %r'%t.datetimeValue)
@decor
def test4():
# Pass
    # Minute granularity does not overflow here (but minute precision)
t = HecTime('02JAN2019 00:00:10',granularity=60)
print('dss times value = %r'%t.datetimeValue)
@decor
def test5():
# Pass
    # Seconds granularity does not overflow when a larger julianBaseDate is used
pathname ="/IRREGULAR/TIMESERIES/PARAM//IR-DECADE/Ex14_Test5/"
t = HecTime('02JAN2019 00:10',granularity=1, julianBaseDate='01JAN2000')
print('dss times value = %r'%t.datetimeValue)
@decor
def test6():
# Fail
    # The date span is too large for seconds granularity, even though
    # prevent_overflow=True tries to prevent overflow by using the smallest date (01JAN2019) as julianBaseDate
pathname ="/IRREGULAR/TIMESERIES/PARAM//IR-DAY/Ex14_Test6/"
T = ['01JAN2019 01:00','01JAN2455 00:00']
tsc = TimeSeriesContainer()
tsc.pathname = pathname
tsc.interval = -1
tsc.granularity = 1
tsc.times = T
tsc.values = [2019,5000]
tsc.numberValues = 2
fid.put_ts(tsc,prevent_overflow=True)
#
ts = fid.read_ts(pathname,regular=False)
print(ts.pytimes)
print(ts.values)
@decor
def test7():
# Pass
# Writing one time,value pair with prevent_overflow = True is safest
pathname = "/IRREGULAR/TIMESERIES/PARAM//IR-DECADE/Ex14_Test7/"
T = ['01JAN2019 02:01:05','01JAN5000 01:02:06']
V = [2019,5000]
for t,v in zip(T,V):
tsc = TimeSeriesContainer()
tsc.pathname = pathname
tsc.interval = -1
tsc.granularity = 60
tsc.times = [t]
tsc.values = [v]
tsc.numberValues = 1
fid.put_ts(tsc,prevent_overflow=True)
ts = fid.read_ts(pathname,regular=False)
print(ts.times)
print(ts.pytimes)
print(ts.values)
return tsc,ts
test1()
test2()
test3()
test4()
test5()
test6()
test7()
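# Close the handle once all tests have run; Open can also be used as a
# context manager ("with Open(dss_file) as fid:"), which is the safer
# pattern in longer scripts (assumption based on common pydsstools usage).
fid.close()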
|
python
|
# -*- coding: utf-8 -*-
# @File : baseview.py
# @Date : 2021/2/25
# @Desc :
from rest_framework.generics import UpdateAPIView, DestroyAPIView
from rest_framework.serializers import Serializer
from rest_framework.viewsets import ModelViewSet
class FakeSerializer(Serializer):
pass
class BaseView(ModelViewSet, UpdateAPIView, DestroyAPIView):
    queryset = None  # set the concrete view's queryset
    serializer_class = FakeSerializer  # set the concrete view's serializer_class
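# Usage sketch (hypothetical Book model and serializer, not part of this
# file): subclasses only need to fill in the two class attributes.
#
#   class BookView(BaseView):
#       queryset = Book.objects.all()
#       serializer_class = BookSerializer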
|
python
|
import math
import time
import pytest
from . import assert_javascript_entry
@pytest.mark.asyncio
async def test_types_and_values(bidi_session, current_session, inline, wait_for_event):
await bidi_session.session.subscribe(events=["log.entryAdded"])
on_entry_added = wait_for_event("log.entryAdded")
expected_text = current_session.execute_script(
"const err = new Error('foo'); return err.toString()")
time_start = math.floor(time.time() * 1000)
# TODO: To be replaced with the BiDi implementation for navigate.
current_session.url = inline(
"<script>function bar() { throw new Error('foo'); }; bar();</script>")
event_data = await on_entry_added
time_end = math.ceil(time.time() * 1000)
assert_javascript_entry(
event_data,
level="error",
text=expected_text,
time_start=time_start,
time_end=time_end
)
# Navigate to a page with no error to avoid polluting the next tests with
# JavaScript errors.
current_session.url = inline("<p>foo")
|
python
|
"""
LibriParty Dataset creation by using official metadata.
Author
------
Samuele Cornell, 2020
Mirco Ravanelli, 2020
"""
import os
import sys
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.data_utils import download_file
from local.create_mixtures_from_metadata import create_mixture
import json
from tqdm import tqdm
URL_METADATA = (
"https://www.dropbox.com/s/0u6x6ndyedb4rl7/LibriParty_metadata.zip?dl=1"
)
# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
metadata_folder = params["metadata_folder"]
if not os.path.exists(metadata_folder):
os.makedirs(metadata_folder)
# Download meta data from the web
download_file(
URL_METADATA,
metadata_folder + "/meta.zip",
unpack=True,
dest_unpack=metadata_folder,
)
for data_split in ["train", "dev", "eval"]:
with open(os.path.join(metadata_folder, data_split + ".json"), "r") as f:
metadata = json.load(f)
print("Creating data for {} set".format(data_split))
c_folder = os.path.join(params["out_folder"], data_split)
os.makedirs(c_folder, exist_ok=True)
for sess in tqdm(metadata.keys()):
create_mixture(sess, c_folder, params, metadata[sess])
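# Run as (hypothetical file name): python create_dataset.py hparams.yaml
# The YAML must define at least "metadata_folder" and "out_folder"; any key
# can be overridden on the command line via parse_arguments.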
|
python
|
import simplejson as json
class PConf():
    def __init__(self, filename="", schema=None):
        """initialize with a config filename and a schema dictionary"""
        if not filename or not isinstance(schema, dict):
            raise ValueError("PConf requires a filename and a schema dict")
with open(filename, "r") as f:
self.conf = json.loads(f.read())
self.schema = schema
self.validate(self.conf, self.schema)
def get(self, value, default=None):
""" get a value from the config """
if not default:
return self.conf.get(value)
else:
return self.conf.get(value, default)
def pretty(self):
"""pretty print the config"""
        print(json.dumps(self.conf, indent=4))
def validate(self, config, schema):
"""recursively check against types in schema"""
        for key, val in config.items():
if key not in schema:
raise Exception(key + " not in schema")
if isinstance(val, list):
if val == []:
continue
type_first_element = type(val[0])
schema_first_element = schema[key][0]
if isinstance(schema_first_element, list):
# don't allow multi dimensional lists!
raise Exception("Can't have list in a list")
for index, item in enumerate(val):
if not isinstance(item, type_first_element):
# lists have standardized items so we reference all of them with the first one
raise Exception(str(val) + " not of type " + repr(type_first_element))
if isinstance(item, dict):
self.validate(item, schema[key][0])
else:
pass
elif isinstance(val, dict):
self.validate(val, schema[key])
else:
if not isinstance(schema[key], type):
raise Exception (repr(schema[key]) + " not a type ")
if not isinstance(val, schema[key]):
raise Exception (repr(val) + " not of type " + repr(schema[key]))
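# Usage sketch (hypothetical "app.json" and schema, not from the original):
# scalar entries map keys to type objects, while nested dicts and
# single-element lists mirror the expected config structure.
#
#   schema = {
#       "name": str,
#       "port": int,
#       "servers": [{"host": str, "weight": int}],
#   }
#   conf = PConf("app.json", schema)
#   print(conf.get("port", 8080))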
|
python
|
from flask import request
def get_locale():
rv = request.accept_languages.best_match(['zh', 'en'])
return rv or 'en'
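# Typically registered with Flask-Babel (assumption; the wiring is not part
# of this snippet). Depending on the Flask-Babel version:
#
#   babel = Babel(app, locale_selector=get_locale)   # Flask-Babel >= 3
#   # or, on older releases, the @babel.localeselector decorator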
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2022 askusay
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import yaml
def make_filename(filename):
'''Checks to see if desired file exists, if so it is renamed
Can provide filename with or without directory i.e. "filename", "../filename" and "/directory/filename"
If the directory does not exist then it will be made
'''
base, file = os.path.split(filename)
# check if file exists
    '''This section is adapted from https://github.com/csblab/md_scripts/blob/master/openmm/amberff/_utils.py'''
if os.path.isfile(filename):
fname = filename
num = 1
while True:
            # build the candidate from the original name on every pass so the
            # suffixes do not nest ("#name.1#", "#name.2#", ...)
            candidate = '#{}.{}#'.format(file, num)
            filename = os.path.join(base, candidate)
if os.path.isfile(filename):
num += 1
else:
os.rename(fname, filename)
break
return fname
    # if the file does not exist, check whether the directory exists
else:
# ignore if base is current dir '.' - previous dir '..' - nothing ''
        if base not in ('.', '..', ''):
# Make if necessary
if not os.path.isdir(base):
os.makedirs(base)
return filename
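# Behaviour sketch: make_filename("out/result.pdb") returns the path
# unchanged (creating "out/" if needed) when the file is new; if it already
# exists, the old file is renamed to "out/#result.pdb.1#" (then ".2#", ...)
# so the original name is free to be written again.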
def fix_atb(file_in, file_out, template):
with open(template) as file:
convert_mols = yaml.load(file, Loader=yaml.FullLoader)
for i, v in convert_mols.items():
assert type(v["conversions"]) == list, 'Each conversions line must be formatted like this: - [source_numbering, source, template]'
assert all([len(j) == 3 for j in v["conversions"]]), f'"conversions" of {i} in {template} does not appear to be formatted correctly,\nEach conversions line must be formatted like this: - [source_numbering, source, template]'
if v['atoms'] > 1:
as_array = np.array(convert_mols[i]['conversions'])
v['map'] = as_array[:,0].astype(int) - 1
with open(file_in) as f:
content = f.readlines()
content = [x.strip() for x in content]
content_iter = iter(content)
fixed = ""
for i in content_iter:
# limit search to 1 space around residue name in PDB file
res = i[17:21].strip()
if i.startswith('TER'):
fixed += i + '\n'
continue
if res in convert_mols.keys():
no = convert_mols[res]['atoms']
tmp = convert_mols[res]['conversions']
            assert len(tmp[0]) == 3, 'each conversions entry must have three fields'
if no == 1:
tmp = tmp[0]
replaced = i.replace(tmp[1],tmp[2])
if tmp[1] != tmp[2]:
assert i != replaced, f'Replacement failed for {i}\nsource: {tmp[1]}, target: {tmp[2]}\nafter replacement:{replaced}'
fixed += replaced + "\n"
else:
atm_map = convert_mols[res]['map']
lig = [i]
[lig.append(next(content_iter)) for _ in range(1,no)] # skip n lines and append
np_lig = np.array(lig)
np_lig_filter = np_lig[atm_map]
                assert len(np_lig_filter) == len(tmp), 'atom map length does not match conversions'
for current, t in zip(np_lig_filter, tmp):
replaced = current.replace(t[1],t[2])
if t[1] != t[2]:
assert current != replaced, f'Replacement failed for {i}\nsource: {t[1]}, target: {t[2]}\nafter replacement:{replaced}'
fixed += replaced + "\n"
else:
fixed += i + '\n'
with open(make_filename(file_out), 'w+') as f:
f.write(fixed)
return
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('pdb_in', help='Input PDB file')
parser.add_argument('pdb_out', help='Output PDB file')
parser.add_argument('template', help='template yaml file i.e. "mol_dict.yaml"')
args = parser.parse_args()
fix_atb(args.pdb_in, args.pdb_out, args.template)
|
python
|
from pyit.cop import ITokenCop, Cop
from pyit.offence import Offence
from token import LPAR, LSQB, LBRACE, RBRACE, RSQB, RPAR, INDENT
class SpaceIndentationCop(Cop):
COP_CONFIG = {}
OPEN_BRACKETS = {
'(': LPAR,
'[': LSQB,
'{': LBRACE,
}
CLOSE_BRACKETS = {
'}': RBRACE,
']': RSQB,
')': RPAR,
}
__implements__ = [ITokenCop]
    def __init__(self, cop_conf=None):
        # per-instance offence list; a class-level list would be shared
        # across all cop instances
        self.offences = []
        if cop_conf is None:
            self.cop_conf = {**self.DEFAULT_CONFIG, **self.COP_CONFIG}
        else:
            self.cop_conf = {**self.DEFAULT_CONFIG, **self.COP_CONFIG, **cop_conf}
@classmethod
def name(cls):
return 'space_indentation_cop'
def process_tokens(self, tokens, filename):
if not self.processable():
return
opened_brackets = 0
for i, tkn in enumerate(tokens):
if tkn.type in self.CLOSE_BRACKETS.values():
opened_brackets -= 1
if tkn.type in self.OPEN_BRACKETS.values():
opened_brackets += 1
if tkn.type == INDENT:
if tkn.string.startswith(' ') and \
len(tkn.string) % 4 != 0 and \
opened_brackets == 0:
off = Offence(
cop_name=self.name(),
location=tkn.start,
message="Indentation is not multiple of four.",
filename=filename,
severity='convention'
)
self.offences.append(off)
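# Example of what this cop flags (illustrative): an INDENT token whose
# width is not a multiple of four, outside any open bracket.
#
#   def f():
#      return 1   # 3-space indent -> "Indentation is not multiple of four."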
|
python
|
import sys
import pandas as pd
import matplotlib.pyplot as plt
sys.path.append('../minvime')
import estimator_classification as esti # The file ../minvime/estimator_classification.py
fprates = [0.0, 0.00001, 0.0001, 0.001, 0.002, 0.003, 0.004, 0.005, 0.01, 0.015,
0.02,0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.055, 0.06, 0.065, 0.07,
0.075, 0.08, 0.09, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.91, 0.92, 0.93, 0.94,
0.95, 0.96, 0.97, 0.98, 0.99, 0.999, 0.9999, 1.0]
tp = 1000
fp = -140
tn = 0
fn = 0
minroi = 10000
cases = 1000000
baserate = 0.0015
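# Payoff matrix per case (interpreted in monetary units): a true positive
# is worth 1000, a false positive costs 140, and negatives are neutral;
# minroi is the minimum acceptable return over the 1,000,000 cases at a
# 0.15% base rate of positives.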
auc, prec, recall, x3, y3 = esti.estimate_binary_model_requirements(
tp=tp, fp=fp, tn=tn, fn=fn, cases=cases,
baserate=baserate, minroi=minroi
)
auc, x1, y1 = esti.generate_roc_auc(fprates, 0.5, 2)
auc, x2, y2 = esti.generate_roc_auc(fprates, 0.9, 6)
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True,figsize=(12,4))
fig.suptitle('Synthetically Generated ROC Plots')
ax1.plot(x1, y1)
ax2.plot(x2, y2)
ax3.plot(x3, y3)
ax1.set(xlabel='FPR', ylabel='TPR')
ax2.set(xlabel='FPR')
ax2.label_outer()
ax3.set(xlabel='FPR')
ax3.label_outer()
plt.savefig("../paper/images/roc_plots.png")
|
python
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of service agent command for Cloud Storage."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.calliope import base
@base.Hidden
class ServiceAgent(base.Command):
"""Manage a project's Cloud Storage service agent, which is used to perform Cloud KMS operations."""
detailed_help = {
'DESCRIPTION':
"""
*{command}* displays the Cloud Storage service agent, which is used to
      perform Cloud KMS operations against your default or a supplied project.
If the project does not yet have a service agent, *{command}* creates one.
""",
'EXAMPLES':
"""
To show the service agent for your default project:
$ {command}
      To show the service agent for ``my-project'':
$ {command} --project=my-project
To authorize your default project to use a Cloud KMS key:
$ {command} --authorize-cmek=projects/key-project/locations/us-east1/keyRings/key-ring/cryptoKeys/my-key
""",
}
@staticmethod
def Args(parser):
parser.add_argument(
'--authorize-cmek',
help=textwrap.dedent("""\
Adds appropriate encrypt/decrypt permissions to the specified Cloud
KMS key. This allows the Cloud Storage service agent to write and
read Cloud KMS-encrypted objects in buckets associated with the
service agent's project."""))
def Run(self, args):
raise NotImplementedError
|
python
|
from flask import Flask, jsonify, request
from pymongo import MongoClient
from flask_script import Manager, Command, Shell
from flask_mail import Mail, Message
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
from nltk import word_tokenize, pos_tag
from tenseflow import change_tense
from win10toast import ToastNotifier
import time
app = Flask(__name__)
app.config['SECRET_KEY'] = 'it is not readable'
manager = Manager(app)
EmailTemplate = [
{
'id': 0,
'Route': 'GOTO http://127.0.0.1:5000/Intro/',
'Name': 'Introduction Email Template'
},
{
'id': 1,
'Route': 'GOTO http://127.0.0.1:5000/FollowUp/',
'Name': 'Follow-Up Email Template'
},
{
'id': 2,
'Route': 'GOTO http://127.0.0.1:5000/Reengagement/',
'Name': 'Reengagement Email Template'
},
{
'id': 3,
'Route': 'GOTO http://127.0.0.1:5000/CustomerApp/',
'Name': 'Customer-Appreciation Email Template'
},
{
'id': 4,
'Route': 'GOTO http://127.0.0.1:5000/EventInvite/',
'Name': 'Event-Invite Email Template'
},
{
'id': 5,
'Route': 'GOTO http://127.0.0.1:5000/SpecialDiscount/',
'Name': 'Discount-Offer Email Template'
},
{
'id': 6,
'Route': 'GOTO http://127.0.0.1:5000/ServiceUpdate/',
'Name': 'Service-Update Template'
},
{
'id': 7,
'Route': 'GOTO http://127.0.0.1:5000/ThankPurchase/',
'Name' : 'Thank you for Purchase Email Template'
},
{
'id': 8,
'Route': 'GOTO http://127.0.0.1:5000/Testimonial/',
'Name': 'Testimonial Request Email Template'
},
{
'id': 9,
'Route': 'GOTO http://127.0.0.1:5000/Review/',
'Name' : 'Review Request Email Template'
},
{
'id': 10,
'Route': 'GOTO http://127.0.0.1:5000/Blog/',
'Name': 'Blog-Update Email Template'
},
{
'id': 11,
'Route': 'GOTO http://127.0.0.1:5000/UpcEvent/',
'Name': 'Upcoming-Event Email Template'
},
{
'id': 12,
'Route': 'GOTO http://127.0.0.1:5000/SickLeave/',
'Name': 'Sick Leave Mail'
},
{
'id': 13,
'Route': 'GOTO http://127.0.0.1:5000/DayLongMeetings/',
'Name': 'Day Long Meeting Mail'
},
{
'id': 14,
'Route': 'GOTO http://127.0.0.1:5000/Meeting1/',
'Name': 'Meeting1 Mail'
},
{
'id': 15,
'Route': 'GOTO http://127.0.0.1:5000/Meeting2/',
'Name': 'Meeting2 Mail'
},
{
'id': 16,
        'Route': 'GOTO http://127.0.0.1:5000/LeaveMail/',
        'Name': 'Leave Mail'
},
{
'id': 17,
'Route': 'GOTO http://127.0.0.1:5000/Deliverable/',
'Name' : 'Deliverable Sent Revision Mail'
},
{
'id': 18,
'Route': 'GOTO http://127.0.0.1:5000/DeliverableSent/',
'Name': 'Deliverable Sent Mail'
},
{
'id': 19,
'Route': 'GOTO http://127.0.0.1:5000/SickLeave1/',
'Name' : 'Sick Leave 1'
},
{
'id': 20,
'Route': 'GOTO http://127.0.0.1:5000/SickLeave2/',
'Name' : 'Sick Leave 2'
},
{
'id': 21,
'Route': 'GOTO http://127.0.0.1:5000/SickLeave3/',
'Name' : 'Sick Leave 3'
},
{
'id': 22,
'Route': 'GOTO http://127.0.0.1:5000/AnnualLeave/',
'Name' : 'Annual Leave'
},
{
'id': 23,
'Route': 'GOTO http://127.0.0.1:5000/Resignation/',
'Name': 'Resignation Mail'
},
{
'id': 24,
'Route': 'GOTO http://127.0.0.1:5000/Farewell/',
'Name': 'Farewell/Last Working Day mail'
},
{
'id': 25,
'Route': 'GOTO http://127.0.0.1:5000/Reminder/',
'Name': 'Reminder'
}
]
def findtense(find_tense):  # find the tense of a sentence
train_text = state_union.raw("2005-GWBush.txt")
custom_sent_tokenizer = PunktSentenceTokenizer(train_text)
tokenized = custom_sent_tokenizer.tokenize(find_tense)
for i in tokenized:
words = nltk.word_tokenize(i)
tagged = nltk.pos_tag(words)
# print(tagged)
        for j in tagged:
            j = list(j)
            if j[1] == "MD":
                return "future"
            elif j[1] in ["VBP", "VBZ", "VBG"]:
                return "present"
            elif j[1] in ["VBD", "VBN"]:
                return "past"
def Convert(string):  # split a space-separated string into a list of words
li = list(string.split(" "))
return li
def listToString(s):  # join a list of words back into a string
    return " ".join(s)
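# How the template splicing below works (illustrative values): Convert()
# splits a template on single spaces so each bracketed placeholder such as
# "[name]" lands in its own list slot; the handlers then remove that slot
# and insert the user-supplied value at the same index.
#
#   tpl = Convert("Hi [name] , welcome to [team] !")
#   tpl[1], tpl[5] = "Alice", "Platform"
#   listToString(tpl)   # -> "Hi Alice , welcome to Platform !"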
def SendMail(sub, body):
    # mail settings must be in app.config before Mail(app) captures them
    app.config['MAIL_SERVER'] = 'smtp.gmail.com'
    app.config['MAIL_PORT'] = 465
    app.config['MAIL_USERNAME'] = '[email protected]'
    app.config['MAIL_PASSWORD'] = 'hacker#blocks09'
    app.config['MAIL_USE_TLS'] = False
    app.config['MAIL_USE_SSL'] = True
    mail = Mail(app)
    msg = Message(sub, sender='[email protected]', recipients=['[email protected]'])
    msg.body = body
    mail.send(msg)
@app.route('/')
def api_all():
return jsonify(EmailTemplate)
@app.errorhandler(404)
def page_not_found(e):
return "<h1>404</h1><p>The resource could not be found.</p>", 404
#IntroEmail
@app.route('/Intro/', methods=['POST'])
def Intro():
req_data = request.get_json()
string = "Hi [name] ,\n Thank you for showing interest in [your product/service] . By taking this initial step, you are already well on your way to meeting your [goals your business can help your prospect meet] . \n For our prospective customers, we [describe a special offer] . [Describe the unique value and benefits of your offer for your audience.] . Please let us know if you would like us to help you find a [solution] ! Give us a call/Email us at [Insert your contact information] . \n Best regards, \n [Your name] "
new = Convert(string)
sub = req_data['sub'] # subject
rname = req_data['rname'] # recipientname
prod = req_data['prod'] # product
spo = req_data['spo'] # specialoffer
unv = req_data['unv'] # uniquevalue
sol = req_data['sol'] # solution
con = req_data['con'] # contact
sname = req_data['sname'] # sendername
gyb = req_data['gyb'] # goals your business
new.remove(new[1])
    new.insert(1, rname)
new.remove(new[3])
new.insert(3, prod)
new.remove(new[6])
if findtense(gyb) is not None:
tense1 = findtense(gyb)
aaa1 = new[5]
try:
new_string1 = change_tense(aaa1 + gyb, tense1)
except:
new_string1 = aaa1 + gyb
new.remove(new[5])
new.insert(5, new_string1)
else:
aaa1 = new[5]
new_string1 = aaa1 + gyb
new.remove(new[5])
new.insert(5, new_string1)
new.remove(new[8])
if findtense(spo) is not None:
tense2 = findtense(spo)
aaa2 = new[7]
try:
new_string2 = change_tense(aaa2 + spo, tense2)
except:
new_string2 = aaa2 + spo
new.remove(new[7])
new.insert(7, new_string2)
else:
aaa2 = new[7]
new_string2 = aaa2 + spo
new.remove(new[7])
new.insert(7, new_string2)
new.remove(new[9])
new.insert(9, unv)
new.remove(new[12])
if findtense(sol) is not None:
tense3 = findtense(sol)
aaa3 = new[11]
try:
new_string3 = change_tense(aaa3 + sol, tense3)
except:
new_string3 = aaa3 + sol
new.remove(new[11])
new.insert(11, new_string3)
else:
aaa3 = new[11]
new_string3 = aaa3 + sol
new.remove(new[11])
new.insert(11, new_string3)
new.remove(new[13])
new.insert(13, con)
new.remove(new[15])
    new.insert(15, sname)
body = listToString(new)
print(body)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.data
col = db.dataemail
db.dataemail.insert_many([{"subject": subject, "recipient": rname, "Contact": scont, "Sender":sname,"Topic": "Introduction Email Template", "Body": new}])
'''
return '<h1>Body: {}</h1>'.format(body)
#FollowUp
@app.route('/FollowUp/', methods=['POST'])
def FollowUp():
req_data = request.get_json()
string = "Dear [name] ,\nYou [how and when the person approached your business initially] and we hope you were able to [insert the value of your prospect gained from your last offer] .This week, we are excited to introduce to you [new offer such as valuable content or a discount] . [Describe the value of your offer] . \n[Define the problem or concern your audience faces] . [Represent your product or service’s unique solution to your prospective customer’s problem] . [Brief reiteration of your product/service’s answer to your audience’s needs] .\nPlease let us know how we can help you by [insert your contact information] .\nWe hope to hear from you soon. \nBest regards,\n [Name] "
new = Convert(string)
rname = req_data['rname'] #[name]
tp = req_data['tp'] #[how and when the person approached your business initially]
aut = req_data['aut'] # [insert the value of your prospect gained from your last offer]
link = req_data['link'] #[new offer such as valuable content or a discount]
firstpa = req_data['firstpa'] #[Describe the value of your offer]
pblog = req_data['pblog'] #[Define the problem or concern your audience faces]
blgtle = req_data['blgtle'] #[Represent your product or service’s unique solution to your prospective customer’s problem]
freq = req_data['freq'] #[Brief reiteration of your product/service’s answer to your audience’s needs]
sub = req_data['sub'] #[insert your contact information]
sname = req_data['sname'] #[Name]
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, tp)
new.remove(new[6])
if findtense(aut) is not None:
tense1 = findtense(aut)
aaa1 = new[5]
try:
new_string1 = change_tense(aaa1 + aut, tense1)
except:
new_string1 = aaa1 + aut
new.remove(new[5])
new.insert(5, new_string1)
else:
aaa1 = new[5]
new_string1 = aaa1 + aut
new.remove(new[5])
new.insert(5, new_string1)
new.remove(new[7])
if findtense(link) is not None:
tense1 = findtense(link)
aaa1 = new[6]
try:
new_string1 = change_tense(aaa1 + link, tense1)
except:
new_string1 = aaa1 + link
new.remove(new[6])
new.insert(6, new_string1)
else:
aaa1 = new[6]
new_string1 = aaa1 + link
new.remove(new[6])
new.insert(6, new_string1)
new.remove(new[8])
new.insert(8, firstpa)
new.remove(new[10])
new.insert(10, pblog)
new.remove(new[12])
new.insert(12, blgtle)
new.remove(new[14])
new.insert(14, freq)
new.remove(new[16])
new.insert(16, sub)
new.remove(new[18])
new.insert(18, sname)
body = listToString(new)
SendMail(sub,body)#sendmail
'''db logic
client = MongoClient("localhost", 27017)
db = client.d4
col = db.dataemaildb.dataemail.insert_many([{"recipient": rname, "TimePeriod": tp, "Link": link, "Author": aut, "Description": descp, "PreviousBlogDescription": pblog, "Sender": sname}])
'''
return '<h1>Body: {}</h1>'.format(body)
#ReengagementEmail
@app.route('/Reengagement/', methods=['POST'])
def Reengagement():
req_data = request.get_json()
string="Subject Line: Following up on [last encounter]\n Hi [name] ,\nI [insert contact method] you [insert last time you reached out] in response to [how your potential customer initially reached out to you] . [Refresh their memory of what you talked about] .\n [Send your prospects an additional offer that would address questions or concerns them had during your conversation with them] . I hope you will find it helpful and informative. Please feel free to let me know if you had any questions or feedback.\nIf you are currently looking for better ways to [what is the problem that your prospect is looking to solve with your product or service] , I believe that [your business’s/product’s name] may be a great solution for you and here is why:\n [Make a bulleted list of your business’s unique selling points. Bold or highlight any key words to make the email easier to scan for your audience] .\nIf you would like to discuss your business in greater detail, you can reach me directly at [Insert your contact information] .\nBest regards,\n [Name] "
new = Convert(string)
sub = req_data['sub'] #[last encounter]
rname = req_data['rname'] #[name]
conn = req_data['conn'] #[insert contact method]
ltc = req_data['ltc'] #[insert last time you reached out]
adoff = req_data['adoff'] #[how your potential customer initially reached out to you]
bmsg = req_data['bmsg'] #[Refresh their memory of what you talked about]
fback = req_data['fback'] #[Send your prospects an additional offer that would address questions or concerns them had during your conversation with them]
mkey = req_data['mkey'] #[what is the problem that your prospect is looking to solve with your product or service]
product_name = req_data['product_name']
selling_point = req_data['selling_point']
contact_info = req_data['contact_info']
sname = req_data['sname'] #[Name]
new.remove(new[1])
new.insert(1, sub)
new.remove(new[3])
new.insert(3, rname)
new.remove(new[5])
new.insert(5, conn)
new.remove(new[7])
new.insert(7, ltc)
new.remove(new[9])
new.insert(9, adoff)
new.remove(new[11])
new.insert(11, bmsg)
new.remove(new[13])
new.insert(13, fback)
new.remove(new[15])
if findtense(mkey) is not None:
tense1 = findtense(mkey)
aaa1 = new[14]
try:
new_string1 = change_tense(aaa1 + mkey, tense1)
except:
new_string1 = aaa1 + mkey
new.remove(new[14])
new.insert(14, new_string1)
else:
aaa1 = new[14]
new_string1 = aaa1 + mkey
new.remove(new[14])
        new.insert(14, new_string1)
new.remove(new[16])
new.insert(16, product_name)
new.remove(new[18])
new.insert(18, selling_point)
new.remove(new[20])
new.insert(20, contact_info)
new.remove(new[22])
new.insert(22, sname)
body = listToString(new)
SendMail(sub, body) # sendmail
print(body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d2
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "recipient": rname, "ContactMethod": conn, "LatTimeContacted": ltc, "AddtionalOffer": adoff, "Message": bmsg, "feedback": fback, "MainKeywords": mkey, "Sender": sname,"SellingPoint":selling_point,"ContactInfo":contact_info,"ProductName":product_name}])
'''
    return '<h1>Body: {}</h1>'.format(body)
#EventInvite
@app.route('/EventInvite/', methods=['POST'])
def EventInvite():
req_data = request.get_json()
string = "Dear [rname] ,\nYou are invited to attend [event name] to [briefly describe the value of your event] .\n [If your event is featuring an industry expert, include information here] .\nDate/Time/Venue [Date, Time and time zone, Venue] \nCost [Cost if applicable] . \nWe will be discussing: [List 1] , [List 2] , [List 3] and more to go. \nSign up now!, If you cannot attend this event, be sure to check out [special offer] for additional resources.\nHope you see you soon!\nBest regards,\n [sname] "
sub = req_data['sub']
rname = req_data['rname']
ename = req_data['ename']
bmsg = req_data['bmsg']
exp = req_data['exp']
tm = req_data['tm']
cost = req_data['cost']
ben1 = req_data['ben1']
ben2 = req_data['ben2']
ben3 = req_data['ben3']
sof = req_data['sof']
sname = req_data['sname']
new = Convert(string)
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, ename)
new.remove(new[5])
new.insert(5, bmsg)
new.remove(new[7])
new.insert(7, exp)
new.remove(new[9])
new.insert(9, tm)
new.remove(new[11])
new.insert(11, cost)
new.remove(new[13])
new.insert(13, ben1)
new.remove(new[15])
new.insert(15, ben2)
new.remove(new[17])
new.insert(17, ben3)
new.remove(new[19])
new.insert(19, sof)
new.remove(new[21])
new.insert(21, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d4
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "recipient": rname, "Event": ename, "Message": msg, "Venue/Date": tm, "Cost": cost, "Benefit1": ben1, "Benefit2": ben2, "Benefit3": ben3, "SpecialOffer": sof, "Sender": sname}])
'''
return '''<h1>Body: {}</h1>'''.format(body)
# SpecialDiscount
@app.route('/SpecialDiscount/', methods=['POST'])
def SpecialDiscount():
req_data = request.get_json()
string = "Dear [name] ,\n As a valued customer, we would like to offer you a [Special discount offer] for [State length of time offer is valid for] . \n We hope you will take advantage of our special offer. [State the unique value of your offer] . \n As always, do not hesitate to reach out to us if you need any help by emailing us at [your email] or calling us directly [your phone number] . \nThanks, \n [Name] "
new = Convert(string)
print(new)
sub = req_data['sub'] # subject
rname = req_data['rname'] # recipientname
spo = req_data['spo'] # specialoffer
tov = req_data['tov'] # timeofferisvalid
unv = req_data['unv'] # uniquevalue
mail = req_data['mail'] # email
phn = req_data['phn'] # phonenumber
sname = req_data['sname'] # sendername
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
if findtense(spo) is not None:
tense = findtense(spo)
aaa = new[2]
try:
new_string = change_tense(aaa + spo, tense)
except:
new_string = aaa + spo
new.remove(new[2])
new.insert(2, new_string)
else:
aaa = new[2]
new_string = aaa + spo
new.remove(new[2])
        new.insert(2, new_string)
new.remove(new[4])
new.insert(4, tov)
new.remove(new[6])
new.insert(6, unv)
new.remove(new[8])
new.insert(8, mail)
new.remove(new[10])
new.insert(10, phn)
new.remove(new[12])
new.insert(12, sname)
body = listToString(new)
print(body)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.data
col = db.dataemail
db.dataemail.insert_many([{"subject": subject, "recipient": rname, "Contact": scont, "Sender":sname,"Topic": "Special Discount Email Template", "Body": new}])
'''
return '<h1>Body: {}</h1>'.format(body)
#ServiceUpdate
@app.route('/ServiceUpdate/', methods=['POST'])
def ServiceUpdate():
req_data = request.get_json()
string = "Dear [Name] , \n We are excited to introduce to you our latest [Brief outline of new product or service] . [Give a brief description of your new offering and bold or underline any key points] . [Describe the benefits of your new offering] . [Tell people how to get the new offering] . \n [Closing remarks about the new offering] . Please get in touch [Insert contact information] , if you have any questions or if there is anything else we can do to help you [Your audience’s goal] ! \n Best regards, \n [Name] "
new = Convert(string)
sub = req_data['sub']#subject
rname = req_data['rname']#recipientname
onp = req_data['onp']#outlineofnewproduct
bdsc = req_data['bdsc']#briefdesctiption
bno = req_data['bno']#benifitsofnewofferings
howt = req_data['howt']#howtogetofferings
rem = req_data['rem']#remarks
con = req_data['con']#contact
agoal = req_data['agoal']#audience'sgoal
sname = req_data['sname']#sendername
new.remove(new[1])
    new.insert(1, rname)
new.remove(new[3])
if findtense(onp) is not None:
tense = findtense(onp)
aaa = new[2]
try:
new_string = change_tense(aaa + onp, tense)
except:
new_string = aaa + onp
new.remove(new[2])
new.insert(2, new_string)
else:
aaa = new[2]
new_string = aaa + onp
new.remove(new[2])
new.insert(2, new_string)
new.remove(new[4])
if findtense(bdsc) is not None:
tense = findtense(bdsc)
aaa = new[3]
try:
new_string = change_tense(aaa + bdsc, tense)
except:
new_string = aaa + bdsc
new.remove(new[3])
new.insert(3, new_string)
else:
aaa = new[3]
new_string = aaa + bdsc
new.remove(new[3])
new.insert(3,new_string)
new.remove(new[5])
if findtense(bno) is not None:
tense = findtense(bno)
aaa = new[4]
try:
new_string = change_tense(aaa + bno, tense)
except:
new_string = aaa + bno
new.remove(new[4])
new.insert(4, new_string)
else:
aaa = new[4]
new_string = aaa + bno
new.remove(new[4])
new.insert(4, new_string)
new.remove(new[6])
new.insert(6, howt)
new.remove(new[8])
new.insert(8, rem)
new.remove(new[10])
new.insert(10, con)
new.remove(new[12])
if findtense(agoal) is not None:
tense = findtense(agoal)
aaa = new[11]
try:
new_string = change_tense(aaa + agoal, tense)
except:
new_string = aaa + agoal
new.remove(new[11])
new.insert(11, new_string)
else:
aaa = new[11]
new_string = aaa + agoal
new.remove(new[11])
new.insert(11, new_string)
new.remove(new[13])
    new.insert(13, sname)
body = listToString(new)
print(body)
SendMail(sub, body) # sendmail
'''db logic
client = MongoClient("localhost", 27017)
db = client.data
col = db.dataemail
db.dataemail.insert_many([{"subject": subject, "recipient": rname, "Contact": scont, "Sender":sname,"Topic": "Introduction Email Template", "Body": new}])
'''
return '''<h1>Body: {}</h1>'''.format(body)
#ThankPurchase
@app.route('/ThankPurchase/', methods=['POST'])
def ThankPurchase():
req_data = request.get_json()
string = "Dear [name] ,\nOn behalf of [your business name] , I would like to thank you for purchasing [buying/using your product/service] . We sincerely hope that you will continue to enjoy our [name of your product/service] and use it to for [your product/service’s unique selling point] .\nIf you have any questions or if we can further assist you in any way, please feel free to contact on [insert contact method] me.\n [Promote any upcoming events/new products or services/related products/services] .\nI hope to hear from you soon!\nThank you once again,\n [Name]"
new = Convert(string)
sub = req_data['sub']
rname = req_data['rname']
cname = req_data['cname']
np = req_data['np']
msg = req_data['msg']
cont = req_data['cont']
prom = req_data['prom']
sname = req_data['sname']
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, cname)
new.remove(new[5])
new.insert(5, np)
new.remove(new[7])
new.insert(7, np)
new.remove(new[9])
new.insert(9, msg)
new.remove(new[11])
new.insert(11, cont)
new.remove(new[13])
new.insert(13, prom)
new.remove(new[15])
new.insert(15, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d11
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "CompanyName": cname, "Event": ename, "Description": descp, "Location": loc, "Date": ddt, "time": tm, "Link": link, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#Testimonial
@app.route('/Testimonial/', methods=['POST'])
def Testimonial():
req_data = request.get_json()
string = "Dear [Name] ,\n We hope that you are enjoying your experience with [Insert your product/company name] . [Add any specifics about your business or the customer to personalize the email] .\nBecause your opinion means a great deal to us, we would appreciate your feedback. With your permission, we would also love to share your insights about our product/service with potential customers/clients.\nSimply reply to this email with your story. Feel free to write whatever you like, but we have included a couple of questions that you can use as a guideline.\n1. What was the reason why you approached us?\n [r1] \n2. What’s one specific feature you like most about our product/service?\n [r2] \n3. What was the outcome you found from buying this product/using this service?\n [r3] \nWe appreciate your time and thank you again for your business. \nBest regards, \n [Your name] "
new = Convert(string)
sub = "Testimonal"
rname = req_data['rname'] # recipient name
cname = req_data['cname'] # company name
syb = req_data['syb'] # specifics about business/customers
rto = req_data['rto'] # reason to contact
sop = req_data['sop'] # specific feature of product
ofb = req_data['ofb'] # outcome from buying product
sname = req_data['sname'] # senders name
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, cname)
new.remove(new[5])
new.insert(5, syb)
new.remove(new[7])
new.insert(7, rto)
new.remove(new[9])
new.insert(9, sop)
new.remove(new[11])
new.insert(11, ofb)
new.remove(new[13])
new.insert(13, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d8
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "Company": cname, "Message": msg, "ReasonToContact": rto, "Sender": sname}])
'''
return '<h1>Body: {}</h1>'.format(body)
#Review
@app.route('/Review/', methods=['POST'])
def Review():
req_data = request.get_json()
string = "Dear [NAME] ,\n [A personalized greeting] . We appreciate the trust that you have placed in our [YOUR BUSINESS’S NAME] and we will continue to strive to provide you with [Description of what your objective is] . Online reviews are an important part of our business so that we get your insightful feedback and continue to deliver the best service to all of our customers.\nWe would love for you to share your comments with us by leaving a review on [Link to the site or platform you would like your recipient to leave a review on e.g Yelp and Google Maps] .\n [Your business name] welcomes unbiased and informative reviews. If we have not fulfilled your expectations of us, we would like to take this opportunity to do what we can to correct any issues that might have arisen and continue to build our relationship with you.\nThank you for your continued patronage of our business and we look forward to hearing from you.\nBest Regards,\n [Name]"
new = Convert(string)
sub = 'Your Feedback Means a Lot'
rname = req_data['rname']
grt = req_data['grt']
bname = req_data['bname']
msg = req_data['msg']
link = req_data['link']
sname = req_data['sname']
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, grt)
new.remove(new[5])
new.insert(5, bname)
new.remove(new[7])
if findtense(msg) is not None:
tense1 = findtense(msg)
aaa1 = new[6]
try:
new_string1 = change_tense(aaa1 + msg, tense1)
except:
new_string1 = aaa1 + msg
new.remove(new[6])
new.insert(6, new_string1)
else:
aaa1 = new[6]
new_string1 = aaa1 + msg
new.remove(new[6])
        new.insert(6, new_string1)
new.remove(new[8])
new.insert(8, link)
new.remove(new[10])
new.insert(10, bname)
new.remove(new[12])
new.insert(12, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d11
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "CompanyName": cname, "Event": ename, "Description": descp, "Location": loc, "Date": ddt, "time": tm, "Link": link, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#Blog
@app.route('/Blog/', methods=['POST'])
def Blog():
req_data = request.get_json()
string = "Subject Line: [Frequency] Blog Post Roundup \n Dear [Name] , \n Here’s what we’ve been working on for blog posts this [Time period] .Whether you re-read your favorites or catch up on ones you missed, we hope you’ll get something out of them! \n [Insert blog post title hyperlinked to blog URL] \n By: [Author’s name] \n [First paragraph or quick snippet] \n Read full post [Hyperlink to blog post] \n [Insert up to 3 blog posts] \n Best Regards, \n [ Name] "
new = Convert(string)
sub = req_data['sub']
rname = req_data['rname'] #[Name]
tp = req_data['tp'] #timeperiod
link = req_data['link'] #hyperlink
aut = req_data['aut'] #author
firstpa = req_data['firstpa'] #[First paragraph or quick snippet
pblog = req_data['pblog'] #[Insert up to 3 blog posts]
freq= req_data['freq'] #[Frequency]
blgtle = req_data['blgtle'] #[Insert blog post title hyperlinked to blog URL]
sname = req_data['sname'] #[ name]
new.remove(new[1])
new.insert(1,freq)
new.remove(new[3])
new.insert(3,rname)
new.remove(new[6])
new.insert(6,tp)
new.remove(new[8])
new.insert(8,blgtle)
new.remove(new[10])
new.insert(10,aut)
new.remove(new[12])
new.insert(12,firstpa)
new.remove(new[14])
new.insert(14,link)
new.remove(new[16])
new.insert(16,pblog)
new.remove(new[18])
new.insert(18,sname)
body = listToString(new)
SendMail(sub,body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d10
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "TimePeriod": tp, "Link": link, "Author": aut, "Description": descp, "PreviousBlogDescription": pblog, "Sender": sname}])
'''
return '''<h1>Body:{}</h1>'''.format(body)
#UpcEvent
@app.route('/UpcEvent/', methods=['POST'])
def UpcEvent():
req_data = request.get_json()
string = "Dear [rname] ,\nHere at [your company name] , we are committed to providing regular workshops and live practical opportunities to continue learning, grow your network, and get involved with the community. \nAs a valued customer, we want to let you know about our upcoming events. \n• [Event title] : [Brief description of event] \n• Location: [Location] \n• Date: [Event date] \n• When: [time] \nLearn more details and register here [Link to event page] \nWe hope to see you there! Feel free to reply back with questions regarding these events!\nBest regards,\n [sname] "
new = Convert(string)
rname = req_data['rname']
cname = req_data['cname']
ename = req_data['ename']
descp = req_data['descp']
loc = req_data['loc']
ddt = req_data['ddt']
tm = req_data['tm']
link = req_data['link']
sname = req_data['sname']
sub = "UP coming event"
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, cname)
new.remove(new[5])
new.insert(5, ename)
new.remove(new[7])
new.insert(7, descp)
new.remove(new[10])
new.insert(10, loc)
new.remove(new[13])
new.insert(13, ddt)
new.remove(new[16])
new.insert(16, tm)
new.remove(new[18])
new.insert(18, link)
new.remove(new[20])
    new.insert(20, sname)
body = listToString(new)
SendMail(sub,body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d11
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "CompanyName": cname, "Event": ename, "Description": descp, "Location": loc, "Date": ddt, "time": tm, "Link": link, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#SickLeave
@app.route('/SickLeave/', methods=['POST'])
def SickLeave():
req_data = request.get_json()
string = "Dear [rname] ,\n \n Thank you for your email! \n \n I am on a sick leave today with no access to mails. \n \n Please expect a delay in response. In case of anything urgent, please reach out to [pname] [email] or I will respond once I am back in the office. \n \n For anything urgent, please message me on [conn] and I will try to get back to you as soon as I can. \n \n Regards, \n [sname] "
# variables
sub = 'Leave application - Sick Leave' # subject
rname = req_data['rname'] # recipientname
pname = req_data['pname'] # person'sname
email = req_data['email'] # email
conn = req_data['conn'] # contact
sname = req_data['sname'] # sendername
new = Convert(string) # converting string into list
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, pname)
new.remove(new[4])
new.insert(4, email)
new.remove(new[6])
new.insert(6, conn)
new.remove(new[8])
new.insert(8, sname)
body = listToString(new)
print(body)
# db logic
'''client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "Recipient Name": rname, "Alternate contact Name": pname, "Contact": conn, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
@app.route('/DayLongMeetings/', methods=['POST'])
def Daylongmeeting():
req_data = request.get_json()
string = "Hi [rname] \n Thank you for your email. \n \n I am in back to back meetings today and so I may revert late on your email. For live projects, please get in touch with the respective team members – [tm1] [ email1] , [tm2] [ email2] or [tm3] [ email3] .\n \n For anything urgent, please message me on [conn] and I will try to get back to you as soon as I can. \n \n Thanks, \n [sname] "
# variables
sub = req_data['sub'] # subject
rname = req_data['rname'] # recipientname
tm1 = req_data['tm1'] # teammember1
email1 = req_data['email1'] # email1
tm2 = req_data['tm2'] # teammember2
email2 = req_data['email2'] # email2
tm3 = req_data['tm3'] # teammember3
email3 = req_data['email3'] # email3
conn = req_data['conn'] # contact
sname = req_data['sname'] # sendername
new = Convert(string) # converting string into list
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, tm1)
new.remove(new[4])
new.insert(4, email1)
new.remove(new[6])
new.insert(6, tm2)
new.remove(new[7])
new.insert(7, email2)
new.remove(new[9])
new.insert(9, tm3)
new.remove(new[10])
new.insert(10, email3)
new.remove(new[12])
new.insert(12, conn)
new.remove(new[14])
new.insert(14, sname)
body = listToString(new)
print(body)
SendMail(sub, body)
return '''<h1>subject: {}</h1>
<h1>recipientname: {}</h1>
<h1>team member1: {}</h1>
<h1>email1: {}</h1>
<h1>team member2: {}</h1>
<h1>email2: {}</h1>
<h1>team member3: {}</h1>
<h1>email3: {}</h1>
<h1>sendername: {}</h1>
<h1>Body: {}</h1>'''.format(sub, rname, tm1, email1, tm2, email2, tm3, email3, sname, body)
@app.route('/Meeting1/', methods=['POST'])
#Meeting1
def Meeting1():
req_data = request.get_json()
string = "Hi [rname] , \n We have a meeting scheduled tomorrow with the [cname] Team [tname] . Hence, we need to share the issues that we have faced recently. Request everyone to plan your work accordingly in order to attend the meeting. \n The meeting is scheduled on [mdate] - [mtime] . \n \n Regards, [sname] "
# variables
sub = req_data['sub'] # subject
rname = req_data['rname'] # recipientname
cname = req_data['cname'] # company name
tname = req_data['tname'] # team name
mdate = req_data['mdate'] # meeting date
mtime = req_data['mtime'] # meeting time
sname = req_data['sname'] # sendername
new = Convert(string) # converting string into list
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, cname)
new.remove(new[5])
new.insert(5, tname)
new.remove(new[7])
new.insert(7, mdate)
new.remove(new[9])
new.insert(9, mtime)
new.remove(new[11])
new.insert(11, sname)
body = listToString(new)
print(body)
SendMail(sub, body)
return '''<h1>subject: {}</h1>
<h1>recipientname: {}</h1>
<h1>company name: {}</h1>
<h1>team name: {}</h1>
<h1>meeting date: {}</h1>
<h1>meeting time: {}</h1>
<h1>sendername: {}</h1>
<h1>Body: {}</h1>'''.format(sub, rname, cname, tname, mdate, mtime, sname, body)
@app.route('/Meeting2/', methods=['POST'])
def Meeting2():
req_data = request.get_json()
string = "Dear [rname] , \n Sharing the calendar invite for the face to face presentation at [cname] , [cadd] office on [mDate] - [mTime] . \n Regards, [sname] \n ([conn]) "
# variables
sub = req_data['sub'] # subject
rname =req_data['rname'] # recipientname
cname = req_data['cname'] # company name
cadd = req_data['cadd'] # company address
mdate = req_data['mdate'] # meeting date
mtime = req_data['mtime'] # meeting time
sname = req_data['sname'] # sendername
conn = req_data['conn'] # contact
new = Convert(string) # converting string into list
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, cname)
new.remove(new[5])
new.insert(5, cadd)
new.remove(new[7])
new.insert(7, mdate)
new.remove(new[9])
new.insert(9, mtime)
new.remove(new[11])
new.insert(11, sname)
new.remove(new[13])
new.insert(13, conn)
body = listToString(new)
print(body)
SendMail(sub, body)
return '''<h1>subject: {}</h1>
<h1>recipientname: {}</h1>
<h1>company name: {}</h1>
<h1>company address: {}</h1>
<h1>meeting date: {}</h1>
<h1>meeting time: {}</h1>
<h1>sendername: {}</h1>
<h1>contact: {}</h1>
<h1>Body: {}</h1>'''.format(sub, rname, cname, cadd, mdate, mtime, sname, conn, body)
#LeaveMail
@app.route('/LeaveMail/', methods=['POST'])
def LeaveMail():
req_data = request.get_json()
string = "Dear Sender ,\nI am on personal leave from 14th Oct to 17th Oct with no access to mails or calls. Kindly expect certain delay in reply.\nIn case of anything urgent kindly connect with FirstnameLastname1 on [email protected] \nIn case of anything related to Businessfunction1 Businessfunction2 Businessfunction3 please reach out to FirstnameLastname2 [email protected]; +91 XXXXX XXXXX \nRegards,\n FirstnameLastname"
new = Convert(string)
sub = 'Leave Mail - Out of Office 3'
rname = req_data['rname']
leavedate = req_data['leavedate']
name1 = req_data['name1']
email = req_data['email']
bfunc1 = req_data['bfunc1']
bfunc2 = req_data['bfunc2']
bfunc3 = req_data['bfunc3']
name2 = req_data['name2']
bemail = req_data['bemail']
conn = req_data['conn']
sname = req_data['sname']
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, leavedate)
new.remove(new[5])
new.insert(5, name1)
new.remove(new[7])
new.insert(7, email)
new.remove(new[9])
new.insert(9, bfunc1)
new.remove(new[10])
new.insert(10, bfunc2)
new.remove(new[11])
new.insert(11, bfunc3)
new.remove(new[13])
new.insert(13, name2)
new.remove(new[14])
new.insert(14, bemail)
new.remove(new[15])
new.insert(15, conn)
new.remove(new[17])
new.insert(17, sname)
body = listToString(new)
SendMail(sub,body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d11
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "CompanyName": cname, "Event": ename, "Description": descp, "Location": loc, "Date": ddt, "time": tm, "Link": link, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#Deliverablesentrevision
@app.route('/Deliverable/', methods=['GET', 'POST'])
def Deliverable():
req_data = request.get_json()
string=" Subject: Deliverable sent with revisions \n Hi [name] ,\n Sharing the revised deliverable for all the changes as discussed over the call- \n 1.Have mentioned Change [1] \n2. Have mentioned Change [2] \n3.Have checked Change [3] \n4. Also, we have included the Change [4] .\nPlease do let me know in case of any clarifications.\nRegards, [name] "
new = Convert(string)
sub = 'Deliverable'
rname = req_data['rname']
change1= req_data['change1']
change2 = req_data['change2']
Checked_Change = req_data['Checked_Change']
Change_included = req_data['Change_included']
sname = req_data['Name']
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, change1)
new.remove(new[5])
new.insert(5, change2)
new.remove(new[7])
new.insert(7, Checked_Change)
new.remove(new[9])
new.insert(9, Change_included)
new.remove(new[11])
new.insert(11, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d9
col = db.dataemail
db.dataemail.insert_many([{"Recipient-Name:": rname, "Change 1: ": change1, "Change 2:": change2, "Checked Change": Checked_Change, "Change Included": Change_included, "Sender-Name": sname}])
'''
return '''<h1>Body:{}</h1>'''.format(body)
# DeliverableSent
@app.route('/DeliverableSent/', methods=['POST'])
def DeliverableSent():
req_data = request.get_json()
string = "Hi [Name] ,\nSharing the deliverable for the project.\nPlease let us know when we can connect to discuss the same.\nRegards,\n [Your name] "
new = Convert(string)
sub = 'Deliverable'
rname = req_data['rname'] # recipient name
sname = req_data['sname'] # senders name
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, sname)
body = listToString(new)
SendMail(sub,body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d8
col = db.dataemail
db.dataemail.insert_many([{"recipient": rname, "Sender": sname}])'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
# SickLeave1
@app.route('/SickLeave1/', methods=['POST'])
def SickLeave1():
req_data = request.get_json()
string = "Hi [name] ,\n Today I am not keeping well as I am having through stomach upset and fever. Would be taking leave for today.\nHope this is fine.\nRegards,\n [Your name]"
new = Convert(string)
sub = "Sick leave"
rname = req_data['rname']
sname = req_data['sname']
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, sname)
body = listToString(new)
SendMail(sub,body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "Sender": sname}])
'''
return '''<h1>Mail body:{}</h1>'''.format(body)
# SickLeave2
@app.route('/SickLeave2/', methods=['POST'])
def SickLeave2():
req_data = request.get_json()
string = "Dear (Managername) \nWanted to inform you that I am down with fever and diarrhoea and hence will be taking a leave today.\nI shall complete all the pending tasks once I resume office back tomorrow.\nRegards,\n employee name"
new = Convert(string)
rname = req_data['rname'] # Manager-Name
sname = req_data['sname'] # Employee-Name
sub = "Leave application - Sick Leave 2" # subject
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
# SickLeave3
@app.route('/SickLeave3/', methods=['POST'])
def SickLeave3():
req_data = request.get_json()
string = "Dear (Manager Name) ,\nJust wanted to inform you that I will be taking a leave due to some unavoidable personal circumstances.\nWill be reachable over the calls in case of any queries.\nThanks,\n Employee"
new = Convert(string)
rname = req_data['rname'] # Manager-Name
sname = req_data['sname'] # Employee-Name
sub = 'Leave application - Sick Leave 3' # subject
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#AnnualLeave
@app.route('/AnnualLeave/', methods=['POST'])
def AnnualLeave():
req_data = request.get_json()
string = "Hi (Manager name) \nWanted to inform you that I am planning to take the annual leave from DD/MM/YY to DD/MM/YY for X working days.\nWould request you to approve the same. We can discuss the pending tasks and plan the same accordingly to avoid any last-minute rush.\nI shall also be applying the same in the system as well.\nRegards,\n employeename"
new = Convert(string)
rname = req_data['rname'] #Manager-Name
tp = req_data['tp'] #Time-Period
nod = req_data['nod'] #number-of-days
sname = req_data['sname'] #Employee-Name
sub = 'Leave application - Annual Leave' #subject
new.remove(new[1])
new.insert(1, rname)
new.remove(new[3])
new.insert(3, tp)
new.remove(new[5])
new.insert(5, nod)
new.remove(new[7])
new.insert(7, sname)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "TimePeriod": tp, "Numberofdays": nod, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#Resignation
@app.route('/Resignation/', methods=['POST'])
def Resignation():
req_data = request.get_json()
string = "Subject: Re: Resignation MailDear (Manager's Name) , \n Please accept this as formal notification of resigning from my position as (Mention Designation) with (Mention Company Name) . \n I enjoyed working under you and have learnt a lot, which will definitely help me in my career ahead. \n This was not an easy decision on my part, however, I have considered this option as the new opportunity provides better prospects of learning and growth. \n I wish you and (Mention Company name/Team) all the best. I do hope our paths cross again in the future.\n Regards,"
new = Convert(string)
sub = "Resignation Mail"
mname = req_data['mname']
des = req_data['des']
comp = req_data['comp']
new.remove(new[1])
new.insert(1,mname)
new.remove(new[3])
new.insert(3,des)
new.remove(new[5])
new.insert(5,comp)
new.remove(new[7])
new.insert(7,comp)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "TimePeriod": tp, "Numberofdays": nod, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
#Farewell
@app.route('/Farewell/', methods=['POST'])
def Farewell():
req_data = request.get_json()
string = "Hello Everyone, \n I would like to take this moment and inform you that today is my last day at (mention comapany name) and I wanted to take a moment to let you know how grateful I am to have had the opportunity to work with all of you.\n Though I never expected my stint to be short, I has an enriching experience and learnings I have got during my tenure here. These learnings will always hold me in good stead - both professionally & personally. I have made some life- long friends at (mention comapany name) , with whom I connected beyond work. I am sure that our friendship will endure forever. \n While I am excited about what lies ahead of me, leaving behind such an amazing people is definitely bittersweet. \n I also wish to thank the maagement for giving me the opportunity to learn, grow and become part of this organization. \n Since it is a small world and as I move on, I am sure that our paths will cross again. Till then you can reach me at (+91XXXXXXXXXX) and (mention personal email address) \nLinkedin: (pasteprofile link here) \n Thank you all for the support and help. Keep rocking! Cheers!!! \n Warm Regards,"
new = Convert(string)
sub = "Farewell/Last Working Day mail"
comp = req_data['comp']
email = req_data['email']
lkd = req_data['lkd']
con = req_data['con']
new.remove(new[1])
new.insert(1,comp)
new.remove(new[3])
new.insert(3,comp)
new.remove(new[7])
new.insert(7,con)
new.remove(new[9])
new.insert(9,email)
new.remove((new[11]))
new.insert(11,lkd)
body = listToString(new)
SendMail(sub, body)
'''db logic
client = MongoClient("localhost", 27017)
db = client.d1
col = db.dataemail
db.dataemail.insert_many([{"subject": sub, "ManagerName": rname, "TimePeriod": tp, "Numberofdays": nod, "Sender": sname}])
'''
return '''<h1>Mail Body: {}</h1>'''.format(body)
@app.route('/Reminder/', methods=['POST'])
def Reminder():
req_data = request.get_json()
msg = req_data['msg']
tm = req_data['tm']
#str to int()
n = int(tm)
n = n * 60
time.sleep(n) #sleep
#notification
toaster = ToastNotifier()
toaster.show_toast("Notification",
msg,
icon_path="custom.ico",
duration=5)
return '''<h1>Message: {}</h1>'''.format(msg)
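# A hedged usage sketch (not part of the app): once the server is running,
# the /Reminder/ endpoint can be exercised with `requests`; the URL and port
# below assume Flask's defaults.
#
#   import requests
#   requests.post("http://127.0.0.1:5000/Reminder/",
#                 json={"msg": "Stand-up meeting", "tm": "1"})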
app.run()
|
python
|
from mayan.apps.appearance.classes import Icon
icon_statistics = Icon(driver_name='fontawesome', symbol='chart-line')
icon_execute = Icon(driver_name='fontawesome', symbol='cog')
icon_namespace_details = Icon(driver_name='fontawesome', symbol='chart-line')
icon_namespace_list = Icon(driver_name='fontawesome', symbol='chart-line')
icon_view = Icon(driver_name='fontawesome', symbol='eye')
|
python
|
from distutils.core import setup
# py2exe stuff
import py2exe, os
# find pythoncard resources to add as 'data_files'
pycard_resources=[]
for filename in os.listdir('.'):
if filename.find('.rsrc.')>-1:
pycard_resources+=[filename]
# includes for py2exe
includes=[]
for comp in ['button','multicolumnlist','statictext', 'textfield']:
includes += ['PythonCard.components.'+comp]
print 'includes',includes
opts = { 'py2exe': { 'includes':includes } }
print 'opts',opts
# end of py2exe stuff
setup(name='w3jdebug',
version='0.1',
url='http://www.wc3campaigns.net',
author='PipeDream',
author_email='[email protected]',
package_dir={'blarg':'.'},
packages=['blarg'],
data_files=[('.',pycard_resources)],
console=['pyw3jdebug.py'],
options=opts
)
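# Hedged usage note: a py2exe build of this script is typically produced with
#   python setup.py py2exe
# which places the executable and the PythonCard .rsrc. data files under dist/.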
|
python
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import shutil
from pathlib import Path
from typing import Tuple
import subprocess
import tempfile
import pytest
from flexmock import flexmock
from packit.utils.commands import cwd
from packit.utils.repo import create_new_repo
from tests.spellbook import (
initiate_git_repo,
UPSTREAM,
prepare_dist_git_repo,
DISTGIT,
DISTGIT_WITH_AUTOCHANGELOG,
DG_OGR,
UPSTREAM_SPEC_NOT_IN_ROOT,
UPSTREAM_WITH_MUTLIPLE_SOURCES,
UPSTREAM_WEIRD_SOURCES,
)
# define own tmp_path fixture for older version of pytest (Centos)
try:
from _pytest import tmpdir
_ = tmpdir.tmp_path
except (ImportError, AttributeError, KeyError):
@pytest.fixture()
def tmp_path():
TMP_DIR = "/tmp/pytest_tmp_path/"
Path(TMP_DIR).mkdir(exist_ok=True, parents=True)
return Path(tempfile.mkdtemp(prefix=TMP_DIR))
def get_git_repo_and_remote(
target_dir: Path, repo_template_path: Path
) -> Tuple[Path, Path]:
"""
:param target_dir: tmpdir from pytest - we'll work here
:param repo_template_path: git repo template from tests/data/
"""
u_remote_path = target_dir / f"upstream_remote-{repo_template_path.name}"
u_remote_path.mkdir(parents=True, exist_ok=True)
create_new_repo(u_remote_path, ["--bare"])
u = target_dir / f"local_clone-{repo_template_path.name}"
shutil.copytree(repo_template_path, u)
initiate_git_repo(u, tag="0.1.0", push=True, upstream_remote=str(u_remote_path))
return u, u_remote_path
@pytest.fixture()
def upstream_and_remote(tmp_path) -> Tuple[Path, Path]:
return get_git_repo_and_remote(tmp_path, UPSTREAM)
@pytest.fixture()
def upstream_and_remote_with_multiple_sources(tmp_path) -> Tuple[Path, Path]:
return get_git_repo_and_remote(tmp_path, UPSTREAM_WITH_MUTLIPLE_SOURCES)
@pytest.fixture()
def upstream_and_remote_weird_sources(tmp_path) -> Tuple[Path, Path]:
return get_git_repo_and_remote(tmp_path, UPSTREAM_WEIRD_SOURCES)
@pytest.fixture()
def upstream_spec_not_in_root(tmp_path) -> Tuple[Path, Path]:
return get_git_repo_and_remote(tmp_path, UPSTREAM_SPEC_NOT_IN_ROOT)
@pytest.fixture()
def distgit_and_remote(tmp_path) -> Tuple[Path, Path]:
d_remote_path = tmp_path / "dist_git_remote"
d_remote_path.mkdir(parents=True, exist_ok=True)
create_new_repo(d_remote_path, ["--bare"])
d = tmp_path / "dist_git"
shutil.copytree(DISTGIT, d)
initiate_git_repo(
d,
push=True,
remotes=[
("origin", str(d_remote_path)),
("i_am_distgit", "https://src.fedoraproject.org/rpms/python-ogr"),
],
)
prepare_dist_git_repo(d)
return d, d_remote_path
@pytest.fixture()
def distgit_with_autochangelog_and_remote(tmp_path) -> Tuple[Path, Path]:
d_remote_path = tmp_path / "autochangelog_dist_git_remote"
d_remote_path.mkdir(parents=True, exist_ok=True)
create_new_repo(d_remote_path, ["--bare"])
d = tmp_path / "autochangelog_dist_git"
shutil.copytree(DISTGIT_WITH_AUTOCHANGELOG, d)
initiate_git_repo(
d,
push=True,
remotes=[
("origin", str(d_remote_path)),
("i_am_distgit", "https://src.fedoraproject.org/rpms/python-ogr"),
],
)
prepare_dist_git_repo(d)
return d, d_remote_path
@pytest.fixture()
def ogr_distgit_and_remote(tmp_path) -> Tuple[Path, Path]:
d_remote_path = tmp_path / "ogr_dist_git_remote"
d_remote_path.mkdir(parents=True, exist_ok=True)
create_new_repo(d_remote_path, ["--bare"])
d = tmp_path / "ogr_dist_git"
shutil.copytree(DG_OGR, d)
initiate_git_repo(
d,
push=True,
remotes=[
("origin", str(d_remote_path)),
("i_am_distgit", "https://src.fedoraproject.org/rpms/python-ogr"),
],
)
prepare_dist_git_repo(d)
return d, d_remote_path
@pytest.fixture(params=["upstream", "ogr-distgit"])
def upstream_or_distgit_path(
request, upstream_and_remote, distgit_and_remote, ogr_distgit_and_remote
):
"""
Parametrize the test to upstream, downstream [currently skipped] and ogr distgit
"""
return {
"upstream": upstream_and_remote[0],
"distgit": distgit_and_remote[0],
"ogr-distgit": ogr_distgit_and_remote[0],
}[request.param]
@pytest.fixture(
params=["upstream", "distgit", "ogr-distgit", "upstream-with-multiple-sources"]
)
def cwd_upstream_or_distgit(
request,
upstream_and_remote,
distgit_and_remote,
ogr_distgit_and_remote,
upstream_and_remote_with_multiple_sources,
):
"""
Run the code from upstream, downstream and ogr-distgit.
When using be careful to
- specify this fixture in the right place
(the order of the parameters means order of the execution)
- to not overwrite the cwd in the other fixture or in the test itself
"""
cwd_path = {
"upstream": upstream_and_remote[0],
"distgit": distgit_and_remote[0],
"ogr-distgit": ogr_distgit_and_remote[0],
"upstream-with-multiple-sources": upstream_and_remote_with_multiple_sources[0],
}[request.param]
with cwd(cwd_path):
yield cwd_path
@pytest.fixture
def copr_client_mock():  # fixtures cannot take plain arguments; the chroot list is fixed below
get_list_return = {
"centos-stream-aarch64": "",
"custom-1-x86_64": "",
"epel-6-x86_64": "",
"epel-8-x86_64": "",
"fedora-31-aarch64": "",
"fedora-31-armhfp": "This is emulated on x86_64",
"fedora-31-x86_64": "",
"fedora-32-armhfp": "This is emulated on x86_64",
"fedora-32-i386": "Not-released Koji packages",
"fedora-32-s390x": "This is emulated on x86_64",
"fedora-32-x86_64": "",
"fedora-33-x86_64": "",
"fedora-eln-x86_64": "",
"fedora-rawhide-aarch64": "",
"fedora-rawhide-x86_64": "",
}
return flexmock(mock_chroot_proxy=flexmock(get_list=lambda: get_list_return))
@pytest.fixture(autouse=True, scope="function")
def configure_git():
CMDS = [
["git", "config", "--global", "user.email", "[email protected]"],
["git", "config", "--global", "user.name", "Packit Test"],
]
# verify that git is already configured
try:
output = subprocess.check_output(["git", "config", "-l", "--global"])
except subprocess.CalledProcessError:
output = ""
if "user.name" in output if isinstance(output, str) else output.decode():
return
for item in CMDS:
subprocess.call(item)
|
python
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging, hashlib, sys, os
import numpy as np
np.set_printoptions(precision=2)
from opticks.ana.base import opticks_main, manual_mixin
from opticks.ana.nbase import Buf
from ddbase import Dddb
from ddpart import Parts, ddpart_manual_mixin
from geom import Part
log = logging.getLogger(__name__)
from opticks.analytic.treebase import Tree, Node
class NodePartitioner(object):
"""
All NodePartitioner methods are added to treebase.Node
on calling the below treepart_manual_mixin function
"""
def parts(self):
"""
Divvy up geometry into parts that
split "intersection" into union lists. This boils
down to judicious choice of bounding box according
        to intersects of the source geometry.
"""
if not hasattr(self, '_parts'):
_parts = self.lv.parts()
for p in _parts:
p.node = self
pass
self._parts = _parts
pass
return self._parts
def num_parts(self):
parts = self.parts()
return len(parts)
class TreePartitioner(object):
"""
All TreePartitioner methods are added to treebase.Tree
on calling the below treepart_manual_mixin function
"""
@classmethod
def num_parts(cls):
nn = cls.num_nodes()
tot = 0
for i in range(nn):
node = cls.get(i)
tot += node.num_parts()
pass
return tot
@classmethod
def parts(cls):
tnodes = cls.num_nodes()
tparts = cls.num_parts()
log.info("tnodes %s tparts %s " % (tnodes, tparts))
pts = Parts()
gcsg = []
for i in range(tnodes):
node = cls.get(i)
log.debug("tree.parts node %s parent %s" % (repr(node),repr(node.parent)))
log.info("tree.parts node.lv %s " % (repr(node.lv)))
log.info("tree.parts node.pv %s " % (repr(node.pv)))
npts = node.parts()
pts.extend(npts)
if hasattr(npts, 'gcsg') and len(npts.gcsg) > 0:
for c in npts.gcsg:
c.node = node
pass
gcsg.extend(npts.gcsg)
pass
pass
assert len(pts) == tparts
pts.gcsg = gcsg
return pts
@classmethod
def convert(cls, parts, explode=0.):
"""
:param parts: array of parts, essentially just a list of part instances
:return: np.array buffer of parts
Tree.convert
#. collect Part instances from each of the nodes into list
#. serialize parts into array, converting relationships into indices
#. this cannot live at lower level as serialization demands to
allocate all at once and fill in the content, also conversion
of relationships to indices demands an all at once conversion
Five solids of DYB PMT represented in part buffer
* part.typecode 1:sphere, 2:tubs
* part.flags, only 1 for tubs
* part.node.index 0,1,2,3,4 (0:4pt,1:4pt,2:2pt,3:1pt,4:1pt)
::
In [19]: p.buf.view(np.int32)[:,(1,2,3),3]
Out[19]:
Buf([[0, 1, 0], part.flags, part.typecode, nodeindex
[0, 1, 0],
[0, 1, 0],
[1, 2, 0],
[0, 1, 1],
[0, 1, 1],
[0, 1, 1],
[1, 2, 1],
[0, 1, 2],
[0, 1, 2],
[0, 1, 3],
[0, 2, 4]], dtype=int32)
In [22]: p.buf.view(np.int32)[:,1,1] # 1-based part index
Out[22]: Buf([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=int32)
* where are the typecodes hailing from, not using OpticksCSG.h enum ?
nope hardcoded into geom.py Part.__init__ Sphere:1, Tubs:2 Box:3
"""
data = np.zeros([len(parts),4,4],dtype=np.float32)
for i,part in enumerate(parts):
#print "part (%d) tc %d %r " % (i, part.typecode, part)
data[i] = part.as_quads()
data[i].view(np.int32)[1,1] = i + 1 # 1-based part index, where parent 0 means None
data[i].view(np.int32)[1,2] = 0 # set to boundary index in C++ ggeo-/GPmt
data[i].view(np.int32)[1,3] = part.flags # used in intersect_ztubs
data[i].view(np.int32)[2,3] = part.typecode # bbmin.w : typecode
data[i].view(np.int32)[3,3] = part.node.index # bbmax.w : solid index
if explode>0:
dx = i*explode
data[i][0,0] += dx
data[i][2,0] += dx
data[i][3,0] += dx
pass
pass
buf = data.view(Buf)
buf.boundaries = map(lambda _:_.boundary, parts)
if hasattr(parts, "gcsg"):
buf.gcsg = parts.gcsg
buf.materials = map(lambda cn:cn.lv.material,filter(lambda cn:cn.lv is not None, buf.gcsg))
buf.lvnames = map(lambda cn:cn.lv.name,filter(lambda cn:cn.lv is not None, buf.gcsg))
buf.pvnames = map(lambda lvn:lvn.replace('lv','pv'), buf.lvnames)
pass
return buf
def treepart_manual_mixin():
"""
Using manual mixin approach to avoid changing
the class hierarchy whilst still splitting base
functionality from partitioner methods.
"""
manual_mixin(Node, NodePartitioner)
manual_mixin(Tree, TreePartitioner)
if __name__ == '__main__':
args = opticks_main()
ddpart_manual_mixin() # add methods to Tubs, Sphere, Elem and Primitive
treepart_manual_mixin() # add methods to Node and Tree
g = Dddb.parse(args.apmtddpath)
lv = g.logvol_("lvPmtHemi")
tr = Tree(lv)
parts = tr.parts()
partsbuf = tr.convert(parts)
log.warning("use analytic so save the PMT, this is just for testing tree conversion")
|
python
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import six
import tensorflow as tf
from tfsnippet.bayes import Bernoulli
from tests.helper import TestCase
from tests.bayes.distributions._helper import (DistributionTestMixin,
BigNumberVerifyTestMixin,
AnalyticKldTestMixin)
class BernoulliTestCase(TestCase,
DistributionTestMixin,
BigNumberVerifyTestMixin,
AnalyticKldTestMixin):
dist_class = Bernoulli
simple_params = {
'logits': np.asarray([0.0, 0.7, -0.7], dtype=np.float32)
}
extended_dimensional_params = {
k: v + np.asarray([[0.0], [-0.5], [0.5], [1.0], [-1.0]],
dtype=np.float32)
for k, v in six.iteritems(simple_params)
}
kld_simple_params = {
'logits': np.asarray([1.0, -0.3, 2.0], dtype=np.float32),
}
is_continuous = False
is_reparameterized = True
def get_shapes_for_param(self, **params):
return (), params['logits'].shape
def get_dtype_for_param_dtype(self, param_dtype):
return tf.int32
def log_prob(self, x, group_event_ndims=None, **params):
logits = params['logits']
log_p = -np.log(1. + np.exp(-logits))
log_one_minus_p = -np.log(1. + np.exp(logits))
log_prob = x * log_p + (1 - x) * log_one_minus_p
if group_event_ndims:
grouped_shape = log_prob.shape[: -group_event_ndims] + (-1,)
log_prob = np.sum(log_prob.reshape(grouped_shape), axis=-1)
return log_prob
def get_mean_stddev(self, **params):
p = 1. / (1 + np.exp(-params['logits']))
return p, p * (1. - p)
def analytic_kld(self, params1, params2):
p = 1. / (1. + np.exp(-params1['logits']))
q = 1. / (1. + np.exp(-params2['logits']))
return (
p * np.log(p) + (1 - p) * np.log(1 - p)
- p * np.log(q) - (1 - p) * np.log(1 - q)
)
# test cases for Bernoulli distribution
def test_construction_error(self):
with self.get_session():
# test construction due to data type error
with self.assertRaisesRegex(
TypeError, 'Bernoulli distribution parameters must be '
'float numbers'):
Bernoulli(1)
# test construction with no parameter
with self.assertRaisesRegex(
ValueError, 'One and only one of `logits`, `probs` should '
'be specified.'):
Bernoulli()
with self.assertRaisesRegex(
ValueError, 'One and only one of `logits`, `probs` should '
'be specified.'):
Bernoulli(1., 2.)
def test_other_properties(self):
logits = self.simple_params['logits']
probs, _ = self.get_mean_stddev(logits=logits)
with self.get_session():
# test construction with logits
dist = Bernoulli(logits=logits)
self.assert_allclose(dist.logits.eval(), logits)
self.assert_allclose(dist.mean.eval(), probs)
self.assert_allclose(dist.probs.eval(), probs)
# test construction with probs
dist = Bernoulli(probs=probs)
self.assert_allclose(dist.logits.eval(), logits)
self.assert_allclose(dist.mean.eval(), probs)
self.assert_allclose(dist.probs.eval(), probs)
def test_specify_data_type(self):
dist = Bernoulli(dtype=tf.int64, **self.simple_params)
self.assertEqual(dist.dtype, tf.int64)
dist = Bernoulli(dtype='int32', **self.simple_params)
self.assertEqual(dist.dtype, tf.int32)
def test_sampling_with_probs(self):
logits = self.simple_params['logits']
probs, _ = self.get_mean_stddev(logits=logits)
with self.get_session(use_gpu=True):
params = {'probs': probs}
x, prob, log_prob = self.get_samples_and_prob(**params)
value_shape, batch_shape = \
self.get_shapes_for_param(**self.simple_params)
np.testing.assert_equal(x.shape, batch_shape + value_shape)
self.assert_allclose(
prob, self.prob(x, **self.simple_params))
self.assert_allclose(
log_prob, self.log_prob(x, **self.simple_params))
def test_analytic_kld_with_probs(self):
logits = self.simple_params['logits']
probs, _ = self.get_mean_stddev(logits=logits)
kl_logits = self.kld_simple_params['logits']
kl_probs, _ = self.get_mean_stddev(logits=kl_logits)
with self.get_session(use_gpu=True):
params = {'probs': probs}
kld_params = {'probs': kl_probs}
self.assert_allclose(
(self.dist_class(**params).
analytic_kld(self.dist_class(**kld_params))).eval(),
self.analytic_kld(self.simple_params, self.kld_simple_params)
)
def test_boundary_values_of_probs(self):
with self.get_session():
dist = self.dist_class(probs=0.)
np.testing.assert_almost_equal(dist.probs.eval(), 0.)
np.testing.assert_almost_equal(dist.prob(0).eval(), 1.)
np.testing.assert_almost_equal(dist.prob(1).eval(), 0.)
np.testing.assert_almost_equal(dist.log_prob(0).eval(), 0.)
dist = self.dist_class(probs=1.)
np.testing.assert_almost_equal(dist.probs.eval(), 1.)
np.testing.assert_almost_equal(dist.prob(0).eval(), 0.)
np.testing.assert_almost_equal(dist.prob(1).eval(), 1.)
np.testing.assert_almost_equal(dist.log_prob(1).eval(), 0.)
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
" The code for the sc2 environment, maybe add some functions for the original pysc2 version "
# modified from AlphaStar pseudo-code
import numpy as np
from s2clientprotocol import sc2api_pb2 as sc_pb
from pysc2 import run_configs
from pysc2.lib import features
from pysc2.lib import point
from pysc2.env import sc2_env
from alphastarmini.core.rl.alphastar_agent import RandomAgent, AlphaStarAgent
from alphastarmini.core.rl.env_run_loop import run_loop
from alphastarmini.lib.hyper_parameters import AlphaStar_Raw_Interface_Format_Params as ARIFP
__author__ = "Ruo-Ze Liu"
debug = False
class SC2Environment:
"""See PySC2 environment."""
"""
It is noted that AlphaStar only acts on pysc2 3.0 version! If with previouse pysc2 version,
no raw observations and actions can be used. And the pysc2-3.0 is released on September, 2019,
the time AlphaStar was publiciliy accessed in the Nature. These informations are important, please
do not miss them.
"""
def __init__(self, settings):
pass
def step(self, home_action, away_action):
pass
def reset(self):
pass
# from the player timestep to get the outcome
# not used
def get_env_outcome(timestep):
outcome = 0
o = timestep.raw_observation
player_id = o.observation.player_common.player_id
for r in o.player_result:
if r.player_id == player_id:
            outcome = sc2_env.possible_results.get(r.result, 0)  # dict.get takes a positional default
frames = o.observation.game_loop
return outcome
def test_multi_player_env(agent_interface_format):
steps = 10000
step_mul = 1
players = 2
agent_names = ["Protoss", "Terran"]
# create env
with sc2_env.SC2Env(
map_name="Simple64",
players=[sc2_env.Agent(sc2_env.Race.protoss, agent_names[0]),
sc2_env.Agent(sc2_env.Race.terran, agent_names[1])],
step_mul=step_mul,
game_steps_per_episode=steps * step_mul // 2,
agent_interface_format=agent_interface_format,
version=None,
random_seed=1) as env:
# begin env
#agents = [RandomAgent(x) for x in agent_names]
agents = [AlphaStarAgent(x) for x in agent_names]
run_loop(agents, env, steps)
def random_agent_test():
aif_1 = sc2_env.AgentInterfaceFormat(**ARIFP._asdict())
aif_2 = sc2_env.AgentInterfaceFormat(**ARIFP._asdict())
aif = [aif_1, aif_2]
test_multi_player_env(aif)
# not used
def run_thread_test():
run_config = run_configs.get(version="3.16.1")
camera_width = 24
interface = sc_pb.InterfaceOptions(
raw=True, score=True,
feature_layer=sc_pb.SpatialCameraSetup(width=camera_width))
screen_resolution = point.Point(32, 32)
minimap_resolution = point.Point(32, 32)
screen_resolution.assign_to(interface.feature_layer.resolution)
minimap_resolution.assign_to(interface.feature_layer.minimap_resolution)
with run_config.start(full_screen=False) as controller:
pass
def test(on_server=False):
random_agent_test()
|
python
|
"""definition of problem instance
Author: Keisuke Okumura
Affiliation: TokyoTech & OSX
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional, Union
import numpy as np
from .obstacle import Obstacle, ObstacleSphere
from .static_objects import StaticObjects
@dataclass(frozen=True)
class Instance:
"""definition of problem instance"""
num_agents: int # number of agents
starts: list[np.ndarray] # pos * num_agents, pos: np.ndarray
goals: list[np.ndarray] # pos * num_agents, pos: np.ndarray
max_speeds: list[float] # scalar * num_agents
rads: list[float] # list of radius, assuming sphere
goal_rads: list[float] # list of goal radius, assuming sphere
obs: list[Obstacle] = field(default_factory=list) # user defined obstacles
dim: int = 2 # dimension
objs: StaticObjects = field(
init=False
) # static objects, set automatically
def __post_init__(self):
"""to maintain immutability"""
object.__setattr__(self, "objs", StaticObjects(self.obs))
def __getstate__(self):
"""called when pickling"""
state = self.__dict__.copy()
del state["objs"]
return state
def __setstate__(self, state):
"""called when un-pickling"""
self.__dict__.update(state)
self.__post_init__()
# below: several functions that generate instances automatically
def generate_ins_2d_with_obs_hetero(
num_agents: int,
max_speeds_cands: Union[list[float], str],
rads_cands: Union[list[float], str],
obs_num: int,
obs_size_lower_bound: float = 0.05,
obs_size_upper_bound: float = 0.2,
min_dist: Optional[float] = None,
) -> Instance:
"""generate one instance with heterogeneous agents and sphere obstacles
Args:
num_agents (int): number of agents
max_speeds_cands (Union[list[float], str]): candidates of max speeds
rads_cands (Union[list[float], str]): candidates of radius
obs_num (int): number of obstacles
        obs_size_lower_bound (:obj:`float`, optional): lower bound of obstacle size
        obs_size_upper_bound (:obj:`float`, optional): upper bound of obstacle size
min_dist (:obj:`Optional[float]`): minimum distance between agents
Returns:
Instance: instance
"""
# accept string with comma
if isinstance(rads_cands, str):
rads_cands = [float(i) for i in rads_cands.split(",")]
if isinstance(max_speeds_cands, str):
max_speeds_cands = [float(i) for i in max_speeds_cands.split(",")]
obs: list[ObstacleSphere] = [
ObstacleSphere(
pos=np.random.rand(2),
rad=np.random.rand()
* (obs_size_upper_bound - obs_size_lower_bound)
/ 2
+ obs_size_lower_bound / 2,
)
for _ in range(obs_num)
]
# set radius and max_speeds
rads: list[float] = [
rads_cands[i] for i in np.random.choice(len(rads_cands), num_agents)
]
max_speeds: list[float] = [
max_speeds_cands[i]
for i in np.random.choice(len(max_speeds_cands), num_agents)
]
# set starts
starts: list[np.ndarray] = []
while len(starts) < num_agents:
rad = rads[len(starts)]
pos = np.random.rand(2) * (1 - 2 * rad) + rad
# check collisions with obstacles
if any([np.linalg.norm(pos - o.pos) <= rad + o.rad for o in obs]):
continue
# check collisions with other starts
if any(
[
np.linalg.norm(pos - starts[i]) <= rad + rads[i]
for i in range(len(starts))
]
):
continue
starts.append(pos)
# set goals
goals: list[np.ndarray] = []
while len(goals) < num_agents:
rad = rads[len(goals)]
pos = np.random.rand(2) * (1 - 2 * rad) + rad
if (
min_dist is not None
and np.linalg.norm(pos - starts[len(goals)]) < min_dist
):
continue
if any([np.linalg.norm(pos - o.pos) <= rad + o.rad for o in obs]):
continue
# check collisions with other goals
if any(
[
np.linalg.norm(pos - goals[i]) <= rad + rads[i]
for i in range(len(goals))
]
):
continue
goals.append(pos)
return Instance(
num_agents=num_agents,
starts=starts,
goals=goals,
max_speeds=max_speeds,
rads=rads,
goal_rads=[0.01] * num_agents,
obs=obs, # type: ignore
dim=2,
)
def generate_ins_2d_with_obs_hetero_nonfix_agents(
num_agents_min: int,
num_agents_max: int,
max_speeds_cands: Union[list[float], str],
rads_cands: Union[list[float], str],
obs_num: int,
obs_size_lower_bound: float = 0.05,
obs_size_upper_bound: float = 0.2,
min_dist: Optional[float] = None,
) -> Instance:
"""generate one instance with various number of
heterogeneous agents and sphere obstacles
Note:
For details, check generate_ins_2d_with_obs_hetero
"""
num_agents = np.random.randint(num_agents_min, num_agents_max)
return generate_ins_2d_with_obs_hetero(
num_agents=num_agents,
max_speeds_cands=max_speeds_cands,
rads_cands=rads_cands,
obs_num=obs_num,
obs_size_lower_bound=obs_size_lower_bound,
obs_size_upper_bound=obs_size_upper_bound,
min_dist=min_dist,
)
def generate_ins_2d_with_obs_sphere(
num_agents: int,
max_speed: float,
rad: float,
obs_num: int,
obs_size_lower_bound: float = 0.05,
obs_size_upper_bound: float = 0.2,
min_dist: Optional[float] = None,
) -> Instance:
"""generate one instance with fixed number of
homogeneous agents and sphere obstacles
Note:
For details, check generate_ins_2d_with_obs_hetero
"""
return generate_ins_2d_with_obs_hetero(
num_agents=num_agents,
max_speeds_cands=[max_speed],
rads_cands=[rad],
obs_num=obs_num,
obs_size_lower_bound=obs_size_lower_bound,
obs_size_upper_bound=obs_size_upper_bound,
min_dist=min_dist,
)
def generate_ins_2d_with_obs_sphere_nonfix_agents(
num_agents_min: int,
num_agents_max: int,
max_speed: float,
rad: float,
obs_num: int,
obs_size_lower_bound: float = 0.05,
obs_size_upper_bound: float = 0.2,
min_dist: Optional[float] = None,
) -> Instance:
"""generate one instance with various number of
homogeneous agents and sphere obstacles
Note:
For details, check generate_ins_2d_with_obs_hetero
"""
num_agents = np.random.randint(num_agents_min, num_agents_max)
return generate_ins_2d_with_obs_sphere(
num_agents=num_agents,
max_speed=max_speed,
rad=rad,
obs_num=obs_num,
obs_size_lower_bound=obs_size_lower_bound,
obs_size_upper_bound=obs_size_upper_bound,
min_dist=min_dist,
)
def generate_ins_2d_without_obs(
num_agents: int,
max_speed: float,
rad: float,
min_dist: Optional[float] = None,
) -> Instance:
"""generate one instance with fixed number of
homogeneous agents without obstacles
Note:
For details, check generate_ins_2d_with_obs_hetero
"""
return generate_ins_2d_with_obs_sphere(
num_agents=num_agents,
max_speed=max_speed,
rad=rad,
obs_num=0,
obs_size_lower_bound=0,
obs_size_upper_bound=0,
min_dist=min_dist,
)
def generate_ins_2d_cross(
num_agents: int,
max_speed: float,
rad: float,
set_obs: bool = False,
noise: float = 0,
rotation: bool = False,
dispersion: Union[float, list[float]] = 0.45,
**kwargs
) -> Instance:
"""deprecated: generate symmetry breaking instance
Args:
num_agents (int): number of agents, <= 4
max_speed (float): max_speed
rad (float): radius
set_obs (:obj:`bool`, optional): set one obstacle at the center
noise (:obj:`float`, optional):
set uniform noise for agents' starts and goals
rotation (:obj:`bool`): rotate configuration
dispersion (:obj:`Union[float, list[float]]`):
initial distance between agents
Returns:
Instance: instance
"""
assert num_agents <= 4
d = (
dispersion
if isinstance(dispersion, float)
else np.random.choice(dispersion)
)
assert d > rad, "collide"
start_goal_cands = np.array(
[[-d, -d, d, d], [d, d, -d, -d], [d, -d, -d, d], [-d, d, d, -d],]
)
# rotate
if rotation:
t = np.random.rand() * np.pi * 2
R = np.array([[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]])
start_goal_cands = np.dot(R, start_goal_cands.reshape(2, -1)).reshape(
4, -1
)
# noise
if noise > 0:
start_goal_cands = (
start_goal_cands + np.random.rand(4, 4) * noise - noise / 2
)
np.random.shuffle(start_goal_cands)
start_goal_cands += 0.5
start_goal_cands = np.clip(start_goal_cands, rad, 1 - rad) # type: ignore
starts = list(start_goal_cands[:num_agents, :2])
goals = list(start_goal_cands[:num_agents, 2:])
obs = (
[]
if set_obs is False
else [ObstacleSphere(pos=np.array([0.5, 0.5]), rad=0.2)]
)
return Instance(
num_agents=num_agents,
starts=starts,
goals=goals,
max_speeds=[max_speed] * num_agents,
rads=[rad] * num_agents,
goal_rads=[0.01] * num_agents,
obs=obs, # type: ignore
dim=2,
)
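# A minimal usage sketch (hypothetical parameter values): draw one random 2D
# instance with three homogeneous agents and five sphere obstacles.
#
#   ins = generate_ins_2d_with_obs_sphere(
#       num_agents=3, max_speed=0.05, rad=0.02, obs_num=5)
#   assert ins.num_agents == 3 and ins.dim == 2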
|
python
|
# ptif_uploader script / webhook
# The development has been resumed from uploader_jyhton
# Designed to use models from slide-atlas
__author__ = 'dhan'
#TODO: Extend the uploader for
# - Wrapping c++ image_uploader for ndpi and jp2 images
# noqa: E501
import os
import sys
import argparse
# from celery import Celery
# from celery.task.control import inspect
# from celery.result import AsyncResult
# import time
# import sys
# import json
# import cStringIO as StringIO
# from math import floor
# import subprocess
import logging
logger = logging.getLogger('slideatlas')
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../..")
from slideatlas import create_app
from slideatlas.models import Collection, Session
from slideatlas.tasks.dicer import process_file
from bson import ObjectId
def process_zip(args):
"""
TODO: Extract files and call process_dir
"""
# Extracts zip
zname = args.input
assert zname.endswith("zip")
session_name = os.path.splitext(os.path.basename(zname))[0]
    logger.info('Session name will be: %s', session_name)
# Creates the session
# Create app context locally
flaskapp = create_app()
# Get collection
with flaskapp.app_context():
# Locate the session
try:
coll = Collection.objects.get(id=ObjectId(args.collection))
logger.info('collection: %s', coll.to_son())
session = Session(collection=coll, image_store=coll.image_store, label=session_name)
logger.info('Creating session: %s', str(session.to_json()))
session.save()
except Exception as e:
logger.error('While creating session: %s', e.message)
sys.exit(-1)
args.session = str(session.id)
temp = ObjectId()
import zipfile
fh = open(zname, 'rb')
z = zipfile.ZipFile(fh)
for name in z.namelist():
logger.info('Extracting ..%s', name)
outpath = str(temp)
z.extract(name, outpath)
fh.close()
# In a loop calls the corresponding files
import glob
for afile in glob.glob(str(temp) + "/*"):
logger.info('Processing inside of: %s', afile)
args.input = os.path.abspath(afile)
process_file(args)
# Remove the folder
import shutil
shutil.rmtree(str(temp))
def process_dir(args):
"""
processes dir
"""
# Extracts zip
dir_name = args.input
session_name = os.path.split(dir_name)[1]
    logger.info('Session name will be: %s', session_name)
# Creates the session
flaskapp = create_app()
# Get collection
with flaskapp.app_context():
# Locate the session
try:
coll = Collection.objects.get(id=ObjectId(args.collection))
logger.info('collection: %s', coll.to_son())
except Exception as e:
logger.error('Collection not found: %s', e.message)
sys.exit(-1)
try:
session = Session.objects.get(label=session_name)
except Exception as e:
logger.info('No session: %s', e.message)
session = None
if session is None:
logger.info('Session will be created')
try:
logger.info('Creating session: %s', session_name)
session = Session(collection=coll, image_store=coll.image_store, label=session_name)
session.save()
except Exception as e:
logger.error('Could not create session: %s', e.message)
sys.exit(-1)
else:
logger.info('Session Exists')
args.session = str(session.id)
import glob
for afile in glob.glob(str(dir_name) + "/*"):
        logger.info('Processing inside of: %s', afile)
args.input = os.path.abspath(afile)
process_file.delay(args)
def make_argument_parser():
parser = argparse.ArgumentParser(description='Utility to upload images to slide-atlas using BioFormats')
# Input image
parser.add_argument("-i", "--input", help='Only ptif images on the file location are supporte das of now', required=True)
# Where to upload ?
# The admin database will be already accessible from flaskapp
# Collection implicitly contains image_store
parser.add_argument("-c", "--collection", help="Collection id", required=True)
parser.add_argument("-s", "--session", help="Required for non-zip files", required=False)
parser.add_argument("--bindir", help="Path of the image uploader binary", required=False, default="./")
# Optional parameters
# Tile size if the input image is not already tiled
parser.add_argument("-t", "--tilesize", help="Tile size in pixes. (power of two recommended). Defaults to tiles in input or 256", default=256, type=int)
parser.add_argument("-m", "--mongo-collection", help="ObjectId, Destination mongodb collection name")
parser.add_argument("-o", "--overwrite", help="Specified image, if exists, is emptied before upload is attempted", action='store_true', default=False)
# TODO: Support parallel operations
# parser.add_argument('-j', '--parallel', help='Turn parallel processing ON', action='store_true')
# Optional flags
parser.add_argument("-b", "--base-only", help="Upload only base, otherwise uploads all levels in a pyramidal tiff format", default=False, action='store_true')
parser.add_argument('-n', '--dry-run', help='Entirely removes the session and re-creates', action='store_true')
parser.add_argument('-v', '--verbose', help='Increases verbosity for each occurence', action='count')
return parser
# Parse the command line
if __name__ == '__main__':
"""
Main entry point for image uploader
Uploads a single image
    .. code-block:: shell-session
(slideatlas) $python slideatlas/tasks/ptif_upload.py -i ~/data/ptif/20140721T182320-963749.ptif -c 53d0971cdd98b50867b0eecd -s 53d09798ca7b3a021caff678 -s dj1 -vv -n
"""
parser = make_argument_parser()
args = parser.parse_args()
if args.verbose is None:
verbose = 0
else:
verbose = args.verbose
if verbose > 1:
print "Arguments : "
for akey in vars(args).keys():
print " ", akey, ": ", vars(args)[akey]
if args.input is None:
print "No input files ! (please use -i <inputfile>"
sys.exit(-1)
else:
logger.info('Processing: %s', args.input)
# Find the extension of the file
if args.input.endswith(".zip"):
logger.info('Got a %s', args.input[-4:])
process_zip(args)
elif os.path.isdir(args.input):
logger.info('Got a DIR')
logger.info('Got: %s', args.input)
process_dir(args)
else:
if args.session is None:
logger.error('Session required for single file input')
sys.exit(-1)
logger.info('Submitting job')
# Submits the task
process_file.delay(vars(args))
|
python
|
import warnings
import math
from typing import Optional, TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import Module
from torch.nn import init
from torch.nn.parameter import Parameter
import torchshard as ts
class ParallelLoss(Module):
"""
Build a customized parallel loss function.
Steps:
0. change the loss function's name
1. add / delete additional input parameters for it
2. write the forward flow.
"""
__constants__ = ['ignore_index', 'reduction']
ignore_index: int
def __init__(self,
weight: Optional[Tensor] = None,
size_average=None,
ignore_index: int = -100,
reduce=None,
reduction: str = 'mean'
) -> None:
        super(ParallelLoss, self).__init__()
        # plain nn.Module.__init__ takes no arguments, so keep the loss
        # attributes ourselves (size_average / reduce are legacy and unused)
        self.weight = weight
        self.reduction = reduction
        self.ignore_index = ignore_index
def forward(self, input: Tensor, target: Tensor) -> Tensor:
assert self.weight is None or isinstance(self.weight, Tensor)
        # the forward flow goes here; `_forward` below is a placeholder for
        # whatever model-parallel computation produces the final parallel logits
        input = _forward(input)
# pass final result to torchshard.nn.functional ops to calculate loss values
return ts.nn.functional.parallel_cross_entropy(input, target, weight=self.weight,
ignore_index=self.ignore_index, reduction=self.reduction)
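# A minimal usage sketch (hedged; assumes torchshard's process group has been
# initialized elsewhere and `_forward` has been filled in for the model):
#
#   criterion = ParallelLoss()
#   logits = torch.randn(8, 1000)             # parallel logits from a sharded layer
#   target = torch.randint(0, 1000, (8,))
#   loss = criterion(logits, target)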
|
python
|
# Generated by nuclio.export.NuclioExporter
import os
import pandas as pd
import numpy as np
import scipy as sp
import pickle
import datetime
import v3io_frames as v3f
import matplotlib.pyplot as plt
from sklearn.preprocessing import KBinsDiscretizer
def to_observations(context, t, u, key):
t = (
t.apply(lambda row: f"{'_'.join([str(row[val]) for val in t.columns])}", axis=1)
.value_counts()
.sort_index()
)
u = (
u.apply(lambda row: f"{'_'.join([str(row[val]) for val in u.columns])}", axis=1)
.value_counts()
.sort_index()
)
joined_uniques = pd.DataFrame([t, u]).T.fillna(0).sort_index()
joined_uniques.columns = ["t", "u"]
t_obs = joined_uniques.loc[:, "t"]
u_obs = joined_uniques.loc[:, "u"]
t_pdf = t_obs / t_obs.sum()
u_pdf = u_obs / u_obs.sum()
context.log_dataset(f"{key}_t_pdf", pd.DataFrame(t_pdf), format="parquet")
context.log_dataset(f"{key}_u_pdf", pd.DataFrame(u_pdf), format="parquet")
return t_pdf, u_pdf
def tvd(t, u):
return sum(abs(t - u)) / 2
def helinger(t, u):
return (np.sqrt(np.sum(np.power(np.sqrt(t) - np.sqrt(u), 2)))) / np.sqrt(2)
def kl_divergence(t, u):
t_u = np.sum(np.where(t != 0, t * np.log(t / u), 0))
u_t = np.sum(np.where(u != 0, u * np.log(u / t), 0))
return t_u + u_t
def all_metrics(t, u):
return tvd(t, u), helinger(t, u), kl_divergence(t, u)
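# Quick sanity check of the three metrics on toy distributions (illustrative
# values, rounded):
#
#   t = np.array([0.5, 0.3, 0.2])
#   u = np.array([0.4, 0.4, 0.2])
#   tvd(t, u)            # 0.1    -- half the L1 distance
#   helinger(t, u)       # ~0.080
#   kl_divergence(t, u)  # ~0.051 -- symmetrized KL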
def drift_magnitude(
context,
t: pd.DataFrame,
u: pd.DataFrame,
label_col=None,
prediction_col=None,
discretizers: dict = None,
n_bins=5,
stream_name: str = "some_stream",
results_tsdb_container: str = "bigdata",
results_tsdb_table: str = "concept_drift/drift_magnitude",
):
"""Drift magnitude metrics
Computes drift magnitude metrics between base dataset t and dataset u.
Metrics:
- TVD (Total Variation Distance)
    - Hellinger
- KL Divergence
:param context: MLRun context
:param t: Base dataset for the drift metrics
:param u: Test dataset for the drift metrics
    :param label_col: Label column in t and u
    :param prediction_col: Predictions column in t and u
    :param discretizers: Dictionary of discretizers for the features if available
                         (Created automatically if not provided)
    :param n_bins: Number of bins to be used for histogram creation from continuous variables
:param stream_name: Output stream to push metrics to
:param results_tsdb_container: TSDB table container to push metrics to
:param results_tsdb_table: TSDB table to push metrics to
"""
v3io_client = v3f.Client("framesd:8081", container=results_tsdb_container)
try:
v3io_client.create("tsdb", results_tsdb_table, if_exists=1, rate="1/s")
    except Exception:
v3io_client.create(
"tsdb", results_tsdb_table, if_exists=1, attrs={"rate": "1/s"}
)
df_t = t.as_df()
df_u = u.as_df()
drop_columns = []
if label_col is not None:
drop_columns.append(label_col)
if prediction_col is not None:
drop_columns.append(prediction_col)
continuous_features = df_t.select_dtypes(["float"])
if discretizers is None:
discretizers = {}
for feature in continuous_features.columns:
context.logger.info(f"Fitting discretizer for {feature}")
discretizer = KBinsDiscretizer(
n_bins=n_bins, encode="ordinal", strategy="uniform"
)
discretizer.fit(continuous_features.loc[:, feature].values.reshape(-1, 1))
discretizers[feature] = discretizer
os.makedirs(context.artifact_path, exist_ok=True)
discretizers_path = os.path.abspath(f"{context.artifact_path}/discritizer.pkl")
with open(discretizers_path, "wb") as f:
pickle.dump(discretizers, f)
context.log_artifact("discritizers", target_path=discretizers_path)
context.logger.info("Discretizing featuers")
for feature, discretizer in discretizers.items():
df_t[feature] = discretizer.transform(
df_t.loc[:, feature].values.reshape(-1, 1)
)
df_u[feature] = discretizer.transform(
df_u.loc[:, feature].values.reshape(-1, 1)
)
df_t[feature] = df_t[feature].astype("int")
df_u[feature] = df_u[feature].astype("int")
context.log_dataset("t_discrete", df_t, format="parquet")
context.log_dataset("u_discrete", df_u, format="parquet")
context.logger.info("Compute prior metrics")
results = {}
t_prior, u_prior = to_observations(
context,
df_t.drop(drop_columns, axis=1),
df_u.drop(drop_columns, axis=1),
"features",
)
results["prior_tvd"], results["prior_helinger"], results["prior_kld"] = all_metrics(
t_prior, u_prior
)
if prediction_col is not None:
context.logger.info("Compute prediction metrics")
t_predictions = pd.DataFrame(df_t.loc[:, prediction_col])
u_predictions = pd.DataFrame(df_u.loc[:, prediction_col])
t_class, u_class = to_observations(
context, t_predictions, u_predictions, "prediction"
)
(
results["prediction_shift_tvd"],
results["prediction_shift_helinger"],
results["prediction_shift_kld"],
) = all_metrics(t_class, u_class)
if label_col is not None:
context.logger.info("Compute class metrics")
t_labels = pd.DataFrame(df_t.loc[:, label_col])
u_labels = pd.DataFrame(df_u.loc[:, label_col])
t_class, u_class = to_observations(context, t_labels, u_labels, "class")
(
results["class_shift_tvd"],
results["class_shift_helinger"],
results["class_shift_kld"],
) = all_metrics(t_class, u_class)
for key, value in results.items():
if value == float("inf"):
context.logger.info(f"value: {value}")
results[key] = 10
for key, result in results.items():
context.log_result(key, round(result, 3))
    results["timestamp"] = pd.to_datetime(str(datetime.datetime.now()))
context.logger.info(f"Timestamp: {results['timestamp']}")
results["stream"] = stream_name
results_df = pd.DataFrame(
data=[list(results.values())], columns=list(results.keys())
)
results_df = results_df.set_index(["timestamp", "stream"])
v3io_client.write("tsdb", results_tsdb_table, dfs=results_df)
|
python
|
from django.db import models
class Author(models.Model):
"""
An `Article` author
Attributes:
name (CharField):
Author name.
email (EmailField):
Author email.
"""
name = models.CharField(max_length=255)
email = models.EmailField()
class Article(models.Model):
"""
Article.
Attributes:
title (CharField):
An article title.
description (TextField):
An article description.
author (ForeignKey[`author`]):
Article author - instance of `Author` model.
"""
title = models.CharField(max_length=255)
description = models.TextField()
author = models.ForeignKey(
'Author',
related_name='articles',
on_delete=models.CASCADE
)
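# Example usage from a Django shell (a hedged sketch; assumes migrations have
# been applied for this app):
#
#   author = Author.objects.create(name="Ada", email="[email protected]")
#   Article.objects.create(title="Hello", description="...", author=author)
#   author.articles.count()  # -> 1, via the related_name on Article.author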
|
python
|
allData = {'AK': {'Aleutians East': {'pop': 3141, 'tracts': 1},
'Aleutians West': {'pop': 5561, 'tracts': 2},
'Anchorage': {'pop': 291826, 'tracts': 55},
'Bethel': {'pop': 17013, 'tracts': 3},
'Bristol Bay': {'pop': 997, 'tracts': 1},
'Denali': {'pop': 1826, 'tracts': 1},
'Dillingham': {'pop': 4847, 'tracts': 2},
'Fairbanks North Star': {'pop': 97581, 'tracts': 19},
'Haines': {'pop': 2508, 'tracts': 1},
'Hoonah-Angoon': {'pop': 2150, 'tracts': 2},
'Juneau': {'pop': 31275, 'tracts': 6},
'Kenai Peninsula': {'pop': 55400, 'tracts': 13},
'Ketchikan Gateway': {'pop': 13477, 'tracts': 4},
'Kodiak Island': {'pop': 13592, 'tracts': 5},
'Lake and Peninsula': {'pop': 1631, 'tracts': 1},
'Matanuska-Susitna': {'pop': 88995, 'tracts': 24},
'Nome': {'pop': 9492, 'tracts': 2},
'North Slope': {'pop': 9430, 'tracts': 3},
'Northwest Arctic': {'pop': 7523, 'tracts': 2},
'Petersburg': {'pop': 3815, 'tracts': 1},
'Prince of Wales-Hyder': {'pop': 5559, 'tracts': 4},
'Sitka': {'pop': 8881, 'tracts': 2},
'Skagway': {'pop': 968, 'tracts': 1},
'Southeast Fairbanks': {'pop': 7029, 'tracts': 2},
'Valdez-Cordova': {'pop': 9636, 'tracts': 3},
'Wade Hampton': {'pop': 7459, 'tracts': 1},
'Wrangell': {'pop': 2369, 'tracts': 1},
'Yakutat': {'pop': 662, 'tracts': 1},
'Yukon-Koyukuk': {'pop': 5588, 'tracts': 4}},
'AL': {'Autauga': {'pop': 54571, 'tracts': 12},
'Baldwin': {'pop': 182265, 'tracts': 31},
'Barbour': {'pop': 27457, 'tracts': 9},
'Bibb': {'pop': 22915, 'tracts': 4},
'Blount': {'pop': 57322, 'tracts': 9},
'Bullock': {'pop': 10914, 'tracts': 3},
'Butler': {'pop': 20947, 'tracts': 9},
'Calhoun': {'pop': 118572, 'tracts': 31},
'Chambers': {'pop': 34215, 'tracts': 9},
'Cherokee': {'pop': 25989, 'tracts': 6},
'Chilton': {'pop': 43643, 'tracts': 9},
'Choctaw': {'pop': 13859, 'tracts': 4},
'Clarke': {'pop': 25833, 'tracts': 9},
'Clay': {'pop': 13932, 'tracts': 4},
'Cleburne': {'pop': 14972, 'tracts': 4},
'Coffee': {'pop': 49948, 'tracts': 14},
'Colbert': {'pop': 54428, 'tracts': 14},
'Conecuh': {'pop': 13228, 'tracts': 5},
'Coosa': {'pop': 11539, 'tracts': 3},
'Covington': {'pop': 37765, 'tracts': 14},
'Crenshaw': {'pop': 13906, 'tracts': 6},
'Cullman': {'pop': 80406, 'tracts': 18},
'Dale': {'pop': 50251, 'tracts': 14},
'Dallas': {'pop': 43820, 'tracts': 15},
'DeKalb': {'pop': 71109, 'tracts': 14},
'Elmore': {'pop': 79303, 'tracts': 15},
'Escambia': {'pop': 38319, 'tracts': 9},
'Etowah': {'pop': 104430, 'tracts': 30},
'Fayette': {'pop': 17241, 'tracts': 5},
'Franklin': {'pop': 31704, 'tracts': 9},
'Geneva': {'pop': 26790, 'tracts': 6},
'Greene': {'pop': 9045, 'tracts': 3},
'Hale': {'pop': 15760, 'tracts': 6},
'Henry': {'pop': 17302, 'tracts': 6},
'Houston': {'pop': 101547, 'tracts': 22},
'Jackson': {'pop': 53227, 'tracts': 11},
'Jefferson': {'pop': 658466, 'tracts': 163},
'Lamar': {'pop': 14564, 'tracts': 3},
'Lauderdale': {'pop': 92709, 'tracts': 22},
'Lawrence': {'pop': 34339, 'tracts': 9},
'Lee': {'pop': 140247, 'tracts': 27},
'Limestone': {'pop': 82782, 'tracts': 16},
'Lowndes': {'pop': 11299, 'tracts': 4},
'Macon': {'pop': 21452, 'tracts': 12},
'Madison': {'pop': 334811, 'tracts': 73},
'Marengo': {'pop': 21027, 'tracts': 6},
'Marion': {'pop': 30776, 'tracts': 8},
'Marshall': {'pop': 93019, 'tracts': 18},
'Mobile': {'pop': 412992, 'tracts': 114},
'Monroe': {'pop': 23068, 'tracts': 7},
'Montgomery': {'pop': 229363, 'tracts': 65},
'Morgan': {'pop': 119490, 'tracts': 27},
'Perry': {'pop': 10591, 'tracts': 3},
'Pickens': {'pop': 19746, 'tracts': 5},
'Pike': {'pop': 32899, 'tracts': 8},
'Randolph': {'pop': 22913, 'tracts': 6},
'Russell': {'pop': 52947, 'tracts': 13},
'Shelby': {'pop': 195085, 'tracts': 48},
'St. Clair': {'pop': 83593, 'tracts': 13},
'Sumter': {'pop': 13763, 'tracts': 4},
'Talladega': {'pop': 82291, 'tracts': 22},
'Tallapoosa': {'pop': 41616, 'tracts': 10},
'Tuscaloosa': {'pop': 194656, 'tracts': 47},
'Walker': {'pop': 67023, 'tracts': 18},
'Washington': {'pop': 17581, 'tracts': 5},
'Wilcox': {'pop': 11670, 'tracts': 4},
'Winston': {'pop': 24484, 'tracts': 7}},
'AR': {'Arkansas': {'pop': 19019, 'tracts': 8},
'Ashley': {'pop': 21853, 'tracts': 7},
'Baxter': {'pop': 41513, 'tracts': 9},
'Benton': {'pop': 221339, 'tracts': 49},
'Boone': {'pop': 36903, 'tracts': 7},
'Bradley': {'pop': 11508, 'tracts': 5},
'Calhoun': {'pop': 5368, 'tracts': 2},
'Carroll': {'pop': 27446, 'tracts': 5},
'Chicot': {'pop': 11800, 'tracts': 4},
'Clark': {'pop': 22995, 'tracts': 5},
'Clay': {'pop': 16083, 'tracts': 6},
'Cleburne': {'pop': 25970, 'tracts': 7},
'Cleveland': {'pop': 8689, 'tracts': 2},
'Columbia': {'pop': 24552, 'tracts': 5},
'Conway': {'pop': 21273, 'tracts': 6},
'Craighead': {'pop': 96443, 'tracts': 17},
'Crawford': {'pop': 61948, 'tracts': 11},
'Crittenden': {'pop': 50902, 'tracts': 20},
'Cross': {'pop': 17870, 'tracts': 6},
'Dallas': {'pop': 8116, 'tracts': 3},
'Desha': {'pop': 13008, 'tracts': 5},
'Drew': {'pop': 18509, 'tracts': 5},
'Faulkner': {'pop': 113237, 'tracts': 25},
'Franklin': {'pop': 18125, 'tracts': 3},
'Fulton': {'pop': 12245, 'tracts': 2},
'Garland': {'pop': 96024, 'tracts': 20},
'Grant': {'pop': 17853, 'tracts': 4},
'Greene': {'pop': 42090, 'tracts': 9},
'Hempstead': {'pop': 22609, 'tracts': 5},
'Hot Spring': {'pop': 32923, 'tracts': 7},
'Howard': {'pop': 13789, 'tracts': 3},
'Independence': {'pop': 36647, 'tracts': 8},
'Izard': {'pop': 13696, 'tracts': 4},
'Jackson': {'pop': 17997, 'tracts': 5},
'Jefferson': {'pop': 77435, 'tracts': 24},
'Johnson': {'pop': 25540, 'tracts': 6},
'Lafayette': {'pop': 7645, 'tracts': 2},
'Lawrence': {'pop': 17415, 'tracts': 6},
'Lee': {'pop': 10424, 'tracts': 4},
'Lincoln': {'pop': 14134, 'tracts': 4},
'Little River': {'pop': 13171, 'tracts': 4},
'Logan': {'pop': 22353, 'tracts': 6},
'Lonoke': {'pop': 68356, 'tracts': 16},
'Madison': {'pop': 15717, 'tracts': 4},
'Marion': {'pop': 16653, 'tracts': 4},
'Miller': {'pop': 43462, 'tracts': 12},
'Mississippi': {'pop': 46480, 'tracts': 12},
'Monroe': {'pop': 8149, 'tracts': 3},
'Montgomery': {'pop': 9487, 'tracts': 3},
'Nevada': {'pop': 8997, 'tracts': 3},
'Newton': {'pop': 8330, 'tracts': 2},
'Ouachita': {'pop': 26120, 'tracts': 6},
'Perry': {'pop': 10445, 'tracts': 3},
'Phillips': {'pop': 21757, 'tracts': 6},
'Pike': {'pop': 11291, 'tracts': 3},
'Poinsett': {'pop': 24583, 'tracts': 7},
'Polk': {'pop': 20662, 'tracts': 6},
'Pope': {'pop': 61754, 'tracts': 11},
'Prairie': {'pop': 8715, 'tracts': 3},
'Pulaski': {'pop': 382748, 'tracts': 95},
'Randolph': {'pop': 17969, 'tracts': 4},
'Saline': {'pop': 107118, 'tracts': 21},
'Scott': {'pop': 11233, 'tracts': 3},
'Searcy': {'pop': 8195, 'tracts': 3},
'Sebastian': {'pop': 125744, 'tracts': 26},
'Sevier': {'pop': 17058, 'tracts': 4},
'Sharp': {'pop': 17264, 'tracts': 4},
'St. Francis': {'pop': 28258, 'tracts': 6},
'Stone': {'pop': 12394, 'tracts': 3},
'Union': {'pop': 41639, 'tracts': 10},
'Van Buren': {'pop': 17295, 'tracts': 5},
'Washington': {'pop': 203065, 'tracts': 32},
'White': {'pop': 77076, 'tracts': 13},
'Woodruff': {'pop': 7260, 'tracts': 2},
'Yell': {'pop': 22185, 'tracts': 6}},
'AZ': {'Apache': {'pop': 71518, 'tracts': 16},
'Cochise': {'pop': 131346, 'tracts': 32},
'Coconino': {'pop': 134421, 'tracts': 28},
'Gila': {'pop': 53597, 'tracts': 16},
'Graham': {'pop': 37220, 'tracts': 9},
'Greenlee': {'pop': 8437, 'tracts': 3},
'La Paz': {'pop': 20489, 'tracts': 9},
'Maricopa': {'pop': 3817117, 'tracts': 916},
'Mohave': {'pop': 200186, 'tracts': 43},
'Navajo': {'pop': 107449, 'tracts': 31},
'Pima': {'pop': 980263, 'tracts': 241},
'Pinal': {'pop': 375770, 'tracts': 75},
'Santa Cruz': {'pop': 47420, 'tracts': 10},
'Yavapai': {'pop': 211033, 'tracts': 42},
'Yuma': {'pop': 195751, 'tracts': 55}},
'CA': {'Alameda': {'pop': 1510271, 'tracts': 360},
'Alpine': {'pop': 1175, 'tracts': 1},
'Amador': {'pop': 38091, 'tracts': 9},
'Butte': {'pop': 220000, 'tracts': 51},
'Calaveras': {'pop': 45578, 'tracts': 10},
'Colusa': {'pop': 21419, 'tracts': 5},
'Contra Costa': {'pop': 1049025, 'tracts': 208},
'Del Norte': {'pop': 28610, 'tracts': 7},
'El Dorado': {'pop': 181058, 'tracts': 43},
'Fresno': {'pop': 930450, 'tracts': 199},
'Glenn': {'pop': 28122, 'tracts': 6},
'Humboldt': {'pop': 134623, 'tracts': 30},
'Imperial': {'pop': 174528, 'tracts': 31},
'Inyo': {'pop': 18546, 'tracts': 6},
'Kern': {'pop': 839631, 'tracts': 151},
'Kings': {'pop': 152982, 'tracts': 27},
'Lake': {'pop': 64665, 'tracts': 15},
'Lassen': {'pop': 34895, 'tracts': 9},
'Los Angeles': {'pop': 9818605, 'tracts': 2343},
'Madera': {'pop': 150865, 'tracts': 23},
'Marin': {'pop': 252409, 'tracts': 55},
'Mariposa': {'pop': 18251, 'tracts': 6},
'Mendocino': {'pop': 87841, 'tracts': 20},
'Merced': {'pop': 255793, 'tracts': 49},
'Modoc': {'pop': 9686, 'tracts': 4},
'Mono': {'pop': 14202, 'tracts': 3},
'Monterey': {'pop': 415057, 'tracts': 93},
'Napa': {'pop': 136484, 'tracts': 40},
'Nevada': {'pop': 98764, 'tracts': 20},
'Orange': {'pop': 3010232, 'tracts': 583},
'Placer': {'pop': 348432, 'tracts': 85},
'Plumas': {'pop': 20007, 'tracts': 7},
'Riverside': {'pop': 2189641, 'tracts': 453},
'Sacramento': {'pop': 1418788, 'tracts': 317},
'San Benito': {'pop': 55269, 'tracts': 11},
'San Bernardino': {'pop': 2035210, 'tracts': 369},
'San Diego': {'pop': 3095313, 'tracts': 628},
'San Francisco': {'pop': 805235, 'tracts': 196},
'San Joaquin': {'pop': 685306, 'tracts': 139},
'San Luis Obispo': {'pop': 269637, 'tracts': 53},
'San Mateo': {'pop': 718451, 'tracts': 158},
'Santa Barbara': {'pop': 423895, 'tracts': 90},
'Santa Clara': {'pop': 1781642, 'tracts': 372},
'Santa Cruz': {'pop': 262382, 'tracts': 52},
'Shasta': {'pop': 177223, 'tracts': 48},
'Sierra': {'pop': 3240, 'tracts': 1},
'Siskiyou': {'pop': 44900, 'tracts': 14},
'Solano': {'pop': 413344, 'tracts': 96},
'Sonoma': {'pop': 483878, 'tracts': 99},
'Stanislaus': {'pop': 514453, 'tracts': 94},
'Sutter': {'pop': 94737, 'tracts': 21},
'Tehama': {'pop': 63463, 'tracts': 11},
'Trinity': {'pop': 13786, 'tracts': 5},
'Tulare': {'pop': 442179, 'tracts': 78},
'Tuolumne': {'pop': 55365, 'tracts': 11},
'Ventura': {'pop': 823318, 'tracts': 174},
'Yolo': {'pop': 200849, 'tracts': 41},
'Yuba': {'pop': 72155, 'tracts': 14}},
'CO': {'Adams': {'pop': 441603, 'tracts': 97},
'Alamosa': {'pop': 15445, 'tracts': 4},
'Arapahoe': {'pop': 572003, 'tracts': 147},
'Archuleta': {'pop': 12084, 'tracts': 4},
'Baca': {'pop': 3788, 'tracts': 2},
'Bent': {'pop': 6499, 'tracts': 1},
'Boulder': {'pop': 294567, 'tracts': 68},
'Broomfield': {'pop': 55889, 'tracts': 18},
'Chaffee': {'pop': 17809, 'tracts': 5},
'Cheyenne': {'pop': 1836, 'tracts': 1},
'Clear Creek': {'pop': 9088, 'tracts': 3},
'Conejos': {'pop': 8256, 'tracts': 2},
'Costilla': {'pop': 3524, 'tracts': 2},
'Crowley': {'pop': 5823, 'tracts': 1},
'Custer': {'pop': 4255, 'tracts': 1},
'Delta': {'pop': 30952, 'tracts': 7},
'Denver': {'pop': 600158, 'tracts': 144},
'Dolores': {'pop': 2064, 'tracts': 1},
'Douglas': {'pop': 285465, 'tracts': 61},
'Eagle': {'pop': 52197, 'tracts': 14},
'El Paso': {'pop': 622263, 'tracts': 130},
'Elbert': {'pop': 23086, 'tracts': 7},
'Fremont': {'pop': 46824, 'tracts': 14},
'Garfield': {'pop': 56389, 'tracts': 11},
'Gilpin': {'pop': 5441, 'tracts': 1},
'Grand': {'pop': 14843, 'tracts': 3},
'Gunnison': {'pop': 15324, 'tracts': 4},
'Hinsdale': {'pop': 843, 'tracts': 1},
'Huerfano': {'pop': 6711, 'tracts': 2},
'Jackson': {'pop': 1394, 'tracts': 1},
'Jefferson': {'pop': 534543, 'tracts': 138},
'Kiowa': {'pop': 1398, 'tracts': 1},
'Kit Carson': {'pop': 8270, 'tracts': 3},
'La Plata': {'pop': 51334, 'tracts': 10},
'Lake': {'pop': 7310, 'tracts': 2},
'Larimer': {'pop': 299630, 'tracts': 73},
'Las Animas': {'pop': 15507, 'tracts': 6},
'Lincoln': {'pop': 5467, 'tracts': 2},
'Logan': {'pop': 22709, 'tracts': 6},
'Mesa': {'pop': 146723, 'tracts': 29},
'Mineral': {'pop': 712, 'tracts': 1},
'Moffat': {'pop': 13795, 'tracts': 4},
'Montezuma': {'pop': 25535, 'tracts': 7},
'Montrose': {'pop': 41276, 'tracts': 10},
'Morgan': {'pop': 28159, 'tracts': 8},
'Otero': {'pop': 18831, 'tracts': 7},
'Ouray': {'pop': 4436, 'tracts': 1},
'Park': {'pop': 16206, 'tracts': 5},
'Phillips': {'pop': 4442, 'tracts': 2},
'Pitkin': {'pop': 17148, 'tracts': 4},
'Prowers': {'pop': 12551, 'tracts': 5},
'Pueblo': {'pop': 159063, 'tracts': 55},
'Rio Blanco': {'pop': 6666, 'tracts': 2},
'Rio Grande': {'pop': 11982, 'tracts': 3},
'Routt': {'pop': 23509, 'tracts': 8},
'Saguache': {'pop': 6108, 'tracts': 2},
'San Juan': {'pop': 699, 'tracts': 1},
'San Miguel': {'pop': 7359, 'tracts': 4},
'Sedgwick': {'pop': 2379, 'tracts': 1},
'Summit': {'pop': 27994, 'tracts': 5},
'Teller': {'pop': 23350, 'tracts': 6},
'Washington': {'pop': 4814, 'tracts': 2},
'Weld': {'pop': 252825, 'tracts': 77},
'Yuma': {'pop': 10043, 'tracts': 2}},
'CT': {'Fairfield': {'pop': 916829, 'tracts': 211},
'Hartford': {'pop': 894014, 'tracts': 224},
'Litchfield': {'pop': 189927, 'tracts': 51},
'Middlesex': {'pop': 165676, 'tracts': 36},
'New Haven': {'pop': 862477, 'tracts': 190},
'New London': {'pop': 274055, 'tracts': 66},
'Tolland': {'pop': 152691, 'tracts': 29},
'Windham': {'pop': 118428, 'tracts': 25}},
'DC': {'District of Columbia': {'pop': 601723, 'tracts': 179}},
'DE': {'Kent': {'pop': 162310, 'tracts': 33},
'New Castle': {'pop': 538479, 'tracts': 131},
'Sussex': {'pop': 197145, 'tracts': 54}},
'FL': {'Alachua': {'pop': 247336, 'tracts': 56},
'Baker': {'pop': 27115, 'tracts': 4},
'Bay': {'pop': 168852, 'tracts': 44},
'Bradford': {'pop': 28520, 'tracts': 4},
'Brevard': {'pop': 543376, 'tracts': 113},
'Broward': {'pop': 1748066, 'tracts': 361},
'Calhoun': {'pop': 14625, 'tracts': 3},
'Charlotte': {'pop': 159978, 'tracts': 39},
'Citrus': {'pop': 141236, 'tracts': 27},
'Clay': {'pop': 190865, 'tracts': 30},
'Collier': {'pop': 321520, 'tracts': 73},
'Columbia': {'pop': 67531, 'tracts': 12},
'DeSoto': {'pop': 34862, 'tracts': 9},
'Dixie': {'pop': 16422, 'tracts': 3},
'Duval': {'pop': 864263, 'tracts': 173},
'Escambia': {'pop': 297619, 'tracts': 71},
'Flagler': {'pop': 95696, 'tracts': 20},
'Franklin': {'pop': 11549, 'tracts': 4},
'Gadsden': {'pop': 46389, 'tracts': 9},
'Gilchrist': {'pop': 16939, 'tracts': 5},
'Glades': {'pop': 12884, 'tracts': 4},
'Gulf': {'pop': 15863, 'tracts': 3},
'Hamilton': {'pop': 14799, 'tracts': 3},
'Hardee': {'pop': 27731, 'tracts': 6},
'Hendry': {'pop': 39140, 'tracts': 7},
'Hernando': {'pop': 172778, 'tracts': 45},
'Highlands': {'pop': 98786, 'tracts': 27},
'Hillsborough': {'pop': 1229226, 'tracts': 321},
'Holmes': {'pop': 19927, 'tracts': 4},
'Indian River': {'pop': 138028, 'tracts': 30},
'Jackson': {'pop': 49746, 'tracts': 11},
'Jefferson': {'pop': 14761, 'tracts': 3},
'Lafayette': {'pop': 8870, 'tracts': 2},
'Lake': {'pop': 297052, 'tracts': 56},
'Lee': {'pop': 618754, 'tracts': 166},
'Leon': {'pop': 275487, 'tracts': 68},
'Levy': {'pop': 40801, 'tracts': 9},
'Liberty': {'pop': 8365, 'tracts': 2},
'Madison': {'pop': 19224, 'tracts': 5},
'Manatee': {'pop': 322833, 'tracts': 78},
'Marion': {'pop': 331298, 'tracts': 63},
'Martin': {'pop': 146318, 'tracts': 35},
'Miami-Dade': {'pop': 2496435, 'tracts': 519},
'Monroe': {'pop': 73090, 'tracts': 30},
'Nassau': {'pop': 73314, 'tracts': 12},
'Okaloosa': {'pop': 180822, 'tracts': 41},
'Okeechobee': {'pop': 39996, 'tracts': 12},
'Orange': {'pop': 1145956, 'tracts': 207},
'Osceola': {'pop': 268685, 'tracts': 41},
'Palm Beach': {'pop': 1320134, 'tracts': 337},
'Pasco': {'pop': 464697, 'tracts': 134},
'Pinellas': {'pop': 916542, 'tracts': 245},
'Polk': {'pop': 602095, 'tracts': 154},
'Putnam': {'pop': 74364, 'tracts': 17},
'Santa Rosa': {'pop': 151372, 'tracts': 25},
'Sarasota': {'pop': 379448, 'tracts': 94},
'Seminole': {'pop': 422718, 'tracts': 86},
'St. Johns': {'pop': 190039, 'tracts': 40},
'St. Lucie': {'pop': 277789, 'tracts': 44},
'Sumter': {'pop': 93420, 'tracts': 19},
'Suwannee': {'pop': 41551, 'tracts': 7},
'Taylor': {'pop': 22570, 'tracts': 4},
'Union': {'pop': 15535, 'tracts': 3},
'Volusia': {'pop': 494593, 'tracts': 113},
'Wakulla': {'pop': 30776, 'tracts': 4},
'Walton': {'pop': 55043, 'tracts': 11},
'Washington': {'pop': 24896, 'tracts': 7}},
'GA': {'Appling': {'pop': 18236, 'tracts': 5},
'Atkinson': {'pop': 8375, 'tracts': 3},
'Bacon': {'pop': 11096, 'tracts': 3},
'Baker': {'pop': 3451, 'tracts': 2},
'Baldwin': {'pop': 45720, 'tracts': 9},
'Banks': {'pop': 18395, 'tracts': 4},
'Barrow': {'pop': 69367, 'tracts': 18},
'Bartow': {'pop': 100157, 'tracts': 15},
'Ben Hill': {'pop': 17634, 'tracts': 5},
'Berrien': {'pop': 19286, 'tracts': 6},
'Bibb': {'pop': 155547, 'tracts': 44},
'Bleckley': {'pop': 13063, 'tracts': 3},
'Brantley': {'pop': 18411, 'tracts': 3},
'Brooks': {'pop': 16243, 'tracts': 5},
'Bryan': {'pop': 30233, 'tracts': 7},
'Bulloch': {'pop': 70217, 'tracts': 12},
'Burke': {'pop': 23316, 'tracts': 6},
'Butts': {'pop': 23655, 'tracts': 3},
'Calhoun': {'pop': 6694, 'tracts': 2},
'Camden': {'pop': 50513, 'tracts': 10},
'Candler': {'pop': 10998, 'tracts': 3},
'Carroll': {'pop': 110527, 'tracts': 17},
'Catoosa': {'pop': 63942, 'tracts': 11},
'Charlton': {'pop': 12171, 'tracts': 2},
'Chatham': {'pop': 265128, 'tracts': 72},
'Chattahoochee': {'pop': 11267, 'tracts': 5},
'Chattooga': {'pop': 26015, 'tracts': 6},
'Cherokee': {'pop': 214346, 'tracts': 26},
'Clarke': {'pop': 116714, 'tracts': 30},
'Clay': {'pop': 3183, 'tracts': 1},
'Clayton': {'pop': 259424, 'tracts': 50},
'Clinch': {'pop': 6798, 'tracts': 2},
'Cobb': {'pop': 688078, 'tracts': 120},
'Coffee': {'pop': 42356, 'tracts': 9},
'Colquitt': {'pop': 45498, 'tracts': 10},
'Columbia': {'pop': 124053, 'tracts': 20},
'Cook': {'pop': 17212, 'tracts': 4},
'Coweta': {'pop': 127317, 'tracts': 20},
'Crawford': {'pop': 12630, 'tracts': 3},
'Crisp': {'pop': 23439, 'tracts': 6},
'Dade': {'pop': 16633, 'tracts': 4},
'Dawson': {'pop': 22330, 'tracts': 3},
'DeKalb': {'pop': 691893, 'tracts': 145},
'Decatur': {'pop': 27842, 'tracts': 7},
'Dodge': {'pop': 21796, 'tracts': 6},
'Dooly': {'pop': 14918, 'tracts': 3},
'Dougherty': {'pop': 94565, 'tracts': 27},
'Douglas': {'pop': 132403, 'tracts': 20},
'Early': {'pop': 11008, 'tracts': 5},
'Echols': {'pop': 4034, 'tracts': 2},
'Effingham': {'pop': 52250, 'tracts': 10},
'Elbert': {'pop': 20166, 'tracts': 5},
'Emanuel': {'pop': 22598, 'tracts': 6},
'Evans': {'pop': 11000, 'tracts': 3},
'Fannin': {'pop': 23682, 'tracts': 5},
'Fayette': {'pop': 106567, 'tracts': 20},
'Floyd': {'pop': 96317, 'tracts': 20},
'Forsyth': {'pop': 175511, 'tracts': 45},
'Franklin': {'pop': 22084, 'tracts': 5},
'Fulton': {'pop': 920581, 'tracts': 204},
'Gilmer': {'pop': 28292, 'tracts': 5},
'Glascock': {'pop': 3082, 'tracts': 1},
'Glynn': {'pop': 79626, 'tracts': 15},
'Gordon': {'pop': 55186, 'tracts': 9},
'Grady': {'pop': 25011, 'tracts': 6},
'Greene': {'pop': 15994, 'tracts': 7},
'Gwinnett': {'pop': 805321, 'tracts': 113},
'Habersham': {'pop': 43041, 'tracts': 8},
'Hall': {'pop': 179684, 'tracts': 36},
'Hancock': {'pop': 9429, 'tracts': 2},
'Haralson': {'pop': 28780, 'tracts': 5},
'Harris': {'pop': 32024, 'tracts': 5},
'Hart': {'pop': 25213, 'tracts': 5},
'Heard': {'pop': 11834, 'tracts': 3},
'Henry': {'pop': 203922, 'tracts': 25},
'Houston': {'pop': 139900, 'tracts': 23},
'Irwin': {'pop': 9538, 'tracts': 2},
'Jackson': {'pop': 60485, 'tracts': 11},
'Jasper': {'pop': 13900, 'tracts': 3},
'Jeff Davis': {'pop': 15068, 'tracts': 3},
'Jefferson': {'pop': 16930, 'tracts': 4},
'Jenkins': {'pop': 8340, 'tracts': 2},
'Johnson': {'pop': 9980, 'tracts': 3},
'Jones': {'pop': 28669, 'tracts': 6},
'Lamar': {'pop': 18317, 'tracts': 3},
'Lanier': {'pop': 10078, 'tracts': 2},
'Laurens': {'pop': 48434, 'tracts': 13},
'Lee': {'pop': 28298, 'tracts': 5},
'Liberty': {'pop': 63453, 'tracts': 14},
'Lincoln': {'pop': 7996, 'tracts': 2},
'Long': {'pop': 14464, 'tracts': 3},
'Lowndes': {'pop': 109233, 'tracts': 25},
'Lumpkin': {'pop': 29966, 'tracts': 4},
'Macon': {'pop': 14740, 'tracts': 4},
'Madison': {'pop': 28120, 'tracts': 6},
'Marion': {'pop': 8742, 'tracts': 2},
'McDuffie': {'pop': 21875, 'tracts': 5},
'McIntosh': {'pop': 14333, 'tracts': 4},
'Meriwether': {'pop': 21992, 'tracts': 4},
'Miller': {'pop': 6125, 'tracts': 3},
'Mitchell': {'pop': 23498, 'tracts': 5},
'Monroe': {'pop': 26424, 'tracts': 5},
'Montgomery': {'pop': 9123, 'tracts': 3},
'Morgan': {'pop': 17868, 'tracts': 5},
'Murray': {'pop': 39628, 'tracts': 8},
'Muscogee': {'pop': 189885, 'tracts': 53},
'Newton': {'pop': 99958, 'tracts': 13},
'Oconee': {'pop': 32808, 'tracts': 6},
'Oglethorpe': {'pop': 14899, 'tracts': 4},
'Paulding': {'pop': 142324, 'tracts': 19},
'Peach': {'pop': 27695, 'tracts': 6},
'Pickens': {'pop': 29431, 'tracts': 6},
'Pierce': {'pop': 18758, 'tracts': 4},
'Pike': {'pop': 17869, 'tracts': 4},
'Polk': {'pop': 41475, 'tracts': 7},
'Pulaski': {'pop': 12010, 'tracts': 3},
'Putnam': {'pop': 21218, 'tracts': 5},
'Quitman': {'pop': 2513, 'tracts': 1},
'Rabun': {'pop': 16276, 'tracts': 5},
'Randolph': {'pop': 7719, 'tracts': 2},
'Richmond': {'pop': 200549, 'tracts': 47},
'Rockdale': {'pop': 85215, 'tracts': 15},
'Schley': {'pop': 5010, 'tracts': 2},
'Screven': {'pop': 14593, 'tracts': 5},
'Seminole': {'pop': 8729, 'tracts': 3},
'Spalding': {'pop': 64073, 'tracts': 12},
'Stephens': {'pop': 26175, 'tracts': 5},
'Stewart': {'pop': 6058, 'tracts': 2},
'Sumter': {'pop': 32819, 'tracts': 8},
'Talbot': {'pop': 6865, 'tracts': 3},
'Taliaferro': {'pop': 1717, 'tracts': 1},
'Tattnall': {'pop': 25520, 'tracts': 5},
'Taylor': {'pop': 8906, 'tracts': 3},
'Telfair': {'pop': 16500, 'tracts': 3},
'Terrell': {'pop': 9315, 'tracts': 4},
'Thomas': {'pop': 44720, 'tracts': 11},
'Tift': {'pop': 40118, 'tracts': 9},
'Toombs': {'pop': 27223, 'tracts': 6},
'Towns': {'pop': 10471, 'tracts': 3},
'Treutlen': {'pop': 6885, 'tracts': 2},
'Troup': {'pop': 67044, 'tracts': 14},
'Turner': {'pop': 8930, 'tracts': 2},
'Twiggs': {'pop': 9023, 'tracts': 2},
'Union': {'pop': 21356, 'tracts': 6},
'Upson': {'pop': 27153, 'tracts': 7},
'Walker': {'pop': 68756, 'tracts': 13},
'Walton': {'pop': 83768, 'tracts': 15},
'Ware': {'pop': 36312, 'tracts': 9},
'Warren': {'pop': 5834, 'tracts': 2},
'Washington': {'pop': 21187, 'tracts': 5},
'Wayne': {'pop': 30099, 'tracts': 6},
'Webster': {'pop': 2799, 'tracts': 2},
'Wheeler': {'pop': 7421, 'tracts': 2},
'White': {'pop': 27144, 'tracts': 5},
'Whitfield': {'pop': 102599, 'tracts': 18},
'Wilcox': {'pop': 9255, 'tracts': 4},
'Wilkes': {'pop': 10593, 'tracts': 4},
'Wilkinson': {'pop': 9563, 'tracts': 3},
'Worth': {'pop': 21679, 'tracts': 5}},
'HI': {'Hawaii': {'pop': 185079, 'tracts': 34},
'Honolulu': {'pop': 953207, 'tracts': 244},
'Kalawao': {'pop': 90, 'tracts': 1},
'Kauai': {'pop': 67091, 'tracts': 16},
'Maui': {'pop': 154834, 'tracts': 37}},
'IA': {'Adair': {'pop': 7682, 'tracts': 3},
'Adams': {'pop': 4029, 'tracts': 2},
'Allamakee': {'pop': 14330, 'tracts': 5},
'Appanoose': {'pop': 12887, 'tracts': 5},
'Audubon': {'pop': 6119, 'tracts': 3},
'Benton': {'pop': 26076, 'tracts': 7},
'Black Hawk': {'pop': 131090, 'tracts': 38},
'Boone': {'pop': 26306, 'tracts': 7},
'Bremer': {'pop': 24276, 'tracts': 8},
'Buchanan': {'pop': 20958, 'tracts': 6},
'Buena Vista': {'pop': 20260, 'tracts': 6},
'Butler': {'pop': 14867, 'tracts': 5},
'Calhoun': {'pop': 9670, 'tracts': 4},
'Carroll': {'pop': 20816, 'tracts': 6},
'Cass': {'pop': 13956, 'tracts': 5},
'Cedar': {'pop': 18499, 'tracts': 5},
'Cerro Gordo': {'pop': 44151, 'tracts': 11},
'Cherokee': {'pop': 12072, 'tracts': 4},
'Chickasaw': {'pop': 12439, 'tracts': 4},
'Clarke': {'pop': 9286, 'tracts': 3},
'Clay': {'pop': 16667, 'tracts': 4},
'Clayton': {'pop': 18129, 'tracts': 6},
'Clinton': {'pop': 49116, 'tracts': 12},
'Crawford': {'pop': 17096, 'tracts': 5},
'Dallas': {'pop': 66135, 'tracts': 15},
'Davis': {'pop': 8753, 'tracts': 2},
'Decatur': {'pop': 8457, 'tracts': 3},
'Delaware': {'pop': 17764, 'tracts': 4},
'Des Moines': {'pop': 40325, 'tracts': 11},
'Dickinson': {'pop': 16667, 'tracts': 5},
'Dubuque': {'pop': 93653, 'tracts': 26},
'Emmet': {'pop': 10302, 'tracts': 4},
'Fayette': {'pop': 20880, 'tracts': 7},
'Floyd': {'pop': 16303, 'tracts': 5},
'Franklin': {'pop': 10680, 'tracts': 3},
'Fremont': {'pop': 7441, 'tracts': 3},
'Greene': {'pop': 9336, 'tracts': 4},
'Grundy': {'pop': 12453, 'tracts': 4},
'Guthrie': {'pop': 10954, 'tracts': 3},
'Hamilton': {'pop': 15673, 'tracts': 5},
'Hancock': {'pop': 11341, 'tracts': 4},
'Hardin': {'pop': 17534, 'tracts': 6},
'Harrison': {'pop': 14928, 'tracts': 5},
'Henry': {'pop': 20145, 'tracts': 5},
'Howard': {'pop': 9566, 'tracts': 3},
'Humboldt': {'pop': 9815, 'tracts': 4},
'Ida': {'pop': 7089, 'tracts': 3},
'Iowa': {'pop': 16355, 'tracts': 4},
'Jackson': {'pop': 19848, 'tracts': 6},
'Jasper': {'pop': 36842, 'tracts': 9},
'Jefferson': {'pop': 16843, 'tracts': 4},
'Johnson': {'pop': 130882, 'tracts': 24},
'Jones': {'pop': 20638, 'tracts': 5},
'Keokuk': {'pop': 10511, 'tracts': 4},
'Kossuth': {'pop': 15543, 'tracts': 6},
'Lee': {'pop': 35862, 'tracts': 11},
'Linn': {'pop': 211226, 'tracts': 45},
'Louisa': {'pop': 11387, 'tracts': 3},
'Lucas': {'pop': 8898, 'tracts': 4},
'Lyon': {'pop': 11581, 'tracts': 3},
'Madison': {'pop': 15679, 'tracts': 3},
'Mahaska': {'pop': 22381, 'tracts': 7},
'Marion': {'pop': 33309, 'tracts': 8},
'Marshall': {'pop': 40648, 'tracts': 10},
'Mills': {'pop': 15059, 'tracts': 5},
'Mitchell': {'pop': 10776, 'tracts': 3},
'Monona': {'pop': 9243, 'tracts': 4},
'Monroe': {'pop': 7970, 'tracts': 3},
'Montgomery': {'pop': 10740, 'tracts': 4},
'Muscatine': {'pop': 42745, 'tracts': 10},
"O'Brien": {'pop': 14398, 'tracts': 4},
'Osceola': {'pop': 6462, 'tracts': 2},
'Page': {'pop': 15932, 'tracts': 6},
'Palo Alto': {'pop': 9421, 'tracts': 4},
'Plymouth': {'pop': 24986, 'tracts': 6},
'Pocahontas': {'pop': 7310, 'tracts': 3},
'Polk': {'pop': 430640, 'tracts': 98},
'Pottawattamie': {'pop': 93158, 'tracts': 30},
'Poweshiek': {'pop': 18914, 'tracts': 5},
'Ringgold': {'pop': 5131, 'tracts': 2},
'Sac': {'pop': 10350, 'tracts': 4},
'Scott': {'pop': 165224, 'tracts': 47},
'Shelby': {'pop': 12167, 'tracts': 4},
'Sioux': {'pop': 33704, 'tracts': 7},
'Story': {'pop': 89542, 'tracts': 20},
'Tama': {'pop': 17767, 'tracts': 6},
'Taylor': {'pop': 6317, 'tracts': 3},
'Union': {'pop': 12534, 'tracts': 4},
'Van Buren': {'pop': 7570, 'tracts': 2},
'Wapello': {'pop': 35625, 'tracts': 11},
'Warren': {'pop': 46225, 'tracts': 12},
'Washington': {'pop': 21704, 'tracts': 5},
'Wayne': {'pop': 6403, 'tracts': 3},
'Webster': {'pop': 38013, 'tracts': 12},
'Winnebago': {'pop': 10866, 'tracts': 3},
'Winneshiek': {'pop': 21056, 'tracts': 5},
'Woodbury': {'pop': 102172, 'tracts': 26},
'Worth': {'pop': 7598, 'tracts': 3},
'Wright': {'pop': 13229, 'tracts': 5}},
'ID': {'Ada': {'pop': 392365, 'tracts': 59},
'Adams': {'pop': 3976, 'tracts': 2},
'Bannock': {'pop': 82839, 'tracts': 22},
'Bear Lake': {'pop': 5986, 'tracts': 2},
'Benewah': {'pop': 9285, 'tracts': 2},
'Bingham': {'pop': 45607, 'tracts': 8},
'Blaine': {'pop': 21376, 'tracts': 4},
'Boise': {'pop': 7028, 'tracts': 1},
'Bonner': {'pop': 40877, 'tracts': 9},
'Bonneville': {'pop': 104234, 'tracts': 21},
'Boundary': {'pop': 10972, 'tracts': 2},
'Butte': {'pop': 2891, 'tracts': 1},
'Camas': {'pop': 1117, 'tracts': 1},
'Canyon': {'pop': 188923, 'tracts': 29},
'Caribou': {'pop': 6963, 'tracts': 2},
'Cassia': {'pop': 22952, 'tracts': 6},
'Clark': {'pop': 982, 'tracts': 1},
'Clearwater': {'pop': 8761, 'tracts': 2},
'Custer': {'pop': 4368, 'tracts': 1},
'Elmore': {'pop': 27038, 'tracts': 5},
'Franklin': {'pop': 12786, 'tracts': 2},
'Fremont': {'pop': 13242, 'tracts': 3},
'Gem': {'pop': 16719, 'tracts': 3},
'Gooding': {'pop': 15464, 'tracts': 2},
'Idaho': {'pop': 16267, 'tracts': 5},
'Jefferson': {'pop': 26140, 'tracts': 4},
'Jerome': {'pop': 22374, 'tracts': 5},
'Kootenai': {'pop': 138494, 'tracts': 25},
'Latah': {'pop': 37244, 'tracts': 7},
'Lemhi': {'pop': 7936, 'tracts': 3},
'Lewis': {'pop': 3821, 'tracts': 3},
'Lincoln': {'pop': 5208, 'tracts': 1},
'Madison': {'pop': 37536, 'tracts': 6},
'Minidoka': {'pop': 20069, 'tracts': 5},
'Nez Perce': {'pop': 39265, 'tracts': 10},
'Oneida': {'pop': 4286, 'tracts': 1},
'Owyhee': {'pop': 11526, 'tracts': 3},
'Payette': {'pop': 22623, 'tracts': 4},
'Power': {'pop': 7817, 'tracts': 2},
'Shoshone': {'pop': 12765, 'tracts': 3},
'Teton': {'pop': 10170, 'tracts': 1},
'Twin Falls': {'pop': 77230, 'tracts': 14},
'Valley': {'pop': 9862, 'tracts': 3},
'Washington': {'pop': 10198, 'tracts': 3}},
'IL': {'Adams': {'pop': 67103, 'tracts': 18},
'Alexander': {'pop': 8238, 'tracts': 4},
'Bond': {'pop': 17768, 'tracts': 4},
'Boone': {'pop': 54165, 'tracts': 7},
'Brown': {'pop': 6937, 'tracts': 2},
'Bureau': {'pop': 34978, 'tracts': 10},
'Calhoun': {'pop': 5089, 'tracts': 2},
'Carroll': {'pop': 15387, 'tracts': 6},
'Cass': {'pop': 13642, 'tracts': 5},
'Champaign': {'pop': 201081, 'tracts': 43},
'Christian': {'pop': 34800, 'tracts': 10},
'Clark': {'pop': 16335, 'tracts': 4},
'Clay': {'pop': 13815, 'tracts': 4},
'Clinton': {'pop': 37762, 'tracts': 8},
'Coles': {'pop': 53873, 'tracts': 12},
'Cook': {'pop': 5194675, 'tracts': 1318},
'Crawford': {'pop': 19817, 'tracts': 6},
'Cumberland': {'pop': 11048, 'tracts': 3},
'De Witt': {'pop': 16561, 'tracts': 5},
'DeKalb': {'pop': 105160, 'tracts': 21},
'Douglas': {'pop': 19980, 'tracts': 5},
'DuPage': {'pop': 916924, 'tracts': 216},
'Edgar': {'pop': 18576, 'tracts': 5},
'Edwards': {'pop': 6721, 'tracts': 3},
'Effingham': {'pop': 34242, 'tracts': 8},
'Fayette': {'pop': 22140, 'tracts': 7},
'Ford': {'pop': 14081, 'tracts': 5},
'Franklin': {'pop': 39561, 'tracts': 12},
'Fulton': {'pop': 37069, 'tracts': 12},
'Gallatin': {'pop': 5589, 'tracts': 2},
'Greene': {'pop': 13886, 'tracts': 5},
'Grundy': {'pop': 50063, 'tracts': 10},
'Hamilton': {'pop': 8457, 'tracts': 3},
'Hancock': {'pop': 19104, 'tracts': 7},
'Hardin': {'pop': 4320, 'tracts': 2},
'Henderson': {'pop': 7331, 'tracts': 3},
'Henry': {'pop': 50486, 'tracts': 13},
'Iroquois': {'pop': 29718, 'tracts': 9},
'Jackson': {'pop': 60218, 'tracts': 14},
'Jasper': {'pop': 9698, 'tracts': 3},
'Jefferson': {'pop': 38827, 'tracts': 11},
'Jersey': {'pop': 22985, 'tracts': 6},
'Jo Daviess': {'pop': 22678, 'tracts': 6},
'Johnson': {'pop': 12582, 'tracts': 4},
'Kane': {'pop': 515269, 'tracts': 82},
'Kankakee': {'pop': 113449, 'tracts': 29},
'Kendall': {'pop': 114736, 'tracts': 10},
'Knox': {'pop': 52919, 'tracts': 16},
'La Salle': {'pop': 113924, 'tracts': 28},
'Lake': {'pop': 703462, 'tracts': 153},
'Lawrence': {'pop': 16833, 'tracts': 5},
'Lee': {'pop': 36031, 'tracts': 9},
'Livingston': {'pop': 38950, 'tracts': 10},
'Logan': {'pop': 30305, 'tracts': 8},
'Macon': {'pop': 110768, 'tracts': 34},
'Macoupin': {'pop': 47765, 'tracts': 13},
'Madison': {'pop': 269282, 'tracts': 61},
'Marion': {'pop': 39437, 'tracts': 12},
'Marshall': {'pop': 12640, 'tracts': 5},
'Mason': {'pop': 14666, 'tracts': 6},
'Massac': {'pop': 15429, 'tracts': 4},
'McDonough': {'pop': 32612, 'tracts': 10},
'McHenry': {'pop': 308760, 'tracts': 52},
'McLean': {'pop': 169572, 'tracts': 41},
'Menard': {'pop': 12705, 'tracts': 3},
'Mercer': {'pop': 16434, 'tracts': 4},
'Monroe': {'pop': 32957, 'tracts': 6},
'Montgomery': {'pop': 30104, 'tracts': 8},
'Morgan': {'pop': 35547, 'tracts': 10},
'Moultrie': {'pop': 14846, 'tracts': 4},
'Ogle': {'pop': 53497, 'tracts': 11},
'Peoria': {'pop': 186494, 'tracts': 48},
'Perry': {'pop': 22350, 'tracts': 6},
'Piatt': {'pop': 16729, 'tracts': 4},
'Pike': {'pop': 16430, 'tracts': 5},
'Pope': {'pop': 4470, 'tracts': 2},
'Pulaski': {'pop': 6161, 'tracts': 2},
'Putnam': {'pop': 6006, 'tracts': 2},
'Randolph': {'pop': 33476, 'tracts': 9},
'Richland': {'pop': 16233, 'tracts': 5},
'Rock Island': {'pop': 147546, 'tracts': 40},
'Saline': {'pop': 24913, 'tracts': 9},
'Sangamon': {'pop': 197465, 'tracts': 53},
'Schuyler': {'pop': 7544, 'tracts': 3},
'Scott': {'pop': 5355, 'tracts': 2},
'Shelby': {'pop': 22363, 'tracts': 6},
'St. Clair': {'pop': 270056, 'tracts': 60},
'Stark': {'pop': 5994, 'tracts': 2},
'Stephenson': {'pop': 47711, 'tracts': 13},
'Tazewell': {'pop': 135394, 'tracts': 30},
'Union': {'pop': 17808, 'tracts': 5},
'Vermilion': {'pop': 81625, 'tracts': 24},
'Wabash': {'pop': 11947, 'tracts': 4},
'Warren': {'pop': 17707, 'tracts': 5},
'Washington': {'pop': 14716, 'tracts': 4},
'Wayne': {'pop': 16760, 'tracts': 5},
'White': {'pop': 14665, 'tracts': 5},
'Whiteside': {'pop': 58498, 'tracts': 18},
'Will': {'pop': 677560, 'tracts': 152},
'Williamson': {'pop': 66357, 'tracts': 15},
'Winnebago': {'pop': 295266, 'tracts': 77},
'Woodford': {'pop': 38664, 'tracts': 9}},
'IN': {'Adams': {'pop': 34387, 'tracts': 7},
'Allen': {'pop': 355329, 'tracts': 96},
'Bartholomew': {'pop': 76794, 'tracts': 15},
'Benton': {'pop': 8854, 'tracts': 3},
'Blackford': {'pop': 12766, 'tracts': 4},
'Boone': {'pop': 56640, 'tracts': 10},
'Brown': {'pop': 15242, 'tracts': 4},
'Carroll': {'pop': 20155, 'tracts': 7},
'Cass': {'pop': 38966, 'tracts': 11},
'Clark': {'pop': 110232, 'tracts': 26},
'Clay': {'pop': 26890, 'tracts': 6},
'Clinton': {'pop': 33224, 'tracts': 8},
'Crawford': {'pop': 10713, 'tracts': 3},
'Daviess': {'pop': 31648, 'tracts': 7},
'DeKalb': {'pop': 42223, 'tracts': 9},
'Dearborn': {'pop': 50047, 'tracts': 10},
'Decatur': {'pop': 25740, 'tracts': 6},
'Delaware': {'pop': 117671, 'tracts': 30},
'Dubois': {'pop': 41889, 'tracts': 7},
'Elkhart': {'pop': 197559, 'tracts': 36},
'Fayette': {'pop': 24277, 'tracts': 7},
'Floyd': {'pop': 74578, 'tracts': 20},
'Fountain': {'pop': 17240, 'tracts': 5},
'Franklin': {'pop': 23087, 'tracts': 5},
'Fulton': {'pop': 20836, 'tracts': 6},
'Gibson': {'pop': 33503, 'tracts': 7},
'Grant': {'pop': 70061, 'tracts': 16},
'Greene': {'pop': 33165, 'tracts': 9},
'Hamilton': {'pop': 274569, 'tracts': 39},
'Hancock': {'pop': 70002, 'tracts': 10},
'Harrison': {'pop': 39364, 'tracts': 6},
'Hendricks': {'pop': 145448, 'tracts': 21},
'Henry': {'pop': 49462, 'tracts': 13},
'Howard': {'pop': 82752, 'tracts': 20},
'Huntington': {'pop': 37124, 'tracts': 9},
'Jackson': {'pop': 42376, 'tracts': 10},
'Jasper': {'pop': 33478, 'tracts': 8},
'Jay': {'pop': 21253, 'tracts': 7},
'Jefferson': {'pop': 32428, 'tracts': 7},
'Jennings': {'pop': 28525, 'tracts': 6},
'Johnson': {'pop': 139654, 'tracts': 22},
'Knox': {'pop': 38440, 'tracts': 10},
'Kosciusko': {'pop': 77358, 'tracts': 19},
'LaGrange': {'pop': 37128, 'tracts': 8},
'LaPorte': {'pop': 111467, 'tracts': 28},
'Lake': {'pop': 496005, 'tracts': 117},
'Lawrence': {'pop': 46134, 'tracts': 10},
'Madison': {'pop': 131636, 'tracts': 37},
'Marion': {'pop': 903393, 'tracts': 224},
'Marshall': {'pop': 47051, 'tracts': 12},
'Martin': {'pop': 10334, 'tracts': 3},
'Miami': {'pop': 36903, 'tracts': 10},
'Monroe': {'pop': 137974, 'tracts': 31},
'Montgomery': {'pop': 38124, 'tracts': 9},
'Morgan': {'pop': 68894, 'tracts': 13},
'Newton': {'pop': 14244, 'tracts': 4},
'Noble': {'pop': 47536, 'tracts': 10},
'Ohio': {'pop': 6128, 'tracts': 2},
'Orange': {'pop': 19840, 'tracts': 6},
'Owen': {'pop': 21575, 'tracts': 5},
'Parke': {'pop': 17339, 'tracts': 4},
'Perry': {'pop': 19338, 'tracts': 5},
'Pike': {'pop': 12845, 'tracts': 4},
'Porter': {'pop': 164343, 'tracts': 32},
'Posey': {'pop': 25910, 'tracts': 7},
'Pulaski': {'pop': 13402, 'tracts': 4},
'Putnam': {'pop': 37963, 'tracts': 7},
'Randolph': {'pop': 26171, 'tracts': 8},
'Ripley': {'pop': 28818, 'tracts': 6},
'Rush': {'pop': 17392, 'tracts': 5},
'Scott': {'pop': 24181, 'tracts': 5},
'Shelby': {'pop': 44436, 'tracts': 10},
'Spencer': {'pop': 20952, 'tracts': 5},
'St. Joseph': {'pop': 266931, 'tracts': 75},
'Starke': {'pop': 23363, 'tracts': 7},
'Steuben': {'pop': 34185, 'tracts': 9},
'Sullivan': {'pop': 21475, 'tracts': 5},
'Switzerland': {'pop': 10613, 'tracts': 3},
'Tippecanoe': {'pop': 172780, 'tracts': 37},
'Tipton': {'pop': 15936, 'tracts': 4},
'Union': {'pop': 7516, 'tracts': 2},
'Vanderburgh': {'pop': 179703, 'tracts': 49},
'Vermillion': {'pop': 16212, 'tracts': 5},
'Vigo': {'pop': 107848, 'tracts': 28},
'Wabash': {'pop': 32888, 'tracts': 8},
'Warren': {'pop': 8508, 'tracts': 2},
'Warrick': {'pop': 59689, 'tracts': 11},
'Washington': {'pop': 28262, 'tracts': 6},
'Wayne': {'pop': 68917, 'tracts': 17},
'Wells': {'pop': 27636, 'tracts': 7},
'White': {'pop': 24643, 'tracts': 8},
'Whitley': {'pop': 33292, 'tracts': 7}},
'KS': {'Allen': {'pop': 13371, 'tracts': 5},
'Anderson': {'pop': 8102, 'tracts': 2},
'Atchison': {'pop': 16924, 'tracts': 4},
'Barber': {'pop': 4861, 'tracts': 2},
'Barton': {'pop': 27674, 'tracts': 8},
'Bourbon': {'pop': 15173, 'tracts': 5},
'Brown': {'pop': 9984, 'tracts': 3},
'Butler': {'pop': 65880, 'tracts': 13},
'Chase': {'pop': 2790, 'tracts': 1},
'Chautauqua': {'pop': 3669, 'tracts': 1},
'Cherokee': {'pop': 21603, 'tracts': 6},
'Cheyenne': {'pop': 2726, 'tracts': 1},
'Clark': {'pop': 2215, 'tracts': 1},
'Clay': {'pop': 8535, 'tracts': 2},
'Cloud': {'pop': 9533, 'tracts': 4},
'Coffey': {'pop': 8601, 'tracts': 3},
'Comanche': {'pop': 1891, 'tracts': 1},
'Cowley': {'pop': 36311, 'tracts': 11},
'Crawford': {'pop': 39134, 'tracts': 11},
'Decatur': {'pop': 2961, 'tracts': 2},
'Dickinson': {'pop': 19754, 'tracts': 6},
'Doniphan': {'pop': 7945, 'tracts': 3},
'Douglas': {'pop': 110826, 'tracts': 22},
'Edwards': {'pop': 3037, 'tracts': 2},
'Elk': {'pop': 2882, 'tracts': 1},
'Ellis': {'pop': 28452, 'tracts': 6},
'Ellsworth': {'pop': 6497, 'tracts': 2},
'Finney': {'pop': 36776, 'tracts': 12},
'Ford': {'pop': 33848, 'tracts': 7},
'Franklin': {'pop': 25992, 'tracts': 5},
'Geary': {'pop': 34362, 'tracts': 8},
'Gove': {'pop': 2695, 'tracts': 2},
'Graham': {'pop': 2597, 'tracts': 2},
'Grant': {'pop': 7829, 'tracts': 2},
'Gray': {'pop': 6006, 'tracts': 2},
'Greeley': {'pop': 1247, 'tracts': 1},
'Greenwood': {'pop': 6689, 'tracts': 3},
'Hamilton': {'pop': 2690, 'tracts': 1},
'Harper': {'pop': 6034, 'tracts': 3},
'Harvey': {'pop': 34684, 'tracts': 6},
'Haskell': {'pop': 4256, 'tracts': 1},
'Hodgeman': {'pop': 1916, 'tracts': 1},
'Jackson': {'pop': 13462, 'tracts': 3},
'Jefferson': {'pop': 19126, 'tracts': 4},
'Jewell': {'pop': 3077, 'tracts': 2},
'Johnson': {'pop': 544179, 'tracts': 130},
'Kearny': {'pop': 3977, 'tracts': 1},
'Kingman': {'pop': 7858, 'tracts': 3},
'Kiowa': {'pop': 2553, 'tracts': 1},
'Labette': {'pop': 21607, 'tracts': 8},
'Lane': {'pop': 1750, 'tracts': 1},
'Leavenworth': {'pop': 76227, 'tracts': 16},
'Lincoln': {'pop': 3241, 'tracts': 1},
'Linn': {'pop': 9656, 'tracts': 2},
'Logan': {'pop': 2756, 'tracts': 1},
'Lyon': {'pop': 33690, 'tracts': 8},
'Marion': {'pop': 12660, 'tracts': 4},
'Marshall': {'pop': 10117, 'tracts': 4},
'McPherson': {'pop': 29180, 'tracts': 7},
'Meade': {'pop': 4575, 'tracts': 2},
'Miami': {'pop': 32787, 'tracts': 8},
'Mitchell': {'pop': 6373, 'tracts': 2},
'Montgomery': {'pop': 35471, 'tracts': 13},
'Morris': {'pop': 5923, 'tracts': 2},
'Morton': {'pop': 3233, 'tracts': 1},
'Nemaha': {'pop': 10178, 'tracts': 3},
'Neosho': {'pop': 16512, 'tracts': 5},
'Ness': {'pop': 3107, 'tracts': 2},
'Norton': {'pop': 5671, 'tracts': 1},
'Osage': {'pop': 16295, 'tracts': 5},
'Osborne': {'pop': 3858, 'tracts': 1},
'Ottawa': {'pop': 6091, 'tracts': 2},
'Pawnee': {'pop': 6973, 'tracts': 2},
'Phillips': {'pop': 5642, 'tracts': 3},
'Pottawatomie': {'pop': 21604, 'tracts': 4},
'Pratt': {'pop': 9656, 'tracts': 3},
'Rawlins': {'pop': 2519, 'tracts': 1},
'Reno': {'pop': 64511, 'tracts': 17},
'Republic': {'pop': 4980, 'tracts': 3},
'Rice': {'pop': 10083, 'tracts': 3},
'Riley': {'pop': 71115, 'tracts': 14},
'Rooks': {'pop': 5181, 'tracts': 2},
'Rush': {'pop': 3307, 'tracts': 2},
'Russell': {'pop': 6970, 'tracts': 2},
'Saline': {'pop': 55606, 'tracts': 12},
'Scott': {'pop': 4936, 'tracts': 1},
'Sedgwick': {'pop': 498365, 'tracts': 124},
'Seward': {'pop': 22952, 'tracts': 5},
'Shawnee': {'pop': 177934, 'tracts': 43},
'Sheridan': {'pop': 2556, 'tracts': 2},
'Sherman': {'pop': 6010, 'tracts': 2},
'Smith': {'pop': 3853, 'tracts': 2},
'Stafford': {'pop': 4437, 'tracts': 2},
'Stanton': {'pop': 2235, 'tracts': 1},
'Stevens': {'pop': 5724, 'tracts': 2},
'Sumner': {'pop': 24132, 'tracts': 6},
'Thomas': {'pop': 7900, 'tracts': 2},
'Trego': {'pop': 3001, 'tracts': 1},
'Wabaunsee': {'pop': 7053, 'tracts': 2},
'Wallace': {'pop': 1485, 'tracts': 1},
'Washington': {'pop': 5799, 'tracts': 2},
'Wichita': {'pop': 2234, 'tracts': 1},
'Wilson': {'pop': 9409, 'tracts': 4},
'Woodson': {'pop': 3309, 'tracts': 2},
'Wyandotte': {'pop': 157505, 'tracts': 70}},
'KY': {'Adair': {'pop': 18656, 'tracts': 7},
'Allen': {'pop': 19956, 'tracts': 6},
'Anderson': {'pop': 21421, 'tracts': 5},
'Ballard': {'pop': 8249, 'tracts': 3},
'Barren': {'pop': 42173, 'tracts': 10},
'Bath': {'pop': 11591, 'tracts': 3},
'Bell': {'pop': 28691, 'tracts': 9},
'Boone': {'pop': 118811, 'tracts': 22},
'Bourbon': {'pop': 19985, 'tracts': 6},
'Boyd': {'pop': 49542, 'tracts': 13},
'Boyle': {'pop': 28432, 'tracts': 7},
'Bracken': {'pop': 8488, 'tracts': 3},
'Breathitt': {'pop': 13878, 'tracts': 7},
'Breckinridge': {'pop': 20059, 'tracts': 6},
'Bullitt': {'pop': 74319, 'tracts': 18},
'Butler': {'pop': 12690, 'tracts': 5},
'Caldwell': {'pop': 12984, 'tracts': 3},
'Calloway': {'pop': 37191, 'tracts': 9},
'Campbell': {'pop': 90336, 'tracts': 25},
'Carlisle': {'pop': 5104, 'tracts': 3},
'Carroll': {'pop': 10811, 'tracts': 3},
'Carter': {'pop': 27720, 'tracts': 7},
'Casey': {'pop': 15955, 'tracts': 5},
'Christian': {'pop': 73955, 'tracts': 19},
'Clark': {'pop': 35613, 'tracts': 10},
'Clay': {'pop': 21730, 'tracts': 6},
'Clinton': {'pop': 10272, 'tracts': 3},
'Crittenden': {'pop': 9315, 'tracts': 4},
'Cumberland': {'pop': 6856, 'tracts': 2},
'Daviess': {'pop': 96656, 'tracts': 23},
'Edmonson': {'pop': 12161, 'tracts': 4},
'Elliott': {'pop': 7852, 'tracts': 2},
'Estill': {'pop': 14672, 'tracts': 4},
'Fayette': {'pop': 295803, 'tracts': 82},
'Fleming': {'pop': 14348, 'tracts': 4},
'Floyd': {'pop': 39451, 'tracts': 10},
'Franklin': {'pop': 49285, 'tracts': 11},
'Fulton': {'pop': 6813, 'tracts': 2},
'Gallatin': {'pop': 8589, 'tracts': 2},
'Garrard': {'pop': 16912, 'tracts': 4},
'Grant': {'pop': 24662, 'tracts': 4},
'Graves': {'pop': 37121, 'tracts': 9},
'Grayson': {'pop': 25746, 'tracts': 7},
'Green': {'pop': 11258, 'tracts': 4},
'Greenup': {'pop': 36910, 'tracts': 9},
'Hancock': {'pop': 8565, 'tracts': 3},
'Hardin': {'pop': 105543, 'tracts': 22},
'Harlan': {'pop': 29278, 'tracts': 11},
'Harrison': {'pop': 18846, 'tracts': 5},
'Hart': {'pop': 18199, 'tracts': 5},
'Henderson': {'pop': 46250, 'tracts': 11},
'Henry': {'pop': 15416, 'tracts': 5},
'Hickman': {'pop': 4902, 'tracts': 1},
'Hopkins': {'pop': 46920, 'tracts': 12},
'Jackson': {'pop': 13494, 'tracts': 3},
'Jefferson': {'pop': 741096, 'tracts': 191},
'Jessamine': {'pop': 48586, 'tracts': 9},
'Johnson': {'pop': 23356, 'tracts': 6},
'Kenton': {'pop': 159720, 'tracts': 41},
'Knott': {'pop': 16346, 'tracts': 5},
'Knox': {'pop': 31883, 'tracts': 8},
'Larue': {'pop': 14193, 'tracts': 4},
'Laurel': {'pop': 58849, 'tracts': 13},
'Lawrence': {'pop': 15860, 'tracts': 5},
'Lee': {'pop': 7887, 'tracts': 3},
'Leslie': {'pop': 11310, 'tracts': 3},
'Letcher': {'pop': 24519, 'tracts': 7},
'Lewis': {'pop': 13870, 'tracts': 4},
'Lincoln': {'pop': 24742, 'tracts': 6},
'Livingston': {'pop': 9519, 'tracts': 2},
'Logan': {'pop': 26835, 'tracts': 6},
'Lyon': {'pop': 8314, 'tracts': 3},
'Madison': {'pop': 82916, 'tracts': 19},
'Magoffin': {'pop': 13333, 'tracts': 4},
'Marion': {'pop': 19820, 'tracts': 6},
'Marshall': {'pop': 31448, 'tracts': 6},
'Martin': {'pop': 12929, 'tracts': 3},
'Mason': {'pop': 17490, 'tracts': 5},
'McCracken': {'pop': 65565, 'tracts': 17},
'McCreary': {'pop': 18306, 'tracts': 4},
'McLean': {'pop': 9531, 'tracts': 3},
'Meade': {'pop': 28602, 'tracts': 8},
'Menifee': {'pop': 6306, 'tracts': 2},
'Mercer': {'pop': 21331, 'tracts': 5},
'Metcalfe': {'pop': 10099, 'tracts': 3},
'Monroe': {'pop': 10963, 'tracts': 4},
'Montgomery': {'pop': 26499, 'tracts': 6},
'Morgan': {'pop': 13923, 'tracts': 5},
'Muhlenberg': {'pop': 31499, 'tracts': 9},
'Nelson': {'pop': 43437, 'tracts': 9},
'Nicholas': {'pop': 7135, 'tracts': 2},
'Ohio': {'pop': 23842, 'tracts': 7},
'Oldham': {'pop': 60316, 'tracts': 14},
'Owen': {'pop': 10841, 'tracts': 3},
'Owsley': {'pop': 4755, 'tracts': 2},
'Pendleton': {'pop': 14877, 'tracts': 3},
'Perry': {'pop': 28712, 'tracts': 8},
'Pike': {'pop': 65024, 'tracts': 19},
'Powell': {'pop': 12613, 'tracts': 2},
'Pulaski': {'pop': 63063, 'tracts': 14},
'Robertson': {'pop': 2282, 'tracts': 1},
'Rockcastle': {'pop': 17056, 'tracts': 4},
'Rowan': {'pop': 23333, 'tracts': 4},
'Russell': {'pop': 17565, 'tracts': 5},
'Scott': {'pop': 47173, 'tracts': 14},
'Shelby': {'pop': 42074, 'tracts': 9},
'Simpson': {'pop': 17327, 'tracts': 4},
'Spencer': {'pop': 17061, 'tracts': 4},
'Taylor': {'pop': 24512, 'tracts': 5},
'Todd': {'pop': 12460, 'tracts': 4},
'Trigg': {'pop': 14339, 'tracts': 5},
'Trimble': {'pop': 8809, 'tracts': 2},
'Union': {'pop': 15007, 'tracts': 4},
'Warren': {'pop': 113792, 'tracts': 24},
'Washington': {'pop': 11717, 'tracts': 3},
'Wayne': {'pop': 20813, 'tracts': 5},
'Webster': {'pop': 13621, 'tracts': 4},
'Whitley': {'pop': 35637, 'tracts': 8},
'Wolfe': {'pop': 7355, 'tracts': 2},
'Woodford': {'pop': 24939, 'tracts': 8}},
'LA': {'Acadia': {'pop': 61773, 'tracts': 12},
'Allen': {'pop': 25764, 'tracts': 5},
'Ascension': {'pop': 107215, 'tracts': 14},
'Assumption': {'pop': 23421, 'tracts': 6},
'Avoyelles': {'pop': 42073, 'tracts': 9},
'Beauregard': {'pop': 35654, 'tracts': 7},
'Bienville': {'pop': 14353, 'tracts': 5},
'Bossier': {'pop': 116979, 'tracts': 22},
'Caddo': {'pop': 254969, 'tracts': 64},
'Calcasieu': {'pop': 192768, 'tracts': 44},
'Caldwell': {'pop': 10132, 'tracts': 3},
'Cameron': {'pop': 6839, 'tracts': 3},
'Catahoula': {'pop': 10407, 'tracts': 3},
'Claiborne': {'pop': 17195, 'tracts': 5},
'Concordia': {'pop': 20822, 'tracts': 5},
'De Soto': {'pop': 26656, 'tracts': 7},
'East Baton Rouge': {'pop': 440171, 'tracts': 92},
'East Carroll': {'pop': 7759, 'tracts': 3},
'East Feliciana': {'pop': 20267, 'tracts': 5},
'Evangeline': {'pop': 33984, 'tracts': 8},
'Franklin': {'pop': 20767, 'tracts': 6},
'Grant': {'pop': 22309, 'tracts': 5},
'Iberia': {'pop': 73240, 'tracts': 15},
'Iberville': {'pop': 33387, 'tracts': 7},
'Jackson': {'pop': 16274, 'tracts': 5},
'Jefferson': {'pop': 432552, 'tracts': 127},
'Jefferson Davis': {'pop': 31594, 'tracts': 7},
'La Salle': {'pop': 14890, 'tracts': 3},
'Lafayette': {'pop': 221578, 'tracts': 43},
'Lafourche': {'pop': 96318, 'tracts': 23},
'Lincoln': {'pop': 46735, 'tracts': 10},
'Livingston': {'pop': 128026, 'tracts': 17},
'Madison': {'pop': 12093, 'tracts': 5},
'Morehouse': {'pop': 27979, 'tracts': 8},
'Natchitoches': {'pop': 39566, 'tracts': 9},
'Orleans': {'pop': 343829, 'tracts': 177},
'Ouachita': {'pop': 153720, 'tracts': 40},
'Plaquemines': {'pop': 23042, 'tracts': 9},
'Pointe Coupee': {'pop': 22802, 'tracts': 6},
'Rapides': {'pop': 131613, 'tracts': 33},
'Red River': {'pop': 9091, 'tracts': 2},
'Richland': {'pop': 20725, 'tracts': 6},
'Sabine': {'pop': 24233, 'tracts': 7},
'St. Bernard': {'pop': 35897, 'tracts': 18},
'St. Charles': {'pop': 52780, 'tracts': 13},
'St. Helena': {'pop': 11203, 'tracts': 2},
'St. James': {'pop': 22102, 'tracts': 7},
'St. John the Baptist': {'pop': 45924, 'tracts': 11},
'St. Landry': {'pop': 83384, 'tracts': 19},
'St. Martin': {'pop': 52160, 'tracts': 11},
'St. Mary': {'pop': 54650, 'tracts': 16},
'St. Tammany': {'pop': 233740, 'tracts': 43},
'Tangipahoa': {'pop': 121097, 'tracts': 20},
'Tensas': {'pop': 5252, 'tracts': 3},
'Terrebonne': {'pop': 111860, 'tracts': 21},
'Union': {'pop': 22721, 'tracts': 6},
'Vermilion': {'pop': 57999, 'tracts': 12},
'Vernon': {'pop': 52334, 'tracts': 12},
'Washington': {'pop': 47168, 'tracts': 11},
'Webster': {'pop': 41207, 'tracts': 11},
'West Baton Rouge': {'pop': 23788, 'tracts': 5},
'West Carroll': {'pop': 11604, 'tracts': 3},
'West Feliciana': {'pop': 15625, 'tracts': 3},
'Winn': {'pop': 15313, 'tracts': 4}},
'MA': {'Barnstable': {'pop': 215888, 'tracts': 57},
'Berkshire': {'pop': 131219, 'tracts': 39},
'Bristol': {'pop': 548285, 'tracts': 126},
'Dukes': {'pop': 16535, 'tracts': 4},
'Essex': {'pop': 743159, 'tracts': 163},
'Franklin': {'pop': 71372, 'tracts': 18},
'Hampden': {'pop': 463490, 'tracts': 103},
'Hampshire': {'pop': 158080, 'tracts': 36},
'Middlesex': {'pop': 1503085, 'tracts': 318},
'Nantucket': {'pop': 10172, 'tracts': 6},
'Norfolk': {'pop': 670850, 'tracts': 130},
'Plymouth': {'pop': 494919, 'tracts': 100},
'Suffolk': {'pop': 722023, 'tracts': 204},
'Worcester': {'pop': 798552, 'tracts': 172}},
'MD': {'Allegany': {'pop': 75087, 'tracts': 23},
'Anne Arundel': {'pop': 537656, 'tracts': 104},
'Baltimore': {'pop': 805029, 'tracts': 214},
'Baltimore City': {'pop': 620961, 'tracts': 200},
'Calvert': {'pop': 88737, 'tracts': 18},
'Caroline': {'pop': 33066, 'tracts': 9},
'Carroll': {'pop': 167134, 'tracts': 38},
'Cecil': {'pop': 101108, 'tracts': 19},
'Charles': {'pop': 146551, 'tracts': 30},
'Dorchester': {'pop': 32618, 'tracts': 10},
'Frederick': {'pop': 233385, 'tracts': 61},
'Garrett': {'pop': 30097, 'tracts': 7},
'Harford': {'pop': 244826, 'tracts': 57},
'Howard': {'pop': 287085, 'tracts': 55},
'Kent': {'pop': 20197, 'tracts': 5},
'Montgomery': {'pop': 971777, 'tracts': 215},
"Prince George's": {'pop': 863420, 'tracts': 218},
"Queen Anne's": {'pop': 47798, 'tracts': 12},
'Somerset': {'pop': 26470, 'tracts': 8},
"St. Mary's": {'pop': 105151, 'tracts': 18},
'Talbot': {'pop': 37782, 'tracts': 10},
'Washington': {'pop': 147430, 'tracts': 32},
'Wicomico': {'pop': 98733, 'tracts': 19},
'Worcester': {'pop': 51454, 'tracts': 17}},
'ME': {'Androscoggin': {'pop': 107702, 'tracts': 28},
'Aroostook': {'pop': 71870, 'tracts': 24},
'Cumberland': {'pop': 281674, 'tracts': 67},
'Franklin': {'pop': 30768, 'tracts': 9},
'Hancock': {'pop': 54418, 'tracts': 17},
'Kennebec': {'pop': 122151, 'tracts': 31},
'Knox': {'pop': 39736, 'tracts': 11},
'Lincoln': {'pop': 34457, 'tracts': 9},
'Oxford': {'pop': 57833, 'tracts': 17},
'Penobscot': {'pop': 153923, 'tracts': 46},
'Piscataquis': {'pop': 17535, 'tracts': 8},
'Sagadahoc': {'pop': 35293, 'tracts': 8},
'Somerset': {'pop': 52228, 'tracts': 17},
'Waldo': {'pop': 38786, 'tracts': 8},
'Washington': {'pop': 32856, 'tracts': 14},
'York': {'pop': 197131, 'tracts': 41}},
'MI': {'Alcona': {'pop': 10942, 'tracts': 5},
'Alger': {'pop': 9601, 'tracts': 3},
'Allegan': {'pop': 111408, 'tracts': 25},
'Alpena': {'pop': 29598, 'tracts': 10},
'Antrim': {'pop': 23580, 'tracts': 7},
'Arenac': {'pop': 15899, 'tracts': 5},
'Baraga': {'pop': 8860, 'tracts': 2},
'Barry': {'pop': 59173, 'tracts': 11},
'Bay': {'pop': 107771, 'tracts': 26},
'Benzie': {'pop': 17525, 'tracts': 5},
'Berrien': {'pop': 156813, 'tracts': 48},
'Branch': {'pop': 45248, 'tracts': 12},
'Calhoun': {'pop': 136146, 'tracts': 39},
'Cass': {'pop': 52293, 'tracts': 11},
'Charlevoix': {'pop': 25949, 'tracts': 13},
'Cheboygan': {'pop': 26152, 'tracts': 8},
'Chippewa': {'pop': 38520, 'tracts': 14},
'Clare': {'pop': 30926, 'tracts': 11},
'Clinton': {'pop': 75382, 'tracts': 22},
'Crawford': {'pop': 14074, 'tracts': 5},
'Delta': {'pop': 37069, 'tracts': 11},
'Dickinson': {'pop': 26168, 'tracts': 7},
'Eaton': {'pop': 107759, 'tracts': 28},
'Emmet': {'pop': 32694, 'tracts': 8},
'Genesee': {'pop': 425790, 'tracts': 131},
'Gladwin': {'pop': 25692, 'tracts': 9},
'Gogebic': {'pop': 16427, 'tracts': 7},
'Grand Traverse': {'pop': 86986, 'tracts': 16},
'Gratiot': {'pop': 42476, 'tracts': 10},
'Hillsdale': {'pop': 46688, 'tracts': 12},
'Houghton': {'pop': 36628, 'tracts': 11},
'Huron': {'pop': 33118, 'tracts': 12},
'Ingham': {'pop': 280895, 'tracts': 81},
'Ionia': {'pop': 63905, 'tracts': 13},
'Iosco': {'pop': 25887, 'tracts': 9},
'Iron': {'pop': 11817, 'tracts': 5},
'Isabella': {'pop': 70311, 'tracts': 15},
'Jackson': {'pop': 160248, 'tracts': 38},
'Kalamazoo': {'pop': 250331, 'tracts': 57},
'Kalkaska': {'pop': 17153, 'tracts': 5},
'Kent': {'pop': 602622, 'tracts': 128},
'Keweenaw': {'pop': 2156, 'tracts': 2},
'Lake': {'pop': 11539, 'tracts': 4},
'Lapeer': {'pop': 88319, 'tracts': 24},
'Leelanau': {'pop': 21708, 'tracts': 6},
'Lenawee': {'pop': 99892, 'tracts': 23},
'Livingston': {'pop': 180967, 'tracts': 61},
'Luce': {'pop': 6631, 'tracts': 3},
'Mackinac': {'pop': 11113, 'tracts': 6},
'Macomb': {'pop': 840978, 'tracts': 216},
'Manistee': {'pop': 24733, 'tracts': 9},
'Marquette': {'pop': 67077, 'tracts': 24},
'Mason': {'pop': 28705, 'tracts': 8},
'Mecosta': {'pop': 42798, 'tracts': 11},
'Menominee': {'pop': 24029, 'tracts': 7},
'Midland': {'pop': 83629, 'tracts': 19},
'Missaukee': {'pop': 14849, 'tracts': 4},
'Monroe': {'pop': 152021, 'tracts': 39},
'Montcalm': {'pop': 63342, 'tracts': 13},
'Montmorency': {'pop': 9765, 'tracts': 5},
'Muskegon': {'pop': 172188, 'tracts': 42},
'Newaygo': {'pop': 48460, 'tracts': 11},
'Oakland': {'pop': 1202362, 'tracts': 338},
'Oceana': {'pop': 26570, 'tracts': 7},
'Ogemaw': {'pop': 21699, 'tracts': 7},
'Ontonagon': {'pop': 6780, 'tracts': 4},
'Osceola': {'pop': 23528, 'tracts': 6},
'Oscoda': {'pop': 8640, 'tracts': 5},
'Otsego': {'pop': 24164, 'tracts': 6},
'Ottawa': {'pop': 263801, 'tracts': 53},
'Presque Isle': {'pop': 13376, 'tracts': 6},
'Roscommon': {'pop': 24449, 'tracts': 10},
'Saginaw': {'pop': 200169, 'tracts': 56},
'Sanilac': {'pop': 43114, 'tracts': 12},
'Schoolcraft': {'pop': 8485, 'tracts': 3},
'Shiawassee': {'pop': 70648, 'tracts': 17},
'St. Clair': {'pop': 163040, 'tracts': 49},
'St. Joseph': {'pop': 61295, 'tracts': 17},
'Tuscola': {'pop': 55729, 'tracts': 13},
'Van Buren': {'pop': 76258, 'tracts': 15},
'Washtenaw': {'pop': 344791, 'tracts': 100},
'Wayne': {'pop': 1820584, 'tracts': 610},
'Wexford': {'pop': 32735, 'tracts': 8}},
'MN': {'Aitkin': {'pop': 16202, 'tracts': 6},
'Anoka': {'pop': 330844, 'tracts': 83},
'Becker': {'pop': 32504, 'tracts': 10},
'Beltrami': {'pop': 44442, 'tracts': 10},
'Benton': {'pop': 38451, 'tracts': 9},
'Big Stone': {'pop': 5269, 'tracts': 3},
'Blue Earth': {'pop': 64013, 'tracts': 16},
'Brown': {'pop': 25893, 'tracts': 8},
'Carlton': {'pop': 35386, 'tracts': 7},
'Carver': {'pop': 91042, 'tracts': 19},
'Cass': {'pop': 28567, 'tracts': 10},
'Chippewa': {'pop': 12441, 'tracts': 4},
'Chisago': {'pop': 53887, 'tracts': 10},
'Clay': {'pop': 58999, 'tracts': 13},
'Clearwater': {'pop': 8695, 'tracts': 3},
'Cook': {'pop': 5176, 'tracts': 3},
'Cottonwood': {'pop': 11687, 'tracts': 4},
'Crow Wing': {'pop': 62500, 'tracts': 16},
'Dakota': {'pop': 398552, 'tracts': 95},
'Dodge': {'pop': 20087, 'tracts': 5},
'Douglas': {'pop': 36009, 'tracts': 9},
'Faribault': {'pop': 14553, 'tracts': 6},
'Fillmore': {'pop': 20866, 'tracts': 6},
'Freeborn': {'pop': 31255, 'tracts': 10},
'Goodhue': {'pop': 46183, 'tracts': 10},
'Grant': {'pop': 6018, 'tracts': 2},
'Hennepin': {'pop': 1152425, 'tracts': 299},
'Houston': {'pop': 19027, 'tracts': 5},
'Hubbard': {'pop': 20428, 'tracts': 7},
'Isanti': {'pop': 37816, 'tracts': 8},
'Itasca': {'pop': 45058, 'tracts': 11},
'Jackson': {'pop': 10266, 'tracts': 4},
'Kanabec': {'pop': 16239, 'tracts': 4},
'Kandiyohi': {'pop': 42239, 'tracts': 12},
'Kittson': {'pop': 4552, 'tracts': 2},
'Koochiching': {'pop': 13311, 'tracts': 4},
'Lac qui Parle': {'pop': 7259, 'tracts': 3},
'Lake': {'pop': 10866, 'tracts': 3},
'Lake of the Woods': {'pop': 4045, 'tracts': 2},
'Le Sueur': {'pop': 27703, 'tracts': 6},
'Lincoln': {'pop': 5896, 'tracts': 2},
'Lyon': {'pop': 25857, 'tracts': 7},
'Mahnomen': {'pop': 5413, 'tracts': 2},
'Marshall': {'pop': 9439, 'tracts': 4},
'Martin': {'pop': 20840, 'tracts': 6},
'McLeod': {'pop': 36651, 'tracts': 7},
'Meeker': {'pop': 23300, 'tracts': 6},
'Mille Lacs': {'pop': 26097, 'tracts': 7},
'Morrison': {'pop': 33198, 'tracts': 8},
'Mower': {'pop': 39163, 'tracts': 11},
'Murray': {'pop': 8725, 'tracts': 3},
'Nicollet': {'pop': 32727, 'tracts': 7},
'Nobles': {'pop': 21378, 'tracts': 6},
'Norman': {'pop': 6852, 'tracts': 3},
'Olmsted': {'pop': 144248, 'tracts': 33},
'Otter Tail': {'pop': 57303, 'tracts': 17},
'Pennington': {'pop': 13930, 'tracts': 5},
'Pine': {'pop': 29750, 'tracts': 8},
'Pipestone': {'pop': 9596, 'tracts': 5},
'Polk': {'pop': 31600, 'tracts': 10},
'Pope': {'pop': 10995, 'tracts': 4},
'Ramsey': {'pop': 508640, 'tracts': 137},
'Red Lake': {'pop': 4089, 'tracts': 2},
'Redwood': {'pop': 16059, 'tracts': 6},
'Renville': {'pop': 15730, 'tracts': 6},
'Rice': {'pop': 64142, 'tracts': 13},
'Rock': {'pop': 9687, 'tracts': 3},
'Roseau': {'pop': 15629, 'tracts': 5},
'Scott': {'pop': 129928, 'tracts': 21},
'Sherburne': {'pop': 88499, 'tracts': 11},
'Sibley': {'pop': 15226, 'tracts': 4},
'St. Louis': {'pop': 200226, 'tracts': 66},
'Stearns': {'pop': 150642, 'tracts': 29},
'Steele': {'pop': 36576, 'tracts': 8},
'Stevens': {'pop': 9726, 'tracts': 3},
'Swift': {'pop': 9783, 'tracts': 4},
'Todd': {'pop': 24895, 'tracts': 8},
'Traverse': {'pop': 3558, 'tracts': 2},
'Wabasha': {'pop': 21676, 'tracts': 6},
'Wadena': {'pop': 13843, 'tracts': 3},
'Waseca': {'pop': 19136, 'tracts': 5},
'Washington': {'pop': 238136, 'tracts': 50},
'Watonwan': {'pop': 11211, 'tracts': 3},
'Wilkin': {'pop': 6576, 'tracts': 2},
'Winona': {'pop': 51461, 'tracts': 10},
'Wright': {'pop': 124700, 'tracts': 17},
'Yellow Medicine': {'pop': 10438, 'tracts': 4}},
'MO': {'Adair': {'pop': 25607, 'tracts': 7},
'Andrew': {'pop': 17291, 'tracts': 4},
'Atchison': {'pop': 5685, 'tracts': 2},
'Audrain': {'pop': 25529, 'tracts': 7},
'Barry': {'pop': 35597, 'tracts': 7},
'Barton': {'pop': 12402, 'tracts': 3},
'Bates': {'pop': 17049, 'tracts': 4},
'Benton': {'pop': 19056, 'tracts': 6},
'Bollinger': {'pop': 12363, 'tracts': 3},
'Boone': {'pop': 162642, 'tracts': 29},
'Buchanan': {'pop': 89201, 'tracts': 25},
'Butler': {'pop': 42794, 'tracts': 10},
'Caldwell': {'pop': 9424, 'tracts': 2},
'Callaway': {'pop': 44332, 'tracts': 8},
'Camden': {'pop': 44002, 'tracts': 11},
'Cape Girardeau': {'pop': 75674, 'tracts': 16},
'Carroll': {'pop': 9295, 'tracts': 3},
'Carter': {'pop': 6265, 'tracts': 2},
'Cass': {'pop': 99478, 'tracts': 20},
'Cedar': {'pop': 13982, 'tracts': 3},
'Chariton': {'pop': 7831, 'tracts': 3},
'Christian': {'pop': 77422, 'tracts': 14},
'Clark': {'pop': 7139, 'tracts': 3},
'Clay': {'pop': 221939, 'tracts': 44},
'Clinton': {'pop': 20743, 'tracts': 4},
'Cole': {'pop': 75990, 'tracts': 15},
'Cooper': {'pop': 17601, 'tracts': 5},
'Crawford': {'pop': 24696, 'tracts': 6},
'Dade': {'pop': 7883, 'tracts': 2},
'Dallas': {'pop': 16777, 'tracts': 3},
'Daviess': {'pop': 8433, 'tracts': 2},
'DeKalb': {'pop': 12892, 'tracts': 2},
'Dent': {'pop': 15657, 'tracts': 4},
'Douglas': {'pop': 13684, 'tracts': 3},
'Dunklin': {'pop': 31953, 'tracts': 10},
'Franklin': {'pop': 101492, 'tracts': 17},
'Gasconade': {'pop': 15222, 'tracts': 5},
'Gentry': {'pop': 6738, 'tracts': 2},
'Greene': {'pop': 275174, 'tracts': 62},
'Grundy': {'pop': 10261, 'tracts': 4},
'Harrison': {'pop': 8957, 'tracts': 3},
'Henry': {'pop': 22272, 'tracts': 6},
'Hickory': {'pop': 9627, 'tracts': 3},
'Holt': {'pop': 4912, 'tracts': 3},
'Howard': {'pop': 10144, 'tracts': 3},
'Howell': {'pop': 40400, 'tracts': 8},
'Iron': {'pop': 10630, 'tracts': 4},
'Jackson': {'pop': 674158, 'tracts': 199},
'Jasper': {'pop': 117404, 'tracts': 22},
'Jefferson': {'pop': 218733, 'tracts': 42},
'Johnson': {'pop': 52595, 'tracts': 9},
'Knox': {'pop': 4131, 'tracts': 2},
'Laclede': {'pop': 35571, 'tracts': 6},
'Lafayette': {'pop': 33381, 'tracts': 7},
'Lawrence': {'pop': 38634, 'tracts': 7},
'Lewis': {'pop': 10211, 'tracts': 4},
'Lincoln': {'pop': 52566, 'tracts': 7},
'Linn': {'pop': 12761, 'tracts': 5},
'Livingston': {'pop': 15195, 'tracts': 5},
'Macon': {'pop': 15566, 'tracts': 5},
'Madison': {'pop': 12226, 'tracts': 3},
'Maries': {'pop': 9176, 'tracts': 3},
'Marion': {'pop': 28781, 'tracts': 8},
'McDonald': {'pop': 23083, 'tracts': 4},
'Mercer': {'pop': 3785, 'tracts': 2},
'Miller': {'pop': 24748, 'tracts': 5},
'Mississippi': {'pop': 14358, 'tracts': 4},
'Moniteau': {'pop': 15607, 'tracts': 4},
'Monroe': {'pop': 8840, 'tracts': 3},
'Montgomery': {'pop': 12236, 'tracts': 4},
'Morgan': {'pop': 20565, 'tracts': 5},
'New Madrid': {'pop': 18956, 'tracts': 6},
'Newton': {'pop': 58114, 'tracts': 12},
'Nodaway': {'pop': 23370, 'tracts': 5},
'Oregon': {'pop': 10881, 'tracts': 3},
'Osage': {'pop': 13878, 'tracts': 4},
'Ozark': {'pop': 9723, 'tracts': 2},
'Pemiscot': {'pop': 18296, 'tracts': 6},
'Perry': {'pop': 18971, 'tracts': 5},
'Pettis': {'pop': 42201, 'tracts': 11},
'Phelps': {'pop': 45156, 'tracts': 10},
'Pike': {'pop': 18516, 'tracts': 5},
'Platte': {'pop': 89322, 'tracts': 20},
'Polk': {'pop': 31137, 'tracts': 4},
'Pulaski': {'pop': 52274, 'tracts': 9},
'Putnam': {'pop': 4979, 'tracts': 2},
'Ralls': {'pop': 10167, 'tracts': 3},
'Randolph': {'pop': 25414, 'tracts': 6},
'Ray': {'pop': 23494, 'tracts': 4},
'Reynolds': {'pop': 6696, 'tracts': 2},
'Ripley': {'pop': 14100, 'tracts': 4},
'Saline': {'pop': 23370, 'tracts': 8},
'Schuyler': {'pop': 4431, 'tracts': 2},
'Scotland': {'pop': 4843, 'tracts': 2},
'Scott': {'pop': 39191, 'tracts': 10},
'Shannon': {'pop': 8441, 'tracts': 2},
'Shelby': {'pop': 6373, 'tracts': 3},
'St. Charles': {'pop': 360485, 'tracts': 79},
'St. Clair': {'pop': 9805, 'tracts': 3},
'St. Francois': {'pop': 65359, 'tracts': 11},
'St. Louis': {'pop': 998954, 'tracts': 199},
'St. Louis City': {'pop': 319294, 'tracts': 106},
'Ste. Genevieve': {'pop': 18145, 'tracts': 4},
'Stoddard': {'pop': 29968, 'tracts': 8},
'Stone': {'pop': 32202, 'tracts': 6},
'Sullivan': {'pop': 6714, 'tracts': 3},
'Taney': {'pop': 51675, 'tracts': 10},
'Texas': {'pop': 26008, 'tracts': 4},
'Vernon': {'pop': 21159, 'tracts': 6},
'Warren': {'pop': 32513, 'tracts': 5},
'Washington': {'pop': 25195, 'tracts': 5},
'Wayne': {'pop': 13521, 'tracts': 4},
'Webster': {'pop': 36202, 'tracts': 8},
'Worth': {'pop': 2171, 'tracts': 1},
'Wright': {'pop': 18815, 'tracts': 4}},
'MS': {'Adams': {'pop': 32297, 'tracts': 9},
'Alcorn': {'pop': 37057, 'tracts': 7},
'Amite': {'pop': 13131, 'tracts': 3},
'Attala': {'pop': 19564, 'tracts': 6},
'Benton': {'pop': 8729, 'tracts': 2},
'Bolivar': {'pop': 34145, 'tracts': 8},
'Calhoun': {'pop': 14962, 'tracts': 5},
'Carroll': {'pop': 10597, 'tracts': 2},
'Chickasaw': {'pop': 17392, 'tracts': 4},
'Choctaw': {'pop': 8547, 'tracts': 3},
'Claiborne': {'pop': 9604, 'tracts': 3},
'Clarke': {'pop': 16732, 'tracts': 4},
'Clay': {'pop': 20634, 'tracts': 5},
'Coahoma': {'pop': 26151, 'tracts': 7},
'Copiah': {'pop': 29449, 'tracts': 6},
'Covington': {'pop': 19568, 'tracts': 4},
'DeSoto': {'pop': 161252, 'tracts': 33},
'Forrest': {'pop': 74934, 'tracts': 17},
'Franklin': {'pop': 8118, 'tracts': 2},
'George': {'pop': 22578, 'tracts': 5},
'Greene': {'pop': 14400, 'tracts': 2},
'Grenada': {'pop': 21906, 'tracts': 5},
'Hancock': {'pop': 43929, 'tracts': 7},
'Harrison': {'pop': 187105, 'tracts': 46},
'Hinds': {'pop': 245285, 'tracts': 64},
'Holmes': {'pop': 19198, 'tracts': 5},
'Humphreys': {'pop': 9375, 'tracts': 3},
'Issaquena': {'pop': 1406, 'tracts': 1},
'Itawamba': {'pop': 23401, 'tracts': 5},
'Jackson': {'pop': 139668, 'tracts': 28},
'Jasper': {'pop': 17062, 'tracts': 4},
'Jefferson': {'pop': 7726, 'tracts': 2},
'Jefferson Davis': {'pop': 12487, 'tracts': 3},
'Jones': {'pop': 67761, 'tracts': 14},
'Kemper': {'pop': 10456, 'tracts': 2},
'Lafayette': {'pop': 47351, 'tracts': 10},
'Lamar': {'pop': 55658, 'tracts': 8},
'Lauderdale': {'pop': 80261, 'tracts': 19},
'Lawrence': {'pop': 12929, 'tracts': 3},
'Leake': {'pop': 23805, 'tracts': 5},
'Lee': {'pop': 82910, 'tracts': 19},
'Leflore': {'pop': 32317, 'tracts': 8},
'Lincoln': {'pop': 34869, 'tracts': 6},
'Lowndes': {'pop': 59779, 'tracts': 14},
'Madison': {'pop': 95203, 'tracts': 21},
'Marion': {'pop': 27088, 'tracts': 6},
'Marshall': {'pop': 37144, 'tracts': 6},
'Monroe': {'pop': 36989, 'tracts': 9},
'Montgomery': {'pop': 10925, 'tracts': 3},
'Neshoba': {'pop': 29676, 'tracts': 7},
'Newton': {'pop': 21720, 'tracts': 5},
'Noxubee': {'pop': 11545, 'tracts': 3},
'Oktibbeha': {'pop': 47671, 'tracts': 8},
'Panola': {'pop': 34707, 'tracts': 6},
'Pearl River': {'pop': 55834, 'tracts': 9},
'Perry': {'pop': 12250, 'tracts': 3},
'Pike': {'pop': 40404, 'tracts': 8},
'Pontotoc': {'pop': 29957, 'tracts': 6},
'Prentiss': {'pop': 25276, 'tracts': 5},
'Quitman': {'pop': 8223, 'tracts': 3},
'Rankin': {'pop': 141617, 'tracts': 27},
'Scott': {'pop': 28264, 'tracts': 6},
'Sharkey': {'pop': 4916, 'tracts': 2},
'Simpson': {'pop': 27503, 'tracts': 5},
'Smith': {'pop': 16491, 'tracts': 3},
'Stone': {'pop': 17786, 'tracts': 3},
'Sunflower': {'pop': 29450, 'tracts': 7},
'Tallahatchie': {'pop': 15378, 'tracts': 4},
'Tate': {'pop': 28886, 'tracts': 5},
'Tippah': {'pop': 22232, 'tracts': 4},
'Tishomingo': {'pop': 19593, 'tracts': 4},
'Tunica': {'pop': 10778, 'tracts': 3},
'Union': {'pop': 27134, 'tracts': 6},
'Walthall': {'pop': 15443, 'tracts': 3},
'Warren': {'pop': 48773, 'tracts': 12},
'Washington': {'pop': 51137, 'tracts': 19},
'Wayne': {'pop': 20747, 'tracts': 4},
'Webster': {'pop': 10253, 'tracts': 3},
'Wilkinson': {'pop': 9878, 'tracts': 2},
'Winston': {'pop': 19198, 'tracts': 5},
'Yalobusha': {'pop': 12678, 'tracts': 3},
'Yazoo': {'pop': 28065, 'tracts': 6}},
'MT': {'Beaverhead': {'pop': 9246, 'tracts': 3},
'Big Horn': {'pop': 12865, 'tracts': 5},
'Blaine': {'pop': 6491, 'tracts': 4},
'Broadwater': {'pop': 5612, 'tracts': 2},
'Carbon': {'pop': 10078, 'tracts': 5},
'Carter': {'pop': 1160, 'tracts': 1},
'Cascade': {'pop': 81327, 'tracts': 22},
'Chouteau': {'pop': 5813, 'tracts': 2},
'Custer': {'pop': 11699, 'tracts': 6},
'Daniels': {'pop': 1751, 'tracts': 1},
'Dawson': {'pop': 8966, 'tracts': 3},
'Deer Lodge': {'pop': 9298, 'tracts': 3},
'Fallon': {'pop': 2890, 'tracts': 1},
'Fergus': {'pop': 11586, 'tracts': 2},
'Flathead': {'pop': 90928, 'tracts': 19},
'Gallatin': {'pop': 89513, 'tracts': 22},
'Garfield': {'pop': 1206, 'tracts': 1},
'Glacier': {'pop': 13399, 'tracts': 4},
'Golden Valley': {'pop': 884, 'tracts': 1},
'Granite': {'pop': 3079, 'tracts': 1},
'Hill': {'pop': 16096, 'tracts': 6},
'Jefferson': {'pop': 11406, 'tracts': 3},
'Judith Basin': {'pop': 2072, 'tracts': 1},
'Lake': {'pop': 28746, 'tracts': 8},
'Lewis and Clark': {'pop': 63395, 'tracts': 14},
'Liberty': {'pop': 2339, 'tracts': 1},
'Lincoln': {'pop': 19687, 'tracts': 5},
'Madison': {'pop': 7691, 'tracts': 3},
'McCone': {'pop': 1734, 'tracts': 1},
'Meagher': {'pop': 1891, 'tracts': 1},
'Mineral': {'pop': 4223, 'tracts': 2},
'Missoula': {'pop': 109299, 'tracts': 20},
'Musselshell': {'pop': 4538, 'tracts': 2},
'Park': {'pop': 15636, 'tracts': 6},
'Petroleum': {'pop': 494, 'tracts': 1},
'Phillips': {'pop': 4253, 'tracts': 1},
'Pondera': {'pop': 6153, 'tracts': 2},
'Powder River': {'pop': 1743, 'tracts': 1},
'Powell': {'pop': 7027, 'tracts': 2},
'Prairie': {'pop': 1179, 'tracts': 1},
'Ravalli': {'pop': 40212, 'tracts': 10},
'Richland': {'pop': 9746, 'tracts': 4},
'Roosevelt': {'pop': 10425, 'tracts': 3},
'Rosebud': {'pop': 9233, 'tracts': 4},
'Sanders': {'pop': 11413, 'tracts': 3},
'Sheridan': {'pop': 3384, 'tracts': 2},
'Silver Bow': {'pop': 34200, 'tracts': 8},
'Stillwater': {'pop': 9117, 'tracts': 3},
'Sweet Grass': {'pop': 3651, 'tracts': 1},
'Teton': {'pop': 6073, 'tracts': 3},
'Toole': {'pop': 5324, 'tracts': 3},
'Treasure': {'pop': 718, 'tracts': 1},
'Valley': {'pop': 7369, 'tracts': 3},
'Wheatland': {'pop': 2168, 'tracts': 1},
'Wibaux': {'pop': 1017, 'tracts': 1},
'Yellowstone': {'pop': 147972, 'tracts': 32}},
'NC': {'Alamance': {'pop': 151131, 'tracts': 36},
'Alexander': {'pop': 37198, 'tracts': 7},
'Alleghany': {'pop': 11155, 'tracts': 3},
'Anson': {'pop': 26948, 'tracts': 6},
'Ashe': {'pop': 27281, 'tracts': 6},
'Avery': {'pop': 17797, 'tracts': 5},
'Beaufort': {'pop': 47759, 'tracts': 11},
'Bertie': {'pop': 21282, 'tracts': 4},
'Bladen': {'pop': 35190, 'tracts': 6},
'Brunswick': {'pop': 107431, 'tracts': 33},
'Buncombe': {'pop': 238318, 'tracts': 56},
'Burke': {'pop': 90912, 'tracts': 18},
'Cabarrus': {'pop': 178011, 'tracts': 37},
'Caldwell': {'pop': 83029, 'tracts': 17},
'Camden': {'pop': 9980, 'tracts': 2},
'Carteret': {'pop': 66469, 'tracts': 38},
'Caswell': {'pop': 23719, 'tracts': 6},
'Catawba': {'pop': 154358, 'tracts': 31},
'Chatham': {'pop': 63505, 'tracts': 13},
'Cherokee': {'pop': 27444, 'tracts': 7},
'Chowan': {'pop': 14793, 'tracts': 3},
'Clay': {'pop': 10587, 'tracts': 2},
'Cleveland': {'pop': 98078, 'tracts': 22},
'Columbus': {'pop': 58098, 'tracts': 13},
'Craven': {'pop': 103505, 'tracts': 21},
'Cumberland': {'pop': 319431, 'tracts': 68},
'Currituck': {'pop': 23547, 'tracts': 8},
'Dare': {'pop': 33920, 'tracts': 11},
'Davidson': {'pop': 162878, 'tracts': 34},
'Davie': {'pop': 41240, 'tracts': 7},
'Duplin': {'pop': 58505, 'tracts': 11},
'Durham': {'pop': 267587, 'tracts': 60},
'Edgecombe': {'pop': 56552, 'tracts': 14},
'Forsyth': {'pop': 350670, 'tracts': 93},
'Franklin': {'pop': 60619, 'tracts': 12},
'Gaston': {'pop': 206086, 'tracts': 65},
'Gates': {'pop': 12197, 'tracts': 3},
'Graham': {'pop': 8861, 'tracts': 3},
'Granville': {'pop': 59916, 'tracts': 13},
'Greene': {'pop': 21362, 'tracts': 4},
'Guilford': {'pop': 488406, 'tracts': 119},
'Halifax': {'pop': 54691, 'tracts': 12},
'Harnett': {'pop': 114678, 'tracts': 27},
'Haywood': {'pop': 59036, 'tracts': 16},
'Henderson': {'pop': 106740, 'tracts': 27},
'Hertford': {'pop': 24669, 'tracts': 5},
'Hoke': {'pop': 46952, 'tracts': 9},
'Hyde': {'pop': 5810, 'tracts': 2},
'Iredell': {'pop': 159437, 'tracts': 44},
'Jackson': {'pop': 40271, 'tracts': 9},
'Johnston': {'pop': 168878, 'tracts': 25},
'Jones': {'pop': 10153, 'tracts': 3},
'Lee': {'pop': 57866, 'tracts': 13},
'Lenoir': {'pop': 59495, 'tracts': 15},
'Lincoln': {'pop': 78265, 'tracts': 18},
'Macon': {'pop': 33922, 'tracts': 9},
'Madison': {'pop': 20764, 'tracts': 6},
'Martin': {'pop': 24505, 'tracts': 6},
'McDowell': {'pop': 44996, 'tracts': 10},
'Mecklenburg': {'pop': 919628, 'tracts': 233},
'Mitchell': {'pop': 15579, 'tracts': 4},
'Montgomery': {'pop': 27798, 'tracts': 6},
'Moore': {'pop': 88247, 'tracts': 18},
'Nash': {'pop': 95840, 'tracts': 18},
'New Hanover': {'pop': 202667, 'tracts': 45},
'Northampton': {'pop': 22099, 'tracts': 5},
'Onslow': {'pop': 177772, 'tracts': 32},
'Orange': {'pop': 133801, 'tracts': 28},
'Pamlico': {'pop': 13144, 'tracts': 4},
'Pasquotank': {'pop': 40661, 'tracts': 10},
'Pender': {'pop': 52217, 'tracts': 16},
'Perquimans': {'pop': 13453, 'tracts': 3},
'Person': {'pop': 39464, 'tracts': 7},
'Pitt': {'pop': 168148, 'tracts': 32},
'Polk': {'pop': 20510, 'tracts': 7},
'Randolph': {'pop': 141752, 'tracts': 28},
'Richmond': {'pop': 46639, 'tracts': 11},
'Robeson': {'pop': 134168, 'tracts': 31},
'Rockingham': {'pop': 93643, 'tracts': 21},
'Rowan': {'pop': 138428, 'tracts': 30},
'Rutherford': {'pop': 67810, 'tracts': 13},
'Sampson': {'pop': 63431, 'tracts': 11},
'Scotland': {'pop': 36157, 'tracts': 7},
'Stanly': {'pop': 60585, 'tracts': 13},
'Stokes': {'pop': 47401, 'tracts': 9},
'Surry': {'pop': 73673, 'tracts': 22},
'Swain': {'pop': 13981, 'tracts': 5},
'Transylvania': {'pop': 33090, 'tracts': 7},
'Tyrrell': {'pop': 4407, 'tracts': 1},
'Union': {'pop': 201292, 'tracts': 41},
'Vance': {'pop': 45422, 'tracts': 10},
'Wake': {'pop': 900993, 'tracts': 187},
'Warren': {'pop': 20972, 'tracts': 6},
'Washington': {'pop': 13228, 'tracts': 3},
'Watauga': {'pop': 51079, 'tracts': 13},
'Wayne': {'pop': 122623, 'tracts': 26},
'Wilkes': {'pop': 69340, 'tracts': 14},
'Wilson': {'pop': 81234, 'tracts': 19},
'Yadkin': {'pop': 38406, 'tracts': 7},
'Yancey': {'pop': 17818, 'tracts': 5}},
'ND': {'Adams': {'pop': 2343, 'tracts': 1},
'Barnes': {'pop': 11066, 'tracts': 4},
'Benson': {'pop': 6660, 'tracts': 4},
'Billings': {'pop': 783, 'tracts': 1},
'Bottineau': {'pop': 6429, 'tracts': 3},
'Bowman': {'pop': 3151, 'tracts': 2},
'Burke': {'pop': 1968, 'tracts': 1},
'Burleigh': {'pop': 81308, 'tracts': 19},
'Cass': {'pop': 149778, 'tracts': 33},
'Cavalier': {'pop': 3993, 'tracts': 2},
'Dickey': {'pop': 5289, 'tracts': 3},
'Divide': {'pop': 2071, 'tracts': 1},
'Dunn': {'pop': 3536, 'tracts': 1},
'Eddy': {'pop': 2385, 'tracts': 1},
'Emmons': {'pop': 3550, 'tracts': 1},
'Foster': {'pop': 3343, 'tracts': 1},
'Golden Valley': {'pop': 1680, 'tracts': 1},
'Grand Forks': {'pop': 66861, 'tracts': 18},
'Grant': {'pop': 2394, 'tracts': 1},
'Griggs': {'pop': 2420, 'tracts': 1},
'Hettinger': {'pop': 2477, 'tracts': 2},
'Kidder': {'pop': 2435, 'tracts': 1},
'LaMoure': {'pop': 4139, 'tracts': 2},
'Logan': {'pop': 1990, 'tracts': 1},
'McHenry': {'pop': 5395, 'tracts': 2},
'McIntosh': {'pop': 2809, 'tracts': 1},
'McKenzie': {'pop': 6360, 'tracts': 4},
'McLean': {'pop': 8962, 'tracts': 2},
'Mercer': {'pop': 8424, 'tracts': 3},
'Morton': {'pop': 27471, 'tracts': 5},
'Mountrail': {'pop': 7673, 'tracts': 3},
'Nelson': {'pop': 3126, 'tracts': 1},
'Oliver': {'pop': 1846, 'tracts': 1},
'Pembina': {'pop': 7413, 'tracts': 5},
'Pierce': {'pop': 4357, 'tracts': 2},
'Ramsey': {'pop': 11451, 'tracts': 3},
'Ransom': {'pop': 5457, 'tracts': 3},
'Renville': {'pop': 2470, 'tracts': 1},
'Richland': {'pop': 16321, 'tracts': 6},
'Rolette': {'pop': 13937, 'tracts': 4},
'Sargent': {'pop': 3829, 'tracts': 2},
'Sheridan': {'pop': 1321, 'tracts': 1},
'Sioux': {'pop': 4153, 'tracts': 2},
'Slope': {'pop': 727, 'tracts': 1},
'Stark': {'pop': 24199, 'tracts': 8},
'Steele': {'pop': 1975, 'tracts': 1},
'Stutsman': {'pop': 21100, 'tracts': 6},
'Towner': {'pop': 2246, 'tracts': 1},
'Traill': {'pop': 8121, 'tracts': 4},
'Walsh': {'pop': 11119, 'tracts': 6},
'Ward': {'pop': 61675, 'tracts': 13},
'Wells': {'pop': 4207, 'tracts': 2},
'Williams': {'pop': 22398, 'tracts': 7}},
'NE': {'Adams': {'pop': 31364, 'tracts': 9},
'Antelope': {'pop': 6685, 'tracts': 3},
'Arthur': {'pop': 460, 'tracts': 1},
'Banner': {'pop': 690, 'tracts': 1},
'Blaine': {'pop': 478, 'tracts': 1},
'Boone': {'pop': 5505, 'tracts': 2},
'Box Butte': {'pop': 11308, 'tracts': 3},
'Boyd': {'pop': 2099, 'tracts': 1},
'Brown': {'pop': 3145, 'tracts': 1},
'Buffalo': {'pop': 46102, 'tracts': 11},
'Burt': {'pop': 6858, 'tracts': 3},
'Butler': {'pop': 8395, 'tracts': 3},
'Cass': {'pop': 25241, 'tracts': 6},
'Cedar': {'pop': 8852, 'tracts': 2},
'Chase': {'pop': 3966, 'tracts': 1},
'Cherry': {'pop': 5713, 'tracts': 2},
'Cheyenne': {'pop': 9998, 'tracts': 3},
'Clay': {'pop': 6542, 'tracts': 2},
'Colfax': {'pop': 10515, 'tracts': 3},
'Cuming': {'pop': 9139, 'tracts': 3},
'Custer': {'pop': 10939, 'tracts': 4},
'Dakota': {'pop': 21006, 'tracts': 4},
'Dawes': {'pop': 9182, 'tracts': 2},
'Dawson': {'pop': 24326, 'tracts': 7},
'Deuel': {'pop': 1941, 'tracts': 1},
'Dixon': {'pop': 6000, 'tracts': 2},
'Dodge': {'pop': 36691, 'tracts': 9},
'Douglas': {'pop': 517110, 'tracts': 156},
'Dundy': {'pop': 2008, 'tracts': 1},
'Fillmore': {'pop': 5890, 'tracts': 2},
'Franklin': {'pop': 3225, 'tracts': 2},
'Frontier': {'pop': 2756, 'tracts': 1},
'Furnas': {'pop': 4959, 'tracts': 1},
'Gage': {'pop': 22311, 'tracts': 7},
'Garden': {'pop': 2057, 'tracts': 1},
'Garfield': {'pop': 2049, 'tracts': 1},
'Gosper': {'pop': 2044, 'tracts': 1},
'Grant': {'pop': 614, 'tracts': 1},
'Greeley': {'pop': 2538, 'tracts': 1},
'Hall': {'pop': 58607, 'tracts': 14},
'Hamilton': {'pop': 9124, 'tracts': 3},
'Harlan': {'pop': 3423, 'tracts': 1},
'Hayes': {'pop': 967, 'tracts': 1},
'Hitchcock': {'pop': 2908, 'tracts': 1},
'Holt': {'pop': 10435, 'tracts': 4},
'Hooker': {'pop': 736, 'tracts': 1},
'Howard': {'pop': 6274, 'tracts': 2},
'Jefferson': {'pop': 7547, 'tracts': 3},
'Johnson': {'pop': 5217, 'tracts': 2},
'Kearney': {'pop': 6489, 'tracts': 2},
'Keith': {'pop': 8368, 'tracts': 3},
'Keya Paha': {'pop': 824, 'tracts': 1},
'Kimball': {'pop': 3821, 'tracts': 1},
'Knox': {'pop': 8701, 'tracts': 3},
'Lancaster': {'pop': 285407, 'tracts': 74},
'Lincoln': {'pop': 36288, 'tracts': 8},
'Logan': {'pop': 763, 'tracts': 1},
'Loup': {'pop': 632, 'tracts': 1},
'Madison': {'pop': 34876, 'tracts': 9},
'McPherson': {'pop': 539, 'tracts': 1},
'Merrick': {'pop': 7845, 'tracts': 3},
'Morrill': {'pop': 5042, 'tracts': 1},
'Nance': {'pop': 3735, 'tracts': 1},
'Nemaha': {'pop': 7248, 'tracts': 2},
'Nuckolls': {'pop': 4500, 'tracts': 2},
'Otoe': {'pop': 15740, 'tracts': 5},
'Pawnee': {'pop': 2773, 'tracts': 1},
'Perkins': {'pop': 2970, 'tracts': 1},
'Phelps': {'pop': 9188, 'tracts': 3},
'Pierce': {'pop': 7266, 'tracts': 2},
'Platte': {'pop': 32237, 'tracts': 7},
'Polk': {'pop': 5406, 'tracts': 2},
'Red Willow': {'pop': 11055, 'tracts': 3},
'Richardson': {'pop': 8363, 'tracts': 3},
'Rock': {'pop': 1526, 'tracts': 1},
'Saline': {'pop': 14200, 'tracts': 4},
'Sarpy': {'pop': 158840, 'tracts': 43},
'Saunders': {'pop': 20780, 'tracts': 5},
'Scotts Bluff': {'pop': 36970, 'tracts': 11},
'Seward': {'pop': 16750, 'tracts': 4},
'Sheridan': {'pop': 5469, 'tracts': 2},
'Sherman': {'pop': 3152, 'tracts': 1},
'Sioux': {'pop': 1311, 'tracts': 1},
'Stanton': {'pop': 6129, 'tracts': 2},
'Thayer': {'pop': 5228, 'tracts': 2},
'Thomas': {'pop': 647, 'tracts': 1},
'Thurston': {'pop': 6940, 'tracts': 2},
'Valley': {'pop': 4260, 'tracts': 2},
'Washington': {'pop': 20234, 'tracts': 5},
'Wayne': {'pop': 9595, 'tracts': 2},
'Webster': {'pop': 3812, 'tracts': 2},
'Wheeler': {'pop': 818, 'tracts': 1},
'York': {'pop': 13665, 'tracts': 4}},
'NH': {'Belknap': {'pop': 60088, 'tracts': 15},
'Carroll': {'pop': 47818, 'tracts': 11},
'Cheshire': {'pop': 77117, 'tracts': 16},
'Coos': {'pop': 33055, 'tracts': 11},
'Grafton': {'pop': 89118, 'tracts': 19},
'Hillsborough': {'pop': 400721, 'tracts': 86},
'Merrimack': {'pop': 146445, 'tracts': 36},
'Rockingham': {'pop': 295223, 'tracts': 66},
'Strafford': {'pop': 123143, 'tracts': 25},
'Sullivan': {'pop': 43742, 'tracts': 10}},
'NJ': {'Atlantic': {'pop': 274549, 'tracts': 69},
'Bergen': {'pop': 905116, 'tracts': 179},
'Burlington': {'pop': 448734, 'tracts': 114},
'Camden': {'pop': 513657, 'tracts': 127},
'Cape May': {'pop': 97265, 'tracts': 32},
'Cumberland': {'pop': 156898, 'tracts': 35},
'Essex': {'pop': 783969, 'tracts': 210},
'Gloucester': {'pop': 288288, 'tracts': 63},
'Hudson': {'pop': 634266, 'tracts': 166},
'Hunterdon': {'pop': 128349, 'tracts': 26},
'Mercer': {'pop': 366513, 'tracts': 77},
'Middlesex': {'pop': 809858, 'tracts': 175},
'Monmouth': {'pop': 630380, 'tracts': 144},
'Morris': {'pop': 492276, 'tracts': 100},
'Ocean': {'pop': 576567, 'tracts': 126},
'Passaic': {'pop': 501226, 'tracts': 100},
'Salem': {'pop': 66083, 'tracts': 24},
'Somerset': {'pop': 323444, 'tracts': 68},
'Sussex': {'pop': 149265, 'tracts': 41},
'Union': {'pop': 536499, 'tracts': 108},
'Warren': {'pop': 108692, 'tracts': 23}},
'NM': {'Bernalillo': {'pop': 662564, 'tracts': 153},
'Catron': {'pop': 3725, 'tracts': 1},
'Chaves': {'pop': 65645, 'tracts': 16},
'Cibola': {'pop': 27213, 'tracts': 7},
'Colfax': {'pop': 13750, 'tracts': 3},
'Curry': {'pop': 48376, 'tracts': 12},
'De Baca': {'pop': 2022, 'tracts': 1},
'Dona Ana': {'pop': 209233, 'tracts': 41},
'Eddy': {'pop': 53829, 'tracts': 12},
'Grant': {'pop': 29514, 'tracts': 8},
'Guadalupe': {'pop': 4687, 'tracts': 1},
'Harding': {'pop': 695, 'tracts': 1},
'Hidalgo': {'pop': 4894, 'tracts': 2},
'Lea': {'pop': 64727, 'tracts': 18},
'Lincoln': {'pop': 20497, 'tracts': 5},
'Los Alamos': {'pop': 17950, 'tracts': 4},
'Luna': {'pop': 25095, 'tracts': 6},
'McKinley': {'pop': 71492, 'tracts': 17},
'Mora': {'pop': 4881, 'tracts': 1},
'Otero': {'pop': 63797, 'tracts': 16},
'Quay': {'pop': 9041, 'tracts': 3},
'Rio Arriba': {'pop': 40246, 'tracts': 9},
'Roosevelt': {'pop': 19846, 'tracts': 5},
'San Juan': {'pop': 130044, 'tracts': 33},
'San Miguel': {'pop': 29393, 'tracts': 7},
'Sandoval': {'pop': 131561, 'tracts': 28},
'Santa Fe': {'pop': 144170, 'tracts': 50},
'Sierra': {'pop': 11988, 'tracts': 4},
'Socorro': {'pop': 17866, 'tracts': 6},
'Taos': {'pop': 32937, 'tracts': 6},
'Torrance': {'pop': 16383, 'tracts': 4},
'Union': {'pop': 4549, 'tracts': 1},
'Valencia': {'pop': 76569, 'tracts': 18}},
'NV': {'Carson City': {'pop': 55274, 'tracts': 14},
'Churchill': {'pop': 24877, 'tracts': 7},
'Clark': {'pop': 1951269, 'tracts': 487},
'Douglas': {'pop': 46997, 'tracts': 17},
'Elko': {'pop': 48818, 'tracts': 14},
'Esmeralda': {'pop': 783, 'tracts': 1},
'Eureka': {'pop': 1987, 'tracts': 1},
'Humboldt': {'pop': 16528, 'tracts': 4},
'Lander': {'pop': 5775, 'tracts': 1},
'Lincoln': {'pop': 5345, 'tracts': 2},
'Lyon': {'pop': 51980, 'tracts': 10},
'Mineral': {'pop': 4772, 'tracts': 2},
'Nye': {'pop': 43946, 'tracts': 10},
'Pershing': {'pop': 6753, 'tracts': 1},
'Storey': {'pop': 4010, 'tracts': 1},
'Washoe': {'pop': 421407, 'tracts': 112},
'White Pine': {'pop': 10030, 'tracts': 3}},
'NY': {'Albany': {'pop': 304204, 'tracts': 75},
'Allegany': {'pop': 48946, 'tracts': 13},
'Bronx': {'pop': 1385108, 'tracts': 339},
'Broome': {'pop': 200600, 'tracts': 55},
'Cattaraugus': {'pop': 80317, 'tracts': 21},
'Cayuga': {'pop': 80026, 'tracts': 20},
'Chautauqua': {'pop': 134905, 'tracts': 35},
'Chemung': {'pop': 88830, 'tracts': 22},
'Chenango': {'pop': 50477, 'tracts': 12},
'Clinton': {'pop': 82128, 'tracts': 19},
'Columbia': {'pop': 63096, 'tracts': 21},
'Cortland': {'pop': 49336, 'tracts': 12},
'Delaware': {'pop': 47980, 'tracts': 14},
'Dutchess': {'pop': 297488, 'tracts': 79},
'Erie': {'pop': 919040, 'tracts': 237},
'Essex': {'pop': 39370, 'tracts': 13},
'Franklin': {'pop': 51599, 'tracts': 14},
'Fulton': {'pop': 55531, 'tracts': 15},
'Genesee': {'pop': 60079, 'tracts': 15},
'Greene': {'pop': 49221, 'tracts': 15},
'Hamilton': {'pop': 4836, 'tracts': 4},
'Herkimer': {'pop': 64519, 'tracts': 19},
'Jefferson': {'pop': 116229, 'tracts': 26},
'Kings': {'pop': 2504700, 'tracts': 760},
'Lewis': {'pop': 27087, 'tracts': 7},
'Livingston': {'pop': 65393, 'tracts': 15},
'Madison': {'pop': 73442, 'tracts': 16},
'Monroe': {'pop': 744344, 'tracts': 192},
'Montgomery': {'pop': 50219, 'tracts': 16},
'Nassau': {'pop': 1339532, 'tracts': 280},
'New York': {'pop': 1585873, 'tracts': 288},
'Niagara': {'pop': 216469, 'tracts': 61},
'Oneida': {'pop': 234878, 'tracts': 74},
'Onondaga': {'pop': 467026, 'tracts': 140},
'Ontario': {'pop': 107931, 'tracts': 25},
'Orange': {'pop': 372813, 'tracts': 79},
'Orleans': {'pop': 42883, 'tracts': 11},
'Oswego': {'pop': 122109, 'tracts': 29},
'Otsego': {'pop': 62259, 'tracts': 17},
'Putnam': {'pop': 99710, 'tracts': 19},
'Queens': {'pop': 2230722, 'tracts': 669},
'Rensselaer': {'pop': 159429, 'tracts': 42},
'Richmond': {'pop': 468730, 'tracts': 109},
'Rockland': {'pop': 311687, 'tracts': 65},
'Saratoga': {'pop': 219607, 'tracts': 50},
'Schenectady': {'pop': 154727, 'tracts': 43},
'Schoharie': {'pop': 32749, 'tracts': 8},
'Schuyler': {'pop': 18343, 'tracts': 5},
'Seneca': {'pop': 35251, 'tracts': 10},
'St. Lawrence': {'pop': 111944, 'tracts': 28},
'Steuben': {'pop': 98990, 'tracts': 30},
'Suffolk': {'pop': 1493350, 'tracts': 322},
'Sullivan': {'pop': 77547, 'tracts': 24},
'Tioga': {'pop': 51125, 'tracts': 10},
'Tompkins': {'pop': 101564, 'tracts': 23},
'Ulster': {'pop': 182493, 'tracts': 47},
'Warren': {'pop': 65707, 'tracts': 19},
'Washington': {'pop': 63216, 'tracts': 17},
'Wayne': {'pop': 93772, 'tracts': 23},
'Westchester': {'pop': 949113, 'tracts': 223},
'Wyoming': {'pop': 42155, 'tracts': 11},
'Yates': {'pop': 25348, 'tracts': 5}},
'OH': {'Adams': {'pop': 28550, 'tracts': 6},
'Allen': {'pop': 106331, 'tracts': 33},
'Ashland': {'pop': 53139, 'tracts': 11},
'Ashtabula': {'pop': 101497, 'tracts': 25},
'Athens': {'pop': 64757, 'tracts': 15},
'Auglaize': {'pop': 45949, 'tracts': 11},
'Belmont': {'pop': 70400, 'tracts': 20},
'Brown': {'pop': 44846, 'tracts': 9},
'Butler': {'pop': 368130, 'tracts': 80},
'Carroll': {'pop': 28836, 'tracts': 7},
'Champaign': {'pop': 40097, 'tracts': 10},
'Clark': {'pop': 138333, 'tracts': 44},
'Clermont': {'pop': 197363, 'tracts': 40},
'Clinton': {'pop': 42040, 'tracts': 9},
'Columbiana': {'pop': 107841, 'tracts': 24},
'Coshocton': {'pop': 36901, 'tracts': 10},
'Crawford': {'pop': 43784, 'tracts': 13},
'Cuyahoga': {'pop': 1280122, 'tracts': 447},
'Darke': {'pop': 52959, 'tracts': 12},
'Defiance': {'pop': 39037, 'tracts': 9},
'Delaware': {'pop': 174214, 'tracts': 35},
'Erie': {'pop': 77079, 'tracts': 19},
'Fairfield': {'pop': 146156, 'tracts': 28},
'Fayette': {'pop': 29030, 'tracts': 7},
'Franklin': {'pop': 1163414, 'tracts': 284},
'Fulton': {'pop': 42698, 'tracts': 9},
'Gallia': {'pop': 30934, 'tracts': 7},
'Geauga': {'pop': 93389, 'tracts': 21},
'Greene': {'pop': 161573, 'tracts': 35},
'Guernsey': {'pop': 40087, 'tracts': 10},
'Hamilton': {'pop': 802374, 'tracts': 222},
'Hancock': {'pop': 74782, 'tracts': 13},
'Hardin': {'pop': 32058, 'tracts': 7},
'Harrison': {'pop': 15864, 'tracts': 5},
'Henry': {'pop': 28215, 'tracts': 7},
'Highland': {'pop': 43589, 'tracts': 9},
'Hocking': {'pop': 29380, 'tracts': 7},
'Holmes': {'pop': 42366, 'tracts': 8},
'Huron': {'pop': 59626, 'tracts': 13},
'Jackson': {'pop': 33225, 'tracts': 7},
'Jefferson': {'pop': 69709, 'tracts': 23},
'Knox': {'pop': 60921, 'tracts': 12},
'Lake': {'pop': 230041, 'tracts': 59},
'Lawrence': {'pop': 62450, 'tracts': 16},
'Licking': {'pop': 166492, 'tracts': 32},
'Logan': {'pop': 45858, 'tracts': 11},
'Lorain': {'pop': 301356, 'tracts': 73},
'Lucas': {'pop': 441815, 'tracts': 127},
'Madison': {'pop': 43435, 'tracts': 12},
'Mahoning': {'pop': 238823, 'tracts': 70},
'Marion': {'pop': 66501, 'tracts': 18},
'Medina': {'pop': 172332, 'tracts': 37},
'Meigs': {'pop': 23770, 'tracts': 6},
'Mercer': {'pop': 40814, 'tracts': 9},
'Miami': {'pop': 102506, 'tracts': 21},
'Monroe': {'pop': 14642, 'tracts': 4},
'Montgomery': {'pop': 535153, 'tracts': 153},
'Morgan': {'pop': 15054, 'tracts': 4},
'Morrow': {'pop': 34827, 'tracts': 6},
'Muskingum': {'pop': 86074, 'tracts': 19},
'Noble': {'pop': 14645, 'tracts': 3},
'Ottawa': {'pop': 41428, 'tracts': 13},
'Paulding': {'pop': 19614, 'tracts': 5},
'Perry': {'pop': 36058, 'tracts': 6},
'Pickaway': {'pop': 55698, 'tracts': 13},
'Pike': {'pop': 28709, 'tracts': 6},
'Portage': {'pop': 161419, 'tracts': 35},
'Preble': {'pop': 42270, 'tracts': 12},
'Putnam': {'pop': 34499, 'tracts': 7},
'Richland': {'pop': 124475, 'tracts': 30},
'Ross': {'pop': 78064, 'tracts': 17},
'Sandusky': {'pop': 60944, 'tracts': 15},
'Scioto': {'pop': 79499, 'tracts': 20},
'Seneca': {'pop': 56745, 'tracts': 14},
'Shelby': {'pop': 49423, 'tracts': 10},
'Stark': {'pop': 375586, 'tracts': 86},
'Summit': {'pop': 541781, 'tracts': 135},
'Trumbull': {'pop': 210312, 'tracts': 55},
'Tuscarawas': {'pop': 92582, 'tracts': 21},
'Union': {'pop': 52300, 'tracts': 10},
'Van Wert': {'pop': 28744, 'tracts': 9},
'Vinton': {'pop': 13435, 'tracts': 3},
'Warren': {'pop': 212693, 'tracts': 33},
'Washington': {'pop': 61778, 'tracts': 16},
'Wayne': {'pop': 114520, 'tracts': 32},
'Williams': {'pop': 37642, 'tracts': 9},
'Wood': {'pop': 125488, 'tracts': 28},
'Wyandot': {'pop': 22615, 'tracts': 6}},
'OK': {'Adair': {'pop': 22683, 'tracts': 5},
'Alfalfa': {'pop': 5642, 'tracts': 3},
'Atoka': {'pop': 14182, 'tracts': 4},
'Beaver': {'pop': 5636, 'tracts': 3},
'Beckham': {'pop': 22119, 'tracts': 4},
'Blaine': {'pop': 11943, 'tracts': 5},
'Bryan': {'pop': 42416, 'tracts': 11},
'Caddo': {'pop': 29600, 'tracts': 8},
'Canadian': {'pop': 115541, 'tracts': 29},
'Carter': {'pop': 47557, 'tracts': 11},
'Cherokee': {'pop': 46987, 'tracts': 9},
'Choctaw': {'pop': 15205, 'tracts': 5},
'Cimarron': {'pop': 2475, 'tracts': 2},
'Cleveland': {'pop': 255755, 'tracts': 62},
'Coal': {'pop': 5925, 'tracts': 2},
'Comanche': {'pop': 124098, 'tracts': 32},
'Cotton': {'pop': 6193, 'tracts': 2},
'Craig': {'pop': 15029, 'tracts': 5},
'Creek': {'pop': 69967, 'tracts': 21},
'Custer': {'pop': 27469, 'tracts': 5},
'Delaware': {'pop': 41487, 'tracts': 9},
'Dewey': {'pop': 4810, 'tracts': 3},
'Ellis': {'pop': 4151, 'tracts': 2},
'Garfield': {'pop': 60580, 'tracts': 12},
'Garvin': {'pop': 27576, 'tracts': 9},
'Grady': {'pop': 52431, 'tracts': 10},
'Grant': {'pop': 4527, 'tracts': 2},
'Greer': {'pop': 6239, 'tracts': 2},
'Harmon': {'pop': 2922, 'tracts': 1},
'Harper': {'pop': 3685, 'tracts': 2},
'Haskell': {'pop': 12769, 'tracts': 4},
'Hughes': {'pop': 14003, 'tracts': 5},
'Jackson': {'pop': 26446, 'tracts': 8},
'Jefferson': {'pop': 6472, 'tracts': 3},
'Johnston': {'pop': 10957, 'tracts': 3},
'Kay': {'pop': 46562, 'tracts': 11},
'Kingfisher': {'pop': 15034, 'tracts': 4},
'Kiowa': {'pop': 9446, 'tracts': 3},
'Latimer': {'pop': 11154, 'tracts': 3},
'Le Flore': {'pop': 50384, 'tracts': 12},
'Lincoln': {'pop': 34273, 'tracts': 7},
'Logan': {'pop': 41848, 'tracts': 8},
'Love': {'pop': 9423, 'tracts': 3},
'Major': {'pop': 7527, 'tracts': 3},
'Marshall': {'pop': 15840, 'tracts': 4},
'Mayes': {'pop': 41259, 'tracts': 9},
'McClain': {'pop': 34506, 'tracts': 6},
'McCurtain': {'pop': 33151, 'tracts': 8},
'McIntosh': {'pop': 20252, 'tracts': 6},
'Murray': {'pop': 13488, 'tracts': 3},
'Muskogee': {'pop': 70990, 'tracts': 16},
'Noble': {'pop': 11561, 'tracts': 4},
'Nowata': {'pop': 10536, 'tracts': 4},
'Okfuskee': {'pop': 12191, 'tracts': 4},
'Oklahoma': {'pop': 718633, 'tracts': 241},
'Okmulgee': {'pop': 40069, 'tracts': 10},
'Osage': {'pop': 47472, 'tracts': 11},
'Ottawa': {'pop': 31848, 'tracts': 9},
'Pawnee': {'pop': 16577, 'tracts': 5},
'Payne': {'pop': 77350, 'tracts': 17},
'Pittsburg': {'pop': 45837, 'tracts': 13},
'Pontotoc': {'pop': 37492, 'tracts': 10},
'Pottawatomie': {'pop': 69442, 'tracts': 16},
'Pushmataha': {'pop': 11572, 'tracts': 3},
'Roger Mills': {'pop': 3647, 'tracts': 1},
'Rogers': {'pop': 86905, 'tracts': 28},
'Seminole': {'pop': 25482, 'tracts': 9},
'Sequoyah': {'pop': 42391, 'tracts': 9},
'Stephens': {'pop': 45048, 'tracts': 11},
'Texas': {'pop': 20640, 'tracts': 5},
'Tillman': {'pop': 7992, 'tracts': 5},
'Tulsa': {'pop': 603403, 'tracts': 175},
'Wagoner': {'pop': 73085, 'tracts': 22},
'Washington': {'pop': 50976, 'tracts': 13},
'Washita': {'pop': 11629, 'tracts': 4},
'Woods': {'pop': 8878, 'tracts': 3},
'Woodward': {'pop': 20081, 'tracts': 5}},
'OR': {'Baker': {'pop': 16134, 'tracts': 6},
'Benton': {'pop': 85579, 'tracts': 18},
'Clackamas': {'pop': 375992, 'tracts': 80},
'Clatsop': {'pop': 37039, 'tracts': 12},
'Columbia': {'pop': 49351, 'tracts': 10},
'Coos': {'pop': 63043, 'tracts': 13},
'Crook': {'pop': 20978, 'tracts': 4},
'Curry': {'pop': 22364, 'tracts': 6},
'Deschutes': {'pop': 157733, 'tracts': 24},
'Douglas': {'pop': 107667, 'tracts': 22},
'Gilliam': {'pop': 1871, 'tracts': 1},
'Grant': {'pop': 7445, 'tracts': 2},
'Harney': {'pop': 7422, 'tracts': 2},
'Hood River': {'pop': 22346, 'tracts': 4},
'Jackson': {'pop': 203206, 'tracts': 41},
'Jefferson': {'pop': 21720, 'tracts': 6},
'Josephine': {'pop': 82713, 'tracts': 16},
'Klamath': {'pop': 66380, 'tracts': 20},
'Lake': {'pop': 7895, 'tracts': 2},
'Lane': {'pop': 351715, 'tracts': 86},
'Lincoln': {'pop': 46034, 'tracts': 18},
'Linn': {'pop': 116672, 'tracts': 21},
'Malheur': {'pop': 31313, 'tracts': 8},
'Marion': {'pop': 315335, 'tracts': 58},
'Morrow': {'pop': 11173, 'tracts': 2},
'Multnomah': {'pop': 735334, 'tracts': 171},
'Polk': {'pop': 75403, 'tracts': 12},
'Sherman': {'pop': 1765, 'tracts': 1},
'Tillamook': {'pop': 25250, 'tracts': 8},
'Umatilla': {'pop': 75889, 'tracts': 15},
'Union': {'pop': 25748, 'tracts': 8},
'Wallowa': {'pop': 7008, 'tracts': 3},
'Wasco': {'pop': 25213, 'tracts': 8},
'Washington': {'pop': 529710, 'tracts': 104},
'Wheeler': {'pop': 1441, 'tracts': 1},
'Yamhill': {'pop': 99193, 'tracts': 17}},
'PA': {'Adams': {'pop': 101407, 'tracts': 23},
'Allegheny': {'pop': 1223348, 'tracts': 402},
'Armstrong': {'pop': 68941, 'tracts': 19},
'Beaver': {'pop': 170539, 'tracts': 51},
'Bedford': {'pop': 49762, 'tracts': 11},
'Berks': {'pop': 411442, 'tracts': 90},
'Blair': {'pop': 127089, 'tracts': 34},
'Bradford': {'pop': 62622, 'tracts': 14},
'Bucks': {'pop': 625249, 'tracts': 143},
'Butler': {'pop': 183862, 'tracts': 44},
'Cambria': {'pop': 143679, 'tracts': 42},
'Cameron': {'pop': 5085, 'tracts': 2},
'Carbon': {'pop': 65249, 'tracts': 12},
'Centre': {'pop': 153990, 'tracts': 31},
'Chester': {'pop': 498886, 'tracts': 116},
'Clarion': {'pop': 39988, 'tracts': 10},
'Clearfield': {'pop': 81642, 'tracts': 20},
'Clinton': {'pop': 39238, 'tracts': 9},
'Columbia': {'pop': 67295, 'tracts': 15},
'Crawford': {'pop': 88765, 'tracts': 23},
'Cumberland': {'pop': 235406, 'tracts': 49},
'Dauphin': {'pop': 268100, 'tracts': 65},
'Delaware': {'pop': 558979, 'tracts': 144},
'Elk': {'pop': 31946, 'tracts': 9},
'Erie': {'pop': 280566, 'tracts': 72},
'Fayette': {'pop': 136606, 'tracts': 36},
'Forest': {'pop': 7716, 'tracts': 3},
'Franklin': {'pop': 149618, 'tracts': 27},
'Fulton': {'pop': 14845, 'tracts': 3},
'Greene': {'pop': 38686, 'tracts': 9},
'Huntingdon': {'pop': 45913, 'tracts': 12},
'Indiana': {'pop': 88880, 'tracts': 23},
'Jefferson': {'pop': 45200, 'tracts': 13},
'Juniata': {'pop': 24636, 'tracts': 5},
'Lackawanna': {'pop': 214437, 'tracts': 59},
'Lancaster': {'pop': 519445, 'tracts': 98},
'Lawrence': {'pop': 91108, 'tracts': 28},
'Lebanon': {'pop': 133568, 'tracts': 31},
'Lehigh': {'pop': 349497, 'tracts': 76},
'Luzerne': {'pop': 320918, 'tracts': 104},
'Lycoming': {'pop': 116111, 'tracts': 29},
'McKean': {'pop': 43450, 'tracts': 12},
'Mercer': {'pop': 116638, 'tracts': 30},
'Mifflin': {'pop': 46682, 'tracts': 12},
'Monroe': {'pop': 169842, 'tracts': 33},
'Montgomery': {'pop': 799874, 'tracts': 211},
'Montour': {'pop': 18267, 'tracts': 4},
'Northampton': {'pop': 297735, 'tracts': 68},
'Northumberland': {'pop': 94528, 'tracts': 24},
'Perry': {'pop': 45969, 'tracts': 10},
'Philadelphia': {'pop': 1526006, 'tracts': 384},
'Pike': {'pop': 57369, 'tracts': 18},
'Potter': {'pop': 17457, 'tracts': 5},
'Schuylkill': {'pop': 148289, 'tracts': 40},
'Snyder': {'pop': 39702, 'tracts': 8},
'Somerset': {'pop': 77742, 'tracts': 21},
'Sullivan': {'pop': 6428, 'tracts': 2},
'Susquehanna': {'pop': 43356, 'tracts': 11},
'Tioga': {'pop': 41981, 'tracts': 10},
'Union': {'pop': 44947, 'tracts': 10},
'Venango': {'pop': 54984, 'tracts': 16},
'Warren': {'pop': 41815, 'tracts': 13},
'Washington': {'pop': 207820, 'tracts': 59},
'Wayne': {'pop': 52822, 'tracts': 14},
'Westmoreland': {'pop': 365169, 'tracts': 100},
'Wyoming': {'pop': 28276, 'tracts': 7},
'York': {'pop': 434972, 'tracts': 90}},
'RI': {'Bristol': {'pop': 49875, 'tracts': 11},
'Kent': {'pop': 166158, 'tracts': 39},
'Newport': {'pop': 82888, 'tracts': 22},
'Providence': {'pop': 626667, 'tracts': 141},
'Washington': {'pop': 126979, 'tracts': 29}},
'SC': {'Abbeville': {'pop': 25417, 'tracts': 6},
'Aiken': {'pop': 160099, 'tracts': 33},
'Allendale': {'pop': 10419, 'tracts': 3},
'Anderson': {'pop': 187126, 'tracts': 39},
'Bamberg': {'pop': 15987, 'tracts': 4},
'Barnwell': {'pop': 22621, 'tracts': 6},
'Beaufort': {'pop': 162233, 'tracts': 41},
'Berkeley': {'pop': 177843, 'tracts': 45},
'Calhoun': {'pop': 15175, 'tracts': 3},
'Charleston': {'pop': 350209, 'tracts': 86},
'Cherokee': {'pop': 55342, 'tracts': 13},
'Chester': {'pop': 33140, 'tracts': 11},
'Chesterfield': {'pop': 46734, 'tracts': 10},
'Clarendon': {'pop': 34971, 'tracts': 12},
'Colleton': {'pop': 38892, 'tracts': 10},
'Darlington': {'pop': 68681, 'tracts': 16},
'Dillon': {'pop': 32062, 'tracts': 6},
'Dorchester': {'pop': 136555, 'tracts': 25},
'Edgefield': {'pop': 26985, 'tracts': 6},
'Fairfield': {'pop': 23956, 'tracts': 5},
'Florence': {'pop': 136885, 'tracts': 33},
'Georgetown': {'pop': 60158, 'tracts': 15},
'Greenville': {'pop': 451225, 'tracts': 111},
'Greenwood': {'pop': 69661, 'tracts': 14},
'Hampton': {'pop': 21090, 'tracts': 5},
'Horry': {'pop': 269291, 'tracts': 72},
'Jasper': {'pop': 24777, 'tracts': 5},
'Kershaw': {'pop': 61697, 'tracts': 15},
'Lancaster': {'pop': 76652, 'tracts': 14},
'Laurens': {'pop': 66537, 'tracts': 17},
'Lee': {'pop': 19220, 'tracts': 7},
'Lexington': {'pop': 262391, 'tracts': 74},
'Marion': {'pop': 33062, 'tracts': 8},
'Marlboro': {'pop': 28933, 'tracts': 7},
'McCormick': {'pop': 10233, 'tracts': 3},
'Newberry': {'pop': 37508, 'tracts': 8},
'Oconee': {'pop': 74273, 'tracts': 15},
'Orangeburg': {'pop': 92501, 'tracts': 20},
'Pickens': {'pop': 119224, 'tracts': 28},
'Richland': {'pop': 384504, 'tracts': 89},
'Saluda': {'pop': 19875, 'tracts': 5},
'Spartanburg': {'pop': 284307, 'tracts': 69},
'Sumter': {'pop': 107456, 'tracts': 23},
'Union': {'pop': 28961, 'tracts': 9},
'Williamsburg': {'pop': 34423, 'tracts': 11},
'York': {'pop': 226073, 'tracts': 46}},
'SD': {'Aurora': {'pop': 2710, 'tracts': 1},
'Beadle': {'pop': 17398, 'tracts': 6},
'Bennett': {'pop': 3431, 'tracts': 2},
'Bon Homme': {'pop': 7070, 'tracts': 2},
'Brookings': {'pop': 31965, 'tracts': 6},
'Brown': {'pop': 36531, 'tracts': 8},
'Brule': {'pop': 5255, 'tracts': 2},
'Buffalo': {'pop': 1912, 'tracts': 1},
'Butte': {'pop': 10110, 'tracts': 2},
'Campbell': {'pop': 1466, 'tracts': 1},
'Charles Mix': {'pop': 9129, 'tracts': 3},
'Clark': {'pop': 3691, 'tracts': 1},
'Clay': {'pop': 13864, 'tracts': 3},
'Codington': {'pop': 27227, 'tracts': 7},
'Corson': {'pop': 4050, 'tracts': 2},
'Custer': {'pop': 8216, 'tracts': 2},
'Davison': {'pop': 19504, 'tracts': 4},
'Day': {'pop': 5710, 'tracts': 3},
'Deuel': {'pop': 4364, 'tracts': 2},
'Dewey': {'pop': 5301, 'tracts': 2},
'Douglas': {'pop': 3002, 'tracts': 1},
'Edmunds': {'pop': 4071, 'tracts': 2},
'Fall River': {'pop': 7094, 'tracts': 2},
'Faulk': {'pop': 2364, 'tracts': 1},
'Grant': {'pop': 7356, 'tracts': 2},
'Gregory': {'pop': 4271, 'tracts': 2},
'Haakon': {'pop': 1937, 'tracts': 1},
'Hamlin': {'pop': 5903, 'tracts': 2},
'Hand': {'pop': 3431, 'tracts': 2},
'Hanson': {'pop': 3331, 'tracts': 1},
'Harding': {'pop': 1255, 'tracts': 1},
'Hughes': {'pop': 17022, 'tracts': 4},
'Hutchinson': {'pop': 7343, 'tracts': 3},
'Hyde': {'pop': 1420, 'tracts': 1},
'Jackson': {'pop': 3031, 'tracts': 2},
'Jerauld': {'pop': 2071, 'tracts': 1},
'Jones': {'pop': 1006, 'tracts': 1},
'Kingsbury': {'pop': 5148, 'tracts': 2},
'Lake': {'pop': 11200, 'tracts': 3},
'Lawrence': {'pop': 24097, 'tracts': 5},
'Lincoln': {'pop': 44828, 'tracts': 11},
'Lyman': {'pop': 3755, 'tracts': 2},
'Marshall': {'pop': 4656, 'tracts': 1},
'McCook': {'pop': 5618, 'tracts': 2},
'McPherson': {'pop': 2459, 'tracts': 1},
'Meade': {'pop': 25434, 'tracts': 5},
'Mellette': {'pop': 2048, 'tracts': 1},
'Miner': {'pop': 2389, 'tracts': 1},
'Minnehaha': {'pop': 169468, 'tracts': 42},
'Moody': {'pop': 6486, 'tracts': 2},
'Pennington': {'pop': 100948, 'tracts': 23},
'Perkins': {'pop': 2982, 'tracts': 1},
'Potter': {'pop': 2329, 'tracts': 1},
'Roberts': {'pop': 10149, 'tracts': 4},
'Sanborn': {'pop': 2355, 'tracts': 1},
'Shannon': {'pop': 13586, 'tracts': 3},
'Spink': {'pop': 6415, 'tracts': 3},
'Stanley': {'pop': 2966, 'tracts': 1},
'Sully': {'pop': 1373, 'tracts': 1},
'Todd': {'pop': 9612, 'tracts': 2},
'Tripp': {'pop': 5644, 'tracts': 2},
'Turner': {'pop': 8347, 'tracts': 2},
'Union': {'pop': 14399, 'tracts': 3},
'Walworth': {'pop': 5438, 'tracts': 2},
'Yankton': {'pop': 22438, 'tracts': 5},
'Ziebach': {'pop': 2801, 'tracts': 1}},
'TN': {'Anderson': {'pop': 75129, 'tracts': 18},
'Bedford': {'pop': 45058, 'tracts': 9},
'Benton': {'pop': 16489, 'tracts': 5},
'Bledsoe': {'pop': 12876, 'tracts': 3},
'Blount': {'pop': 123010, 'tracts': 28},
'Bradley': {'pop': 98963, 'tracts': 19},
'Campbell': {'pop': 40716, 'tracts': 11},
'Cannon': {'pop': 13801, 'tracts': 3},
'Carroll': {'pop': 28522, 'tracts': 8},
'Carter': {'pop': 57424, 'tracts': 17},
'Cheatham': {'pop': 39105, 'tracts': 9},
'Chester': {'pop': 17131, 'tracts': 3},
'Claiborne': {'pop': 32213, 'tracts': 9},
'Clay': {'pop': 7861, 'tracts': 2},
'Cocke': {'pop': 35662, 'tracts': 9},
'Coffee': {'pop': 52796, 'tracts': 12},
'Crockett': {'pop': 14586, 'tracts': 5},
'Cumberland': {'pop': 56053, 'tracts': 14},
'Davidson': {'pop': 626681, 'tracts': 161},
'DeKalb': {'pop': 18723, 'tracts': 4},
'Decatur': {'pop': 11757, 'tracts': 4},
'Dickson': {'pop': 49666, 'tracts': 10},
'Dyer': {'pop': 38335, 'tracts': 8},
'Fayette': {'pop': 38413, 'tracts': 11},
'Fentress': {'pop': 17959, 'tracts': 4},
'Franklin': {'pop': 41052, 'tracts': 9},
'Gibson': {'pop': 49683, 'tracts': 14},
'Giles': {'pop': 29485, 'tracts': 8},
'Grainger': {'pop': 22657, 'tracts': 5},
'Greene': {'pop': 68831, 'tracts': 15},
'Grundy': {'pop': 13703, 'tracts': 4},
'Hamblen': {'pop': 62544, 'tracts': 12},
'Hamilton': {'pop': 336463, 'tracts': 82},
'Hancock': {'pop': 6819, 'tracts': 2},
'Hardeman': {'pop': 27253, 'tracts': 6},
'Hardin': {'pop': 26026, 'tracts': 6},
'Hawkins': {'pop': 56833, 'tracts': 13},
'Haywood': {'pop': 18787, 'tracts': 6},
'Henderson': {'pop': 27769, 'tracts': 6},
'Henry': {'pop': 32330, 'tracts': 9},
'Hickman': {'pop': 24690, 'tracts': 6},
'Houston': {'pop': 8426, 'tracts': 3},
'Humphreys': {'pop': 18538, 'tracts': 5},
'Jackson': {'pop': 11638, 'tracts': 4},
'Jefferson': {'pop': 51407, 'tracts': 9},
'Johnson': {'pop': 18244, 'tracts': 5},
'Knox': {'pop': 432226, 'tracts': 112},
'Lake': {'pop': 7832, 'tracts': 2},
'Lauderdale': {'pop': 27815, 'tracts': 9},
'Lawrence': {'pop': 41869, 'tracts': 11},
'Lewis': {'pop': 12161, 'tracts': 2},
'Lincoln': {'pop': 33361, 'tracts': 9},
'Loudon': {'pop': 48556, 'tracts': 10},
'Macon': {'pop': 22248, 'tracts': 4},
'Madison': {'pop': 98294, 'tracts': 27},
'Marion': {'pop': 28237, 'tracts': 6},
'Marshall': {'pop': 30617, 'tracts': 6},
'Maury': {'pop': 80956, 'tracts': 17},
'McMinn': {'pop': 52266, 'tracts': 10},
'McNairy': {'pop': 26075, 'tracts': 7},
'Meigs': {'pop': 11753, 'tracts': 3},
'Monroe': {'pop': 44519, 'tracts': 7},
'Montgomery': {'pop': 172331, 'tracts': 39},
'Moore': {'pop': 6362, 'tracts': 2},
'Morgan': {'pop': 21987, 'tracts': 5},
'Obion': {'pop': 31807, 'tracts': 10},
'Overton': {'pop': 22083, 'tracts': 7},
'Perry': {'pop': 7915, 'tracts': 2},
'Pickett': {'pop': 5077, 'tracts': 1},
'Polk': {'pop': 16825, 'tracts': 5},
'Putnam': {'pop': 72321, 'tracts': 15},
'Rhea': {'pop': 31809, 'tracts': 6},
'Roane': {'pop': 54181, 'tracts': 11},
'Robertson': {'pop': 66283, 'tracts': 14},
'Rutherford': {'pop': 262604, 'tracts': 49},
'Scott': {'pop': 22228, 'tracts': 5},
'Sequatchie': {'pop': 14112, 'tracts': 3},
'Sevier': {'pop': 89889, 'tracts': 18},
'Shelby': {'pop': 927644, 'tracts': 221},
'Smith': {'pop': 19166, 'tracts': 5},
'Stewart': {'pop': 13324, 'tracts': 5},
'Sullivan': {'pop': 156823, 'tracts': 39},
'Sumner': {'pop': 160645, 'tracts': 42},
'Tipton': {'pop': 61081, 'tracts': 13},
'Trousdale': {'pop': 7870, 'tracts': 2},
'Unicoi': {'pop': 18313, 'tracts': 4},
'Union': {'pop': 19109, 'tracts': 4},
'Van Buren': {'pop': 5548, 'tracts': 2},
'Warren': {'pop': 39839, 'tracts': 9},
'Washington': {'pop': 122979, 'tracts': 23},
'Wayne': {'pop': 17021, 'tracts': 4},
'Weakley': {'pop': 35021, 'tracts': 11},
'White': {'pop': 25841, 'tracts': 6},
'Williamson': {'pop': 183182, 'tracts': 37},
'Wilson': {'pop': 113993, 'tracts': 21}},
'TX': {'Anderson': {'pop': 58458, 'tracts': 11},
'Andrews': {'pop': 14786, 'tracts': 4},
'Angelina': {'pop': 86771, 'tracts': 17},
'Aransas': {'pop': 23158, 'tracts': 5},
'Archer': {'pop': 9054, 'tracts': 3},
'Armstrong': {'pop': 1901, 'tracts': 1},
'Atascosa': {'pop': 44911, 'tracts': 8},
'Austin': {'pop': 28417, 'tracts': 6},
'Bailey': {'pop': 7165, 'tracts': 1},
'Bandera': {'pop': 20485, 'tracts': 5},
'Bastrop': {'pop': 74171, 'tracts': 10},
'Baylor': {'pop': 3726, 'tracts': 1},
'Bee': {'pop': 31861, 'tracts': 7},
'Bell': {'pop': 310235, 'tracts': 65},
'Bexar': {'pop': 1714773, 'tracts': 366},
'Blanco': {'pop': 10497, 'tracts': 2},
'Borden': {'pop': 641, 'tracts': 1},
'Bosque': {'pop': 18212, 'tracts': 7},
'Bowie': {'pop': 92565, 'tracts': 18},
'Brazoria': {'pop': 313166, 'tracts': 51},
'Brazos': {'pop': 194851, 'tracts': 42},
'Brewster': {'pop': 9232, 'tracts': 3},
'Briscoe': {'pop': 1637, 'tracts': 1},
'Brooks': {'pop': 7223, 'tracts': 2},
'Brown': {'pop': 38106, 'tracts': 12},
'Burleson': {'pop': 17187, 'tracts': 5},
'Burnet': {'pop': 42750, 'tracts': 8},
'Caldwell': {'pop': 38066, 'tracts': 8},
'Calhoun': {'pop': 21381, 'tracts': 6},
'Callahan': {'pop': 13544, 'tracts': 3},
'Cameron': {'pop': 406220, 'tracts': 86},
'Camp': {'pop': 12401, 'tracts': 3},
'Carson': {'pop': 6182, 'tracts': 2},
'Cass': {'pop': 30464, 'tracts': 7},
'Castro': {'pop': 8062, 'tracts': 3},
'Chambers': {'pop': 35096, 'tracts': 6},
'Cherokee': {'pop': 50845, 'tracts': 12},
'Childress': {'pop': 7041, 'tracts': 2},
'Clay': {'pop': 10752, 'tracts': 3},
'Cochran': {'pop': 3127, 'tracts': 1},
'Coke': {'pop': 3320, 'tracts': 2},
'Coleman': {'pop': 8895, 'tracts': 3},
'Collin': {'pop': 782341, 'tracts': 152},
'Collingsworth': {'pop': 3057, 'tracts': 1},
'Colorado': {'pop': 20874, 'tracts': 5},
'Comal': {'pop': 108472, 'tracts': 24},
'Comanche': {'pop': 13974, 'tracts': 4},
'Concho': {'pop': 4087, 'tracts': 1},
'Cooke': {'pop': 38437, 'tracts': 8},
'Coryell': {'pop': 75388, 'tracts': 19},
'Cottle': {'pop': 1505, 'tracts': 1},
'Crane': {'pop': 4375, 'tracts': 1},
'Crockett': {'pop': 3719, 'tracts': 1},
'Crosby': {'pop': 6059, 'tracts': 3},
'Culberson': {'pop': 2398, 'tracts': 1},
'Dallam': {'pop': 6703, 'tracts': 2},
'Dallas': {'pop': 2368139, 'tracts': 529},
'Dawson': {'pop': 13833, 'tracts': 4},
'DeWitt': {'pop': 20097, 'tracts': 5},
'Deaf Smith': {'pop': 19372, 'tracts': 4},
'Delta': {'pop': 5231, 'tracts': 2},
'Denton': {'pop': 662614, 'tracts': 137},
'Dickens': {'pop': 2444, 'tracts': 1},
'Dimmit': {'pop': 9996, 'tracts': 2},
'Donley': {'pop': 3677, 'tracts': 2},
'Duval': {'pop': 11782, 'tracts': 3},
'Eastland': {'pop': 18583, 'tracts': 5},
'Ector': {'pop': 137130, 'tracts': 28},
'Edwards': {'pop': 2002, 'tracts': 1},
'El Paso': {'pop': 800647, 'tracts': 161},
'Ellis': {'pop': 149610, 'tracts': 31},
'Erath': {'pop': 37890, 'tracts': 8},
'Falls': {'pop': 17866, 'tracts': 6},
'Fannin': {'pop': 33915, 'tracts': 9},
'Fayette': {'pop': 24554, 'tracts': 7},
'Fisher': {'pop': 3974, 'tracts': 2},
'Floyd': {'pop': 6446, 'tracts': 2},
'Foard': {'pop': 1336, 'tracts': 1},
'Fort Bend': {'pop': 585375, 'tracts': 76},
'Franklin': {'pop': 10605, 'tracts': 3},
'Freestone': {'pop': 19816, 'tracts': 7},
'Frio': {'pop': 17217, 'tracts': 3},
'Gaines': {'pop': 17526, 'tracts': 3},
'Galveston': {'pop': 291309, 'tracts': 67},
'Garza': {'pop': 6461, 'tracts': 1},
'Gillespie': {'pop': 24837, 'tracts': 5},
'Glasscock': {'pop': 1226, 'tracts': 1},
'Goliad': {'pop': 7210, 'tracts': 2},
'Gonzales': {'pop': 19807, 'tracts': 6},
'Gray': {'pop': 22535, 'tracts': 7},
'Grayson': {'pop': 120877, 'tracts': 26},
'Gregg': {'pop': 121730, 'tracts': 25},
'Grimes': {'pop': 26604, 'tracts': 6},
'Guadalupe': {'pop': 131533, 'tracts': 29},
'Hale': {'pop': 36273, 'tracts': 9},
'Hall': {'pop': 3353, 'tracts': 1},
'Hamilton': {'pop': 8517, 'tracts': 3},
'Hansford': {'pop': 5613, 'tracts': 2},
'Hardeman': {'pop': 4139, 'tracts': 1},
'Hardin': {'pop': 54635, 'tracts': 11},
'Harris': {'pop': 4092459, 'tracts': 786},
'Harrison': {'pop': 65631, 'tracts': 14},
'Hartley': {'pop': 6062, 'tracts': 1},
'Haskell': {'pop': 5899, 'tracts': 2},
'Hays': {'pop': 157107, 'tracts': 25},
'Hemphill': {'pop': 3807, 'tracts': 1},
'Henderson': {'pop': 78532, 'tracts': 17},
'Hidalgo': {'pop': 774769, 'tracts': 113},
'Hill': {'pop': 35089, 'tracts': 11},
'Hockley': {'pop': 22935, 'tracts': 7},
'Hood': {'pop': 51182, 'tracts': 10},
'Hopkins': {'pop': 35161, 'tracts': 9},
'Houston': {'pop': 23732, 'tracts': 7},
'Howard': {'pop': 35012, 'tracts': 10},
'Hudspeth': {'pop': 3476, 'tracts': 1},
'Hunt': {'pop': 86129, 'tracts': 19},
'Hutchinson': {'pop': 22150, 'tracts': 7},
'Irion': {'pop': 1599, 'tracts': 1},
'Jack': {'pop': 9044, 'tracts': 3},
'Jackson': {'pop': 14075, 'tracts': 3},
'Jasper': {'pop': 35710, 'tracts': 8},
'Jeff Davis': {'pop': 2342, 'tracts': 1},
'Jefferson': {'pop': 252273, 'tracts': 72},
'Jim Hogg': {'pop': 5300, 'tracts': 2},
'Jim Wells': {'pop': 40838, 'tracts': 7},
'Johnson': {'pop': 150934, 'tracts': 28},
'Jones': {'pop': 20202, 'tracts': 6},
'Karnes': {'pop': 14824, 'tracts': 4},
'Kaufman': {'pop': 103350, 'tracts': 18},
'Kendall': {'pop': 33410, 'tracts': 6},
'Kenedy': {'pop': 416, 'tracts': 1},
'Kent': {'pop': 808, 'tracts': 1},
'Kerr': {'pop': 49625, 'tracts': 10},
'Kimble': {'pop': 4607, 'tracts': 2},
'King': {'pop': 286, 'tracts': 1},
'Kinney': {'pop': 3598, 'tracts': 1},
'Kleberg': {'pop': 32061, 'tracts': 6},
'Knox': {'pop': 3719, 'tracts': 2},
'La Salle': {'pop': 6886, 'tracts': 1},
'Lamar': {'pop': 49793, 'tracts': 12},
'Lamb': {'pop': 13977, 'tracts': 5},
'Lampasas': {'pop': 19677, 'tracts': 5},
'Lavaca': {'pop': 19263, 'tracts': 6},
'Lee': {'pop': 16612, 'tracts': 4},
'Leon': {'pop': 16801, 'tracts': 3},
'Liberty': {'pop': 75643, 'tracts': 14},
'Limestone': {'pop': 23384, 'tracts': 8},
'Lipscomb': {'pop': 3302, 'tracts': 2},
'Live Oak': {'pop': 11531, 'tracts': 4},
'Llano': {'pop': 19301, 'tracts': 6},
'Loving': {'pop': 82, 'tracts': 1},
'Lubbock': {'pop': 278831, 'tracts': 68},
'Lynn': {'pop': 5915, 'tracts': 3},
'Madison': {'pop': 13664, 'tracts': 4},
'Marion': {'pop': 10546, 'tracts': 4},
'Martin': {'pop': 4799, 'tracts': 2},
'Mason': {'pop': 4012, 'tracts': 2},
'Matagorda': {'pop': 36702, 'tracts': 10},
'Maverick': {'pop': 54258, 'tracts': 9},
'McCulloch': {'pop': 8283, 'tracts': 3},
'McLennan': {'pop': 234906, 'tracts': 51},
'McMullen': {'pop': 707, 'tracts': 1},
'Medina': {'pop': 46006, 'tracts': 8},
'Menard': {'pop': 2242, 'tracts': 1},
'Midland': {'pop': 136872, 'tracts': 27},
'Milam': {'pop': 24757, 'tracts': 7},
'Mills': {'pop': 4936, 'tracts': 2},
'Mitchell': {'pop': 9403, 'tracts': 2},
'Montague': {'pop': 19719, 'tracts': 6},
'Montgomery': {'pop': 455746, 'tracts': 59},
'Moore': {'pop': 21904, 'tracts': 4},
'Morris': {'pop': 12934, 'tracts': 3},
'Motley': {'pop': 1210, 'tracts': 1},
'Nacogdoches': {'pop': 64524, 'tracts': 13},
'Navarro': {'pop': 47735, 'tracts': 10},
'Newton': {'pop': 14445, 'tracts': 4},
'Nolan': {'pop': 15216, 'tracts': 5},
'Nueces': {'pop': 340223, 'tracts': 81},
'Ochiltree': {'pop': 10223, 'tracts': 3},
'Oldham': {'pop': 2052, 'tracts': 1},
'Orange': {'pop': 81837, 'tracts': 21},
'Palo Pinto': {'pop': 28111, 'tracts': 9},
'Panola': {'pop': 23796, 'tracts': 6},
'Parker': {'pop': 116927, 'tracts': 19},
'Parmer': {'pop': 10269, 'tracts': 2},
'Pecos': {'pop': 15507, 'tracts': 4},
'Polk': {'pop': 45413, 'tracts': 10},
'Potter': {'pop': 121073, 'tracts': 34},
'Presidio': {'pop': 7818, 'tracts': 2},
'Rains': {'pop': 10914, 'tracts': 2},
'Randall': {'pop': 120725, 'tracts': 29},
'Reagan': {'pop': 3367, 'tracts': 1},
'Real': {'pop': 3309, 'tracts': 1},
'Red River': {'pop': 12860, 'tracts': 4},
'Reeves': {'pop': 13783, 'tracts': 5},
'Refugio': {'pop': 7383, 'tracts': 2},
'Roberts': {'pop': 929, 'tracts': 1},
'Robertson': {'pop': 16622, 'tracts': 5},
'Rockwall': {'pop': 78337, 'tracts': 11},
'Runnels': {'pop': 10501, 'tracts': 4},
'Rusk': {'pop': 53330, 'tracts': 13},
'Sabine': {'pop': 10834, 'tracts': 3},
'San Augustine': {'pop': 8865, 'tracts': 3},
'San Jacinto': {'pop': 26384, 'tracts': 4},
'San Patricio': {'pop': 64804, 'tracts': 16},
'San Saba': {'pop': 6131, 'tracts': 2},
'Schleicher': {'pop': 3461, 'tracts': 1},
'Scurry': {'pop': 16921, 'tracts': 4},
'Shackelford': {'pop': 3378, 'tracts': 1},
'Shelby': {'pop': 25448, 'tracts': 6},
'Sherman': {'pop': 3034, 'tracts': 1},
'Smith': {'pop': 209714, 'tracts': 41},
'Somervell': {'pop': 8490, 'tracts': 2},
'Starr': {'pop': 60968, 'tracts': 15},
'Stephens': {'pop': 9630, 'tracts': 3},
'Sterling': {'pop': 1143, 'tracts': 1},
'Stonewall': {'pop': 1490, 'tracts': 1},
'Sutton': {'pop': 4128, 'tracts': 1},
'Swisher': {'pop': 7854, 'tracts': 3},
'Tarrant': {'pop': 1809034, 'tracts': 357},
'Taylor': {'pop': 131506, 'tracts': 38},
'Terrell': {'pop': 984, 'tracts': 1},
'Terry': {'pop': 12651, 'tracts': 3},
'Throckmorton': {'pop': 1641, 'tracts': 1},
'Titus': {'pop': 32334, 'tracts': 8},
'Tom Green': {'pop': 110224, 'tracts': 25},
'Travis': {'pop': 1024266, 'tracts': 218},
'Trinity': {'pop': 14585, 'tracts': 5},
'Tyler': {'pop': 21766, 'tracts': 5},
'Upshur': {'pop': 39309, 'tracts': 7},
'Upton': {'pop': 3355, 'tracts': 2},
'Uvalde': {'pop': 26405, 'tracts': 5},
'Val Verde': {'pop': 48879, 'tracts': 10},
'Van Zandt': {'pop': 52579, 'tracts': 10},
'Victoria': {'pop': 86793, 'tracts': 23},
'Walker': {'pop': 67861, 'tracts': 10},
'Waller': {'pop': 43205, 'tracts': 6},
'Ward': {'pop': 10658, 'tracts': 3},
'Washington': {'pop': 33718, 'tracts': 6},
'Webb': {'pop': 250304, 'tracts': 61},
'Wharton': {'pop': 41280, 'tracts': 11},
'Wheeler': {'pop': 5410, 'tracts': 2},
'Wichita': {'pop': 131500, 'tracts': 37},
'Wilbarger': {'pop': 13535, 'tracts': 4},
'Willacy': {'pop': 22134, 'tracts': 6},
'Williamson': {'pop': 422679, 'tracts': 89},
'Wilson': {'pop': 42918, 'tracts': 11},
'Winkler': {'pop': 7110, 'tracts': 3},
'Wise': {'pop': 59127, 'tracts': 11},
'Wood': {'pop': 41964, 'tracts': 10},
'Yoakum': {'pop': 7879, 'tracts': 2},
'Young': {'pop': 18550, 'tracts': 4},
'Zapata': {'pop': 14018, 'tracts': 3},
'Zavala': {'pop': 11677, 'tracts': 4}},
'UT': {'Beaver': {'pop': 6629, 'tracts': 2},
'Box Elder': {'pop': 49975, 'tracts': 11},
'Cache': {'pop': 112656, 'tracts': 26},
'Carbon': {'pop': 21403, 'tracts': 5},
'Daggett': {'pop': 1059, 'tracts': 1},
'Davis': {'pop': 306479, 'tracts': 54},
'Duchesne': {'pop': 18607, 'tracts': 3},
'Emery': {'pop': 10976, 'tracts': 3},
'Garfield': {'pop': 5172, 'tracts': 2},
'Grand': {'pop': 9225, 'tracts': 2},
'Iron': {'pop': 46163, 'tracts': 8},
'Juab': {'pop': 10246, 'tracts': 2},
'Kane': {'pop': 7125, 'tracts': 2},
'Millard': {'pop': 12503, 'tracts': 3},
'Morgan': {'pop': 9469, 'tracts': 2},
'Piute': {'pop': 1556, 'tracts': 1},
'Rich': {'pop': 2264, 'tracts': 1},
'Salt Lake': {'pop': 1029655, 'tracts': 212},
'San Juan': {'pop': 14746, 'tracts': 4},
'Sanpete': {'pop': 27822, 'tracts': 5},
'Sevier': {'pop': 20802, 'tracts': 5},
'Summit': {'pop': 36324, 'tracts': 13},
'Tooele': {'pop': 58218, 'tracts': 11},
'Uintah': {'pop': 32588, 'tracts': 6},
'Utah': {'pop': 516564, 'tracts': 128},
'Wasatch': {'pop': 23530, 'tracts': 4},
'Washington': {'pop': 138115, 'tracts': 21},
'Wayne': {'pop': 2778, 'tracts': 1},
'Weber': {'pop': 231236, 'tracts': 50}},
'VA': {'Accomack': {'pop': 33164, 'tracts': 11},
'Albemarle': {'pop': 98970, 'tracts': 22},
'Alexandria': {'pop': 139966, 'tracts': 38},
'Alleghany': {'pop': 16250, 'tracts': 6},
'Amelia': {'pop': 12690, 'tracts': 2},
'Amherst': {'pop': 32353, 'tracts': 9},
'Appomattox': {'pop': 14973, 'tracts': 3},
'Arlington': {'pop': 207627, 'tracts': 59},
'Augusta': {'pop': 73750, 'tracts': 13},
'Bath': {'pop': 4731, 'tracts': 1},
'Bedford': {'pop': 68676, 'tracts': 16},
'Bedford City': {'pop': 6222, 'tracts': 1},
'Bland': {'pop': 6824, 'tracts': 2},
'Botetourt': {'pop': 33148, 'tracts': 8},
'Bristol': {'pop': 17835, 'tracts': 4},
'Brunswick': {'pop': 17434, 'tracts': 5},
'Buchanan': {'pop': 24098, 'tracts': 7},
'Buckingham': {'pop': 17146, 'tracts': 4},
'Buena Vista': {'pop': 6650, 'tracts': 1},
'Campbell': {'pop': 54842, 'tracts': 12},
'Caroline': {'pop': 28545, 'tracts': 7},
'Carroll': {'pop': 30042, 'tracts': 7},
'Charles City': {'pop': 7256, 'tracts': 3},
'Charlotte': {'pop': 12586, 'tracts': 3},
'Charlottesville': {'pop': 43475, 'tracts': 12},
'Chesapeake': {'pop': 222209, 'tracts': 41},
'Chesterfield': {'pop': 316236, 'tracts': 71},
'Clarke': {'pop': 14034, 'tracts': 3},
'Colonial Heights': {'pop': 17411, 'tracts': 5},
'Covington': {'pop': 5961, 'tracts': 2},
'Craig': {'pop': 5190, 'tracts': 1},
'Culpeper': {'pop': 46689, 'tracts': 8},
'Cumberland': {'pop': 10052, 'tracts': 2},
'Danville': {'pop': 43055, 'tracts': 16},
'Dickenson': {'pop': 15903, 'tracts': 4},
'Dinwiddie': {'pop': 28001, 'tracts': 7},
'Emporia': {'pop': 5927, 'tracts': 2},
'Essex': {'pop': 11151, 'tracts': 3},
'Fairfax': {'pop': 1081726, 'tracts': 258},
'Fairfax City': {'pop': 22565, 'tracts': 5},
'Falls Church': {'pop': 12332, 'tracts': 3},
'Fauquier': {'pop': 65203, 'tracts': 17},
'Floyd': {'pop': 15279, 'tracts': 3},
'Fluvanna': {'pop': 25691, 'tracts': 4},
'Franklin': {'pop': 56159, 'tracts': 10},
'Franklin City': {'pop': 8582, 'tracts': 2},
'Frederick': {'pop': 78305, 'tracts': 14},
'Fredericksburg': {'pop': 24286, 'tracts': 6},
'Galax': {'pop': 7042, 'tracts': 2},
'Giles': {'pop': 17286, 'tracts': 4},
'Gloucester': {'pop': 36858, 'tracts': 8},
'Goochland': {'pop': 21717, 'tracts': 5},
'Grayson': {'pop': 15533, 'tracts': 5},
'Greene': {'pop': 18403, 'tracts': 3},
'Greensville': {'pop': 12243, 'tracts': 3},
'Halifax': {'pop': 36241, 'tracts': 9},
'Hampton': {'pop': 137436, 'tracts': 34},
'Hanover': {'pop': 99863, 'tracts': 23},
'Harrisonburg': {'pop': 48914, 'tracts': 11},
'Henrico': {'pop': 306935, 'tracts': 64},
'Henry': {'pop': 54151, 'tracts': 14},
'Highland': {'pop': 2321, 'tracts': 1},
'Hopewell': {'pop': 22591, 'tracts': 7},
'Isle of Wight': {'pop': 35270, 'tracts': 8},
'James City': {'pop': 67009, 'tracts': 11},
'King George': {'pop': 23584, 'tracts': 5},
'King William': {'pop': 15935, 'tracts': 4},
'King and Queen': {'pop': 6945, 'tracts': 2},
'Lancaster': {'pop': 11391, 'tracts': 3},
'Lee': {'pop': 25587, 'tracts': 6},
'Lexington': {'pop': 7042, 'tracts': 1},
'Loudoun': {'pop': 312311, 'tracts': 65},
'Louisa': {'pop': 33153, 'tracts': 6},
'Lunenburg': {'pop': 12914, 'tracts': 3},
'Lynchburg': {'pop': 75568, 'tracts': 19},
'Madison': {'pop': 13308, 'tracts': 2},
'Manassas': {'pop': 37821, 'tracts': 7},
'Manassas Park': {'pop': 14273, 'tracts': 2},
'Martinsville': {'pop': 13821, 'tracts': 5},
'Mathews': {'pop': 8978, 'tracts': 2},
'Mecklenburg': {'pop': 32727, 'tracts': 9},
'Middlesex': {'pop': 10959, 'tracts': 4},
'Montgomery': {'pop': 94392, 'tracts': 16},
'Nelson': {'pop': 15020, 'tracts': 3},
'New Kent': {'pop': 18429, 'tracts': 3},
'Newport News': {'pop': 180719, 'tracts': 44},
'Norfolk': {'pop': 242803, 'tracts': 81},
'Northampton': {'pop': 12389, 'tracts': 4},
'Northumberland': {'pop': 12330, 'tracts': 3},
'Norton': {'pop': 3958, 'tracts': 1},
'Nottoway': {'pop': 15853, 'tracts': 4},
'Orange': {'pop': 33481, 'tracts': 5},
'Page': {'pop': 24042, 'tracts': 5},
'Patrick': {'pop': 18490, 'tracts': 4},
'Petersburg': {'pop': 32420, 'tracts': 11},
'Pittsylvania': {'pop': 63506, 'tracts': 16},
'Poquoson': {'pop': 12150, 'tracts': 3},
'Portsmouth': {'pop': 95535, 'tracts': 31},
'Powhatan': {'pop': 28046, 'tracts': 5},
'Prince Edward': {'pop': 23368, 'tracts': 5},
'Prince George': {'pop': 35725, 'tracts': 7},
'Prince William': {'pop': 402002, 'tracts': 83},
'Pulaski': {'pop': 34872, 'tracts': 10},
'Radford': {'pop': 16408, 'tracts': 3},
'Rappahannock': {'pop': 7373, 'tracts': 2},
'Richmond': {'pop': 9254, 'tracts': 2},
'Richmond City': {'pop': 204214, 'tracts': 66},
'Roanoke': {'pop': 92376, 'tracts': 18},
'Roanoke City': {'pop': 97032, 'tracts': 23},
'Rockbridge': {'pop': 22307, 'tracts': 4},
'Rockingham': {'pop': 76314, 'tracts': 19},
'Russell': {'pop': 28897, 'tracts': 7},
'Salem': {'pop': 24802, 'tracts': 5},
'Scott': {'pop': 23177, 'tracts': 6},
'Shenandoah': {'pop': 41993, 'tracts': 9},
'Smyth': {'pop': 32208, 'tracts': 9},
'Southampton': {'pop': 18570, 'tracts': 5},
'Spotsylvania': {'pop': 122397, 'tracts': 30},
'Stafford': {'pop': 128961, 'tracts': 27},
'Staunton': {'pop': 23746, 'tracts': 6},
'Suffolk': {'pop': 84585, 'tracts': 28},
'Surry': {'pop': 7058, 'tracts': 2},
'Sussex': {'pop': 12087, 'tracts': 5},
'Tazewell': {'pop': 45078, 'tracts': 11},
'Virginia Beach': {'pop': 437994, 'tracts': 100},
'Warren': {'pop': 37575, 'tracts': 8},
'Washington': {'pop': 54876, 'tracts': 13},
'Waynesboro': {'pop': 21006, 'tracts': 5},
'Westmoreland': {'pop': 17454, 'tracts': 4},
'Williamsburg': {'pop': 14068, 'tracts': 3},
'Winchester': {'pop': 26203, 'tracts': 5},
'Wise': {'pop': 41452, 'tracts': 11},
'Wythe': {'pop': 29235, 'tracts': 6},
'York': {'pop': 65464, 'tracts': 14}},
'VT': {'Addison': {'pop': 36821, 'tracts': 10},
'Bennington': {'pop': 37125, 'tracts': 12},
'Caledonia': {'pop': 31227, 'tracts': 10},
'Chittenden': {'pop': 156545, 'tracts': 35},
'Essex': {'pop': 6306, 'tracts': 3},
'Franklin': {'pop': 47746, 'tracts': 10},
'Grand Isle': {'pop': 6970, 'tracts': 2},
'Lamoille': {'pop': 24475, 'tracts': 7},
'Orange': {'pop': 28936, 'tracts': 10},
'Orleans': {'pop': 27231, 'tracts': 10},
'Rutland': {'pop': 61642, 'tracts': 20},
'Washington': {'pop': 59534, 'tracts': 19},
'Windham': {'pop': 44513, 'tracts': 18},
'Windsor': {'pop': 56670, 'tracts': 18}},
'WA': {'Adams': {'pop': 18728, 'tracts': 5},
'Asotin': {'pop': 21623, 'tracts': 6},
'Benton': {'pop': 175177, 'tracts': 37},
'Chelan': {'pop': 72453, 'tracts': 14},
'Clallam': {'pop': 71404, 'tracts': 22},
'Clark': {'pop': 425363, 'tracts': 104},
'Columbia': {'pop': 4078, 'tracts': 1},
'Cowlitz': {'pop': 102410, 'tracts': 24},
'Douglas': {'pop': 38431, 'tracts': 8},
'Ferry': {'pop': 7551, 'tracts': 3},
'Franklin': {'pop': 78163, 'tracts': 13},
'Garfield': {'pop': 2266, 'tracts': 1},
'Grant': {'pop': 89120, 'tracts': 16},
'Grays Harbor': {'pop': 72797, 'tracts': 17},
'Island': {'pop': 78506, 'tracts': 22},
'Jefferson': {'pop': 29872, 'tracts': 7},
'King': {'pop': 1931249, 'tracts': 397},
'Kitsap': {'pop': 251133, 'tracts': 55},
'Kittitas': {'pop': 40915, 'tracts': 8},
'Klickitat': {'pop': 20318, 'tracts': 3},
'Lewis': {'pop': 75455, 'tracts': 20},
'Lincoln': {'pop': 10570, 'tracts': 4},
'Mason': {'pop': 60699, 'tracts': 14},
'Okanogan': {'pop': 41120, 'tracts': 10},
'Pacific': {'pop': 20920, 'tracts': 8},
'Pend Oreille': {'pop': 13001, 'tracts': 5},
'Pierce': {'pop': 795225, 'tracts': 172},
'San Juan': {'pop': 15769, 'tracts': 5},
'Skagit': {'pop': 116901, 'tracts': 30},
'Skamania': {'pop': 11066, 'tracts': 5},
'Snohomish': {'pop': 713335, 'tracts': 151},
'Spokane': {'pop': 471221, 'tracts': 105},
'Stevens': {'pop': 43531, 'tracts': 12},
'Thurston': {'pop': 252264, 'tracts': 49},
'Wahkiakum': {'pop': 3978, 'tracts': 1},
'Walla Walla': {'pop': 58781, 'tracts': 12},
'Whatcom': {'pop': 201140, 'tracts': 34},
'Whitman': {'pop': 44776, 'tracts': 10},
'Yakima': {'pop': 243231, 'tracts': 45}},
'WI': {'Adams': {'pop': 20875, 'tracts': 7},
'Ashland': {'pop': 16157, 'tracts': 7},
'Barron': {'pop': 45870, 'tracts': 10},
'Bayfield': {'pop': 15014, 'tracts': 5},
'Brown': {'pop': 248007, 'tracts': 54},
'Buffalo': {'pop': 13587, 'tracts': 5},
'Burnett': {'pop': 15457, 'tracts': 6},
'Calumet': {'pop': 48971, 'tracts': 11},
'Chippewa': {'pop': 62415, 'tracts': 11},
'Clark': {'pop': 34690, 'tracts': 8},
'Columbia': {'pop': 56833, 'tracts': 12},
'Crawford': {'pop': 16644, 'tracts': 6},
'Dane': {'pop': 488073, 'tracts': 107},
'Dodge': {'pop': 88759, 'tracts': 20},
'Door': {'pop': 27785, 'tracts': 9},
'Douglas': {'pop': 44159, 'tracts': 12},
'Dunn': {'pop': 43857, 'tracts': 8},
'Eau Claire': {'pop': 98736, 'tracts': 20},
'Florence': {'pop': 4423, 'tracts': 2},
'Fond du Lac': {'pop': 101633, 'tracts': 20},
'Forest': {'pop': 9304, 'tracts': 4},
'Grant': {'pop': 51208, 'tracts': 12},
'Green': {'pop': 36842, 'tracts': 8},
'Green Lake': {'pop': 19051, 'tracts': 6},
'Iowa': {'pop': 23687, 'tracts': 6},
'Iron': {'pop': 5916, 'tracts': 3},
'Jackson': {'pop': 20449, 'tracts': 5},
'Jefferson': {'pop': 83686, 'tracts': 20},
'Juneau': {'pop': 26664, 'tracts': 7},
'Kenosha': {'pop': 166426, 'tracts': 35},
'Kewaunee': {'pop': 20574, 'tracts': 4},
'La Crosse': {'pop': 114638, 'tracts': 25},
'Lafayette': {'pop': 16836, 'tracts': 5},
'Langlade': {'pop': 19977, 'tracts': 6},
'Lincoln': {'pop': 28743, 'tracts': 10},
'Manitowoc': {'pop': 81442, 'tracts': 19},
'Marathon': {'pop': 134063, 'tracts': 27},
'Marinette': {'pop': 41749, 'tracts': 12},
'Marquette': {'pop': 15404, 'tracts': 5},
'Menominee': {'pop': 4232, 'tracts': 2},
'Milwaukee': {'pop': 947735, 'tracts': 297},
'Monroe': {'pop': 44673, 'tracts': 9},
'Oconto': {'pop': 37660, 'tracts': 10},
'Oneida': {'pop': 35998, 'tracts': 14},
'Outagamie': {'pop': 176695, 'tracts': 40},
'Ozaukee': {'pop': 86395, 'tracts': 18},
'Pepin': {'pop': 7469, 'tracts': 2},
'Pierce': {'pop': 41019, 'tracts': 8},
'Polk': {'pop': 44205, 'tracts': 10},
'Portage': {'pop': 70019, 'tracts': 14},
'Price': {'pop': 14159, 'tracts': 6},
'Racine': {'pop': 195408, 'tracts': 44},
'Richland': {'pop': 18021, 'tracts': 5},
'Rock': {'pop': 160331, 'tracts': 38},
'Rusk': {'pop': 14755, 'tracts': 5},
'Sauk': {'pop': 61976, 'tracts': 13},
'Sawyer': {'pop': 16557, 'tracts': 6},
'Shawano': {'pop': 41949, 'tracts': 11},
'Sheboygan': {'pop': 115507, 'tracts': 26},
'St. Croix': {'pop': 84345, 'tracts': 14},
'Taylor': {'pop': 20689, 'tracts': 6},
'Trempealeau': {'pop': 28816, 'tracts': 8},
'Vernon': {'pop': 29773, 'tracts': 7},
'Vilas': {'pop': 21430, 'tracts': 5},
'Walworth': {'pop': 102228, 'tracts': 22},
'Washburn': {'pop': 15911, 'tracts': 5},
'Washington': {'pop': 131887, 'tracts': 28},
'Waukesha': {'pop': 389891, 'tracts': 86},
'Waupaca': {'pop': 52410, 'tracts': 12},
'Waushara': {'pop': 24496, 'tracts': 7},
'Winnebago': {'pop': 166994, 'tracts': 41},
'Wood': {'pop': 74749, 'tracts': 17}},
'WV': {'Barbour': {'pop': 16589, 'tracts': 4},
'Berkeley': {'pop': 104169, 'tracts': 14},
'Boone': {'pop': 24629, 'tracts': 8},
'Braxton': {'pop': 14523, 'tracts': 3},
'Brooke': {'pop': 24069, 'tracts': 6},
'Cabell': {'pop': 96319, 'tracts': 29},
'Calhoun': {'pop': 7627, 'tracts': 2},
'Clay': {'pop': 9386, 'tracts': 3},
'Doddridge': {'pop': 8202, 'tracts': 2},
'Fayette': {'pop': 46039, 'tracts': 12},
'Gilmer': {'pop': 8693, 'tracts': 2},
'Grant': {'pop': 11937, 'tracts': 3},
'Greenbrier': {'pop': 35480, 'tracts': 7},
'Hampshire': {'pop': 23964, 'tracts': 5},
'Hancock': {'pop': 30676, 'tracts': 8},
'Hardy': {'pop': 14025, 'tracts': 3},
'Harrison': {'pop': 69099, 'tracts': 22},
'Jackson': {'pop': 29211, 'tracts': 6},
'Jefferson': {'pop': 53498, 'tracts': 15},
'Kanawha': {'pop': 193063, 'tracts': 53},
'Lewis': {'pop': 16372, 'tracts': 5},
'Lincoln': {'pop': 21720, 'tracts': 5},
'Logan': {'pop': 36743, 'tracts': 9},
'Marion': {'pop': 56418, 'tracts': 18},
'Marshall': {'pop': 33107, 'tracts': 9},
'Mason': {'pop': 27324, 'tracts': 6},
'McDowell': {'pop': 22113, 'tracts': 8},
'Mercer': {'pop': 62264, 'tracts': 16},
'Mineral': {'pop': 28212, 'tracts': 7},
'Mingo': {'pop': 26839, 'tracts': 7},
'Monongalia': {'pop': 96189, 'tracts': 24},
'Monroe': {'pop': 13502, 'tracts': 3},
'Morgan': {'pop': 17541, 'tracts': 4},
'Nicholas': {'pop': 26233, 'tracts': 7},
'Ohio': {'pop': 44443, 'tracts': 18},
'Pendleton': {'pop': 7695, 'tracts': 3},
'Pleasants': {'pop': 7605, 'tracts': 2},
'Pocahontas': {'pop': 8719, 'tracts': 4},
'Preston': {'pop': 33520, 'tracts': 8},
'Putnam': {'pop': 55486, 'tracts': 10},
'Raleigh': {'pop': 78859, 'tracts': 17},
'Randolph': {'pop': 29405, 'tracts': 7},
'Ritchie': {'pop': 10449, 'tracts': 3},
'Roane': {'pop': 14926, 'tracts': 4},
'Summers': {'pop': 13927, 'tracts': 4},
'Taylor': {'pop': 16895, 'tracts': 4},
'Tucker': {'pop': 7141, 'tracts': 3},
'Tyler': {'pop': 9208, 'tracts': 3},
'Upshur': {'pop': 24254, 'tracts': 6},
'Wayne': {'pop': 42481, 'tracts': 11},
'Webster': {'pop': 9154, 'tracts': 3},
'Wetzel': {'pop': 16583, 'tracts': 5},
'Wirt': {'pop': 5717, 'tracts': 2},
'Wood': {'pop': 86956, 'tracts': 26},
'Wyoming': {'pop': 23796, 'tracts': 6}},
'WY': {'Albany': {'pop': 36299, 'tracts': 10},
'Big Horn': {'pop': 11668, 'tracts': 3},
'Campbell': {'pop': 46133, 'tracts': 7},
'Carbon': {'pop': 15885, 'tracts': 5},
'Converse': {'pop': 13833, 'tracts': 4},
'Crook': {'pop': 7083, 'tracts': 2},
'Fremont': {'pop': 40123, 'tracts': 10},
'Goshen': {'pop': 13249, 'tracts': 4},
'Hot Springs': {'pop': 4812, 'tracts': 2},
'Johnson': {'pop': 8569, 'tracts': 2},
'Laramie': {'pop': 91738, 'tracts': 21},
'Lincoln': {'pop': 18106, 'tracts': 4},
'Natrona': {'pop': 75450, 'tracts': 18},
'Niobrara': {'pop': 2484, 'tracts': 1},
'Park': {'pop': 28205, 'tracts': 5},
'Platte': {'pop': 8667, 'tracts': 2},
'Sheridan': {'pop': 29116, 'tracts': 6},
'Sublette': {'pop': 10247, 'tracts': 2},
'Sweetwater': {'pop': 43806, 'tracts': 12},
'Teton': {'pop': 21294, 'tracts': 4},
'Uinta': {'pop': 21118, 'tracts': 3},
'Washakie': {'pop': 8533, 'tracts': 3},
'Weston': {'pop': 7208, 'tracts': 2}}}
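
# A minimal usage sketch for the mapping above. The variable name
# `county_census` is hypothetical (the binding is not shown in this
# excerpt); the structure is state -> county -> {'pop', 'tracts'}.
# Shown here: per-state aggregation and the most populous county.

county_census = {
    # ... the full state -> county -> {'pop', 'tracts'} dict shown above ...
    'WY': {'Weston': {'pop': 7208, 'tracts': 2}},
}

def state_totals(data, state):
    """Sum population and census-tract counts across a state's counties."""
    counties = data[state]
    pop = sum(c['pop'] for c in counties.values())
    tracts = sum(c['tracts'] for c in counties.values())
    return pop, tracts

def largest_county(data):
    """Return (state, county, population) for the most populous county."""
    return max(
        ((st, name, rec['pop'])
         for st, counties in data.items()
         for name, rec in counties.items()),
        key=lambda t: t[2],
    )

if __name__ == '__main__':
    pop, tracts = state_totals(county_census, 'WY')
    print(f"WY: {pop} people across {tracts} tracts")
    print(largest_county(county_census))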