#!/usr/bin/python
# coding: utf8
from __future__ import print_function
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from enum import Enum
try:
    from collections.abc import MutableSequence
except ImportError:  # Python 2
    from collections import MutableSequence
from collections import namedtuple
from collections import OrderedDict
from itertools import chain
import bisect
import logging
import struct
import array
import hashlib
import json
from .helper import ts_daily_left, ts_daily_right
from .helper import ts_hourly_left, ts_hourly_right
from .helper import ts_weekly_left, ts_weekly_right
from .helper import ts_monthly_left, ts_monthly_right
Aggregation = namedtuple('Aggregation', ['min', 'max', 'sum', 'count'])
class BucketType(Enum):
dynamic = 1
hourly = 2
daily = 3
weekly = 4
monthly = 5
resultset = 6
class ItemType(Enum):
raw_float = 1
raw_int = 2
tuple_float_2 = 3
tuple_float_3 = 4
tuple_float_4 = 5
basic_aggregation = 6
class TupleArray(MutableSequence):
def __init__(self, data_type="f", tuple_size=2):
if tuple_size < 2 or tuple_size > 20:
raise ValueError("invalid tuple size (2-20)")
super(TupleArray, self).__init__()
self.data_type = data_type
self.tuple_size = tuple_size
self._arrays = [array.array(data_type) for i in range(tuple_size)]
def __len__(self):
return len(self._arrays[0])
def __getitem__(self, ii):
return tuple(item[ii] for item in self._arrays)
def __delitem__(self, ii):
for a in self._arrays:
del a[ii]
def __setitem__(self, ii, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i][ii] = v
return tuple(item[ii] for item in self._arrays)
def __str__(self):
return self.__repr__()
def __repr__(self):
return "<TupleArray {} x {}>".format(self.data_type, self.tuple_size)
def insert(self, ii, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i].insert(ii, v)
def append(self, val):
if len(val) != len(self._arrays):
raise ValueError("tuple size incorrect")
for i, v in enumerate(val):
self._arrays[i].append(v)
def tostring(self):
return b"".join([x.tostring() for x in self._arrays])
def fromstring(self, string):
s = len(string) / len(self._arrays)
for i, a in enumerate(self._arrays):
f = int(i * s)
t = int(i * s + s)
a.fromstring(string[f:t])
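# Illustrative usage sketch (not part of the original module): a TupleArray
# stores fixed-size tuples column-wise, one array.array per tuple position.
#   ta = TupleArray("f", 2)
#   ta.append((1.0, 2.0))
#   ta.append((3.0, 4.0))
#   ta[1]    # -> (3.0, 4.0)
#   len(ta)  # -> 2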
class Bucket(object):
def __init__(self, parent, key, range_key, values=None):
self.parent = parent
self._dirty = False
self._existing = False
self._range_min = 0
self._range_max = 0
self.set_range_key(range_key)
# Create Data Structures
self._timestamps = array.array("I")
if self.item_type == ItemType.raw_float:
self._values = array.array("f")
elif self.item_type == ItemType.raw_int:
self._values = array.array("I")
elif self.item_type == ItemType.tuple_float_2:
self._values = TupleArray("f", 2)
elif self.item_type == ItemType.tuple_float_3:
self._values = TupleArray("f", 3)
elif self.item_type == ItemType.tuple_float_4:
self._values = TupleArray("f", 4)
else:
raise NotImplementedError("invalid item type")
if values is not None:
self.insert(values)
@property
def item_type(self):
return self.parent.item_type
@property
def bucket_type(self):
return self.parent.bucket_type
@property
def key(self):
return self.parent.key
@property
def existing(self):
return self._existing
@property
def dirty(self):
return self._dirty
def reset_dirty(self):
self._dirty = False
@property
def range_key(self):
return self._range_min
def set_range_key(self, range_key):
if self.bucket_type == BucketType.hourly:
l = ts_hourly_left(range_key)
r = ts_hourly_right(range_key)
elif self.bucket_type == BucketType.daily:
l = ts_daily_left(range_key)
r = ts_daily_right(range_key)
elif self.bucket_type == BucketType.weekly:
l = ts_weekly_left(range_key)
r = ts_weekly_right(range_key)
elif self.bucket_type == BucketType.monthly:
l = ts_monthly_left(range_key)
r = ts_monthly_right(range_key)
else:
raise NotImplementedError("invalid bucket type")
if l != range_key:
raise ValueError("invalid range key: %s" % range_key)
self._range_min = l
self._range_max = r
@property
def range_min(self):
return self._range_min
@property
def range_max(self):
return self._range_max
def __len__(self):
return len(self._timestamps)
def __bool__(self): # Python 3
if len(self) < 1:
return False
if len(self._timestamps) != len(self._values):
return False
# Check if sorted
it = iter(self._timestamps)
it.__next__()
return all(b >= a for a, b in zip(self._timestamps, it))
def __nonzero__(self): # Python 2
if len(self) < 1:
return False
if len(self._timestamps) != len(self._values):
return False
# Check if sorted
it = iter(self._timestamps)
it.next()
return all(b >= a for a, b in zip(self._timestamps, it))
def to_hash(self):
s = "{}.{}.{}.{}.{}.{}.{}.{}".format(self.key, self.item_type,
self.bucket_type, len(self),
self.ts_min, self.ts_max,
self.existing, self.dirty)
        return hashlib.sha1(s.encode("utf-8")).hexdigest()
def __eq__(self, other):
if not isinstance(other, Bucket):
return False
# Is Hashing a Performance Problem ?
# h1 = self.to_hash()
# h2 = other.to_hash()
# return h1 == h2
# This would compare the objects without hash
if self.key != other.key:
return False
if self._dirty != other._dirty:
return False
if self.item_type != other.item_type:
return False
if self.bucket_type != other.bucket_type:
return False
if len(self._timestamps) != len(other._timestamps):
return False
if len(self._timestamps) > 0:
if self._timestamps[0] != other._timestamps[0]:
return False
if self._timestamps[-1] != other._timestamps[-1]:
return False
return True
def __ne__(self, other):
return not self == other # NOT return not self.__eq__(other)
def __repr__(self):
l = len(self._timestamps)
if l > 0:
m = self._timestamps[0]
else:
m = -1
return "<{} series({}), min_ts: {}, items: {}, buckets: {}>".format(
self.key, l, m, self.item_type, self.bucket_type)
@property
def ts_max(self):
if len(self._timestamps) > 0:
return self._timestamps[-1]
return -1
@property
def ts_min(self):
if len(self._timestamps) > 0:
return self._timestamps[0]
return -1
def _at(self, i):
return (self._timestamps[i], self._values[i])
def __getitem__(self, key):
return self._at(key)
def to_string(self):
header = (struct.pack("H", int(self.item_type.value)) +
struct.pack("H", int(self.bucket_type.value)))
length = struct.pack("I", len(self))
return (header + length + self._timestamps.tostring() +
self._values.tostring())
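    # Serialized layout produced above: 2 bytes item_type + 2 bytes bucket_type
    # (unsigned shorts), 4 bytes item count (unsigned int), then the raw
    # timestamp array followed by the raw value array.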
@classmethod
def from_string(cls, key, string):
item_type = ItemType(int(struct.unpack("H", string[0:2])[0]))
bucket_type = BucketType(int(struct.unpack("H", string[2:4])[0]))
item_length = int(struct.unpack("I", string[4:8])[0])
split = 8 + 4 * item_length
ts, v = string[8:split], string[split:]
i = Bucket(key, item_type=item_type, bucket_type=bucket_type)
i._timestamps.fromstring(ts)
i._values.fromstring(v)
assert(i)
return i
def insert_point(self, timestamp, value, overwrite=False):
timestamp = int(timestamp)
idx = bisect.bisect_left(self._timestamps, timestamp)
# Append
if idx == len(self._timestamps):
self._timestamps.append(timestamp)
self._values.append(value)
self._dirty = True
return 1
# Already Existing
if self._timestamps[idx] == timestamp:
# Replace
logging.debug("duplicate insert")
if overwrite:
self._values[idx] = value
self._dirty = True
return 1
return 0
# Insert
self._timestamps.insert(idx, timestamp)
self._values.insert(idx, value)
self._dirty = True
return 1
def insert(self, series):
counter = 0
for timestamp, value in series:
counter += self.insert_point(timestamp, value)
return counter
class BucketCollection(OrderedDict):
def __init__(self, parent, *args, **kwargs):
self.parent = parent
super(BucketCollection, self).__init__(*args, **kwargs)
def __missing__(self, key):
k = self.parent.key
bucket = Bucket(self.parent, k, key)
self[key] = bucket
return self[key]
class TimeSeries(object):
DEFAULT_ITEMTYPE = ItemType.raw_float
DEFAULT_BUCKETTYPE = BucketType.daily
def __init__(self, key, values=None):
# Determine Types
# Maybe get this from key
self.item_type = self.DEFAULT_ITEMTYPE
self.bucket_type = self.DEFAULT_BUCKETTYPE
self.key = str(key).lower()
self.buckets = BucketCollection(self)
if values is not None:
self.insert(values)
def get_range_left(self, timestamp):
if self.bucket_type == BucketType.hourly:
return ts_hourly_left(timestamp)
elif self.bucket_type == BucketType.daily:
return ts_daily_left(timestamp)
elif self.bucket_type == BucketType.weekly:
return ts_weekly_left(timestamp)
elif self.bucket_type == BucketType.monthly:
return ts_monthly_left(timestamp)
else:
raise NotImplementedError("invalid bucket type")
def get_range_right(self, timestamp):
if self.bucket_type == BucketType.hourly:
return ts_hourly_right(timestamp)
elif self.bucket_type == BucketType.daily:
return ts_daily_right(timestamp)
elif self.bucket_type == BucketType.weekly:
return ts_weekly_right(timestamp)
elif self.bucket_type == BucketType.monthly:
return ts_monthly_right(timestamp)
else:
raise NotImplementedError("invalid bucket type")
def insert(self, series):
last_range_min = -1
last_range_max = -1
for timestamp, value in series:
if last_range_min <= timestamp <= last_range_max:
# just insert
self.buckets[last_range_min].insert_point(timestamp, value)
else:
l = self.get_range_left(timestamp)
r = self.get_range_right(timestamp)
if l < last_range_min or r < last_range_max:
raise ValueError("unsorted range key")
last_range_min = l
last_range_max = r
self.buckets[last_range_min].insert_point(timestamp, value)
@property
def timestamps(self):
        bucket_timestamps = [x._timestamps for x in self.buckets.values()]
        return chain.from_iterable(bucket_timestamps)
@property
def values(self):
        bucket_values = [x._values for x in self.buckets.values()]
        return chain.from_iterable(bucket_values)
def __len__(self):
        return sum(len(x) for x in self.buckets.values())
def _at(self, i):
offset = 0
idx = 0
        buckets = list(self.buckets.values())
current_bucket = buckets[idx]
while i >= len(current_bucket) + offset:
offset += len(current_bucket)
idx += 1
current_bucket = buckets[idx]
return current_bucket[i-offset]
def __getitem__(self, key):
return self._at(key)
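# Illustrative usage sketch (not part of the original module): points must be
# inserted in ascending timestamp order and are routed into daily buckets.
#   ts = TimeSeries("sensor.temp")
#   ts.insert([(1546300800, 21.5), (1546304400, 21.7), (1546387200, 20.9)])
#   len(ts)  # -> 3, spread over two daily buckets
#   ts[0]    # -> (1546300800, 21.5)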
class ResultSet(TimeSeries):
def __init__(self, key, items):
super(ResultSet, self).__init__(key)
self.bucket_type = BucketType.resultset
for i in items:
if i.key != key:
raise ValueError("Item has wrong key")
self._timestamps += i._timestamps
self._values += i._values
def _trim(self, ts_min, ts_max):
low = bisect.bisect_left(self._timestamps, ts_min)
high = bisect.bisect_right(self._timestamps, ts_max)
self._timestamps = self._timestamps[low:high]
self._values = self._values[low:high]
def all(self):
"""Return an iterater to get all ts value pairs.
"""
return zip(self._timestamps, self._values)
def daily(self):
"""Generator to access daily data.
This will return an inner generator.
"""
i = 0
while i < len(self._timestamps):
j = 0
lower_bound = ts_daily_left(self._timestamps[i])
upper_bound = ts_daily_right(self._timestamps[i])
while (i + j < len(self._timestamps) and
lower_bound <= self._timestamps[i + j] <= upper_bound):
j += 1
yield ((self._timestamps[x], self._values[x])
for x in range(i, i + j))
i += j
def hourly(self):
"""Generator to access hourly data.
This will return an inner generator.
"""
i = 0
while i < len(self._timestamps):
j = 0
lower_bound = ts_hourly_left(self._timestamps[i])
upper_bound = ts_hourly_right(self._timestamps[i])
while (i + j < len(self._timestamps) and
lower_bound <= self._timestamps[i + j] <= upper_bound):
j += 1
yield ((self._timestamps[x], self._values[x])
for x in range(i, i + j))
i += j
def aggregation(self, group="hourly", function="mean"):
"""Aggregation Generator.
"""
if group == "hourly":
it = self.hourly
left = ts_hourly_left
elif group == "daily":
it = self.daily
left = ts_daily_left
else:
raise ValueError("Invalid aggregation group")
if function == "sum":
func = sum
elif function == "count":
func = len
elif function == "min":
func = min
elif function == "max":
func = max
elif function == "amp":
def amp(x):
return max(x) - min(x)
func = amp
elif function == "mean":
def mean(x):
return sum(x) / len(x)
func = mean
else:
raise ValueError("Invalid aggregation group")
for g in it():
t = list(g)
ts = left(t[0][0])
value = func([x[1] for x in t])
            yield (ts, value)
| python |
'''
Code Challenge: Solve the Eulerian Cycle Problem.
Input: The adjacency list of an Eulerian directed graph.
Output: An Eulerian cycle in this graph.
'''
import random
import copy
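# The parser below expects one node per line in the "node -> successors" form
# used by the course datasets; a hypothetical test1.txt might look like:
#   0 -> 3
#   1 -> 0
#   2 -> 1,6
# where a comma-separated right-hand side lists several outgoing edges.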
with open('test1.txt','r') as f:
#with open('dataset_203_2.txt','r') as f:
adjacency_list = dict()
eulerian_edge_len = 0
muti_node = []
for i in f:
i = i.split('->')
left = int(i[0].strip())
right = i[1].strip()
if ',' in right:
muti_node.append(left)
right = right.split(',')
right = [int(x) for x in right]
eulerian_edge_len = eulerian_edge_len + len(right)
else:
            eulerian_edge_len = eulerian_edge_len + 1
right = int(right)
adjacency_list[left] = right
def cycle_form(adjacency_list, start_point):
adjacency_list_temp = copy.deepcopy(adjacency_list)
cycle_nodes = [start_point]
start_node = start_point
for i in range(eulerian_edge_len):
next_node = adjacency_list_temp[start_point]
if type(next_node) == int:
cycle_nodes.append(next_node)
start_point = next_node
else:
next_node = random.choice(next_node)
adjacency_list_temp[start_point].remove(next_node)
cycle_nodes.append(next_node)
start_point = next_node
if start_point in muti_node:
if len(adjacency_list_temp[start_point]) == 0:
break
if cycle_nodes[-1] == cycle_nodes[0]:
if type(adjacency_list_temp[cycle_nodes[0]]) == int:
break
if len(cycle_nodes) < (eulerian_edge_len + 1):
remain_muti_node = []
for i in muti_node:
if i in cycle_nodes:
if len(adjacency_list_temp[i]) > 0:
remain_muti_node.append(i)
new_start = random.choice(remain_muti_node)
else:
new_start = None
return [cycle_nodes, new_start]
def eulerian_cycle(adjacency_list):
start_point = random.choice(list(adjacency_list.keys()))
cycle_result = cycle_form(adjacency_list, start_point)
cycle = cycle_result[0]
    # Retry from a node that still has unused outgoing edges until a walk
    # covering every edge (eulerian_edge_len + 1 nodes) is found.
    while len(cycle) < (eulerian_edge_len + 1):
        new_start = cycle_result[1]
        cycle_result = cycle_form(adjacency_list, new_start)
        cycle = cycle_result[0]
return cycle
print(eulerian_cycle(adjacency_list))
| python |
"""Comic Rereading Discord Bot"""
from .rereadbot import *
async def setup(bot):
"""Setup the DoA Cogs"""
    await bot.add_cog(DoaRereadCog(bot, envfile="./.env"))
| python |
from datetime import datetime, timezone
import requests
from schemas import Contest
from spider.utils import update_platform
def main():
headers = {"x-requested-with": "XMLHttpRequest"}
resp = requests.get("https://csacademy.com/contests/", headers=headers)
json_data = resp.json()
data = []
tz = timezone.utc
for item in json_data["state"]["Contest"]:
if item.get("baseContestId"):
continue
contest_id = item["id"]
name = item["longName"]
link = "https://csacademy.com/contest/" + item["name"]
if isinstance(item.get("startTime", None), float):
start_time = datetime.fromtimestamp(item["startTime"], tz=tz)
else:
continue
if isinstance(item.get("endTime", None), float):
end_time = datetime.fromtimestamp(item["endTime"], tz=tz)
else:
continue
data.append(
Contest(
contest_id=contest_id,
name=name,
link=link,
start_time=start_time,
end_time=end_time,
)
)
update_platform("CSAcademy", data)
if __name__ == "__main__":
main()
| python |
#
# This source code is licensed under the Apache 2 license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import cPickle as pickle
import numpy as np
import h5py
import random
import pandas as pd
from nltk.tokenize import TweetTokenizer
word_tokenize = TweetTokenizer().tokenize
import re
# IMPORTANT: Make sure the parameters below match the specification of the generated
# summaries (i.e. the params['summaries_filename'] variable) in terms of the state
# and the dataset (i.e. params['dataset_location']) that will be loaded.
params = {
'state': 'test',
# 'state': 'validate',
'dataset_location': '../Datasets/ar/with_property_placeholders/',
# 'summaries_filename': './checkpoints/eo/with_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
# 'summaries_filename': './checkpoints/eo/without_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Validation.h5'
'summaries_filename': './checkpoints/ar/with_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
# 'summaries_filename': './checkpoints/ar/without_property_placeholders/surf_form_tuples.model.t7.batch_size_85.beam_size_20.summaries_Testing.h5'
}
labels_file_location = '../Datasets/ar/Labels/labels_dict.p'
# We will only be displaying the most probable summary.
beamidx = 0
# The location where the post-processed output will be stored.
summaries_dump_location = params['summaries_filename'].replace('h5', 'p')
# IMPORTANT: Leave the batch size unchanged
# It's the one with which we trained the models, and it should be the same
# as the one of the loaded pre-trained model that was used to generate the summaries
# (i.e. with beam-sample.lua). Change only if you train your own models using a
# different batch size.
batch_size = int(re.findall(r'(?<=batch_size_)(.*)(?=.beam_size)', params['summaries_filename'])[0])
beam_size = int(re.findall(r'(?<=beam_size_)(.*)(?=.summaries)', params['summaries_filename'])[0])
print('Parameters')
for key in params:
print('%s: %s' % (key, params[key]))
# Loading relevant dataset files.
summaries = h5py.File(params['summaries_filename'], 'r')
with open(params['dataset_location'] + 'summaries_dictionary.json', 'r') as f:
summaries_dictionary = json.load(f, 'utf-8')
id2word = summaries_dictionary['id2word']
id2word = {int(key): id2word[key] for key in id2word}
word2id = summaries_dictionary['word2id']
with open(params['dataset_location'] + 'triples_dictionary.json', 'r') as f:
triples_dictionary = json.load(f, 'utf-8')
max_num_triples = triples_dictionary['max_num_triples']
id2item = triples_dictionary['id2item']
id2item = {int(key): id2item[key] for key in id2item}
item2id = triples_dictionary['item2id']
# Loading supporting inverse dictionaries for surface forms and instance types.
with open(params['dataset_location'] + 'inv_surf_forms_dictionary.json', 'r') as f:
inv_surf_forms_tokens = json.load(f, encoding='utf-8')
with open(params['dataset_location'] + 'surf_forms_counts.p', 'rb') as f:
surf_forms_counts = pickle.load(f)
with open(params['dataset_location'] + 'inv_instance_types_with_predicates.json', 'r') as f:
inv_instancetypes_with_pred_dict = json.load(f, encoding='utf-8')
with open(params['dataset_location'] + 'splitDataset_with_targets.p', 'rb') as f:
splitDataset = pickle.load(f)
# Loading supporting labels_en dataset.
with open(labels_file_location, 'rb') as f:
labels = pickle.load(f)
print('All relevant dataset files from: %s have been successfully loaded.' % params['dataset_location'])
# Example of the structure of the supporting dictionaries:
# surf_forms_counts[u'http://www.wikidata.org/entity/Q46611']: {u'Apollo-Programo': 10, u'Projekto Apollo': 6, u'projekto Apollo': 2}
# inv_surf_forms_tokens[u'#surFormToken71849']: [u'http://www.wikidata.org/entity/Q832222', u'Caprivi-streko']
# inv_instancetypes_with_pred_dict[u'#instanceTypeWithPredicate11']: u'http://www.wikidata.org/prop/direct/P138'
most_frequent_surf_form = {}
for entity in surf_forms_counts:
most_frequent_surf_form[entity] = sorted(surf_forms_counts[entity], key=lambda k: surf_forms_counts[entity][k], reverse=True)[0]
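# For the Q46611 example above, most_frequent_surf_form[u'http://www.wikidata.org/entity/Q46611']
# would be u'Apollo-Programo', the surface form with the highest count (10).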
def tokenizeNumbers(inp_string):
tokens = word_tokenize(inp_string)
for j in range(0, len(tokens)):
try:
tempNumber = float(tokens[j].replace(',', ''))
if tempNumber // 1000 >= 1 and tempNumber // 1000 < 3:
tokens[j] = '<year> '
else:
tokens[j] = '0 '
except ValueError:
pass
# return detokenize(tokens, return_str=True) # detokenize has an issue with the non-latin characters.
return ' '.join(tokens)
def match_predicate_to_entity(token, triples, expressed_triples):
matched_entities = []
for tr in range(0, len(triples)):
if tr not in expressed_triples:
tempPredicate = triples[tr].split()[1]
if tempPredicate == token:
tempEntity = triples[tr].split()[-1]
if tempEntity == "<item>":
                    tempEntity = triples[tr].split()[0]
if tempEntity not in matched_entities:
matched_entities.append(tempEntity.decode('utf-8'))
if len(matched_entities) == 0:
token = '<resource>'
else:
random_selection = random.choice(matched_entities)
while random_selection not in labels and len(matched_entities) > 1:
matched_entities.remove(random_selection)
random_selection = random.choice(matched_entities)
if random_selection in labels:
if 'Datasets/ar/' in labels_file_location:
token = labels[random_selection].decode('unicode-escape')
else:
token = labels[random_selection]
expressed_triples.append(random_selection)
else:
token = '<resource>'
return token
def token_to_word(token, main_entity, triples, expressed_triples):
global summaries_type
if 'without_property_placeholders' in params['summaries_filename']:
assert ('#instanceTypeWithPredicate' not in token)
main_entity = main_entity
if "#surFormToken" in token:
word = inv_surf_forms_tokens[token[1:]][1] if "##surFormToken" in token else inv_surf_forms_tokens[token][1]
elif "#instanceTypeWithPredicate" in token:
word = match_predicate_to_entity(inv_instancetypes_with_pred_dict[token], triples, expressed_triples)
elif "#instanceType" in token:
word = inv_instancetypes_dict[token]
elif token == "<item>":
# The returned variable word is of type: unicode.
word = tokenizeNumbers(most_frequent_surf_form[main_entity])
else:
word = token
return word
output = {'Main-Item': [],
'index': [],
'number_original_triples': [],
'original_triples': [],
'number_input_triples': [],
'final_triples_with_types_reduced': [],
'final_triples_with_types': [],
'Target': [],
'Generated-Summary': []}
for batchidx in range(0, len(summaries['triples'])):
print('Post-processing summaries from %d. Batch...' % (batchidx + 1))
for instance in range(0, batch_size):
# Pay attention to the Python division at the np.round() function -- can seriously mess things up!
# More info at: https://stackoverflow.com/questions/28617841/rounding-to-nearest-int-with-numpy-rint-not-consistent-for-5
# We are using the built-in version of round which seems to be doing the trick for now.
splitDatasetIndex = int(round(instance * len(splitDataset[params['state']]['item']) / float(batch_size)) + batchidx)
mainItem = splitDataset[params['state']]['item'][splitDatasetIndex].decode('utf-8')
final_triples_with_types = []
for tr in range(0, len(splitDataset[params['state']]['final_triples_with_types'][splitDatasetIndex])):
tempTriple = splitDataset[params['state']]['final_triples_with_types'][splitDatasetIndex][tr]
if type(tempTriple) is not unicode:
tempTriple = tempTriple.decode('utf-8')
final_triples_with_types.append(tempTriple.replace('<item>', mainItem))
final_triples_with_types_reduced = []
for tr in range(0, len(splitDataset[params['state']]['final_triples_with_types_reduced'][splitDatasetIndex])):
# eq_used_for_training_triple: the triple as it was used by the neural network
# during training, validation and testing.
eq_used_for_training_triple = ' '.join([id2item[summaries['triples'][batchidx][tr][instance][j]] for j in range(0, 3)])
assert(splitDataset[params['state']]['final_triples_with_types_reduced'][splitDatasetIndex][tr] == eq_used_for_training_triple)
            if type(eq_used_for_training_triple) is not unicode:
eq_used_for_training_triple = eq_used_for_training_triple.decode('utf-8')
final_triples_with_types_reduced.append(eq_used_for_training_triple.replace('<item>', mainItem))
original_triples = []
for tr in range(0, len(splitDataset[params['state']]['triples'][splitDatasetIndex])):
tempTriple = splitDataset[params['state']]['triples'][splitDatasetIndex][tr]
if type(tempTriple) is not unicode:
tempTriple = tempTriple.decode('utf-8')
original_triples.append(tempTriple.replace('<item>', mainItem))
assert(len(final_triples_with_types) >= len(final_triples_with_types_reduced))
assert(len(final_triples_with_types) == len(original_triples))
expressed_triples = []
# We read from the tail of the argsort to find the elements
# with the highest probability.
selected_summary_index = np.argsort(summaries['probabilities'][:, batchidx * batch_size + instance])[::-1][beamidx]
summary = ''
i = 0
while summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i] != word2id['<end>']:
summary += ' ' + token_to_word(id2word[summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i]],
mainItem,
splitDataset[params['state']]['triples'][splitDatasetIndex],
expressed_triples)
if i == len(summaries['summaries'][selected_summary_index][batchidx * batch_size + instance]) - 1:
break
else:
i += 1
summary += ' ' + token_to_word(id2word[summaries['summaries'][selected_summary_index][batchidx * batch_size + instance][i]],
mainItem,
splitDataset[params['state']]['triples'][splitDatasetIndex],
expressed_triples)
# Appending everything to the dictionary of lists.
if id2item[0] not in summary[1:]:
output['index'].append((batchidx, instance))
output['number_original_triples'].append(len(original_triples))
output['original_triples'].append(original_triples)
output['number_input_triples'].append(len(final_triples_with_types_reduced))
output['final_triples_with_types_reduced'].append(final_triples_with_types_reduced)
output['final_triples_with_types'].append(final_triples_with_types)
output['Main-Item'].append(mainItem)
output['Target'].append(splitDataset[params['state']]['actual_target'][splitDatasetIndex])
output['Generated-Summary'].append(summary[1:])
# Saving all the generated summaries along with their input triples in a pickle file.
with open(summaries_dump_location, 'wb') as f:
pickle.dump(output, f)
print('The generated summaries have been successfully saved at: %s' % summaries_dump_location)
| python |
# Generated by Django 2.1.11 on 2019-11-18 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("course_catalog", "0052_userlistitem_contenttypes")]
operations = [
migrations.CreateModel(
name="Playlist",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=256)),
("short_description", models.TextField(blank=True, null=True)),
(
"_deprecated_offered_by",
models.CharField(
blank=True, db_column="offered_by", max_length=128, null=True
),
),
(
"image_description",
models.CharField(blank=True, max_length=1024, null=True),
),
("platform", models.CharField(max_length=40)),
("playlist_id", models.CharField(max_length=80)),
("image_src", models.URLField(blank=True, max_length=400, null=True)),
("url", models.URLField(max_length=2048, null=True)),
("published", models.BooleanField(default=True)),
("has_user_list", models.BooleanField(default=True)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="PlaylistVideo",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("position", models.PositiveIntegerField()),
(
"playlist",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlist_videos",
to="course_catalog.Playlist",
),
),
(
"video",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlist_videos",
to="course_catalog.Video",
),
),
],
),
migrations.CreateModel(
name="VideoChannel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("title", models.CharField(max_length=256)),
("short_description", models.TextField(blank=True, null=True)),
(
"_deprecated_offered_by",
models.CharField(
blank=True, db_column="offered_by", max_length=128, null=True
),
),
("platform", models.CharField(max_length=40)),
("channel_id", models.CharField(max_length=80)),
("full_description", models.TextField(blank=True, null=True)),
("published", models.BooleanField(default=True)),
(
"offered_by",
models.ManyToManyField(
blank=True, to="course_catalog.LearningResourceOfferor"
),
),
(
"topics",
models.ManyToManyField(blank=True, to="course_catalog.CourseTopic"),
),
],
options={"abstract": False},
),
migrations.AddField(
model_name="playlist",
name="channel",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="playlists",
to="course_catalog.VideoChannel",
),
),
migrations.AddField(
model_name="playlist",
name="offered_by",
field=models.ManyToManyField(
blank=True, to="course_catalog.LearningResourceOfferor"
),
),
migrations.AddField(
model_name="playlist",
name="topics",
field=models.ManyToManyField(blank=True, to="course_catalog.CourseTopic"),
),
migrations.AddField(
model_name="playlist",
name="videos",
field=models.ManyToManyField(
through="course_catalog.PlaylistVideo", to="course_catalog.Video"
),
),
migrations.AlterUniqueTogether(
name="playlistvideo", unique_together={("playlist", "video")}
),
]
| python |
"""Helpers to integrate the process on controlling profiles."""
from dataclasses import dataclass
from typing import List, Set, Optional
from bson import ObjectId
from flags import ProfilePermission, PermissionLevel
from mongodb.factory import ProfileManager, ChannelManager
from mongodb.helper import IdentitySearcher
from models import ChannelProfileModel
@dataclass
class ProfileControlEntry:
"""Single entry representing the profile control checking result."""
root_oid: ObjectId
name: str
controllable: bool
@dataclass
class ChannelProfileEntry:
"""Single entry representing a channel profile."""
profile: ChannelProfileModel
owner_names: List[str]
def __post_init__(self):
self.owner_names = sorted(self.owner_names)
class ProfileHelper:
"""Helper to process the profile data."""
@staticmethod
def get_user_profile_controls(
channel_model, profile_oid: ObjectId, requester_oid: ObjectId, permissions: Set[ProfilePermission]) \
-> List[ProfileControlEntry]:
"""
Check if the requester can perform certain actions on members who have the certain profile.
The **certain actions** mentioned above currently are:
- Control the profile attaching status
        Actions cannot be performed on users who have a higher permission level.
        Actions also cannot be performed on the default profile.
.. note::
This function is expensive because it calls ``IdentitySearcher.get_batch_user_name()``.
:param channel_model: channel data of the profile
:param profile_oid: OID of the profile
:param requester_oid: OID of the user who requested this check
:param permissions: permissions that the requester has
:return: list of `ProfileControlEntry` containing the check result
"""
ret = []
names = IdentitySearcher.get_batch_user_name(ProfileManager.get_profile_user_oids(profile_oid), channel_model)
perm_dict = ProfileManager.get_user_permission_lv_dict(channel_model.id)
remove_self = ProfilePermission.PRF_CONTROL_SELF in permissions
remove_member = ProfilePermission.PRF_CONTROL_MEMBER in permissions
is_default = channel_model.config.default_profile_oid == profile_oid
user_perm_lv = perm_dict.get(requester_oid, PermissionLevel.lowest())
for uid, name in sorted(names.items(), key=lambda item: item[1]):
if not name:
name = str(uid)
controllable = False
if not is_default and user_perm_lv >= perm_dict.get(uid, PermissionLevel.lowest()):
controllable = remove_self if uid == requester_oid else remove_member
ret.append(ProfileControlEntry(root_oid=uid, name=name, controllable=controllable))
return ret
@staticmethod
def get_channel_profiles(channel_oid: ObjectId, partial_name: Optional[str] = None) -> List[ChannelProfileEntry]:
"""
Get a list of the channel profiles in ``channel_oid``.
``partial_name`` can be a part of the profile name.
:param channel_oid: channel to get the profiles
:param partial_name: keyword to get the profiles
:return: list of channel profiles
"""
ret = []
# Get channel profiles. Terminate if no available profiles
profs = list(ProfileManager.get_channel_profiles(channel_oid, partial_name))
if not profs:
return ret
# Get channel data. Terminate if no channel data found
channel_model = ChannelManager.get_channel_oid(channel_oid)
if not channel_model:
return ret
# Get user names, and the prof-channel dict
user_oids_dict = ProfileManager.get_profiles_user_oids([prof.id for prof in profs])
user_oids = []
for _, onplat_oids in user_oids_dict.items():
user_oids.extend(onplat_oids)
user_names = IdentitySearcher.get_batch_user_name(user_oids, channel_model)
for prof in profs:
uids = user_oids_dict.get(prof.id, [])
ret.append(ChannelProfileEntry(prof, [user_names.get(uid) for uid in uids]))
return ret
| python |
import uos
import network
import socket
import select
import time
from machine import UART, Pin
ap_mode = False
recvPollers = []
sockets = []
clients = []
def socketSend(message):
    # Broadcast the message to every connected peer socket.
    for sock in sockets:
        try:
            sock.sendall(message)
        except:
            sock.close()
def generateDataPkg(text):
data = bytearray(b'\x3A\x00\x01')
data.extend(text.encode('utf8'))
for i in range(4-(len(text)%4)):
data.append(0)
data.append((~(sum(data)-58)+1)&0xFF)
return data
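# Worked example (illustrative): generateDataPkg("rtr") yields the bytes
#   3a 00 01 72 74 72 00 a7
# i.e. the 0x3A header, the payload "rtr", one zero byte of padding up to a
# multiple of four, and a final two's-complement checksum computed over every
# byte after the header ((~(345) + 1) & 0xFF == 0xa7 here).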
def generateDescPkg(dataPkg):
desc = bytearray(b'\x3a\x4e\x44\x64\x00\x01\x00\x01\x00\x00\x00\x00\x05\xff\x00')
desc[9], desc[11] = (len(dataPkg)-2,)*2
desc[14] = (~(sum(desc)-58)+1)&0xFF
return desc
def casioSend(descPkg, dataPkg):
uart.write(b'\x15')
uart.read(1)
uart.write(descPkg)
uart.read(1)
uart.write(dataPkg)
uart.read(1)
def handler(pin):
dataPkg = generateDataPkg("rtr")
descPkg = generateDescPkg(dataPkg)
casioSend(descPkg, dataPkg)
sta_if = network.WLAN(network.STA_IF)
ap_if = network.WLAN(network.AP_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
ap_if.active(False)
sta_if.connect('gurkenterror', 'saas1234')
while not sta_if.isconnected():
if sta_if.status() == 3:
print('network not available, starting ap')
sta_if.active(False)
ap_if.active(True)
ap_if.config(essid="gurkenterror", password="saas1234")
ap_mode = True
break
if ap_mode:
print('network config:', ap_if.ifconfig())
else:
print('network config:', sta_if.ifconfig())
if not ap_mode:
s = socket.socket()
print("connecting")
s.connect(('192.168.4.1', 65432))
print("connected")
clients = eval(s.recv(500))
print(clients)
sockets.append(s)
recvPoller = select.poll()
recvPoller.register(s, select.POLLIN)
recvPollers.append(recvPoller)
for client in clients:
s = socket.socket()
s.connect((client, 65432))
sockets.append(s)
recvPoller = select.poll()
recvPoller.register(s, select.POLLIN)
recvPollers.append(recvPoller)
listener = socket.socket()
listener.bind(("", 65432))
listener.listen(10)
print("listener started")
connPoller = select.poll()
connPoller.register(listener, select.POLLIN)
uos.dupterm(None, 1) # disable REPL on UART(0)
uart = UART(0, 38400)
uart.init(38400, bits=8, parity=None, stop=1, timeout=1000)
button = Pin(0, Pin.IN, Pin.PULL_UP)
button.irq(trigger=Pin.IRQ_FALLING, handler=handler)
# Main loop
while(True):
# Handle new connections
connEvents = connPoller.poll(100)
for descriptor, Event in connEvents:
print("Got an incoming connection request")
conn, addr = listener.accept()
print(conn, addr)
conn.sendall(str(clients))
sockets.append(conn)
clients.append(addr[0])
recvPoller = select.poll()
recvPoller.register(conn, select.POLLIN)
recvPollers.append(recvPoller)
    # Handle new messages for every socket
for recvPoller in recvPollers:
recvEvents = recvPoller.poll(100)
for descriptor, Event in recvEvents:
data = descriptor.recv(500)
print("Received: ", data)
descPkg = generateDescPkg(data)
casioSend(descPkg, data)
# Handle UART com
if uart.any() and uart.read(1) == b'\x15':
uart.write(b'\x13')
desc = uart.read(15)
uart.write(b'\x06')
msg = uart.read(desc[9]+2)
uart.write(b'\x06')
print("".join("%02x " % i for i in msg))
socketSend(msg)
try:
print("Received: ", msg[3:-2].decode("utf8"))
except:
print("not unicode")
| python |
from importlib import import_module
def load_extensions(app):
for extension in app.config["EXTENSIONS"]:
module_name, factory = extension.split(":")
ext = import_module(module_name)
getattr(ext, factory)(app)
def load_blueprints(app):
for extension in app.config["BLUEPRINTS"]:
module_name, factory = extension.split(":")
ext = import_module(module_name)
getattr(ext, factory)(app)
def load_middlewares(app):
for middleware in reversed(app.config["MIDDLEWARES"]):
module_name, klass = middleware.split(":")
ext = import_module(module_name)
app.wsgi_app = getattr(ext, klass)(app.wsgi_app)
def init_app(app, settings_override=None):
app.config.from_object("settings")
if settings_override:
app.config.update(settings_override)
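# Illustrative settings sketch (module and callable names below are
# hypothetical, not taken from this project): the loaders above expect
# "dotted.module:callable" strings, e.g. in settings.py:
#   EXTENSIONS = ["myapp.ext.database:init_app"]
#   BLUEPRINTS = ["myapp.views.home:register_blueprint"]
#   MIDDLEWARES = ["myapp.middleware:RequestIdMiddleware"]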
| python |
#let's work on dictionaries
'''stuff = {'name':'Vivek', 'age':18, 'height':6*2}
print(stuff['name'])
print(stuff['age'])
print(stuff)
'''
'''
state = {
'Oregon' : 'OR',
'Florida' : 'FL',
'California': 'CA',
'New York' : 'NY',
'Michigan' : 'MI'
}
cities = {
'CA': 'California',
'NY' : 'New York',
'MI' : 'Michigan'
}
cities['OR'] = 'Oregon'
cities['FL'] = 'Florida'
print('-'*10)
print("NY state has : ",cities['NY'])
print('-'*10)'''
a = {
'a' : 'Monday',
'b' : 'Tuesday',
'c' : 'Wednesday',
'd' : 'Thursday',
'e' : 'Friday',
'f' : 'Saturday',
'g' : 'Sunday'
}
print(a)
for key,k in a.items():
print(key, k)
print("-"*10)
print(a.get('a',"Hi there"))
print(a.get('h', "Hello World"))
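# dict.get(key, default) returns the stored value when the key exists
# ('Monday' for 'a' above) and the default otherwise ('Hello World' for the
# missing key 'h').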
| python |
from dataclasses import dataclass, field
from typing import Optional
from .geometry import Geometry
__NAMESPACE__ = "sdformat/v1.3/collision.xsd"
@dataclass
class Collision:
"""The collision properties of a link.
Note that this can be different from the visual properties of a
link, for example, simpler collision models are often used to reduce
computation time.
Parameters
----------
laser_retro: intensity value returned by laser sensor.
max_contacts: Maximum number of contacts allowed between two
entities. This value overrides the max_contacts element defined
in physics.
pose: The reference frame of the collision element, relative to the
reference frame of the link.
geometry: The shape of the visual or collision object.
surface: The surface parameters
name: Unique name for the collision element within the scope of the
parent link.
"""
class Meta:
name = "collision"
laser_retro: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_contacts: int = field(
default=10,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
pose: str = field(
default="0 0 0 0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){5}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
geometry: Optional[Geometry] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
surface: Optional["Collision.Surface"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
name: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"required": True,
},
)
@dataclass
class Surface:
"""
The surface parameters.
"""
bounce: Optional["Collision.Surface.Bounce"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
friction: Optional["Collision.Surface.Friction"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
contact: Optional["Collision.Surface.Contact"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Bounce:
"""
Parameters
----------
restitution_coefficient: Bounciness coefficient of
restitution, from [0...1], where 0=no bounciness.
threshold: Bounce velocity threshold, below which effective
coefficient of restitution is 0.
"""
restitution_coefficient: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
threshold: float = field(
default=100000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Friction:
"""
Parameters
----------
ode: ODE friction parameters
"""
ode: Optional["Collision.Surface.Friction.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE friction parameters.
Parameters
----------
mu: Coefficient of friction in the range of [0..1].
mu2: Second coefficient of friction in the range of
[0..1]
fdir1: 3-tuple specifying direction of mu1 in the
collision local reference frame.
slip1: Force dependent slip direction 1 in collision
local frame, between the range of [0..1].
slip2: Force dependent slip direction 2 in collision
local frame, between the range of [0..1].
"""
mu: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
mu2: float = field(
default=-1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
fdir1: str = field(
default="0 0 0",
metadata={
"type": "Element",
"namespace": "",
"required": True,
"pattern": r"(\s*(-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+)\s+){2}((-|\+)?(\d+(\.\d*)?|\.\d+|\d+\.\d+[eE][-\+]?[0-9]+))\s*",
},
)
slip1: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
slip2: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
@dataclass
class Contact:
"""
Parameters
----------
ode: ODE contact parameters
"""
ode: Optional["Collision.Surface.Contact.Ode"] = field(
default=None,
metadata={
"type": "Element",
"namespace": "",
},
)
@dataclass
class Ode:
"""
ODE contact parameters.
Parameters
----------
soft_cfm: Soft constraint force mixing.
soft_erp: Soft error reduction parameter
kp: dynamically "stiffness"-equivalent coefficient for
contact joints
kd: dynamically "damping"-equivalent coefficient for
contact joints
max_vel: maximum contact correction velocity truncation
term.
min_depth: minimum allowable depth before contact
correction impulse is applied
"""
soft_cfm: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
soft_erp: float = field(
default=0.2,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kp: float = field(
default=1000000000000.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
kd: float = field(
default=1.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
max_vel: float = field(
default=0.01,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
min_depth: float = field(
default=0.0,
metadata={
"type": "Element",
"namespace": "",
"required": True,
},
)
| python |
import trio
from socket import (
inet_aton,
)
import pytest
import pytest_trio
from async_service import background_trio_service
from p2p.discv5.channel_services import (
DatagramReceiver,
DatagramSender,
Endpoint,
IncomingDatagram,
OutgoingDatagram,
OutgoingPacket,
PacketDecoder,
PacketEncoder,
)
from p2p.tools.factories import (
AuthTagPacketFactory,
EndpointFactory,
)
@pytest_trio.trio_fixture
async def socket_pair():
sending_socket = trio.socket.socket(
family=trio.socket.AF_INET,
type=trio.socket.SOCK_DGRAM,
)
receiving_socket = trio.socket.socket(
family=trio.socket.AF_INET,
type=trio.socket.SOCK_DGRAM,
)
    # specifying 0 as the port number results in using a random available port
await sending_socket.bind(("127.0.0.1", 0))
await receiving_socket.bind(("127.0.0.1", 0))
return sending_socket, receiving_socket
@pytest.mark.trio
async def test_datagram_receiver(socket_pair):
sending_socket, receiving_socket = socket_pair
receiver_address = receiving_socket.getsockname()
sender_address = sending_socket.getsockname()
send_channel, receive_channel = trio.open_memory_channel(1)
async with background_trio_service(DatagramReceiver(receiving_socket, send_channel)):
data = b"some packet"
await sending_socket.sendto(data, receiver_address)
with trio.fail_after(0.5):
received_datagram = await receive_channel.receive()
assert received_datagram.datagram == data
assert received_datagram.sender_endpoint.ip_address == inet_aton(sender_address[0])
assert received_datagram.sender_endpoint.port == sender_address[1]
@pytest.mark.trio
async def test_datagram_sender(socket_pair):
sending_socket, receiving_socket = socket_pair
receiver_endpoint = receiving_socket.getsockname()
sender_endpoint = sending_socket.getsockname()
send_channel, receive_channel = trio.open_memory_channel(1)
async with background_trio_service(DatagramSender(receive_channel, sending_socket)):
outgoing_datagram = OutgoingDatagram(
b"some packet",
Endpoint(inet_aton(receiver_endpoint[0]), receiver_endpoint[1]),
)
await send_channel.send(outgoing_datagram)
with trio.fail_after(0.5):
data, sender = await receiving_socket.recvfrom(1024)
assert data == outgoing_datagram.datagram
assert sender == sender_endpoint
@pytest.mark.trio
async def test_packet_decoder():
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
service = PacketDecoder(datagram_receive_channel, packet_send_channel)
async with background_trio_service(service):
packet = AuthTagPacketFactory()
sender_endpoint = EndpointFactory()
await datagram_send_channel.send(IncomingDatagram(
datagram=packet.to_wire_bytes(),
sender_endpoint=sender_endpoint,
))
with trio.fail_after(0.5):
incoming_packet = await packet_receive_channel.receive()
assert incoming_packet.packet == packet
assert incoming_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
assert incoming_packet.sender_endpoint.port == sender_endpoint.port
@pytest.mark.trio
async def test_packet_decoder_error():
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
service = PacketDecoder(datagram_receive_channel, packet_send_channel)
async with background_trio_service(service):
# send invalid packet
await datagram_send_channel.send(IncomingDatagram(
datagram=b"not a valid packet",
sender_endpoint=EndpointFactory(),
))
# send valid packet
packet = AuthTagPacketFactory()
sender_endpoint = EndpointFactory()
await datagram_send_channel.send(IncomingDatagram(
datagram=packet.to_wire_bytes(),
sender_endpoint=sender_endpoint,
))
# ignore the invalid one, only receive the valid one
with trio.fail_after(0.5):
incoming_packet = await packet_receive_channel.receive()
assert incoming_packet.packet == packet
assert incoming_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
assert incoming_packet.sender_endpoint.port == sender_endpoint.port
@pytest.mark.trio
async def test_packet_encoder():
packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
service = PacketEncoder(packet_receive_channel, datagram_send_channel)
async with background_trio_service(service):
receiver_endpoint = EndpointFactory()
outgoing_packet = OutgoingPacket(
packet=AuthTagPacketFactory(),
receiver_endpoint=receiver_endpoint,
)
await packet_send_channel.send(outgoing_packet)
with trio.fail_after(0.5):
outgoing_datagram = await datagram_receive_channel.receive()
assert outgoing_datagram.datagram == outgoing_packet.packet.to_wire_bytes()
assert outgoing_datagram.receiver_endpoint.ip_address == receiver_endpoint.ip_address
assert outgoing_datagram.receiver_endpoint.port == receiver_endpoint.port
| python |
# -*- coding: utf-8 -*-
from dataclasses import dataclass
from dataclasses_io import dataclass_io
from pathlib import Path
_TEST_PATH = Path(__file__).parent
@dataclass_io
@dataclass
class _MyDataclass:
id: int
name: str
memo: str
if __name__ == "__main__":
dataclass1 = _MyDataclass(id=42, name="John Doe", memo="Hello, world!")
# {'id': 42, 'name': 'John Doe', 'memo': 'Hello, world!'}
print("dataclass1", dataclass1.config)
dataclass1.save(_TEST_PATH / "test.json")
dataclass2 = _MyDataclass.load(_TEST_PATH / "test.json")
print("dataclass2", dataclass2.config) # same as line 19
    # dataclass1 and dataclass2 have the same properties, but refer to
    # different objects in memory. save() and load() work as intended.
print(f"dataclass1 == dataclass2: {dataclass1 == dataclass2}")
print(f"dataclass1 is dataclass2: {dataclass1 is dataclass2}")
| python |
# Given an array of n positive integers and a positive integer s, find the minimal
# length of a contiguous subarray whose sum is >= s. If there is no such subarray,
# return 0 instead.
#
# Example:
#
# Input: s = 7, nums = [2,3,1,2,4,3]
# Output: 2
# Explanation: the subarray [4,3] has the minimal length under the given constraint.
# Follow-up:
#
# If you have figured out the O(n) solution, try coding a solution with
# O(n log n) time complexity.
#
# Source: LeetCode (力扣)
# Link: https://leetcode-cn.com/problems/minimum-size-subarray-sum
# The copyright belongs to LeetCode (领扣网络). Contact them for authorization before
# commercial reprints; cite the source for non-commercial reprints.
from typing import List
class Solution:
def minSubArrayLen(self, s: int, nums: List[int]) -> int:
if not nums:
return 0
left = 0
right = 0
ans = float('inf')
sum_of_nums = nums[0]
while left <= right < len(nums):
if sum_of_nums < s:
right += 1
if right < len(nums):
sum_of_nums += nums[right]
else:
ans = min(ans, right - left + 1)
sum_of_nums -= nums[left]
left += 1
return 0 if ans == float('inf') else ans
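# A minimal sketch for the O(n log n) follow-up mentioned above (not part of the
# original solution; the function name is made up for illustration). Because all
# numbers are positive, prefix sums are strictly increasing, so for each start
# index i we can binary-search the first prefix sum >= prefix[i] + s.
import bisect

def min_sub_array_len_nlogn(s: int, nums: List[int]) -> int:
    prefix = [0]
    for x in nums:
        prefix.append(prefix[-1] + x)
    ans = float('inf')
    for i in range(len(nums)):
        # First index j with prefix[j] >= prefix[i] + s, i.e. sum(nums[i:j]) >= s.
        j = bisect.bisect_left(prefix, prefix[i] + s)
        if j <= len(nums):
            ans = min(ans, j - i)
    return 0 if ans == float('inf') else ans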
if __name__ == '__main__':
s = Solution()
assert s.minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) == 2
assert s.minSubArrayLen(4, [1, 4, 4]) == 1
    assert s.minSubArrayLen(11, [1, 2, 3, 4, 5]) == 3
| python |
"""
Clean and validate a DataFrame column containing country names.
"""
from functools import lru_cache
from operator import itemgetter
from os import path
from typing import Any, Union
import dask
import dask.dataframe as dd
import numpy as np
import pandas as pd
import regex as re
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, create_report_new, to_dask
COUNTRY_DATA_FILE = path.join(path.split(path.abspath(__file__))[0], "country_data.tsv")
DATA = pd.read_csv(COUNTRY_DATA_FILE, sep="\t", encoding="utf-8", dtype=str)
REGEXES = [re.compile(entry, re.IGNORECASE) for entry in DATA.regex]
def clean_country(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
input_format: str = "auto",
output_format: str = "name",
fuzzy_dist: int = 0,
strict: bool = False,
inplace: bool = False,
errors: str = "coerce",
report: bool = True,
progress: bool = True,
) -> pd.DataFrame:
"""
Clean and standardize country names.
Read more in the :ref:`User Guide <country_userguide>`.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
The name of the column containing country names.
input_format
The ISO 3166 input format of the country.
- 'auto': infer the input format
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'auto')
output_format
The desired ISO 3166 format of the country:
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'name')
fuzzy_dist
The maximum edit distance (number of single character insertions, deletions
or substitutions required to change one word into the other) between a country value
and input that will count as a match. Only applies to 'auto', 'name' and 'official'
input formats.
(default: 0)
strict
        If True, matching for input formats 'name' and 'official' is done by looking
for a direct match. If False, matching is done by searching the input for a
regex match.
(default: False)
inplace
If True, delete the column containing the data that was cleaned. Otherwise,
keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
report
If True, output the summary report. Otherwise, no report is outputted.
(default: True)
progress
If True, display a progress bar.
(default: True)
Examples
--------
>>> df = pd.DataFrame({'country': [' Canada ', 'US']})
>>> clean_country(df, 'country')
Country Cleaning Report:
2 values cleaned (100.0%)
Result contains 2 (100.0%) values in the correct format and 0 null values (0.0%)
country country_clean
0 Canada Canada
1 US United States
"""
# pylint: disable=too-many-arguments
input_formats = {"auto", "name", "official", "alpha-2", "alpha-3", "numeric"}
output_formats = {"name", "official", "alpha-2", "alpha-3", "numeric"}
if input_format not in input_formats:
raise ValueError(
f'input_format {input_format} is invalid, it needs to be one of "auto", '
'"name", "official", "alpha-2", "alpha-3" or "numeric'
)
if output_format not in output_formats:
raise ValueError(
f'output_format {output_format} is invalid, it needs to be "name", '
'"official", "alpha-2", "alpha-3" or "numeric'
)
if strict and fuzzy_dist > 0:
raise ValueError(
"can't do fuzzy matching while strict mode is enabled, "
"set strict=False for fuzzy matching or fuzzy_dist=0 for strict matching"
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [
_format_country(x, input_format, output_format, fuzzy_dist, strict, errors) for x in srs
],
meta=object,
)
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
_code_=df["clean_code_tup"].map(itemgetter(1)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
# counts of codes indicating how values were changed
stats = df["_code_"].value_counts(sort=False)
df = df.drop(columns=["clean_code_tup", "_code_"])
if inplace:
df = df.drop(columns=column)
with ProgressBar(minimum=1, disable=not progress):
df, stats = dask.compute(df, stats)
# output a report describing the result of clean_country
if report:
create_report_new("Country", stats, errors)
return df
def validate_country(
x: Union[str, int, pd.Series], input_format: str = "auto", strict: bool = True
) -> Union[bool, pd.Series]:
"""
Validate country names.
Read more in the :ref:`User Guide <country_userguide>`.
Parameters
----------
x
pandas Series of countries or str/int country value.
input_format
The ISO 3166 input format of the country.
- 'auto': infer the input format
- 'name': country name ('United States')
- 'official': official state name ('United States of America')
- 'alpha-2': alpha-2 code ('US')
- 'alpha-3': alpha-3 code ('USA')
- 'numeric': numeric code (840)
(default: 'auto')
strict
        If True, matching for input formats 'name' and 'official' is done by
        looking for a direct match. If False, matching is done by searching
the input for a regex match.
(default: False)
Examples
--------
>>> validate_country('United States')
True
>>> df = pd.DataFrame({'country': ['Canada', 'NaN']})
>>> validate_country(df['country'])
0 True
1 False
Name: country, dtype: bool
"""
if isinstance(x, pd.Series):
x = x.astype(str).str.lower().str.strip()
return x.apply(_check_country, args=(input_format, strict, False))
x = str(x).lower().strip()
return _check_country(x, input_format, strict, False)
def _format_country(
val: Any,
input_format: str,
output_format: str,
fuzzy_dist: int,
strict: bool,
errors: str,
) -> Any:
"""
Function to transform a country instance into the desired format
The last component of the returned tuple contains a code indicating how the
input value was changed:
0 := the value is null
1 := the value could not be parsed
2 := the value is cleaned and the cleaned value is DIFFERENT than the input value
3 := the value is cleaned and is THE SAME as the input value (no transformation)
"""
# pylint: disable=too-many-arguments
# _check_country parses input value "val", and returns the index of the country
# in the DATA dataframe. The returned value "status" can be either "null"
# (which means val is a null value), "unknown" (in which case val
# could not be parsed) or "success" (a successful parse of the value).
country = str(val).lower().strip()
result_index, status = _check_country(country, input_format, strict, True)
if fuzzy_dist > 0 and status == "unknown" and input_format in ("auto", "name", "official"):
result_index, status = _check_fuzzy_dist(country, fuzzy_dist)
if status == "null":
return np.nan, 0
if status == "unknown":
if errors == "raise":
raise ValueError(f"unable to parse value {val}")
return val if errors == "ignore" else np.nan, 1
result = DATA.loc[result_index, output_format]
if pd.isna(result):
# country doesn't have the required output format
if errors == "raise":
raise ValueError(f"unable to parse value {val}")
return val if errors == "ignore" else np.nan, 1
return result, 2 if val != result else 3
@lru_cache(maxsize=2 ** 20)
def _check_country(country: str, input_format: str, strict: bool, clean: bool) -> Any:
"""
Finds the index of the given country in the DATA dataframe.
Parameters
----------
country
string containing the country value being cleaned
input_format
the ISO 3166 input format of the country
strict
        If True, for input types "name" and "official" the function looks for a direct match
in the DATA dataframe. If False, the country input is searched for a regex match.
clean
If True, a tuple (index, status) is returned.
If False, the function returns True/False to be used by the validate country function.
"""
if country in NULL_VALUES:
return (None, "null") if clean else False
if input_format == "auto":
input_format = _get_format_from_name(country)
if strict and input_format == "regex":
for form in ("name", "official"):
ind = DATA[
DATA[form].str.contains(f"^{re.escape(country)}$", flags=re.IGNORECASE, na=False)
].index
if np.size(ind) > 0:
return (ind[0], "success") if clean else True
elif not strict and input_format in ("regex", "name", "official"):
for index, country_regex in enumerate(REGEXES):
if country_regex.search(country):
return (index, "success") if clean else True
else:
ind = DATA[
DATA[input_format].str.contains(
f"^{re.escape(country)}$", flags=re.IGNORECASE, na=False
)
].index
if np.size(ind) > 0:
return (ind[0], "success") if clean else True
return (None, "unknown") if clean else False
@lru_cache(maxsize=2 ** 20)
def _check_fuzzy_dist(country: str, fuzzy_dist: int) -> Any:
"""
A match is found if a country has an edit distance <= fuzzy_dist
with a string that contains a match with one of the country regexes.
Find the index of a match with a minimum edit distance.
"""
results = []
for i, country_regex in enumerate(DATA.regex):
# {e<=fuzzy_dist} means the total number of errors
# (insertions, deletions and substitutions) must be <= fuzzy_dist,
# re.BESTMATCH looks for a match with minimum number of errors
fuzzy_regex = f"({country_regex}){{e<={fuzzy_dist}}}"
match = re.search(fuzzy_regex, country, flags=re.BESTMATCH | re.IGNORECASE)
if match:
# add total number of errors and the index to results
results.append((sum(match.fuzzy_counts), i))
if not results:
return None, "unknown"
return min(results)[1], "success"
def _get_format_from_name(name: str) -> str:
"""
Function to infer the input format. Used when the input format is auto.
"""
try:
int(name)
return "numeric"
except ValueError:
return "alpha-2" if len(name) == 2 else "alpha-3" if len(name) == 3 else "regex"
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-15 02:47
from __future__ import unicode_literals
from django.db import migrations, models
import jobs.models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0008_auto_20161115_0222'),
]
operations = [
migrations.AlterField(
model_name='additionalinformation',
name='resume',
field=models.FileField(blank=True, upload_to=jobs.models.get_file_path),
),
]
| python |
from normality import normalize
def text_parts(text):
text = normalize(text, latinize=True)
if text is None:
return set()
return set(text.split(' '))
def index_text(proxy):
texts = set()
for name in proxy.names:
texts.update(text_parts(name))
return ' '.join(texts)
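# Minimal usage sketch (the example strings are illustrative; normality's
# normalize() lowercases and latinizes, so accented tokens come back as ASCII):
if __name__ == "__main__":
    print(text_parts("Jérôme Dupont"))  # e.g. {'jerome', 'dupont'}
    print(text_parts(None))             # empty set for un-normalizable input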
| python |
import torch
import torch.nn as nn
import torch.nn.functional as F  # F.softmax is used in predict()
from nn_blocks import *
from torch import optim
import time
class DApredictModel(nn.Module):
def __init__(self, utt_vocab, da_vocab, tod_bert, config):
super(DApredictModel, self).__init__()
if config['DApred']['use_da']:
self.da_encoder = DAEncoder(da_input_size=len(da_vocab.word2id), da_embed_size=config['DApred']['DA_EMBED'],
da_hidden=config['DApred']['DA_HIDDEN'])
self.da_context = DAContextEncoder(da_hidden=config['DApred']['DA_HIDDEN'])
dec_hidden_size = config['DApred']['DA_HIDDEN']+config['DApred']['UTT_CONTEXT']*2+1+768 if config['DApred']['use_da'] else config['DApred']['UTT_CONTEXT']*2+1
self.da_decoder = DADecoder(da_input_size=len(da_vocab.word2id), da_embed_size=config['DApred']['DA_EMBED'],
da_hidden=dec_hidden_size)
self.utt_encoder = UtteranceEncoder(utt_input_size=len(utt_vocab.word2id), embed_size=config['DApred']['UTT_EMBED'],
utterance_hidden=config['DApred']['UTT_HIDDEN'], padding_idx=utt_vocab.word2id['<PAD>'])
self.utt_context = UtteranceContextEncoder(utterance_hidden_size=config['DApred']['UTT_CONTEXT']*2+1)
self.attention = Attention(self.utt_encoder.hidden_size*2)
self.criterion = nn.CrossEntropyLoss(ignore_index=0)
self.config = config
self.tod_bert = tod_bert
def forward(self, X_da, Y_da, X_utt, TC, turn, step_size):
"""
X_da: input sequence of DA, Tensor(window_size, batch_size, 1)
Y_da: gold DA, Tensor(batch_size, 1)
        X_utt: input sentences, Tensor(window_size, batch_size, seq_len, 1)
        TC: context input passed to the TOD-BERT encoder (used when config['use_tod'] is set)
        turn: whether the next speaker equals the current speaker, Tensor(window_size, batch_size, 1)
        """
dec_hidden = self._encode(X_da=X_da, X_utt=X_utt, TC=TC, step_size=step_size, turn=turn)
decoder_output = self.da_decoder(dec_hidden) # (batch_size, 1, DA_VOCAB)
decoder_output = decoder_output.squeeze(1) # (batch_size, DA_VOCAB)
Y_da = Y_da.squeeze()
        if self.config['use_weights'] and self.config['use_freq']:
            device = torch.device('cpu')
            class_weights = [0, 0.499, 0.7621, 0.8918, 0.9002, 0.9799, 0.9881, 0.9879, 0.9904]
            weights = torch.FloatTensor(class_weights).to(device)
            w_criterion = nn.CrossEntropyLoss(weight=weights, ignore_index=0)
            loss = w_criterion(decoder_output, Y_da)
        else:
            # fall back to the unweighted criterion so `loss` is always defined
            loss = self.criterion(decoder_output, Y_da)
if self.training:
loss.backward()
return loss.item(), decoder_output.data.cpu().numpy()
def predict(self, X_da, X_utt, TC, turn, step_size):
with torch.no_grad():
dec_hidden = self._encode(X_da=X_da, X_utt=X_utt, TC=TC, step_size=step_size, turn=turn)
decoder_output = self.da_decoder(dec_hidden) # (batch_size, 1, DA_VOCAB)
decoder_output = decoder_output.squeeze(1) # (batch_size, DA_VOCAB)
decoder_output = F.softmax(decoder_output, dim=-1)
return decoder_output.data.cpu().numpy()
def _encode(self, X_da, X_utt, TC, turn, step_size):
if self.config['DApred']['use_da']:
da_context_hidden = self.da_context.initHidden(step_size)
# da_contexts = []
for x_da in X_da:
da_encoder_hidden = self.da_encoder(x_da) # (batch_size, 1, DA_HIDDEN)
da_context_output, da_context_hidden = self.da_context(da_encoder_hidden, da_context_hidden) # (batch_size, 1, DA_HIDDEN)
# da_contexts.append(da_context_output)
# da_context_output = torch.stack(da_contexts).permute(0, 1)
if self.config['DApred']['use_utt'] and not self.config['DApred']['use_uttcontext']:
utt_encoder_hidden = self.utt_encoder.initHidden(step_size)
utt_encoder_output, utt_encoder_hidden = self.utt_encoder(X_utt[-1], utt_encoder_hidden) # (batch_size, 1, UTT_HIDDEN)
if self.config['DApred']['use_da']:
dec_hidden = torch.cat((da_context_output, utt_encoder_output), dim=-1)
else:
dec_hidden = utt_encoder_output
elif self.config['DApred']['use_uttcontext']:
# utt_contexts = []
utt_context_hidden = self.utt_context.initHidden(step_size)
for i in range(len(X_utt)):
utt_encoder_hidden = self.utt_encoder.initHidden(step_size)
utt_encoder_output, utt_encoder_hidden = self.utt_encoder(X_utt[i], utt_encoder_hidden) # (batch_size, 1, UTT_HIDDEN)
# utt_encoder_output = utt_encoder_output.sum(dim=1).unsqueeze(1)
attns = self.attention(utt_encoder_output)
utt_encoder_output = (utt_encoder_output * attns).sum(dim=1).unsqueeze(1)
utt_encoder_output = torch.cat((utt_encoder_output, turn[i].float().unsqueeze(-1)), dim=-1)
utt_context_output, utt_context_hidden = self.utt_context(utt_encoder_output, utt_context_hidden) # (batch_size, 1, UTT_HIDDEN)
# utt_contexts.append(utt_context_output)
# utt_context_output = torch.stack(utt_contexts).permute(0, 1)
if self.config['DApred']['use_da']:
dec_hidden = torch.cat((da_context_output, utt_context_output), dim=-1)
if self.config['use_tod']:
tod_context_encoding = self.tod_bert(TC, return_dict=True)
tod_features = tod_context_encoding['last_hidden_state']
#print('Tod features', tod_features.shape)
tod_context_output = tod_features[:,0,:].unsqueeze(1)
dec_hidden = torch.cat((dec_hidden, tod_context_output), dim=-1)
dec_hidden = self.utt_encoder.dropout(dec_hidden)
#dec_hidden = torch.cat((da_context_output, utt_context_output), dim=-1) # (batch_size, 1, DEC_HIDDEN)
if not self.config['DApred']['use_dacontext']:
dec_hidden = torch.cat((da_encoder_hidden, utt_context_output), dim=-1)
else:
dec_hidden = utt_context_output
else:
dec_hidden = da_context_output
return dec_hidden
| python |
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.now(),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 4, 24),
}
dag = DAG('undeploy_prediction_codegen', default_args=default_args)
# TODO: dockerFileTag and dockerFilePath should be passed in from webhook
switch_to_aws = BashOperator(
task_id='switch_to_aws',
bash_command='sudo kubectl config use-context awsdemo',
dag=dag)
undeploy_container_aws = BashOperator(
task_id='undeploy_container_to_aws',
bash_command='sudo kubectl delete prediction-codegen',
dag=dag)
switch_to_gcp = BashOperator(
task_id='switch_to_gcp',
bash_command='sudo kubectl config use-context gcpdemo',
dag=dag)
undeploy_container_gcp = BashOperator(
task_id='undeploy_container_gcp',
bash_command='sudo kubectl delete prediction-codegen',
dag=dag)
# Setup Airflow DAG
undeploy_container_aws.set_upstream(switch_to_aws)
switch_to_gcp.set_upstream(undeploy_container_aws)
undeploy_container_gcp.set_upstream(switch_to_gcp)
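# The same dependency chain could also be written with Airflow's bitshift
# operators (equivalent to the set_upstream calls above; shown as a comment
# only, to avoid registering the dependencies twice):
#   switch_to_aws >> undeploy_container_aws >> switch_to_gcp >> undeploy_container_gcp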
| python |
from typing import Any, Tuple, Union
from lf3py.lang.annotation import FunctionAnnotation
from lf3py.routing.errors import UnresolvedArgumentsError
from lf3py.routing.types import Middleware
from lf3py.serialization.deserializer import Deserializer
from lf3py.serialization.errors import DeserializeError
from lf3py.task.data import Command
def resolve_args(middleware: Middleware, command: Command, dsn_spec: str) -> Union[Tuple[Any, dict], dict]:
try:
func_anno = FunctionAnnotation(middleware)
dsn_params = command.dsn.capture(dsn_spec)
dsn_kwargs = {
key: int(dsn_params[key]) if arg_anno.origin is int else dsn_params[key]
for key, arg_anno in func_anno.args.items()
if key in dsn_params
}
body_kwargs = {
key: command.data(arg_anno.origin)
for key, arg_anno in func_anno.args.items()
if key not in dsn_kwargs and not arg_anno.is_generics and issubclass(arg_anno.origin, Deserializer)
}
inject_kwargs = {**dsn_kwargs, **body_kwargs}
if func_anno.is_method:
return func_anno.receiver, inject_kwargs
else:
return inject_kwargs
except (DeserializeError, KeyError, ValueError) as e:
raise UnresolvedArgumentsError(e) from e
| python |
#!/usr/bin/env python
# coding=utf8
from __future__ import unicode_literals
from datetime import timedelta
import collections
import functools
import os
import re
import string
from io import StringIO
import pytest
from hypothesis import given, settings, HealthCheck, assume
import hypothesis.strategies as st
import srt
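# Register Hypothesis profiles that suppress the too_slow health check; the
# active profile is selected below via the HYPOTHESIS_PROFILE environment variable.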
REGISTER_SETTINGS = lambda name, **kwargs: settings.register_profile(
name, suppress_health_check=[HealthCheck.too_slow], deadline=None, **kwargs
)
REGISTER_SETTINGS("base")
REGISTER_SETTINGS("release", max_examples=1000)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "base"))
HOURS_IN_DAY = 24
TIMEDELTA_MAX_DAYS = 999999999
CONTENTLESS_SUB = functools.partial(
srt.Subtitle, index=1, start=timedelta(seconds=1), end=timedelta(seconds=2)
)
def is_strictly_legal_content(content):
"""
Filter out things that would violate strict mode. Illegal content
includes:
- A content section that starts or ends with a newline
- A content section that contains blank lines
"""
if content.strip("\r\n") != content:
return False
elif not content.strip():
return False
elif "\n\n" in content:
return False
else:
return True
def subs_eq(got, expected, any_order=False):
"""
Compare Subtitle objects using vars() so that differences are easy to
identify.
"""
got_vars = [frozenset(vars(sub).items()) for sub in got]
expected_vars = [frozenset(vars(sub).items()) for sub in expected]
if any_order:
assert collections.Counter(got_vars) == collections.Counter(expected_vars)
else:
assert got_vars == expected_vars
def timedeltas(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
"""
A Hypothesis strategy to generate timedeltas.
Right now {min,max}_value are shoved into multiple fields in timedelta(),
which is not very customisable, but it's good enough for our current test
purposes. If you need more precise control, you may need to add more
parameters to this function to be able to customise more freely.
"""
time_unit_strategy = st.integers(min_value=min_value, max_value=max_value)
timestamp_strategy = st.builds(
timedelta,
hours=time_unit_strategy,
minutes=time_unit_strategy,
seconds=time_unit_strategy,
)
return timestamp_strategy
def equivalent_timestamps(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
def string_timestamp(hours, minutes, seconds, msecs, paddings):
hours, minutes, seconds, msecs = map(
lambda v_and_p: "0" * v_and_p[1] + str(v_and_p[0]),
zip((hours, minutes, seconds, msecs), paddings),
)
return "{}:{}:{},{}".format(hours, minutes, seconds, msecs)
def ts_field_value():
return st.integers(min_value=min_value, max_value=max_value)
def zero_padding():
return st.integers(min_value=0, max_value=2)
@st.composite
def maybe_off_by_one_fields(draw):
field = draw(ts_field_value())
field_maybe_plus_one = draw(st.integers(min_value=field, max_value=field + 1))
return field_maybe_plus_one, field
def get_equiv_timestamps(h, m, s, ms2, ts1paddings, ts2paddings):
h2, h1 = h
m2, m1 = m
s2, s1 = s
ms1 = (
(h2 - h1) * 60 * 60 * 1000 + (m2 - m1) * 60 * 1000 + (s2 - s1) * 1000 + ms2
)
return (
string_timestamp(h2, m2, s2, ms2, ts2paddings),
string_timestamp(h1, m1, s1, ms1, ts1paddings),
)
return st.builds(
get_equiv_timestamps,
maybe_off_by_one_fields(),
maybe_off_by_one_fields(),
maybe_off_by_one_fields(),
ts_field_value(),
st.tuples(*[zero_padding() for _ in range(4)]),
st.tuples(*[zero_padding() for _ in range(4)]),
)
def subtitles(strict=True):
"""A Hypothesis strategy to generate Subtitle objects."""
# max_value settings are just to avoid overflowing TIMEDELTA_MAX_DAYS by
# using arbitrary low enough numbers.
#
# We also skip subs with start time >= end time, so we split them into two
# groups to avoid overlap.
start_timestamp_strategy = timedeltas(min_value=0, max_value=500000)
end_timestamp_strategy = timedeltas(min_value=500001, max_value=999999)
# \r is not legal inside Subtitle.content, it should have already been
# normalised to \n.
content_strategy = st.text(min_size=1).filter(lambda x: "\r" not in x)
proprietary_strategy = st.text().filter(
lambda x: all(eol not in x for eol in "\r\n")
)
if strict:
content_strategy = content_strategy.filter(is_strictly_legal_content)
subtitle_strategy = st.builds(
srt.Subtitle,
index=st.integers(min_value=0),
start=start_timestamp_strategy,
end=end_timestamp_strategy,
proprietary=proprietary_strategy,
content=content_strategy,
)
return subtitle_strategy
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file(input_subs):
srt_file = StringIO(srt.compose(input_subs, reindex=False))
reparsed_subs = srt.parse(srt_file)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file_bom(input_subs):
srt_file = StringIO("\ufeff" + srt.compose(input_subs, reindex=False))
reparsed_subs = srt.parse(srt_file)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict(input_subs):
composed = srt.compose(input_subs, reindex=False)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_ending_blank_line(input_subs):
"""
Many sub editors don't add a blank line to the end, and many editors accept
it. We should just accept this too in input.
"""
composed = srt.compose(input_subs, reindex=False)
composed_without_ending_blank = composed[:-1]
reparsed_subs = srt.parse(composed_without_ending_blank)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_eol_at_all(input_subs):
composed = srt.compose(input_subs, reindex=False)
composed_without_ending_blank = composed.rstrip("\r\n")
reparsed_subs = srt.parse(composed_without_ending_blank)
subs_eq(reparsed_subs, input_subs)
@given(st.text().filter(is_strictly_legal_content))
def test_compose_and_parse_strict_mode(content):
# sub.content should not have OS-specific line separators, only \n
assume("\r" not in content)
content = "\n" + content + "\n\n" + content + "\n"
sub = CONTENTLESS_SUB(content=content)
parsed_strict = list(srt.parse(sub.to_srt()))[0]
parsed_unstrict = list(srt.parse(sub.to_srt(strict=False)))[0]
# Strict mode should remove blank lines in content, leading, and trailing
# newlines.
assert not parsed_strict.content.startswith("\n")
assert not parsed_strict.content.endswith("\n")
assert "\n\n" not in parsed_strict.content
# When strict mode is false, no processing should be applied to the
# content (other than \r\n becoming \n).
assert parsed_unstrict.content == sub.content.replace("\r\n", "\n")
@given(st.integers(min_value=1, max_value=TIMEDELTA_MAX_DAYS))
def test_timedelta_to_srt_timestamp_can_go_over_24_hours(days):
srt_timestamp = srt.timedelta_to_srt_timestamp(timedelta(days=days))
srt_timestamp_hours = int(srt_timestamp.split(":")[0])
assert srt_timestamp_hours == days * HOURS_IN_DAY
@given(subtitles())
def test_subtitle_equality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
assert sub_1 == sub_2
@given(subtitles())
def test_subtitle_inequality(sub_1):
sub_2 = srt.Subtitle(**vars(sub_1))
sub_2.index += 1
assert sub_1 != sub_2
@given(subtitles())
def test_subtitle_from_scratch_equality(subtitle):
srt_block = subtitle.to_srt()
# Get two totally new sets of objects so as not to affect the hash
# comparison
sub_1 = list(srt.parse(srt_block))[0]
sub_2 = list(srt.parse(srt_block))[0]
subs_eq([sub_1], [sub_2])
# In case subs_eq and eq disagree for some reason
assert sub_1 == sub_2
assert hash(sub_1) == hash(sub_2)
@given(st.lists(subtitles()))
def test_parsing_spaced_arrow(subs):
spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "- >")
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_spaced_ender_arrow(subs):
# Seen in BSG subtitles
spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "-- >")
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_ws_arrow(subs):
spaced_block = srt.compose(subs, reindex=False, strict=False).replace(
" --> ", "-->"
)
reparsed_subtitles = srt.parse(spaced_block)
subs_eq(reparsed_subtitles, subs)
@given(st.text(string.whitespace), st.lists(subtitles()))
def test_parsing_leading_whitespace(ws, subs):
prews_block = ws + srt.compose(subs, reindex=False, strict=False)
reparsed_subtitles = srt.parse(prews_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_negative_index(subs):
for sub in subs:
sub.index *= -1
prews_block = srt.compose(subs, reindex=False, strict=False)
reparsed_subtitles = srt.parse(prews_block)
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_content_with_blank_lines(subs):
for subtitle in subs:
# We stuff a blank line in the middle so as to trigger the "special"
# content parsing for erroneous SRT files that have blank lines.
subtitle.content = subtitle.content + "\n\n" + subtitle.content
reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_content(subs):
for subtitle in subs:
subtitle.content = ""
reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()), st.lists(subtitles()), st.text(alphabet="\n\r\t "))
def test_subs_missing_content_removed(content_subs, contentless_subs, contentless_text):
for sub in contentless_subs:
sub.content = contentless_text
subs = contentless_subs + content_subs
composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
# We should have composed the same subs as there are in content_subs, as
# all contentless_subs should have been stripped.
subs_eq(composed_subs, content_subs, any_order=True)
# The subtitles should be reindexed starting at start_index, excluding
# contentless subs
default_start_index = 1
assert [sub.index for sub in composed_subs] == list(
range(default_start_index, default_start_index + len(composed_subs))
)
@given(
st.lists(subtitles()),
st.lists(subtitles()),
timedeltas(min_value=-999, max_value=-1),
)
def test_subs_starts_before_zero_removed(positive_subs, negative_subs, negative_td):
for sub in negative_subs:
sub.start = negative_td
sub.end = negative_td # Just to avoid tripping any start >= end errors
subs = positive_subs + negative_subs
composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
# There should be no negative subs
subs_eq(composed_subs, positive_subs, any_order=True)
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex(input_subs, start_index):
for sub in input_subs:
# Pin all subs to same end time so that start time is compared only,
# must be guaranteed to be < sub.start, see how
# start_timestamp_strategy is done
sub.end = timedelta(500001)
reindexed_subs = list(
srt.sort_and_reindex(input_subs, start_index=start_index, in_place=True)
)
# The subtitles should be reindexed starting at start_index
assert [sub.index for sub in reindexed_subs] == list(
range(start_index, start_index + len(input_subs))
)
# The subtitles should be sorted by start time
expected_sorting = sorted(input_subs, key=lambda sub: sub.start)
assert reindexed_subs == expected_sorting
@given(st.lists(subtitles()))
def test_sort_and_reindex_no_skip(input_subs):
# end time > start time should not trigger a skip if skip=False
for sub in input_subs:
old_start = sub.start
sub.start = sub.end
sub.end = old_start
reindexed_subs = list(srt.sort_and_reindex(input_subs, skip=False))
# Nothing should have been skipped
assert len(reindexed_subs) == len(input_subs)
@given(st.lists(subtitles(), min_size=1))
def test_sort_and_reindex_same_start_time_uses_end(input_subs):
for sub in input_subs:
# Pin all subs to same start time so that end time is compared only
sub.start = timedelta(1)
reindexed_subs = list(srt.sort_and_reindex(input_subs, in_place=True))
# The subtitles should be sorted by end time when start time is the same
expected_sorting = sorted(input_subs, key=lambda sub: sub.end)
assert reindexed_subs == expected_sorting
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex_not_in_place_matches(input_subs, start_index):
# Make copies for both sort_and_reindex calls so that they can't affect
# each other
not_in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
nip_ids = [id(sub) for sub in not_in_place_subs]
ip_ids = [id(sub) for sub in in_place_subs]
not_in_place_output = list(
srt.sort_and_reindex(not_in_place_subs, start_index=start_index)
)
in_place_output = list(
srt.sort_and_reindex(in_place_subs, start_index=start_index, in_place=True)
)
# The results in each case should be the same
subs_eq(not_in_place_output, in_place_output)
# Not in place sort_and_reindex should have created new subs
assert not any(id(sub) in nip_ids for sub in not_in_place_output)
# In place sort_and_reindex should be reusing the same subs
assert all(id(sub) in ip_ids for sub in in_place_output)
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_noncontiguous(subs, fake_idx, garbage, fake_timedelta):
composed = srt.compose(subs)
# Put some garbage between subs that should trigger our failed parsing
# detection. Since we do some magic to try and detect blank lines that
# don't really delimit subtitles, it has to look at least a little like an
# SRT block.
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
composed = composed.replace(
"\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
)
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_noncontiguous_ignore_errors(subs, fake_idx, garbage, fake_timedelta):
composed = srt.compose(subs)
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
composed = composed.replace(
"\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
)
# Should not raise, we have ignore_errors
list(srt.parse(composed, ignore_errors=True))
def _parseable_as_int(text):
try:
int(text)
except ValueError:
return False
return True
def _parseable_as_float(text):
try:
float(text)
except ValueError:
return False
return True
@given(st.lists(subtitles()), st.text(min_size=1))
def test_parser_noncontiguous_leading(subs, garbage):
# Issue #50 permits leading whitespace, see test_parsing_leading_whitespace
assume(not garbage.isspace())
# Issue #56 permits negative indexes, see test_parsing_negative_index. It
# also shouldn't just be a number, because then we'd confuse it with our
# index...
assume(garbage.strip()[0] != ".")
assume(garbage.strip()[0] != "-")
assume(not _parseable_as_int(garbage.strip()))
assume(not _parseable_as_float(garbage.strip()))
# Put some garbage at the beginning that should trigger our noncontiguity
# checks
composed = garbage + srt.compose(subs)
with pytest.raises(srt.SRTParseError):
list(srt.parse(composed))
@given(
st.lists(subtitles(), min_size=1),
st.integers(min_value=0),
st.text(min_size=1),
timedeltas(),
)
def test_parser_didnt_match_to_end_raises(subs, fake_idx, garbage, fake_timedelta):
srt_blocks = [sub.to_srt() for sub in subs]
srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
garbage = "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
srt_blocks.append(garbage)
composed = "".join(srt_blocks)
with pytest.raises(srt.SRTParseError) as thrown_exc:
list(srt.parse(composed))
# Since we will consume as many \n as needed until we meet the lookahead
# assertion, leading newlines in `garbage` will be stripped.
garbage_stripped = garbage.lstrip("\n")
assert garbage_stripped == thrown_exc.value.unmatched_content
assert len(composed) - len(garbage_stripped) == thrown_exc.value.expected_start
assert len(composed) == thrown_exc.value.actual_start
@given(st.lists(subtitles()))
def test_parser_can_parse_with_dot_msec_delimiter(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
dot_srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
# We should only do the first two, as it might also be in the
# proprietary metadata, causing this test to fail.
dot_timestamp = srt_lines[1].replace(",", ".", 2)
srt_lines[1] = dot_timestamp
dot_srt_blocks.append("\n".join(srt_lines))
composed_with_dots = "".join(dot_srt_blocks)
reparsed_subs = srt.parse(composed_with_dots)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_fullwidth_delimiter(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
dot_srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
        dot_timestamp = srt_lines[1].replace(",", "，", 1).replace(":", "：", 1)
srt_lines[1] = dot_timestamp
dot_srt_blocks.append("\n".join(srt_lines))
composed_with_fullwidth = "".join(dot_srt_blocks)
reparsed_subs = srt.parse(composed_with_fullwidth)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_no_msec(subs):
original_srt_blocks = [sub.to_srt() for sub in subs]
srt_blocks = []
for srt_block in original_srt_blocks:
srt_lines = srt_block.split("\n")
# We should only do the first two, as it might also be in the
# proprietary metadata, causing this test to fail.
srt_lines[1] = re.sub(",[0-9]+", "", srt_lines[1], 2)
srt_blocks.append("\n".join(srt_lines))
composed = "".join(srt_blocks)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, subs)
@given(subtitles())
def test_repr_doesnt_crash(sub):
# Not much we can do here, but we should make sure __repr__ doesn't crash
# or anything and it does at least vaguely look like what we want
assert "Subtitle" in repr(sub)
assert str(sub.index) in repr(sub)
@given(subtitles(), subtitles())
def test_parser_accepts_final_no_newline_no_content(sub1, sub2):
# Limit size so we know how much to remove
sub2.content = ""
subs = [sub1, sub2]
# Remove the last newlines so that there are none. Cannot use rstrip since
# there might be other stuff that gets matched in proprietary
stripped_srt_blocks = srt.compose(subs, reindex=False)[:-2]
reparsed_subs = srt.parse(stripped_srt_blocks)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_accepts_newline_no_content(subs):
for sub in subs:
# Limit size so we know how many lines to remove
sub.content = ""
# Remove the last \n so that there is only one
stripped_srt_blocks = "".join(sub.to_srt()[:-1] for sub in subs)
reparsed_subs = srt.parse(stripped_srt_blocks)
subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict_crlf(input_subs):
composed_raw = srt.compose(input_subs, reindex=False)
composed = composed_raw.replace("\n", "\r\n")
reparsed_subs = list(srt.parse(composed))
for sub in reparsed_subs:
sub.content = sub.content.replace("\r\n", "\n")
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.one_of(st.just("\n"), st.just("\r\n")))
def test_compose_and_parse_strict_custom_eol(input_subs, eol):
composed = srt.compose(input_subs, reindex=False, eol=eol)
reparsed_subs = srt.parse(composed)
subs_eq(reparsed_subs, input_subs)
@given(equivalent_timestamps())
def test_equal_timestamps_despite_different_fields_parsed_as_equal(timestamps):
ts1, ts2 = timestamps
assert srt.srt_timestamp_to_timedelta(ts1) == srt.srt_timestamp_to_timedelta(ts2)
@given(timedeltas())
def test_bad_timestamp_format_raises(ts):
ts = srt.timedelta_to_srt_timestamp(ts)
ts = ts.replace(":", "t", 1)
with pytest.raises(srt.TimestampParseError):
srt.srt_timestamp_to_timedelta(ts)
@given(st.lists(subtitles()), st.lists(st.sampled_from(string.whitespace)))
def test_can_parse_index_trailing_ws(input_subs, whitespace):
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = lines[0] + "".join(whitespace)
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_parse_index_with_dot(input_subs):
# Seen in Battlestar Galactica subs
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = lines[0] + "." + lines[0]
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.lists(st.just("0")))
def test_can_parse_index_leading_zeroes(input_subs, zeroes):
out = ""
for sub in input_subs:
lines = sub.to_srt().split("\n")
lines[0] = "".join(zeroes) + lines[0]
out += "\n".join(lines)
reparsed_subs = srt.parse(out)
subs_eq(reparsed_subs, input_subs)
| python |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
import argparse
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
import numpy
import scipy.stats
import torch
import torch.optim as optim
import jammy_flows
from jammy_flows import helper_fns
import pylab
from matplotlib import rc
import random
def seed_everything(seed_no):
random.seed(seed_no)
numpy.random.seed(seed_no)
torch.manual_seed(seed_no)
## Generate data that follows letter shapes using some TTF template
###################################################################
def sample_character(char, path='OpenSans-Bold.ttf', fontsize=60, width_per_cell=0.5, num_samples=1000, center_coords=(0,0), manifold_type="e"):
"""
Based on https://stackoverflow.com/a/27753869/190597 (jsheperd)
"""
font = ImageFont.truetype(path, fontsize)
w, h = font.getsize(char)
h *= 2
image = Image.new('L', (w, h), 1)
draw = ImageDraw.Draw(image)
draw.text((0, 0), char, font=font)
arr = np.asarray(image)
arr = np.where(arr, 0, 1)
arr = arr[(arr != 0).any(axis=1)]
one_mask=arr.T==1
num_x_cells=one_mask.shape[0]
num_y_cells=one_mask.shape[1]
## discretized random sampling that follows letter shape
xvals, yvals=np.meshgrid(np.arange(one_mask.shape[0]), np.arange(one_mask.shape[1]))
xvals=xvals.T.astype('float64')
yvals=yvals.T.astype('float64')
xvals-=num_x_cells//2
yvals-=num_y_cells//2
# add some extra noise
xvals+=np.random.normal(size=xvals.shape)
yvals+=np.random.normal(size=yvals.shape)
xvals*=width_per_cell
yvals*=width_per_cell*(-1.0) ## have to flip y
one_coords=np.hstack([xvals[one_mask][:,None], yvals[one_mask][:,None]])
sample_indices=np.random.choice(len(one_coords), num_samples)
samples=one_coords[sample_indices]
samples[:,0]+=center_coords[0]
samples[:,1]+=center_coords[1]
## scale azimuth to make it similar to zenith
if(manifold_type=="s"):
azi_diff=(samples[:,1]-numpy.pi)
samples[:,1]=numpy.pi+azi_diff*2
return samples
## this function generates train and test data
def sample_data(pdf_def, sentence, num_samples=10000):
words=sentence.split(" ")
num_words=len(words)
last_len=len(words[0])
for w in words:
if(len(w)!=last_len):
raise Exception("All words in sentence must be of same length")
## every char takes 2 dimensions
manifold_str=""
len_per_word=0
pdf_dim=0
for pdf in pdf_def.split("+"):
if(int(pdf[1:])%2!=0):
raise Exception("Characters take 2 dimensions, so string is visualized with 2*len(str) dims. Every PDF must have a dimension divisible by 2 for simplicity.")
len_per_word=int(pdf[1:])//2
pdf_dim+=int(pdf[1:])
if("e" in pdf):
manifold_str+=len_per_word*"e"
elif("s" in pdf):
manifold_str+=len_per_word*"s"
word_indices=np.random.choice(num_words, num_samples)
_, class_occurences = np.unique(word_indices, return_counts=True)
labels=torch.randn( (num_samples, pdf_dim)).type(torch.float64)
## loop words
for w_index, w in enumerate(words):
this_w_sample=[]
## loop char per word
for c_index, c in enumerate(w):
center=(0,0)
stretch=0.5
## if sphere, center character at equator
if(manifold_str[c_index]=="s"):
center=(np.pi/2.0, np.pi)
stretch=0.05
res=sample_character(c, num_samples=class_occurences[w_index], width_per_cell=stretch, center_coords=center, manifold_type=manifold_str[c_index])
if(manifold_str[c_index]=="s"):
assert( ((res[:,0]<0) | (res[:,0]>np.pi)).sum()==0)
assert( ((res[:,1]<0) | (res[:,1]>2*np.pi)).sum()==0)
this_w_sample.append(torch.from_numpy(res))
tot_sample=torch.cat(this_w_sample, dim=1)
labels[word_indices==w_index]=tot_sample
onehot_input = torch.nn.functional.one_hot(torch.from_numpy(word_indices), num_words).type(torch.float64)
return onehot_input, labels
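# sample_data returns a one-hot word encoding of shape (num_samples, num_words)
# and a label tensor of shape (num_samples, pdf_dim) whose coordinates follow
# the letter shapes of the sampled word.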
#######################################################################
## plot the model during training
def plot_test(test_data, test_labels, model, words, fname="figs/test.png"):
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
num_words=len(torch.unique(test_data, dim=0))
fig=pylab.figure(figsize=((num_words+1)*4, 4))
gridspec=fig.add_gridspec(1, num_words+1)
word_ids=torch.nn.functional.one_hot(torch.arange(num_words), num_words).type(torch.float64)
## 2 * log_pdf differences
pdf_res, base_pdf_res, _=model(test_labels)#, conditional_input=test_data)
dim=test_labels.shape[1]
glob_dim_index=0
bounds=[]
bmin=9999
bmax=-9999
mask=[]
for pdf_str in model.pdf_defs_list:
this_dim=int(pdf_str[1:])
this_type=pdf_str[0]
if(this_type=="e"):
for ind in range(this_dim):
this_min=test_labels.detach().numpy()[:,glob_dim_index].min()
this_max=test_labels.detach().numpy()[:,glob_dim_index].max()
if(this_min<bmin):
bmin=this_min
if(this_max>bmax):
bmax=this_max
glob_dim_index+=1
else:
glob_dim_index+=2
continue
sphere_plot_type="standard"
for pdf_str in model.pdf_defs_list:
this_dim=int(pdf_str[1:])
this_type=pdf_str[0]
if(this_type=="s"):
if(sphere_plot_type=="standard"):
bounds.append([0,np.pi])
bounds.append([0,2*np.pi])
else:
bounds.append([-2,2])
bounds.append([-2,2])
glob_dim_index+=2
else:
for ind in range(this_dim):
bounds.append([bmin,bmax])
logpz_max= scipy.stats.multivariate_normal.logpdf( dim*[0], mean=dim*[0])
twice_pdf_diff=2*(logpz_max - base_pdf_res)
coverage_probs=np.linspace(0.01,0.99,100)
true_twice_llhs=scipy.stats.chi2.ppf(coverage_probs, df=dim)
## plot PDF for individual "word input data"
colors=pylab.cm.tab10.colors
cov_ax=fig.add_subplot(gridspec[0,num_words])
for word_index, wid in enumerate(word_ids):
helper_fns.visualize_pdf(model, fig, gridspec=gridspec[0,word_index], conditional_input=None, total_pdf_eval_pts=2000, nsamples=10000, contour_probs=[], hide_labels=True,bounds=bounds,s2_norm=sphere_plot_type)
## plot coverage
this_coverage=twice_pdf_diff[(wid[word_index]==test_data[:,word_index])]
act_cov=[]
for ind,true_cov in enumerate(coverage_probs):
act_cov.append(sum(this_coverage<true_twice_llhs[ind])/float(len(this_coverage)))
cov_ax.plot(coverage_probs, act_cov, label=r"$p(x|'%s')$" % words[word_index], color=colors[word_index])
cov_ax.plot([0.0,1.0],[0.0,1.0], color="k", lw=2.0, ls="--")
cov_ax.set_xlim(0,1)
cov_ax.set_ylim(0,1)
cov_ax.grid(True)
cov_ax.legend(loc="upper right")
cov_ax.set_title("Coverage")
fig.suptitle("pdf structure: %s" % "+".join(model.pdf_defs_list))
fig.tight_layout()
fig.savefig(fname)
pylab.close(fig)
#test_evals, standard_normal_base_evals, _=model(test_labels, conditional_input=test_data)
############################
if __name__ == "__main__":
parser = argparse.ArgumentParser('train_example')
parser.add_argument("-sentence", type=str, default="JAMMY FLOWS")
parser.add_argument("-pdf_def", type=str, default="e4+s2+e4")
parser.add_argument("-layer_def", type=str, default="gggg+n+gggg")
parser.add_argument("-train_size", type=int, default=200000)
parser.add_argument("-batch_size", type=int, default=20)
parser.add_argument("-test_size", type=int, default=1000)
parser.add_argument("-lr", type=float, default=0.001)
args=parser.parse_args()
seed_everything(1)
assert(args.train_size % args.batch_size==0)
## train data used for training
train_data, train_labels=sample_data(args.pdf_def, args.sentence, num_samples=args.train_size)
## test used to calculate coverage
test_data, test_labels=sample_data(args.pdf_def, args.sentence, num_samples=args.test_size)
extra_flow_defs=dict()
extra_flow_defs["n"]=dict()
extra_flow_defs["n"]["kwargs"]=dict()
extra_flow_defs["n"]["kwargs"]["zenith_type_layers"]="g"
extra_flow_defs["n"]["kwargs"]["use_extra_householder"]=0
word_pdf=jammy_flows.pdf(args.pdf_def, args.layer_def, conditional_input_dim=None, hidden_mlp_dims_sub_pdfs="128",flow_defs_detail=extra_flow_defs, use_custom_low_rank_mlps=False,
custom_mlp_highway_mode=4)
word_pdf.count_parameters(verbose=True)
    ## initialize params with the test sample (only yields an advantage for Gaussianization flows)
word_pdf.init_params(data=test_labels)
## start training loop
num_batches=args.train_size//args.batch_size
num_epochs=300
plot_every_n=200
glob_counter=0
cur_lr=args.lr
for ep_id in range(num_epochs):
optimizer = optim.Adam(word_pdf.parameters(), lr=cur_lr)
for batch_id in range(num_batches):
## get new batch
batch_data, batch_labels=train_data[batch_id*args.batch_size:batch_id*args.batch_size+args.batch_size], train_labels[batch_id*args.batch_size:batch_id*args.batch_size+args.batch_size]
## reset accumulated grad
optimizer.zero_grad()
## evaluate PDF
log_pdf, _,_=word_pdf(batch_labels)#, conditional_input=batch_data)
## neg log-loss
loss=-log_pdf.mean()
print("loss ", loss)
## backprop
loss.backward()
## take a gradient step
optimizer.step()
## plot test data
if(glob_counter%plot_every_n==0):
with torch.no_grad():
print("VALIDATION EVAL")
val_log_pdf, _, _=word_pdf(test_labels)#, conditional_input=test_data)
val_loss=-val_log_pdf.mean()
print("ep: %d / batch_id: %d / val-loss %.3f" % (ep_id, batch_id, val_loss))
print("before plotting")
print("----------------------------->")
plot_test(test_data, test_labels, word_pdf, args.sentence.split(" "), fname="./figs/%.6d.png" % glob_counter)
glob_counter+=1
cur_lr*=0.9 | python |
from m5.params import *
from m5.SimObject import SimObject
from Controller import RubyController
class PMMU(RubyController):
type = 'PMMU'
cxx_class = 'PMMU'
cxx_header = "mem/spm/pmmu.hh"
# version = Param.Int("");
page_size_bytes = Param.Int(512,"Size of a SPM page in bytes")
ruby_system = Param.RubySystem(NULL, "")
    responseFromSPM = Param.MessageBuffer("")
    responseToSPM = Param.MessageBuffer("")
    requestFromSPM = Param.MessageBuffer("")
    requestToSPM = Param.MessageBuffer("")
    responseToNetwork = Param.MessageBuffer("")
    requestToNetwork = Param.MessageBuffer("")
governor = Param.BaseGovernor("")
gov_type = Param.String("Local", "Governor type")
spm_s_side = SlavePort("Slave port where SPM pushes requests/responses")
spm_m_side = MasterPort("Master port to send requests/responses to SPM")
# system = Param.System(Parent.any, "System we belong to")
# system = Param.System("System we belong to")
# spm_memory = Param.SPM("")
# cache_memory = Param.BaseCache("")
| python |
import requests
import mimetypes
import hashlib
import base64
class Tebi:
def __init__(self, bucket, **kwargs):
self.bucket = "https://" + bucket
self.auth = kwargs.get('auth', None)
if (self.auth):
self.auth = "TB-PLAIN " + self.auth
def GetObject(self, key):
headers = {}
if (self.auth):
headers["Authorization"] = self.auth
response = requests.get(self.bucket+"/"+key, headers=headers)
return response
def PutObject(self, key, obj, **kwargs):
file = kwargs.get('file', None)
mime = kwargs.get('ContentType', None)
auth = kwargs.get('auth', self.auth)
CacheControl = kwargs.get('CacheControl', None)
data = obj
if (mime != None and mime == "auto" and file != None):
mime = mimetypes.guess_type(file)[0]
headers = {}
if (mime != None):
headers["Content-Type"] = mime
if (CacheControl != None):
headers["Cache-Control"] = CacheControl
if (self.auth):
headers["Authorization"] = auth
        if (file and not data):
            data = open(file, "rb").read()
        if (data is not None):
            # Content-MD5 is the base64-encoded binary MD5 digest (RFC 1864)
            headers["Content-MD5"] = base64.b64encode(hashlib.md5(data).digest()).decode()
        response = requests.put(self.bucket+"/"+key, headers=headers, data=data)
        return response
def ListObjects(self, key, **kwargs):
auth = kwargs.get('auth', self.auth)
headers = {
"Authorization": auth
}
response = requests.get(self.bucket+"/?"+key, headers=headers)
return response
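# Example usage (sketch only; the bucket host and credentials below are
# placeholders, and each call performs a real HTTP request):
#   client = Tebi("my-bucket.s3.tebi.io", auth="ACCESS_KEY:SECRET")
#   client.PutObject("hello.txt", b"hello world", ContentType="text/plain")
#   print(client.GetObject("hello.txt").text)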
| python |
from __future__ import absolute_import, print_function, unicode_literals
import cwltool.main
import pkg_resources
import signal
import sys
import logging
from cwl_tes.tes import TESWorkflow
from cwl_tes.__init__ import __version__
log = logging.getLogger("tes-backend")
log.setLevel(logging.INFO)
console = logging.StreamHandler()
# formatter = logging.Formatter("[%(asctime)s]\t[%(levelname)s]\t%(message)s")
# console.setFormatter(formatter)
log.addHandler(console)
def versionstring():
pkg = pkg_resources.require("cwltool")
if pkg:
cwltool_ver = pkg[0].version
else:
cwltool_ver = "unknown"
return "%s %s with cwltool %s" % (sys.argv[0], __version__, cwltool_ver)
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = cwltool.main.arg_parser()
parser = add_args(parser)
parsed_args = parser.parse_args(args)
if not len(args) >= 1:
print(versionstring())
print("CWL document required, no input file was provided")
parser.print_usage()
return 1
if parsed_args.version:
print(versionstring())
return 0
if parsed_args.tes is None:
print(versionstring())
parser.print_usage()
print("cwl-tes: error: argument --tes is required")
return 1
if parsed_args.quiet:
log.setLevel(logging.WARN)
if parsed_args.debug:
log.setLevel(logging.DEBUG)
blacklist_false = ["no_container", "disable_pull", "disable_net",
"custom_net", "no_match_user"]
for f in blacklist_false:
if vars(parsed_args).get(f):
log.warning("arg: '%s' has no effect in cwl-tes" % (f))
blacklist_true = ["enable_pull"]
for f in blacklist_true:
if not vars(parsed_args).get(f):
log.warning("arg: '%s' has no effect in cwl-tes" % (f))
# custom
if not parsed_args.rm_container:
log.warning("arg: 'leave_container' has no effect in cwl-tes")
tes_workflow = TESWorkflow(parsed_args.tes, vars(parsed_args))
# setup signal handler
def signal_handler(*args):
log.info(
"recieved control-c signal"
)
log.info(
"terminating thread(s)..."
)
log.warning(
"remote TES processes %s may keep running" %
([t.id for t in tes_workflow.threads])
)
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
return cwltool.main.main(
args=parsed_args,
executor=tes_workflow.executor,
makeTool=tes_workflow.make_tool,
versionfunc=versionstring,
logger_handler=console
)
def add_args(parser):
parser.add_argument(
"--tes",
type=str,
help="GA4GH TES Service URL"
)
return parser
if __name__ == "__main__":
sys.exit(main())
| python |
from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.utils.timezone import now
from rest_framework import generics
from bluebottle.bluebottle_drf2.pagination import BluebottlePagination
from bluebottle.clients import properties
from .models import Page
from .serializers import PageSerializer
class PageList(generics.ListAPIView):
queryset = Page.objects.all()
serializer_class = PageSerializer
pagination_class = BluebottlePagination
def get_queryset(self):
qs = super(PageList, self).get_queryset()
# Set language if supplied
language = self.kwargs.get('language', None)
if language:
qs = qs.filter(language=language)
qs = qs.filter(status=Page.PageStatus.published)
qs = qs.filter(publication_date__lte=now())
qs = qs.filter(Q(publication_end_date__gte=now()) |
Q(publication_end_date__isnull=True))
return qs
class PageDetail(generics.RetrieveAPIView):
queryset = Page.objects.all()
serializer_class = PageSerializer
def get_queryset(self):
qs = super(PageDetail, self).get_queryset()
qs = qs.filter(status=Page.PageStatus.published)
qs = qs.filter(publication_date__lte=now())
qs = qs.filter(Q(publication_end_date__gte=now()) |
Q(publication_end_date__isnull=True))
return qs
def get_object(self, queryset=None):
queryset = self.get_queryset()
try:
return queryset.get(
language=self.kwargs['language'],
slug=self.kwargs['slug']
)
except ObjectDoesNotExist:
try:
return queryset.get(
language=properties.LANGUAGE_CODE,
slug=self.kwargs['slug']
)
except ObjectDoesNotExist:
raise Http404
| python |
from networkx.algorithms import bipartite
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
class BipartiteGraphState(QuantumCircuit):
def __init__(self, bipartite_graph):
super().__init__()
self.graph = bipartite_graph
# Create a quantum register based on the number of nodes
# in W + the number of nodes in B (= total number of nodes in G)
self.white_nodes, self.black_nodes = bipartite.sets(self.graph)
self.qreg = QuantumRegister(len(self.black_nodes) + len(self.white_nodes))
self.creg = ClassicalRegister(len(self.black_nodes) + len(self.white_nodes))
# Create a circuit using the quantum register
self.circuit = QuantumCircuit(self.qreg, self.creg)
# For each vertex in W, apply a Hadamard gate
for vertex in self.white_nodes:
self.circuit.h(vertex)
# For each vertex in B, apply a Hadamard gate
for vertex in self.black_nodes:
self.circuit.h(vertex)
# For each edge e={x,y} apply a controlled-Z gate on its vertices
for x, y in self.graph.edges:
self.circuit.cz(x, y)
self.node_dict = self.build_node_dict()
def build_node_dict(self):
"""
create a node dictionary from node to integer index of a qubit
in a Qiskit circuit
:param self:
"""
        self.node_dict = dict()
        for count, node in enumerate(self.graph.nodes):
            self.node_dict[node] = count
        return self.node_dict
def x_measurement(self, qubit, cbit):
"""Measure 'qubit' in the X-basis, and store the result in 'cbit'"""
self.circuit.h(qubit)
self.circuit.measure(qubit, cbit)
self.circuit.h(qubit)
def x_measure_white(self):
"""
measure the white qubits in the Pauli X-basis
:param self:
"""
self.circuit.barrier()
for vertex in self.black_nodes:
self.circuit.measure(vertex, vertex)
self.circuit.barrier()
for vertex in self.white_nodes:
self.x_measurement(vertex, vertex)
def x_measure_black(self):
"""
measure the black qubits in the Pauli X-basis
:param self:
"""
self.circuit.barrier()
for vertex in self.white_nodes:
self.circuit.measure(vertex, vertex)
self.circuit.barrier()
for vertex in self.black_nodes:
self.x_measurement(vertex, vertex)
def apply_stabilizer(self, node):
"""
applies the stabilizer generator corresponding to node
:param self:
:param node: a node in self.graph
"""
self.circuit.x(self.node_dict[node])
for neighbor in self.graph.neighbors(node):
self.circuit.z(self.node_dict[neighbor])
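# Minimal usage sketch (assumes networkx is installed; the 2x2 complete
# bipartite graph and the final draw() call are illustrative only):
if __name__ == "__main__":
    import networkx as nx
    graph_state = BipartiteGraphState(nx.complete_bipartite_graph(2, 2))
    graph_state.x_measure_white()
    print(graph_state.circuit.draw())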
| python |
# pylint: disable=no-name-in-module
from collections import deque
from typing import Deque
from pydantic import BaseModel
from ..core.constants import Interval
from .timeframe import TimeFrame
class Window(BaseModel):
"""Holds a sequence of timeframes and additional metadata."""
interval: Interval
timeframes: Deque[TimeFrame] = deque()
| python |
#!/usr/bin/env python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
# keyboard lib
from pynput.keyboard import Key, Listener, Controller
# capslock status
from capslock_status import status
# pop up time in ms
time = 700
# get capslock status
is_capslock_on = status.get_capslock_status()
# show caps-lock on pop up
# for given time
# then hide the window
def show_on():
# build interfaces
builder = Gtk.Builder()
builder.add_from_file("interfaces/on.glade")
window = builder.get_object("capslock-on")
return window
# show caps-lock off pop up
# for given time
# then hide the window
def show_off():
# build interfaces
builder = Gtk.Builder()
builder.add_from_file("interfaces/off.glade")
window = builder.get_object("capslock-off")
return window
# listen keyboard
keyboard = Controller()
# custom exception
class MyException(Exception):
pass
def on_press(key):
    # define global variable for pynput
global is_capslock_on
    window = Gtk.Window()
    # exit keyboard listener on Escape
    if key == Key.esc:
        raise MyException(key)
if key == Key.caps_lock:
if not is_capslock_on:
window = show_on()
is_capslock_on = True
else:
window = show_off()
is_capslock_on = False
# show window and kill
window.show_all()
    GLib.timeout_add(time, window.hide)
# connect destroy event
window.connect("destroy", Gtk.main_quit)
    # quit the Gtk main loop after the pop-up time has elapsed
GLib.timeout_add(time, Gtk.main_quit)
Gtk.main()
# create keyboard listener
with Listener(on_press=on_press) as listener:
listener.join()
| python |
from mix import save_color_image, brightness_limitization
import os
import shutil
from argparse import ArgumentParser
import json
from utils import change_datatype
from utils import timestamp_to_datetime
from utils import Bands
def parse_arguments():
    parser = ArgumentParser(description='Create colored images and collect '
                                        'them into a folder.',
epilog='python color_images.py ./downloads')
parser.add_argument('directory', help='directory for images.')
parser.add_argument('-c', '--collect', help='directory to collect images.',
default=None)
parser.add_argument('--collect-only', help="collect only",
action='store_true')
parser.add_argument('-b', '--bright-limit', type=int,
                        help='Supremum of channel brightness.',
default=3500)
return parser.parse_args()
def color_images(directory, bright_limit=3500):
"""
    Search leaf folders in <directory> and create colored images
    :param directory: str, directory where to look
    :param bright_limit: int, supremum of channel brightness
"""
for root, dirs, files in os.walk(directory):
if len(dirs) == 0:
try:
product_dir = os.path.split(os.path.normpath(root))[0]
# open information about product
info = json.load(open(os.path.join(product_dir,
'info.json'), 'r'))
sentinel = info['Satellite']
if sentinel == 'Sentinel-2':
print('Coloring ' + root + '...')
save_color_image(root, Bands.RED, Bands.GREEN, Bands.BLUE,
'TCI1', bright_limit)
elif sentinel == 'Sentinel-1':
print('Changing DType to uint8 ' + root + '...')
for file in files:
if 'uint8' in file:
continue
new_file = os.path.splitext(file)[0] + '_uint8' + \
os.path.splitext(file)[1]
change_datatype(os.path.join(root, file),
os.path.join(root, new_file),
processor=lambda
x: brightness_limitization(x, 255))
print('\tuint8 file: ' + new_file)
else:
print('Unknown satellite')
except Exception as e:
print('Error: ' + 'Path: ' + root + '\n' + str(e))
def collect_images(search_directory, target='./colored'):
"""
Search colored images in <search_directory> and copy them
into target directory
    :param search_directory: str, directory to search images
:param target: str, directory to copy images
"""
for root, dirs, files in os.walk(search_directory):
for file in files:
if 'TCI1' in file or 'uint8' in file:
file_hint = ' '.join([os.path.splitext(file)[0]] +
os.path.normpath(root).split(os.sep)[-2:])
product_dir = os.path.split(os.path.normpath(root))[0]
# open information about product
info = json.load(open(os.path.join(product_dir,
'info.json'), 'r'))
sensing_start = timestamp_to_datetime(info['Sensing start'])
new_file = info['Satellite'] + \
' {:%Y-%m-%d %H:%M} '.format(sensing_start) + \
file_hint + '.tiff'
shutil.copy(os.path.join(root, file),
os.path.join(target, new_file))
if __name__ == '__main__':
args = parse_arguments()
if args.collect_only is False:
print('Coloring images in ' + args.directory)
color_images(args.directory, args.bright_limit)
if args.collect is not None:
print('Collecting files into ' + args.collect)
if os.path.isdir(args.collect) is False:
os.mkdir(args.collect)
collect_images(args.directory, args.collect)
| python |
def deleteWhitespaces(inputStr):
nonWhitespaces = inputStr.split(' ')
return ''.join(nonWhitespaces)
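# Note: split(' ') removes only space characters; ''.join(inputStr.split())
# would also drop tabs and newlines if that behaviour were wanted.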
| python |
"""Graph implementation using adjacency lists."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Dict, Set, Optional, Union, Tuple
from collections.abc import Iterable
@dataclass
class Node:
"""This class can be used standalone or with a Graph
(if fast access to the list of all nodes is required)
"""
value: Any
# Maps edge to weight
adjacent: Dict[Node, int] = field(default_factory=dict)
def edge(self, other: Node, weight: int = 1, rev_weight: Optional[int] = None):
"""Don't forget to call Graph.add_node() if you are using a Graph class."""
self.adjacent[other] = weight
other.adjacent[self] = weight if rev_weight is None else rev_weight
def __hash__(self) -> int:
"""Every node is unique, we cannot have node equality."""
return id(self)
@dataclass
class Graph:
nodes: Set[Node] = field(default_factory=set)
@staticmethod
def _normalize_node(node: Any) -> Node:
if isinstance(node, Node):
return node
return Node(node)
def add_node(self, node: Any, adjacent: Iterable[Node] = ()) -> Node:
node = self._normalize_node(node)
self.nodes.add(node)
for adj_node in adjacent:
node.edge(adj_node)
return node
def add_node_weights(
self,
node: Any,
        adjacent: Optional[Dict[Node, Union[int, Tuple[int, int]]]] = None,
) -> Node:
node = self._normalize_node(node)
self.nodes.add(node)
        for adj_node, weight in (adjacent or {}).items():
if isinstance(weight, tuple):
node.edge(adj_node, *weight)
else:
node.edge(adj_node, weight)
return node
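# Minimal usage sketch (node values and weights below are illustrative only):
if __name__ == "__main__":
    g = Graph()
    a = g.add_node("A")
    b = g.add_node("B", adjacent=[a])            # undirected edge A-B, weight 1
    g.add_node_weights("C", {a: 3, b: (2, 5)})   # C-A weight 3, C->B 2 / B->C 5
    for node in g.nodes:
        print(node.value, {n.value: w for n, w in node.adjacent.items()})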
| python |
# encoding = utf-8
"""Wrapper for API calls to ExtraHop."""
# COPYRIGHT 2020 BY EXTRAHOP NETWORKS, INC.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# This file is part of an ExtraHop Supported Integration. Make NO MODIFICATIONS below this line
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class ExtraHopClient(object):
"""
ExtraHopClient is a simple wrapper around Requests.Session to save authentication and
connection data.
"""
def __init__(self, host, api_key, verify_certs=False):
self.host = host
self.session = requests.Session()
self.session.headers = {
"Accept": "application/json",
"Authorization": f"ExtraHop apikey={api_key}",
}
self.session.verify = verify_certs
def get(self, path):
"""Send GET request to ExtraHop API."""
return self._api_request("get", path)
def post(self, path, data=None, json=None):
"""Send POST request to ExtraHop API."""
return self._api_request("post", path, data, json)
def patch(self, path, data=None, json=None):
return self._api_request("patch", path, data, json)
def delete(self, path):
return self._api_request("delete", path)
def _api_request(self, method, path, data=None, json=None):
"""Handle API requests to ExtraHop API."""
url = f"https://{self.host}/api/v1/{path}"
if method == "get":
rsp = self.session.get(url)
elif method == "post":
rsp = self.session.post(url, data=data, json=json)
elif method == "patch":
rsp = self.session.patch(url, data=data, json=json)
elif method == "delete":
rsp = self.session.delete(url)
else:
raise ValueError("Unsupported HTTP method {}".format(method))
rsp.raise_for_status()
return rsp
| python |
from distutils.core import setup
DESCRIPTION = ('Python interface to the Refinitiv Datastream (former Thomson '
'Reuters Datastream) API via Datastream Web Services (DSWS)')
# Long description to be published in PyPi
LONG_DESCRIPTION = """
**PyDatastream** is a Python interface to the Refinitiv Datastream (former Thomson
Reuters Datastream) API via Datastream Web Services (DSWS) (non free),
with some convenience functions. This package requires valid credentials for this
API.
For the documentation please refer to README.md inside the package or on the
GitHub (https://github.com/vfilimonov/pydatastream/blob/master/README.md).
"""
_URL = 'http://github.com/vfilimonov/pydatastream'
__version__ = __author__ = __email__ = None # will be extracted from _version.py
exec(open('pydatastream/_version.py').read()) # defines __version__ pylint: disable=W0122
setup(name='PyDatastream',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=_URL,
download_url=_URL + '/archive/v' + __version__ + '.zip',
author=__author__,
author_email=__email__,
license='MIT License',
packages=['pydatastream'],
install_requires=['requests'],
extras_require={
'pandas': ['pandas'],
},
classifiers=['Programming Language :: Python :: 3'],
)
| python |
from django.conf import settings
from django.contrib import admin
from django.template.response import TemplateResponse
from django.urls import path, resolve, reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.views.generic import View
from constance import config
class AdminBaseContextMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(title=self._admin_title, **kwargs)
context.update(admin.site.each_context(self.request))
return context
class CrazyArmsAdminSite(admin.AdminSite):
AdminBaseContextMixin = AdminBaseContextMixin
index_title = ""
empty_value_display = mark_safe("<em>none</em>")
site_url = None
nginx_proxy_views = (("View server logs", "/logs/", "common.view_logs"),)
if settings.ZOOM_ENABLED:
nginx_proxy_views += (("Administer Zoom over VNC", "/zoom/vnc/", "common.view_websockify"),)
if settings.HARBOR_TELNET_WEB_ENABLED:
nginx_proxy_views += (
(
"Liquidsoap harbor telnet (experimental)",
"/telnet/",
"common.view_telnet",
),
)
@property
def site_title(self):
return format_html("{} — Station Admin", config.STATION_NAME)
site_header = site_title
def __init__(self, *args, **kwargs):
self.extra_urls = []
super().__init__(*args, **kwargs)
def app_index_extra(self, request):
return TemplateResponse(
request,
self.index_template or "admin/app_index_extra.html",
{
**self.each_context(request),
"title": "Miscellaneous Configuration administration",
"app_list": False,
},
)
def app_index(self, request, app_label, extra_context=None):
return super().app_index(
request,
app_label,
extra_context={**(extra_context or {}), "extra_urls": []},
)
def each_context(self, request):
context = super().each_context(request)
current_url_name = resolve(request.path_info).url_name
is_extra_url = False
extra_urls = []
# Registered views
for title, pattern, permission in self.extra_urls:
if permission is None or request.user.has_perm(permission):
extra_urls.append((title, reverse(f"admin:{pattern.name}"), False))
if current_url_name == pattern.name:
is_extra_url = True
for title, url, permission in self.nginx_proxy_views:
if request.user.has_perm(permission):
extra_urls.append((title, url, True))
context.update(
{
"current_url_name": current_url_name,
"extra_urls": sorted(extra_urls),
"is_extra_url": is_extra_url,
}
)
return context
def register_view(self, route, title, kwargs=None, name=None):
if name is None:
name = route.replace("/", "").replace("-", "_")
def register(cls_or_func):
cls_or_func._admin_title = title
            # Build the view once; admin_view() is applied a single time below
            # (the original wrapped it twice), and plain view functions pass through.
            is_class_based = isinstance(cls_or_func, type) and issubclass(cls_or_func, View)
            view = cls_or_func.as_view() if is_class_based else cls_or_func
            pattern = path(
                route=f"settings/{route}",
                view=self.admin_view(view),
kwargs=kwargs,
name=name,
)
permission = getattr(cls_or_func, "permission_required", None)
self.extra_urls.append((title, pattern, permission))
return cls_or_func
return register
def get_urls(self):
return (
[
path(
"settings/",
view=self.admin_view(self.app_index_extra),
name="app_index_extra",
)
]
+ [pattern for _, pattern, _ in self.extra_urls]
+ super().get_urls()
)
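# A hypothetical usage sketch (not part of the original file): register_view is
# meant to be used as a decorator on an admin-only view, e.g.
#
#   admin_site = CrazyArmsAdminSite()
#
#   @admin_site.register_view("server-status/", title="Server status")
#   class ServerStatusView(AdminBaseContextMixin, TemplateView):
#       template_name = "admin/server_status.html"
#
# The view is then served under settings/server-status/ inside the admin and is
# listed in "extra_urls" for users holding its optional permission_required.
# TemplateView and the route name above are illustrative assumptions only.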
| python |
from robo_navegador import *
from dados_ritmistas import ler_dados
from alterar_docs import *
nomes = ('Matheus Delaqua Rocha De Jesus',
'Cecília')
if __name__ == '__main__':
renomear(nome_atual_pasta='Credenciamento TABU (File responses)')
mover(path=('Arquivo do Documento (File responses)', 'Comprovante de Matrícula (File responses)'))
site = Navegador()
site.logar('[email protected]', 'asequith')
lista = ler_dados()
for pessoa in lista:
        # ler_dados() stores the literal string 'Arquivo não encontrado\n' ("file not
        # found") when an attachment is missing, so that value must stay as-is.
        if (pessoa.arquivo_doc or pessoa.comprovante) != 'Arquivo não encontrado\n':
if pessoa.nome not in nomes:
site.cadastrar_ritmista(pessoa)
sleep(5)
else:
            print(f'\033[1;7;30mSkipping {pessoa.nome}...\033[m')
    print(f'\033[1;7;30mFinished: {site.contador} ritmistas registered\033[m')
| python |
import argparse
from pathlib import Path
from event_types import event_types
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(
'Train event classes models.'
'Results are saved in the models directory.'
)
)
args = parser.parse_args()
n_types = 3
start_from_DL2 = False
if start_from_DL2:
# Prod3b
# dl2_file_name = (
# '/lustre/fs21/group/cta/users/maierg/analysis/AnalysisData/uploadDL2/'
# 'Paranal_20deg/gamma_onSource.S.3HB9-FD_ID0.eff-0.root'
# )
# Prod5
dl2_file_name = (
'/lustre/fs22/group/cta/users/maierg/analysis/AnalysisData/'
'prod5-Paranal-20deg-sq08-LL/EffectiveAreas/'
'EffectiveArea-50h-ID0-NIM2LST2MST2SST2SCMST2-g20210921-V3/BDT.DL2.50h-V3.g20210921/'
'gamma_onSource.S.BL-4LSTs25MSTs70SSTs-MSTF_ID0.eff-0.root'
)
dtf = event_types.extract_df_from_dl2(dl2_file_name)
else:
dtf = event_types.load_dtf()
dtf_e = event_types.bin_data_in_energy(dtf)
labels, train_features = event_types.nominal_labels_train_features()
dtf_e = event_types.add_event_types_column(dtf_e, labels)
dtf_e_train, dtf_e_test = event_types.split_data_train_test(dtf_e)
all_models = event_types.define_classifiers()
selected_models = [
'MLP_classifier',
# 'MLP_relu_classifier',
# 'MLP_logistic_classifier',
# 'MLP_uniform_classifier',
# 'BDT_classifier',
# 'random_forest_classifier',
# 'ridge_classifier',
# # 'ridgeCV_classifier', # unnecessary, same as the ridge classifier
# 'SVC_classifier', # Fails to evaluate for some reason, all SVC based fail
# 'SGD_classifier',
# 'Gaussian_process_classifier', # Takes forever to train
# 'bagging_svc_classifier', # Fails to evaluate for some reason, all SVC based fail
# 'bagging_dt_classifier',
# 'oneVsRest_classifier', # Fails to evaluate for some reason
# 'gradient_boosting_classifier',
]
models_to_train = dict()
for this_model in selected_models:
this_model_name = '{}_ntypes_{:d}'.format(this_model, n_types)
models_to_train[this_model_name] = dict()
models_to_train[this_model_name]['train_features'] = train_features
models_to_train[this_model_name]['labels'] = 'event_type_{:d}'.format(n_types)
models_to_train[this_model_name]['model'] = all_models[this_model]
models_to_train[this_model_name]['test_data_suffix'] = 'classification'
trained_models = event_types.train_models(
dtf_e_train,
models_to_train
)
event_types.save_models(trained_models)
event_types.save_test_dtf(dtf_e_test, 'classification')
| python |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 LG Electronics Inc.
SPDX-License-Identifier: MIT
"""
import argparse
import copy
import logging
import os
import sys
import textwrap
from .tool_wrapper import get_tool_list, get_tool_wrapper, load_tools
from .context import WrapperContext
from .report import Report
from texttable import Texttable
LOGGER = logging.getLogger('SAGE')
def run_tools(ctx):
for toolname in get_tool_list():
option = ctx.get_tool(toolname)
if option is not None:
wrapper = get_tool_wrapper(toolname)(toolname, option)
if wrapper.get_tool_path(ctx) is None:
LOGGER.warning("* %s is not installed!!!", toolname)
continue
LOGGER.info("* %s is running...", toolname)
wrapper.run(ctx)
run_tools.__annotations__ = {'ctx': WrapperContext}
def generate_report(ctx, args_dict):
report = Report(ctx, args_dict)
table = Texttable(max_width=0)
table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)
table.add_rows(report.get_summary_table())
print(table.draw())
if ctx.output_path:
report.write_to_file(os.path.join(ctx.output_path, "sage_report.json"))
generate_report.__annotations__ = {'ctx': WrapperContext, 'args_dict': dict}
def main():
parser = argparse.ArgumentParser(
description="Static Analysis Group Execution",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--source-path", help="source path")
parser.add_argument("--build-path", help="build path")
parser.add_argument(
"--tool-path", help="if this option is specified, only tools in this path is executed")
parser.add_argument("--output-path", help="output path")
parser.add_argument("--exclude-path", help="exclude path")
parser.add_argument("--target-triple", help="compile target triple")
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
parser.add_argument(
"tools", nargs="*", help=textwrap.dedent("""\
List of tools.
Tool-specific command-line options separated by colons can be added after the tool name.
ex) 'cppcheck:--library=googletest'"""),
default=["cppcheck", "cpplint", "duplo", "metrix++"])
args = parser.parse_args()
args_dict = copy.deepcopy(vars(args))
default_exclude_path = " .git"
if args.exclude_path:
args.exclude_path += default_exclude_path
else:
args.exclude_path = default_exclude_path
log_level = logging.DEBUG if args.verbose else logging.WARNING
logging.basicConfig(stream=sys.stdout, level=log_level)
# load wrapper
LOGGER.info("load wrapper")
load_tools()
# make WrapperContext
ctx = WrapperContext(
args.tools, args.source_path, args.build_path, args.tool_path,
args.output_path, args.target_triple, args.exclude_path)
if not ctx.proj_file_exists():
LOGGER.error("There is no 'compile_commands.json'")
LOGGER.info("run tools")
run_tools(ctx)
# generate report
LOGGER.info("reporting")
generate_report(ctx, args_dict)
if __name__ == "__main__":
main()
| python |
from sqlalchemy import (
create_engine as create_engine,
MetaData, Table,
Column, Integer, Sequence,
String, ForeignKey, DateTime,
select, delete, insert, update, func
)
from sqlalchemy.sql import and_
from tornado import concurrent, ioloop
import datetime
import tornado
import sqlite3
#from concurrent.futures import ThreadPoolExecutor
metadata = MetaData()
tables = {
'servers': Table('servers', metadata,
Column('id', Integer(), Sequence('servers_id_seq'), primary_key=True, index=True),
Column('name', String(20), nullable=False, unique=True, index=True),
Column('address', String(16), nullable=False),
Column('port', String(10), nullable=False)),
'servers_logs': Table('servers_logs', metadata,
Column('id', Integer(), Sequence('servers_logs_id_seq'), primary_key=True, index=True),
Column('server_id', Integer(), nullable=False, index=True),
Column('time', DateTime, nullable=False),
Column('text', String(1024), nullable=False)),
'users': Table('users', metadata,
Column('id', Integer(), Sequence('users_id_seq'), primary_key=True, index=True)),
'servers_events': Table('servers_events', metadata,
Column('id', Integer(), Sequence('servers_events_seq'), primary_key=True, index=True),
Column('user_id', Integer(), nullable=False, index=True),
Column('server_id', Integer(), nullable=False, index=True),
Column('text', String(1024), nullable=False)),
'events_occured': Table('events_occured', metadata,
Column('event_id', Integer(), index=True),
Column('log_id', Integer(), index=True))
}
class DBHandler():
#executor = ThreadPoolExecutor(max_workers=4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.io_loop = ioloop.IOLoop.current()
self.engine = create_engine('sqlite:///database.db')
self.conn = self.engine.connect()
def shutdb(self):
        self.conn.close()
self.io_loop = None
self.engine = None
self.conn = None
#sqlite object cant be used in different threads, so i disabled this feature
#temporarily.
#@concurrent.run_on_executor
    def execute(self, query, *args):
        return self.conn.execute(query, *args)
def init_db():
'''
Fill db with initial environment.
'''
#engine = create_engine('postgresql://idfumg:qwerty@localhost/logmonitor_db')
engine = create_engine('sqlite:///database.db')
metadata.create_all(engine)
conn = engine.connect()
transaction = conn.begin()
conn.execute(delete(tables['servers_logs']))
conn.execute(delete(tables['servers']))
conn.execute(delete(tables['servers_events']))
conn.execute(delete(tables['users']))
conn.execute(delete(tables['events_occured']))
now = datetime.datetime.now()
servers = [
{'name': 'ГРТ', 'address': '192.168.1.1', 'port': '67890'},
{'name': 'ГРС', 'address': '192.168.1.2', 'port': '54321'},
{'name': 'TST', 'address': '192.168.1.3', 'port': '12345'}
]
conn.execute(insert(tables['servers']), servers)
servers_logs = []
for i in range(1000):
servers_logs.append({'server_id': 1, 'time': now, 'text': 'HTTPSRV МОВАПУ Warning! Unexpected behaviour! ' + str(i)})
for i in range(500):
servers_logs.append({'server_id': 1, 'time': now, 'text': 'search test ' + str(i)})
# for i in range(500):
# servers_logs.append({'name': 'ГРТ', 'time': now - datetime.timedelta(days=i), 'text': 'search test ' + str(i)})
grs_servers_logs = []
for i in range(10):
grs_servers_logs.append({'server_id': 2, 'time': now + datetime.timedelta(days=1), 'text': 'HTTPSRV МОВАПУ Warning! my own unexpected error! ' + str(i)})
events = [
{'user_id': 1, 'text': 'unexpected', 'server_id': 1},
{'user_id': 1, 'text': 'httpsrv', 'server_id': 1},
{'user_id': 1, 'text': 'error', 'server_id': 2},
]
conn.execute(insert(tables['servers_logs']), servers_logs)
conn.execute(insert(tables['servers_logs']), grs_servers_logs)
conn.execute(insert(tables['servers_events']), events)
print('database filled')
cursor = conn.execute(select([tables['servers']]))
servers = [server[1] for server in cursor]
transaction.commit()
conn.close()
return servers
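# A minimal usage sketch (assumption, not part of the original module): running
# this file directly rebuilds the SQLite schema and seeds it with sample data.
if __name__ == '__main__':
    print(init_db())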
| python |
inp = open("input/day6.txt", "r")
prvotne_ribe = [int(x) for x in inp.readline().split(",")]
inp.close()
prvotna_populacija = [0 for _ in range(9)]
for riba in prvotne_ribe:
prvotna_populacija[riba] += 1
def zivljenje(N):
    # "zivljenje" = "life": simulate the lanternfish population for N days
    populacija = prvotna_populacija[:]  # defensive copy of the starting counts
for _ in range(N):
nova_populacija = [0 for _ in range(9)]
for k in range(9):
if k == 0:
nova_populacija[8] += populacija[k]
nova_populacija[6] += populacija[k]
else:
nova_populacija[k-1] += populacija[k]
populacija = nova_populacija
return sum(populacija)
# --------------------------
print("1. del: ")
print(zivljenje(80))
print("2. del: ")
print(zivljenje(256))
| python |
import sys
import pandas as pd
import matplotlib.pyplot as plt
def main():
dfpath = 'nr_dataframes/final.pkl'
df = pd.read_pickle(dfpath)
    # Filter first; filtering after df.hist() would have no effect on the plot
    df = df[df[show] > 400]
    df.hist(column='length', bins=100)
    plt.show()
if __name__=="__main__":
show = sys.argv[1]
main()
| python |
from selenium import webdriver
import datetime
from . import helper
class NewVisitorTest(helper.FunctionalTestBase):
def setUp(self):
self.browser = webdriver.Firefox()
self.data = {
"dhuha": "4",
"tilawah_from": "1",
"tilawah_to": "20",
"ql": "5",
"shaum": "Iya",
"date": datetime.datetime.now().strftime("%Y-%m-%d")
}
def tearDown(self):
self.delete_item_by_date(self.data["date"])
self.logout()
self.browser.quit()
#region helper methods
def assert_data_saved_correctly(self):
dhuha_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Dhuha']/td[2]")
self.assertIn(self.data["dhuha"], dhuha_display.text)
ql_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Qiyamul Lail']/td[2]")
self.assertIn(self.data["ql"], ql_display.text)
shaum_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Shaum']/td[2]")
self.assertIn(self.data["shaum"], shaum_display.text)
tilawah_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Tilawah']/td[2]")
self.assertIn(self.data["tilawah_from"], tilawah_display.text)
self.assertIn(self.data["tilawah_to"], tilawah_display.text)
#endregion
def login_entrydata_searchreport_logout(self):
        # Brian hears about the online daily mutaba'ah app from a WhatsApp group
        # He tries to open the app's home page
self.browser.get("http://localhost:8000")
self.try_logout()
        # Brian sees no menus at all except a login link
self.assertEquals(len(self.browser.find_elements_by_id("user-email")), 0)
self.assertEquals(len(self.browser.find_elements_by_id("logout")), 0)
self.assertEquals(len(self.browser.find_elements_by_id("menu-entry")), 0)
self.assertEquals(len(self.browser.find_elements_by_id("menu-report")), 0)
self.login()
        # After logging in, Brian sees menu links to the 'Entry' and 'Report' pages
self.assertEquals(len(self.browser.find_elements_by_id("menu-entry")), 1)
self.assertEquals(len(self.browser.find_elements_by_id("menu-report")), 1)
        # Brian opens the 'Report' page to confirm there is no data yet,
        # since this is the first time he has used this mutaba'ah app
self.navigate_to_report()
report_items = self.find_report_items_by_date()
self.assertEquals(len(report_items), 0)
        # Brian then opens the 'Entry' page
        # and fills in the mutaba'ah data for today's date
self.navigate_to_entry()
self.create_or_edit_data(self.data)
        # After submitting, Brian sees the confirmation page showing the data
        # exactly as he entered it
self.assert_data_saved_correctly()
# error: AssertionError: u"4 raka'at" != '4'
        # Brian switches to the 'Report' page to make sure the data he just
        # submitted shows up there
self.navigate_to_report()
report_items = self.find_report_items_by_date(self.data["date"])
self.assertEquals(len(report_items), 1)
report_item = report_items[0]
        # Brian notices one of the inputs is wrong,
        # so he updates the Dhuha value with the correct number
self.data["dhuha"] = "6"
report_item.click()
btn_edit = self.browser.find_element_by_id("edit")
btn_edit.click()
self.browser.switch_to.window(self.browser.window_handles[1])
self.create_or_edit_data(self.data)
        # After submitting, Brian sees the confirmation page showing the data
        # from the latest update,
        # then he closes the confirmation window
self.assert_data_saved_correctly()
self.browser.close()
self.browser.switch_to.window(self.browser.window_handles[0])
| python |
# Generated by Django 2.0.6 on 2018-06-14 08:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('course', '0007_auto_20180613_2156'),
('voting', '0005_auto_20180613_2201'),
]
operations = [
migrations.CreateModel(
name='UserTaggingCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_date', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('tag_course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_tags', to='course.Course', verbose_name='Tagging course')),
('tagger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Course Tagger')),
('tags', models.ManyToManyField(to='voting.Tags', verbose_name="User's tag(s) for this course")),
],
options={
'verbose_name_plural': 'User Reviews',
'verbose_name': 'User Review',
},
),
]
| python |
# Generated by Django 3.2.9 on 2021-11-24 15:56
from django.db import migrations
EVENT_TYPES = (
(1, "CREATED", "Created the resourcing request"),
(2, "UPDATED", "Updated the resourcing request"),
(3, "SENT_FOR_APPROVAL", "Sent the resourcing request for approval"),
(4, "AMENDING", "Amending the resourcing request"),
(5, "SENT_FOR_REVIEW", "Sent the amendments for review"),
(6, "REVIEWED_AMENDMENTS", "Reviewed the amendments"),
(7, "GROUP_APPROVED", "A group approved the resourcing request"),
(8, "GROUP_REJECTED", "A group rejected the resourcing request"),
(9, "COMMENTED", "Somebody commented on the resourcing request"),
(10, "APPROVED", "The resourcing request was approved"),
)
def insert_event_types(apps, schema_editor):
EventType = apps.get_model("event_log", "EventType")
for pk, code, name in EVENT_TYPES:
EventType.objects.create(pk=pk, code=code, name=name)
def delete_event_types(apps, schema_editor):
EventType = apps.get_model("event_log", "EventType")
EventType.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
("main", "0027_auto_20211123_1605"),
("event_log", "0001_initial"),
]
operations = [migrations.RunPython(insert_event_types, delete_event_types)]
| python |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class RemoteProfile(models.Model):
host = models.URLField(max_length=200)
api_key = models.CharField(max_length=128)
def __str__(self):
return self.host
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField()
api_key = models.CharField(max_length=128, unique=True)
remote_profiles = models.ManyToManyField(RemoteProfile)
def __str__(self):
return self.user.__str__()
class Tag(models.Model):
name = models.CharField(max_length=128, blank=False, unique=True)
def __str__(self):
return self.name
class Post(models.Model):
slug = models.SlugField(max_length=200, unique=True)
title = models.CharField(max_length=256)
content = RichTextUploadingField(blank=True)
password = models.CharField(max_length=64, blank=True)
image = models.ImageField(upload_to='img/', default=None)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag, blank=True)
published = models.BooleanField(default=False)
def __str__(self):
return self.title
class Page(models.Model):
slug = models.SlugField(max_length=200, unique=True)
order = models.IntegerField(default=0)
link_title = models.CharField(max_length=32)
content = RichTextUploadingField(blank=True)
published = models.BooleanField(default=False)
LOCATION_CHOICES = [
('NAV', 'Navbar'),
('SIDE', 'Sidebar'),
('FOOT', 'Footer'),
]
location = models.CharField(max_length=4, choices=LOCATION_CHOICES, default='NAV')
def __str__(self):
return self.link_title
| python |
#!/usr/bin/env python
# $Id: mailtrim.py,v 1.1 2002/05/31 04:57:44 msoulier Exp $
"""The purpose of this script is to trim a standard Unix mbox file. If the
main function is called, it expects two parameters in argv. The first is the
number of most recent messages to keep. The second is the path to the mbox
file."""
import sys, string, os
from tempfile import mktemp
from shutil import copyfile
error = sys.stderr.write
def count_messages(file):
"""The purpose of this function is to count the messages in the mailbox,
rewind the mailbox seek pointer, and then return the number of messages in
the mailbox file."""
count = 0
while 1:
line = file.readline()
if not line: break
if line[:5] == "From ":
count = count + 1
file.seek(0)
return count
def trim(file, keep):
"""This purpose of this function is to perform the actual trimming of the
mailbox file."""
count = count_messages(file)
print "\nThere are %d messages in the mailbox file." % count
if count <= keep:
print "\nThis file already contains less than the desired number of"
print "messages. Nothing to do."
return
remove = count - keep
print "\nNeed to remove %d messages..." % remove
tempfilename = mktemp()
tempfile = open(tempfilename, "w")
copying = 0
while 1:
line = file.readline()
if not line: break
if line[:5] == "From ":
if remove:
remove = remove - 1
continue
else:
copying = 1
if not copying:
continue
tempfile.write(line)
tempfile.close()
copyfile(tempfilename, file.name)
os.unlink(tempfilename)
def main():
"""This function expects sys.argv to be set appropriately with the
required options, mentioned in the module's docstring. It is the entry
point for the rest of the program."""
if len(sys.argv) != 3:
error("Usage: %s <number to keep> <mbox file>\n" % sys.argv[0])
sys.exit(1)
keep = string.atoi(sys.argv[1])
filename = sys.argv[2]
if not os.path.exists(filename):
error("ERROR: File %s does not exist\n" % filename)
sys.exit(1)
print "Trimming %s to %d messages..." % (filename, keep)
file = open(filename, "r")
trim(file, keep)
file.close()
print "\nDone trimming %s." % filename
if __name__ == '__main__': main()
| python |
# 'hello_module.py'
def helloworld():
print ("Hello World!")
def goodbye():
print ("Good Bye Dear!")
| python |
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from .views import OrderView, PayNotifyView, OrderQueryView
urlpatterns = [
url(r"^order/$", OrderView.as_view(), name="order"),
url(r"^notify/$", csrf_exempt(PayNotifyView.as_view()), name="notify"),
url(r"^orderquery/$", OrderQueryView.as_view(), name="orderquery"),
]
| python |
import flickr_api
import win32api, win32con, win32gui
username = 'NASA Goddard Photo and Video'
flickr_api.set_keys(api_key='73ec08be7826d8b0a608151ce5faaf9d', api_secret='fbb2fcd772ce44a6')
user = flickr_api.Person.findByUserName(username)
photos = user.getPublicPhotos()
print photos[0]
photos[0].save(photos[0].title+".jpg")
def setWallpaper(path):
key = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER,"Control Panel\\Desktop",0,win32con.KEY_SET_VALUE)
win32api.RegSetValueEx(key, "WallpaperStyle", 0, win32con.REG_SZ, "0")
win32api.RegSetValueEx(key, "TileWallpaper", 0, win32con.REG_SZ, "0")
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, path, 1+2)
if __name__== "__main__":
    path = r'C:\Users\djs04_000\documents\visual studio 2013\Projects\WallSpace\WallSpace\Hubble Observes One-of-a-Kind Star Nicknamed ‘Nasty’.jpg'
    setWallpaper(path)
| python
import math
from error import Error
from dataclasses import dataclass
class Value:
def add(self, other):
self.illegal_operation()
def subtract(self, other):
self.illegal_operation()
def multiply(self, other):
self.illegal_operation()
def divide(self, other):
self.illegal_operation()
def mod(self, other):
self.illegal_operation()
def eq(self, other):
self.illegal_operation()
def ne(self, other):
self.illegal_operation()
def lt(self, other):
self.illegal_operation()
def gt(self, other):
self.illegal_operation()
def le(self, other):
self.illegal_operation()
def ge(self, other):
self.illegal_operation()
def and_(self, other):
self.illegal_operation()
def or_(self, other):
self.illegal_operation()
def xor(self, other):
self.illegal_operation()
def plus(self):
self.illegal_operation()
def minus(self):
self.illegal_operation()
def not_(self):
self.illegal_operation()
def invert(self):
self.illegal_operation()
def pound(self):
self.illegal_operation()
def illegal_operation(self):
raise Error('Illegal operation')
def __repr__(self):
return f'{self.value}'
@dataclass
class Number(Value):
value: float
def add(self, other):
if isinstance(other, Number):
return Number(self.value + other.value)
else:
self.illegal_operation()
def subtract(self, other):
if isinstance(other, Number):
return Number(self.value - other.value)
else:
self.illegal_operation()
def multiply(self, other):
if isinstance(other, Number):
return Number(self.value * other.value)
else:
self.illegal_operation()
def divide(self, other):
if isinstance(other, Number):
return Number(self.value / other.value)
else:
self.illegal_operation()
def mod(self, other):
if isinstance(other, Number):
return Number(self.value % other.value)
else:
self.illegal_operation()
def eq(self, other):
if isinstance(other, Number):
return Number(float(self.value == other.value))
else:
return Number(0.0)
def ne(self, other):
if isinstance(other, Number):
return Number(float(self.value != other.value))
else:
return Number(1.0)
def lt(self, other):
if isinstance(other, Number):
return Number(float(self.value < other.value))
else:
return self.illegal_operation()
def gt(self, other):
if isinstance(other, Number):
return Number(float(self.value > other.value))
else:
return self.illegal_operation()
def le(self, other):
if isinstance(other, Number):
return Number(float(self.value <= other.value))
else:
return self.illegal_operation()
def ge(self, other):
if isinstance(other, Number):
return Number(float(self.value >= other.value))
else:
return self.illegal_operation()
def and_(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) and bool(other.value)))
else:
return self.illegal_operation()
def or_(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) or bool(other.value)))
else:
return self.illegal_operation()
def xor(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) != bool(other.value)))
else:
return self.illegal_operation()
def plus(self):
return Number(+self.value)
def minus(self):
return Number(-self.value)
def not_(self):
return Number(float(not bool(self.value)))
def invert(self):
return Number(float(~math.floor(self.value)))
def __repr__(self):
return f'{self.value}'
@dataclass
class String(Value):
value: str
def add(self, other):
if isinstance(other, String):
return String(self.value + other.value)
else:
self.illegal_operation()
def eq(self, other):
if isinstance(other, String):
return Number(float(self.value == other.value))
else:
return Number(0.0)
def ne(self, other):
if isinstance(other, String):
return Number(float(self.value != other.value))
else:
return Number(1.0)
def pound(self):
return Number(float(len(self.value)))
def __repr__(self):
return f'{self.value}'
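# A brief usage note (not part of the original module): the wrappers above are
# meant to be driven by an interpreter, e.g.
#
#   Number(2.0).add(Number(3.0))    # -> 5.0
#   Number(2.0).lt(Number(3.0))     # -> 1.0
#   String('ab').add(String('cd'))  # -> abcd
#   Number(1.0).add(String('x'))    # raises Error('Illegal operation')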
@dataclass
class At(Value):
def eq(self, other):
return Number(float(isinstance(other, At)))
def ne(self, other):
return Number(float(not isinstance(other, At)))
def __repr__(self):
return '@'
@dataclass
class Func(Value):
func: any
def __repr__(self):
        return '<function>'
| python
#
# Memento
# Backend
# Notification Models
#
import re
from datetime import datetime
from sqlalchemy.orm import validates
from ..app import db
# defines a channel where notifications are sent
class Channel(db.Model):
# kinds/types
class Kind:
Task = "task"
Event = "event"
Notice = "notice"
# model fields
id = db.Column(db.Integer, primary_key=True)
kind = db.Column(db.String(64), nullable=False)
# relationships
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
notifications = db.relationship("Notification", backref=db.backref("channel"),
lazy=True)
@validates('kind')
def validate_kind(self, key, kind):
kind_list = [Channel.Kind.Task,
Channel.Kind.Event,
Channel.Kind.Notice]
if not kind:
raise AssertionError("kind must not be empty")
elif kind not in kind_list:
raise AssertionError('Enter either Event , Task or Notice')
else:
return kind
# defines a notification that is send to a channel
class Notification(db.Model):
# model fields
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256), nullable=False)
description = db.Column(db.String(1024), nullable=True)
firing_time = db.Column(db.DateTime, nullable=False) # utc timezone
# relationships
channel_id = db.Column(db.Integer, db.ForeignKey("channel.id"), nullable=True)
@validates('title')
def validate_title (self, key, title):
if not title:
raise AssertionError('title must not be empty')
elif len(title) < 2 or len(title) > 256:
raise AssertionError('must be between 2 to 256 characters long')
else:
return title
@validates('description')
def validate_description (self, key, description):
if len(description) > 1024:
raise AssertionError("Description must not exceed 1024 characters")
else:
return description
## convenience properties
# checks if the notification is pending firing
# returns True if pending firing False otherwise
@property
def pending(self):
time_till_fire = (self.firing_time - datetime.utcnow()).total_seconds()
# max secs after firing time for a notification to be considered still pending
pending_window = 60.0
        return time_till_fire > -pending_window
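# A hypothetical usage sketch (not part of the original module); it assumes an
# application context and db session are already configured elsewhere:
#
#   channel = Channel(kind=Channel.Kind.Task, user_id=1)
#   note = Notification(title="Stand-up", description="Daily stand-up meeting",
#                       firing_time=datetime(2030, 1, 1, 9, 0), channel=channel)
#   db.session.add_all([channel, note])
#   db.session.commit()
#   print(note.pending)  # True while the firing time is still in the future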
| python |
import unittest
from unittest.mock import Mock
from pydictionaria import sfm_lib
from clldutils.sfm import SFM, Entry
def test_normalize():
from pydictionaria.sfm_lib import normalize
sfm = SFM([Entry([('sd', 'a__b')])])
sfm.visit(normalize)
assert sfm[0].get('sd') == 'a b'
def test_split_join():
from pydictionaria.sfm_lib import split, join
assert split(join(['a', 'b'])) == ['a', 'b']
def test_Entry():
from pydictionaria.sfm_lib import Entry
e = Entry.from_string("""
\\lx lexeme
\\hm 1
\\marker value
""")
assert e.id == 'lexeme 1'
e.upsert('marker', 'new value')
assert e.get('marker') == 'new value'
e.upsert('new_marker', 'value')
assert e.get('new_marker') == 'value'
def test_ComparisonMeanings(mocker):
from pydictionaria.sfm_lib import Entry, ComparisonMeanings
class Concepticon(object):
conceptsets = {1: mocker.Mock(id='1', gloss='gloss', definition='definition')}
def lookup(self, *args, **kw):
return [[(None, 1)]]
cm = ComparisonMeanings(Concepticon())
e = Entry([('lx', 'lexeme'), ('de', 'meaning')])
cm(e)
assert 'gloss' in e.get('zcom2')
e = Entry([('lx', 'lexeme'), ('ge', 'gl.oss')])
cm(e)
assert 'gloss' in e.get('zcom2')
class ExampleExtraction(unittest.TestCase):
def test_separate_examples_from_entry(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation'),
('dt', 'time stamp')])
new_entry = extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(new_entry, [
('lx', 'headword'),
('xref', example.id),
('dt', 'time stamp')])
def test_marker_mapping(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('tx', 'primary text'),
('ft', 'translation'),
('lemma', 'headword')])
def test_generation_of_lemma_marker(self):
# Side Question: Is it bad that the lemma marker is appended to the end?
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('tx', 'primary text'),
('ft', 'translation'),
('lemma', 'headword')])
def test_merging_of_lemma_marker(self):
example_markers = {'lemma', 'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('lemma', 'other_headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('lemma', 'other_headword ; headword'),
('tx', 'primary text'),
('ft', 'translation')])
def test_multiple_examples(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
def test_there_might_be_stuff_before_xv(self):
example_markers = {'rf', 'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('rf', 'source 2'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('rf', 'source 3'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 1'),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('rf', 'source 2'),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('rf', 'source 3'),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
def test_there_might_be_stuff_after_xe(self):
example_markers = {'xv', 'xe', 'z0'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('z0', 'gloss ref 1'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('z0', 'gloss ref 2'),
('xv', 'primary text 3'),
('xe', 'translation 3'),
('z0', 'gloss ref 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('z0', 'gloss ref 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('z0', 'gloss ref 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('z0', 'gloss ref 3'),
('lemma', 'headword')])
def test_missing_xe(self):
example_markers = {'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', 'primary text 2'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_xv_in_the_middle(self):
example_markers = {'xv', 'mid1', 'mid2', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('mid1', 'mid1 1'),
('xv', 'primary text 1b'),
('mid2', 'mid2 1'),
('xe', 'translation 1')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1 primary text 1b'),
('mid1', 'mid1 1'),
('mid2', 'mid2 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
def test_rf_in_the_middle(self):
example_markers = {'rf', 'xv', 'mid1', 'mid2', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('mid1', 'mid1 1'),
('rf', 'source 2'),
('xv', 'primary text 2'),
('mid2', 'mid2 2'),
('xe', 'translation 2')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 2'),
('tx', 'primary text 2'),
('mid2', 'mid2 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_missing_xe_and_empty_xv(self):
example_markers = {'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', ''),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_two_xv_markers_at_the_beginning(self):
example_markers = {'rf', 'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('rf', 'source 2'),
('xe', 'translation 2'),
('rf', 'source 3'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 1'),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('rf', 'source 3'),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_missing_beginning(self):
example_markers = {'rf', 'xv', 'xe', 'other_marker'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('other_marker', 'other marker 1'),
('xe', 'translation 1'),
('other_marker', 'other marker 2'),
('xe', 'translation 2'),
('xv', 'primary text 3'),
('other_marker', 'other marker 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('other_marker', 'other marker 1'),
('ft', 'translation 1'),
# Note: trailing stuff ends up in the previous example, because we
# never know, when an example *truly* ends
('other_marker', 'other marker 2'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('other_marker', 'other marker 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
| python |
# shuffle randomly shuffles a list, and choice picks a random item from a sequence
from random import choice, shuffle
# the external module termcolor is used to print colored output
from termcolor import colored, cprint
# pyfiglet (external module) renders text as ASCII art
import pyfiglet
# found_syn returns synonyms of the word the player entered [see synonym.py]
from synonym import found_syn
# colors available in termcolor
ava_colors = ("red", "blue", "green", "yellow", "blue", "magenta", "cyan")
# decorate() prints a string in a randomly chosen color
def decorate(str):
    cprint(colored(str, choice(ava_colors)))
# ascii_text() prints a string as colored ASCII art
def ascii_text(str):
    text = pyfiglet.figlet_format(str)
    decorate(text)  # print the ASCII art in color
# jumble() shuffles the letters of the given word
def jumble(word):
    # shuffle only works on lists, so turn the word into a list of letters
    jumble_word = list(word)
    # shuffle the list of letters
    shuffle(jumble_word)
    # join the letters back into a single string
    shuffle_word = ''.join(jumble_word)
    # if the shuffled word equals the original, shuffle again; otherwise return it
    if(word != shuffle_word):
        return shuffle_word
    else:
        return jumble(word)  # the original dropped this return and yielded None
# display a hint message; factored out to keep the code DRY (don't repeat yourself)
def give_hint(hintMsg, hint, word, join="with"):
    decorate(f"\n The word {hintMsg} {join} {hint}")
    answer = input().lower()
    # if the player guesses correctly after the hint, return True so the caller moves on
    if(answer == word):
        return True
# show up to 3 hints to the player
def get_hint(word):
decorate("Hint ---> ")
while(True):
# 1st hint only shows the first letter of the word
if(give_hint("starts", word[0], word)):
return True
# 2nd hint only shows the last letter of the word
elif(give_hint("ends", word[len(word) - 1], word)):
return True
else:
            # 3rd hint shows one synonym (nearest meaning) of the word
            # found_syn finds a synonym and returns it [see synonym.py]
            synonym = found_syn(word)
            # if a synonym was found, show it to the player
            if(synonym):
                if(give_hint("synonyms", choice(synonym), word, "is")):
                    # if the player guesses correctly after seeing the synonym, report success
                    return True
            # otherwise reveal the original answer to the player
            else:
                print()  # blank line for spacing
break
| python |
from setuptools import setup
setup(
name='COERbuoyOne',
version='0.2.0',
author='Simon H. Thomas',
author_email='[email protected]',
packages=['COERbuoyOne'],
url='http://coerbuoy.maynoothuniversity.ie',
license='LICENSE.txt',
description='A realistic benchmark for Wave Enegery Converter controllers',
long_description=open('README.txt').read(),
install_requires=[
"numpy",
"scipy",
"pandas",
"COERbuoy",
],
include_package_data=True,
)
| python |
from setuptools import setup
setup(
name='ShapeWorld',
version='0.1',
description='A new test methodology for multimodal language understanding',
author='Alexander Kuhnle',
author_email='[email protected]',
keywords=[],
license='MIT',
url='https://github.com/AlexKuhnle/ShapeWorld',
packages=['shapeworld'],
install_requires=['numpy', 'pillow'])
| python |
class Solution:
def validWordSquare(self, words):
"""
:type words: List[str]
:rtype: bool
"""
m = len(words)
if m != 0:
n = len(words[0])
else:
n = 0
if m != n:
return False
for x in range(m):
n = len(words[x])
c = 0
#print('x', x)
for y in range(m):
if len(words[y]) < x + 1:
break
c += 1
if c != n:
return False
for y in range(n):
if words[x][y] != words[y][x]:
return False
return True
"""
Given a sequence of words, check whether it forms a valid word square.
A sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).
Note:
The number of words given is at least 1 and does not exceed 500.
Word length will be at least 1 and does not exceed 500.
Each word contains only lowercase English alphabet a-z.
Example 1:
Input:
[
"abcd",
"bnrt",
"crmy",
"dtye"
]
Output:
true
"""
| python |
import matplotlib.pyplot as plt
from models import *
device="cuda:0" if torch.cuda.is_available() else "cpu"
def plot_random():
"""
Plots a random character from the Normal Distribution N[0,5).
No arguments
"""
# dec.eval()
samp=(torch.randn(1,8)*5).float().to(device)
plt.imshow(dec(samp).reshape(28,28).squeeze().detach().cpu().numpy())
return plt.show()
def plot_losses(recloss,dloss,gloss):
"""
Function which plots graph of all losses.
Args:
recloss (list or iterable type object): Object containing recombination loss for each epoch/iteraction.
dloss (list or iterable type object): Object containing discriminator loss.
gloss (list or iterable type object): Object containing generator loss.
"""
plt.plot(recloss,label='recombination loss')
plt.plot(dloss,label='discriminator loss')
plt.plot(gloss,label='gen loss')
plt.legend()
return plt.show()
def interpolate_characters(n,s1,s2,filename=None,cmap=None):
"""
Function which returns a plot of n-linearly interpolated figures between s1 and s2.
Args:
n (Integer): Number of plots you want.
s1 (torch.tensor): Image one.
s2 (torch.tensor): Image two.
filename (String): Name of image you want to store the plot as. Defaults to None.
cmap (String): Custom matplotlib cmap. Defaults to 'Greens'.
"""
f, axarr = plt.subplots(ncols=n)
# dec.eval()
if cmap is not None:
plt.set_cmap(cmap)
else:
plt.set_cmap('Greens')
plt.axis('off')
m=(s2-s1)/n
for i in range(n):
latz=m*(i+1)+s1
image=dec(latz).reshape(28,28).detach().cpu().numpy()
axarr[i].imshow(image)
axarr[i].axis("off")
if filename is not None:
plt.savefig(filename,bbox_inches='tight')
return plt.show()
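# A hypothetical usage sketch (not part of the original module); it assumes a
# trained decoder `dec` with an 8-dimensional latent space, as used above:
#
#   s1 = (torch.randn(1, 8) * 5).float().to(device)
#   s2 = (torch.randn(1, 8) * 5).float().to(device)
#   plot_random()
#   interpolate_characters(8, s1, s2, filename='interp.png')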
| python |
duration_seconds = int(input())
seconds = duration_seconds % 60
temp = duration_seconds // 60
minutes = temp % 60
temp = temp // 60
hours = temp  # the remaining value is whole hours; wrapping with % 60 would misreport long durations
print(f"{hours}:{minutes}:{seconds}")
| python |
import pickle
import os
import sys
import genetic_algorithm as ga
import game
import pygame
import numpy as np
import snake
def save(generation, details, filename="generation"):
"""
Saves a snakes generation after checking if a file with same name
already exists (also asks for a new name before exiting)
"""
if not isinstance(filename, str):
raise TypeError("Expected a string, received a " + type(filename).__name__)
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
if not isinstance(details, dict):
raise TypeError("Expected a dictionary, received a " + type(details).__name__)
# setting path filename and checking if it already exists
if not os.path.exists("models"):
os.mkdir('models')
path_filename = "models/" + filename
already_exists = os.path.isfile(path_filename)
if already_exists:
answer = get_yes_no("A file with this name already exists, do you want to overwrite it? [yes/no]")
if not answer:
filename = input("Please enter the new name: ")
save(generation, details, filename)
exit()
with open(path_filename, "wb") as f:
pickle.dump(generation, f)
pickle.dump(details, f)
print(filename + " is correctly saved!")
def load(filename="generation"):
"""
Loads a snakes generation
"""
if not isinstance(filename, str):
raise TypeError("Expected a string, received a " + type(filename).__name__)
# setting path filename and checking if it already exists
path_filename = "models/" + filename
exists = os.path.isfile(path_filename)
if exists:
with open(path_filename, "rb") as f:
generation = pickle.load(f)
details = pickle.load(f)
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
sn.is_alive = True
sn.length = 1
sn.occupied = []
sn.fitness = 0
return generation, details
else:
print("Error: file not found")
exit()
def get_yes_no(question):
"""
Used to get a yes or no answer
"""
if not isinstance(question, str):
raise TypeError("Expected a string, received a " + type(question).__name__)
yes = {"yes", "y", "ye"}
no = {"no", "n"}
while True:
print(question)
answer = input().lower()
if answer in no:
return False
elif answer in yes:
return True
else:
print("Please respond with yes or no!")
def train(generation=[], details={}, snakes=10, shape=[], generations=1,
size=10, view=False, end=100):
"""
Used to train the model
"""
if not isinstance(generation, list):
raise TypeError("Expected a list, received a " + type(generation).__name__)
if not isinstance(details, dict):
raise TypeError("Expected a dict, received a " + type(details).__name__)
if not isinstance(snakes, int):
raise TypeError("Expected an int, received a " + type(snakes).__name__)
if not isinstance(shape, list):
raise TypeError("Expected a string, received a " + type(shape).__name__)
if not isinstance(generations, int):
raise TypeError("Expected an int, received a " + type(generations).__name__)
if not isinstance(size, int):
raise TypeError("Expected an int, received a " + type(size).__name__)
if not isinstance(view, bool):
raise TypeError("Expected a bool, received a " + type(view).__name__)
if not isinstance(end, int):
raise TypeError("Expected an int, received a " + type(end).__name__)
# initializing best results
best_generation = []
best_result = -1
best_index = 0
if not generation:
generation = ga.create_generation(generation, snakes, shape)
else:
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
snakes = len(generation)
size = details["game_size"]
end = details["duration"]
# running the train simulation
for gen in range(generations):
generation = ga.create_generation(generation)
for sn in generation:
g = game.game(size, view, end)
g.add_snake(sn)
while g.snake.is_alive:
g.play()
if view: esc_exit()
result = np.mean([x.fitness for x in generation])
print("generation", gen+1, "/", generations, ":", result)
# updating best results
if result >= best_result:
best_generation = generation
best_result = result
best_index = gen
print("Saving generation", best_index+1, "with a result of", best_result, "...")
best_generation = ga.sort_generation(best_generation)
if not bool(details):
details = {"trained": generations,
"game_size": size,
"duration": end,
"best": best_generation[0].fitness}
else:
details["trained"] += generations
return best_generation, details
def esc_exit():
"""
Used to stop graphical representation
"""
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE: quit()
| python
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
from time import gmtime, strftime
# Get the data from the source
url = "https://www.house.gov/representatives"
url_req = urlopen(Request(url, headers={'User-Agent': 'Mozilla'}))
raw_html = BeautifulSoup(url_req, "lxml")
html = raw_html.prettify()
# Archive data
dir_path = "archive/house/"
time_stamp = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
# # Archive HTML with a timestamp
file_name = dir_path + "html/house-" + time_stamp + ".html"
file = open(file_name, "w")
file.write(str(html))
file.close()
# Archive JSON with a timestamp
json_file_name = dir_path + "json/house-" + time_stamp + ".json"
json = open(json_file_name, "w")
json.write("{\n\t\"members\": [\n")
all_representatives = []
representatives = raw_html("tr")
for representative in representatives[498:]:
information = representative("td")
if len(information) > 0:
full_name = information[0]
state_district = information[1]
party = information[2]
office_room = information[3]
phone = information[4]
website = information[0].find("a").get("href")
committee_assignments = information[5]
# Pretty printing
tab = "\t\t\t"
# Escape quotes in names
get_name = str(full_name.get_text())
formatted_name = get_name.replace('"', r'\"')
# Get first and last name separately
last_name, first_name = formatted_name.split(",")
# Get state and district separately
get_state_district = str(state_district.get_text()).strip()
state, district = get_state_district.rsplit(" ", 1)
if district == "Large":
state, district, district_large = get_state_district.rsplit(" ", 2)
district = district + " " + district_large
# JSON
print_name = tab + "\"full_name\": \"" + first_name.strip() + " " + last_name.strip() + "\",\n"
print_first_name = tab + "\"first_name\": \"" + first_name.strip() + "\",\n"
print_last_name = tab + "\"last_name\": \"" + last_name.strip() + "\",\n"
print_state_district = tab + "\"state_district\": \"" + get_state_district + "\",\n"
print_state = tab + "\"state\": \"" + state + "\",\n"
print_district = tab + "\"district\": \"" + district + "\",\n"
print_party = tab + "\"party\": \"" + str(party.get_text()).strip() + "\",\n"
print_office_room = tab + "\"office_room\": \"" + str(office_room.get_text()).strip() + "\",\n"
print_phone = tab + "\"phone\": \"" + str(phone.get_text()).strip() + "\",\n"
print_website = tab + "\"website\": \"" + website + "\",\n"
print_committee_assignments = ( tab + "\"committee_assignments\": [\"" +
str(committee_assignments.get_text('", "', strip=True)).strip() + "\"]\n" )
print_all = (
"\t\t{\n" +
print_name +
print_first_name +
print_last_name +
print_state_district +
print_state +
print_district +
print_party +
print_office_room +
print_phone +
print_website +
print_committee_assignments +
"\t\t},\n"
)
# Remove trailing comma at end of JSON
if representative == representatives[-1]:
print_all = print_all[:-2] + "\n\t]\n}"
json.write(print_all)
json.close()
| python |
#PasswordGenerator GGearing314 01/10/19
from random import *
case=randint(1,2)
number=randint(1,99)
animals=("ant","alligator","baboon","badger","barb","bat","beagle","bear","beaver","bird","bison","bombay","bongo","booby","butterfly","bee","camel","cat","caterpillar","catfish","cheetah","chicken","chipmunk","cow","crab","deer","dingo","dodo","dog","dolphin","donkey","duck","eagle","earwig","elephant","emu","falcon","ferret","fish","flamingo","fly","fox","frog","gecko","gibbon","giraffe","goat","goose","gorilla")
colour=("red","orange","yellow","green","blue","indigo","violet","purple","magenta","cyan","pink","brown","white","grey","black")
chosenanimal=animals[randint(0,len(animals)-1)] #randint is inclusive on both ends, so -1 avoids an IndexError
chosencolour=colour[randint(0,len(colour)-1)]
if case==1:
chosenanimal=chosenanimal.upper()
print(chosencolour,number,chosenanimal)
else:
chosencolour=chosencolour.upper()
print(chosenanimal,number,chosencolour)
#print("This program has exatly ",(len(animals)*len(colour)*99*2),"different combinations") #I'm not sure this is right
input("Press enter to close...")
| python |
from thundra import constants
from thundra.context.execution_context_manager import ExecutionContextManager
from thundra.wrappers.fastapi.fastapi_wrapper import FastapiWrapper
from thundra.context.tracing_execution_context_provider import TracingExecutionContextProvider
from thundra.context.global_execution_context_provider import GlobalExecutionContextProvider
from thundra.wrappers import wrapper_utils
import pytest
def test_fastapi_hooks_called(test_app, monkeypatch):
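    # monkeypatch the wrapper hooks so the span/response assertions run inside before_request/after_request for GET /1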
def mock_before_request(self, request, req_body):
ExecutionContextManager.set_provider(TracingExecutionContextProvider())
execution_context = wrapper_utils.create_execution_context()
execution_context.platform_data["request"] = request
execution_context.platform_data["request"]["body"] = req_body
self.plugin_context.request_count += 1
self.execute_hook("before:invocation", execution_context)
assert execution_context.root_span.operation_name == '/1'
assert execution_context.root_span.get_tag('http.method') == 'GET'
assert execution_context.root_span.get_tag('http.host') == 'testserver'
assert execution_context.root_span.get_tag('http.query_params') == b''
assert execution_context.root_span.get_tag('http.path') == '/1'
assert execution_context.root_span.class_name == constants.ClassNames['FASTAPI']
assert execution_context.root_span.domain_name == 'API'
return execution_context
def mock_after_request(self, execution_context):
assert execution_context.response.body == b'{"hello_world":1}'
assert execution_context.response.status_code == 200
self.prepare_and_send_reports_async(execution_context)
ExecutionContextManager.clear()
monkeypatch.setattr(FastapiWrapper, "before_request", mock_before_request)
monkeypatch.setattr(FastapiWrapper, "after_request", mock_after_request)
response = test_app.get('/1')
def test_fastapi_erroneous(test_app, monkeypatch):
try:
def mock_error_handler(self, error):
execution_context = ExecutionContextManager.get()
if error:
execution_context.error = error
self.prepare_and_send_reports_async(execution_context)
assert error.type == "RuntimeError"
assert error.message == "Test Error"
monkeypatch.setattr(FastapiWrapper, "error_handler", mock_error_handler)
test_app.get('/error')
    except Exception:
        # the endpoint is expected to raise; the patched error_handler performs the assertions
        pass
import lanelines
from compgraph import CompGraph, CompGraphRunner
import numpy as np
import cv2
func_dict = {
'warp': lanelines.warp,
'gray': lanelines.gray,
'get_HLS': lanelines.get_hls_channels,
'weighted_HLS_sum': lanelines.weighted_HLS,
'threshold_gray': lanelines.mask_threashold_range,
'threshold_S': lanelines.mask_threashold_range,
'threshold_wHLS': lanelines.mask_threashold_range,
'apply_sobel_x_to_S': lanelines.scaled_sobel_x,
'threshold_S_sobel_x': lanelines.mask_threashold_range,
'median_blur_tssx': cv2.medianBlur,
'close_thresholded_S': lanelines.morphological_close,
'gather_thresholded_images': lanelines.gather_thresholded_images,
'combine_thresholds_bitwise_or': lanelines.bitwise_or,
'get_target_cells_coordinates': lanelines.get_target_cells_coordinates,
'fit_lane_polynomials': lanelines.fit_lane_polynomials,
}
func_io = {
'warp': (('image', 'M', 'canvas_size'), 'warped'),
'gray': ('warped', 'warped_gray'),
'get_HLS': ('warped', ('H', 'L', 'S')),
'weighted_HLS_sum': (('H', 'L', 'S', 'HLS_weights'), 'weighted_HLS'),
'threshold_gray': (('warped_gray', 'gray_from', 'gray_to'), 'thresholded_gray'),
'threshold_S': (('S', 'S_from', 'S_to'), 'thresholded_S'),
'threshold_wHLS': (('weighted_HLS', 'wHLS_from', 'wHLS_to'), 'thresholded_wHLS'),
'apply_sobel_x_to_S': ('S', 'S_sobel_x'),
'threshold_S_sobel_x': (('S_sobel_x', 'S_sobel_x_from', 'S_sobel_x_to'), 'thresholded_S_sobel_x'),
'median_blur_tssx': (('thresholded_S_sobel_x', 'tssx_median_kernel'), 'tssx_median'),
'close_thresholded_S': (('thresholded_S', 'close_kernel_for_tS'), 'ts_closed'),
'gather_thresholded_images' : (
('thresholded_S', 'thresholded_wHLS', 'thresholded_S_sobel_x', 'tssx_median', 'ts_closed', 'thresholded_gray'),
'thresholded_images'
),
'combine_thresholds_bitwise_or': ('thresholded_images', 'all_thresholds'),
'get_target_cells_coordinates': (
('all_thresholds', 'n_cells_x', 'n_cells_y', 'cell_threshold'),
('estpoints_left', 'estpoints_right'),
),
'fit_lane_polynomials': (
('estpoints_left', 'estpoints_right'),
('p_coefs_left', 'p_coefs_right')
),
}
computational_graph = CompGraph(func_dict, func_io)
parameters = {
'canvas_size': (500, 1500),
'HLS_weights': [0, 0.4, 1.],
'gray_from': 210,
'gray_to': 255,
'S_from': 180,
'S_to': 255,
'wHLS_from': 180,
'wHLS_to': 255,
'S_sobel_x_from': 20,
'S_sobel_x_to': 240,
'tssx_median_kernel': 5,
'close_kernel_for_tS': (3, 3),
'n_cells_x': 50,
'n_cells_y': 100,
'cell_threshold': 70,
}
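# Usage sketch (left commented out): the CompGraphRunner constructor and run() signature below are
# assumptions about the local compgraph package, and frame_bgr / perspective_M are placeholder inputs --
# verify against the actual API before enabling.
# runner = CompGraphRunner(computational_graph, frozen_tokens=parameters)
# runner.run(image=frame_bgr, M=perspective_M)
# left_fit, right_fit = runner['p_coefs_left'], runner['p_coefs_right']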
| python |
import time
import typing as t
from huey import crontab
from app.db.session import db_session
from app.db.crud.server import get_server_with_ports_usage
from app.db.crud.port_forward import get_forward_rule, get_all_expire_rules
from app.db.models.port import Port
from .config import huey
from tasks.ansible import ansible_hosts_runner
from tasks.utils.runner import run
from tasks.utils.handlers import iptables_finished_handler
def clean_finished_handler(runner):
ansible_hosts_runner()
@huey.task()
def clean_runner(server: t.Dict):
run(
server=server,
playbook="clean.yml",
finished_callback=clean_finished_handler,
)
@huey.task(priority=4)
def clean_port_runner(server_id: int, port: Port, update_traffic: bool = True):
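    # delete the port's forwarding rule from the DB, then run the clean_port playbook against the server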
with db_session() as db:
if db_forward_rule := get_forward_rule(db, server_id, port.id):
db.delete(db_forward_rule)
db.commit()
server = get_server_with_ports_usage(db, server_id)
run(
server=server,
playbook="clean_port.yml",
extravars={"local_port": port.num},
finished_callback=iptables_finished_handler(
server.id, accumulate=True, update_traffic_bool=update_traffic
),
)
@huey.periodic_task(crontab(minute="*"), priority=4)
def clean_expired_port_runner():
with db_session() as db:
db_expire_rules = get_all_expire_rules(db)
for db_rule in db_expire_rules:
if time.time() > db_rule.config.get("expire_time", float("inf")):
clean_port_runner(
db_rule.port.server.id,
db_rule.port,
update_traffic=True,
)
| python |
# This is an exact clone of identification.py with functions renamed for clarity and all code relating to creating an
# alignment removed
from typing import Tuple
import sys
import os
path_to_src = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(path_to_src)
from src.objects import Database, Spectrum, MPSpectrumID, DEVFallOffEntry
from src.preprocessing import merge_search, preprocessing_utils
from src import database
from src.file_io import JSON
import time
import copy
import json
# top results to keep for creating an alignment
TOP_X = 50
def database_and_spectra_preprocessing(
spectra_files: str,
database_file: str,
verbose: bool = True,
min_peptide_len: int = 5,
max_peptide_len: int = 20,
peak_filter: int = 0,
relative_abundance_filter: float = 0.0,
ppm_tolerance: int = 20,
precursor_tolerance: int = 10,
digest: str = '',
cores: int = 1,
n: int = 5,
DEBUG: bool = False,
truth_set: str = '',
output_dir: str = ''
) -> dict:
# build/load the database
verbose and print('Loading database...')
db = database.build(database_file)
verbose and print('Done')
# load all of the spectra
verbose and print('Loading spectra...')
spectra, boundaries = preprocessing_utils.load_spectra(
spectra_files,
ppm_tolerance,
peak_filter=peak_filter,
relative_abundance_filter=relative_abundance_filter
)
verbose and print('Done')
# get the boundary -> kmer mappings for b and y ions
matched_masses_b, matched_masses_y, db = merge_search.modified_match_masses(boundaries, db, max_peptide_len, DEBUG)
# # if we only get 1 core, don't do the multiprocessing bit
# if cores == 1:
# # go through and id all spectra
# for i, spectrum in enumerate(spectra):
# print(f'Creating alignment for spectrum {i+1}/{len(spectra)} [{to_percent(i+1, len(spectra))}%]', end='\r')
# # get b and y hits
# b_hits, y_hits = [], []
# for mz in spectrum.spectrum:
# # get the correct boundary
# mapped = mz_mapping[mz]
# b = boundaries[mapped]
# b = hashable_boundaries(b)
# if b in matched_masses_b:
# b_hits += matched_masses_b[b]
# if b in matched_masses_y:
# y_hits += matched_masses_y[b]
return db | python |
from .base import NextcloudManager
class NextcloudGroupManager(NextcloudManager):
def all(self, search=None):
"""
Get all nextcloud groups
"""
request = self.api.get_groups(search=search)
self.check_request(request)
objs = []
for name in request.data['groups']:
objs.append(self.get(name))
return objs
def get(self, name=None, **kwargs):
"""
Get a specific nextcloud group
"""
return super().get(name=name, **kwargs)
| python |
import numpy as np
import matplotlib.pyplot as plt
from soundsig.plots import multi_plot
"""
Implementation of S. Zayd Enam's STRF modeling stuff:
S. Zayd Enam, Michael R. DeWeese, "Spectro-Temporal Models of Inferior Colliculus Neuron Receptive Fields"
http://users.soe.ucsc.edu/~afletcher/hdnips2013/papers/strfmodels_plos.pdf
"""
def onset_strf(t, f, t_c=0.150, t_freq=10.0, t_phase=0.0, t_sigma=0.250, f_c=3000.0, f_sigma=500.0):
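    """Gaussian frequency tuning multiplied by a temporally windowed sinusoid (onset-type STRF)."""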
T,F = np.meshgrid(t, f)
f_part = np.exp(-(F - f_c)**2 / (2*f_sigma**2))
t_part = np.sin(2*np.pi*t_freq*(T - t_c) + t_phase)
exp_part = np.exp( (-(T - t_c)**2 / (2*t_sigma**2)) )
strf = t_part*f_part*exp_part
strf /= np.abs(strf).max()
return strf
def checkerboard_strf(t, f, t_freq=10.0, t_phase=0.0,
f_freq=1e-6, f_phase=0.0, t_c=0.150, f_c=3000.0,
t_sigma=0.050, f_sigma=500.0, harmonic=False):
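    """Product of temporal and spectral cosines under a 2-D Gaussian window; harmonic=True rectifies the spectral part."""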
T,F = np.meshgrid(t, f)
t_part = np.cos(2*np.pi*t_freq*T + t_phase)
f_part = np.cos(2*np.pi*f_freq*F + f_phase)
exp_part = np.exp( (-(T-t_c)**2 / (2*t_sigma**2)) - ((F - f_c)**2 / (2*f_sigma**2)) )
if harmonic:
f_part = np.abs(f_part)
strf = t_part*f_part*exp_part
strf /= np.abs(strf).max()
return strf
def sweep_strf(t, f, theta=0.0, aspect_ratio=1.0, phase=0.0, wavelength=0.5, spread=1.0, f_c=5000.0, t_c=0.0):
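    """Oriented Gabor in the time-frequency plane; theta rotates the carrier to model frequency sweeps."""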
T,F = np.meshgrid(t-t_c, f-f_c)
T /= np.abs(T).max()
F /= np.abs(F).max()
Tp = T*np.cos(theta) + F*np.sin(theta)
Fp = -T*np.sin(theta) + F*np.cos(theta)
exp_part = np.exp( -(Tp**2 + (aspect_ratio**2 * Fp**2)) / (2*spread**2) )
cos_part = np.cos( (2*np.pi*Tp / wavelength) + phase)
return exp_part*cos_part
def plot_strf(pdata, ax):
    strf = pdata['strf']
    absmax = np.abs(strf).max()
    # draw on the axes handed in by multi_plot instead of relying on the current pyplot axes
    ax.imshow(strf, interpolation='nearest', aspect='auto', origin='lower',
              extent=plot_extent, vmin=-absmax, vmax=absmax, cmap=plt.cm.seismic)
    ax.set_title(pdata['title'])
    ax.set_xticks([])
    ax.set_yticks([])
if __name__ == '__main__':
nt = 100
    t = np.linspace(0.0, 0.250, nt)
nf = 100
f = np.linspace(300.0, 8000.0, nf)
plot_extent = [t.min(), t.max(), f.min(), f.max()]
#build onset STRFs of varying center frequency and temporal bandwidths
onset_f_sigma = 500
onset_f_c = np.linspace(300.0, 8000.0, 10)
onset_t_sigmas = np.array([0.005, 0.010, 0.025, 0.050])
onset_t_freqs = np.array([20.0, 15.0, 10.0, 5.0])
onset_plist = list()
for f_c in onset_f_c:
for t_sigma,t_freq in zip(onset_t_sigmas, onset_t_freqs):
t_c = 0.5*(1.0 / t_freq) - 0.010
strf = onset_strf(t, f, t_freq=t_freq, t_phase=np.pi, f_c=f_c, f_sigma=1000.0, t_sigma=t_sigma, t_c=t_c)
            title = r'$f_c$=%dHz, $\sigma_t$=%dms, $f_t$=%dHz' % (f_c, t_sigma*1e3, t_freq)
onset_plist.append({'strf':strf, 'title':title})
multi_plot(onset_plist, plot_strf, nrows=len(onset_f_c), ncols=len(onset_t_sigmas))
#build harmonic stack STRFs
stack_t_sigma = 0.005
stack_f_sigma = 1500
stack_f_c = np.linspace(300.0, 8000.0, 10)
stack_f_freq = np.linspace(1e-4, 7e-4, 5)
stack_t_freqs = np.array([20.0, 15.0, 10.0, 5.0])
stack_plist = list()
for f_c in stack_f_c:
for f_freq in stack_f_freq:
strf = checkerboard_strf(t, f,
t_freq=10.0, t_phase=0.0,
f_freq=f_freq, f_phase=0.0,
t_c=0.015, f_c=f_c,
t_sigma=stack_t_sigma, f_sigma=stack_f_sigma, harmonic=False)
title = '$f_c$=%dHz, f_freq=%0.6f' % (f_c, f_freq)
stack_plist.append({'strf':strf, 'title':title})
multi_plot(stack_plist, plot_strf, nrows=len(stack_f_c), ncols=len(stack_f_freq))
#build frequency sweep STRFs
sweep_wavelengths = np.array([0.25, 0.5, 0.75])
sweep_spreads = np.array([0.100, 0.150, 0.200, 0.250])
sweep_thetas = np.array([-np.pi/8, -np.pi/6, -np.pi/4, np.pi/4, np.pi/6, np.pi/8])
sweep_plist = list()
for wavelength,spread in zip(sweep_wavelengths, sweep_spreads):
for theta in sweep_thetas:
t_c = 0.1*wavelength
strf = sweep_strf(t, f, theta=theta, wavelength=wavelength, spread=spread, t_c=t_c)
            title = r'$\lambda$=%0.3f, $\theta$=%d$\degree$' % (wavelength, theta*(180.0 / np.pi))
sweep_plist.append({'strf':strf, 'title':title})
multi_plot(sweep_plist, plot_strf, nrows=len(sweep_wavelengths), ncols=len(sweep_thetas))
plt.show()
| python |
import binascii
import pytest
from random import random
import jmap
from jmap import errors
@pytest.mark.asyncio
async def test_mailbox_get_all(account, idmap):
response = await account.mailbox_get(idmap)
assert response['accountId'] == account.id
assert int(response['state']) > 0
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) > 0
for mailbox in response['list']:
assert mailbox['id']
assert mailbox['name']
assert mailbox['myRights']
assert 'role' in mailbox
assert 'sortOrder' in mailbox
assert 'totalEmails' in mailbox
assert 'totalThreads' in mailbox
assert 'unreadThreads' in mailbox
assert 'isSubscribed' in mailbox
assert 'parentId' in mailbox
@pytest.mark.asyncio
async def test_mailbox_get_notFound(account, idmap):
wrong_ids = ['notexisting', 123]
properties = ['name', 'myRights']
response = await account.mailbox_get(
idmap,
ids=wrong_ids,
properties=properties,
)
assert response['accountId'] == account.id
assert int(response['state']) > 0
assert isinstance(response['notFound'], list)
assert set(response['notFound']) == set(wrong_ids)
assert isinstance(response['list'], list)
assert len(response['list']) == 0
@pytest.mark.asyncio
async def test_mailbox_set_fail(account, idmap):
with pytest.raises(errors.stateMismatch):
await account.mailbox_set(idmap, ifInState='wrongstate')
@pytest.mark.asyncio
async def test_mailbox_create_duplicate(account, idmap):
response = await account.mailbox_set(
idmap,
create={
"test": {
"parentId": None,
"name": 'INBOX',
}
}
)
assert response['notCreated']['test']['type'] == 'invalidArguments'
@pytest.mark.asyncio
async def test_mailbox_create_rename_destroy(account, idmap, inbox_id):
# Create
response = await account.mailbox_set(
idmap,
create={
"test": {
"parentId": inbox_id,
"name": str(random())[2:10],
"isSubscribed": False,
}
}
)
newId = response['created']['test']['id']
assert not response['notCreated']
assert not response['updated']
assert not response['notUpdated']
assert not response['destroyed']
assert not response['notDestroyed']
# Rename
update = {newId: {"name": " ÁÝŽ-\\"}}
response = await account.mailbox_set(idmap, update=update)
assert not response['created']
assert not response['notCreated']
assert response['updated'] == update
assert not response['notUpdated']
assert not response['destroyed']
# Destroy
response = await account.mailbox_set(idmap, destroy=[newId])
assert not response['created']
assert not response['notCreated']
assert not response['updated']
assert not response['notUpdated']
assert response['destroyed'] == [newId]
assert not response['notDestroyed']
@pytest.mark.asyncio
async def test_mailbox_query(account, inbox_id):
response = await account.mailbox_query(
filter={"parentId": inbox_id},
sort=[{"property": "sortOrder"},{"property": "name"}],
position=0,
limit=10,
calculateTotal=True,
)
assert response['accountId'] == account.id
assert isinstance(response['ids'], list)
assert 0 < len(response['ids']) <= 10
@pytest.mark.asyncio
async def test_email_query_inMailbox(account, inbox_id, email_id):
response = await account.email_query(**{
"filter": {"inMailbox": inbox_id},
"anchor": email_id,
"collapseThreads": False,
"limit": 10,
"calculateTotal": True
})
assert response['accountId'] == account.id
assert response['position'] > 0
assert response['total'] > 0
assert response['collapseThreads'] == False
assert response['queryState']
assert isinstance(response['ids'], list)
assert 0 < len(response['ids']) <= 10
assert response['canCalculateChanges'] in (True, False)
@pytest.mark.asyncio
async def test_email_get_all(account, idmap, uidvalidity):
response = await account.email_get(idmap)
assert response['accountId'] == account.id
assert isinstance(response['list'], list)
assert 0 < len(response['list']) <= 1000
assert response['notFound'] == []
for msg in response['list']:
assert msg['id']
assert msg['threadId']
@pytest.mark.asyncio
async def test_email_get(account, idmap, uidvalidity, email_id, email_id2):
properties = {
'threadId', 'mailboxIds', 'inReplyTo', 'keywords', 'subject',
'sentAt', 'receivedAt', 'size', 'blobId',
'from', 'to', 'cc', 'bcc', 'replyTo',
'attachments', 'hasAttachment',
'headers', 'preview', 'body',
}
good_ids = [email_id, email_id2]
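    # email ids look like "<uidvalidity>-<uid>"; the ids below are malformed, out of range, or nonexistent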
wrong_ids = [
"notsplit",
"not-int",
f"{uidvalidity}-{1 << 33}",
f"{uidvalidity}-{1 << 32}",
f"{uidvalidity}-{(1<<32)-1}",
f"{uidvalidity}-0",
f"{uidvalidity}--10",
f"{uidvalidity}-1e2",
f"{uidvalidity}-str",
1234,
]
response = await account.email_get(
idmap,
ids=good_ids + wrong_ids,
properties=list(properties),
maxBodyValueBytes=1024,
)
assert response['accountId'] == account.id
assert isinstance(response['list'], list)
assert len(response['list']) == 2
assert isinstance(response['notFound'], list)
assert set(response['notFound']) == set(wrong_ids)
for msg in response['list']:
assert msg['id'] in good_ids
for prop in properties - {'body'}:
assert prop in msg
assert 'textBody' in msg or 'htmlBody' in msg
@pytest.mark.asyncio
async def test_email_query_get_threads(account, idmap, inbox_id):
response = await account.email_query(**{
"filter": {"inMailbox": inbox_id},
"sort": [{"property": "receivedAt", "isAscending": False}],
"collapseThreads": True,
"position": 0,
"limit": 30,
"calculateTotal": True,
})
response = await account.email_get(idmap, ids=response['ids'], properties=["threadId"])
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) == 30
for msg in response['list']:
assert msg['id']
assert msg['threadId']
thread_ids = [msg['threadId'] for msg in response['list']]
response = await account.thread_get(idmap, ids=thread_ids)
assert len(response['notFound']) == 0
assert len(response['list']) >= 30
email_ids = []
for thread in response['list']:
assert thread['id']
assert thread['emailIds']
email_ids.extend(thread['emailIds'])
properties = ["threadId","mailboxIds","keywords",
"hasAttachment","from","to","subject",
"receivedAt","size","preview"]
response = await account.email_get(idmap, ids=email_ids, properties=properties)
assert len(response['notFound']) == 0
assert len(response['list']) >= 30
for msg in response['list']:
for prop in properties:
assert prop in msg
@pytest.mark.asyncio
async def test_email_get_detail(account, idmap, email_id):
properties = {
"blobId", "messageId", "inReplyTo", "references",
"header:list-id:asText", "header:list-post:asURLs",
"sender", "cc", "bcc", "replyTo", "sentAt",
"bodyStructure", "bodyValues",
}
bodyProperties = [
"partId", "blobId", "size", "name", "type",
"charset", "disposition", "cid", "location",
]
response = await account.email_get(idmap, **{
"ids": [email_id],
"properties": list(properties),
"fetchHTMLBodyValues": True,
"bodyProperties": bodyProperties,
})
assert response['accountId'] == account.id
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) == 1
for msg in response['list']:
for prop in properties - {'body'}:
assert prop in msg
@pytest.mark.asyncio
async def test_email_setget_seen(account, idmap, email_id):
for state in (True, False):
response = await account.email_set(
idmap,
update={
email_id: {"keywords/$seen": state}
}
)
assert response['accountId'] == account.id
assert isinstance(response['updated'], dict)
assert isinstance(response['notUpdated'], dict)
assert isinstance(response['created'], dict)
assert isinstance(response['notCreated'], dict)
assert isinstance(response['destroyed'], list)
assert isinstance(response['notDestroyed'], dict)
assert len(response['updated']) > 0
assert len(response['notUpdated']) == 0
assert len(response['created']) == 0
assert len(response['notCreated']) == 0
assert len(response['destroyed']) == 0
assert len(response['notDestroyed']) == 0
response = await account.email_get(
idmap,
ids=[email_id],
properties=['keywords']
)
assert response['list'][0]['id'] == email_id
assert response['list'][0]['keywords'].get('$seen', False) == state
@pytest.mark.asyncio
async def test_email_create_destroy(account, idmap, inbox_id):
async def create_stream():
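        # base64 of a 1x1 PNG used as the attachment payload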
yield binascii.a2b_base64("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=")
res = await account.upload(create_stream(), 'image/png')
attachmentBlobId = res['blobId']
email = {
"mailboxIds": [inbox_id],
"to": [{
"name": "Filip Hanes",
"email": "[email protected]"
}],
"bodyValues": {
"1": {
"type": "text/plain",
"value": "Hi,\nwhats'up wonderful person?",
},
"2": {
"type": "text/html",
"value": "<p>Hi,</p><p>whats'up wonderful person?</p>",
},
},
"textBody": [{
'partId': "1",
'type': "text/plain",
}],
"htmlBody": [{
'partId': "2",
'type': "text/html",
}],
"attachments": [
{
'blobId': attachmentBlobId,
'type': "image/png",
'name': "picture.png",
'cid': "picture.png",
'disposition': 'attachment',
},
]
}
response = await account.email_set(idmap, create={"test": email})
assert response['created']['test']['id']
blobId = response['created']['test']['blobId']
assert blobId
body = await account.download(blobId)
assert body
@pytest.mark.asyncio
async def test_email_changes(account, uidvalidity):
response = await account.email_changes(sinceState=f"{uidvalidity},1,1", maxChanges=3000)
changes = response['created'] + response['updated'] + response['removed']
assert 0 < len(changes) < 3000
@pytest.mark.asyncio
async def test_thread_changes(account, uidvalidity):
response = await account.thread_changes(sinceState=f"{uidvalidity},1,10", maxChanges=30)
changes = response['created'] + response['updated'] + response['removed']
assert 0 < len(changes) < 30
@pytest.mark.asyncio
async def test_mailbox_changes(account):
with pytest.raises(jmap.errors.cannotCalculateChanges):
await account.mailbox_changes(sinceState="1", maxChanges=300)
| python |
from ocha.libs import utils
import os, yaml
from ocha.libs import setting
def create_production_env(data_env, app_path):
host = data_env['app']['host']
port = data_env['app']['port']
f=open(app_path+"/production.sh", "a+")
f.write("gunicorn production:app -b "+str(host)+":"+str(port)+" -w 2 --chdir "+app_path+"/")
f.close()
def create_env(data_env, app_path):
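    # assemble a .env file: APP, MEMCACHE, DATABASE and REDIS settings come from data_env, plus a fixed JWT secret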
db_driver = None
try:
db_driver = data_env['database']['driver']
except Exception:
db_driver = "cockroachdb"
env_check = None
try:
env_check = data_env['app']['environment']
except Exception as e:
print(e)
env_sett = ""
if env_check:
if env_check == 'production':
env_sett = "False"
else:
env_sett = "True"
f=open(app_path+"/.env", "a+")
# APP CONFIG
f.write("APP_NAME = "+data_env['app']['name'])
f.write("\n")
f.write("APP_HOST = "+data_env['app']['host'])
f.write("\n")
f.write("APP_PORT = "+str(data_env['app']['port']))
f.write("\n")
f.write("FLASK_DEBUG = "+env_sett)
f.write("\n")
f.write("\n")
# MEMCACHE CONFIG
f.write("MEMCACHE_HOST = "+data_env['app']['host'])
f.write("\n")
f.write("MEMCACHE_PORT = 11211")
f.write("\n")
f.write("\n")
# DATABASE CONFIG
f.write("DB_NAME = "+data_env['database']['name'])
f.write("\n")
f.write("DB_HOST = "+data_env['database']['host'])
f.write("\n")
f.write("DB_PORT = "+str(data_env['database']['port']))
f.write("\n")
f.write("DB_USER = "+data_env['database']['username'])
f.write("\n")
f.write("DB_SSL = "+data_env['database']['ssl'])
f.write("\n")
f.write("DB_DRIVER = "+db_driver)
f.write("\n")
f.write("\n")
# REDIS CONFIG
f.write("FLASK_REDIS_URL = redis://:"+data_env['redis']['password']+"@"+str(data_env['redis']['host'])+":"+str(data_env['redis']['port'])+"/0")
f.write("\n")
f.write("\n")
f.write("JWT_SECRET_KEY = wqertyudfgfhjhkcxvbnmn@123$32213")
f.close()
def create_file_controller(nm_controller, app_path, security):
controller_path = app_path+"/app/controllers/api"
file_controller_path = controller_path+"/"+nm_controller+".py"
create_controller(nm_controller,file_controller_path, security)
def create_controller(nm_controller, file_controller_path, security):
sec_value = ""
if security == True:
sec_value = "@jwt_required"
nm_ctrl = nm_controller.capitalize()
f=open(file_controller_path, "a+")
value_ctrl = """from flask_restful import Resource, reqparse, request
from app.helpers.rest import response
from app.helpers import cmd_parser as cmd
from app import psycopg2
from app.libs import utils
from app.models import model as db
from app.middlewares.auth import jwt_required
from app.helpers import endpoint_parse as ep
import json
class """+nm_ctrl+"""(Resource):
"""+sec_value+"""
def post(self):
json_req = request.get_json(force=True)
command = utils.get_command(request.path)
command = command
init_data = cmd.parser(json_req, command)
a = ep.endpoint_parser(command, init_data)
return response(200, data=a)
"""
f.write(value_ctrl)
f.close()
def read_app(app_name, path=None):
if path is None:
app_path = utils.APP_HOME+"/BLESS/"+app_name
else:
app_path = path+"/"+app_name
if not os.path.exists(app_path):
return None
else:
return app_path
def set_endpoint_template(endpoint_obj, app_path):
endpoint_fix = {
"endpoint": endpoint_obj
}
endpoint_value = yaml.dump(endpoint_fix)
template_path = app_path+"/app/static/templates/endpoint.yml"
f=open(template_path, "a+")
f.write(endpoint_value)
f.close()
def create_app(app_name, app_framework, path=None):
url_git = "https://github.com/Blesproject/bless_"+app_framework+".git"
if path is None:
app_path = utils.APP_HOME+"/BLESS"
dst_path = app_path+"/"+app_name
else:
app_path = path
dst_path = app_path+"/"+app_name
if not os.path.exists(app_path):
os.makedirs(app_path)
# copy(flask_path,dst_path)
try:
clone = utils.template_git(url=url_git, dir=dst_path)
except Exception as e:
print(str(e))
else:
return True
else:
# copy(flask_path,dst_path)
try:
clone = utils.template_git(url=url_git, dir=dst_path)
except Exception as e:
print(str(e))
else:
return False
def create_routing(endpoint_obj, app_path):
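    # generate app/controllers/api/__init__.py with the default user/auth routes plus one resource per generated controller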
init_import = "from flask import Blueprint\nfrom flask_restful import Api \nfrom .user import *\nfrom .auth import *\n"
ctrl_import = ""
for i in endpoint_obj:
ctrl_import += "from ."+i+" import * \n"
p_import = init_import+ctrl_import
value_start = """\n\napi_blueprint = Blueprint("api", __name__, url_prefix='/api')
api = Api(api_blueprint)
api.add_resource(UserdataResource, '/user')
api.add_resource(UserdataResourceById, '/user/<userdata_id>')
api.add_resource(UserdataInsert, '/user')
api.add_resource(UserdataUpdate, '/user/<userdata_id>')
api.add_resource(UserdataRemove, '/user/<userdata_id>')
api.add_resource(Usersignin, '/sign')
api.add_resource(UserTokenRefresh, '/sign/token')
api.add_resource(UserloginInsert, '/user/add')\n"""
value_default = p_import+value_start
add_resource_data = ""
for a in endpoint_obj:
ctrl_class = a.capitalize()
add_resource_data += "api.add_resource("+ctrl_class+", '/"+a+"')\n"
all_value = value_default+ add_resource_data
init_path = app_path+"/app/controllers/api/__init__.py"
f=open(init_path, "a+")
f.write(all_value)
f.close()
def create_moduls(moduls_name, moduls_data, app_path, sync_md=None):
import_value = "from app.models import model as db\n\n\n"
moduls_path = ""
file_moduls_path = ""
if sync_md is None:
moduls_path = app_path+"/app/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
else:
moduls_path = app_path+"/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
f=open(file_moduls_path, "a+")
f.write(import_value)
function_value = ""
utils.report("Moduls "+moduls_name+" Create")
for i in moduls_data:
if moduls_data[i]['action'] == 'insert':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = args['fields']
try:
result = db.insert(table, fields)
except Exception as e:
respons = {
"status": False,
"error": str(e)
}
else:
respons = {
"status": True,
"messages": "Fine!",
"id": result
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'remove':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
result = db.delete(table,fields,field_value)
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
respons = {
"status": result,
"messages": "Fine Deleted!"
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'get':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
try:
results = db.get_all(args['table'])
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
elif moduls_data[i]['action'] == 'where':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
results = db.get_by_id(args['table'],fields,field_value)
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
else:
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
return args\n\n
"""
f.write(function_value)
f.close()
def add_function_moduls(moduls_name, moduls_data, app_path, sync_md = None):
moduls_path = ""
file_moduls_path = ""
if sync_md is None:
moduls_path = app_path+"/app/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
else:
moduls_path = app_path+"/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
with open(file_moduls_path, "a") as myfile:
function_value = ""
for i in moduls_data:
# print(i)
if moduls_data[i]['action'] == 'insert':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = args['fields']
try:
result = db.insert(table, fields)
except Exception as e:
respons = {
"status": False,
"error": str(e)
}
else:
respons = {
"status": True,
"messages": "Fine!",
"id": result
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'remove':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
result = db.delete(table,fields,field_value)
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
respons = {
"status": result,
"messages": "Fine Deleted!"
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'get':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
try:
results = db.get_all(args['table'])
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
elif moduls_data[i]['action'] == 'where':
function_value += """
def """+moduls_data[i]['action']+"""(args):
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
results = db.get_by_id(args['table'],fields,field_value)
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
else:
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
return args\n\n
"""
myfile.write(function_value)
| python |
import os
import sys
import openpype
from openpype.api import Logger
log = Logger().get_logger(__name__)
def main(env):
from openpype.hosts.fusion.api import menu
    import avalon.api
    import avalon.fusion
# Registers pype's Global pyblish plugins
openpype.install()
    # activate fusion from pype
    avalon.api.install(avalon.fusion)
    log.info(f"Avalon registered hosts: {avalon.api.registered_host()}")
menu.launch_openpype_menu()
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import math
import numpy as np
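# Candidate direction vectors; after scaling the y component by sqrt(3) each has unit speed,
# which the assert below verifies.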
VELOCITIES = np.array([
(1, 0),
(np.sqrt(1/2+np.sqrt(1/8)), np.sqrt(1/6-np.sqrt(1/72))),
(np.sqrt(1/2), np.sqrt(1/6)),
(np.sqrt(1/2-np.sqrt(1/8)), np.sqrt(1/6+np.sqrt(1/72))),
(0, np.sqrt(1/3))
])
VELOCITIES.flags.writeable = False
assert np.allclose(np.square(VELOCITIES * [1, np.sqrt(3)]).sum(axis=1), 1)
def distance(velocities):
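    """Sum of squared deviations of each component from its nearest integer."""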
rounded = velocities.round()
delta = velocities - rounded
squared = np.square(delta)
return math.fsum(squared.flat)
# def distance(velocities):
# rounded = (velocities + 0.5).round() - 0.5
# delta = velocities - rounded
# processed = 1 / (np.square(delta) + 1)
# return processed.sum()
def main():
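    # sweep the scale factor q in 1e-6 steps and print every local minimum of distance(VELOCITIES * q)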
last_q = 0 / 1000000
last_d = distance(VELOCITIES * last_q)
improving = False
for i in range(1, 6000001):
q = i / 1000000
d = distance(VELOCITIES * q)
if d < last_d:
if not improving:
improving = True
elif d > last_d:
if improving:
improving = False
print("%.6f: %.7g" % (last_q, last_d))
last_q = q
last_d = d
if __name__ == '__main__':
main()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import json
import psycopg2
import psycopg2.extras
import re
import transforms
import signal
import sys
from get_pg_conn import get_pg_conn
# see https://filosophy.org/code/python-function-execution-deadlines---in-simple-examples/
class TimedOutExc(Exception):
pass
def deadline(timeout, *args):
def decorate(f):
def handler(signum, frame):
raise TimedOutExc()
def new_f(*args):
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
return f(*args)
signal.alarm(0)
new_f.__name__ = f.__name__
return new_f
return decorate
@deadline(5)
def attempt_match(args, matcher_id, transformed_word_ids_by_transformed_word, matches, transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id, figure_id, word, symbol_id, transformed_word):
if transformed_word:
matches.add(transformed_word)
if transformed_word not in transformed_word_ids_by_transformed_word:
# This might not be the best way to insert. TODO: look at the proper way to handle this.
transformed_words_cur.execute(
'''
INSERT INTO transformed_words (transformed_word)
VALUES (%s)
ON CONFLICT (transformed_word) DO UPDATE SET transformed_word = EXCLUDED.transformed_word
RETURNING id;
''',
(transformed_word, )
)
transformed_word_id = transformed_words_cur.fetchone()[0]
transformed_word_ids_by_transformed_word[transformed_word] = transformed_word_id
else:
transformed_word_id = transformed_word_ids_by_transformed_word[transformed_word]
else:
transformed_word_id = None
transform_args = []
for t in args[0:len(transforms_applied)]:
transform_args.append("-" + t["category"][0] + " " + t["name"])
if not word == '':
match_attempts_cur.execute('''
INSERT INTO match_attempts (ocr_processor_id, matcher_id, figure_id, word, transformed_word_id, symbol_id, transforms_applied)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING;
''',
(ocr_processor_id, matcher_id, figure_id, word, transformed_word_id, symbol_id, " ".join(transform_args))
)
def match(args):
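    # run every OCR'd word through the configured transform pipeline and record each symbol match attempt in Postgres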
conn = get_pg_conn()
ocr_processors__figures_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
symbols_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
matchers_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
transformed_words_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
match_attempts_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# transforms_to_apply includes both mutations and normalizations
transforms_to_apply = []
for arg in args:
category = arg["category"]
name = arg["name"]
t = getattr(getattr(transforms, name), name)
transforms_to_apply.append({"transform": t, "name": name, "category": category})
transforms_json = []
for t in transforms_to_apply:
transform_json = {}
transform_json["category"] = t["category"]
name = t["name"]
transform_json["name"] = name
with open("./transforms/" + name + ".py", "r") as f:
code = f.read().encode()
transform_json["code_hash"] = hashlib.sha224(code).hexdigest()
transforms_json.append(transform_json)
transforms_json_str = json.dumps(transforms_json)
matchers_cur.execute(
'''
SELECT id FROM matchers WHERE transforms=%s;
''',
(transforms_json_str, )
)
matcher_ids = matchers_cur.fetchone()
    if matcher_ids is not None:
matcher_id = matcher_ids[0]
else:
matchers_cur.execute(
'''
INSERT INTO matchers (transforms)
VALUES (%s)
ON CONFLICT (transforms) DO UPDATE SET transforms = EXCLUDED.transforms
RETURNING id;
''',
(transforms_json_str, )
)
matcher_id = matchers_cur.fetchone()[0]
    if matcher_id is None:
        raise Exception("matcher_id not found!")
normalizations = []
for t in transforms_to_apply:
t_category = t["category"]
if t_category == "normalize":
normalizations.append(t)
try:
ocr_processors__figures_query = '''
SELECT ocr_processor_id, figure_id, jsonb_extract_path(result, 'textAnnotations', '0', 'description') AS description
FROM ocr_processors__figures ORDER BY ocr_processor_id, figure_id;
'''
ocr_processors__figures_cur.execute(ocr_processors__figures_query)
symbols_query = '''
SELECT id, symbol
FROM symbols;
'''
symbols_cur.execute(symbols_query)
# original symbol incl/
symbol_ids_by_symbol = {}
for s in symbols_cur:
symbol_id = s["id"]
symbol = s["symbol"]
normalized_results = [symbol]
for normalization in normalizations:
for normalized in normalized_results:
normalized_results = []
for n in normalization["transform"](normalized):
normalized_results.append(n)
if n not in symbol_ids_by_symbol:
symbol_ids_by_symbol[n] = symbol_id
# Also collect unique uppercased symbols for matching
if n.upper() not in symbol_ids_by_symbol:
                            symbol_ids_by_symbol[n.upper()] = symbol_id
#with open("./symbol_ids_by_symbol.json", "a+") as symbol_ids_by_symbol_file:
# symbol_ids_by_symbol_file.write(json.dumps(symbol_ids_by_symbol))
transformed_word_ids_by_transformed_word = {}
transformed_words_cur.execute(
'''
SELECT id, transformed_word
FROM transformed_words;
'''
)
for row in transformed_words_cur:
transformed_word_id = row["id"]
transformed_word = row["transformed_word"]
transformed_word_ids_by_transformed_word[transformed_word] = transformed_word_id
successes = []
fails = []
for row in ocr_processors__figures_cur:
ocr_processor_id = row["ocr_processor_id"]
figure_id = row["figure_id"]
paragraph = row["description"]
if paragraph:
for line in paragraph.split("\n"):
words = set()
words.add(line.replace(" ", ""))
matches = set()
for w in line.split(" "):
words.add(w)
for word in words:
transforms_applied = []
transformed_words = [word]
for transform_to_apply in transforms_to_apply:
transforms_applied.append(transform_to_apply["name"])
for transformed_word_prev in transformed_words:
transformed_words = []
for transformed_word in transform_to_apply["transform"](transformed_word_prev):
# perform match for original and uppercased words (see elif)
try:
if transformed_word in symbol_ids_by_symbol:
attempt_match(
args, matcher_id, transformed_word_ids_by_transformed_word, matches,
transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id,
figure_id, word, symbol_ids_by_symbol[transformed_word], transformed_word)
elif transformed_word.upper() in symbol_ids_by_symbol:
attempt_match(
args, matcher_id, transformed_word_ids_by_transformed_word, matches,
transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id,
figure_id, word, symbol_ids_by_symbol[transformed_word.upper()], transformed_word.upper())
else:
transformed_words.append(transformed_word)
# except TimedOutExc as e:
# print "took too long"
except(Exception) as e:
print('Unexpected Error:', e)
print('figure_id:', figure_id)
print('word:', word)
print('transformed_word:', transformed_word)
print('transforms_applied:', transforms_applied)
raise
if len(matches) == 0:
attempt_match(args, matcher_id, transformed_word_ids_by_transformed_word, matches, transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id, figure_id, word, None, None)
if len(matches) > 0:
successes.append(line + ' => ' + ' & '.join(matches))
else:
fails.append(line)
conn.commit()
with open("./outputs/successes.txt", "a+") as successesfile:
successesfile.write('\n'.join(successes))
with open("./outputs/fails.txt", "a+") as failsfile:
failsfile.write('\n'.join(fails))
print('match: SUCCESS')
except(psycopg2.DatabaseError) as e:
print('Database Error %s' % psycopg2.DatabaseError)
print('Database Error (same one): %s' % e)
print('Database Error (same one):', e)
raise
except(Exception) as e:
print('Unexpected Error:', e)
raise
finally:
if conn:
conn.close()
| python |
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
import unittest
from karmia import KarmiaContext
class TestKarmiaContextSet(unittest.TestCase):
def test_parameter(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.parameters[key], value)
def test_object(self):
context = KarmiaContext()
parameter = {'key': 'value'}
context.set(parameter)
self.assertEqual(context.parameters['key'], parameter['key'])
def test_merge(self):
context = KarmiaContext()
parameter1 = {'key1': 'value1'}
parameter2 = {'key2': 'value2'}
context.set(parameter1)
context.set(parameter2)
self.assertEqual(context.parameters['key1'], parameter1['key1'])
self.assertEqual(context.parameters['key2'], parameter2['key2'])
class TestKarmiaContextGet(unittest.TestCase):
def test_parameter(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.get(key), value)
def test_default_parameter(self):
context = KarmiaContext()
key = 'key'
default_value = 'default_value'
self.assertEqual(context.get(key, default_value), default_value)
class TestKarmiaContextRemove(unittest.TestCase):
def test_remove(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.get(key), value)
context.remove(key)
self.assertEqual(context.get(key), None)
class TestKarmiaContextChild(unittest.TestCase):
def test_extend(self):
context = KarmiaContext()
key1 = 'key1'
key2 = 'key2'
values1 = {'value1': 1}
values2 = {'value2': 2}
context.set(key1, values1)
child = context.child()
self.assertEqual(child.get(key1), values1)
child.set(key2, values2)
self.assertEqual(child.get(key1), values1)
self.assertEqual(child.get(key2), values2)
self.assertEqual(context.get(key1), values1)
self.assertEqual(context.get(key2), None)
class TestAnnotate(unittest.TestCase):
def test_annotate_function(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
self.assertEqual(list(context.annotate(fn).keys()), ['value1', 'value2'])
def test_no_arguments(self):
context = KarmiaContext()
fn = lambda: 'result'
self.assertEqual(list(context.annotate(fn).keys()), [])
class TestInvoke(unittest.TestCase):
def test_invoke(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
parameters = {'value1': 1, 'value2': 2}
self.assertEqual(context.invoke(fn, parameters), parameters['value1'] + parameters['value2'])
class TestCall(unittest.TestCase):
def test_return(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
parameters = {'value1': 1, 'value2': 2}
self.assertEqual(context.call(fn, parameters), parameters['value1'] + parameters['value2'])
def callback(self):
def fn(value1, value2, callback):
callback(None, value1 + value2)
def callback(error, result):
self.assertIsNone(error)
self.assertEqual(result, parameters['value1', 'value2'])
context = KarmiaContext()
parameters = {'value1': 1, 'value2': 2}
context.call(fn, parameters, callback)
def test_no_parameters(self):
context = KarmiaContext()
result = 'result'
fn = lambda: result
self.assertEqual(context.call(fn), result)
def test_merge_parameters(self):
context = KarmiaContext()
key = 'value1'
value = 1
parameters = {'value2': 2}
fn = lambda value1, value2: value1 + value2
context.set(key, value)
self.assertEqual(context.call(fn, parameters), value + parameters['value2'])
class TestAsync(unittest.TestCase):
def callback(self):
def fn(value1, value2, callback):
return callback(None, value1 + value2)
def callback(error, result):
self.assertIsNone(error)
self.assertEqual(result, parameters['value1', 'value2'])
context = KarmiaContext()
parameters = {'value1': 1, 'value2': 2}
        # "async" became a reserved keyword in Python 3.7, so access the method via getattr
        async_call = getattr(context, 'async')(fn, parameters)
        self.assertTrue(callable(async_call))
        async_call(callback)
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# c-hanging-comment-ender-p: nil
# End:
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2021 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Apply some settings to an XFCE Desktop environment
* Keyboard shortcuts
* Panel configuration
"""
import argparse
import collections
import json
import logging
import re
import os
import os.path
import subprocess
import sys
SHORTCUTS = (
# Use urxvt as Alt+F3 if it is available, otherwise a terminal
('<Alt>F3', ('urxvt', 'xfce4-terminal', 'exo-open --launch TerminalEmulator')),
# Lock screen with Ctrl+Alt+L
('<Primary><Alt>l', ('xflock4', )),
# Take a screenshot with the screenshooter
('Print', ('xfce4-screenshooter', )),
)
logger = logging.getLogger(__name__)
class ActionArguments(object): # pylint: disable=too-few-public-methods
"""Arguments to the program"""
def __init__(self, do_for_real, verbose, home_dir):
self.do_for_real = do_for_real
self.verbose = verbose
self.home_dir = os.path.expanduser(home_dir or '~')
def silent_run(cmd):
"""Run the given command, dropping its output, and return False if it failed"""
logger.debug("running %s", ' '.join(cmd))
try:
subprocess.check_output(cmd)
return True
except subprocess.CalledProcessError as exc:
logger.error("%s", exc)
return False
except OSError as exc:
logger.error("%s", exc)
return False
def try_run(cmd):
"""Try running the command and return its output on success, None on failure"""
logger.debug("running: %s", ' '.join(cmd))
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return None
def find_prog_in_path(prog):
"""Find the given program in the default $PATH"""
for path_dir in ('/usr/bin', '/usr/sbin', '/bin', '/sbin'):
path_prog = '{0}/{1}'.format(path_dir, prog)
if os.path.exists(path_prog):
return path_prog
return None
def get_xfce4_shortcut(key):
"""Get the shortcut associated with the given key"""
result = try_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/custom/{0}'.format(key)])
if result is None:
result = try_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/default/{0}'.format(key)])
return result if result is None else result.decode('utf-8').rstrip('\n')
def set_xfce4_shortcut(act_args, key, cmd):
"""Set the shortcut associated with the given key"""
current_cmd = get_xfce4_shortcut(key)
if current_cmd == cmd:
if act_args.verbose:
logger.info("shortcut %s is already %r", key, cmd)
return True
if not act_args.do_for_real:
logger.info("[dry run] shortcut %s: %r -> %r", key, current_cmd, cmd)
return True
logger.info("shortcut %s: %r -> %r", key, current_cmd, cmd)
return silent_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/custom/{0}'.format(key),
'--type', 'string', '--create', '--set', cmd])
def set_xfce4_shortcut_avail(act_args, key, progs):
"""Set the shortcut associated with the given key to the first available program"""
for cmdline in progs:
# Split the command line to find the used program
cmd_split = cmdline.split(None, 1)
cmd_split[0] = find_prog_in_path(cmd_split[0])
if cmd_split[0] is not None:
return set_xfce4_shortcut(act_args, key, ' '.join(cmd_split))
logger.warning("no program found for shortcut %s", key)
return True
def configure_xfce4_shortcuts(act_args):
for key, progs in SHORTCUTS:
if not set_xfce4_shortcut_avail(act_args, key, progs):
return False
return True
class Xfce4Panels(object):
"""Represent the state of the panels
c.f. xfconf-query --channel xfce4-panel --list --verbose
"""
# Key => type, default value
panel_properties = (
('autohide-behavior', int, 0),
('length', int, 0),
('plugin-ids', [int], []),
('position', str, ''),
('position-locked', bool, False),
('size', int, 0),
)
# Name, key => type
plugin_properties = (
('clock', 'digital-format', str),
('directorymenu', 'base-directory', str),
('launcher', 'items', [str]),
('separator', 'style', int),
('separator', 'expand', bool),
('systray', 'names-visible', [str]),
)
def __init__(self, act_args):
self.act_args = act_args
self.panels = None
self.panel_plugins = None
self.available_plugins = None
@staticmethod
def read_prop(prop, prop_type, default):
"""Read a property of xfce4-panel channel of the given type"""
is_list = isinstance(prop_type, list) and len(prop_type) == 1 and default in ([], None)
assert is_list or default is None or isinstance(default, prop_type)
result = try_run([
'xfconf-query', '--channel', 'xfce4-panel',
'--property', prop])
if result is None:
return [] if is_list and default is not None else default
lines = result.decode('utf-8').splitlines()
if is_list:
if len(lines) <= 2 or not lines[0].endswith(':') or lines[1] != '':
raise ValueError("unexpected xfce4-panel%s value: %r" % (prop, lines))
return [prop_type[0](line) for line in lines[2:]]
if prop_type is bool and len(lines) == 1:
if lines[0] == 'true':
return True
if lines[0] == 'false':
return False
if prop_type is int and len(lines) == 1:
return int(lines[0])
if prop_type is str and len(lines) == 1:
return lines[0]
raise NotImplementedError("unable to convert result to %r: %r" % (prop_type, lines))
def set_panel_prop(self, panel_id, prop_name, value):
"""Set a panel property"""
for prop, prop_type, default in self.panel_properties:
if prop == prop_name:
is_list = isinstance(prop_type, list) and len(prop_type) == 1
if is_list:
assert all(isinstance(v, prop_type[0]) for v in value), \
"Wrong value type for panel property %s" % prop_name
else:
assert isinstance(value, prop_type), \
"Wrong value type for panel property %s" % prop_name
# Prepare the arguments for xfconf-query
if is_list:
text_type = 'list'
text_value = str(value) # TODO: how to modify lists?
elif prop_type is bool:
text_type = 'bool'
text_value = 'true' if value else 'false'
                elif prop_type is int:
text_type = 'int'
text_value = str(value)
elif prop_type is str:
text_type = 'string'
text_value = value
else:
raise NotImplementedError("unable to write a property of type %r" % prop_type)
# Get the current value
prop_path = '/panels/panel-{0}/{1}'.format(panel_id, prop_name)
current_val = self.panels[panel_id][prop_name]
if current_val == value:
if self.act_args.verbose:
logger.info("%s is already %r", prop_path, value)
return True
if not self.act_args.do_for_real:
logger.info("[dry run] %s: %r -> %r", prop_path, current_val, value)
return True
logger.info("%s: %r -> %r", prop_path, current_val, value)
result = silent_run([
'xfconf-query', '--channel', 'xfce4-panel',
'--property', prop_path,
'--create', '--type', text_type, '--set', text_value])
if not result:
return result
# Sanity check
new_value = self.read_prop(prop_path, prop_type, default)
if new_value == current_val:
logger.error("failed to set %s to %r (old value stayed)", prop_path, value)
return False
if new_value != value:
logger.error("failed to set %s to %r (new value %r)", prop_path, value, new_value)
return False
return True
raise NotImplementedError("unknown panel property %s" % prop_name)
def read_file(self, file_rel_path):
"""Read a configuration file"""
abs_path = os.path.join(
self.act_args.home_dir, '.config', 'xfce4', 'panel', file_rel_path)
logger.debug("reading %s", abs_path)
try:
with open(abs_path, 'r') as stream:
return stream.read().splitlines()
except OSError:
return None
def read_panels(self):
"""Retrieve the currently configured panels"""
panel_ids = self.read_prop('/panels', [int], [])
if not panel_ids:
logger.error("failed to retrieve xfce4-panel/panels enumeration")
return False
self.panels = collections.OrderedDict()
self.panel_plugins = collections.OrderedDict()
for panel_id in panel_ids:
if panel_id in self.panels:
logger.error("duplicated xfce4-panel/panels ID %d", panel_id)
return False
prop_prefix = '/panels/panel-{0}/'.format(panel_id)
self.panels[panel_id] = {}
for prop, prop_type, default in self.panel_properties:
try:
self.panels[panel_id][prop] = self.read_prop(prop_prefix + prop, prop_type, default)
except ValueError as exc:
logger.error("%s", exc)
return False
self.panel_plugins[panel_id] = collections.OrderedDict()
for plugin_id in self.panels[panel_id]['plugin-ids']:
# Read the plugin config
prop_prefix = '/plugins/plugin-{0}'.format(plugin_id)
plugin_name = self.read_prop(prop_prefix, str, '')
self.panel_plugins[panel_id][plugin_id] = collections.OrderedDict()
self.panel_plugins[panel_id][plugin_id]['name'] = plugin_name
for plname, prop, prop_type in self.plugin_properties:
if plname != plugin_name:
continue
val = self.read_prop(prop_prefix + '/' + prop, prop_type, None)
if val is not None:
self.panel_plugins[panel_id][plugin_id][prop] = val
# Read the files associated with the plugin
if plugin_name == 'launcher':
# Load the .desktop file associated with a launcher
items = self.panel_plugins[panel_id][plugin_id].get('items')
if items:
self.panel_plugins[panel_id][plugin_id]['item-files'] = collections.OrderedDict()
for item_name in items:
content = self.read_file('{0}-{1}/{2}'.format(plugin_name, plugin_id, item_name))
self.panel_plugins[panel_id][plugin_id]['item-files'][item_name] = content
elif plugin_name in ('cpugraph', 'fsguard', 'netload', 'systemload'):
content = self.read_file('{0}-{1}.rc'.format(plugin_name, plugin_id))
if content is not None:
self.panel_plugins[panel_id][plugin_id]['rc-file'] = content
return True
def read_available_plugins(self):
"""Load the available panel plugins"""
plugins_path = '/usr/share/xfce4/panel/plugins'
logger.debug("loading files from %s", plugins_path)
available_plugins = set()
for filename in os.listdir(plugins_path):
if filename.endswith('.desktop'):
with open(os.path.join(plugins_path, filename), 'r') as fplugin:
for line in fplugin:
if re.match(r'^X-XFCE-Module\s*=\s*(\S+)', line):
# The .desktop file is a module. Let's add its name!
available_plugins.add(filename[:-8])
break
self.available_plugins = available_plugins
return True
def read_config(self):
"""Load all configuration options related to the panels"""
if not self.read_panels():
return False
if not self.read_available_plugins():
return False
return True
def dump_config(self, stream):
"""Print the loaded configuration"""
json.dump(
collections.OrderedDict((('panels', self.panels), ('plugins', self.panel_plugins))),
stream, indent=2)
stream.write('\n')
def configure(self):
"""Apply configuration of the panels"""
for panel_id, panel_config in sorted(self.panels.items()):
if panel_config['position'] == 'p=10;x=0;y=0':
# Bottom panel
logger.info("Found bottom panel with ID %d", panel_id)
if not self.set_panel_prop(panel_id, 'position-locked', True):
return False
if not self.set_panel_prop(panel_id, 'length', 0):
return False
# "Automatically hide the panel" -> "Always"
if not self.set_panel_prop(panel_id, 'autohide-behavior', 2):
return False
elif panel_config['position'] == 'p=6;x=0;y=0':
# Top panel
logger.info("Found top panel with ID %d", panel_id)
if not self.set_panel_prop(panel_id, 'position-locked', True):
return False
if not self.set_panel_prop(panel_id, 'length', 100):
return False
if not self.set_panel_prop(panel_id, 'autohide-behavior', 0):
return False
return True
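# Usage sketch (not part of the original script): the same classes can be
# driven programmatically.  ActionArguments takes (real, verbose, home) just
# as in main() below; passing real=False keeps configure() in dry-run mode,
# and home_dir should point at the target $HOME (mirroring the -H option).
def preview_panel_changes(home_dir):
    """Sketch only: report what configure() would change, without applying it."""
    act_args = ActionArguments(False, True, home_dir)
    panels = Xfce4Panels(act_args)
    if not panels.read_config():
        return False
    return panels.configure()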
def main(argv=None):
parser = argparse.ArgumentParser(
description="Apply settings to an XFCE Desktop environment")
parser.add_argument('-d', '--debug', action='store_true',
help="show debug messages")
parser.add_argument('-n', '--dry-run',
dest='real', action='store_false', default=False,
help="show what would change with --real (default)")
parser.add_argument('-r', '--real', action='store_true',
help="really change the settings")
parser.add_argument('-v', '--verbose', action='store_true',
help="show the settings which would not be modified")
parser.add_argument('-H', '--home', type=str,
help="$HOME environment variable to use")
parser.add_argument('-P', '--show-panels', action='store_true',
help="show panels configuration")
args = parser.parse_args(argv)
logging.basicConfig(
format='[%(levelname)s] %(message)s',
level=logging.DEBUG if args.debug else logging.INFO)
# Try using xfconf-query --version
if not silent_run(['xfconf-query', '--version']):
logger.fatal("xfconf-query does not work")
return False
act_args = ActionArguments(args.real, args.verbose, args.home)
if not configure_xfce4_shortcuts(act_args):
return False
panels = Xfce4Panels(act_args)
if not panels.read_config():
return False
if args.show_panels:
panels.dump_config(sys.stdout)
if not panels.configure():
return False
return True
if __name__ == '__main__':
sys.exit(0 if main() else 1)
| python |
# import argv variable so we can take command line arguments
from sys import argv
# extract the command line arguments from argv and store them in variables
script, filename = argv
# print a formatted string with the filename command line argument inserted
print(f"We're going to erase {filename}")
# print a string
print("If you don't want that, hit CTRL-C (^C)")
# print a string
print("if you do want that, hit RETURN.")
# get input from the user on whether or not they want to erase the contents of filename
input("?")
# print a string
print("Opening the file...")
# open the file referenced by filename in write mode (which truncates the file) and store the returned file object in target
target = open(filename, 'w')
# print a string
print("Truncating the file. Goodbye!")
# truncate the file object stored in target
target.truncate()
# print a string
print("Now I'm going to ask you for three lines.")
# get user input for line 1 and store in line1
line1 = input("line 1: ")
# get user input for line 2 and store in line2
line2 = input("line 2: ")
# get user input for line 3 and store in line3
line3 = input("line 3: ")
# print a string
print("I'm going to write these to the file.")
# write string stored in line1 to file object in target
target.write(line1)
# write a newline character to file object in target
target.write("\n")
# write string stored in line2 to file object in target
target.write(line2)
# write a newline character to file object in target
target.write("\n")
# write string stored in line3 to file object in target
target.write(line3)
# write a newline character to file object in target
target.write("\n")
# print a string
print("And finally we close it.")
# close the file object in target.
target.close()
| python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
from s_analyzer.apps.rest.api import router
from s_analyzer.site.views import HomeView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^$', HomeView.as_view(), name="home"),
]
| python |
from django.db import models
from re import sub
# Create your models here.
class Movie(models.Model):
movie_name = models.CharField(max_length=250, unique=True, blank=False, null=False)
movie_year = models.IntegerField()
imdb_rating = models.DecimalField(max_digits=3, decimal_places=2, blank=True, null=True)
imdb_link = models.URLField(blank=True, null=True)
down720_link = models.URLField(blank=True, null=True)
down1080_link = models.URLField(blank=True, null=True)
image_available = models.BooleanField(default=False)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{} {}'.format(self.movie_name, self.movie_year)
def human_readable_name(self):
return sub('[/ ]+', '_', self.movie_name)
class Actor(models.Model):
actor_name = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.actor_name
class Director(models.Model):
director_name = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.director_name
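# Usage sketch (not part of the models): how the ManyToManyField relations
# above are typically traversed.  The title below is a made-up example and
# assumes migrations have been applied; queries only run when the function is
# called, so defining it here has no import-time side effects.
def example_movie_queries():
    """Sketch only: look up a movie and follow its M2M relations."""
    movie = Movie.objects.get(movie_name='Example Movie')
    actors = movie.actor_set.all()                    # reverse side of Actor.movies
    directors = Director.objects.filter(movies=movie)
    return actors, directors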
class Genre(models.Model):
genre = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.genre
| python
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from datetime import datetime
from helper.utils import TestUtils as tu
from mushroom_rl.core import Agent
from mushroom_rl.algorithms.actor_critic import SAC
from mushroom_rl.core import Core
from mushroom_rl.environments.gym_env import Gym
class CriticNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state, action):
state_action = torch.cat((state.float(), action.float()), dim=1)
q = F.relu(self._h(state_action))
return torch.squeeze(q)
class ActorNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super(ActorNetwork, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state):
return F.relu(self._h(torch.squeeze(state, 1).float()))
def learn_sac():
# MDP
horizon = 200
gamma = 0.99
mdp = Gym('Pendulum-v0', horizon, gamma)
mdp.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Settings
initial_replay_size = 64
max_replay_size = 50000
batch_size = 64
n_features = 64
warmup_transitions = 10
tau = 0.005
lr_alpha = 3e-4
# Approximator
actor_input_shape = mdp.info.observation_space.shape
actor_mu_params = dict(network=ActorNetwork,
n_features=n_features,
input_shape=actor_input_shape,
output_shape=mdp.info.action_space.shape,
use_cuda=False)
actor_sigma_params = dict(network=ActorNetwork,
n_features=n_features,
input_shape=actor_input_shape,
output_shape=mdp.info.action_space.shape,
use_cuda=False)
actor_optimizer = {'class': optim.Adam,
'params': {'lr': 3e-4}}
critic_input_shape = (
actor_input_shape[0] + mdp.info.action_space.shape[0],)
critic_params = dict(network=CriticNetwork,
optimizer={'class': optim.Adam,
'params': {'lr': 3e-4}},
loss=F.mse_loss,
n_features=n_features,
input_shape=critic_input_shape,
output_shape=(1,),
use_cuda=False)
# Agent
agent = SAC(mdp.info, actor_mu_params, actor_sigma_params, actor_optimizer,
critic_params, batch_size, initial_replay_size, max_replay_size,
warmup_transitions, tau, lr_alpha,
critic_fit_params=None)
# Algorithm
core = Core(agent, mdp)
core.learn(n_steps=2 * initial_replay_size,
n_steps_per_fit=initial_replay_size)
return agent
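# Evaluation sketch (not part of the original tests): roll out the learned
# policy for a few episodes and report the mean discounted return.  Assumes
# Core.evaluate and mushroom_rl.utils.dataset.compute_J are available in the
# installed mushroom_rl version.
def evaluate_sac(agent, n_episodes=5):
    """Sketch only: average discounted return of the learned SAC policy."""
    from mushroom_rl.utils.dataset import compute_J
    mdp = Gym('Pendulum-v0', 200, 0.99)
    core = Core(agent, mdp)
    dataset = core.evaluate(n_episodes=n_episodes, render=False)
    return np.mean(compute_J(dataset, mdp.info.gamma))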
def test_sac():
policy = learn_sac().policy
w = policy.get_weights()
w_test = np.array([ 1.6998193, -0.732528, 1.2986078, -0.26860124,
0.5094043, -0.5001421, -0.18989229, -0.30646914])
assert np.allclose(w, w_test)
def test_sac_save(tmpdir):
agent_path = tmpdir / 'agent_{}'.format(datetime.now().strftime("%H%M%S%f"))
agent_save = learn_sac()
agent_save.save(agent_path, full_save=True)
agent_load = Agent.load(agent_path)
for att, method in vars(agent_save).items():
save_attr = getattr(agent_save, att)
load_attr = getattr(agent_load, att)
tu.assert_eq(save_attr, load_attr)
| python |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import numpy as np
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 8,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
flags.DEFINE_integer(
"moe_num_extend", 8,
"The number of attention outputs, used for MoeExtendModel.")
flags.DEFINE_string("moe_method", "none",
"The method used to reorder the output labels ('none', 'ordered' or 'unordered'), "
"used for MoeMaxModel.")
flags.DEFINE_integer(
"class_size", 200,
"The dimension of the prediction projection, used for all chain models.")
flags.DEFINE_integer(
"encoder_size", 100,
"The dimension of the prediction encoder, used for all mix models.")
flags.DEFINE_integer(
"hidden_size_1", 100,
"The size of the first hidden layer, used for AutoEncoderModel.")
flags.DEFINE_integer(
"hidden_channels", 3,
"The number of hidden layers, only used in early experiments.")
flags.DEFINE_integer(
"moe_layers", 1,
"The number of combine layers, used for the combine-related models.")
flags.DEFINE_integer(
"softmax_bound", 1000,
"The number of labels that form one group, only used for MoeSoftmaxModel and MoeDistillSplitModel.")
flags.DEFINE_bool(
"moe_group", False,
"Whether to split the 4716 labels into different groups, used in MoeMix4Model and MoeNoiseModel")
flags.DEFINE_float("noise_std", 0.2, "the standard deviation of noise added to the input.")
flags.DEFINE_float("ensemble_w", 1.0, "ensemble weight used in distill chain models.")
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
output = slim.fully_connected(
model_input, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
shape = model_input.get_shape().as_list()
if FLAGS.frame_features:
model_input = tf.reshape(model_input,[-1,shape[-1]])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
"""
gate_w = tf.get_variable("gate_w", [shape[1], vocab_size * (num_mixtures + 1)], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(gate_w))
gate_activations = tf.matmul(model_input,gate_w)
expert_w = tf.get_variable("expert_w", [shape[1], vocab_size * num_mixtures], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(expert_w))
expert_v = tf.get_variable("expert_v", [vocab_size * num_mixtures], tf.float32,
initializer=tf.constant_initializer(0.0))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(expert_v))
expert_activations = tf.nn.xw_plus_b(model_input,expert_w,expert_v)"""
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
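# Plain-numpy sketch (illustration only, not used by the graph above) of the
# mixture arithmetic in MoeModel: a softmax over num_mixtures + 1 gates per
# label, sigmoid experts, and the dummy expert's gate is discarded.
def _moe_mixture_sketch(gate_logits, expert_logits):
    """Illustration only: combine gates and experts the way MoeModel does.

    gate_logits:   shape [batch * vocab_size, num_mixtures + 1]
    expert_logits: shape [batch * vocab_size, num_mixtures]
    """
    gates = np.exp(gate_logits - gate_logits.max(axis=1, keepdims=True))
    gates /= gates.sum(axis=1, keepdims=True)          # softmax over experts + dummy
    experts = 1.0 / (1.0 + np.exp(-expert_logits))     # per-expert sigmoid
    # The last gate belongs to the untrained dummy expert that always predicts 0,
    # so only the first num_mixtures gates contribute probability mass.
    return np.sum(gates[:, :-1] * experts, axis=1)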
class MoeDistillModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
shape = model_input.get_shape().as_list()
if FLAGS.frame_features:
model_input = tf.reshape(model_input,[-1,shape[-1]])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_sub_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
if distill_labels is not None:
expert_gate = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="expert_gate")
expert_gate = expert_gate*0.8 + 0.1
final_probabilities = distill_labels*(1.0-expert_gate) + final_sub_probabilities*expert_gate
tf.summary.histogram("expert_gate/activations", expert_gate)
else:
final_probabilities = final_sub_probabilities
return {"predictions": final_probabilities, "predictions_class": final_sub_probabilities}
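# Numpy sketch (illustration only) of the blending at the end of
# MoeDistillModel: the learned gate is squashed into (0.1, 0.9), so neither
# the distilled labels nor the freshly trained experts can be switched off
# completely.
def _distill_gate_sketch(distill_labels, model_probs, gate):
    """Illustration only: final = distill*(1-g) + model*g with g in (0.1, 0.9)."""
    gate = gate * 0.8 + 0.1
    return distill_labels * (1.0 - gate) + model_probs * gate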
class MoeDistillEmbeddingModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
"""
embedding_mat = np.loadtxt("./resources/embedding_matrix.model.gz")
embedding_mat = tf.cast(embedding_mat,dtype=tf.float32)
bound = FLAGS.softmax_bound
vocab_size_1 = bound
probabilities_by_distill = distill_labels[:, :vocab_size_1]
embedding_mat = embedding_mat[:vocab_size_1, :]
labels_smooth = tf.matmul(probabilities_by_distill, embedding_mat)
probabilities_by_smooth_1 = (labels_smooth[:, :vocab_size_1] - probabilities_by_distill)/tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True)
probabilities_by_smooth_2 = labels_smooth[:, vocab_size_1:]/tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True)
labels_smooth = tf.concat((probabilities_by_smooth_1, probabilities_by_smooth_2), axis=1)"""
expert_gate = slim.fully_connected(
distill_labels,
1,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="expert_gate")
#final_probabilities = tf.clip_by_value(distill_labels + labels_smooth, 0.0, 1.0)
final_probabilities = distill_labels
return {"predictions": final_probabilities}
class MoeDistillChainModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
shape = model_input.get_shape().as_list()
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
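# Note on the distill chain models (this class and the Norm variants below):
# with the default --ensemble_w=1.0, the final line keeps only the freshly
# trained predictions, so the distilled labels act purely as extra input
# features through "class_inputs"; smaller values mix the teacher back in.
# A one-line sketch of that combination:
def _ensemble_sketch(model_probs, distill_labels, ensemble_w=1.0):
    """Illustration only: the convex combination used by the distill chain models."""
    return model_probs * ensemble_w + distill_labels * (1.0 - ensemble_w)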
class MoeDistillChainNormModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
model_input = tf.nn.l2_normalize(model_input,dim=1)
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = class_input/tf.reduce_sum(distill_labels,axis=1,keep_dims=True)
class_input = tf.nn.l2_normalize(class_input,dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillChainNorm2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
model_input = tf.nn.l2_normalize(model_input,dim=1)
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = class_input/tf.reduce_sum(distill_labels,axis=1,keep_dims=True)
class_input = tf.nn.l2_normalize(class_input,dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillSplitModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
class_size = 256
probabilities_by_distill = distill_labels[:,vocab_size_1:]
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
#class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
final_probabilities = tf.concat((probabilities_by_class_and_batch, probabilities_by_distill), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
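# Numpy sketch (illustration only) of how MoeDistillSplitModel above assembles
# its output: the first FLAGS.softmax_bound classes are re-predicted by the
# mixture of experts, while the remaining classes are copied from the distilled
# labels (which also feed the experts through "class_inputs").  The Split
# variants below use the same split point, though which side is re-predicted
# varies.
def _split_predictions_sketch(moe_probs_head, distill_labels, bound):
    """Illustration only: re-predicted head classes followed by distilled tail classes."""
    return np.concatenate([moe_probs_head, distill_labels[:, bound:]], axis=1)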
class MoeDistillSplit2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
class_size = 256
probabilities_by_distill = distill_labels[:,vocab_size_1:]
probabilities_by_residual = tf.clip_by_value(1.0-tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True), 0.0, 1.0)
probabilities_by_distill_residual = tf.concat((probabilities_by_residual,probabilities_by_distill), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill_residual,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
final_probabilities = tf.concat((probabilities_by_class_and_batch, probabilities_by_distill), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillSplit3Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
vocab_size_2 = vocab_size - vocab_size_1
class_size = 256
probabilities_by_distill = distill_labels[:,:vocab_size_1]
probabilities_by_residual = distill_labels[:,vocab_size_1:]
feature_size = model_input.get_shape().as_list()[1]
model_input = slim.fully_connected(
model_input,
feature_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="model_inputs")
model_input = tf.nn.l2_normalize(model_input, dim=1)
gate_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-1")
expert_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-1")
gating_distribution_1 = tf.nn.softmax(tf.reshape(
gate_activations_1,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution_1 = tf.nn.sigmoid(tf.reshape(
expert_activations_1,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch_1 = tf.reduce_sum(
gating_distribution_1[:, :num_mixtures] * expert_distribution_1, 1)
probabilities_by_class_and_batch_1 = tf.reshape(probabilities_by_class_and_batch_1,
[-1, vocab_size_1])
probabilities_by_class = tf.concat((probabilities_by_class_and_batch_1, probabilities_by_residual), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_2 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_2 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_2])
final_probabilities = tf.concat((probabilities_by_distill, probabilities_by_class_and_batch), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeDistillSplit4Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
vocab_size_2 = vocab_size - vocab_size_1
class_size = 256
probabilities_by_distill = distill_labels[:,:vocab_size_1]
probabilities_by_residual = distill_labels[:,vocab_size_1:]
gate_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-1")
expert_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-1")
gating_distribution_1 = tf.nn.softmax(tf.reshape(
gate_activations_1,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution_1 = tf.nn.sigmoid(tf.reshape(
expert_activations_1,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch_1 = tf.reduce_sum(
gating_distribution_1[:, :num_mixtures] * expert_distribution_1, 1)
probabilities_by_class_and_batch_1 = tf.reshape(probabilities_by_class_and_batch_1,
[-1, vocab_size_1])
probabilities_by_class = tf.concat((probabilities_by_class_and_batch_1, probabilities_by_residual), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_2 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_2 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_2])
final_probabilities = tf.concat((probabilities_by_distill, probabilities_by_class_and_batch), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeSoftmaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def sub_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
name="",
**unused_params):
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
bound = FLAGS.softmax_bound
vocab_size_1 = bound
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates"+name)
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts"+name)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_sigmoid = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
vocab_size_2 = vocab_size - bound
class_size = vocab_size_2
channels = 1
probabilities_by_softmax = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size + 1
else:
sub_vocab_size = vocab_size_2 - (channels-1)*class_size + 1
gate_activations = slim.fully_connected(
model_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i + name)
expert_activations = slim.fully_connected(
model_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i + name)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.softmax(tf.reshape(
expert_activations,
[-1, sub_vocab_size, num_mixtures]),dim=1) # (Batch * #Labels) x num_mixtures
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_subvocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_subvocab = tf.reshape(probabilities_by_subvocab,
[-1, sub_vocab_size])
probabilities_by_subvocab = probabilities_by_subvocab/tf.reduce_sum(probabilities_by_subvocab,axis=1,keep_dims=True)
if i==0:
probabilities_by_softmax = probabilities_by_subvocab[:,:-1]
else:
probabilities_by_softmax = tf.concat((probabilities_by_softmax, probabilities_by_subvocab[:,:-1]),axis=1)
probabilities_by_class = tf.concat((probabilities_by_sigmoid,probabilities_by_softmax),axis=1)
return probabilities_by_class
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
shape = model_input.get_shape().as_list()[1]
class_size = FLAGS.class_size
probabilities_by_class = self.sub_model(model_input,vocab_size,name="pre")
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
probabilities_by_vocab = self.sub_model(vocab_input,vocab_size,name="-%s" % i)
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
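# Schematic sketch (illustration only) of the refinement loop in
# MoeSoftmaxModel above: each of the FLAGS.moe_layers passes projects the
# current predictions (and their complement) back into the feature space,
# concatenates them to the accumulated input, and predicts again.  `project`
# and `predict` are stand-ins for the slim.fully_connected projections and the
# sub_model call.
def _chained_refinement_sketch(model_input, first_probs, n_layers, project, predict):
    """Illustration only: feed predictions back as extra features, layer by layer."""
    probs = first_probs
    feats = model_input
    for _ in range(n_layers):
        feats = np.concatenate([feats, project(probs), project(1.0 - probs)], axis=1)
        probs = predict(feats)
    return probs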
class MoeNegativeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_pos")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts_pos")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities_pos = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_neg")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts_neg")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities_neg = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities_pos/(final_probabilities_pos + final_probabilities_neg + 1e-6)
return {"predictions": final_probabilities, "predictions_positive": final_probabilities_pos,
"predictions_negative": final_probabilities_neg}
class MoeMaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures+1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size*num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
expert_others = slim.fully_connected(
model_input,
vocab_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="others")
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
forward_indices = []
backward_indices = []
for i in range(num_mixtures):
forward_indice = np.arange(vocab_size)
np.random.seed(i)
np.random.shuffle(forward_indice)
backward_indice = np.argsort(forward_indice,axis=None)
forward_indices.append(forward_indice)
backward_indices.append(backward_indice)
forward_indices = tf.constant(np.stack(forward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
backward_indices = tf.constant(np.stack(backward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
forward_indices = tf.stop_gradient(tf.reshape(forward_indices,[-1]))
backward_indices = tf.stop_gradient(tf.reshape(backward_indices,[-1]))
expert_activations = tf.transpose(tf.reshape(expert_activations,[-1,vocab_size*num_mixtures]))
expert_activations = tf.transpose(tf.gather(expert_activations,forward_indices))
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_softmax = tf.transpose(expert_activations,perm=[0,2,1])
expert_softmax = tf.concat((tf.reshape(expert_softmax,[-1,num_mixtures]),tf.reshape(expert_others,[-1,1])),axis=1)
expert_distribution = tf.nn.softmax(tf.reshape(
expert_softmax,
[-1, num_mixtures+1])) # (Batch * #Labels) x num_mixtures
expert_distribution = tf.reshape(expert_distribution[:,:num_mixtures],[-1,num_mixtures,vocab_size])
expert_distribution = tf.reshape(tf.transpose(expert_distribution,perm=[0,2,1]),[-1,vocab_size*num_mixtures])
expert_distribution = tf.transpose(tf.gather(tf.transpose(expert_distribution),backward_indices))
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(probabilities_by_class_and_batch,[-1, vocab_size])
final_probabilities_experts = tf.reshape(expert_distribution,[-1, vocab_size, num_mixtures])
if FLAGS.moe_method=="ordered":
seq = np.loadtxt("labels_ordered.out")
tf_seq = tf.constant(seq,dtype=tf.int32)
final_probabilities = tf.gather(tf.transpose(final_probabilities),tf_seq)
final_probabilities = tf.transpose(final_probabilities)
elif FLAGS.moe_method=="unordered":
seq = np.loadtxt("labels_unordered.out")
tf_seq = tf.constant(seq,dtype=tf.int32)
final_probabilities = tf.gather(tf.transpose(final_probabilities),tf_seq)
final_probabilities = tf.transpose(final_probabilities)
return {"predictions": final_probabilities, "predictions_experts": final_probabilities_experts}
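# Numpy sketch (illustration only) of the index bookkeeping in MoeMaxModel
# above: every mixture gets its own fixed, seeded permutation of the labels
# before the shared softmax step, and the backward indices (the argsort of the
# forward permutation) put the expert outputs back into the original label
# order afterwards.
def _shuffled_expert_indices_sketch(vocab_size, num_mixtures):
    """Illustration only: build the flattened forward/backward gather indices."""
    forward, backward = [], []
    for i in range(num_mixtures):
        perm = np.arange(vocab_size)
        np.random.seed(i)
        np.random.shuffle(perm)
        forward.append(perm)
        backward.append(np.argsort(perm, axis=None))
    forward = np.stack(forward, axis=1) * num_mixtures + np.arange(num_mixtures)[None, :]
    backward = np.stack(backward, axis=1) * num_mixtures + np.arange(num_mixtures)[None, :]
    return forward.reshape(-1), backward.reshape(-1)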
class MoeMaxMixModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 25
class_input = slim.fully_connected(
model_input,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input,probabilities_by_class), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures+1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size*num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
expert_others = slim.fully_connected(
vocab_input,
vocab_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="others")
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
forward_indices = []
backward_indices = []
for i in range(num_mixtures):
forward_indice = np.arange(vocab_size)
np.random.seed(i)
np.random.shuffle(forward_indice)
backward_indice = np.argsort(forward_indice,axis=None)
forward_indices.append(forward_indice)
backward_indices.append(backward_indice)
forward_indices = tf.constant(np.stack(forward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
backward_indices = tf.constant(np.stack(backward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
forward_indices = tf.stop_gradient(tf.reshape(forward_indices,[-1]))
backward_indices = tf.stop_gradient(tf.reshape(backward_indices,[-1]))
expert_activations = tf.transpose(tf.reshape(expert_activations,[-1,vocab_size*num_mixtures]))
expert_activations = tf.transpose(tf.gather(expert_activations,forward_indices))
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_softmax = tf.transpose(expert_activations,perm=[0,2,1])
expert_softmax = tf.concat((tf.reshape(expert_softmax,[-1,num_mixtures]),tf.reshape(expert_others,[-1,1])),axis=1)
expert_distribution = tf.nn.softmax(tf.reshape(
expert_softmax,
[-1, num_mixtures+1])) # (Batch * #Labels) x num_mixtures
expert_distribution = tf.reshape(expert_distribution[:,:num_mixtures],[-1,num_mixtures,vocab_size])
expert_distribution = tf.reshape(tf.transpose(expert_distribution,perm=[0,2,1]),[-1,vocab_size*num_mixtures])
expert_distribution = tf.transpose(tf.gather(tf.transpose(expert_distribution),backward_indices))
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(probabilities_by_class_and_batch,[-1, vocab_size])
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeKnowledgeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
seq = np.loadtxt(FLAGS.class_file)
tf_seq = tf.constant(seq,dtype=tf.float32)
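    # Assumption: FLAGS.class_file holds a dense matrix with vocab_size rows
    # (readable by np.loadtxt), so that tf.matmul(probabilities_by_vocab, tf_seq)
    # below projects per-label probabilities onto a smaller "knowledge" space.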
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
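    # Iterative refinement: each of FLAGS.moe_layers re-runs an MoE on the
    # original features concatenated with two L2-normalized projections of the
    # previous layer's label predictions (one learned directly, one through the
    # tf_seq matrix above). Intermediate predictions are concatenated into
    # probabilities_by_class, presumably so the caller can supervise them as well.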
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = tf.matmul(probabilities_by_vocab,tf_seq)
class_input_2 = slim.fully_connected(
class_input_2,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMixModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
class_input = slim.fully_connected(
model_input,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input, probabilities_by_class), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMixExtendModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
class_size = FLAGS.encoder_size
model_input_stop = tf.stop_gradient(model_input)
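    # The class-level MoE below is fed a stop-gradient copy of the input, so its
    # gradients do not flow back into the shared input features.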
class_input = slim.fully_connected(
model_input_stop,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input, probabilities_by_class),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
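    # Each example is assumed to arrive as FLAGS.moe_num_extend consecutive rows
    # (segments); the label predictions are max-pooled and the class codes
    # mean-pooled over those segments to obtain one output per example.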
final_probabilities = tf.reduce_max(tf.reshape(probabilities_by_vocab,
[-1, num_extends, vocab_size]),axis=1)
probabilities_by_class = tf.reduce_mean(tf.reshape(probabilities_by_class,
[-1, num_extends, class_size]),axis=1)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMix2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
hidden_channels = FLAGS.hidden_channels
shape = model_input.get_shape().as_list()[1]
class_input = slim.fully_connected(
model_input,
shape,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1,class_size, num_mixtures])) # (Batch * #Labels) x num_mixtures
class_expert_distribution = tf.reshape(class_expert_distribution,[-1,num_mixtures])
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
"""
class_expert_activations = slim.fully_connected(
class_input,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
probabilities_by_class = slim.fully_connected(
class_expert_activations,
class_size,
activation_fn=tf.nn.softmax,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="probabilities_by_class")"""
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
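    # Assumption: FLAGS.autoencoder_dir contains pre-trained autoencoder layers
    # saved as text matrices (np.loadtxt-compatible) with the bias in the last
    # row. The layer is applied as a frozen (constant) transform of the class codes.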
vars = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % FLAGS.encoder_layers)
weights = tf.constant(vars[:-1,:],dtype=tf.float32)
bias = tf.reshape(tf.constant(vars[-1,:],dtype=tf.float32),[-1])
class_output = tf.nn.relu(tf.nn.xw_plus_b(probabilities_by_class,weights,bias))
class_output = tf.nn.l2_normalize(class_output,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input, class_output), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
"""
final_probabilities = tf.reshape(probabilities_by_class,[-1,class_size*hidden_channels])
for i in range(FLAGS.encoder_layers, FLAGS.encoder_layers*2):
var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
final_probabilities = tf.nn.xw_plus_b(final_probabilities,weight_i,bias_i)
if i<FLAGS.encoder_layers*2-1:
final_probabilities = tf.nn.relu(final_probabilities)
else:
final_probabilities = tf.nn.sigmoid(final_probabilities)"""
return {"predictions": final_probabilities, "predictions_encoder": probabilities_by_class}
class MoeMix3Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
hidden_channels = FLAGS.hidden_channels
shape = model_input.get_shape().as_list()[1]
class_input = slim.fully_connected(
model_input,
shape,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.reshape(class_expert_activations,[-1,num_mixtures])
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
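    # Manual layer normalization of the class codes (zero mean, unit variance
    # per example) followed by ReLU, before the frozen autoencoder layer below.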
hidden_mean = tf.reduce_mean(probabilities_by_class,axis=1,keep_dims=True)
hidden_std = tf.sqrt(tf.reduce_mean(tf.square(probabilities_by_class-hidden_mean),axis=1,keep_dims=True))
probabilities_by_class = (probabilities_by_class-hidden_mean)/(hidden_std+1e-6)
hidden_2 = tf.nn.relu(probabilities_by_class)
vars = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % FLAGS.encoder_layers)
weights = tf.constant(vars[:-1,:],dtype=tf.float32)
bias = tf.reshape(tf.constant(vars[-1,:],dtype=tf.float32),[-1])
class_output = tf.nn.relu(tf.nn.xw_plus_b(hidden_2,weights,bias))
#class_output = probabilities_by_class
class_output = tf.nn.l2_normalize(class_output,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input, class_output), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_encoder": probabilities_by_class}
class MoeMix4Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
if FLAGS.moe_group:
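      # Grouped variant: the vocabulary is split into `channels` chunks of at most
      # class_size labels. Each chunk gets its own small MoE, and the predictions
      # (and their complements) of the chunks seen so far are embedded, normalized
      # and concatenated to the input of the next chunk.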
channels = vocab_size//class_size + 1
vocab_input = model_input
probabilities_by_class = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size
else:
sub_vocab_size = vocab_size - (channels-1)*class_size
gate_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, sub_vocab_size])
if i==0:
probabilities_by_class = probabilities_by_vocab
else:
probabilities_by_class = tf.concat((probabilities_by_class, probabilities_by_vocab),axis=1)
#probabilities_by_features = tf.stop_gradient(probabilities_by_class)
probabilities_by_features = probabilities_by_class
class_input_1 = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class2-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input_1,class_input_2),axis=1)
"""
class_input_1 = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class1-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input_1),axis=1)"""
else:
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeNoiseModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
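    # Regularization by input noise: during training (FLAGS.train == "train"),
    # Gaussian noise with stddev FLAGS.noise_std is added to the features and the
    # result is re-normalized to unit L2 norm.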
if FLAGS.train=="train":
noise = tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=FLAGS.noise_std, dtype=tf.float32)
model_input = tf.nn.l2_normalize(model_input+noise, 1)
if FLAGS.moe_group:
channels = vocab_size//class_size + 1
vocab_input = model_input
probabilities_by_class = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size
else:
sub_vocab_size = vocab_size - (channels-1)*class_size
gate_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, sub_vocab_size])
if i==0:
probabilities_by_class = probabilities_by_vocab
else:
probabilities_by_class = tf.concat((probabilities_by_class, probabilities_by_vocab),axis=1)
#probabilities_by_features = tf.stop_gradient(probabilities_by_class)
probabilities_by_features = probabilities_by_class
class_input = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class-%s" % i)
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input),axis=1)
else:
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs-%s" % i)
if FLAGS.train=="train":
noise = tf.random_normal(shape=tf.shape(class_input), mean=0.0, stddev=0.2, dtype=tf.float32)
class_input = tf.nn.l2_normalize(class_input+noise, 1)
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMix5Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
feature_sizes = FLAGS.feature_sizes
feature_sizes = [int(feature_size) for feature_size in feature_sizes.split(',')]
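    # The input is assumed to be a concatenation of raw features and pre-computed
    # label probabilities: FLAGS.feature_sizes gives the width of the raw feature
    # block, and everything after it is treated as the label-probability block.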
feature_input = model_input[:,0:feature_sizes[0]]
probabilities_by_class = model_input[:,feature_sizes[0]:]
class_input = slim.fully_connected(
probabilities_by_class,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((feature_input,class_input),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities}
class MoeExtendModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_max(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size]), axis=1)
return {"predictions": final_probabilities}
class MoeExtendDistillChainModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      distill_labels: Optional 'batch_size' x 'vocab_size' matrix of soft label
        predictions (e.g. from a teacher model) that, when provided, are embedded
        and concatenated to the input features.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
class_size = 256
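    # When distill_labels (presumably soft predictions from a teacher model) are
    # provided, they are embedded to class_size units and tiled across the
    # num_extends segments of each example before being concatenated to the input.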
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.reshape(tf.tile(tf.reshape(class_input,[-1,1,class_size]),[1,num_extends,1]),[-1,class_size])
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_max(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size]), axis=1)
return {"predictions": final_probabilities}
class MoeExtendCombineModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
num_extends = FLAGS.moe_num_extend
shape = model_input.get_shape().as_list()[1]
model_input = tf.reshape(model_input,[-1, num_extends, shape])
model_input_0 = model_input[:,0,:]
gate_activations = slim.fully_connected(
model_input_0,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input_0,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
input_layers = []
for i in range(FLAGS.moe_layers-1):
model_input_i = model_input[:,i+1,:]
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
input_layers.append(class_input_1)
vocab_input = tf.concat([model_input_i]+input_layers,axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeExtendSoftmaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
extend_activations = slim.fully_connected(
model_input,
vocab_size,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="extends")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
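    # Unlike the max-pooling variant above, the per-segment predictions are
    # combined with a learned attention: extend_distribution is a softmax over the
    # num_extends segments of each label, and the final prediction is the
    # attention-weighted sum over segments.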
extend_distribution = tf.nn.softmax(tf.reshape(
extend_activations,
        [-1, num_extends, vocab_size]), dim=1)  # Batch x num_extends x vocab_size, normalized over the extend (segment) dimension
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_sum(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size])*extend_distribution,axis=1)
return {"predictions": final_probabilities}
class MoeSepModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
feature_sizes = FLAGS.feature_sizes
feature_sizes = [int(feature_size) for feature_size in feature_sizes.split(',')]
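    # One independent MoE per feature group: FLAGS.feature_sizes describes how the
    # input is split along the feature axis, and the per-group predictions are
    # max-pooled at the end.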
fbegin = 0
final_probabilities_all = []
for i in range(len(feature_sizes)):
feature_size = feature_sizes[i]
feature_input = model_input[:,fbegin:fbegin+feature_size]
fbegin += feature_size
gate = slim.fully_connected(
feature_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert = slim.fully_connected(
feature_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_prob = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_prob = tf.reshape(final_prob,[-1, vocab_size])
final_probabilities_all.append(final_prob)
final_probabilities_all = tf.stack(final_probabilities_all,axis=1)
final_probabilities = tf.reduce_max(final_probabilities_all,axis=1)
return {"predictions": final_probabilities}
class SimModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
embedding_size = model_input.get_shape().as_list()[1]
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
model_input = tf.maximum(model_input,tf.zeros_like(model_input))
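    # The experts are cosine similarities (scaled by 2) between the ReLU-clipped
    # input and a learned, non-negative embedding per class; the gates then mix
    # these similarity scores as in the standard MoE.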
expert_distribution = []
for i in range(num_mixtures):
embeddings = tf.Variable(tf.truncated_normal([vocab_size, embedding_size],stddev=0.1))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(embeddings))
embeddings = tf.maximum(embeddings,tf.zeros_like(embeddings))
norm_embeddings = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = tf.div(embeddings, norm_embeddings)
norm_input = tf.sqrt(tf.reduce_sum(tf.square(model_input), 1, keep_dims=True))
normalized_input = tf.div(model_input,norm_input)
similarity = tf.matmul(normalized_input, normalized_embeddings, transpose_b=True)*2
expert_distribution.append(similarity)
expert_distribution = tf.stack(expert_distribution,axis=2)
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class AutoEncoderModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
model_input = model_input
hidden_size_1 = FLAGS.hidden_size_1
hidden_size_2 = FLAGS.encoder_size
with tf.name_scope("autoencoder"):
hidden_1 = slim.fully_connected(
model_input,
hidden_size_1,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="hidden_1")
hidden_2 = slim.fully_connected(
hidden_1,
hidden_size_2,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="hidden_2")
output_1 = slim.fully_connected(
hidden_2,
hidden_size_1,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="output_1")
output_2 = slim.fully_connected(
output_1,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="output_2")
"""
scale = tf.get_variable("scale", [1, vocab_size], tf.float32,
initializer=tf.constant_initializer(0.0))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(scale))"""
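    # Note: the assignment below discards the decoder output and returns the raw
    # input as the predictions, so the autoencoder layers above are built but
    # their output is not used.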
output_2 = model_input
    return {"predictions": output_2}
import csv

from table import Table
class CSVTable(Table):
def __init__(self, savepath):
self.savepath = savepath
self.file_created = False
super().__init__()
def _table_add(self):
fieldnames = [column.generate_header() for column in self.columns]
with open(self.savepath, mode="w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
def _tablesave(self):
fieldnames = [column.generate_header() for column in self.columns]
values = {column.generate_header(): column.get_last_value() for column in self.columns}
with open(self.savepath, mode="a") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writerow(values)
# -*- coding: utf-8 -*-
import csv
from pathlib import Path
import tkinter as tk
import argparse
import json
def matchKeyToName(pathToJsonfile: str, key: str):
    with open(pathToJsonfile) as jsonFile:
        cityKeysFile = json.load(jsonFile)
    return cityKeysFile[key]['Town']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--classifType', type=str, required=False, default='Tiles')
parser.add_argument('--datasetPath', type=str, required=False, default='C:/Users/hx21262/MAPHIS/datasets')
parser.add_argument('--cityKey', type=str, required=False, default='36')
args = parser.parse_args()
cityName = matchKeyToName(f'{args.datasetPath}/cityKey.json', args.cityKey)
datasetPath = Path(args.datasetPath)
classifiedFolderPath = Path(f'{args.datasetPath}/classifiedMaps/{cityName}')
classifiedFolderPath.mkdir(parents=True, exist_ok=True)
print(f'Classification Type : {args.classifType}')
if args.classifType.lower() == 'labels':
        defaultFeatureList = ['manhole', 'lamppost', 'stone', 'chimney', 'chy', 'hotel',
                              'church', 'workshop', 'firepost', 'river', 'school', 'barrack',
                              'workhouse', 'market', 'chapel', 'bank', 'pub', 'public house',
                              'inn', 'bath', 'theatre', 'police', 'wharf', 'yard', 'green', 'park', 'quarry']
from interactiveWindowLabels import Application
elif args.classifType.lower() == 'tiles':
defaultFeatureList = ['rich residential neighborhood', 'poor residential neighborhood', 'industrial district',
'peri-urban district', 'farm and forest']
from interactiveWindowTiles import Application
elif args.classifType.lower() == 'contours':
defaultFeatureList = ['interesting','not interesting', 'tree', 'factory', 'villa']
from interactiveWindowContours import Application
else:
raise ValueError ("Has to be contours, tiles or labels")
featureListName = f'featureList{args.classifType.capitalize()}.csv'
## Check if feature List file exists, creates it if not
fp = Path(f'{args.datasetPath}/classifiedMaps/{featureListName}')
if not fp.is_file():
with open(fp, 'w', newline='') as csvFile:
fileWriter = csv.writer(csvFile)
for featureName in defaultFeatureList:
fileWriter.writerow([featureName])
root = tk.Tk()
app = Application(root, cityName, datasetPath, classifiedFolderPath)
root.mainloop()
if __name__=='__main__':
    main()
"""Events that are emitted during pipeline execution"""
import abc
import datetime
import json
import enum
class Event():
def __init__(self) -> None:
"""
Base class for events that are emitted from mara.
"""
def to_json(self):
return json.dumps({field: value.isoformat() if isinstance(value, datetime.datetime) else value
for field, value in self.__dict__.items()})
class EventHandler(abc.ABC):
@abc.abstractmethod
def handle_event(self, event: Event):
pass
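# A minimal sketch of a concrete handler (hypothetical name), e.g. for logging
# every emitted event:
#
#     class PrintingEventHandler(EventHandler):
#         def handle_event(self, event: Event):
#             print(event.__class__.__name__, event.to_json())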
class PipelineEvent():
def __init__(self, node_path: [str]) -> None:
"""
Base class for events that are emitted during a pipeline run.
Args:
node_path: The path of the current node in the data pipeline that is run
"""
self.node_path = node_path
def to_json(self):
return json.dumps({field: value.isoformat() if isinstance(value, datetime.datetime) else value
for field, value in self.__dict__.items()})
class RunStarted(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, pid: int) -> None:
"""
A pipeline run started
Args:
node_path: The path of the pipeline that was run
start_time: The time when the run started
pid: The process id of the process that runs the pipeline
"""
super().__init__([])
self.node_path = node_path
self.start_time = start_time
self.pid = pid
class RunFinished(PipelineEvent):
def __init__(self, node_path: [str], end_time: datetime.datetime, succeeded: bool) -> None:
"""
A pipeline run finished
Args:
node_path: The path of the pipeline that was run
end_time: The time when the run finished
succeeded: Whether the run succeeded
"""
super().__init__([])
self.node_path = node_path
self.end_time = end_time
self.succeeded = succeeded
class NodeStarted(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, is_pipeline: bool) -> None:
"""
A task run started.
Args:
node_path: The path of the current node in the data pipeline that is run
start_time: The time when the task started
is_pipeline: Whether the node is a pipeline
"""
super().__init__(node_path)
self.start_time = start_time
self.is_pipeline = is_pipeline
class NodeFinished(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, end_time: datetime.datetime,
is_pipeline: bool, succeeded: bool) -> None:
"""
A run of a task or pipeline finished.
Args:
node_path: The path of the current node in the data pipeline that is run
start_time: The time when the task started
end_time: The time when the task finished
is_pipeline: Whether the node is a pipeline
succeeded: Whether the task succeeded
"""
super().__init__(node_path)
self.start_time = start_time
self.end_time = end_time
self.is_pipeline = is_pipeline
self.succeeded = succeeded
class Output(PipelineEvent):
class Format(enum.EnumMeta):
"""Formats for displaying log messages"""
STANDARD = 'standard'
VERBATIM = 'verbatim'
ITALICS = 'italics'
def __init__(self, node_path: [str], message: str,
format: Format = Format.STANDARD, is_error: bool = False) -> None:
"""
Some text output occurred.
Args:
node_path: The path of the current node in the data pipeline that is run
message: The message to display
format: How to format the message
is_error: Whether the message is considered an error message
"""
super().__init__(node_path)
self.message = message
self.format = format
self.is_error = is_error
self.timestamp = datetime.datetime.now()
# An implementation of reference learning for the game TicTacToe
from enum import Enum
import numpy as np
class TypeData(Enum):
BODY = 0
HAND = 1
class HandJointType(Enum):
BAMB_0 = 0
BAMB_1 = 1
BIG_TOE = 2
BIG_TOE_1 = 3
BIG_TOE_2 = 4
FINGER_1 = 5
FINGER_1_1 = 6
FINGER_1_2 = 7
FINGER_1_3 = 8
FINGER_2 = 9
FINGER_2_1 = 10
FINGER_2_2 = 11
FINGER_2_3 = 12
FINGER_3 = 13
FINGER_3_1 = 14
FINGER_3_2 = 15
FINGER_3_3 = 16
FINGER_4 = 17
FINGER_4_1 = 18
FINGER_4_2 = 19
FINGER_4_3 = 20
class JointType(Enum):
Nose = 0
Neck = 1
RightShoulder = 2
RightElbow = 3
RightHand = 4
LeftShoulder = 5
LeftElbow = 6
LeftHand = 7
RightWaist = 8
RightKnee = 9
RightFoot = 10
LeftWaist = 11
LeftKnee = 12
LeftFoot = 13
RightEye = 14
LeftEye = 15
RightEar = 16
LeftEar = 17
hand_join_indices = [
HandJointType.BAMB_0,
HandJointType.BAMB_1,
HandJointType.BIG_TOE,
HandJointType.BIG_TOE_1,
HandJointType.BIG_TOE_2,
HandJointType.FINGER_1,
HandJointType.FINGER_1_1,
HandJointType.FINGER_1_2,
HandJointType.FINGER_1_3,
HandJointType.FINGER_2,
HandJointType.FINGER_2_1,
HandJointType.FINGER_2_2,
HandJointType.FINGER_2_3,
HandJointType.FINGER_3,
HandJointType.FINGER_3_1,
HandJointType.FINGER_3_2,
HandJointType.FINGER_3_3,
HandJointType.FINGER_4,
HandJointType.FINGER_4_1,
HandJointType.FINGER_4_2,
HandJointType.FINGER_4_3
]
coco_joint_indices= [
JointType.Nose,
JointType.LeftEye,
JointType.RightEye,
JointType.LeftEar,
JointType.RightEar,
JointType.LeftShoulder,
JointType.RightShoulder,
JointType.LeftElbow,
JointType.RightElbow,
JointType.LeftHand,
JointType.RightHand,
JointType.LeftWaist,
JointType.RightWaist,
JointType.LeftKnee,
JointType.RightKnee,
JointType.LeftFoot,
JointType.RightFoot
]
LIMBS = [[JointType.Neck, JointType.RightWaist],
[JointType.RightWaist, JointType.RightKnee],
[JointType.RightKnee, JointType.RightFoot],
[JointType.Neck, JointType.LeftWaist],
[JointType.LeftWaist, JointType.LeftKnee],
[JointType.LeftKnee, JointType.LeftFoot],
[JointType.Neck, JointType.RightShoulder],
[JointType.RightShoulder, JointType.RightElbow],
[JointType.RightElbow, JointType.RightHand],
[JointType.RightShoulder, JointType.RightEar],
[JointType.Neck, JointType.LeftShoulder],
[JointType.LeftShoulder, JointType.LeftElbow],
[JointType.LeftElbow, JointType.LeftHand],
[JointType.LeftShoulder, JointType.LeftEar],
[JointType.Neck, JointType.Nose],
[JointType.Nose, JointType.RightEye],
[JointType.Nose, JointType.LeftEye],
[JointType.RightEye, JointType.RightEar],
[JointType.LeftEye, JointType.LeftEar]]
HANDLINES = [
[HandJointType.BAMB_0, HandJointType.BAMB_1],
[HandJointType.BAMB_1, HandJointType.BIG_TOE],
[HandJointType.BIG_TOE, HandJointType.BIG_TOE_1],
[HandJointType.BIG_TOE_1, HandJointType.BIG_TOE_2],
[HandJointType.BAMB_0, HandJointType.FINGER_1],
[HandJointType.FINGER_1, HandJointType.FINGER_1_1],
[HandJointType.FINGER_1_1, HandJointType.FINGER_1_2],
[HandJointType.FINGER_1_2, HandJointType.FINGER_1_3],
[HandJointType.BAMB_0, HandJointType.FINGER_2],
[HandJointType.FINGER_2, HandJointType.FINGER_2_1],
[HandJointType.FINGER_2_1, HandJointType.FINGER_2_2],
[HandJointType.FINGER_2_2, HandJointType.FINGER_2_3],
[HandJointType.BAMB_0, HandJointType.FINGER_3],
[HandJointType.FINGER_3, HandJointType.FINGER_3_1],
[HandJointType.FINGER_3_1, HandJointType.FINGER_3_2],
[HandJointType.FINGER_3_2, HandJointType.FINGER_3_3],
[HandJointType.BAMB_0, HandJointType.FINGER_4],
[HandJointType.FINGER_4, HandJointType.FINGER_4_1],
[HandJointType.FINGER_4_1, HandJointType.FINGER_4_2],
[HandJointType.FINGER_4_2, HandJointType.FINGER_4_3],
]
body_edges = np.array(
[[0, 1], # neck - nose
[1, 16], [16, 18], # nose - l_eye - l_ear
[1, 15], [15, 17], # nose - r_eye - r_ear
[0, 3], [3, 4], [4, 5], # neck - l_shoulder - l_elbow - l_wrist
[0, 9], [9, 10], [10, 11], # neck - r_shoulder - r_elbow - r_wrist
[0, 6], [6, 7], [7, 8], # neck - l_hip - l_knee - l_ankle
[0, 12], [12, 13], [13, 14]]) # neck - r_hip - r_knee - r_ankle
hand_edges = [[0, 1],
              [1, 2], [2, 3], [3, 4],                # wrist - thumb chain
              [0, 5], [5, 6], [6, 7], [7, 8],        # wrist - index finger chain
              [0, 9], [9, 10], [10, 11], [11, 12],   # wrist - middle finger chain
              [0, 13], [13, 14], [14, 15], [15, 16], # wrist - ring finger chain
              [0, 17], [17, 18], [18, 19], [19, 20]] # wrist - little finger chain
| python |
import tensorflow as tf
import time
import os
import sys
import model_nature as model
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base,'../../'))
import datasets.Img2ImgPipeLine as train_dataset
physical_devices = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.mixed_precision import experimental as mixed_precision
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
######################################################################################################
train_path_A = "G:\\Datasets\\Img2Img\\horse2zebra\\trainA"
train_path_B = "G:\\Datasets\\Img2Img\\horse2zebra\\trainB"
test_path_A = "G:\\Datasets\\Img2Img\\horse2zebra\\testA"
test_path_B = "G:\\Datasets\\Img2Img\\horse2zebra\\testB"
tmp_path = "D:/Work/Codes_tmp/2DCycleGAN-mixed-horse2zebra-Vanilla"
out_path = "D:/Work/Codes_tmp/2DCycleGAN-mixed-horse2zebra-Vanilla/out"
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
if not os.path.exists(out_path):
os.makedirs(out_path)
def map_func(x):
# x shape = [batch,2,256,256,3]
    # Must normalize to match the generator's tanh output, but it is not yet clear which
    # pixel scale to normalize with; we may need to scan all values first
A = tf.reshape(x[:,0,:,:,:],[1,256,256,3], name=None)
A = (A-0.0)/1
B = tf.reshape(x[:,1,:,:,:],[1,256,256,3], name=None)
B = (B-0.0)/1
return A,B
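# Hedged note (an assumption, not part of the original script): if the data pipeline
# yields raw 8-bit pixel values in [0, 255], a common choice that matches a tanh
# generator output is to rescale them to [-1, 1], e.g.
#   A = (A - 127.5) / 127.5
#   B = (B - 127.5) / 127.5
# The (x - 0.0) / 1 scaling used above leaves the values unchanged.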
EPOCHES = 200
BATCH_SIZE = 1
num_threads = 4
dataset = train_dataset.DataPipeLine(train_path_A,train_path_B)
dataset = tf.data.Dataset.from_generator(dataset.generator,output_types=tf.float32)\
.batch(BATCH_SIZE)\
.map(map_func,num_parallel_calls=num_threads)\
.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)
test_set = train_dataset.DataPipeLine(test_path_A,test_path_B)
test_set = tf.data.Dataset.from_generator(test_set.generator,output_types=tf.float32)\
.batch(BATCH_SIZE)\
.map(map_func,num_parallel_calls=num_threads)\
.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)
model = model.CycleGAN(train_set=dataset,
test_set=test_set,
loss_name="Vanilla",
mixed_precision=True,
learning_rate=2e-4,
tmp_path=tmp_path,
out_path=out_path)
model.build(X_shape=[None,256,256,3],Y_shape=[None,256,256,3])
model.train(epoches=EPOCHES) | python |
#
# Hangman
# Python Techdegree
#
# Created by Dulio Denis on 2/9/17.
# Copyright (c) 2017 ddApps. All rights reserved.
# ------------------------------------------------
# Guess what word the computer picked.
#
import random
import os
import sys
# make a list of words
words = [
'apple',
'banana',
'orange',
'coconut',
'strawberry',
'lime',
'grapefruit',
'lemon',
'kumquat',
'pineapple',
'blueberry',
'melon'
]
# clear the screen
def clear():
# if windows
if os.name == 'nt':
os.system('cls')
# else its Unix based like macOS and Linux
else:
os.system('clear')
# draw function
def draw(bad_guesses, good_guesses, secret_word):
# clear the screen first
clear()
# and draw the strikes
print('Strikes: {}/7'.format(len(bad_guesses)))
print('') # a blank line just for formatting
# draw the bad guesses
for letter in bad_guesses:
print(letter, end = ' ')
print('\n\n')
# then draw guessed letters
for letter in secret_word:
if letter in good_guesses:
print(letter, end=' ')
else:
print('_', end=' ')
# get the guess
def get_guess(bad_guesses, good_guesses):
while True:
# take a guess and lowercase it right away
guess = input("Guess a letter: ").lower()
# validate its a legitimate guess
        if len(guess) != 1:
print("You can only guess a single letter")
elif guess in bad_guesses or guess in good_guesses:
print("You've already guessed that letter.")
elif not guess.isalpha():
print("You can only guess letters.")
else:
return guess
# play the game
def play(done):
# clear the screen
clear()
# pick a random word
secret_word = random.choice(words)
# have both a good and bad guess letter list
bad_guesses = []
good_guesses = []
while True:
draw(bad_guesses, good_guesses, secret_word)
guess = get_guess(bad_guesses, good_guesses)
if guess in secret_word:
good_guesses.append(guess)
found = True
for letter in secret_word:
if letter not in good_guesses:
found = False
if found:
print("You win!")
print("The secret word was {}".format(secret_word))
done = True
else:
bad_guesses.append(guess)
if len(bad_guesses) == 7:
draw(bad_guesses, good_guesses, secret_word)
print("You lost!")
print("The secret word was {}".format(secret_word))
done = True
if done:
play_again = input('Play again? Y/n ').lower()
if play_again != 'n':
return play(done=False)
else:
sys.exit()
def welcome():
print('Welcome to Hangman!')
start = input('Press enter/return to start or Q to quit ').lower()
if start == 'q':
print('Thanks for playing.')
sys.exit()
else:
return True
done = False
while True:
clear()
welcome()
play(done)
| python |
#!/usr/bin/python3
from shutil import copyfile
from shutil import move
from os import remove
from os import environ
import os
import os.path
import sys
import subprocess
homedir = os.environ['HOME']
bash_target_file = homedir + "/.bashrc"
bash_backup_file = homedir + "/.backup-bashrc"
bash_new_file = homedir + "/.newbashrc"
interfaces = []
def get_network_interfaces():
for line in open('/proc/net/dev', 'r'):
if line.find(":") != -1 and line.find("lo") == -1:
interfaces.append(line.split(":")[0].strip())
def modify_bash_terminal_line(selected_interface):
with open(bash_new_file, "w") as newfile:
with open (bash_target_file) as oldfile:
for line in oldfile:
if line.find("PS1") != -1 and not line.strip().startswith("#"):
### This modifies the terminal to show timestamp, IP, and current directory inline
newfile.write("PS1=\'[`date +\"%d-%b-%y %T\"`]\\[\\033[01;31m\\] `ifconfig " + selected_interface + " 2>/dev/null | sed -n 2,2p | cut -d\" \" -f 10`\\[\\033[00m\\] \\[\\033[01;34m\\]\\W\\[\\033[00m\\] > \'" + "\n")
else:
newfile.write(line)
remove(bash_target_file)
move(bash_new_file, bash_target_file)
def add_log_file_creation():
with open(bash_target_file, "a") as f:
### Add a line to the .bashrc file to create a new log file and log all shell commands
f.write("test \"$(ps -ocommand= -p $PPID | awk \'{print $1}\')\" == \'script\' || (script -f $HOME/$(date +\"%d-%b-%y_%H-%M-%S\")_shell.log)")
def zsh_log_file_creation(user):
zsh_filename = "/" + user + "/.zshrc"
with open(zsh_filename, "a") as file:
file.write("precmd() { eval 'RETRN_VAL=$?;logger -p local6.debug \"$(whoami) [$$]: $(history | tail -n1 | sed \"s/^[ ]*[0-9]\+[ ]*//\" ) [$RETRN_VAL]\"' }")
def main():
if ("zsh" in environ['SHELL']):
with open("/etc/rsyslog.d/commands.conf", "w") as commands:
commands.write("local6.* /var/log/commands.log")
        result = subprocess.run(["service", "rsyslog", "restart"], capture_output=True, text=True)
# Make modifications to .zshrc
if os.path.isfile("/root/.zshrc"):
copyfile("/root/.zshrc", "/root/.backup_zshrc") ### make a back-up just in case :)
zsh_log_file_creation("root")
else:
print("Something's wrong... there's no \".zshrc\" file for root!")
if os.path.isfile("/home/kali/.zshrc"):
copyfile("/home/kali/.zshrc", "/home/kali/.backup_zshrc") ### make a back-up just in case :)
zsh_log_file_creation("home/kali")
else:
print("Something's wrong... there's no \".zshrc\" file for kali!")
else:
if os.path.isfile(bash_target_file):
### Figure out what network interfaces are available
selected_interface = None
get_network_interfaces()
### If there is only one interface, don't bother asking the user - just set that
if len(interfaces) != 0 and len(interfaces) == 1:
selected_interface = interfaces[0]
else: ### Otherwise, ask the user to select from the available network interfaces
while selected_interface not in interfaces:
                    selected_interface = input("Choose your active interface: " + ' '.join(interfaces) + "\n")
copyfile(bash_target_file, bash_backup_file) ### make a back-up of the .bashrc - just in case :)
modify_bash_terminal_line(selected_interface)
add_log_file_creation()
else:
print("Something's wrong... there's no \".bashrc\" file!")
if __name__ == "__main__":
main()
| python |
import json
import uuid
import factory
import mock
from django.test import TestCase
from facility_profile.models import Facility
from facility_profile.models import MyUser
from facility_profile.models import SummaryLog
from test.support import EnvironmentVarGuard
from .helpers import serialized_facility_factory
from morango.models.certificates import Filter
from morango.models.core import DeletedModels
from morango.models.core import HardDeletedModels
from morango.models.core import InstanceIDModel
from morango.models.core import RecordMaxCounter
from morango.models.core import Store
from morango.sync.controller import _self_referential_fk
from morango.sync.controller import MorangoProfileController
class FacilityModelFactory(factory.DjangoModelFactory):
class Meta:
model = Facility
name = factory.Sequence(lambda n: "Fac %d" % n)
class StoreModelFacilityFactory(factory.DjangoModelFactory):
class Meta:
model = Store
model_name = "facility"
profile = "facilitydata"
last_saved_instance = uuid.uuid4().hex
last_saved_counter = 1
dirty_bit = True
class SerializeIntoStoreTestCase(TestCase):
def setUp(self):
InstanceIDModel.get_or_create_current_instance()
self.range = 10
self.mc = MorangoProfileController("facilitydata")
self.original_name = "ralphie"
self.new_name = "rafael"
def test_all_models_get_serialized(self):
[FacilityModelFactory() for _ in range(self.range)]
self.mc.serialize_into_store()
self.assertEqual(len(Store.objects.all()), self.range)
def test_no_models_get_serialized(self):
# set dirty bit off on new models created
[
FacilityModelFactory.build().save(update_dirty_bit_to=False)
for _ in range(self.range)
]
# only models with dirty bit on should be serialized
self.mc.serialize_into_store()
self.assertFalse(Store.objects.exists())
def test_dirty_bit_gets_set(self):
[FacilityModelFactory() for _ in range(self.range)]
# dirty bit should be on
for facility in Facility.objects.all():
self.assertTrue(facility._morango_dirty_bit)
self.mc.serialize_into_store()
# dirty bit should have been toggled off
for facility in Facility.objects.all():
self.assertFalse(facility._morango_dirty_bit)
def test_store_models_get_updated(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
store_facility = Store.objects.first()
deserialized_model = json.loads(store_facility.serialized)
self.assertEqual(deserialized_model["name"], self.original_name)
Facility.objects.update(name=self.new_name)
self.mc.serialize_into_store()
store_facility = Store.objects.first()
deserialized_model = json.loads(store_facility.serialized)
self.assertEqual(deserialized_model["name"], self.new_name)
def test_last_saved_counter_updates(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
old_counter = Store.objects.first().last_saved_counter
Facility.objects.all().update(name=self.new_name)
self.mc.serialize_into_store()
new_counter = Store.objects.first().last_saved_counter
self.assertEqual(old_counter + 1, new_counter)
def test_last_saved_instance_updates(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
old_instance_id = Store.objects.first().last_saved_instance
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
Facility.objects.all().update(name=self.new_name)
self.mc.serialize_into_store()
new_instance_id = Store.objects.first().last_saved_instance
self.assertNotEqual(old_instance_id, new_instance_id)
self.assertEqual(new_instance_id, new_id.id)
def test_extra_fields_dont_get_overwritten(self):
serialized = """{"username": "deadbeef", "height": 6.0, "weight": 100}"""
MyUser.objects.create(username="deadbeef")
self.mc.serialize_into_store()
Store.objects.update(serialized=serialized)
MyUser.objects.update(username="alivebeef")
self.mc.serialize_into_store()
serialized = json.loads(Store.objects.first().serialized)
self.assertIn("height", serialized)
def test_updates_store_deleted_flag(self):
fac = FacilityModelFactory()
fac_id = fac.id
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(pk=fac_id).deleted)
fac.delete()
self.assertTrue(DeletedModels.objects.exists())
self.mc.serialize_into_store()
self.assertFalse(DeletedModels.objects.exists())
self.assertTrue(Store.objects.get(pk=fac_id).deleted)
def test_cascading_delete_updates_store_deleted_flag(self):
fac = FacilityModelFactory()
child = FacilityModelFactory(parent_id=fac.id)
child_id = child.id
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(pk=child_id).deleted)
fac.delete()
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(pk=child_id).deleted)
def test_conflicting_data_appended(self):
self.maxDiff = None
serialized = json.dumps({"username": "deadb\neef"})
conflicting = []
user = MyUser.objects.create(username="user")
self.mc.serialize_into_store()
# add serialized fields to conflicting data
conflicting.insert(0, serialized)
conflicting.insert(0, json.dumps(user.serialize()))
# set store record and app record dirty bits to true to force serialization merge conflict
Store.objects.update(conflicting_serialized_data=serialized, dirty_bit=True)
user.username = "user1"
user.save(update_dirty_bit_to=True)
self.mc.serialize_into_store()
# assert we have placed serialized object into store's serialized field
st = Store.objects.get(id=user.id)
self.assertEqual(json.loads(st.serialized), user.serialize())
# assert store serialized field is moved to conflicting data
conflicting_serialized_data = st.conflicting_serialized_data.split("\n")
for x in range(len(conflicting)):
self.assertEqual(conflicting[x], conflicting_serialized_data[x])
def test_filtered_serialization_single_filter(self):
fac = FacilityModelFactory()
user = MyUser.objects.create(username="deadbeef")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store(filter=Filter(user._morango_partition))
self.assertFalse(Store.objects.filter(id=fac.id).exists())
self.assertTrue(Store.objects.filter(id=user.id).exists())
self.assertTrue(Store.objects.filter(id=log.id).exists())
def test_filtered_serialization_multiple_filter(self):
fac = FacilityModelFactory()
user = MyUser.objects.create(username="deadbeef")
user2 = MyUser.objects.create(username="alivebeef")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store(
filter=Filter(user._morango_partition + "\n" + user2._morango_partition)
)
self.assertFalse(Store.objects.filter(id=fac.id).exists())
self.assertTrue(Store.objects.filter(id=user2.id).exists())
self.assertTrue(Store.objects.filter(id=user.id).exists())
self.assertTrue(Store.objects.filter(id=log.id).exists())
def test_self_ref_fk_class_adds_value_to_store(self):
root = FacilityModelFactory()
child = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
self.assertEqual(Store.objects.get(id=child.id)._self_ref_fk, root.id)
def test_regular_class_leaves_value_blank_in_store(self):
log = SummaryLog.objects.create(user=MyUser.objects.create(username="user"))
self.mc.serialize_into_store()
self.assertEqual(Store.objects.get(id=log.id)._self_ref_fk, "")
def test_previously_deleted_store_flag_resets(self):
# create and delete object
user = MyUser.objects.create(username="user")
user_id = user.id
self.mc.serialize_into_store()
MyUser.objects.all().delete()
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(id=user_id).deleted)
# recreate object with same id
user = MyUser.objects.create(username="user")
# ensure deleted flag is updated after recreation
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(id=user_id).deleted)
def test_previously_hard_deleted_store_flag_resets(self):
# create and delete object
user = MyUser.objects.create(username="user")
user_id = user.id
self.mc.serialize_into_store()
user.delete(hard_delete=True)
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(id=user_id).hard_deleted)
# recreate object with same id
user = MyUser.objects.create(username="user")
# ensure hard deleted flag is updated after recreation
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(id=user_id).hard_deleted)
def test_hard_delete_wipes_serialized(self):
user = MyUser.objects.create(username="user")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store()
Store.objects.update(conflicting_serialized_data="store")
st = Store.objects.get(id=log.id)
self.assertNotEqual(st.serialized, "")
self.assertNotEqual(st.conflicting_serialized_data, "")
user.delete(hard_delete=True) # cascade hard delete
self.mc.serialize_into_store()
st.refresh_from_db()
self.assertEqual(st.serialized, "{}")
self.assertEqual(st.conflicting_serialized_data, "")
def test_in_app_hard_delete_propagates(self):
user = MyUser.objects.create(username="user")
log_id = uuid.uuid4().hex
log = SummaryLog(user=user, id=log_id)
StoreModelFacilityFactory(
model_name="user", id=user.id, serialized=json.dumps(user.serialize())
)
store_log = StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
user.delete(hard_delete=True)
# preps log to be hard_deleted
self.mc.deserialize_from_store()
# updates store log to be hard_deleted
self.mc.serialize_into_store()
store_log.refresh_from_db()
self.assertTrue(store_log.hard_deleted)
self.assertEqual(store_log.serialized, "{}")
def test_store_hard_delete_propagates(self):
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
log = SummaryLog(user=user)
log.save(update_dirty_bit_to=False)
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
hard_deleted=True,
deleted=True,
)
# make sure hard_deleted propagates to related models even if they are not hard_deleted
self.mc.deserialize_from_store()
self.assertTrue(HardDeletedModels.objects.filter(id=log.id).exists())
class RecordMaxCounterUpdatesDuringSerialization(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.mc = MorangoProfileController("facilitydata")
self.fac1 = FacilityModelFactory(name="school")
self.mc.serialize_into_store()
self.old_rmc = RecordMaxCounter.objects.first()
def test_new_rmc_for_existing_model(self):
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
Facility.objects.update(name="facility")
self.mc.serialize_into_store()
new_rmc = RecordMaxCounter.objects.get(
instance_id=new_id.id, store_model_id=self.fac1.id
)
new_store_record = Store.objects.get(id=self.fac1.id)
self.assertEqual(new_rmc.counter, new_store_record.last_saved_counter)
self.assertEqual(new_rmc.instance_id, new_store_record.last_saved_instance)
def test_update_rmc_for_existing_model(self):
Facility.objects.update(name="facility")
self.mc.serialize_into_store()
# there should only be 1 RecordMaxCounter for a specific instance_id and a specific model (unique_together)
self.assertEqual(
RecordMaxCounter.objects.filter(
instance_id=self.current_id.id, store_model_id=self.fac1.id
).count(),
1,
)
new_rmc = RecordMaxCounter.objects.get(
instance_id=self.current_id.id, store_model_id=self.fac1.id
)
new_store_record = Store.objects.get(id=self.fac1.id)
self.assertEqual(self.old_rmc.counter + 1, new_rmc.counter)
self.assertEqual(new_rmc.counter, new_store_record.last_saved_counter)
self.assertEqual(new_rmc.instance_id, new_store_record.last_saved_instance)
def test_new_rmc_for_non_existent_model(self):
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
new_fac = FacilityModelFactory(name="college")
self.mc.serialize_into_store()
new_rmc = RecordMaxCounter.objects.get(
instance_id=new_id.id, store_model_id=new_fac.id
)
new_store_record = Store.objects.get(id=new_fac.id)
self.assertNotEqual(new_id.id, self.current_id.id)
self.assertEqual(new_store_record.last_saved_instance, new_rmc.instance_id)
self.assertEqual(new_store_record.last_saved_counter, new_rmc.counter)
class DeserializationFromStoreIntoAppTestCase(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.range = 10
self.mc = MorangoProfileController("facilitydata")
for i in range(self.range):
self.ident = uuid.uuid4().hex
StoreModelFacilityFactory(
pk=self.ident, serialized=serialized_facility_factory(self.ident)
)
def test_dirty_store_records_are_deserialized(self):
self.assertFalse(Facility.objects.all().exists())
self.mc.deserialize_from_store()
self.assertEqual(len(Facility.objects.all()), self.range)
def test_clean_store_records_do_not_get_deserialized(self):
self.assertFalse(Facility.objects.exists())
Store.objects.update(dirty_bit=False)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.exists())
def test_deleted_models_do_not_get_deserialized(self):
Store.objects.update_or_create(defaults={"deleted": True}, id=self.ident)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=self.ident).exists())
def test_deleted_models_deletes_them_in_app(self):
# put models in app layer
self.mc.deserialize_from_store()
# deleted flag on store should delete model in app layer
Store.objects.update_or_create(
defaults={"deleted": True, "dirty_bit": True}, id=self.ident
)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=self.ident).exists())
def test_update_app_with_newer_data_from_store(self):
name = "test"
fac = FacilityModelFactory(id=self.ident, name=name)
fac.save(update_dirty_bit_to=False)
self.assertEqual(fac.name, name)
self.mc.deserialize_from_store()
fac = Facility.objects.get(id=self.ident)
self.assertNotEqual(fac.name, name)
def test_handle_extra_field_deserialization(self):
# modify a store record by adding extra serialized field
store_model = Store.objects.get(id=self.ident)
serialized = json.loads(store_model.serialized)
serialized.update({"wacky": True})
store_model.serialized = json.dumps(serialized)
store_model.save()
# deserialize records
self.mc.deserialize_from_store()
# by this point no errors should have occurred but we check list of fields anyways
fac = Facility.objects.get(id=self.ident)
self.assertNotIn("wacky", fac.__dict__)
def test_store_dirty_bit_resets(self):
self.assertTrue(Store.objects.filter(dirty_bit=True))
self.mc.deserialize_from_store()
self.assertFalse(Store.objects.filter(dirty_bit=True))
def test_record_with_dirty_bit_off_doesnt_deserialize(self):
st = Store.objects.first()
st.dirty_bit = False
st.save()
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=st.id).exists())
def test_broken_fk_leaves_store_dirty_bit(self):
serialized = """{"user_id": "40de9a3fded95d7198f200c78e559353", "id": "bd205b5ee5bc42da85925d24c61341a8"}"""
st = StoreModelFacilityFactory(
id=uuid.uuid4().hex, serialized=serialized, model_name="contentsummarylog"
)
self.mc.deserialize_from_store()
st.refresh_from_db()
self.assertTrue(st.dirty_bit)
def test_invalid_model_leaves_store_dirty_bit(self):
user = MyUser(username="a" * 21)
st = StoreModelFacilityFactory(
model_name="user",
id=uuid.uuid4().hex,
serialized=json.dumps(user.serialize()),
)
self.mc.deserialize_from_store()
st.refresh_from_db()
self.assertTrue(st.dirty_bit)
def test_deleted_model_propagates_to_store_record(self):
"""
It could be the case that we have two store records, one that is deleted and the other that has a fk pointing to the deleted record.
When we deserialize, we want to ensure that the record with the fk pointer also gets the deleted flag set, while also not
deserializing the data into a model.
"""
# user will be deleted
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
# log may be synced in from other device
log = SummaryLog(user_id=user.id)
log.id = log.calculate_uuid()
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
deleted=True,
)
StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
# make sure delete propagates to store due to deleted foreign key
self.mc.deserialize_from_store()
# have to serialize to update deleted models
self.mc.serialize_into_store()
self.assertFalse(SummaryLog.objects.filter(id=log.id).exists())
self.assertTrue(Store.objects.get(id=log.id).deleted)
def test_hard_deleted_model_propagates_to_store_record(self):
"""
It could be the case that we have two store records, one that is hard deleted and the other that has a fk pointing to the hard deleted record.
When we deserialize, we want to ensure that the record with the fk pointer also gets the hard deleted flag set, while also not
deserializing the data into a model.
"""
# user will be deleted
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
# log may be synced in from other device
log = SummaryLog(user_id=user.id)
log.id = log.calculate_uuid()
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
deleted=True,
hard_deleted=True,
)
StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
# make sure delete propagates to store due to deleted foreign key
self.mc.deserialize_from_store()
# have to serialize to update deleted models
self.mc.serialize_into_store()
self.assertFalse(SummaryLog.objects.filter(id=log.id).exists())
self.assertTrue(Store.objects.get(id=log.id).hard_deleted)
def _create_two_users_to_deserialize(self):
user = MyUser(username="test", password="password")
user2 = MyUser(username="test2", password="password")
user.save()
user2.save()
self.mc.serialize_into_store()
user.username = "changed"
user2.username = "changed2"
Store.objects.filter(id=user.id).update(serialized=json.dumps(user.serialize()), dirty_bit=True)
Store.objects.filter(id=user2.id).update(serialized=json.dumps(user2.serialize()), dirty_bit=True)
return user, user2
def test_regular_model_deserialization(self):
# deserialization should be able to handle multiple records
user, user2 = self._create_two_users_to_deserialize()
self.mc.deserialize_from_store()
self.assertFalse(MyUser.objects.filter(username="test").exists())
self.assertFalse(MyUser.objects.filter(username="test2").exists())
self.assertTrue(MyUser.objects.filter(username="changed").exists())
self.assertTrue(MyUser.objects.filter(username="changed2").exists())
def test_filtered_deserialization(self):
# filtered deserialization only impacts specific records
user, user2 = self._create_two_users_to_deserialize()
self.mc.deserialize_from_store(filter=Filter(user._morango_partition))
self.assertFalse(MyUser.objects.filter(username="test").exists())
self.assertTrue(MyUser.objects.filter(username="test2").exists())
self.assertTrue(MyUser.objects.filter(username="changed").exists())
self.assertFalse(MyUser.objects.filter(username="changed2").exists())
class SelfReferentialFKDeserializationTestCase(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.mc = MorangoProfileController("facilitydata")
def test_self_ref_fk(self):
self.assertEqual(_self_referential_fk(Facility), "parent_id")
self.assertEqual(_self_referential_fk(MyUser), None)
def test_delete_model_in_store_deletes_models_in_app(self):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
child2 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
# simulate a node being deleted and synced
Store.objects.filter(id=child2.id).update(deleted=True)
Store.objects.update(dirty_bit=True)
grandchild1 = FacilityModelFactory(parent=child2)
grandchild2 = FacilityModelFactory(parent=child2)
self.mc.deserialize_from_store()
# ensure tree structure in app layer is correct
child1 = Facility.objects.filter(id=child1.id)
self.assertTrue(child1.exists())
self.assertEqual(child1[0].parent_id, root.id)
self.assertFalse(Facility.objects.filter(id=child2.id).exists())
self.assertFalse(Facility.objects.filter(id=grandchild1.id).exists())
self.assertFalse(Facility.objects.filter(id=grandchild2.id).exists())
def test_models_created_successfully(self):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
child2 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
Facility.objects.all().delete()
DeletedModels.objects.all().delete()
Store.objects.update(dirty_bit=True, deleted=False)
self.mc.deserialize_from_store()
# ensure tree structure in app layer is correct
self.assertTrue(Facility.objects.filter(id=root.id).exists())
child1 = Facility.objects.filter(id=child1.id)
self.assertTrue(child1.exists())
self.assertEqual(child1[0].parent_id, root.id)
child2 = Facility.objects.filter(id=child2.id)
self.assertTrue(child2.exists())
self.assertEqual(child2[0].parent_id, root.id)
def test_deserialization_of_model_with_missing_parent(self):
self._test_deserialization_of_model_with_missing_parent(correct_self_ref_fk=True)
def test_deserialization_of_model_with_mismatched_self_ref_fk(self):
self._test_deserialization_of_model_with_missing_parent(correct_self_ref_fk=False)
def _test_deserialization_of_model_with_missing_parent(self, correct_self_ref_fk):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
new_child = Store.objects.get(id=child1.id)
data = json.loads(new_child.serialized)
new_child.id = data["id"] = "a" * 32
data["parent_id"] = "b" * 32
if correct_self_ref_fk:
new_child._self_ref_fk = data["parent_id"]
new_child.serialized = json.dumps(data)
new_child.dirty_bit = True
new_child.save()
self.mc.deserialize_from_store()
new_child.refresh_from_db()
self.assertTrue(new_child.dirty_bit)
self.assertIn("exist", new_child.deserialization_error)
def test_deserialization_of_model_with_missing_foreignkey_referent(self):
user = MyUser.objects.create(username="penguin")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store()
new_log = Store.objects.get(id=log.id)
data = json.loads(new_log.serialized)
new_log.id = data["id"] = "f" * 32
data["user_id"] = "e" * 32
new_log.serialized = json.dumps(data)
new_log.dirty_bit = True
new_log.save()
self.mc.deserialize_from_store()
new_log.refresh_from_db()
self.assertTrue(new_log.dirty_bit)
self.assertIn("exist", new_log.deserialization_error) | python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys
import inquirer
import untangle
import requests
import platform
from colors import *
#If you want to use the program using an alias
#uncomment the following line and write your correct path
#os.chdir("/home/user/test/tunein-cli/")
type={}
station={}
headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:74.0) Gecko/20100101 Firefox/74.0' }
source="http://opml.radiotime.com/Browse.ashx"
ERASE_LINE = '\x1b[1J'
ERASE_ALL = '\x1b[g'
GO_HOME = '\x1b[H'
SCROLL = '\x1b[1000M'
sys.stdout.write(ERASE_LINE)
sys.stdout.write(GO_HOME)
erase="clear && printf '\e[3J'"
if "Windows" in platform.system():
erase="cls"
#check mplayer
try:
std=os.popen("mplayer").read()
if len(std) == 0:
raise
except:
print underline('\nmplayer is not installed\nPlease install mplayer first.')
exit()
def get(url,s):
page = requests.get(url)
xml=page.content
if s=="true":
obj = untangle.parse(xml)
return obj
else:
return xml
def scrape(url,keyword):
if url=="":
url=source
os.system(erase)
out=['<<back']
dup_out=['<<back']
type={}
station={}
obj=get(url,"true")
if keyword!="":
stream=get(url,"true")
if dir(stream.opml.body.outline).count("outline")>2:
for i in obj.opml.body.outline.outline:
type[i["text"]]=i["URL"]
else:
if isinstance(keyword, int)==True:
target=keyword
else:
for i in stream.opml.body.outline:
if i['key'] == keyword:
target=stream.opml.body.outline.index(i)
for i in obj.opml.body.outline[target].outline:
type[i["text"]]=i["URL"]
else:
for i in obj.opml.body.outline:
type[i["text"]]=i["URL"]
a=1
for i in type.keys():
if i.strip() == "More Stations":
st1="[%s] " %(a)
st2=green(i)
out.insert(1,"%s%s" %(st1,st2))
dup_out.insert(1,"%s%s" %(st1,st2))
elif i.strip() == "Find by Name":
st1="[%s] " %(a)
st2=red(i)
out.insert(2,"%s%s" %(st1,st2))
dup_out.insert(2,"%s%s" %(st1,st2))
else:
st1="[%s] " %(a)
st2=u''.join(i).encode("utf-8")
out.append("%s%s" %(st1,bold(st2)))
dup_out.append("%s%s" %(st1,st2))
a+=1
ask=[inquirer.List('opt',message="Choose:",choices=out)]
ans=inquirer.prompt(ask)['opt']
if ans == "<<back":
main()
else:
choice=int(dup_out[out.index(ans)].split()[0][1:-1])
choice-=1
st_url=type[type.keys()[choice]]
if st_url != None and "Tune.ashx?id" in st_url:
st_title=type.keys()[choice]
newurl=get(st_url,"false")
if len(newurl.split())>1:
newurl=newurl.split()[0]
playlist(newurl,st_title)
if st_url==None:
tt=dup_out[out.index(ans)].split()
tt.remove(dup_out[out.index(ans)].split()[0])
for i in obj.opml.body.outline:
if i["text"]==" ".join(tt):
key=i["key"]
if key==None:
key=choice
#print "SCRAPE:",url,key
scrape(url,key)
scrape(st_url,"")
def playlist(url,title):
global run
print "\nTitle:",bold(u''.join(title).encode("utf-8"))
print "STREAM:",bold(url)
if ".pls" in url:
print "pls file found"
url=os.popen("python getter.py '%s false'" %(url.strip())).read()[6:]
print "FOUND:",url
if run=="true":
print "Opening stream..."
print "To stop streaming press enter:"
os.system("mplayer -really-quiet %s" %(url))
print ""
kill=raw_input("exit:")
os.system("pkill -9 mplayer")
main()
elif run == "false":
try:
title.encode('ascii')
new_title=title
if new_title.startswith(".")==True:
new_title=new_title[1:]
except UnicodeEncodeError:
new_title="".join(x for x in title if x.isalnum())
if new_title.startswith(".")==True:
new_title=new_title[1:]
new_title=new_title.encode('utf8')
#title="playlist"
file=open("%s.pls" %(new_title),"w")
file.write("[playlist]")
file.write("\nFile1=%s" %(url.strip()))
file.write("\nTitle1=%s" %(r''.join(new_title)))
file.write("\nLength1=-1")
file.write("\nNumberOfEntries=1")
file.write("\nVersion=2")
file.close()
print bold("Location: "+os.path.abspath("%s.pls" %(new_title)))
print "done."
exit()
elif run == "info":
exit()
elif run == "browser":
print "Opening stream in browser..."
if "Linux" in platform.system():
os.popen("xdg-open %s" %(url))
elif "Darwin" in platform.system():
os.popen("open %s" %(url))
elif "Windows" in platform.system():
os.popen("start %s" %(url))
main()
elif run == "fav":
fav=open("fav_st.txt","a+")
fav.write("%s %s" %(u''.join(title).encode("utf-8"),url))
fav.close()
print "added.\npress enter to continue:", raw_input()
main()
#START from HERE
def main():
global run
run="false"
os.system(erase)
ask1=[inquirer.List('opt',message="Select Option:",choices=[
'[1]'+bold(': Open Stream'),
'[2]'+bold(': Download Stream'),
'[3]'+bold(': Show Stream Source'),
'[4]'+bold(': Open In Browser'),
'[5]'+bold(': Add to Favourites'),
'[6]'+bold(': Add custom station'),
'[7]'+bold(': Favourites'),
'[8]'+bold(': Exit')])]
ans1=inquirer.prompt(ask1)['opt']
if ans1[1:2] == "1":
run="true"
elif ans1[1:2] == "2":
run="false"
elif ans1[1:2] == "3":
run="info"
elif ans1[1:2] == "4":
run="browser"
elif ans1[1:2] == "5":
run="fav"
elif ans1[1:2] == "6":
c_name=raw_input(bold("Name:"))
c_url=raw_input(bold("Address:"))
fav=open("fav_st.txt","a+")
fav.write("%s %s" %(u''.join(c_name).encode("utf-8"),c_url))
fav.close()
print "added.\npress enter to continue:", raw_input()
main()
elif ans1[1:2] == "7":
favlist={}
dupfavlist=["<<back"]
dup2favlist=["<<back"]
fav=open("fav_st.txt","r").read().splitlines()
for item in fav:
if len(item)!=0:
favlist[" ".join(item.split()[0:-1])]=item.split()[-1]
dupfavlist.append(" ".join(item.split()[0:-1]))
dup2favlist.append(bold(" ".join(item.split()[0:-1])))
os.system(erase)
ask2=[inquirer.List('opt',message="Choose:",choices=dup2favlist)]
ans2=inquirer.prompt(ask2)['opt']
if ans2 == "<<back":
main()
run="true"
playlist(favlist[dupfavlist[dup2favlist.index(ans2)]],ans2.decode("utf-8"))
elif ans1[1:2] == "8":
print bold("Bye.")
exit()
scrape("","")
main()
| python |
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# Allows to drag parent widget when holding pushbutton
# To use it you need to set screen_geometry in your QWidget first
class DragButton(QPushButton):
def __init__(self, parent: QWidget, constant_x0: bool):
super(DragButton, self).__init__()
self.parent = parent
self.__mousePressPos = None
self.__mouseMovePos = None
self.constantX0 = constant_x0 # left edge of screen
self.posY = 0
def mousePressEvent(self, event: QMouseEvent) -> None:
if event.button() == Qt.LeftButton:
self.__mousePressPos = event.globalPos()
self.__mouseMovePos = event.globalPos()
super(DragButton, self).mousePressEvent(event)
def mouseMoveEvent(self, event: QMouseEvent) -> None:
if event.buttons() == Qt.LeftButton:
# adjust offset from clicked point to origin of widget
curr_pos = self.parent.mapToGlobal(self.parent.pos())
global_pos = event.globalPos()
diff = global_pos - self.__mouseMovePos
new_pos = self.parent.mapFromGlobal(curr_pos + diff)
if self.constantX0:
new_pos.setX(0)
if new_pos.y() < 0:
new_pos.setY(0)
if new_pos.y() > self.parent.screen_geometry.bottom() - self.parent.height():
new_pos.setY(self.parent.screen_geometry.bottom() - self.parent.height())
self.parent.move(new_pos)
self.__mouseMovePos = global_pos
super(DragButton, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event: QMouseEvent) -> None:
if self.__mousePressPos is not None:
moved = event.globalPos() - self.__mousePressPos
if moved.manhattanLength() > 3:
event.ignore()
# print("Menu Y: %d" % self.parent.mapToGlobal(self.parent.pos()).y())
self.posY = self.parent.mapToGlobal(self.parent.pos()).y()
elif hasattr(self.parent, "show_hide_buttons"):
# Since this class is used in MainWidget AND NetWidget need to check which one is calling
# and hide parents buttons only if it has method for that.
# Cannot use isinstance() because importing MainWidget would cause circular import.
show_hide_buttons = getattr(self.parent, "show_hide_buttons")
if hasattr(show_hide_buttons, "__call__"):
show_hide_buttons()
if hasattr(self.parent, "update_pos_size"):
update_pos_size = getattr(self.parent, "update_pos_size")
if hasattr(update_pos_size, "__call__"):
update_pos_size()
else:
super(DragButton, self).mouseReleaseEvent(event)
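# Hedged usage sketch (an assumption, not part of the original module): a minimal parent
# widget wiring up DragButton. As the note above requires, `screen_geometry` is set on the
# parent before dragging; the widget and button names below are illustrative only.
if __name__ == "__main__":
    import sys

    app = QApplication(sys.argv)
    window = QWidget()
    # DragButton reads parent.screen_geometry to clamp the vertical position while dragging.
    window.screen_geometry = app.primaryScreen().availableGeometry()
    layout = QVBoxLayout(window)
    button = DragButton(parent=window, constant_x0=True)
    button.setText("Drag me")
    layout.addWidget(button)
    window.show()
    sys.exit(app.exec_())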
| python |
from csv import reader
from . import Destination
from . import DestinationPro
from . import ProtocolPort
def read_prot_port_info(info):
prot_info = {"HTTP": ["1", "1", "1"], "HTTPS": ["1", "0", "1"]}
with open(info, "r") as f:
csv_reader = reader(f)
next(csv_reader)
for row in csv_reader:
prot_port = row[0].upper()
well_known = row[1]
human_readable = row[2]
imp = row[4]
prot_info[prot_port] = [well_known, human_readable, imp]
return prot_info
#constructs DestinationPros from an output CSV
#useful for generating plots without having to rerun analyses
def load(script_dir, out_csv_path):
print("Loading results from %s..." % out_csv_path)
prot_enc_dict = {"encrypted": "1", "unencrypted": "0", "unknown": "-1"}
prots_info = read_prot_port_info(script_dir + "/protocol_analysis/protocols_info.csv")
dst_pro = []
with open(out_csv_path, "r") as f:
csv_reader = reader(f)
next(csv_reader)
for row in csv_reader:
ip = row[0]
host = row[1]
host_full = row[2]
bytes_snd = row[3]
bytes_rcv = row[4]
pckt_snd = row[5]
pckt_rcv = row[6]
country = row[7]
party = row[8]
org = row[9]
prot_port = row[10]
enc = row[11]
dst = Destination.Destination(ip, host, party, host_full, country, org)
try:
prot_info = prots_info[prot_port.upper()]
prot = ProtocolPort.ProtocolPort(prot_port, prot_enc_dict[enc.lower()],
prot_info[0], prot_info[1], prot_info[2])
except KeyError:
prot = ProtocolPort.ProtocolPort(prot_port, '-1', '-1', '-1', '-1')
dp = DestinationPro.DestinationPro(dst, prot)
dp.add_all(int(bytes_snd), int(bytes_rcv), int(pckt_snd), int(pckt_rcv))
dst_pro.append(dp)
return dst_pro
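# Hedged usage sketch (an assumption, not part of the original package): from another
# module in the same package, the loader above can rebuild the DestinationPro list from
# a results CSV produced by a previous run. The module and path names are illustrative.
#
#   from .result_loader import load          # hypothetical module name for this file
#   dst_pros = load(script_dir, "results/out.csv")
#   print("loaded %d destination/protocol pairs" % len(dst_pros))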
| python |
# When running, comment out the sections you don't need
# Dynamic typing
var = 'Hello World' # string
print(var)
var = 100 # int
print(var+10)
print('-----')
# No integer overflow
var = 17**3000 # 17 to the power of 3000
print(var)
print('-----')
# swap
a=1
b=2
c=3
print(a,b,c)
c,a,b=b,c,a
print(a,b,c)
print('-----')
# string index
var1 = 'Hello World'
var2 = "Python Programming"
print(var1[0]) # H, indexing starts at 0
print(var2[1:5]) # "ytho", indices 1 up to (but not including) 5
print('-----')
| python |
import os, sys, inspect
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import simulation_parameters
import numpy as np
import pylab
import MergeSpikefiles
from FigureCreator import plot_params
import matplotlib.cm as cm
import json
def plot_raster(params, fn, ax, pn, title='', color='k', alpha=1.):
print 'Loading Spikes from:', params['%s_spikes_merged_fn_base' % cell_type]
if (os.path.exists(fn) == False):
Merger = MergeSpikefiles.MergeSpikefiles(params)
Merger.merge_spiketimes_files(params['%s_spiketimes_fn_base' % (cell_type)], params['%s_spiketimes_merged_fn_base' % (cell_type)], pn)
print 'Loading ', fn
data = np.loadtxt(fn)
assert (data.size > 0), 'ERROR file %s has 0 size\nIf there was a problem when merging them, delete the empty one and rerun' % (fn)
ax.plot(data[:,0], data[:,1], 'o', markersize=5, markeredgewidth=.0, color=color, alpha=alpha)
ax.set_xlim((0, params['t_sim']))
ax.set_title(title)
ax.set_xlabel('Time [ms]')
# ax.set_ylabel('Cell GID')
ylabels = ax.get_yticklabels()
yticks = ax.get_yticks()
new_ylabels = []
for i_, y in enumerate(yticks[0:]):
# for i_, y in enumerate(yticks[1:]):
new_ylabels.append('%d' % (y - params['%s_offset' % cell_type]))
ax.set_ylim((-1 + params['%s_offset' % cell_type], params['n_%s' % cell_type] + params['%s_offset' % cell_type] + 1))
if len(new_ylabels) > 0:
ax.set_yticklabels(new_ylabels)
xlabels = ax.get_xticklabels()
xticks = ax.get_xticks()
new_xlabels = ['']
for i_, x in enumerate(xticks[1:-1]):
# for i_, x in enumerate(xticks[1:]):
new_xlabels.append('%d' % x)
new_xlabels.append('')
ax.set_xticklabels(new_xlabels)
def get_sniff_amplitude(x, tstart, tstop, T, t_shift, amp):
f_x = 0
if (x > tstart) and (x < tstop):
f_x = (amp * (np.sin(x / (T) - t_shift))**2)
return f_x
def plot_sniff_input(params, ax):
if params['with_sniffing_input']:
tstop = params['t_stop'] = 1200 # [ms]
tstart = params['t_start'] = 200 # [ms]
T = params['sniff_period'] = 80. # [ms]
t_shift = params['t_shift_sniff'] = 40. # [ms]
else:
print 'This was run without sniffing input\nReturn None'
return None
times = np.arange(0, params['t_sim'], 5)
ylim = ax.get_ylim()
alpha_max = .2
c = 'b'
for t in times:
f_x = get_sniff_amplitude(t, tstart, tstop, T, t_shift, 1.0)
# print 'f_x', f_x
ax.plot((t, t), (ylim[0], ylim[1]), lw=4, ls='-', c=c, alpha=f_x * alpha_max)
if __name__ == '__main__':
info_txt = \
"""
Usage:
python plot_pattern_completion_rivalry.py [PATTERN_NUMBER]
"""
# python plot_pattern_completion_rivalry.py [TRAINING_FOLDER] [TEST_FOLDER] [PATTERN_NUMBER_MIN] [PATTERN_NUMBER_MAX]
assert (len(sys.argv) > 1), 'ERROR: pattern number not given\n' + info_txt
pn_max = int(sys.argv[1])
training_folder = 'Cluster_OcOcLearning_nGlom40_nHC12_nMC30_vqOvrlp4_np50_OcOnly/'
plot_folder = 'Cluster_PatternCompletionTestPostLearningWithSniff_fOR0.50_nGlom40_nHC12_nMC30_vqOvrlp4_np50_FullSystem/'
params_fn = os.path.abspath(plot_folder) + '/Parameters/simulation_parameters.json'
param_tool = simulation_parameters.parameter_storage(params_fn=params_fn)
params = param_tool.params
training_params_fn = os.path.abspath(training_folder) + '/Parameters/simulation_parameters.json'
training_param_tool = simulation_parameters.parameter_storage(params_fn=training_params_fn)
training_params = training_param_tool.params
cell_type = 'readout'
# cell_type = 'pyr'
# cell_type = 'mit'
for pn in xrange(pn_max):
training_fn = training_params['%s_spiketimes_merged_fn_base' % cell_type] + str(pn) + '.dat'
test_fn = params['%s_spiketimes_merged_fn_base' % cell_type] + str(pn) + '.dat'
plot_params['figure.subplot.left'] = .11
plot_params['figure.subplot.top'] = .92
plot_params['figure.subplot.right'] = .98
plot_params['xtick.labelsize'] = 24
plot_params['ytick.labelsize'] = 24
plot_params['axes.labelsize'] = 32
plot_params['axes.titlesize'] = 32
pylab.rcParams.update(plot_params)
fig = pylab.figure()
ax = fig.add_subplot(111)
color_0 = '#A6A6A6'
color_1 = 'b'
# title = 'Pattern completion test pattern %d' % (pn)
# title = 'MT spikes'
title = '%s spikes ' % (cell_type.capitalize())
plot_raster(training_params, training_fn, ax, pn, title=title, color=color_0, alpha=0.9)
plot_raster(params, test_fn, ax, pn, title=title, color=color_1, alpha=1.)
# plot_sniff_input(params, ax)
        output_fn = params['figure_folder'] + '/' + 'completion_raster_%s_%d.png' % (cell_type, pn)
print 'Saving figure to', output_fn
pylab.savefig(output_fn, dpi=(300))
pylab.show()
| python |
# %%
# ml + loss vs inner steps (Sigmoid best val)
import numpy as np
import matplotlib.pyplot as plt
from pylab import MaxNLocator
from pathlib import Path
print('running')
save_plot = True
# save_plot = False
# - data for distance
inner_steps_for_dist = [1, 2, 4, 8, 16, 32]
meta_test_cca = [0.2801, 0.2866, 0.2850, 0.2848, 0.2826, 0.2914]
meta_test_cca_std = [0.0351, 0.0336, 0.0322, 0.0341, 0.0321, 0.0390]
# - data for meta-lost
inner_steps_for_loss = [0, 1, 2, 4, 8, 16, 32]
loss_maml0 = 43.43485323588053
meta_test_loss = [loss_maml0, 10.404328906536103, 4.988216777642568, 5.07447034517924, 5.449032692114512, 5.36303452650706, 4.339294484257698]
# - create plot
fig, axs = plt.subplots(2, 1, sharex=True, tight_layout=True)
axs[0].errorbar(inner_steps_for_dist, meta_test_cca, yerr=meta_test_cca_std, marker='x', label='dCCA')
# axs[0].errorbar(inner_steps_for_dist, meta_test_ned, yerr=meta_test_ned_std, marker='x', label='NED')
axs[0].axhline(y=0.12, color='r', linestyle='--', label='dCCA previous work [15]')
axs[0].legend()
axs[0].set_title('Representation difference vs adaption\'s inner steps ')
axs[0].set_ylabel('Represenation change')
# axs[0].set_ylim([0, 1])
axs[1].plot(inner_steps_for_loss, meta_test_loss, marker='x', label='loss', color='g')
axs[1].set_title('Meta-Validation loss vs adaptation\'s inner steps')
axs[1].set_xlabel('adaptation\'s inner steps')
axs[1].set_ylabel('Loss')
# axs[1].axhline(y=loss_maml0, color='g', linestyle='--', label='not adaptated')
axs[1].get_xaxis().set_major_locator(MaxNLocator(integer=True))
axs[1].legend()
plt.tight_layout()
if save_plot:
root = Path('~/Desktop').expanduser()
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.png')
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.svg')
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.pdf')
plt.show()
#%%
# ml + loss vs inner steps (ReLU best net)
import numpy as np
import matplotlib.pyplot as plt
from pylab import MaxNLocator
from pathlib import Path
print('running')
save_plot = True
# save_plot = False
# - data for distance
inner_steps_for_dist = [1, 2, 4, 8, 16, 32]
meta_test_cca = [0.2876, 0.2962, 0.2897, 0.3086, 0.2951, 0.3024]
meta_test_cca_std = [0.0585, 0.0649, 0.0575, 0.0625, 0.0565, 0.0620]
# - data for meta-loss
inner_steps_for_loss = [0, 1, 2, 4, 8, 16, 32]
loss_maml0 = 19.27044554154078
# loss_maml0_std = 1.019144981585053
meta_test_loss = [loss_maml0,
5.545517734686533, 7.434794012705485, 6.754467636346817, 6.577781716982524, 3.731084116299947, 6.21407161851724]
# plt.title("Meta-test vs Depth of ResNet")
fig, axs = plt.subplots(2, 1, sharex=True, tight_layout=True)
axs[0].errorbar(inner_steps_for_dist, meta_test_cca, yerr=meta_test_cca_std, marker='x', label='dCCA')
axs[0].axhline(y=0.12, color='r', linestyle='--', label='dCCA previous work [15]')
axs[0].legend()
axs[0].set_title('Representation difference vs adaption\'s inner steps ')
axs[0].set_ylabel('Represenation change')
# axs[0].set_ylim([0, 1])
axs[1].plot(inner_steps_for_loss, meta_test_loss, marker='x', label='loss', color='g')
axs[1].set_title('Meta-Validation loss vs adaptation\'s inner steps')
axs[1].set_xlabel('adaptation\'s inner steps')
axs[1].set_ylabel('Loss')
# axs[1].axhline(y=loss_maml0, color='g', linestyle='--', label='not adaptated')
axs[1].get_xaxis().set_major_locator(MaxNLocator(integer=True))
axs[1].legend()
plt.tight_layout()
if save_plot:
root = Path('~/Desktop').expanduser()
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.png')
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.svg')
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.pdf')
plt.show()
print('done') | python |
# -*- coding: utf-8 -*-
"""
Password generator to generate a password based on the specified pattern.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2018 - 2019 by rgb-24bit.
:license: MIT, see LICENSE for more details.
"""
from .__version__ import __version__, __description__
from .__version__ import __author__, __author_email__
from .__version__ import __license__, __copyright__
from rgpg.core import cli
if __name__ == '__main__':
    cli()
| python |
"""Module :mod:`perslay.archi` implement the persistence layer."""
# Authors: Mathieu Carriere <[email protected]>
# License: MIT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Post-processing operation with combination of batch normalization, dropout and relu
def _post_processing(vector, pro, dropout_value=.9):
for c in pro:
if c == "b":
vector = tf.layers.batch_normalization(vector)
if c == "d":
vector = tf.nn.dropout(vector, dropout_value)
if c == "r":
vector = tf.nn.relu(vector)
return vector
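# Hedged usage note (an assumption, not part of the original library): the `pro` string is
# read character by character, so for example pro="bdr" applies batch normalization, then
# dropout, then ReLU, in that order:
#   processed = _post_processing(vector, pro="bdr", dropout_value=0.8)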
# Vectorization implementing DeepSet architecture
def permutation_equivariant_layer(inp, dimension, perm_op, L_init, G_init, bias_init, L_const, G_const, bias_const, train_vect):
""" DeepSet PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
lbda = tf.get_variable("L", shape=[dimension_before, dimension], initializer=L_init, trainable=train_vect) if not L_const else tf.get_variable("L", initializer=L_init)
b = tf.get_variable("b", shape=[1, 1, dimension], initializer=bias_init, trainable=train_vect) if not bias_const else tf.get_variable("b", initializer=bias_init)
A = tf.reshape(tf.einsum("ijk,kl->ijl", inp, lbda), [-1, num_pts, dimension])
if perm_op is not None:
if perm_op == "max":
beta = tf.tile(tf.expand_dims(tf.reduce_max(inp, axis=1), 1), [1, num_pts, 1])
elif perm_op == "min":
beta = tf.tile(tf.expand_dims(tf.reduce_min(inp, axis=1), 1), [1, num_pts, 1])
elif perm_op == "sum":
beta = tf.tile(tf.expand_dims(tf.reduce_sum(inp, axis=1), 1), [1, num_pts, 1])
else:
raise Exception("perm_op should be min, max or sum")
gamma = tf.get_variable("G", shape=[dimension_before, dimension], initializer=G_init, trainable=train_vect) if not G_const else tf.get_variable("G", initializer=G_init)
B = tf.reshape(tf.einsum("ijk,kl->ijl", beta, gamma), [-1, num_pts, dimension])
return A - B + b
else:
return A + b
# Vectorizations taken from "Learning Representations of Persistence Barcodes"
def rational_hat_layer(inp, num_elements, q, mean_init, r_init, mean_const, r_const, train_vect):
""" Rational Hat PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
r = tf.get_variable("r", shape=[1, 1, 1], initializer=r_init, trainable=train_vect) if not r_const else tf.get_variable("r", initializer=r_init)
bc_inp = tf.expand_dims(inp, -1)
norms = tf.norm(bc_inp - mu, ord=q, axis=2)
return 1/(1 + norms) - 1/(1 + tf.abs(tf.abs(r)-norms))
def rational_layer(inp, num_elements, mean_init, variance_init, alpha_init, mean_const, variance_const, alpha_const, train_vect):
""" Rational PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
sg = tf.get_variable("s", shape=[1, 1, dimension_before, num_elements], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
al = tf.get_variable("a", shape=[1, 1, num_elements], initializer=alpha_init, trainable=train_vect) if not alpha_const else tf.get_variable("a", initializer=alpha_init)
bc_inp = tf.expand_dims(inp, -1)
return 1/tf.pow(1+tf.reduce_sum(tf.multiply(tf.abs(bc_inp - mu), tf.abs(sg)), axis=2), al)
def exponential_layer(inp, num_elements, mean_init, variance_init, mean_const, variance_const, train_vect):
""" Exponential PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
sg = tf.get_variable("s", shape=[1, 1, dimension_before, num_elements], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
bc_inp = tf.expand_dims(inp, -1)
return tf.exp(tf.reduce_sum(-tf.multiply(tf.square(bc_inp - mu), tf.square(sg)), axis=2))
# Vectorizations implementing persistence landscapes
def landscape_layer(inp, num_samples, sample_init, sample_const, train_vect):
""" Landscape PersLay """
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
return tf.maximum( .5 * (inp[:, :, 1:2] - inp[:, :, 0:1]) - tf.abs(sp - .5 * (inp[:, :, 1:2] + inp[:, :, 0:1])), np.array([0]))
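# Note: .5*(d - b) - |s - .5*(d + b)| equals min(s - b, d - s), i.e. the tent
# function of the interval [b, d] evaluated at sample point s; the outer maximum
# clips it at zero, which is exactly the landscape evaluation at s.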
# Vectorizations implementing Betti curves
def betti_layer(inp, theta, num_samples, sample_init, sample_const, train_vect):
""" Betti PersLay """
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
X, Y = inp[:, :, 0:1], inp[:, :, 1:2]
return 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) )
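# Note: the sigmoid argument is positive precisely when the sample point sp lies
# inside the interval [X, Y], so each diagram point contributes a smoothed
# indicator; summing these contributions (perm_op "sum") yields a smoothed Betti curve.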
# Vectorizations implementing persistence entropy
def entropy_layer(inp, theta, num_samples, sample_init, sample_const, train_vect):
""" Entropy PersLay
WARNING: this function assumes that padding values are zero
"""
bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32)))
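    # The constant matrix above maps each point (birth, death) to
    # (birth, persistence) = (b, d - b), so L below is the bar length.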
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2]
LN = tf.multiply(L, 1. / tf.expand_dims(tf.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1))
entropy_terms = tf.where(LN > 0., -tf.multiply(LN, tf.log(LN)), LN)
return tf.multiply(entropy_terms, 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) ))
# Vectorizations implementing persistence images
def image_layer(inp, image_size, image_bnds, variance_init, variance_const, train_vect):
""" Persistence Image PersLay """
bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32)))
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
coords = [tf.range(start=image_bnds[i][0], limit=image_bnds[i][1], delta=(image_bnds[i][1] - image_bnds[i][0]) / image_size[i]) for i in range(dimension_before)]
M = tf.meshgrid(*coords)
mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0)
sg = tf.get_variable("s", shape=[1], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
bc_inp = tf.reshape(bp_inp, [-1, num_pts, dimension_before] + [1 for _ in range(dimension_before)])
return tf.exp(tf.reduce_sum( -tf.square(bc_inp-mu) / (2*tf.square(sg[0])), axis=2)) / (2*np.pi*tf.square(sg[0]))
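# Note: image_layer evaluates one isotropic Gaussian bump per diagram point, centered
# at its (birth, persistence) coordinates, on the grid defined by image_bnds/image_size;
# summing over points (e.g. perm_op "sum") gives a persistence image.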
def perslay_channel(output, name, diag, **kwargs):
""" PersLay channel for persistence diagrams
    output : list to which the perslay output will be appended
name : name of the operation for tensorflow
diag : big matrix of shape [N_diag, N_pts_per_diag, dimension_diag (coordinates of points) + 1 (mask--0 or 1)]
"""
try:
train_weight = kwargs["train_weight"]
except KeyError:
train_weight = True
try:
train_vect = kwargs["train_vect"]
except KeyError:
train_vect = True
N, dimension_diag = diag.get_shape()[1], diag.get_shape()[2]
tensor_mask = diag[:, :, dimension_diag - 1]
tensor_diag = diag[:, :, :dimension_diag - 1]
if kwargs["persistence_weight"] == "linear":
with tf.variable_scope(name + "-linear_pweight"):
C = tf.get_variable("C", shape=[1], initializer=kwargs["coeff_init"], trainable=train_weight) if not kwargs["coeff_const"] else tf.get_variable("C", initializer=kwargs["coeff_init"])
weight = C * tf.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1])
if kwargs["persistence_weight"] == "power":
with tf.variable_scope(name + "-power_pweight"):
p = kwargs["power_p"]
C = tf.get_variable("C", shape=[1], initializer=kwargs["coeff_init"], trainable=train_weight) if not kwargs["coeff_const"] else tf.get_variable("C", initializer=kwargs["coeff_init"])
weight = C * tf.pow(tf.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1]), p)
if kwargs["persistence_weight"] == "grid":
with tf.variable_scope(name + "-grid_pweight"):
W = tf.get_variable("W", shape=kwargs["grid_size"], initializer=kwargs["grid_init"], trainable=train_weight) if not kwargs["grid_const"] else tf.get_variable("W", initializer=kwargs["grid_init"])
indices = []
for dim in range(dimension_diag-1):
[m, M] = kwargs["grid_bnds"][dim]
coords = tf.slice(tensor_diag, [0, 0, dim], [-1, -1, 1])
ids = kwargs["grid_size"][dim] * (coords - m)/(M - m)
indices.append(tf.cast(ids, tf.int32))
weight = tf.expand_dims(tf.gather_nd(params=W, indices=tf.concat(indices, axis=2)), -1)
if kwargs["persistence_weight"] == "gmix":
with tf.variable_scope(name + "-gmix_pweight"):
M = tf.get_variable("M", shape=[1,1,2,kwargs["gmix_num"]], initializer=kwargs["gmix_m_init"], trainable=train_weight) if not kwargs["gmix_m_const"] else tf.get_variable("M", initializer=kwargs["gmix_m_init"])
V = tf.get_variable("V", shape=[1,1,2,kwargs["gmix_num"]], initializer=kwargs["gmix_v_init"], trainable=train_weight) if not kwargs["gmix_v_const"] else tf.get_variable("V", initializer=kwargs["gmix_v_init"])
bc_inp = tf.expand_dims(tensor_diag, -1)
weight = tf.expand_dims(tf.reduce_sum(tf.exp(tf.reduce_sum(-tf.multiply(tf.square(bc_inp - M), tf.square(V)), axis=2)), axis=2), -1)
# First layer of channel: processing of the persistence diagrams by vectorization of diagram points
if kwargs["layer"] == "pm": # Channel with permutation equivariant layers
for idx, (dim, pop) in enumerate(kwargs["peq"]):
with tf.variable_scope(name + "-perm_eq-" + str(idx)):
tensor_diag = permutation_equivariant_layer(tensor_diag, dim, pop, kwargs["weight_init"], kwargs["weight_init"], kwargs["bias_init"], kwargs["weight_const"], kwargs["weight_const"], kwargs["bias_const"], train_vect)
elif kwargs["layer"] == "ls": # Channel with landscape layer
with tf.variable_scope(name + "-samples"):
tensor_diag = landscape_layer(tensor_diag, kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "bc": # Channel with Betti layer
with tf.variable_scope(name + "-samples"):
tensor_diag = betti_layer(tensor_diag, kwargs["theta"], kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "en": # Channel with entropy layer
with tf.variable_scope(name + "-samples"):
tensor_diag = entropy_layer(tensor_diag, kwargs["theta"], kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "im": # Channel with image layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = image_layer(tensor_diag, kwargs["image_size"], kwargs["image_bnds"], kwargs["variance_init"], kwargs["variance_const"], train_vect)
elif kwargs["layer"] == "ex": # Channel with exponential layer
with tf.variable_scope(name + "-gaussians"):
tensor_diag = exponential_layer(tensor_diag, kwargs["num_elements"], kwargs["mean_init"], kwargs["variance_init"], kwargs["mean_const"], kwargs["variance_const"], train_vect)
elif kwargs["layer"] == "rt": # Channel with rational layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = rational_layer(tensor_diag, kwargs["num_elements"], kwargs["mean_init"], kwargs["variance_init"], kwargs["alpha_init"], kwargs["mean_const"], kwargs["variance_const"], kwargs["alpha_const"], train_vect)
elif kwargs["layer"] == "rh": # Channel with rational hat layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = rational_hat_layer(tensor_diag, kwargs["num_elements"], kwargs["q"], kwargs["mean_init"], kwargs["r_init"], kwargs["mean_const"], kwargs["r_const"], train_vect)
output_dim = len(tensor_diag.shape) - 2
vector = None # to avoid warning
if output_dim == 1:
# Apply weight and mask
if kwargs["persistence_weight"] is not None:
tiled_weight = tf.tile(weight, [1, 1, tensor_diag.shape[2].value])
tensor_diag = tf.multiply(tensor_diag, tiled_weight)
tiled_mask = tf.tile(tf.expand_dims(tensor_mask, -1), [1, 1, tensor_diag.shape[2].value])
masked_layer = tf.multiply(tensor_diag, tiled_mask)
# Permutation invariant operation
if kwargs["perm_op"] == "topk": # k first values
masked_layer_t = tf.transpose(masked_layer, perm=[0, 2, 1])
values, indices = tf.nn.top_k(masked_layer_t, k=kwargs["keep"])
vector = tf.reshape(values, [-1, kwargs["keep"] * tensor_diag.shape[2].value])
elif kwargs["perm_op"] == "sum": # sum
vector = tf.reduce_sum(masked_layer, axis=1)
elif kwargs["perm_op"] == "max": # maximum
vector = tf.reduce_max(masked_layer, axis=1)
        elif kwargs["perm_op"] == "mean": # mean
vector = tf.reduce_mean(masked_layer, axis=1)
# Second layer of channel: fully-connected (None if fc_layers is set to [], default value)
for idx, tup in enumerate(kwargs["fc_layers"]):
            # tup is a tuple whose elements are
# 1. dim of fully-connected,
# 2. string for processing,
# 3. (optional) dropout value
with tf.variable_scope(name + "-fc-" + str(idx)):
vector = tf.layers.dense(vector, tup[0])
with tf.variable_scope(name + "-bn-" + str(idx)):
if len(tup) == 2:
vector = _post_processing(vector, tup[1])
else:
vector = _post_processing(vector, tup[1], tup[2])
elif output_dim == 2:
# Apply weight and mask
if kwargs["persistence_weight"] is not None:
weight = tf.expand_dims(weight, -1)
tiled_weight = tf.tile(weight, [1, 1, tensor_diag.shape[2].value, tensor_diag.shape[3].value])
tensor_diag = tf.multiply(tensor_diag, tiled_weight)
tiled_mask = tf.tile(tf.reshape(tensor_mask, [-1, N, 1, 1]), [1, 1, tensor_diag.shape[2].value, tensor_diag.shape[3].value])
masked_layer = tf.multiply(tensor_diag, tiled_mask)
# Permutation invariant operation
if kwargs["perm_op"] == "sum": # sum
vector = tf.reduce_sum(masked_layer, axis=1)
elif kwargs["perm_op"] == "max": # maximum
vector = tf.reduce_max(masked_layer, axis=1)
        elif kwargs["perm_op"] == "mean": # mean
vector = tf.reduce_mean(masked_layer, axis=1)
# Second layer of channel: convolution
vector = tf.expand_dims(vector, -1)
for idx, tup in enumerate(kwargs["cv_layers"]):
            # tup is a tuple whose elements are
# 1. num of filters,
# 2. kernel size,
# 3. string for postprocessing,
# 4. (optional) dropout value
with tf.variable_scope(name + "-cv-" + str(idx)):
vector = tf.layers.conv2d(vector, filters=tup[0], kernel_size=tup[1])
with tf.variable_scope(name + "-bn-" + str(idx)):
if len(tup) == 3:
vector = _post_processing(vector, tup[2])
else:
vector = _post_processing(vector, tup[2], tup[3])
vector = tf.layers.flatten(vector)
output.append(vector)
return vector
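# A minimal usage sketch (not part of the original code): the placeholder shape and
# hyperparameters below are illustrative assumptions, chosen only to show how
# perslay_channel is meant to be wired into a TF1 graph.
if __name__ == "__main__":
    # One batch of persistence diagrams: 2 coordinates per point plus a 0/1 mask.
    diag = tf.placeholder(tf.float32, shape=[None, 100, 3])
    outputs = []
    perslay_channel(outputs, "h0", diag,
                    persistence_weight="linear",
                    coeff_init=tf.constant_initializer(1.), coeff_const=False,
                    layer="ls",  # landscape layer
                    num_samples=64,
                    sample_init=tf.random_uniform_initializer(0., 1.), sample_const=False,
                    perm_op="topk", keep=5,
                    fc_layers=[])
    vector = outputs[0]  # shape [batch, keep * num_samples]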
from sqlalchemy.dialects.postgresql import UUID
from app.common.sqlalchemy_extensions import utcnow
from database import db
class BaseModel(db.Model):
__abstract__ = True
id = db.Column(
UUID,
primary_key=True,
server_default=db.func.uuid_generate_v4())
created = db.Column(db.DateTime, server_default=utcnow())
last_update = db.Column(
db.DateTime, server_default=utcnow(), onupdate=utcnow())
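# A minimal sketch (not from the original module) of how a concrete table could
# inherit the shared id/created/last_update columns; the "User" model and its
# fields are purely illustrative.
class User(BaseModel):
    __tablename__ = 'users'

    email = db.Column(db.String(255), unique=True, nullable=False)
    name = db.Column(db.String(120))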
"""
Edge Detection.
A high-pass filter sharpens an image. This program analyzes every
pixel in an image in relation to the neighboring pixels to sharpen
the image.
"""
kernel = [[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]]
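# Worked example: on a perfectly flat 3x3 neighborhood of gray value v, the kernel
# sums to v * (9 - 8) = v, so uniform regions pass through unchanged; wherever the
# center pixel differs from its neighbors, the difference is amplified, sharpening edges.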
img = None
def setup():
    global img  # needed so that draw() sees the image loaded here
    size(640, 360)
img = loadImage("moon.jpg") # Load the original image
noLoop()
def draw():
image(img, 0, 0) # Displays the image from point (0,0)
img.loadPixels()
# Create an opaque image of the same size as the original
edgeImg = createImage(img.width, img.height, RGB)
# Loop through every pixel in the image.
for y in range(1, img.height - 1): # Skip top and bottom edges
for x in range(1, img.width - 1): # Skip left and right edges
sum = 0 # Kernel sum for this pixel
for ky in range(-1, 2, 1):
for kx in range(-1, 2, 1):
# Calculate the adjacent pixel for this kernel point
pos = (y + ky) * img.width + (x + kx)
# Image is grayscale, red/green/blue are identical
val = red(img.pixels[pos])
# Multiply adjacent pixels based on the kernel values
sum += kernel[ky + 1][kx + 1] * val
# For this pixel in the image, set the gray value
# based on the sum from the kernel
edgeImg.pixels[y * img.width + x] = color(sum, sum, sum)
# State that there are changes to edgeImg.pixels
edgeImg.updatePixels()
image(edgeImg, width / 2, 0) # Draw the image
"""
An explanation of DFS vs. BFS (in Chinese):
https://zhuanlan.zhihu.com/p/50187643
"""
class Solution:
def minDepth(self,root):
if not root:
return 0
l = self.minDepth(root.left)
r = self.minDepth(root.right)
        return l + r + 1 if l == 0 or r == 0 else min(l, r) + 1
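# A level-order (BFS) variant, added as an illustrative alternative to the recursive
# DFS above; it stops at the first leaf it reaches, which can be faster on very
# unbalanced trees. The TreeNode interface (left/right) is assumed to match the original.
from collections import deque

class SolutionBFS:
    def minDepth(self, root):
        if not root:
            return 0
        queue = deque([(root, 1)])
        while queue:
            node, depth = queue.popleft()
            if not node.left and not node.right:  # first leaf reached
                return depth
            if node.left:
                queue.append((node.left, depth + 1))
            if node.right:
                queue.append((node.right, depth + 1))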
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Crawl the images in posts on the 春暖花开 forum
import os
import fake_useragent
import re
import requests
import time
from bs4 import BeautifulSoup
class Picture:
def all_url(self, url):
"""一个页面有许多图集,而这样的页面有很多,该方法是根据传入的根url,获取所有的页面url"""
list_str = url.split('-')
html = self.request(url)
last = BeautifulSoup(html.text, 'lxml').find('span', id='fd_page_bottom').find('a', class_='last')['href']
max_page = str(last).split('-')[-1].split('.')[0]
for index in range(1, int(max_page) + 1):
new_url = '%s-%s-%d.html' % (list_str[0], list_str[1], index)
            print('Processing page: %s' % new_url)
self.one_page(new_url)
def one_page(self, url):
"""处理一个页面中的所有图集"""
html = self.request(url)
all_tbody = BeautifulSoup(html.text, 'lxml').find_all('tbody', id=re.compile('(normalthread_)[0-9]+'))
for tbody in all_tbody:
href = tbody.find('td', class_='icn').find('a')['href']
img_url = 'http://%s/%s' % (url.split('/')[2], str(href))
            print('Processing gallery: %s' % img_url)
path = str(href).split('-')[1]
self.save_img(img_url, path)
            print('Finished processing this gallery')
def save_img(self, url, path):
if self.makedir(path):
html = self.request(url)
all_img = BeautifulSoup(html.text, 'lxml').find_all('img', class_='zoom')
for img in all_img:
try:
img_url = img['file']
except KeyError:
continue
img = self.request(img_url)
if img.status_code != 200:
                    print('Request failed: %d' % img.status_code)
continue
file_name = str(img_url).split('/')[-1]
with open(file_name, 'ab') as f:
f.write(img.content)
time.sleep(3)
@staticmethod
def makedir(path):
"""创建图集文件夹"""
path = path.strip()
full_path = os.path.join("E:\Image\sex", path)
if not os.path.exists(full_path):
print('建了一个名字叫做', path, '的文件夹!')
os.makedirs(full_path)
# 切换到新建的目录
os.chdir(full_path)
return True
else:
print(path, '文件夹已经存在了!')
return False
@staticmethod
def request(url):
"""请求url并返回响应结果"""
fa = fake_useragent.UserAgent()
headers = {
'User-Agent': fa.random,
}
content = requests.get(url, headers=headers)
return content
if __name__ == '__main__':
p = Picture()
p.all_url('http://qqlive8.space/forum-158-1.html')