import pygame
from engine import *
from eventManager import *
class Label(Engine.GUI.Widget):
def __init__(self, text, textColor = None, backgroundColor = None, fontSize = None, padding = None, width = None, height = None, transparentBackground = True):
super().__init__()
self.textColor = textColor if textColor != None else self.options.labelWidgetTextColor
self.backgroundColor = backgroundColor if backgroundColor != None else self.options.labelWidgetBackgroundColor
self.hasTransparentBackground = transparentBackground
self.fontSize = fontSize if fontSize != None else self.options.widgetFontSize
self.font = pygame.font.Font(self.options.widgetFont, self.fontSize)
self.text = text
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor) #need this here to get initial size
self.textRect = self.renderedText.get_rect()
self.rect = self.textRect #self.rect is actually Rect for widget, used here to provide initial size values
self.padding = padding if padding != None else self.options.widgetPadding
self.width = width
if self.width == None:
self.width = self.rect.width + self.padding
self.rect.width = self.width
self.height = height
if self.height == None:
self.height = self.rect.height + self.padding
self.rect.height = self.height
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.width / 2) - (self.textRect.width / 2)
self.textRect.y = (self.height / 2) - (self.textRect.height / 2)
self.image.blit(self.renderedText, self.textRect)
def update(self):
if self.dirty:
self.redrawWidget()
self.dirty = False
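# Hedged usage sketch (not part of the original module): it assumes Engine.GUI.Widget
# supplies self.options, self.dirty and the usual sprite plumbing, and that the
# engine has already initialized pygame.font.
#
#   title = Label("Score board", textColor=(255, 255, 255), fontSize=24)
#   title.rect.topleft = (10, 10)
#   title.dirty = True
#   title.update()   # renders the text onto title.image once, then clears dirty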
class StatTracker(Label):
def __init__(self, stat, value, textColor = None, backgroundColor = None, fontSize = None, padding = None, width = None, height = None, transparentBackground = True):
super().__init__(stat, textColor, backgroundColor, fontSize, padding, width, height, transparentBackground)
self.stat = stat
self.statValue = value
self.text = stat
self.valueFontSize = fontSize if fontSize != None else self.options.widgetFontSize
self.valueFont = pygame.font.Font(self.options.widgetFont, self.valueFontSize)
self.textFontSize = self.options.statTrackerTextFontSize
self.textFont = pygame.font.Font(self.options.widgetFont, self.textFontSize)
# get initial sizes
self.renderedValue = self.valueFont.render(str(self.statValue), True, self.textColor, self.backgroundColor)
self.valueRect = self.renderedValue.get_rect()
self.renderedText = self.textFont.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.padding = padding if padding != None else self.options.statTrackerTextPadding
self.width = width if width != None else max(self.valueRect.width, self.textRect.width) + self.padding
self.height = height if height != None else self.valueRect.height + self.textRect.height + self.padding + self.options.statTrackerValueTextSpacing
self.rect = self.textRect #self.rect is actually Rect for widget, used here to provide initial size values
self.rect.width = self.width
self.rect.height = self.height
def addListeners(self):
event = Events.StatUpdateEvent()
self.eventManager.addListener(event, self)
def notify(self, event):
if isinstance(event, Events.StatUpdateEvent):
if event.stat == self.stat:
self.value(event.value)
def value(self, value = None):
if value != None:
self.statValue += value
self.redrawWidget()
return self.statValue
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.renderedValue = self.valueFont.render(str(self.statValue), True, self.textColor, self.backgroundColor)
self.valueRect = self.renderedValue.get_rect()
self.valueRect.x = (self.width / 2) - (self.valueRect.width / 2)
self.valueRect.y = (self.padding / 2) #self.topEdge() +
self.renderedText = self.textFont.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.width / 2) - (self.textRect.width / 2)
self.textRect.y = self.rect.height - (self.padding / 2) - self.textRect.height
self.width = max(self.valueRect.width, self.textRect.width) + self.padding
self.rect.width = self.width
self.image.blit(self.renderedValue, self.valueRect)
self.image.blit(self.renderedText, self.textRect)
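# Hedged sketch of the update flow (event names and attributes are taken from
# notify() above; the EventManager API itself is assumed):
#
#   kills = StatTracker("Kills", 0)
#   kills.addListeners()                     # subscribes to StatUpdateEvent
#   # elsewhere, something posts Events.StatUpdateEvent with .stat and .value set;
#   # notify() matches event.stat == "Kills", then value(event.value) adds the
#   # delta to statValue and redraws the widget.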
class HoverableWidget(Label):
def __init__(self, text, textColor = None, backgroundColor = None, fontSize = None, padding = None, onHoverAction = None, width = None, height = None, transparentBackground = False):
super().__init__(text, textColor, backgroundColor, fontSize, padding, width, height, transparentBackground)
self.unfocusedBackgroundColor = self.backgroundColor
self.focusedBackgroundColor = self.getFocusedColor(self.backgroundColor)
if onHoverAction:
self.onHoverAction = onHoverAction
else:
self.onHoverAction = self.changeBackground
def addListeners(self):
event = Events.HoverWidgetEvent()
self.eventManager.addListener(event, self)
def getContrastingShade(self, color):
        contrastingShadeOffset = int(0.2 * 255)  # integer offset keeps the shifted channel an int
        if 255 - color > contrastingShadeOffset:
            color += contrastingShadeOffset
        else:
            color -= contrastingShadeOffset
return color
def getFocusedColor(self, color):
r = self.getContrastingShade(color[0])
g = self.getContrastingShade(color[1])
b = self.getContrastingShade(color[2])
a = None
rgb = None
if len(color) > 3:
a = self.getContrastingShade(color[3])
        if a is not None:  # test for presence rather than truthiness so an alpha of 0 is kept
rgb = (r, g, b, a)
else:
rgb = (r, g, b)
return rgb
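    # Worked example (illustrative only): with an offset of 51 (0.2 * 255),
    # getFocusedColor((100, 40, 220)) brightens the first two channels and darkens
    # the third, giving (151, 91, 169); channels close to white are shifted down
    # so the hover shade always contrasts with the base colour.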
def changeBackground(self):
self.dirty = True
if self.focused:
self.backgroundColor = self.focusedBackgroundColor
else:
self.backgroundColor = self.unfocusedBackgroundColor
self.update()
def hover(self, focused):
if self.onHoverAction:
self.dirty = True
self.focused = focused
self.onHoverAction()
self.update()
def notify(self, event):
if isinstance(event, Events.HoverWidgetEvent):
focused = self.rect.collidepoint(event.pos)
self.hover(focused)
class Button(HoverableWidget):
def __init__(self, text, textColor = None, buttonColor = None, fontSize = None, padding = None, onClickAction = None, onHoverAction = None, width = None, height = None):
super().__init__(text, textColor, buttonColor, fontSize, padding, onHoverAction, width, height, transparentBackground = False)
self.onClickAction = onClickAction
def addListeners(self):
super().addListeners()
event = Events.LeftClickWidgetEvent()
self.eventManager.addListener(event, self)
#print("Adding listeners for", self.text)
def click(self):
if self.onClickAction:
self.dirty = True
self.onClickAction()
def notify(self, event):
super().notify(event)
if isinstance(event, Events.LeftClickWidgetEvent) and self.rect.collidepoint(event.pos):
#print("Firing", event.name, "for Listener", self.text)
self.click()
elif isinstance(event, Events.KeyboardActivateWidgetEvent) and self.focused:
self.click()
class SliderWidget(Engine.GUI.Widget):
def __init__(self, valueKey, values, defaultValue, textColor = None, fillColor = None, backgroundColor = None, onDragAction = None, transparentBackground = True):
super().__init__()
self.eventManager = EventManager()
self.textColor = textColor if textColor != None else self.options.sliderWidgetTextColor
self.fillColor = fillColor if fillColor != None else self.options.sliderWidgetFillColor
self.backgroundColor = backgroundColor if backgroundColor != None else self.options.sliderWidgetBackgroundColor
self.hasTransparentBackground = transparentBackground
self.width = self.options.sliderWidth
self.height = self.options.sliderHeight
self.valueKey = valueKey
self.defaultValue = defaultValue
self.text = str(self.defaultValue)
self.value = self.defaultValue
self.stepValues = {}
self.font = pygame.font.Font(self.options.widgetFont, self.options.sliderFontSize)
self.onDragAction = onDragAction if onDragAction != None else self.slideToValue
self.image = pygame.Surface((self.width, self.height)) #contains bar, slide and text; all are defined here for initial positioning
self.rect = self.image.get_rect()
self.bar = pygame.Surface((self.options.sliderWidth, self.options.sliderBarHeight))
self.bar.fill(self.fillColor)
self.barRect = self.bar.get_rect()
self.barRect.x = self.options.sliderBarOffsetX
self.barRect.y = self.options.sliderBarOffsetY
self.slide = pygame.Surface((self.options.sliderSlideWidth, self.options.sliderSlideHeight))
self.slide.fill(self.fillColor)
self.slideRect = self.slide.get_rect()
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.image.get_rect().width / 2) - (self.textRect.width / 2)
self.textRect.y = self.image.get_rect().height - self.options.sliderTextOffsetY - self.textRect.height
#make a lookup table for slide position and value
vals = len(values) - 1
isRawList = not (type(values[0]) == type([]) or type(values[0]) == type(()))
maxStep = self.barRect.width - self.slideRect.width
minStep = 0
stepCounter = 0
self.step = ((maxStep - minStep) / vals)
for val in values:
key = self.step * stepCounter
if isRawList:
self.stepValues[key] = (str(val), val) #mimic (label, value)
else:
self.stepValues[key] = val #should already be (label, value) or [label, value]
stepCounter += 1
self.setValue(self.defaultValue)
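        # Illustrative example of the lookup table (the numbers depend on option
        # values, which are assumed here): with values = [0, 25, 50, 75, 100] and a
        # slide travel of barRect.width - slideRect.width == 120, step == 30.0 and
        # stepValues == {0.0: ('0', 0), 30.0: ('25', 25), 60.0: ('50', 50),
        # 90.0: ('75', 75), 120.0: ('100', 100)}.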
def addListeners(self):
event = Events.DragWidgetEvent()
self.eventManager.addListener(event, self)
event = Events.LeftClickWidgetEvent()
self.eventManager.addListener(event, self)
def redrawWidget(self):
self.dirty = True
self.image = pygame.Surface((self.width, self.height))
self.image.fill(self.backgroundColor)
if self.hasTransparentBackground:
self.image.set_colorkey(self.backgroundColor)
self.bar = pygame.Surface((self.options.sliderWidth, self.options.sliderBarHeight))
self.bar.fill(self.fillColor)
self.slide = pygame.Surface((self.options.sliderSlideWidth, self.options.sliderSlideHeight))
self.slide.fill(self.fillColor)
self.renderedText = self.font.render(self.text, True, self.textColor, self.backgroundColor)
self.textRect = self.renderedText.get_rect()
self.textRect.x = (self.image.get_rect().width / 2) - (self.textRect.width / 2)
self.textRect.y = self.image.get_rect().height - self.options.sliderTextOffsetY - self.textRect.height
self.image.blit(self.bar, self.barRect)
self.image.blit(self.slide, self.slideRect)
self.image.blit(self.renderedText, self.textRect)
def update(self):
if self.dirty:
self.redrawWidget()
self.dirty = False
def drag(self, pos):
if self.onDragAction:
self.dirty = True
self.onDragAction(pos)
self.update()
def setValue(self, val):
for key in self.stepValues.keys():
item = self.stepValues[key]
if val == item[0] or val == item[1]:
self.slideToValue(key)
def slideToValue(self, dx):
self.dirty = True
closestStep = int(dx / self.step) #ensure integer
key = closestStep * self.step
if key in self.stepValues.keys():
item = self.stepValues[key]
self.text = item[0]
self.value = item[1]
self.slideRect.x = key
self.update()
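    # Hedged example of the snapping math (using the 30.0 step from the sketch
    # above): a drag to dx == 47 gives closestStep == int(47 / 30.0) == 1, so the
    # slide snaps to key 30.0 and the widget shows the label/value stored there.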
def handleIfOnSelf(self, event):
relx = event.pos[0] - self.rect.x
minx = self.barRect.x - self.options.sliderDragPaddingX
maxx = minx + self.barRect.width + self.options.sliderDragPaddingX
rely = event.pos[1] - self.rect.y
miny = self.slideRect.y
maxy = miny + self.slideRect.height
if (minx <= relx <= maxx and miny <= rely <= maxy):
self.drag(relx)
def notify(self, event):
if isinstance(event, Events.DragWidgetEvent):
self.handleIfOnSelf(event)
if isinstance(event, Events.LeftClickWidgetEvent):
self.handleIfOnSelf(event)
from collections import Counter
import math
import os
import pickle
import re
import string
import pysrt
import nltk
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
import sklearn.cross_validation
import sklearn.svm
import sklearn.naive_bayes
from sklearn.cluster import MeanShift, estimate_bandwidth
URL_REGEX = '(?:www\.)|(?:https?://)\S+\.\S+'
STOP_WORDS = set(nltk.corpus.stopwords.words('english')) | {"n't", "..."} | set(string.punctuation)
STEMMER = nltk.stem.porter.PorterStemmer()
def extract_features(subtitle_lines):
word_counter = Counter()
for line in subtitle_lines:
tokens = nltk.word_tokenize(line)
for token in tokens:
token = token.lower()
            # Skipping stop words, tokens starting with "'" and tokens made of one repeated character (like '---' and such)
if token in STOP_WORDS or token.startswith("'") or len(set(token)) == 1:
continue
# Stemming
token = STEMMER.stem(token)
word_counter[token] += 1
return word_counter
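# Illustrative behaviour (the stems are what PorterStemmer typically produces;
# exact output can vary with the NLTK version): for the line "They were running
# fast", "they" and "were" are dropped as stop words and the remaining tokens are
# stemmed, so the counter gains {'run': 1, 'fast': 1}.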
def extract_features_ngrams(subtitle_lines, n=2):
word_counter = Counter()
for line in subtitle_lines:
tokens = nltk.word_tokenize(line)
for i in xrange(len(tokens) - (n - 1)):
ngram = []
for j in xrange(n):
token = tokens[i+j].lower()
                # Skipping stop words, tokens starting with "'" and tokens made of one repeated character (like '---' and such)
if token in STOP_WORDS or token.startswith("'") or len(set(token)) == 1:
continue
# Stemming
token = STEMMER.stem(token)
ngram.append(token)
ngram = tuple(ngram)
word_counter[ngram] += 1
return word_counter
extract_features = extract_features_ngrams
def extract_lines(subtitle_path):
try:
subtitle_object = pysrt.open(subtitle_path)
except UnicodeDecodeError:
subtitle_object = pysrt.open(subtitle_path, encoding='latin1')
subtitle_lines = []
for sub in subtitle_object:
text = sub.text
# Removing any formatting via HTML tags
text = re.sub('<[^<]+?>', '', text)
# Skipping links (usually ads or subtitle credits so irrelevant)
if re.search(URL_REGEX, text):
continue
subtitle_lines.append(text)
return subtitle_lines
def classification_validation(features, labels):
kf = sklearn.cross_validation.StratifiedKFold(labels, n_folds=5)
clf_scores = []
print '# CROSS-VALIDATION'
for train, test in kf:
X_train, X_test, y_train, y_test = features[train], features[test], labels[train], labels[test]
clf = get_classifier().fit(X_train, y_train)
clf_score = clf.score(X_test, y_test)
clf_scores.append(clf_score)
print " . Score: {}".format(clf_score)
avg_clf_score = sum(clf_scores) / len(clf_scores)
print
print "=> Average classifier score: {}".format(avg_clf_score)
def get_classifier():
return sklearn.naive_bayes.GaussianNB()
class SeriesClassifier(object):
def __init__(self, clf, vectorizer, variance_threshold, pca, inverse_document_frequency):
self.clf = clf
self.vectorizer = vectorizer
self.variance_threshold = variance_threshold
self.pca = pca
self.inverse_document_frequency = inverse_document_frequency
def extract_features(self, lines):
w_f = extract_features(lines)
for word in w_f:
tf = w_f[word]
idf = self.inverse_document_frequency[word]
w_f[word] = math.log(1 + tf) * math.log(idf) if idf != 0 else 0
features = self.vectorizer.transform([w_f])
#features = self.variance_threshold.transform(features)
features = features.toarray()
#features = self.pca.transform(features)
return features[0]
def predict(self, features):
return self.clf.predict(features)[0]
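# Hedged usage sketch for the pickled classifier (the subtitle path is
# hypothetical; the file mode matches the pickle.dump call in the main block):
#
#   with open('clf.pickle') as clf_file:
#       series_clf = pickle.load(clf_file)
#   lines = extract_lines('subtitles/some_show/episode.srt')
#   print series_clf.predict(series_clf.extract_features(lines))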
if __name__ == '__main__':
series_list = ['modern_family', '30_rock', 'big_bang_theory', 'parks_and_recreation', 'entourage',
'house_of_cards', 'the_west_wing', 'borgen', 'the_newsroom',
'american_horror_story', 'penny_dreadful', 'the_walking_dead']
words_frequencies = []
series_labels = []
subtitle_labels = []
for series in series_list:
print '* {}'.format(series.upper())
folder_path = os.path.join('subtitles', series)
subtitles = [os.path.join(folder_path, path) for path in os.listdir(folder_path) if not path.startswith('.')]
for sub_path in subtitles:
print ' . Analyzed subtitle "{}"'.format(sub_path)
subtitle_lines = extract_lines(sub_path)
# Some encoding errors can cause no lines to be detected
if not subtitle_lines:
continue
words_frequencies.append(extract_features(subtitle_lines))
series_labels.append(series)
subtitle_labels.append(sub_path)
series_labels, subtitle_labels = map(np.array, [series_labels, subtitle_labels])
words_set = set(word for w_f in words_frequencies for word in w_f)
# Calculating the inverse document frequency for each word
inverse_document_frequency = Counter()
total_frequency = float(sum(sum(w_f.values()) for w_f in words_frequencies))
for word in words_set:
total_word_frequency = len(set(series_labels[i] for i, w_f in enumerate(words_frequencies) if word in w_f))
inverse_document_frequency[word] = math.log(len(series_list) / total_word_frequency)
#inverse_document_frequency[word] = math.log(total_frequency / total_word_frequency)
# Replacing word frequencies by tf-idf
for w_f in words_frequencies:
for word in w_f:
tf = w_f[word]
idf = inverse_document_frequency[word]
w_f[word] = math.log(1 + tf) * math.log(idf) if idf != 0 else 0
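    # Worked example of the weighting as written above (note that idf is itself a
    # log, so the weight is log(1 + tf) * log(log(N / n_series))): with 12 series,
    # a word appearing in subtitles of 3 of them gets idf = log(12 / 3) ~= 1.386,
    # and a term frequency of 4 yields log(5) * log(1.386) ~= 1.609 * 0.327 ~= 0.53.
    # A word present in every series has idf == 0 and is zeroed out.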
# Vectorizing the word count among all series
vectorizer = DictVectorizer()
feature_vectors = vectorizer.fit_transform(words_frequencies)
# Dropping features with low variance
MIN_VARIANCE = 0.04
variance_threshold = VarianceThreshold(threshold=MIN_VARIANCE)
#feature_vectors = variance_threshold.fit_transform(feature_vectors)
# Turning to a dense matrix
feature_vectors = feature_vectors.toarray()
# PCA
pca = PCA(n_components=20)
#feature_vectors = pca.fit_transform(feature_vectors)
print feature_vectors.shape
# Cross-validation
classification_validation(feature_vectors, series_labels)
# Training an SVM classifier and dumping it to a file
clf = get_classifier().fit(feature_vectors, series_labels)
series_clf = SeriesClassifier(clf, vectorizer, variance_threshold, pca, inverse_document_frequency)
with open('clf.pickle', 'w') as clf_file:
pickle.dump(series_clf, clf_file)
# Clustering
CLUSTERING = False
if CLUSTERING:
temp_pca = PCA(n_components=4)
X = temp_pca.fit_transform(feature_vectors)
bandwidth = estimate_bandwidth(X, quantile=0.4, n_samples=1000)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
# for i, label in enumerate(ms.labels_):
# print label, subtitle_labels[i]
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from unittest import mock
from importlib import reload
from datetime import datetime, timedelta
from google.api_core import operation as ga_operation
from google.auth import credentials as auth_credentials
from google.cloud import aiplatform
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
from google.cloud.aiplatform_v1beta1.services.endpoint_service import (
client as endpoint_service_client_v1beta1,
)
from google.cloud.aiplatform_v1beta1.services.prediction_service import (
client as prediction_service_client_v1beta1,
)
from google.cloud.aiplatform_v1beta1.types import (
endpoint as gca_endpoint_v1beta1,
machine_resources as gca_machine_resources_v1beta1,
prediction_service as gca_prediction_service_v1beta1,
endpoint_service as gca_endpoint_service_v1beta1,
)
from google.cloud.aiplatform_v1.services.model_service import (
client as model_service_client,
)
from google.cloud.aiplatform_v1.services.endpoint_service import (
client as endpoint_service_client,
)
from google.cloud.aiplatform_v1.services.prediction_service import (
client as prediction_service_client,
)
from google.cloud.aiplatform_v1.types import (
endpoint as gca_endpoint,
model as gca_model,
machine_resources as gca_machine_resources,
prediction_service as gca_prediction_service,
endpoint_service as gca_endpoint_service,
encryption_spec as gca_encryption_spec,
)
_TEST_PROJECT = "test-project"
_TEST_PROJECT_2 = "test-project-2"
_TEST_LOCATION = "us-central1"
_TEST_LOCATION_2 = "europe-west4"
_TEST_DISPLAY_NAME = "test-display-name"
_TEST_DISPLAY_NAME_2 = "test-display-name-2"
_TEST_ID = "1028944691210842416"
_TEST_ID_2 = "4366591682456584192"
_TEST_DESCRIPTION = "test-description"
_TEST_ENDPOINT_NAME = (
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/endpoints/{_TEST_ID}"
)
_TEST_ENDPOINT_NAME_ALT_LOCATION = (
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION_2}/endpoints/{_TEST_ID}"
)
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
_TEST_MODEL_NAME = (
f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/models/{_TEST_ID}"
)
_TEST_MODEL_ID = "1028944691210842416"
_TEST_PREDICTION = [[1.0, 2.0, 3.0], [3.0, 3.0, 1.0]]
_TEST_INSTANCES = [[1.0, 2.0, 3.0], [1.0, 3.0, 4.0]]
_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials())
_TEST_SERVICE_ACCOUNT = "[email protected]"
_TEST_DEPLOYED_MODELS = [
gca_endpoint.DeployedModel(id=_TEST_ID, display_name=_TEST_DISPLAY_NAME),
gca_endpoint.DeployedModel(id=_TEST_ID_2, display_name=_TEST_DISPLAY_NAME_2),
]
_TEST_MACHINE_TYPE = "n1-standard-32"
_TEST_ACCELERATOR_TYPE = "NVIDIA_TESLA_P100"
_TEST_ACCELERATOR_COUNT = 2
_TEST_EXPLANATIONS = [
gca_prediction_service_v1beta1.explanation.Explanation(attributions=[])
]
_TEST_ATTRIBUTIONS = [
gca_prediction_service_v1beta1.explanation.Attribution(
baseline_output_value=1.0,
instance_output_value=2.0,
feature_attributions=3.0,
output_index=[1, 2, 3],
output_display_name="abc",
approximation_error=6.0,
output_name="xyz",
)
]
_TEST_EXPLANATION_METADATA = aiplatform.explain.ExplanationMetadata(
inputs={
"features": aiplatform.explain.ExplanationMetadata.InputMetadata(
{
"input_tensor_name": "dense_input",
"encoding": "BAG_OF_FEATURES",
"modality": "numeric",
"index_feature_mapping": ["abc", "def", "ghj"],
}
)
},
outputs={
"medv": aiplatform.explain.ExplanationMetadata.OutputMetadata(
{"output_tensor_name": "dense_2"}
)
},
)
_TEST_EXPLANATION_PARAMETERS = aiplatform.explain.ExplanationParameters(
{"sampled_shapley_attribution": {"path_count": 10}}
)
# CMEK encryption
_TEST_ENCRYPTION_KEY_NAME = "key_1234"
_TEST_ENCRYPTION_SPEC = gca_encryption_spec.EncryptionSpec(
kms_key_name=_TEST_ENCRYPTION_KEY_NAME
)
_TEST_ENDPOINT_GAPIC = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME, name=_TEST_ENDPOINT_NAME
)
_TEST_ENDPOINT_LIST = [
gca_endpoint.Endpoint(
name=_TEST_ENDPOINT_NAME,
display_name="aac",
create_time=datetime.now() - timedelta(minutes=15),
),
gca_endpoint.Endpoint(
name=_TEST_ENDPOINT_NAME,
display_name="aab",
create_time=datetime.now() - timedelta(minutes=5),
),
gca_endpoint.Endpoint(
name=_TEST_ENDPOINT_NAME,
display_name="aaa",
create_time=datetime.now() - timedelta(minutes=10),
),
]
_TEST_LIST_FILTER = 'display_name="abc"'
_TEST_LIST_ORDER_BY_CREATE_TIME = "create_time desc"
_TEST_LIST_ORDER_BY_DISPLAY_NAME = "display_name"
_TEST_LABELS = {"my_key": "my_value"}
@pytest.fixture
def get_endpoint_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
encryption_spec=_TEST_ENCRYPTION_SPEC,
)
yield get_endpoint_mock
@pytest.fixture
def get_endpoint_alt_location_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME_ALT_LOCATION,
encryption_spec=_TEST_ENCRYPTION_SPEC,
)
yield get_endpoint_mock
@pytest.fixture
def get_endpoint_with_models_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
deployed_models=_TEST_DEPLOYED_MODELS,
)
yield get_endpoint_mock
@pytest.fixture
def get_model_mock():
with mock.patch.object(
model_service_client.ModelServiceClient, "get_model"
) as get_model_mock:
get_model_mock.return_value = gca_model.Model(
display_name=_TEST_DISPLAY_NAME, name=_TEST_MODEL_NAME,
)
yield get_model_mock
@pytest.fixture
def create_endpoint_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "create_endpoint"
) as create_endpoint_mock:
create_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
create_endpoint_lro_mock.result.return_value = gca_endpoint.Endpoint(
name=_TEST_ENDPOINT_NAME, display_name=_TEST_DISPLAY_NAME
)
create_endpoint_mock.return_value = create_endpoint_lro_mock
yield create_endpoint_mock
@pytest.fixture
def deploy_model_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "deploy_model"
) as deploy_model_mock:
deployed_model = gca_endpoint.DeployedModel(
model=_TEST_MODEL_NAME, display_name=_TEST_DISPLAY_NAME,
)
deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
deploy_model_lro_mock.result.return_value = gca_endpoint_service.DeployModelResponse(
deployed_model=deployed_model,
)
deploy_model_mock.return_value = deploy_model_lro_mock
yield deploy_model_mock
@pytest.fixture
def deploy_model_with_explanations_mock():
with mock.patch.object(
endpoint_service_client_v1beta1.EndpointServiceClient, "deploy_model"
) as deploy_model_mock:
deployed_model = gca_endpoint_v1beta1.DeployedModel(
model=_TEST_MODEL_NAME, display_name=_TEST_DISPLAY_NAME,
)
deploy_model_lro_mock = mock.Mock(ga_operation.Operation)
deploy_model_lro_mock.result.return_value = gca_endpoint_service_v1beta1.DeployModelResponse(
deployed_model=deployed_model,
)
deploy_model_mock.return_value = deploy_model_lro_mock
yield deploy_model_mock
@pytest.fixture
def undeploy_model_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "undeploy_model"
) as undeploy_model_mock:
undeploy_model_lro_mock = mock.Mock(ga_operation.Operation)
undeploy_model_lro_mock.result.return_value = (
gca_endpoint_service.UndeployModelResponse()
)
undeploy_model_mock.return_value = undeploy_model_lro_mock
yield undeploy_model_mock
@pytest.fixture
def delete_endpoint_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "delete_endpoint"
) as delete_endpoint_mock:
delete_endpoint_lro_mock = mock.Mock(ga_operation.Operation)
delete_endpoint_lro_mock.result.return_value = (
gca_endpoint_service.DeleteEndpointRequest()
)
delete_endpoint_mock.return_value = delete_endpoint_lro_mock
yield delete_endpoint_mock
@pytest.fixture
def sdk_private_undeploy_mock():
"""Mocks the high-level Endpoint._undeploy() SDK private method"""
with mock.patch.object(aiplatform.Endpoint, "_undeploy") as sdk_undeploy_mock:
sdk_undeploy_mock.return_value = None
yield sdk_undeploy_mock
@pytest.fixture
def sdk_undeploy_all_mock():
"""Mocks the high-level Endpoint.undeploy_all() SDK method"""
with mock.patch.object(
aiplatform.Endpoint, "undeploy_all"
) as sdk_undeploy_all_mock:
sdk_undeploy_all_mock.return_value = None
yield sdk_undeploy_all_mock
@pytest.fixture
def list_endpoints_mock():
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "list_endpoints"
) as list_endpoints_mock:
list_endpoints_mock.return_value = _TEST_ENDPOINT_LIST
yield list_endpoints_mock
@pytest.fixture
def create_endpoint_client_mock():
with mock.patch.object(
initializer.global_config, "create_client", autospec=True,
) as create_endpoint_client_mock:
endpoint_client_mock = mock.Mock(
spec=endpoint_service_client.EndpointServiceClient
)
endpoint_client_mock.get_endpoint.return_value = _TEST_ENDPOINT_GAPIC
create_endpoint_client_mock.return_value = endpoint_client_mock
yield create_endpoint_client_mock
@pytest.fixture
def predict_client_predict_mock():
with mock.patch.object(
prediction_service_client.PredictionServiceClient, "predict"
) as predict_mock:
predict_mock.return_value = gca_prediction_service.PredictResponse(
deployed_model_id=_TEST_MODEL_ID
)
predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
yield predict_mock
@pytest.fixture
def predict_client_explain_mock():
with mock.patch.object(
prediction_service_client_v1beta1.PredictionServiceClient, "explain"
) as predict_mock:
predict_mock.return_value = gca_prediction_service_v1beta1.ExplainResponse(
deployed_model_id=_TEST_MODEL_ID,
)
predict_mock.return_value.predictions.extend(_TEST_PREDICTION)
predict_mock.return_value.explanations.extend(_TEST_EXPLANATIONS)
predict_mock.return_value.explanations[0].attributions.extend(
_TEST_ATTRIBUTIONS
)
yield predict_mock
class TestEndpoint:
def setup_method(self):
reload(initializer)
reload(aiplatform)
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
def test_constructor(self, create_endpoint_client_mock):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
credentials=_TEST_CREDENTIALS,
)
models.Endpoint(_TEST_ENDPOINT_NAME)
create_endpoint_client_mock.assert_has_calls(
[
mock.call(
client_class=utils.EndpointClientWithOverride,
credentials=initializer.global_config.credentials,
location_override=_TEST_LOCATION,
prediction_client=False,
),
mock.call(
client_class=utils.PredictionClientWithOverride,
credentials=None,
location_override=_TEST_LOCATION,
prediction_client=True,
),
]
)
def test_constructor_with_endpoint_id(self, get_endpoint_mock):
models.Endpoint(_TEST_ID)
get_endpoint_mock.assert_called_with(name=_TEST_ENDPOINT_NAME)
def test_constructor_with_endpoint_name(self, get_endpoint_mock):
models.Endpoint(_TEST_ENDPOINT_NAME)
get_endpoint_mock.assert_called_with(name=_TEST_ENDPOINT_NAME)
def test_constructor_with_custom_project(self, get_endpoint_mock):
models.Endpoint(endpoint_name=_TEST_ID, project=_TEST_PROJECT_2)
test_endpoint_resource_name = endpoint_service_client.EndpointServiceClient.endpoint_path(
_TEST_PROJECT_2, _TEST_LOCATION, _TEST_ID
)
get_endpoint_mock.assert_called_with(name=test_endpoint_resource_name)
@pytest.mark.usefixtures("get_endpoint_mock")
def test_constructor_with_conflicting_location(self):
"""Passing a full resource name with `_TEST_LOCATION` and providing `_TEST_LOCATION_2` as location"""
with pytest.raises(RuntimeError) as err:
models.Endpoint(
endpoint_name=_TEST_ENDPOINT_NAME, location=_TEST_LOCATION_2
)
assert err.match(
regexp=r"is provided, but different from the resource location"
)
def test_constructor_with_custom_location(self, get_endpoint_alt_location_mock):
models.Endpoint(endpoint_name=_TEST_ID, location=_TEST_LOCATION_2)
test_endpoint_resource_name = endpoint_service_client.EndpointServiceClient.endpoint_path(
_TEST_PROJECT, _TEST_LOCATION_2, _TEST_ID
)
get_endpoint_alt_location_mock.assert_called_with(
name=test_endpoint_resource_name
)
def test_constructor_with_custom_credentials(self, create_endpoint_client_mock):
creds = auth_credentials.AnonymousCredentials()
models.Endpoint(_TEST_ENDPOINT_NAME, credentials=creds)
create_endpoint_client_mock.assert_has_calls(
[
mock.call(
client_class=utils.EndpointClientWithOverride,
credentials=creds,
location_override=_TEST_LOCATION,
prediction_client=False,
),
mock.call(
client_class=utils.PredictionClientWithOverride,
credentials=creds,
location_override=_TEST_LOCATION,
prediction_client=True,
),
]
)
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_init_aiplatform_with_encryption_key_name_and_create_endpoint(
self, create_endpoint_mock, sync
):
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
)
my_endpoint = models.Endpoint.create(display_name=_TEST_DISPLAY_NAME, sync=sync)
if not sync:
my_endpoint.wait()
expected_endpoint = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
)
create_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(),
)
expected_endpoint.name = _TEST_ENDPOINT_NAME
assert my_endpoint._gca_resource == expected_endpoint
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_create(self, create_endpoint_mock, sync):
my_endpoint = models.Endpoint.create(
display_name=_TEST_DISPLAY_NAME,
encryption_spec_key_name=_TEST_ENCRYPTION_KEY_NAME,
sync=sync,
)
if not sync:
my_endpoint.wait()
expected_endpoint = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME, encryption_spec=_TEST_ENCRYPTION_SPEC
)
create_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(),
)
expected_endpoint.name = _TEST_ENDPOINT_NAME
assert my_endpoint.gca_resource == expected_endpoint
assert my_endpoint.network is None
@pytest.mark.usefixtures("get_endpoint_mock")
def test_accessing_properties_with_no_resource_raises(self,):
my_endpoint = aiplatform.Endpoint(_TEST_ENDPOINT_NAME)
my_endpoint._gca_resource = None
with pytest.raises(RuntimeError) as e:
my_endpoint.gca_resource
e.match(regexp=r"Endpoint resource has not been created.")
with pytest.raises(RuntimeError) as e:
my_endpoint.network
e.match(regexp=r"Endpoint resource has not been created.")
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_create_with_description(self, create_endpoint_mock, sync):
my_endpoint = models.Endpoint.create(
display_name=_TEST_DISPLAY_NAME, description=_TEST_DESCRIPTION, sync=sync
)
if not sync:
my_endpoint.wait()
expected_endpoint = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME, description=_TEST_DESCRIPTION,
)
create_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_create_with_labels(self, create_endpoint_mock, sync):
my_endpoint = models.Endpoint.create(
display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS, sync=sync
)
if not sync:
my_endpoint.wait()
expected_endpoint = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME, labels=_TEST_LABELS,
)
create_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT, endpoint=expected_endpoint, metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy(self, deploy_model_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(test_model, sync=sync)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=1, max_replica_count=1,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=None,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_display_name(self, deploy_model_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(
model=test_model, deployed_model_display_name=_TEST_DISPLAY_NAME, sync=sync
)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=1, max_replica_count=1,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=_TEST_DISPLAY_NAME,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_traffic_80(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, traffic_percentage=80, sync=sync)
if not sync:
test_endpoint.wait()
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_traffic_120(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, traffic_percentage=120, sync=sync)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_traffic_negative(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, traffic_percentage=-18, sync=sync)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_min_replica(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, min_replica_count=-1, sync=sync)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_max_replica(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, max_replica_count=-2, sync=sync)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_raise_error_traffic_split(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, traffic_split={"a": 99}, sync=sync)
@pytest.mark.usefixtures("get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_traffic_percent(self, deploy_model_mock, sync):
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
traffic_split={"model1": 100},
)
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, traffic_percentage=70, sync=sync)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=1, max_replica_count=1,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=None,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"model1": 30, "0": 70},
metadata=(),
)
@pytest.mark.usefixtures("get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_traffic_split(self, deploy_model_mock, sync):
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
traffic_split={"model1": 100},
)
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(
model=test_model, traffic_split={"model1": 30, "0": 70}, sync=sync
)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=1, max_replica_count=1,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=None,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"model1": 30, "0": 70},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_dedicated_resources(self, deploy_model_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(
model=test_model,
machine_type=_TEST_MACHINE_TYPE,
accelerator_type=_TEST_ACCELERATOR_TYPE,
accelerator_count=_TEST_ACCELERATOR_COUNT,
service_account=_TEST_SERVICE_ACCOUNT,
sync=sync,
)
if not sync:
test_endpoint.wait()
expected_machine_spec = gca_machine_resources.MachineSpec(
machine_type=_TEST_MACHINE_TYPE,
accelerator_type=_TEST_ACCELERATOR_TYPE,
accelerator_count=_TEST_ACCELERATOR_COUNT,
)
expected_dedicated_resources = gca_machine_resources.DedicatedResources(
machine_spec=expected_machine_spec,
min_replica_count=1,
max_replica_count=1,
)
expected_deployed_model = gca_endpoint.DeployedModel(
dedicated_resources=expected_dedicated_resources,
model=test_model.resource_name,
display_name=None,
service_account=_TEST_SERVICE_ACCOUNT,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=expected_deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_explanations(self, deploy_model_with_explanations_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(
model=test_model,
machine_type=_TEST_MACHINE_TYPE,
accelerator_type=_TEST_ACCELERATOR_TYPE,
accelerator_count=_TEST_ACCELERATOR_COUNT,
explanation_metadata=_TEST_EXPLANATION_METADATA,
explanation_parameters=_TEST_EXPLANATION_PARAMETERS,
sync=sync,
)
if not sync:
test_endpoint.wait()
expected_machine_spec = gca_machine_resources_v1beta1.MachineSpec(
machine_type=_TEST_MACHINE_TYPE,
accelerator_type=_TEST_ACCELERATOR_TYPE,
accelerator_count=_TEST_ACCELERATOR_COUNT,
)
expected_dedicated_resources = gca_machine_resources_v1beta1.DedicatedResources(
machine_spec=expected_machine_spec,
min_replica_count=1,
max_replica_count=1,
)
expected_deployed_model = gca_endpoint_v1beta1.DeployedModel(
dedicated_resources=expected_dedicated_resources,
model=test_model.resource_name,
display_name=None,
explanation_spec=gca_endpoint_v1beta1.explanation.ExplanationSpec(
metadata=_TEST_EXPLANATION_METADATA,
parameters=_TEST_EXPLANATION_PARAMETERS,
),
)
deploy_model_with_explanations_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=expected_deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_min_replica_count(self, deploy_model_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, min_replica_count=2, sync=sync)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=2, max_replica_count=2,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=None,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock", "get_model_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_deploy_with_max_replica_count(self, deploy_model_mock, sync):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_model = models.Model(_TEST_ID)
test_endpoint.deploy(model=test_model, max_replica_count=2, sync=sync)
if not sync:
test_endpoint.wait()
automatic_resources = gca_machine_resources.AutomaticResources(
min_replica_count=1, max_replica_count=2,
)
deployed_model = gca_endpoint.DeployedModel(
automatic_resources=automatic_resources,
model=test_model.resource_name,
display_name=None,
)
deploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model=deployed_model,
traffic_split={"0": 100},
metadata=(),
)
@pytest.mark.parametrize(
"model1, model2, model3, percent",
[
(100, None, None, 70),
(50, 50, None, 70),
(40, 60, None, 75),
(40, 60, None, 88),
(88, 12, None, 36),
(11, 89, None, 18),
(1, 99, None, 80),
(1, 2, 97, 68),
(99, 1, 0, 22),
(0, 0, 100, 18),
(7, 87, 6, 46),
],
)
def test_allocate_traffic(self, model1, model2, model3, percent):
old_split = {}
if model1 is not None:
old_split["model1"] = model1
if model2 is not None:
old_split["model2"] = model2
if model3 is not None:
old_split["model3"] = model3
new_split = models.Endpoint._allocate_traffic(old_split, percent)
new_split_sum = 0
for model in new_split:
new_split_sum += new_split[model]
assert new_split_sum == 100
assert new_split["0"] == percent
@pytest.mark.parametrize(
"model1, model2, model3, deployed_model",
[
(100, None, None, "model1"),
(50, 50, None, "model1"),
(40, 60, None, "model2"),
(40, 60, None, "model1"),
(88, 12, None, "model1"),
(11, 89, None, "model1"),
(1, 99, None, "model2"),
(1, 2, 97, "model1"),
(99, 1, 0, "model2"),
(0, 0, 100, "model3"),
(7, 87, 6, "model2"),
],
)
def test_unallocate_traffic(self, model1, model2, model3, deployed_model):
old_split = {}
if model1 is not None:
old_split["model1"] = model1
if model2 is not None:
old_split["model2"] = model2
if model3 is not None:
old_split["model3"] = model3
new_split = models.Endpoint._unallocate_traffic(old_split, deployed_model)
new_split_sum = 0
for model in new_split:
new_split_sum += new_split[model]
assert new_split_sum == 100 or new_split_sum == 0
assert new_split[deployed_model] == 0
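    # Similarly inferred for _unallocate_traffic: the undeployed model's entry is
    # forced to 0 and the remaining traffic is redistributed across the other
    # deployed models; when it was the only model, the split collapses to zero.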
@pytest.mark.parametrize("sync", [True, False])
def test_undeploy(self, undeploy_model_mock, sync):
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
traffic_split={"model1": 100},
)
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
assert dict(test_endpoint._gca_resource.traffic_split) == {"model1": 100}
test_endpoint.undeploy("model1", sync=sync)
if not sync:
test_endpoint.wait()
undeploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model_id="model1",
traffic_split={},
# traffic_split={"model1": 0},
metadata=(),
)
@pytest.mark.parametrize("sync", [True, False])
def test_undeploy_with_traffic_split(self, undeploy_model_mock, sync):
with mock.patch.object(
endpoint_service_client.EndpointServiceClient, "get_endpoint"
) as get_endpoint_mock:
get_endpoint_mock.return_value = gca_endpoint.Endpoint(
display_name=_TEST_DISPLAY_NAME,
name=_TEST_ENDPOINT_NAME,
traffic_split={"model1": 40, "model2": 60},
)
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_endpoint.undeploy(
deployed_model_id="model1",
traffic_split={"model1": 0, "model2": 100},
sync=sync,
)
if not sync:
test_endpoint.wait()
undeploy_model_mock.assert_called_once_with(
endpoint=test_endpoint.resource_name,
deployed_model_id="model1",
traffic_split={"model2": 100},
metadata=(),
)
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_undeploy_raise_error_traffic_split_total(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_endpoint.undeploy(
deployed_model_id="model1", traffic_split={"model2": 99}, sync=sync
)
@pytest.mark.usefixtures("get_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_undeploy_raise_error_undeployed_model_traffic(self, sync):
with pytest.raises(ValueError):
test_endpoint = models.Endpoint(_TEST_ENDPOINT_NAME)
test_endpoint.undeploy(
deployed_model_id="model1",
traffic_split={"model1": 50, "model2": 50},
sync=sync,
)
def test_predict(self, get_endpoint_mock, predict_client_predict_mock):
test_endpoint = models.Endpoint(_TEST_ID)
test_prediction = test_endpoint.predict(
instances=_TEST_INSTANCES, parameters={"param": 3.0}
)
true_prediction = models.Prediction(
predictions=_TEST_PREDICTION, deployed_model_id=_TEST_ID
)
assert true_prediction == test_prediction
predict_client_predict_mock.assert_called_once_with(
endpoint=_TEST_ENDPOINT_NAME,
instances=_TEST_INSTANCES,
parameters={"param": 3.0},
)
def test_explain(self, get_endpoint_mock, predict_client_explain_mock):
test_endpoint = models.Endpoint(_TEST_ID)
test_prediction = test_endpoint.explain(
instances=_TEST_INSTANCES,
parameters={"param": 3.0},
deployed_model_id=_TEST_MODEL_ID,
)
expected_explanations = _TEST_EXPLANATIONS
expected_explanations[0].attributions.extend(_TEST_ATTRIBUTIONS)
expected_prediction = models.Prediction(
predictions=_TEST_PREDICTION,
deployed_model_id=_TEST_ID,
explanations=expected_explanations,
)
assert expected_prediction == test_prediction
predict_client_explain_mock.assert_called_once_with(
endpoint=_TEST_ENDPOINT_NAME,
instances=_TEST_INSTANCES,
parameters={"param": 3.0},
deployed_model_id=_TEST_MODEL_ID,
)
def test_list_models(self, get_endpoint_with_models_mock):
ept = aiplatform.Endpoint(_TEST_ID)
my_models = ept.list_models()
assert my_models == _TEST_DEPLOYED_MODELS
@pytest.mark.usefixtures("get_endpoint_with_models_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_undeploy_all(self, sdk_private_undeploy_mock, sync):
ept = aiplatform.Endpoint(_TEST_ID)
ept.undeploy_all(sync=sync)
if not sync:
ept.wait()
# undeploy_all() results in an undeploy() call for each deployed_model
sdk_private_undeploy_mock.assert_has_calls(
[
mock.call(deployed_model_id=deployed_model.id, sync=sync)
for deployed_model in _TEST_DEPLOYED_MODELS
],
any_order=True,
)
@pytest.mark.usefixtures("list_endpoints_mock")
def test_list_endpoint_has_prediction_client(self):
"""Test call to Endpoint.list() and ensure Endpoints have prediction client set"""
ep_list = aiplatform.Endpoint.list(order_by=_TEST_LIST_ORDER_BY_CREATE_TIME)
assert ep_list # Ensure list is not empty
# Confirm every Endpoint object in the list has a prediction client
assert all(
[
isinstance(
e._prediction_client, aiplatform.utils.PredictionClientWithOverride
)
for e in ep_list
]
)
def test_list_endpoint_order_by_time(self, list_endpoints_mock):
"""Test call to Endpoint.list() and ensure list is returned in descending order of create_time"""
ep_list = aiplatform.Endpoint.list(
filter=_TEST_LIST_FILTER, order_by=_TEST_LIST_ORDER_BY_CREATE_TIME
)
# `order_by` is not passed to API since it is not an accepted field
list_endpoints_mock.assert_called_once_with(
request={"parent": _TEST_PARENT, "filter": _TEST_LIST_FILTER}
)
assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
for ep in ep_list:
assert type(ep) == aiplatform.Endpoint
assert ep_list[0].create_time > ep_list[1].create_time > ep_list[2].create_time
def test_list_endpoint_order_by_display_name(self, list_endpoints_mock):
"""Test call to Endpoint.list() and ensure list is returned in order of display_name"""
ep_list = aiplatform.Endpoint.list(
filter=_TEST_LIST_FILTER, order_by=_TEST_LIST_ORDER_BY_DISPLAY_NAME
)
# `order_by` is not passed to API since it is not an accepted field
list_endpoints_mock.assert_called_once_with(
request={"parent": _TEST_PARENT, "filter": _TEST_LIST_FILTER}
)
assert len(ep_list) == len(_TEST_ENDPOINT_LIST)
for ep in ep_list:
assert type(ep) == aiplatform.Endpoint
assert (
ep_list[0].display_name < ep_list[1].display_name < ep_list[2].display_name
)
@pytest.mark.usefixtures("get_endpoint_with_models_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_endpoint_without_force(
self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
):
ept = aiplatform.Endpoint(_TEST_ID)
ept.delete(sync=sync)
if not sync:
ept.wait()
# undeploy_all() should not be called unless force is set to True
sdk_undeploy_all_mock.assert_not_called()
delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
@pytest.mark.usefixtures("get_endpoint_with_models_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_endpoint_with_force(
self, sdk_undeploy_all_mock, delete_endpoint_mock, sync
):
ept = aiplatform.Endpoint(_TEST_ID)
ept.delete(force=True, sync=sync)
if not sync:
ept.wait()
# undeploy_all() should be called if force is set to True
sdk_undeploy_all_mock.assert_called_once()
delete_endpoint_mock.assert_called_once_with(name=_TEST_ENDPOINT_NAME)
# -*- coding: utf-8 -*-
'''
Support for MacPorts under Mac OSX.
This module has some caveats.
1. Updating the database of available ports is quite resource-intensive.
However, `refresh=True` is the default for all operations that need an
up-to-date copy of available ports. Consider `refresh=False` when you are
sure no db update is needed.
2. In some cases MacPorts doesn't always realize when another copy of itself
is running and will gleefully tromp all over the available ports database.
This makes MacPorts behave in undefined ways until a fresh complete
copy is retrieved.
Because of 1 and 2 it is possible to get the salt-minion into a state where
`salt mac-machine pkg./something/` may never return. Use
`salt-run jobs.active`
on the master to check for potentially long-running calls to `port`.
Finally, ports database updates are always handled with `port selfupdate`
as opposed to `port sync`. This makes sense in the MacPorts user community
but may confuse experienced Linux admins as Linux package managers
don't upgrade the packaging software when doing a package database update.
In other words `salt mac-machine pkg.refresh_db` is more like
`apt-get update; apt-get upgrade dpkg apt-get` than simply `apt-get update`.
'''
# Import python libs
from __future__ import absolute_import
import copy
import logging
import re
# Import salt libs
import salt.utils
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
LIST_ACTIVE_ONLY = True
__virtualname__ = 'pkg'
def __virtual__():
'''
Confine this module to Mac OS with MacPorts.
'''
if not salt.utils.is_darwin():
return False, 'mac_ports only available on MacOS'
if not salt.utils.which('port'):
return False, 'mac_ports requires the "port" binary'
return __virtualname__
def _list(query=''):
cmd = 'port list {0}'.format(query)
out = salt.utils.mac_utils.execute_return_result(cmd)
ret = {}
for line in out.splitlines():
try:
name, version_num, category = re.split(r'\s+', line.lstrip())[0:3]
version_num = version_num[1:]
except ValueError:
continue
ret[name] = version_num
return ret
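# Illustrative parse (the exact `port list` output format is assumed here): a
# line such as "vim   @9.0.1234   editors/vim" splits into name, "@"-prefixed
# version and category, and is stored as ret['vim'] = '9.0.1234'.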
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.is_true(versions_as_list)
# 'removed', 'purge_desired' not yet implemented or not applicable
if any([salt.utils.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
cmd = ['port', 'installed']
out = salt.utils.mac_utils.execute_return_result(cmd)
for line in out.splitlines():
try:
name, version_num, active = re.split(r'\s+', line.lstrip())[0:3]
version_num = version_num[1:]
except ValueError:
continue
if not LIST_ACTIVE_ONLY or active == '(active)':
__salt__['pkg_resource.add_pkg'](ret, name, version_num)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3>
'''
return __salt__['pkg_resource.version'](*names, **kwargs)
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation
Options:
refresh
Update ports with ``port selfupdate``
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package1> <package2> <package3>
'''
if salt.utils.is_true(kwargs.get('refresh', True)):
refresh_db()
available = _list(' '.join(names)) or {}
installed = __salt__['pkg.list_pkgs']() or {}
ret = {}
for key, val in six.iteritems(available):
if key not in installed or salt.utils.compare_versions(ver1=installed[key], oper='<', ver2=val):
ret[key] = val
else:
ret[key] = '{0} (installed)'.format(version(key))
return ret
# available_version is being deprecated
available_version = salt.utils.alias_function(latest_version, 'available_version')
def remove(name=None, pkgs=None, **kwargs):
'''
Removes packages with ``port uninstall``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
pkg_params = __salt__['pkg_resource.parse_targets'](name,
pkgs,
**kwargs)[0]
old = list_pkgs()
targets = [x for x in pkg_params if x in old]
if not targets:
return {}
cmd = ['port', 'uninstall']
cmd.extend(targets)
err_message = ''
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
err_message = exc.strerror
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if err_message:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': err_message, 'changes': ret})
return ret
def install(name=None, refresh=False, pkgs=None, **kwargs):
'''
Install the passed package(s) with ``port install``
name
The name of the formula to be installed. Note that this parameter is
ignored if "pkgs" is passed.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
version
Specify a version to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5'
variant
Specify a variant to pkg to install. Ignored if pkgs is specified.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
salt '*' pkg.install git-core version='1.8.5.5' variant='+credential_osxkeychain+doc+pcre'
Multiple Package Installation Options:
pkgs
A list of formulas to install. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo","bar"]'
salt '*' pkg.install pkgs='["[email protected]","bar"]'
salt '*' pkg.install pkgs='["[email protected]+ssl","[email protected]"]'
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install 'package package package'
'''
pkg_params, pkg_type = \
__salt__['pkg_resource.parse_targets'](name,
pkgs,
{})
if salt.utils.is_true(refresh):
refresh_db()
# Handle version kwarg for a single package target
if pkgs is None:
version_num = kwargs.get('version')
variant_spec = kwargs.get('variant')
spec = None
if version_num:
spec = (spec or '') + '@' + version_num
if variant_spec:
spec = (spec or '') + variant_spec
pkg_params = {name: spec}
if pkg_params is None or len(pkg_params) == 0:
return {}
formulas_array = []
for pname, pparams in six.iteritems(pkg_params):
formulas_array.append(pname + (pparams or ''))
old = list_pkgs()
cmd = ['port', 'install']
cmd.extend(formulas_array)
err_message = ''
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
err_message = exc.strerror
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.compare_dicts(old, new)
if err_message:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': err_message, 'changes': ret})
return ret
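# Illustrative sketch only: how install() assembles a MacPorts target spec from the
# optional `version` and `variant` kwargs. The helper name is hypothetical and the
# sample values mirror the docstring examples above.
def _example_build_install_spec(name, version_num=None, variant_spec=None):
    spec = None
    if version_num:
        spec = (spec or '') + '@' + version_num
    if variant_spec:
        spec = (spec or '') + variant_spec
    # e.g. ('git-core', '1.8.5.5', '+doc+pcre') -> 'git-core@1.8.5.5+doc+pcre'
    return name + (spec or '')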
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
'''
Check whether or not an upgrade is available for all packages
Options:
refresh
Update ports with ``port selfupdate``
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
if refresh:
refresh_db()
return _list('outdated')
def upgrade_available(pkg, refresh=True):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return pkg in list_upgrades(refresh=refresh)
def refresh_db():
'''
Update ports with ``port selfupdate``
'''
cmd = ['port', 'selfupdate']
return salt.utils.mac_utils.execute_return_success(cmd)
def upgrade(refresh=True): # pylint: disable=W0613
'''
Run a full upgrade using MacPorts 'port upgrade outdated'
Options:
refresh
Update ports with ``port selfupdate``
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
ret = {'changes': {},
'result': True,
'comment': '',
}
if refresh:
refresh_db()
old = list_pkgs()
cmd = ['port', 'upgrade', 'outdated']
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
if 'Nothing to upgrade' in exc.strerror:
ret['comment'] = 'Nothing to upgrade'
else:
ret['result'] = False
ret['comment'] = exc.strerror
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret['changes'] = salt.utils.compare_dicts(old, new)
return ret
|
|
#
# Logger class
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import sys
import numpy as np
import collections
# Value types
_COUNTER = 0
_FLOAT = 1
_INT = 2
_TIME = 3
_TEXT = 4
class Logger(object):
"""
Logs numbers to screen and/or a file.
Example
-------
::
log = pints.Logger()
log.add_counter('id', width=2)
log.add_float('Length')
log.log(1, 1.23456)
log.log(2, 7.8901)
"""
def __init__(self):
super(Logger, self).__init__()
# Log to screen
self._stream = sys.stdout
# Log to file (disabled)
self._filename = None
# Log to file in csv mode
self._csv_mode = False
# Started writing?
self._have_logged = False
# Logging fields
# List of field names
self._field_names = []
# List of field formatting options, specified as a tuple:
# (width, type, format1, format2)
# Where format2 is a format to be used if format1 is too wide.
# For the format specification, see:
# https://docs.python.org/3/library/string.html#formatspec
self._field_formats = []
# List of field indices to write to stream
self._stream_fields = []
# Buffer of data to log
self._buffer = collections.deque()
def add_counter(self, name, width=5, max_value=None, file_only=False):
"""
Adds a field for positive integers.
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
width : int
A hint for the width of this column. If numbers exceed this width
layout will break, but no information will be lost.
max_value : int|None
A hint for the maximum number this field will need to display.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Check name & width
name = str(name)
width = int(width)
# Determine field width
width = max(width, len(name), 1)
if max_value is not None:
max_value = float(max_value)
width = max(width, int(np.ceil(np.log10(max_value))))
# Create format
f1 = f2 = '{:<' + str(width) + 'd}'
# Add field
self._field_names.append(name)
self._field_formats.append((width, _COUNTER, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def add_float(self, name, width=9, file_only=False):
"""
Adds a field for floating point number.
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
width : int
A hint for the field's width. The minimum width is 7.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Example: 5 digits => width 11
# -1.234e-299
# 12345678901
# 12345
# Example: 5 digits => 7
# -1.2345
# Example: 1 digit => 7
# -1e-299
# Check name & width
name = str(name)
width = int(width)
# Determine field width
width = max(width, len(name), 7)
# Create format
# 'g' is for general floating point number, formatting depends on
# magnitude
f1 = '{: .' + str(width - 2) + 'g}'
f2 = '{: .' + str(width - 6) + 'g}'
# Add field
self._field_names.append(name)
self._field_formats.append((width, _FLOAT, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def add_int(self, name, width=5, file_only=False):
"""
Adds a field for a (positive or negative) integer.
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
width : int
A hint for the width of this column. If numbers exceed this width
layout will break, but no information will be lost.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Check name & width
name = str(name)
width = int(width)
# Determine field width
width = int(max(width, len(name), 1))
# Create format
f1 = f2 = '{:< ' + str(width) + 'd}'
# Add field
self._field_names.append(name)
self._field_formats.append((width, _INT, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def add_long_float(self, name, file_only=False):
"""
Adds a field for a maximum precision floating point number.
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Example: 17 digits = width 25
# -1.23456699999999992e-299
# 1234567890123456789012345
# 1 23456789012345678
# Example: 17 digits = width 24
# -1.23456699999999997e+00
# 123456789012345678901234
# 1 23456789012345678
# Check name
name = str(name)
# Determine field width
width = max(len(name), 24)
# Create format
f1 = '{: .17e}'
f2 = '{: .16e}'
# Add field
self._field_names.append(name)
self._field_formats.append((width, _FLOAT, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def add_string(self, name, width, file_only=False):
"""
Adds a field showing (at most ``width`` characters of) string values.
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
width : int
The maximum width for strings to display.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Check name, width
name = str(name)
width = int(width)
# Determine field width
width = max(len(name), width)
# Add field
f1 = f2 = None
self._field_names.append(name)
self._field_formats.append((width, _TEXT, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def add_time(self, name, file_only=False):
"""
Adds a field showing a formatted time (given in seconds).
Returns this :class:`Logger` object.
Parameters
----------
name : str
This field's name. Will be displayed in the header.
file_only : boolean
If set to ``True``, this field will not be shown on screen.
"""
if self._have_logged:
raise RuntimeError('Cannot add fields after logging has started.')
# Check name
name = str(name)
# Determine field width
width = max(len(name), 8)
# Add field
f1 = f2 = None
self._field_names.append(name)
self._field_formats.append((width, _TIME, f1, f2))
if not file_only:
self._stream_fields.append(len(self._field_names) - 1)
# Return self to allow for chaining
return self
def log(self, *data):
"""
Logs a new row of data.
"""
# Ignore data if no logging specified
if self._stream is None and self._filename is None:
return
# Check number of fields
nfields = len(self._field_names)
if nfields < 1:
raise ValueError('Unable to log: No fields specified.')
# Exactly one row given? Then log, else store in buffer
rows = []
if len(self._buffer) == 0 and len(data) == nfields:
rows.append(data)
else:
self._buffer.extend(data)
while len(self._buffer) >= nfields:
rows.append([self._buffer.popleft() for i in range(nfields)])
# Nothing to print? Then return
if not rows:
return
# Log in CSV format
if self._csv_mode and self._filename is not None:
mode = 'a' if self._have_logged else 'w'
with open(self._filename, mode) as f:
# Write names
if not self._have_logged:
f.write(','.join(
['"' + x + '"' for x in self._field_names]) + '\n')
# Write data
for row in rows:
line = []
column = iter(row)
for width, dtype, f1, f2 in self._field_formats:
v = next(column)
if v is None:
x = ''
elif dtype == _FLOAT:
x = '{:.17e}'.format(v)
elif dtype == _TIME:
x = str(v)
elif dtype == _TEXT:
x = '"' + str(v) + '"'
else:
x = str(int(v))
line.append(x)
f.write(','.join(line) + '\n')
# No need to log to screen? Then skip line formatting and return
if not self._stream:
self._have_logged = True
return
# Format fields
formatted_rows = []
# Add headers
if not self._have_logged:
headers = []
for i, name in enumerate(self._field_names):
width = self._field_formats[i][0]
headers.append(name + ' ' * (width - len(name)))
formatted_rows.append(headers)
# Add data
for row in rows:
column = iter(row)
formatted_row = []
for width, dtype, f1, f2 in self._field_formats:
v = next(column)
if v is None:
x = ' ' * width
elif dtype == _FLOAT:
x = f1.format(v)
if len(x) > width:
x = f2.format(v)
x += ' ' * (width - len(x))
elif dtype == _TIME:
x = self._format_time(v)
elif dtype == _TEXT:
x = str(v)[:width]
x += ' ' * (width - len(x))
else:
x = f1.format(int(v))
formatted_row.append(x)
formatted_rows.append(formatted_row)
# Log to screen
if self._stream is not None:
lines = []
for row in formatted_rows:
lines.append(' '.join([row[i] for i in self._stream_fields]))
self._stream.write('\n'.join(lines) + '\n')
# Log to file (non csv)
if self._filename is not None and not self._csv_mode:
lines = []
for row in formatted_rows:
lines.append(' '.join([x for x in row]))
with open(self._filename, 'a' if self._have_logged else 'w') as f:
f.write('\n'.join(lines) + '\n')
# Have logged!
self._have_logged = True
def set_filename(self, filename=None, csv=False):
"""
Enables logging to a file if a ``filename`` is passed in. Logging to
file can be disabled by passing ``filename=None``.
Usually, file logging happens in the same format as logging to screen.
To obtain csv logs instead, set `csv=True`
"""
if self._have_logged:
raise RuntimeError('Cannot configure after logging has started.')
if filename is None:
self._filename = None
else:
self._filename = str(filename)
self._csv_mode = True if csv else False
def set_stream(self, stream=sys.stdout):
"""
Enables logging to screen if an output ``stream`` is passed in. Logging
to screen can be disabled by passing ``stream=None``.
"""
if self._have_logged:
raise RuntimeError('Cannot configure after logging has started.')
self._stream = stream
def _format_time(self, seconds):
"""
Formats a time in seconds to the format "mmm:ss.s", i.e. a three-digit
minutes figure and a three-digit seconds figure.
"""
# Split off minutes
minutes = int(seconds // 60)
seconds -= 60 * minutes
        # Round seconds above 59.95 so we never show 60.0 seconds
if seconds >= 59.95:
minutes += 1
seconds = 0
# Format and return
return '{:>3d}:{:0>4.1f}'.format(minutes, seconds)
class Loggable(object):
"""
Interface for classes that can log to a :class:`Logger`.
"""
def _log_init(self, logger):
"""
Adds this :class:`Loggable's<Loggable>` fields to a :class:`Logger`.
"""
pass
def _log_write(self, logger):
"""
Logs data for each of the fields specified in :meth:`_log_init()`.
"""
pass
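# A minimal usage sketch (field names and values are made up): it mirrors the
# docstring example above and additionally enables csv output via set_filename().
def _example_logger_usage(tmp_csv='example_log.csv'):
    log = Logger()
    log.set_filename(tmp_csv, csv=True)
    log.add_counter('id', width=2)
    log.add_float('Length')
    log.add_time('Time')
    log.log(1, 1.23456, 61.2)    # one complete row: printed and appended to the csv
    log.log(2, 7.8901, 125.0)
    return tmp_csv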
|
|
import cProfile
import datetime
import enum
import json
import logging
import os
import random
import re
import time
import itertools
import csv
class SetType(enum.Enum):
""" Types of set, i.e. training set
"""
ALL = "all"
QUERY = "query"
TEST = "test"
TRAIN = "train"
def get_session(gpu_fraction=0.3):
    """
    Helper function to ensure that Keras only uses some fraction of the GPU memory
    Args:
        gpu_fraction: Fraction of the GPU memory to use
    Returns:
        A tensorflow session to be passed into tensorflow_backend.set_session
    """
    import tensorflow as tf  # Shadow import for testing
num_threads = os.environ.get('OMP_NUM_THREADS')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
if num_threads:
return tf.Session(config=tf.ConfigProto(
gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
else:
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def setup_custom_logger(name):
""" Setup a custom logger that will output to the console
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# create a console handler
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# create a file handler
file_handler = logging.FileHandler("./log_{}".format(name))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def get_index_of_tuple(list_of_tuple, index_of_tuple, value):
""" Determine how far through the list to find the value.
If the value does not exist in the list, then return the
length of the list.
Args:
list_of_tuple: a list of tuples i.e. [(index1, index2, index3)]
        index_of_tuple: which index in the tuple you want to compare the value to
value: the value to search
Return:
the number of items in the list it has compared
"""
for index_of_list, tupl in enumerate(list_of_tuple):
if tupl[index_of_tuple] == value:
return index_of_list + 1
    # could not find value in list_of_tuple, so return the length of the list
return len(list_of_tuple)
def get_index_of_pairs(list_of_tuple, index_of_tuple_1, index_of_tuple_2, value):
""" Determine how far through the list to find the value.
If the value does not exist in the list, then return the
length of the list.
Args:
list_of_tuple: a list of tuples i.e. [(index1, index2, index3)]
index_of_tuple_1: which index in the tuple you want to compare the value to
index_of_tuple_2: which index in the tuple you want to compare the value to
value: the value to search
Return:
the number of items in the list it has compared
"""
for index_of_list, tupl in enumerate(list_of_tuple):
if tupl[index_of_tuple_1] == value and tupl[index_of_tuple_2] == value:
return index_of_list + 1
    # could not find value in list_of_tuple, so return the length of the list
return len(list_of_tuple)
def get_basename(string):
""" Extract the basename from the filepath.
    Args:
        filepath in the format of a string
    Returns:
        filename in the format of a string
"""
return os.path.basename(os.path.normpath(string))
def get_numeric(string):
""" Extract the numeric value in a string.
Args:
string
Returns:
a string with only the numeric value extracted
"""
return re.sub('[^0-9]', '', string)
def get_timestamp(timestamp):
""" Convert datetime object into a string in the format of
Year-Month-Date Hour:Minute:Second
Args:
datetime
Returns:
string in the format of Year-Month-Date Hour:Minute:Second
"""
    return timestamp.strftime("%Y-%m-%d %H:%M:%S") if isinstance(timestamp, datetime.datetime) else timestamp
def should_drop(drop_percentage):
""" Based on the given percentage, provide an answer
whether or not to drop the image.
Args:
drop_percentage: the likelihood of a drop in the form of a float from [0,1]
Returns:
a boolean whether to drop or not drop the image
"""
return random.random() < drop_percentage
def read_json(filepath):
""" Assuming the json file contains a dictionary per line,
read the json file and create a generator that yields each
dictionary per line.
Args:
filepath: path to the json file
Returns:
a generator that yields dictionary line by line
"""
with open(filepath) as file:
for line in file:
yield json.loads(line)
def remove_file(filename):
""" Assuming the filename exists where the application is run,
remove the file.
Args:
filename
Returns:
filename is removed
"""
try:
os.remove(filename)
except OSError:
pass
def timewrapper(func):
""" This is a decorator to calculate how fast each operation takes.
Args:
func: function pointer
args: arguments to the function
kwargs: named arguments not defined in advance to be passed in to the function
"""
def timer(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
elapsed = time.time() - start
print("{} took {} seconds".format(func.__name__, elapsed))
return result
return timer
def profilewrapper(func):
""" This is a decorator to profile a function.
Args:
func: function pointer
args: arguments to the function
kwargs: named arguments not defined in advance to be passed in to the function
"""
def profiler(*args, **kwargs):
profile = cProfile.Profile()
try:
profile.enable()
result = func(*args, **kwargs)
profile.disable()
return result
finally:
profile.print_stats()
return profiler
def get_split(key, pivots):
if not isinstance(pivots, list):
pivots = list(pivots)
pivots.sort()
hash_val = hash(key)%100
for split, pivot in enumerate(pivots):
if hash_val < pivot:
return split
return len(pivots)
def train_test_key_filter(key, split="train"):
hash_val = get_split(key, [90])
split = split.lower()
if split == "train":
desired_val = 0
elif split == "test":
desired_val = 1
else:
raise ValueError('Unknown Split Type: %s'%split)
if hash_val == desired_val:
return True
else:
return False
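# Hypothetical sketch of how the hash-based split above buckets keys: with
# pivots=[90], roughly 90% of keys land in split 0 ("train") and the rest in
# split 1 ("test"). The keys are invented identifiers and the exact counts
# depend on Python's string hashing.
def _example_split_counts(keys=None):
    keys = keys or ['img_%04d.jpg' % i for i in range(1000)]
    counts = {0: 0, 1: 0}
    for key in keys:
        counts[get_split(key, [90])] += 1
    return counts  # e.g. {0: ~900, 1: ~100}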
def prep_for_siamese(*csv_files, json_file='./out.json', full_combos=False):
"""
Prepares a json file containing pairwise feature vectors for input to the siamese docker container.
:param csv_files: List of CSV files containing i2v-produced feature vectors
:param json_file: Optional output json file path
:param full_combos: Boolean indicating whether full combinations of observation set records should be used.
"""
# Generator for csv rows from a single csv file.
def iter_rows(csv_file):
with open(csv_file, newline='') as csv_hdl:
for row in csv.reader(csv_hdl):
yield row
# Generator for flattened access to rows from multiple csv files.
def iter_many(row_gens):
for gen in row_gens:
for row in gen:
yield row
if len(csv_files) == 1:
if not full_combos:
            raise NotImplementedError("Full combinations must be applied if only one csv is supplied.")
combos = itertools.combinations(iter_rows(csv_files[0]), 2)
else:
if full_combos:
combos = itertools.combinations(iter_many(map(iter_rows, csv_files)), 2)
elif len(csv_files) == 2:
combos = itertools.product(*map(iter_rows, csv_files))
else:
            raise NotImplementedError("Full combinations must be applied if more than two csvs are supplied")
with open(json_file, 'w') as json_hdl:
for left, right in combos:
try:
dct = {
'left': list([float(v) for v in left[1:]]),
'right': list([float(v) for v in right[1:]]),
'left_img': left[0],
'right_img': right[0]
}
json_hdl.write(json.dumps(dct) + '\n')
except IOError:
raise IOError("Error occurred writing vectors to json")
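# Usage sketch only; the csv paths are hypothetical. Each input csv is expected to
# hold an identifier in column 0 and feature values in the remaining columns;
# prep_for_siamese() then writes one json dict per (left, right) pair.
def _example_prep_for_siamese():
    prep_for_siamese('./query_vectors.csv', './test_vectors.csv',
                     json_file='./pairs.json')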
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility functions for use with Swift."""
import hashlib
import hmac
import json
import logging
import six
import time
import traceback
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
EMPTY_ETAG = 'd41d8cd98f00b204e9800998ecf8427e'
def config_true_value(value):
"""
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise.
This function comes from swift.common.utils.config_true_value()
"""
return value is True or \
(isinstance(value, six.string_types) and value.lower() in TRUE_VALUES)
def prt_bytes(bytes, human_flag):
"""
    Convert a byte count to a printable format: either a compact 4-character
    human-readable form (as with ``ls -lh``) or a 12-character right-justified
    string.
"""
if human_flag:
suffix = ''
mods = list('KMGTPEZY')
temp = float(bytes)
if temp > 0:
while temp > 1023:
try:
suffix = mods.pop(0)
except IndexError:
break
temp /= 1024.0
if suffix != '':
if temp >= 10:
bytes = '%3d%s' % (temp, suffix)
else:
bytes = '%.1f%s' % (temp, suffix)
if suffix == '': # must be < 1024
bytes = '%4s' % bytes
else:
bytes = '%12s' % bytes
return bytes
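# A small illustration (values chosen arbitrarily) of the two output modes of
# prt_bytes(): a compact human-readable form versus a 12-character
# right-justified raw count.
def _example_prt_bytes():
    human = prt_bytes(123456789, True)   # '117M'
    raw = prt_bytes(123456789, False)    # '   123456789'
    return human, raw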
def generate_temp_url(path, seconds, key, method, absolute=False):
"""Generates a temporary URL that gives unauthenticated access to the
Swift object.
:param path: The full path to the Swift object. Example:
/v1/AUTH_account/c/o.
:param seconds: The amount of time in seconds the temporary URL will
be valid for.
:param key: The secret temporary URL key set on the Swift cluster.
To set a key, run 'swift post -m
"Temp-URL-Key:b3968d0207b54ece87cccc06515a89d4"'
    :param method: An HTTP method, typically either GET or PUT, to allow for
this temporary URL.
:raises: ValueError if seconds is not a positive integer
:raises: TypeError if seconds is not an integer
:return: the path portion of a temporary URL
"""
if seconds < 0:
raise ValueError('seconds must be a positive integer')
try:
if not absolute:
expiration = int(time.time() + seconds)
else:
expiration = int(seconds)
except TypeError:
raise TypeError('seconds must be an integer')
standard_methods = ['GET', 'PUT', 'HEAD', 'POST', 'DELETE']
if method.upper() not in standard_methods:
logger = logging.getLogger("swiftclient")
logger.warning('Non default HTTP method %s for tempurl specified, '
'possibly an error', method.upper())
hmac_body = '\n'.join([method.upper(), str(expiration), path])
# Encode to UTF-8 for py3 compatibility
sig = hmac.new(key.encode(),
hmac_body.encode(),
hashlib.sha1).hexdigest()
return ('{path}?temp_url_sig='
'{sig}&temp_url_expires={exp}'.format(
path=path,
sig=sig,
exp=expiration))
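# A hedged example: the account path, key, and lifetime below are placeholders.
# The returned value is only the path-plus-query portion of the temporary URL;
# it would normally be appended to the cluster's storage endpoint.
def _example_temp_url():
    return generate_temp_url(
        path='/v1/AUTH_account/container/object.bin',
        seconds=3600,                              # valid for one hour from "now"
        key='b3968d0207b54ece87cccc06515a89d4',
        method='GET')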
def parse_api_response(headers, body):
charset = 'utf-8'
# Swift *should* be speaking UTF-8, but check content-type just in case
content_type = headers.get('content-type', '')
if '; charset=' in content_type:
charset = content_type.split('; charset=', 1)[1].split(';', 1)[0]
return json.loads(body.decode(charset))
def report_traceback():
"""
Reports a timestamp and full traceback for a given exception.
:return: Full traceback and timestamp.
"""
try:
formatted_lines = traceback.format_exc()
now = time.time()
return formatted_lines, now
except AttributeError:
return None, None
class NoopMD5(object):
def __init__(self, *a, **kw):
pass
def update(self, *a, **kw):
pass
def hexdigest(self, *a, **kw):
return ''
class ReadableToIterable(object):
"""
Wrap a filelike object and act as an iterator.
It is recommended to use this class only on files opened in binary mode.
Due to the Unicode changes in python 3 files are now opened using an
encoding not suitable for use with the md5 class and because of this
hit the exception on every call to next. This could cause problems,
especially with large files and small chunk sizes.
"""
def __init__(self, content, chunk_size=65536, md5=False):
"""
:param content: The filelike object that is yielded from.
:param chunk_size: The max size of each yielded item.
:param md5: Flag to enable calculating the MD5 of the content
as it is yielded.
"""
self.md5sum = hashlib.md5() if md5 else NoopMD5()
self.content = content
self.chunk_size = chunk_size
def get_md5sum(self):
return self.md5sum.hexdigest()
def __next__(self):
"""
Both ``__next__`` and ``next`` are provided to allow compatibility
with python 2 and python 3 and their use of ``iterable.next()``
and ``next(iterable)`` respectively.
"""
chunk = self.content.read(self.chunk_size)
if not chunk:
raise StopIteration
try:
self.md5sum.update(chunk)
except TypeError:
self.md5sum.update(chunk.encode())
return chunk
def next(self):
return self.__next__()
def __iter__(self):
return self
class LengthWrapper(object):
"""
Wrap a filelike object with a maximum length.
Fix for https://github.com/kennethreitz/requests/issues/1648
It is recommended to use this class only on files opened in binary mode.
"""
def __init__(self, readable, length, md5=False):
"""
:param readable: The filelike object to read from.
:param length: The maximum amount of content to that can be read from
the filelike object before it is simulated to be
empty.
:param md5: Flag to enable calculating the MD5 of the content
as it is read.
"""
self.md5sum = hashlib.md5() if md5 else NoopMD5()
self._length = self._remaining = length
self._readable = readable
def __len__(self):
return self._length
def get_md5sum(self):
return self.md5sum.hexdigest()
def read(self, *args, **kwargs):
if self._remaining <= 0:
return ''
chunk = self._readable.read(*args, **kwargs)[:self._remaining]
self._remaining -= len(chunk)
try:
self.md5sum.update(chunk)
except TypeError:
self.md5sum.update(chunk.encode())
return chunk
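# Minimal usage sketch with an in-memory file: wrap a BytesIO in LengthWrapper,
# drain it in small chunks, and check the computed MD5 against hashlib. Purely
# illustrative; real callers pass file objects opened in binary mode.
def _example_length_wrapper():
    import io
    payload = b'swift example payload'
    wrapped = LengthWrapper(io.BytesIO(payload), len(payload), md5=True)
    chunks = []
    while True:
        chunk = wrapped.read(8)
        if not chunk:
            break
        chunks.append(chunk)
    assert b''.join(chunks) == payload
    assert wrapped.get_md5sum() == hashlib.md5(payload).hexdigest()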
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from threading import Thread
from Queue import Queue
from bagpipe.bgp.engine import RouteEvent, Subscription, Unsubscription
from bagpipe.bgp.engine.worker import Worker
from bagpipe.bgp.engine.bgp_peer_worker import BGPPeerWorker
from bagpipe.bgp.common.looking_glass import LookingGlass, LGMap
from bagpipe.bgp.common import logDecorator
from bagpipe.exabgp.structure.address import AFI, SAFI
from bagpipe.exabgp.message.update.attribute.communities import RouteTarget
log = logging.getLogger(__name__)
class WorkerCleanupEvent(object):
def __init__(self, worker):
self.worker = worker
def __repr__(self):
return "WorkerCleanupEvent:%s" % (self.worker.name)
class Match(object):
def __init__(self, afi, safi, routeTarget):
assert(isinstance(afi, AFI))
assert(isinstance(safi, SAFI))
assert(routeTarget is None or isinstance(routeTarget, RouteTarget))
self.afi = afi
self.safi = safi
self.routeTarget = routeTarget
# FIXME: use a better hash if needed for performances
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return "match:%s/%s,%s" % (self.afi or "*", self.safi or "*",
self.routeTarget or "*")
def __cmp__(self, other):
assert isinstance(other, Match)
self_afi = self.afi or AFI(0)
self_safi = self.safi or SAFI(0)
self_rt = self.routeTarget or RouteTarget(0, None, 0)
other_afi = other.afi or AFI(0)
other_safi = other.safi or SAFI(0)
other_rt = other.routeTarget or RouteTarget(0, None, 0)
return cmp((self_afi, self_safi, self_rt),
(other_afi, other_safi, other_rt))
StopEvent = "StopEvent"
class RouteTableManager(Thread, LookingGlass):
"""
This singleton class will dispatch events between Workers.
    Events relate to BGP routes that are announced or withdrawn by workers.
Workers subscribe to events by indicating AFI, SAFI, Route Targets in which
they are interested.
There can be workers responsible for handling services locally (e.g. a VPN
instance) and workers that are BGP peers.
Though sources of routes are typically Workers, they don't need to be; any
class can source a route.
"""
class WorkersAndEntries(object):
def __init__(self):
self.workers = set()
self.entries = set()
def __repr__(self):
return "workers: %s\nentries: %s" % (self.workers,
self.entries)
def __init__(self):
Thread.__init__(self, name="RouteTableManager")
self.setDaemon(True)
self._match2workersAndEntries = {}
# keys are Matches, values are WorkersAndEntries objects
self._worker2matches = {} # keys are Workers, values are Match objects
self._source_nlri2entry = {}
# keys are (source,nlri) tuples, values are Entry objects
self._source2entries = {}
# dict: keys are event sources, each value is a set() of Entry
# objects
self._queue = Queue()
@logDecorator.logInfo
def stop(self):
self.enqueue(StopEvent)
def run(self):
while True:
log.debug("RouteTableManager waiting on queue")
event = self._queue.get()
log.debug("RouteTableManager received event %s", event)
try:
if event.__class__ == RouteEvent:
self._receiveRouteEvent(event)
elif event.__class__ == Subscription:
self._workerSubscribes(event)
elif event.__class__ == Unsubscription:
self._workerUnsubscribes(event)
elif event.__class__ == WorkerCleanupEvent:
self._workerCleanup(event.worker)
elif event == StopEvent:
log.info("StopEvent => breaking main loop")
break
except Exception as e:
log.error("Exception during processing of event: %s", repr(e))
log.error(" event was: %s", event)
log.error("%s", traceback.format_exc())
log.debug("RouteTableManager queue size: %d", self._queue.qsize())
log.info("Out of main loop")
def enqueue(self, event):
self._queue.put(event)
def _checkMatch2workersAndEntriesCleanup(self, match):
try:
item = self._match2workersAndEntries[match]
except KeyError:
log.warning("why are we here ?")
# nothing to cleanup
return
if len(item.workers) == 0 and len(item.entries) == 0:
del self._match2workersAndEntries[match]
def _match2workersAndEntriesLookupCreate(self, match):
try:
return self._match2workersAndEntries[match]
except KeyError:
wa = RouteTableManager.WorkersAndEntries()
self._match2workersAndEntries[match] = wa
return wa
def _match2entries(self, match, createIfNone=False, emptyListIfNone=True):
if createIfNone:
return self._match2workersAndEntriesLookupCreate(match).entries
try:
return self._match2workersAndEntries[match].entries
except KeyError:
if emptyListIfNone:
return []
else:
raise
def _match2workers(self, match, createIfNone=False, emptyListIfNone=True):
if createIfNone:
return self._match2workersAndEntriesLookupCreate(match).workers
try:
return self._match2workersAndEntries[match].workers
except KeyError:
if emptyListIfNone:
return []
else:
raise
def _source2entriesAddEntry(self, entry):
try:
entries = self._source2entries[entry.source]
except KeyError:
entries = set()
self._source2entries[entry.source] = entries
entries.add(entry)
def _source2entriesRemoveEntry(self, entry):
try:
entries = self._source2entries[entry.source]
log.debug("_source2entries[ entry.worker ] = %s ", entries)
entries.discard(entry)
except KeyError:
log.debug("(attempt at removing a non existing entry from "
"self._source2entries[ entry.source ] : %s)", entry)
pass
def getWorkerSubscriptions(self, worker):
"""
the returned entries should *not* be modified by caller
(TODO: protect worker object by making their variables private)
"""
if worker not in self._worker2matches:
return []
else:
matches = self._worker2matches[worker]
if matches is None:
return []
else:
return sorted(matches)
def getWorkerRouteEntries(self, worker):
"""
the returned entries should *not* be modified by caller
(TODO: protect Entry objects by making their variables private)
"""
if worker not in self._source2entries:
return []
else:
entries = self._source2entries[worker]
if entries is None:
return []
else:
return entries
def _workerSubscribes(self, sub):
        # TODO: this function currently does not consider whether or not the
        # worker is already subscribed to a set of route events before
        # processing a subscription. In particular, multiple identical
        # subscriptions will lead to this code re-synthesizing events at each
        # call.
#
# Ideally, the code should detect that the worker is already subscribed
# and skip the subscription. *But* such a change should not be done
# until the code in ExaBGPPeerWorker is updated to support this.
assert(isinstance(sub.worker, Worker))
log.info("workerSubscribes: %s", sub)
worker = sub.worker
# self._dumpState()
match = Match(sub.afi, sub.safi, sub.routeTarget)
# update match2worker
self._match2workers(match, createIfNone=True).add(worker)
# update worker2matches
if worker not in self._worker2matches:
self._worker2matches[worker] = set()
# re-synthesize events
for entry in self._match2entries(match):
log.debug("Found a entry for this match: %s", entry)
event = RouteEvent(RouteEvent.ADVERTISE, entry)
(shouldDispatch, reason) = self._shouldDispatch(event, worker)
if shouldDispatch:
# check if the entry carries a routeTarget to which the worker
# was already subscribed
for rt in entry.routeTargets:
if Match(entry.afi,
entry.safi,
rt) in self._worker2matches[worker]:
(shouldDispatch, reason) = (
False,
"worker already had a subscription for this route")
break
if shouldDispatch:
log.info("Dispatching re-synthesized event for %s", entry)
worker.enqueue(event)
else:
log.info("%s => not dispatching re-synthesized event for %s",
reason, entry)
# update worker2matches
self._worker2matches[worker].add(match)
# self._dumpState()
def _workerUnsubscribes(self, sub):
assert(isinstance(sub.worker, Worker))
# self._dumpState()
match = Match(sub.afi, sub.safi, sub.routeTarget)
# update _worker2matches
if sub.worker not in self._worker2matches:
log.warning("worker %s unsubs'd from %s but wasn't tracked yet",
sub.worker, match)
else:
try:
self._worker2matches[sub.worker].remove(match)
except KeyError:
log.warning("worker %s unsubs' from %s but this match was"
"not tracked for this worker (should not happen,"
" this is a bug)", sub.worker, match)
# synthesize withdraw events
for entry in self._match2entries(match, emptyListIfNone=True):
intersect = set(
self._matchesFor(entry.afi, entry.safi, entry.routeTargets)
).intersection(self._worker2matches[sub.worker])
if len(intersect) > 0:
log.debug("Will not synthesize withdraw event for %s, because"
" worker subscribed to %s", entry, intersect)
else:
log.debug("Found a entry for this match: %s", entry)
event = RouteEvent(RouteEvent.WITHDRAW, entry)
(shouldDispatch, reason) = self._shouldDispatch(event,
sub.worker)
if shouldDispatch:
log.info("Dispatching re-synthesized event for %s", entry)
sub.worker.enqueue(event)
else:
log.info(
"%s => not dispatching re-synthesized event for %s",
reason, entry)
# update _match2workersAndEntries
if match not in self._match2workersAndEntries:
log.warning("worker %s unsubscribed from %s but we had no such"
" subscription yet", sub.worker, match)
else:
try:
self._match2workers(match).remove(sub.worker)
except KeyError:
log.warning("worker %s unsubscribed from %s but was not"
" subscribed yet", sub.worker, match)
self._checkMatch2workersAndEntriesCleanup(match)
# self._dumpState()
def _matchesFor(self, afi, safi, routeTargets):
# generate all possible match entries for this afi/safi
# and these routetargets, with all possible wildcards
#
        # There are 4*(n+1) possible Match objects (for n routeTargets)
for _afi in (Subscription.ANY_AFI, afi):
for _safi in (Subscription.ANY_SAFI, safi):
yield Match(_afi, _safi, None)
if routeTargets is not None:
for rt in routeTargets:
yield Match(_afi, _safi, rt)
def _propagateRouteEvent(self, routeEvent, exceptWorkers=None):
'''Propagate routeEvent to workers subscribed to the route RTs
or wildcards, except the workers in exceptWorkers. Returns the list of
workers to which the event was propagated.'''
log.debug("Propagate event to interested workers: %s", routeEvent)
re = routeEvent.routeEntry
if exceptWorkers is None:
exceptWorkers = []
targetWorkers = set()
for match in self._matchesFor(re.afi, re.safi, re.routeTargets):
log.debug("Finding interested workers for match %s", match)
interestedWorkers = self._match2workers(match,
emptyListIfNone=True)
log.debug(" Workers interested in this match: %s",
interestedWorkers)
for worker in interestedWorkers:
(shouldDispatch, reason) = self._shouldDispatch(routeEvent,
worker)
if shouldDispatch:
if worker not in exceptWorkers:
log.debug("Will dispatch event to %s: %s",
worker, routeEvent)
targetWorkers.add(worker)
else:
log.debug("Decided not to dispatch to %s, based on "
"exceptWorkers: %s", worker, routeEvent)
else:
log.debug("Decided not to dispatch to %s: %s (%s)",
worker, reason, routeEvent)
for worker in targetWorkers:
log.info("Dispatching event to %s: %s", worker, routeEvent)
worker.enqueue(routeEvent)
return targetWorkers
def _receiveRouteEvent(self, routeEvent):
log.info("receive: %s", routeEvent)
entry = routeEvent.routeEntry
log.debug("Try to find a entry from same peer with same nlri")
try:
replacedEntry = self._source_nlri2entry[(entry.source, entry.nlri)]
except KeyError:
replacedEntry = None
log.debug(" Result: %s", replacedEntry)
# replacedEntry should be non-empty for a withdraw
if replacedEntry is None and (routeEvent.type == RouteEvent.WITHDRAW):
log.warning("WITHDRAW but found no route that we could remove: %s",
routeEvent.routeEntry)
return
# Propagate events to interested workers...
if routeEvent.type == RouteEvent.ADVERTISE:
if replacedEntry == routeEvent.routeEntry:
log.warning("The route advertized is the same as the one "
"previously advertized by the source, ignoring")
return
            # propagate event to interested workers
            # and include the info on the route that is replaced by this
            # route, if any
routeEvent.setReplacedRoute(replacedEntry)
workersAlreadyNotified = self._propagateRouteEvent(routeEvent)
else: # WITHDRAW
workersAlreadyNotified = None
# Synthesize and dispatch a withdraw event for the route entry that
# was withdrawn or replaced, except, in the case of a replaced route,
# to workers that had the ADVERTISE event
if replacedEntry is not None:
log.debug("Synthesizing a withdraw event for replaced route %s",
replacedEntry)
removalEvent = RouteEvent(RouteEvent.WITHDRAW,
replacedEntry,
routeEvent.source)
self._propagateRouteEvent(removalEvent, workersAlreadyNotified)
# Update match2entries and source2entries for the
# replacedRoute
for match in self._matchesFor(replacedEntry.afi,
replacedEntry.safi,
replacedEntry.routeTargets):
try:
self._match2entries(match).discard(replacedEntry)
except KeyError:
log.error("Trying to remove a route from a match, but"
" match %s not found - not supposed to happen"
" (route: %s)", match, replacedEntry)
self._checkMatch2workersAndEntriesCleanup(match)
self._source2entriesRemoveEntry(replacedEntry)
if routeEvent.type == RouteEvent.ADVERTISE:
            # Update match2entries and source2entries for the newly
            # advertised route
for match in self._matchesFor(entry.afi,
entry.safi,
entry.routeTargets):
self._match2entries(match, createIfNone=True).add(entry)
self._source2entriesAddEntry(entry)
# Update _source_nlri2entry
self._source_nlri2entry[(entry.source, entry.nlri)] = entry
else: # WITHDRAW
# Update _source_nlri2entry
try:
del self._source_nlri2entry[(entry.source, entry.nlri)]
except KeyError:
log.error("Withdraw, but nothing removed in "
"_sourcenlri2entryRemove")
# self._dumpState()
def _shouldDispatch(self, routeEvent, targetWorker):
'''
returns a (boolean,string) tuple
the string contains the reason why the routeEvent should not be
dispatched to targetWorker
'''
if (routeEvent.source == targetWorker):
return (False, "not dispatching an update back to its source")
elif (isinstance(routeEvent.source, BGPPeerWorker)
and isinstance(targetWorker, BGPPeerWorker)):
return (False, "do not dispatch a route between BGP peers")
else:
return (True, "")
def _workerCleanup(self, worker):
'''
Consider all routes announced by this worker as withdrawn.
Consider this worker unsubscribed from all of its current
subscriptions.
'''
log.info("Cleanup for worker %s", worker.name)
# synthesize withdraw events for all routes from this worker
if worker in self._source2entries:
entries = self._source2entries[worker]
log.info(" Preparing to withdraw %d routes that were advertised "
"by worker", len(entries))
for entry in entries:
log.info(" Enqueue event to Withdraw route %s", entry)
self.enqueue(RouteEvent(RouteEvent.WITHDRAW, entry))
del self._source2entries[worker]
else:
log.info("(we had no trace of %s in _source2entries)", worker)
# remove worker from all of its subscriptions
if worker in self._worker2matches:
for match in self._worker2matches[worker]:
assert(match in self._match2workersAndEntries)
self._match2workers(match).remove(worker)
del self._worker2matches[worker]
# self._dumpState()
def _dumpState(self):
if not log.isEnabledFor(logging.DEBUG):
return
dump = []
dump.append("~~~ Worker -> Matches ~~~")
for worker in self._worker2matches.keys():
dump.append(" %s" % worker)
matches = list(self._worker2matches[worker])
matches.sort()
for match in matches:
dump.append(" %s" % match)
dump.append("\n~~~ Source -> Entries ~~~")
for source in self._source2entries.keys():
dump.append(" %s" % source)
for entry in self._source2entries[source]:
dump.append(" %s" % entry)
match2workerDump = []
match2entriesDump = []
matches = list(self._match2workersAndEntries.keys())
matches.sort()
for match in matches:
match2workerDump.append(" %s" % match)
match2entriesDump.append(" %s" % match)
for worker in self._match2workers(match):
match2workerDump.append(" %s" % worker)
for re in self._match2entries(match):
match2entriesDump.append(" %s" % re)
dump.append("\n~~~ Match -> Workers ~~~\n%s\n" %
"\n".join(match2workerDump))
dump.append("~~~ Match -> Entries ~~~\n%s\n" %
"\n".join(match2entriesDump))
dump.append("~~~ (source,nlri) -> entries ~~~")
for ((source, nlri), entry) in self._source_nlri2entry.iteritems():
dump.append(" (%s, %s): %s" % (source, nlri, entry))
log.debug("RouteTableManager data dump:\n\n%s\n", "\n".join(dump))
# Looking Glass #####
def getLGMap(self):
return {"workers": (LGMap.COLLECTION,
(self.getLGWorkerList, self.getLGWorkerFromPathItem)),
"routes": (LGMap.SUBTREE, self.getLGRoutes)}
def getLGRoutes(self, pathPrefix):
result = {}
match_IPVPN = Match(
AFI(AFI.ipv4), SAFI(SAFI.mpls_vpn), Subscription.ANY_RT)
match_EVPN = Match(
AFI(AFI.l2vpn), SAFI(SAFI.evpn), Subscription.ANY_RT)
match_RTC = Match(AFI(AFI.ipv4), SAFI(SAFI.rtc), Subscription.ANY_RT)
for match in [match_IPVPN, match_EVPN, match_RTC]:
matchResult = []
if match in self._match2workersAndEntries:
for entry in self._match2entries(match):
matchResult.append(
entry.getLookingGlassInfo(pathPrefix))
result[repr(match)] = matchResult
return result
def getLGWorkerList(self):
return [{"id": worker.name} for worker in self._worker2matches.keys()]
def getLGWorkerFromPathItem(self, pathItem):
# TODO(tmmorin): do a hash-lookup instead of looping the list
for worker in self._source2entries.keys():
if worker.name == pathItem:
return worker
def getAllRoutesButRTC(self):
try:
return [re for re in
self._match2workersAndEntries[Match(Subscription.ANY_AFI,
Subscription.ANY_SAFI,
Subscription.ANY_RT)
].entries
if not (re.afi == AFI(AFI.ipv4) and
re.safi == SAFI(SAFI.rtc))
]
except KeyError:
return []
def getLocalRoutesCount(self):
return reduce(
lambda count, entry:
count + (not isinstance(entry.source, BGPPeerWorker)),
self.getAllRoutesButRTC(),
0)
def getReceivedRoutesCount(self):
return reduce(
lambda count, entry:
count + isinstance(entry.source, BGPPeerWorker),
self.getAllRoutesButRTC(),
0)
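# Standalone sketch (plain tuples instead of AFI/SAFI/RouteTarget objects) of the
# wildcard expansion performed by RouteTableManager._matchesFor() above: for one
# (afi, safi) pair and n route targets, 4*(n+1) candidate match keys are produced,
# covering every combination of concrete value and wildcard (None).
def _example_matches_for(afi, safi, route_targets):
    matches = []
    for _afi in (None, afi):
        for _safi in (None, safi):
            matches.append((_afi, _safi, None))
            for rt in (route_targets or []):
                matches.append((_afi, _safi, rt))
    return matches  # len(matches) == 4 * (len(route_targets) + 1)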
|
|
from flask import jsonify
import jsonrpclib
import socket
import struct
import urllib
from Maraschino import app
from maraschino.noneditable import *
from maraschino.tools import *
from maraschino import logger
xbmc_error = 'There was a problem connecting to the XBMC server'
@app.route('/xhr/play/<file_type>/<media_type>/<int:media_id>')
@requires_auth
def xhr_play_media(file_type, media_type, media_id):
logger.log('CONTROLS :: Playing %s' % media_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
position = 0
if file_type == 'video':
id = 1
else:
id = 0
try:
xhr_clear_playlist(id)
except:
logger.log('CONTROLS :: Failed to clear %s playlist' % file_type, 'DEBUG')
return jsonify({ 'failed': True })
if file_type == 'video':
if media_type == 'tvshow':
try:
tvshow_episodes = xbmc.VideoLibrary.GetEpisodes(tvshowid=media_id, sort={ 'method': 'episode' })['episodes']
for episode in tvshow_episodes:
episodeid = episode['episodeid']
item = {'episodeid': episodeid}
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to retrieve episodes', 'DEBUG')
return jsonify({'failed': True})
elif 'season' in media_type:
media_type = media_type.split('_')
season = int(media_type[1])
try:
tvshow_episodes = xbmc.VideoLibrary.GetEpisodes(tvshowid=media_id, season=season, sort={ 'method': 'episode' })['episodes']
for episode in tvshow_episodes:
episodeid = episode['episodeid']
item = {'episodeid': episodeid}
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to retrieve episodes', 'DEBUG')
return jsonify({'failed': True})
else:
try:
item = { media_type + 'id': media_id }
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % media_type, 'DEBUG')
return jsonify({'failed': True})
else:
try:
if media_type == 'song' and get_setting_value('xbmc_songs_play_album') == '1':
song = xbmc.AudioLibrary.GetSongDetails(songid=media_id, properties=['albumid', 'track'])['songdetails']
item = {'albumid': song['albumid']}
position = song['track'] -1
else:
item = {media_type+'id': media_id}
xbmc.Playlist.Add(playlistid=0, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % media_type, 'DEBUG')
return jsonify({'failed': True})
try:
item = {'playlistid': id, 'position': position}
xbmc.Player.Open(item)
except:
logger.log('CONTROLS :: Failed to open %s playlist' % file_type, 'DEBUG')
return jsonify({'failed': True})
return jsonify({'success': True})
@app.route('/xhr/enqueue/<file_type>/<media_type>/<int:media_id>')
@requires_auth
def xhr_enqueue_media(file_type, media_type, media_id):
logger.log('CONTROLS :: Queueing %s' % media_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
if file_type == 'video':
if media_type == 'tvshow':
try:
tvshow_episodes = xbmc.VideoLibrary.GetEpisodes(tvshowid=media_id, sort={ 'method': 'episode' })['episodes']
for episode in tvshow_episodes:
episodeid = episode['episodeid']
item = { 'episodeid': episodeid }
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to retrieve episodes', 'DEBUG')
return jsonify({ 'failed': True })
elif 'season' in media_type:
media_type = media_type.split('_')
season = int(media_type[1])
try:
tvshow_episodes = xbmc.VideoLibrary.GetEpisodes(tvshowid=media_id, season=season, sort={ 'method': 'episode' })['episodes']
for episode in tvshow_episodes:
episodeid = episode['episodeid']
item = { 'episodeid': episodeid }
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to retrieve episodes', 'DEBUG')
return jsonify({ 'failed': True })
else:
try:
item = { media_type + 'id': media_id }
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % media_type, 'DEBUG')
return jsonify({ 'failed': True })
else:
try:
item = { media_type + 'id': media_id }
xbmc.Playlist.Add(playlistid=0, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % media_type, 'DEBUG')
return jsonify({ 'failed': True })
return jsonify({ 'success': True })
@app.route('/xhr/resume/video/<video_type>/<int:video_id>')
@requires_auth
def xhr_resume_video(video_type, video_id):
logger.log('CONTROLS :: Resuming %s' % video_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xhr_clear_playlist(1)
except:
logger.log('CONTROLS :: Failed to clear video playlist', 'DEBUG')
return jsonify({ 'failed': True })
try:
if video_type == 'episode':
video = xbmc.VideoLibrary.GetEpisodeDetails(episodeid=video_id, properties=['resume'])['episodedetails']
else:
video = xbmc.VideoLibrary.GetMovieDetails(movieid=video_id, properties=['resume'])['moviedetails']
except:
        logger.log('CONTROLS :: Failed to retrieve resume position', 'DEBUG')
return jsonify({ 'failed': True })
seconds = int(video['resume']['position'])
hours = seconds / 3600
seconds -= 3600*hours
minutes = seconds / 60
seconds -= 60*minutes
position = { 'hours': hours, 'minutes': minutes, 'seconds': seconds }
try:
item = { video_type + 'id': video_id }
xbmc.Playlist.Add(playlistid=1, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % video_type, 'DEBUG')
return jsonify({ 'failed': True })
item = { 'playlistid': 1 }
try:
xbmc.Player.Open(item)
xbmc.Player.Seek(playerid=1, value=position)
except:
logger.log('CONTROLS :: Failed to open %s at %s' % (video_type, position), 'DEBUG')
return jsonify({ 'failed': True })
return jsonify({ 'success': True })
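# Side note, illustration only (not wired into any route): the hour/minute/second
# arithmetic above can also be expressed with divmod. This helper is hypothetical
# and mirrors the resume-position conversion.
def _example_seconds_to_position(seconds):
    minutes, secs = divmod(int(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return {'hours': hours, 'minutes': minutes, 'seconds': secs}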
@app.route('/xhr/play/trailer/<int:movieid>')
@app.route('/xhr/play/trailer/url/<path:trailer>')
@requires_auth
def xhr_play_trailer(movieid=None, trailer=None):
logger.log('CONTROLS :: Playing trailer', 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xhr_clear_playlist(1)
except:
logger.log('CONTROLS :: Failed to clear video playlist', 'DEBUG')
return jsonify({ 'failed': True })
if not trailer:
try:
trailer = xbmc.VideoLibrary.GetMovieDetails(movieid=movieid, properties= ['trailer'])['moviedetails']['trailer']
except:
logger.log('CONTROLS :: Failed to retrieve trailer url', 'DEBUG')
return jsonify({ 'failed': True })
else:
trailer = youtube_to_xbmc(trailer)
item = { 'file': trailer }
try:
xbmc.Playlist.Add(playlistid=1, item=item)
item = { 'playlistid': 1 }
xbmc.Player.Open(item)
except:
logger.log('CONTROLS :: Failed to open trailer', 'DEBUG')
return jsonify({ 'failed': True })
return jsonify({ 'success': True })
@app.route('/xhr/play_file/<file_type>/', methods=['POST'])
@requires_auth
def xhr_play_file(file_type):
logger.log('CONTROLS :: Playing %s file' % file_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
if file_type == "music":
file_type = "audio"
id = 0
else:
id = 1
try:
xhr_clear_playlist(id)
except:
logger.log('CONTROLS :: Failed to clear %s playlist' % file_type, 'DEBUG')
return jsonify({ 'failed': True })
file = request.form['file']
file = urllib.unquote(file.encode('ascii')).decode('utf-8')
if file_type == "video":
player = 1
else:
player = 0
try:
item = { 'file': file }
xbmc.Playlist.Add(playlistid=player, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % file_type, 'DEBUG')
return jsonify({ 'failed': True })
try:
item = { 'playlistid': player }
xbmc.Player.Open(item)
except:
logger.log('CONTROLS :: Failed to open %s' % file_type, 'DEBUG')
return jsonify({ 'failed': True })
return jsonify({ 'success': True })
@app.route('/xhr/enqueue_file/<file_type>/', methods=['POST'])
@requires_auth
def xhr_enqueue_file(file_type):
logger.log('CONTROLS :: Queueing %s file' % file_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
file = request.form['file']
file = urllib.unquote(file.encode('ascii')).decode('utf-8')
if file_type == "video":
player = 1
else:
player = 0
try:
item = { 'file': file }
xbmc.Playlist.Add(playlistid=player, item=item)
except:
logger.log('CONTROLS :: Failed to add %s to playlist' % file_type, 'DEBUG')
return jsonify({ 'failed': True })
return jsonify({ 'success': True })
@app.route('/xhr/playlist/<int:playerid>/play/<int:position>')
@requires_auth
def xhr_playlist_play(playerid, position):
logger.log('CONTROLS :: playing playlist position %i' % position, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xbmc.Player.GoTo(playerid=playerid, to=position)
return jsonify({'success': True})
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return jsonify({'failed': True})
@app.route('/xhr/playlist/<int:playlistid>/clear')
@requires_auth
def xhr_clear_playlist(playlistid):
logger.log('CONTROLS :: Clearing playlist', 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xbmc.Playlist.Clear(playlistid=playlistid)
return jsonify({'success': True})
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return jsonify({'failed': True})
@app.route('/xhr/playlist/<int:playlistid>/move_item/<int:position1>/<direction>')
@requires_auth
def xhr_move_playlist_item(playlistid, position1, direction):
logger.log('CONTROLS :: Moving playlist item %s' % direction, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
if direction == 'up':
if position1 != 0:
position2 = position1 - 1
else:
logger.log('CONTROLS :: Playlist item is already at first position', 'INFO')
return jsonify({'success': True})
else:
position2 = position1 + 1
try:
xbmc.Playlist.Swap(playlistid=playlistid, position1=position1, position2=position2)
return jsonify({'success': True})
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return jsonify({'failed': True})
@app.route('/xhr/playlist/<int:playlistid>/remove_item/<int:position>')
@requires_auth
def xhr_remove_playlist_item(playlistid, position):
logger.log('CONTROLS :: Removing playlist item %s' % position, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xbmc.Playlist.Remove(playlistid=playlistid, position=position)
return jsonify({'success': True})
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return jsonify({'failed': True})
@app.route('/xhr/controls/change_channel/<int:channelid>')
@requires_auth
def xhr_change_channel(channelid):
logger.log('CONTROLS :: Changing channel %s' % channelid, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
try:
xbmc.Player.Open(item={'channelid': channelid})
return jsonify({'success': True})
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return jsonify({'failed': True})
@app.route('/xhr/controls/<command>')
@requires_auth
def xhr_controls(command):
serversettings = server_settings()
xbmc = jsonrpclib.Server(server_api_address())
return_response = 'failed'
try:
active_player = xbmc.Player.GetActivePlayers()
if active_player[0]['type'] == 'video':
playerid = 1
elif active_player[0]['type'] == 'audio':
playerid = 0
except:
active_player = None
if command == 'play_pause':
logger.log('CONTROLS :: Play/Pause', 'INFO')
try:
xbmc.Player.PlayPause(playerid=playerid)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'stop':
logger.log('CONTROLS :: Stop', 'INFO')
try:
xbmc.Player.Stop(playerid=playerid)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif 'volume' in command:
logger.log('CONTROLS :: Volume', 'INFO')
try:
volume = command.split('_')
volume = int(volume[1])
xbmc.Application.SetVolume(volume=volume)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'next':
logger.log('CONTROLS :: Next', 'INFO')
try:
version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
if version < 12:
xbmc.Player.GoNext(playerid=playerid)
else:
xbmc.Player.GoTo(playerid=playerid, to='next')
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'previous':
logger.log('CONTROLS :: Previous', 'INFO')
try:
version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
if version < 12:
xbmc.Player.GoPrevious(playerid=playerid)
else:
xbmc.Player.GoTo(playerid=playerid, to='previous')
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'fast_forward':
logger.log('CONTROLS :: Fast forward', 'INFO')
try:
xbmc.Player.SetSpeed(playerid=playerid, speed='increment')
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'rewind':
logger.log('CONTROLS :: Rewind', 'INFO')
try:
xbmc.Player.SetSpeed(playerid=playerid, speed='decrement')
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif 'seek' in command:
logger.log('CONTROLS :: Seek', 'INFO')
try:
percentage = command.split('_')
percentage = int(percentage[1])
xbmc.Player.Seek(playerid=playerid, value=percentage)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'shuffle':
logger.log('CONTROLS :: Shuffle', 'INFO')
try:
version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
if version > 11:
xbmc.Player.SetShuffle(playerid=playerid, shuffle='toggle')
else:
shuffled = xbmc.Player.GetProperties(playerid=playerid, properties=['shuffled'])['shuffled']
if shuffled == True:
xbmc.Player.UnShuffle(playerid=playerid)
else:
xbmc.Player.Shuffle(playerid=playerid)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'repeat':
logger.log('CONTROLS :: Repeat', 'INFO')
try:
version = xbmc.Application.GetProperties(properties=['version'])['version']['major']
if version > 11:
xbmc.Player.SetRepeat(playerid=playerid, repeat='cycle')
else:
states = ['off', 'one', 'all']
repeat = xbmc.Player.GetProperties(playerid=playerid, properties=['repeat'])['repeat']
state = states.index(repeat)
if state <= 1:
state = state + 1
else:
state = 0
state = states[state]
xbmc.Player.Repeat(playerid=playerid, state=state)
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'update_video':
logger.log('CONTROLS :: Updating video library', 'INFO')
try:
xbmc.VideoLibrary.Scan()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'clean_video':
logger.log('CONTROLS :: Cleaning video library', 'INFO')
try:
xbmc.VideoLibrary.Clean()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'update_audio':
logger.log('CONTROLS :: Updating audio library', 'INFO')
try:
xbmc.AudioLibrary.Scan()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'clean_audio':
logger.log('CONTROLS :: Cleaning audio library', 'INFO')
try:
xbmc.AudioLibrary.Clean()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'poweroff':
logger.log('CONTROLS :: Shutting down XBMC machine', 'INFO')
try:
xbmc.System.Shutdown()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'suspend':
logger.log('CONTROLS :: Suspending XBMC machine', 'INFO')
try:
xbmc.System.Suspend()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'reboot':
logger.log('CONTROLS :: Rebooting XBMC machine', 'INFO')
try:
xbmc.System.Reboot()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'pvr-scan':
logger.log('CONTROLS :: Scanning PVR EPG', 'INFO')
try:
xbmc.PVR.Scan()
return_response = 'success'
except:
logger.log('CONTROLS :: %s' % xbmc_error, 'ERROR')
return_response = 'failed'
elif command == 'poweron':
logger.log('CONTROLS :: Powering on XBMC machine', 'INFO')
server_macaddress = serversettings['mac_address']
if not server_macaddress:
logger.log('CONTROLS :: No XBMC machine MAC address defined', 'ERROR')
return jsonify({ 'failed': True })
else:
try:
addr_byte = server_macaddress.split(':')
hw_addr = struct.pack('BBBBBB',
int(addr_byte[0], 16),
int(addr_byte[1], 16),
int(addr_byte[2], 16),
int(addr_byte[3], 16),
int(addr_byte[4], 16),
int(addr_byte[5], 16))
msg = '\xff' * 6 + hw_addr * 16
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(msg, ("255.255.255.255", 9))
return_response = 'success'
except:
logger.log('CONTROLS :: Failed to send WOL packet', 'ERROR')
return_response = 'failed'
if return_response == 'success':
return jsonify({ 'success': True })
else:
return jsonify({ 'failed': True })
@app.route('/xhr/download/<file_type>/<media_type>/<int:media_id>')
@requires_auth
def xhr_download_media(file_type, media_type, media_id):
logger.log('CONTROLS :: Downloading %s' % media_type, 'INFO')
xbmc = jsonrpclib.Server(server_api_address())
serversettings = server_settings()
try:
if media_type == 'episode':
video = xbmc.VideoLibrary.GetEpisodeDetails(episodeid=media_id, properties=['file'])['episodedetails']['file']
elif media_type == 'movie':
video = xbmc.VideoLibrary.GetMovieDetails(movieid=media_id, properties=['file'])['moviedetails']['file']
except:
logger.log('CONTROLS :: Failed to retrieve path to %s' % media_type, 'DEBUG')
return jsonify({ 'failed': True })
path = xbmc.Files.PrepareDownload(path=video)['details']['path']
url = 'http://'+serversettings['username']+':'+serversettings['password']+'@'+serversettings['hostname']+':'+serversettings['port']+'/'+path
return url
|
|
#! /usr/bin/env python
# Make sure that the script "sinfit_mod.py" is in the same directory as the data since it contains the fitting function.
# The script "multiple.py" is run by the command "python multiple.py datafile.txt". For running all output files from rvel, run the script "runall.py".
import sys
# from pylab import *
#from scipy import optimize
from scipy import *
from numpy import *
from subprocess import Popen, PIPE
import os
# sinfit_mod contains the fitting function
from sinfit_mod import fit_model
class PartialFile(file):
def __init__(self, filename, lowerbound, upperbound):
self.file = open(filename)
self.lower = lowerbound
self.upper = upperbound
def next(self):
row = self.file.next()
xvalue = float(row.split()[0])
while xvalue < self.lower:
row = self.file.next()
xvalue = float(row.split()[0])
if self.upper < xvalue:
raise StopIteration
return row
def __iter__(self):
return self
plot_sequence1 ="""
set xrange [%(LOWER)f:%(UPPER)f]
set xlabel 'Date [HJD]'
set ylabel 'Radial Velocity [km/s]'
plot '%(DATAFILE)s' using %(xcol)d:%(ycol)d:%(err)d with errorbars 5 title 'err radial velocity [km/s]' lw 3, '%(INITIALPAR)s' title 'guess' with lines, '%(FITPAR)s' title 'fit' with lines
set terminal png size 600,400
set out '%(PLOTFILE)s'
replot
exit"""
plot_sequence2 ="""
set xrange [%(LOWER)f:%(UPPER)f]
set xlabel 'Date [HJD]'
set ylabel 'Radial Velocity [km/s]'
set terminal png size 600,400
set out '%(PLOTFILE)s'
plot '%(DATAFILE)s' using %(xcol)d:%(ycol)d:%(err)d with errorbars 5 title 'err radial velocity [km/s]' lw 3, '%(INITIALPAR)s' title 'guess' with lines, '%(FITPAR)s' title 'fit' with lines
exit"""
class Fitting(object):
def __init__(self, G, K, PHI_0, P, LOWER, UPPER):
self.G = G
self.K = K
self.PHI_0 = PHI_0
self.P = P
self.LOWER = LOWER
self.UPPER = UPPER
def fit(self, filename,
xcol = 1, ycol = 2, errcol = 3, scale = 1.0,
save_into = None):
"""fit(filename, xcol = 1, ycol = 2, errcol = 3,
scale = 1, save_into = None)
filename: Where the data comes from
xcol: Columns in the files (starting 1) for each data series
ycol
errcol
scale: Scale factor for the Chi^2 (modifies errors)
save_into: Dictionary. It's either empty/None or contains two keys,
"initpar", "fitpar", whose values are the names of the files
where to write down the result of the fitting.
Returns: (G, K, PHI_0), (Gerr, Kerr, PHIerr), simple_Chi^2, reducing_factor"""
xdata, ydata, err = loadtxt(PartialFile(filename, self.LOWER, self.UPPER),
usecols=[xcol - 1, ycol - 1, errcol - 1],
unpack=True)
# 3 is the number of independent parameters
reducing_factor = (len(xdata) - 3)
print "Fitting the model (G=%f, K=%f, PHI_0=%f)" % (self.G, self.K, self.PHI_0)
if not save_into:
updated, chi2, cov = fit_model(xdata, ydata, err * scale,
self.PHI_0, self.G, self.K)
else:
scaled_err = err * scale
updated, chi2, cov = fit_model(xdata, ydata, scaled_err,
self.PHI_0, self.G, self.K,
initpar_file = save_into["initpar"],
fitpar_file = save_into["fitpar"])
savetxt(save_into["data"], zip(xdata, ydata, scaled_err) )
rchi2 = chi2 / reducing_factor
print " G K PHI_0 P Chi^2 reduced Chi^2"
print "%8.2f %8.2f %8.2f %8.6f %8.2f %8.2f" % (tuple(updated) + (self.P, chi2, rchi2))
print "Iteration 2: Fitting the model (G=%f, K=%f, PHI_0=%f)" % (self.G, self.K, self.PHI_0)
scale = rchi2**0.5
if not save_into:
updated, chi2, cov = fit_model(xdata, ydata, err * scale,
self.PHI_0, self.G, self.K)
else:
scaled_err = err * scale
updated, chi2, cov = fit_model(xdata, ydata, scaled_err,
self.PHI_0, self.G, self.K,
initpar_file = save_into["initpar"],
fitpar_file = save_into["fitpar"])
savetxt(save_into["data"], zip(xdata, ydata, scaled_err) )
rchi2 = chi2 / reducing_factor
print " G K PHI_0 P Chi^2 reduced Chi^2"
print "%8.2f %8.2f %8.2f %8.10f %8.2f %8.2f" % (tuple(updated) + (self.P, chi2, rchi2))
return updated, ((rchi2*cov[0][0])**0.5, (rchi2*cov[1][1])**0.5, (rchi2*cov[2][2])**0.5), chi2, reducing_factor
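# Illustrative call (a sketch; the values mirror the defaults used in the main
# program below, and the output file names are placeholders):
#
#   fitter = Fitting(G=-40, K=40.0, PHI_0=0.745, P=0.0762233,
#                    LOWER=349.74, UPPER=349.82)
#   (G, K, PHI_0), errors, chi2, rf = fitter.fit("datafile.txt",
#       save_into={"initpar": "initialpar.dat",
#                  "fitpar": "fitpar.dat",
#                  "data": "data.dat"})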
def plot(self, template, datafile, initialpar, fitpar, plotfile,
xcol = 1, ycol = 2, errcol = 3):
proc = Popen('gnuplot', stdin=PIPE, stdout=PIPE)
command = template % {
'LOWER': self.LOWER,
'UPPER': self.UPPER,
'xcol': xcol,
'ycol': ycol,
'err': errcol,
'DATAFILE': datafile,
'INITIALPAR': initialpar,
'FITPAR': fitpar,
'PLOTFILE': plotfile,
}
# print command
proc.communicate(command)
proc.wait()
# Main program
if __name__ == '__main__':
if len(sys.argv) not in ( 2, 3 ):
print "Usage:"
print " %s [ -n ] datafilename.txt" % sys.argv[0]
sys.exit(0)
if sys.argv[1] == '-n':
filename = sys.argv[2]
template = plot_sequence2
else:
filename = sys.argv[1]
template = plot_sequence1
G = -40
K = 40.0
P = 0.0762233
PHI_0 = 349.745
PHI_0 = 0.745
INITIALPAR='%s/initialpar.dat'
FITPAR='%s/fitpar.dat'
DATAFILE='%s/data.dat'
PLOTFILE='%s/fitplot.png'
PARAMETERS='%s/parameters.dat'
bounds = ( (349.74, 349.82),
(363.73, 363.81),
(397.72, 397.81),
(398.69, 398.77),
(401.75, 401.83) )
print "Starting to fit the data"
parameters_file = open("parameters.txt", "w")
for order, (lower, upper) in enumerate(bounds):
dataset = order + 1
datadir = "dataset%d" % dataset
if not os.path.isdir(datadir):
os.mkdir(datadir)
fitter = Fitting(G, K, PHI_0, P, lower, upper)
datafile = DATAFILE % datadir
initialpar = INITIALPAR % datadir
fitpar = FITPAR % datadir
result = fitter.fit(filename, save_into = {"initpar": initialpar,
"fitpar": fitpar,
"data": datafile})
fitted_par, par_err, chi2, rf = result
S_updated = ["%12.10f" % x for x in fitted_par]
Errors = ["%20.18f" % x for x in par_err]
savetxt(PARAMETERS % datadir,
zip(("G", "K", "PHI_0"),
S_updated,
("GErr", "Kerr", "PHIerr"),
Errors),
fmt="%10s")
parameters_file.write("%3d %14.11f %14.11f %14.11f %20.18f %20.18f %20.18f\n" %
(dataset,
fitted_par[0], fitted_par[1], fitted_par[2],
par_err[0], par_err[1], par_err[2]))
# savetxt(parameters_file,
# zip(("G", "K", "PHI_0"),
# S_updated,
# ("GErr", "Kerr", "PHIerr"),
# Errors),
# fmt="%10s")
fitter.plot(template, datafile, initialpar, fitpar, PLOTFILE % datadir)
parameters_file.close()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.tasks_v2beta2.types import cloudtasks
from google.cloud.tasks_v2beta2.types import queue
from google.cloud.tasks_v2beta2.types import queue as gct_queue
from google.cloud.tasks_v2beta2.types import task
from google.cloud.tasks_v2beta2.types import task as gct_task
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-tasks",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CloudTasksTransport(abc.ABC):
"""Abstract transport class for CloudTasks."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "cloudtasks.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_queues: gapic_v1.method.wrap_method(
self.list_queues,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_queue: gapic_v1.method.wrap_method(
self.get_queue,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.create_queue: gapic_v1.method.wrap_method(
self.create_queue, default_timeout=20.0, client_info=client_info,
),
self.update_queue: gapic_v1.method.wrap_method(
self.update_queue, default_timeout=20.0, client_info=client_info,
),
self.delete_queue: gapic_v1.method.wrap_method(
self.delete_queue,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.purge_queue: gapic_v1.method.wrap_method(
self.purge_queue, default_timeout=20.0, client_info=client_info,
),
self.pause_queue: gapic_v1.method.wrap_method(
self.pause_queue, default_timeout=20.0, client_info=client_info,
),
self.resume_queue: gapic_v1.method.wrap_method(
self.resume_queue, default_timeout=20.0, client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy, default_timeout=20.0, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.list_tasks: gapic_v1.method.wrap_method(
self.list_tasks,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.get_task: gapic_v1.method.wrap_method(
self.get_task,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.create_task: gapic_v1.method.wrap_method(
self.create_task, default_timeout=20.0, client_info=client_info,
),
self.delete_task: gapic_v1.method.wrap_method(
self.delete_task,
default_retry=retries.Retry(
initial=0.1,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=20.0,
),
default_timeout=20.0,
client_info=client_info,
),
self.lease_tasks: gapic_v1.method.wrap_method(
self.lease_tasks, default_timeout=20.0, client_info=client_info,
),
self.acknowledge_task: gapic_v1.method.wrap_method(
self.acknowledge_task, default_timeout=20.0, client_info=client_info,
),
self.renew_lease: gapic_v1.method.wrap_method(
self.renew_lease, default_timeout=20.0, client_info=client_info,
),
self.cancel_lease: gapic_v1.method.wrap_method(
self.cancel_lease, default_timeout=20.0, client_info=client_info,
),
self.run_task: gapic_v1.method.wrap_method(
self.run_task, default_timeout=20.0, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_queues(
self,
) -> Callable[
[cloudtasks.ListQueuesRequest],
Union[cloudtasks.ListQueuesResponse, Awaitable[cloudtasks.ListQueuesResponse]],
]:
raise NotImplementedError()
@property
def get_queue(
self,
) -> Callable[
[cloudtasks.GetQueueRequest], Union[queue.Queue, Awaitable[queue.Queue]]
]:
raise NotImplementedError()
@property
def create_queue(
self,
) -> Callable[
[cloudtasks.CreateQueueRequest],
Union[gct_queue.Queue, Awaitable[gct_queue.Queue]],
]:
raise NotImplementedError()
@property
def update_queue(
self,
) -> Callable[
[cloudtasks.UpdateQueueRequest],
Union[gct_queue.Queue, Awaitable[gct_queue.Queue]],
]:
raise NotImplementedError()
@property
def delete_queue(
self,
) -> Callable[
[cloudtasks.DeleteQueueRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def purge_queue(
self,
) -> Callable[
[cloudtasks.PurgeQueueRequest], Union[queue.Queue, Awaitable[queue.Queue]]
]:
raise NotImplementedError()
@property
def pause_queue(
self,
) -> Callable[
[cloudtasks.PauseQueueRequest], Union[queue.Queue, Awaitable[queue.Queue]]
]:
raise NotImplementedError()
@property
def resume_queue(
self,
) -> Callable[
[cloudtasks.ResumeQueueRequest], Union[queue.Queue, Awaitable[queue.Queue]]
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Union[
iam_policy_pb2.TestIamPermissionsResponse,
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
],
]:
raise NotImplementedError()
@property
def list_tasks(
self,
) -> Callable[
[cloudtasks.ListTasksRequest],
Union[cloudtasks.ListTasksResponse, Awaitable[cloudtasks.ListTasksResponse]],
]:
raise NotImplementedError()
@property
def get_task(
self,
) -> Callable[[cloudtasks.GetTaskRequest], Union[task.Task, Awaitable[task.Task]]]:
raise NotImplementedError()
@property
def create_task(
self,
) -> Callable[
[cloudtasks.CreateTaskRequest], Union[gct_task.Task, Awaitable[gct_task.Task]]
]:
raise NotImplementedError()
@property
def delete_task(
self,
) -> Callable[
[cloudtasks.DeleteTaskRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def lease_tasks(
self,
) -> Callable[
[cloudtasks.LeaseTasksRequest],
Union[cloudtasks.LeaseTasksResponse, Awaitable[cloudtasks.LeaseTasksResponse]],
]:
raise NotImplementedError()
@property
def acknowledge_task(
self,
) -> Callable[
[cloudtasks.AcknowledgeTaskRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def renew_lease(
self,
) -> Callable[
[cloudtasks.RenewLeaseRequest], Union[task.Task, Awaitable[task.Task]]
]:
raise NotImplementedError()
@property
def cancel_lease(
self,
) -> Callable[
[cloudtasks.CancelLeaseRequest], Union[task.Task, Awaitable[task.Task]]
]:
raise NotImplementedError()
@property
def run_task(
self,
) -> Callable[[cloudtasks.RunTaskRequest], Union[task.Task, Awaitable[task.Task]]]:
raise NotImplementedError()
__all__ = ("CloudTasksTransport",)
|
|
"""
Handles a single chunk of data (16x16x128 blocks) from a Minecraft save.
For more information about the chunk format:
https://minecraft.gamepedia.com/Chunk_format
"""
from io import BytesIO
from struct import pack
from math import ceil
import array
# Legacy numeric block identifiers
# mapped to alpha identifiers in best effort
# See https://minecraft.gamepedia.com/Java_Edition_data_values/Pre-flattening
# TODO: move this map into a separate file
block_ids = {
0: 'air',
1: 'stone',
2: 'grass_block',
3: 'dirt',
4: 'cobblestone',
5: 'oak_planks',
6: 'sapling',
7: 'bedrock',
8: 'flowing_water',
9: 'water',
10: 'flowing_lava',
11: 'lava',
12: 'sand',
13: 'gravel',
14: 'gold_ore',
15: 'iron_ore',
16: 'coal_ore',
17: 'oak_log',
18: 'oak_leaves',
19: 'sponge',
20: 'glass',
21: 'lapis_ore',
24: 'sandstone',
30: 'cobweb',
31: 'grass',
32: 'dead_bush',
35: 'white_wool',
37: 'dandelion',
38: 'poppy',
39: 'brown_mushroom',
40: 'red_mushroom',
43: 'stone_slab',
44: 'stone_slab',
47: 'bookshelf',
48: 'mossy_cobblestone',
49: 'obsidian',
50: 'torch',
51: 'fire',
52: 'spawner',
53: 'oak_stairs',
54: 'chest',
56: 'diamond_ore',
58: 'crafting_table',
59: 'wheat',
60: 'farmland',
61: 'furnace',
62: 'furnace',
63: 'sign', # will change to oak_sign in 1.14
64: 'oak_door',
65: 'ladder',
66: 'rail',
67: 'cobblestone_stairs',
72: 'oak_pressure_plate',
73: 'redstone_ore',
74: 'redstone_ore',
78: 'snow',
79: 'ice',
81: 'cactus',
82: 'clay',
83: 'sugar_cane',
85: 'oak_fence',
86: 'pumpkin',
91: 'lit_pumpkin',
101: 'iron_bars',
102: 'glass_pane',
}
def block_id_to_name(bid):
try:
name = block_ids[bid]
except KeyError:
name = 'unknown_%d' % (bid,)
print("warning: unknown block id %i" % bid)
print("hint: add that block to the 'block_ids' map")
return name
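# Illustrative usage: known identifiers resolve to their alpha names, unknown
# identifiers fall back to a placeholder after printing the warning above.
#   block_id_to_name(1)   -> 'stone'
#   block_id_to_name(999) -> 'unknown_999'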
# Generic Chunk
class Chunk(object):
"""Class for representing a single chunk."""
def __init__(self, nbt):
self.chunk_data = nbt['Level']
self.coords = self.chunk_data['xPos'],self.chunk_data['zPos']
def get_coords(self):
"""Return the coordinates of this chunk."""
return (self.coords[0].value,self.coords[1].value)
def __repr__(self):
"""Return a representation of this Chunk."""
return "Chunk("+str(self.coords[0])+","+str(self.coords[1])+")"
# Chunk in Region old format
class McRegionChunk(Chunk):
def __init__(self, nbt):
Chunk.__init__(self, nbt)
self.blocks = BlockArray(self.chunk_data['Blocks'].value, self.chunk_data['Data'].value)
def get_max_height(self):
return 127
def get_block(self, x, y, z):
name = block_id_to_name(self.blocks.get_block(x, y, z))
return name
def iter_block(self):
for y in range(0, 128):
for z in range(0, 16):
for x in range(0, 16):
yield self.get_block(x, y, z)
# Section in Anvil new format
class AnvilSection(object):
def __init__(self, nbt, version):
self.names = []
self.indexes = []
# Is the section flattened ?
# See https://minecraft.gamepedia.com/1.13/Flattening
if version == 0 or version == 1343: # 1343 = MC 1.12.2
self._init_array(nbt)
elif version >= 1631 and version <= 2230: # MC 1.13 to MC 1.15.2
self._init_index_unpadded(nbt)
elif version >= 2566 and version <= 2730: # MC 1.16.0 to MC 1.17.2 (latest tested version)
self._init_index_padded(nbt)
else:
raise NotImplementedError()
# Section contains 4096 blocks whatever data version
assert len(self.indexes) == 4096
# Decode legacy section
# Contains an array of block numeric identifiers
def _init_array(self, nbt):
bids = []
for bid in nbt['Blocks'].value:
try:
i = bids.index(bid)
except ValueError:
bids.append(bid)
i = len(bids) - 1
self.indexes.append(i)
for bid in bids:
bname = block_id_to_name(bid)
self.names.append(bname)
# Decode modern section
# Contains palette of block names and indexes packed with run-on between elements (pre 1.16 format)
def _init_index_unpadded(self, nbt):
for p in nbt['Palette']:
name = p['Name'].value
self.names.append(name)
states = nbt['BlockStates'].value
# Block states are packed into an array of longs
# with variable number of bits per block (min: 4)
num_bits = (len(self.names) - 1).bit_length()
if num_bits < 4: num_bits = 4
assert num_bits == len(states) * 64 / 4096
mask = pow(2, num_bits) - 1
i = 0
bits_left = 64
curr_long = states[0]
for _ in range(0,4096):
if bits_left == 0:
i = i + 1
curr_long = states[i]
bits_left = 64
if num_bits <= bits_left:
self.indexes.append(curr_long & mask)
curr_long = curr_long >> num_bits
bits_left = bits_left - num_bits
else:
i = i + 1
next_long = states[i]
remaining_bits = num_bits - bits_left
next_long = (next_long & (pow(2, remaining_bits) - 1)) << bits_left
curr_long = (curr_long & (pow(2, bits_left) - 1))
self.indexes.append(next_long | curr_long)
curr_long = states[i]
curr_long = curr_long >> remaining_bits
bits_left = 64 - remaining_bits
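# Worked example for the pre-1.16 packing above (illustrative numbers): with a
# palette of 20 entries, num_bits = 5, so the first long yields 12 complete
# indexes and has 4 bits left over; the 13th index is assembled from those 4
# low bits plus the lowest bit of the next long, i.e. indexes run on across
# long boundaries and no bit is wasted.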
# Decode modern section
# Contains palette of block names and indexes packed with padding if elements don't fit (post 1.16 format)
def _init_index_padded(self, nbt):
for p in nbt['Palette']:
name = p['Name'].value
self.names.append(name)
states = nbt['BlockStates'].value
num_bits = (len(self.names) - 1).bit_length()
if num_bits < 4: num_bits = 4
mask = 2**num_bits - 1
indexes_per_element = 64 // num_bits
last_state_elements = 4096 % indexes_per_element
if last_state_elements == 0: last_state_elements = indexes_per_element
assert len(states) == ceil(4096 / indexes_per_element)
for i in range(len(states)-1):
long = states[i]
for _ in range(indexes_per_element):
self.indexes.append(long & mask)
long = long >> num_bits
long = states[-1]
for _ in range(last_state_elements):
self.indexes.append(long & mask)
long = long >> num_bits
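# Worked example for the 1.16+ padded packing above (illustrative numbers):
# with a palette of 20 entries, num_bits = 5 and indexes_per_element = 12, so
# each long carries 12 indexes and its top 4 bits are unused padding; the 4096
# indexes of a section then need ceil(4096 / 12) = 342 longs, the last of
# which holds only 4096 % 12 = 4 indexes.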
def get_block(self, x, y, z):
# Blocks are stored in YZX order
i = y * 256 + z * 16 + x
p = self.indexes[i]
return self.names[p]
def iter_block(self):
for i in range(0, 4096):
p = self.indexes[i]
yield self.names[p]
# Chunk in Anvil new format
class AnvilChunk(Chunk):
def __init__(self, nbt):
Chunk.__init__(self, nbt)
# Started to work on this class with MC version 1.13.2
# so with the chunk data version 1631
# Backported to first Anvil version (= 0) from examples
# Could work with other versions, but has to be tested first
try:
version = nbt['DataVersion'].value
if version != 1343 and not (1631 <= version <= 2730):
raise NotImplementedError('DataVersion %d not implemented' % (version,))
except KeyError:
version = 0
# Load all sections
self.sections = {}
if 'Sections' in self.chunk_data:
for s in self.chunk_data['Sections']:
if "BlockStates" in s.keys() or "Blocks" in s.keys(): # skip sections that only contain lighting information
self.sections[s['Y'].value] = AnvilSection(s, version)
def get_section(self, y):
"""Get a section from Y index."""
if y in self.sections:
return self.sections[y]
return None
def get_max_height(self):
ymax = 0
for y in self.sections.keys():
if y > ymax: ymax = y
return ymax * 16 + 15
def get_block(self, x, y, z):
"""Get a block from relative x,y,z."""
sy,by = divmod(y, 16)
section = self.get_section(sy)
if section == None:
return None
return section.get_block(x, by, z)
def iter_block(self):
for s in self.sections.values():
for b in s.iter_block():
yield b
class BlockArray(object):
"""Convenience class for dealing with a Block/data byte array."""
def __init__(self, blocksBytes=None, dataBytes=None):
"""Create a new BlockArray, defaulting to no block or data bytes."""
if isinstance(blocksBytes, (bytearray, array.array)):
self.blocksList = list(blocksBytes)
else:
self.blocksList = [0]*32768 # Create an empty block list (32768 entries of zero (air))
if isinstance(dataBytes, (bytearray, array.array)):
self.dataList = list(dataBytes)
else:
self.dataList = [0]*16384 # Create an empty data list (32768 4-bit entries of zero make 16384 byte entries)
def get_blocks_struct(self):
"""Return a dictionary with block ids keyed to (x, y, z)."""
cur_x = 0
cur_y = 0
cur_z = 0
blocks = {}
for block_id in self.blocksList:
blocks[(cur_x,cur_y,cur_z)] = block_id
cur_y += 1
if (cur_y > 127):
cur_y = 0
cur_z += 1
if (cur_z > 15):
cur_z = 0
cur_x += 1
return blocks
# Give blockList back as a byte array
def get_blocks_byte_array(self, buffer=False):
"""Return the blocks in this chunk as a packed byte array (or a length-prefixed BytesIO when buffer is True)."""
if buffer:
length = len(self.blocksList)
return BytesIO(pack(">i", length)+self.get_blocks_byte_array())
else:
return array.array('B', self.blocksList).tostring()
def get_data_byte_array(self, buffer=False):
"""Return the block data values for this chunk as a packed byte array (or a length-prefixed BytesIO when buffer is True)."""
if buffer:
length = len(self.dataList)
return BytesIO(pack(">i", length)+self.get_data_byte_array())
else:
return array.array('B', self.dataList).tostring()
def generate_heightmap(self, buffer=False, as_array=False):
"""Return a heightmap, representing the highest solid blocks in this chunk."""
non_solids = [0, 8, 9, 10, 11, 38, 37, 32, 31]
if buffer:
return BytesIO(pack(">i", 256)+self.generate_heightmap()) # Length + Heightmap, ready for insertion into Chunk NBT
else:
bytes = []
for z in range(16):
for x in range(16):
for y in range(127, -1, -1):
offset = y + z*128 + x*128*16
if (self.blocksList[offset] not in non_solids or y == 0):
bytes.append(y+1)
break
if (as_array):
return bytes
else:
return array.array('B', bytes).tostring()
def set_blocks(self, list=None, dict=None, fill_air=False):
"""
Sets all blocks in this chunk, using either a list or dictionary.
Blocks not explicitly set can be filled to air by setting fill_air to True.
"""
if list:
# Inputting a list like self.blocksList
self.blocksList = list
elif dict:
# Inputting a dictionary like result of self.get_blocks_struct()
list = []
for x in range(16):
for z in range(16):
for y in range(128):
coord = x,y,z
offset = y + z*128 + x*128*16
if (coord in dict):
list.append(dict[coord])
else:
if (self.blocksList[offset] and not fill_air):
list.append(self.blocksList[offset])
else:
list.append(0) # Air
self.blocksList = list
else:
# None of the above...
return False
return True
def set_block(self, x,y,z, id, data=0):
"""Sets the block at x, y, z to the specified id, and optionally data."""
offset = y + z*128 + x*128*16
self.blocksList[offset] = id
if (offset % 2 == 1):
# offset is odd
index = (offset-1)//2
b = self.dataList[index]
self.dataList[index] = (b & 240) + (data & 15) # modify lower bits, leaving higher bits in place
else:
# offset is even
index = offset//2
b = self.dataList[index]
self.dataList[index] = (b & 15) + (data << 4 & 240) # modify higher bits, leaving lower bits in place
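# Illustrative example of the nibble packing above (coordinates are arbitrary):
# set_block(0, 2, 0, 1, data=9) hits the even offset 2, so dataList[1] keeps
# its low nibble and stores 9 in its high nibble, while set_block(0, 3, 0, 1,
# data=9) hits the odd offset 3 and stores 9 in the low nibble of the same
# byte.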
# Get a given X,Y,Z or a tuple of three coordinates
def get_block(self, x,y,z, coord=False):
"""Return the id of the block at x, y, z."""
"""
Laid out like:
(0,0,0), (0,1,0), (0,2,0) ... (0,127,0), (0,0,1), (0,1,1), (0,2,1) ... (0,127,1), (0,0,2) ... (0,127,15), (1,0,0), (1,1,0) ... (15,127,15)
::
blocks = []
for x in range(16):
for z in range(16):
for y in range(128):
blocks.append(Block(x,y,z))
"""
offset = y + z*128 + x*128*16 if (coord == False) else coord[1] + coord[2]*128 + coord[0]*128*16
return self.blocksList[offset]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Queue management implementation for Plaso.
This file contains an implementation of a queue used by plaso for
queue management.
The queue has been abstracted in order to provide support for different
implementations of the queueing mechanism, to support multi processing and
scalability.
"""
import abc
import collections
import logging
import multiprocessing
from dfvfs.path import path_spec as dfvfs_path_spec
from plaso.lib import event
from plaso.lib import errors
class QueueEndOfInput(object):
"""Class that implements a queue end of input."""
class Queue(object):
"""Class that implements the queue interface."""
@abc.abstractmethod
def __len__(self):
"""Return the estimated number of entries inside the queue."""
@abc.abstractmethod
def IsEmpty(self):
"""Determines if the queue is empty."""
@abc.abstractmethod
def PushItem(self, item):
"""Pushes an item onto the queue."""
@abc.abstractmethod
def PopItem(self):
"""Pops an item off the queue."""
def SignalEndOfInput(self):
"""Signals the queue no input remains."""
self.PushItem(QueueEndOfInput())
class MultiThreadedQueue(Queue):
"""Multi threaded queue."""
def __init__(self):
"""Initializes the multi threaded queue."""
super(MultiThreadedQueue, self).__init__()
self._queue = multiprocessing.Queue()
def __len__(self):
"""Return the total number of events stored inside the queue."""
size = 0
try:
size = self._queue.qsize()
except NotImplementedError:
logging.warning(
u'Returning queue length does not work on Mac OS X because of broken '
u'sem_getvalue()')
raise
return size
def IsEmpty(self):
"""Determines if the queue is empty."""
return self._queue.empty()
def PushItem(self, item):
"""Pushes an item onto the queue."""
self._queue.put(item)
def PopItem(self):
"""Pops an item off the queue."""
try:
return self._queue.get()
except KeyboardInterrupt:
raise errors.QueueEmpty
class SingleThreadedQueue(Queue):
"""Single threaded queue."""
def __init__(self):
"""Initializes a single threaded queue."""
super(SingleThreadedQueue, self).__init__()
self._queue = collections.deque()
def __len__(self):
"""Return the number of items inside the queue."""
return len(self._queue)
def IsEmpty(self):
"""Determines if the queue is empty."""
return len(self._queue) == 0
def PushItem(self, item):
"""Pushes an item onto the queue."""
self._queue.append(item)
def PopItem(self):
"""Pops an item off the queue."""
try:
# Using popleft to have FIFO behavior.
return self._queue.popleft()
except IndexError:
raise errors.QueueEmpty
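# Illustrative sketch of the end-of-input convention shared by these queues:
# a producer pushes its items and then signals completion, and a consumer pops
# until it encounters the QueueEndOfInput sentinel (which the consumer classes
# below push back so that other consumers sharing the queue also stop).
#
#   queue_object = SingleThreadedQueue()
#   queue_object.PushItem(u'an item')
#   queue_object.SignalEndOfInput()
#   item = queue_object.PopItem()        # u'an item'
#   sentinel = queue_object.PopItem()    # instance of QueueEndOfInput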
class QueueConsumer(object):
"""Class that implements the queue consumer interface.
The consumer subscribes to updates on the queue.
"""
def __init__(self, queue_object):
"""Initializes the queue consumer.
Args:
queue_object: the queue object (instance of Queue).
"""
super(QueueConsumer, self).__init__()
self._queue = queue_object
class QueueProducer(object):
"""Class that implements the queue producer interface.
The producer generates updates on the queue.
"""
def __init__(self, queue_object):
"""Initializes the queue producer.
Args:
queue_object: the queue object (instance of Queue).
"""
super(QueueProducer, self).__init__()
self._queue = queue_object
def SignalEndOfInput(self):
"""Signals the queue no input remains."""
self._queue.SignalEndOfInput()
class AnalysisReportQueueConsumer(QueueConsumer):
"""Class that implements the analysis report queue consumer.
The consumer subscribes to updates on the queue.
"""
@abc.abstractmethod
def _ConsumeAnalysisReport(self, analysis_report):
"""Consumes an analysis report callback for ConsumeAnalysisReports."""
def ConsumeAnalysisReports(self):
"""Consumes the analysis reports that are pushed on the queue.
Raises:
RuntimeError: when there is an unsupported object type on the queue.
"""
while True:
try:
item = self._queue.PopItem()
except errors.QueueEmpty:
break
if isinstance(item, QueueEndOfInput):
# Push the item back onto the queue to make sure all
# queue consumers are stopped.
self._queue.PushItem(item)
break
if not isinstance(item, event.AnalysisReport):
raise RuntimeError(u'Unsupported item type on queue.')
self._ConsumeAnalysisReport(item)
class AnalysisReportQueueProducer(QueueProducer):
"""Class that implements the analysis report queue producer.
The producer generates updates on the queue.
"""
def ProduceAnalysisReport(self, analysis_report):
"""Produces an analysis report onto the queue.
Args:
analysis_report: the analysis report object (instance of
EventAnalysisReport).
"""
self._queue.PushItem(analysis_report)
class EventObjectQueueConsumer(QueueConsumer):
"""Class that implements the event object queue consumer.
The consumer subscribes to updates on the queue.
"""
@abc.abstractmethod
def _ConsumeEventObject(self, event_object):
"""Consumes an event object callback for ConsumeEventObjects."""
def ConsumeEventObjects(self):
"""Consumes the event object that are pushed on the queue.
Raises:
RuntimeError: when there is an unsupported object type on the queue.
"""
while True:
try:
item = self._queue.PopItem()
except errors.QueueEmpty:
break
if isinstance(item, QueueEndOfInput):
# Push the item back onto the queue to make sure all
# queue consumers are stopped.
self._queue.PushItem(item)
break
self._ConsumeEventObject(item)
class EventObjectQueueProducer(QueueProducer):
"""Class that implements the event object queue producer.
The producer generates updates on the queue.
"""
def ProduceEventObject(self, event_object):
"""Produces an event object onto the queue.
Args:
event_object: the event object (instance of EventObject).
"""
try:
self._queue.PushItem(event_object)
except ValueError as exception:
logging.error(
u'Unable to produce a serialized event object, with error: {}'.format(
exception))
def ProduceEventObjects(self, event_objects):
"""Produces event objects onto the queue.
Args:
event_objects: a generator of event objects (instances of EventObject).
"""
for event_object in event_objects:
self.ProduceEventObject(event_object)
class AnalysisPluginProducer(EventObjectQueueProducer):
"""Producer for Event Objects sent to analysis plugins."""
def __init__(self, queue_object):
super(AnalysisPluginProducer, self).__init__(queue_object)
class ItemQueueConsumer(QueueConsumer):
"""Class that implements the item queue consumer.
The consumer subscribes to updates on the queue.
"""
@abc.abstractmethod
def _ConsumeItem(self, item):
"""Consumes an item callback for ConsumeItems."""
def ConsumeItems(self):
"""Consumes the items that are pushed on the queue."""
while True:
try:
item = self._queue.PopItem()
except errors.QueueEmpty:
break
if isinstance(item, QueueEndOfInput):
# Push the item back onto the queue to make sure all
# queue consumers are stopped.
self._queue.PushItem(item)
break
self._ConsumeItem(item)
class PathSpecQueueConsumer(QueueConsumer):
"""Class that implements the path specification queue consumer.
The consumer subscribes to updates on the queue.
"""
@abc.abstractmethod
def _ConsumePathSpec(self, path_spec):
"""Consumes a path specification callback for ConsumePathSpecs."""
def ConsumePathSpecs(self):
"""Consumes the path specifications that are pushed on the queue.
Raises:
RuntimeError: when there is an unsupported object type on the queue.
"""
while True:
try:
item = self._queue.PopItem()
except errors.QueueEmpty:
break
if isinstance(item, QueueEndOfInput):
# Push the item back onto the queue to make sure all
# queue consumers are stopped.
self._queue.PushItem(item)
break
if not isinstance(item, dfvfs_path_spec.PathSpec):
raise RuntimeError(u'Unsupported item type on queue.')
self._ConsumePathSpec(item)
class PathSpecQueueProducer(QueueProducer):
"""Class that implements the path specification queue producer.
The producer generates updates on the queue.
"""
def ProducePathSpec(self, path_spec):
"""Produces a path specification onto the queue.
Args:
path_spec: the path specification object (instance of dfvfs.PathSpec).
"""
self._queue.PushItem(path_spec)
|
|
# Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import tarfile
import time
import zipfile
import urllib
from subprocess import call
import zopkio.constants as constants
from zopkio.deployer import Deployer, Process
from zopkio.remote_host_helper import better_exec_command, DeploymentError, get_sftp_client, get_ssh_client,\
open_remote_file, log_output, exec_with_env
import zopkio.runtime as runtime
logger = logging.getLogger(__name__)
class SSHDeployer(Deployer):
"""
A simple deployer that copies an executable to the remote host and runs it
"""
def __init__(self, service_name, configs=None):
"""
Creates a new SSHDeployer, typical use case is to create one deployer per service. The functions of the
deployer are driven by configs. The configs can be set in the constructor as defaults and subsequently overridden
during each invocation. The following configs are currently supported
additional_directories: used during uninstall to remove additional directories; see directories_to_clean
args: used during start to give args to the start command
delay: used during start or stop to add a delay before returning in order to allow the service time to start up
directories_to_clean: used during uninstall to remove additional directories
env: used during install/start/stop/get_pid to run custom commands with the specified environment
executable: the executable that defines this service
extract: used during install to know if the executable should be extracted
hostname: used during each function to specify the host to execute it on,
should be passed per call rather than set by default
install_path: the path to install the executable
no_copy: used during install to skip the installation step if the executable has already been copied
pid_command: used during get_pid; if this is specified then the command will be used to determine the pid of the executable
use this or pid_file or pid_keyword
pid_file: used during get_pid; if this is specified then the file will be read to determine the pid of the executable
use this or pid_command or pid_keyword
pid_keyword: used during get_pid; if this is specified then the keyword will be used with pgrep to determine the pid of the executable
use this or pid_command or pid_file
post_install_cmds: used during install to run custom commands prior to running start
start_command: used during start, the command to start the service
stop_command: used during stop, the command to stop the service
sync: used during start, whether the start command is synchronous or not (Default not)
terminate_only: used during stop to terminate the process rather than using the stop command
:param service_name: an arbitrary name that can be used to describe the executable
:param configs: default configurations for the other methods
:return:
"""
logging.getLogger("paramiko").setLevel(logging.ERROR)
self.service_name = service_name
self.default_configs = {} if configs is None else configs
Deployer.__init__(self)
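# Illustrative construction (a sketch; the hostname, paths and commands below
# are made-up placeholders rather than values required by zopkio):
#
#   deployer = SSHDeployer('myservice', {
#       'hostname': 'host.example.com',
#       'executable': '/local/path/myservice.tar.gz',
#       'install_path': '/tmp/myservice',
#       'extract': True,
#       'start_command': 'bin/run.sh',
#       'pid_keyword': 'myservice',
#   })
#   deployer.install('myservice-0')
#   deployer.start('myservice-0')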
def install(self, unique_id, configs=None):
"""
Copies the executable to the remote machine under install path. Inspects the configs for the possible keys
'hostname': the host to install on
'install_path': the location on the remote host
'executable': the executable to copy
'no_copy': if this config is passed in and true then this method will not copy the executable assuming that it is
already installed
'post_install_cmds': an optional list of commands that should be executed on the remote machine after the
executable has been installed. If no_copy is set to true, then the post install commands will not be run.
If the unique_id is already installed on a different host, this will perform the cleanup action first.
If either 'install_path' or 'executable' are provided the new value will become the default.
:param unique_id:
:param configs:
:return:
"""
# the following is necessary to set the configs for this function as the combination of the
# default configurations and the parameter with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
hostname = None
is_tarfile = False
is_zipfile = False
if unique_id in self.processes:
process = self.processes[unique_id]
prev_hostname = process.hostname
if 'hostname' in configs:
if prev_hostname != configs['hostname']:
self.uninstall(unique_id, configs)
hostname = configs['hostname']
else:
self.uninstall(unique_id, configs)
hostname = prev_hostname
elif 'hostname' in configs:
hostname = configs['hostname']
else:
# we have not installed this unique_id before and no hostname is provided in the configs so raise an error
logger.error("hostname was not provided for unique_id: " + unique_id)
raise DeploymentError("hostname was not provided for unique_id: " + unique_id)
env = configs.get("env", {})
install_path = configs.get('install_path') or self.default_configs.get('install_path')
pid_file = configs.get('pid_file') or self.default_configs.get('pid_file')
if install_path is None:
logger.error("install_path was not provided for unique_id: " + unique_id)
raise DeploymentError("install_path was not provided for unique_id: " + unique_id)
if not configs.get('no_copy', False):
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
log_output(better_exec_command(ssh, "mkdir -p {0}".format(install_path),
"Failed to create path {0}".format(install_path)))
log_output(better_exec_command(ssh, "chmod 755 {0}".format(install_path),
"Failed to make path {0} writeable".format(install_path)))
executable = configs.get('executable') or self.default_configs.get('executable')
if executable is None:
logger.error("executable was not provided for unique_id: " + unique_id)
raise DeploymentError("executable was not provided for unique_id: " + unique_id)
# if the executable is in a remote location, copy it to the local machine
copy_from_remote_location = False
if (":" in executable):
copy_from_remote_location = True
if ("http" not in executable):
remote_location_server = executable.split(":")[0]
remote_file_path = executable.split(":")[1]
remote_file_name = os.path.basename(remote_file_path)
local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name)
if not os.path.exists(local_temp_file_name):
with get_sftp_client(remote_location_server,username=runtime.get_username(), password=runtime.get_password()) as ftp:
try:
ftp.get(remote_file_path, local_temp_file_name)
executable = local_temp_file_name
except:
raise DeploymentError("Unable to load file from remote server " + executable)
#use urllib for http copy
else:
remote_file_name = executable.split("/")[-1]
local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name)
if not os.path.exists(local_temp_file_name):
try:
urllib.urlretrieve (executable, local_temp_file_name)
except:
raise DeploymentError("Unable to load file from remote server " + executable)
executable = local_temp_file_name
try:
exec_name = os.path.basename(executable)
install_location = os.path.join(install_path, exec_name)
with get_sftp_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ftp:
ftp.put(executable, install_location)
except:
raise DeploymentError("Unable to copy executable to install_location:" + install_location)
finally:
# Track if it's a tarfile or zipfile before deleting it, in case the copy to the remote location fails
is_tarfile = tarfile.is_tarfile(executable)
is_zipfile = zipfile.is_zipfile(executable)
if (copy_from_remote_location and not configs.get('cache',False)):
os.remove(executable)
# only supports tar and zip (because those modules are provided by Python's standard library)
if configs.get('extract', False) or self.default_configs.get('extract', False):
if is_tarfile:
log_output(better_exec_command(ssh, "tar -xf {0} -C {1}".format(install_location, install_path),
"Failed to extract tarfile {0}".format(exec_name)))
elif is_zipfile:
log_output(better_exec_command(ssh, "unzip -o {0} -d {1}".format(install_location, install_path),
"Failed to extract zipfile {0}".format(exec_name)))
else:
logger.error(executable + " is not a supported filetype for extracting")
raise DeploymentError(executable + " is not a supported filetype for extracting")
post_install_cmds = configs.get('post_install_cmds', False) or self.default_configs.get('post_install_cmds', [])
for cmd in post_install_cmds:
relative_cmd = "cd {0}; {1}".format(install_path, cmd)
log_output(exec_with_env(ssh, relative_cmd,
msg="Failed to execute post install command: {0}".format(relative_cmd), env=env))
self.processes[unique_id] = Process(unique_id, self.service_name, hostname, install_path)
self.processes[unique_id].pid_file = pid_file
def start(self, unique_id, configs=None):
"""
Start the service. If `unique_id` has already been installed the deployer will start the service on that host.
Otherwise this will call install with the configs. Within the context of this function, only four configs are
considered
'start_command': the command to run (if provided will replace the default)
'args': a list of args that can be passed to the command
'sync': if the command is synchronous or asynchronous defaults to asynchronous
    'delay': a delay in seconds that may be needed before the service is actually up, regardless of whether the
     command has returned
:param unique_id:
:param configs:
    :return: if the command is executed synchronously, return the underlying paramiko channel, which can be used
    to get the stdout; otherwise return the triple (stdin, stdout, stderr)
"""
    # the following is necessary to set the configs for this function as the combination of the
    # default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
logger.debug("starting " + unique_id)
# do not start if already started
if self.get_pid(unique_id, configs) is not constants.PROCESS_NOT_RUNNING_PID:
return None
if unique_id not in self.processes:
self.install(unique_id, configs)
hostname = self.processes[unique_id].hostname
install_path = self.processes[unique_id].install_path
# order of precedence for start_command and args from highest to lowest:
# 1. configs
# 2. from Process
# 3. from Deployer
start_command = configs.get('start_command') or self.processes[unique_id].start_command or self.default_configs.get('start_command')
pid_file = configs.get('pid_file') or self.default_configs.get('pid_file')
if start_command is None:
logger.error("start_command was not provided for unique_id: " + unique_id)
raise DeploymentError("start_command was not provided for unique_id: " + unique_id)
args = configs.get('args') or self.processes[unique_id].args or self.default_configs.get('args')
if args is not None:
full_start_command = "{0} {1}".format(start_command, ' '.join(args))
else:
full_start_command = start_command
command = "cd {0}; {1}".format(install_path, full_start_command)
env = configs.get("env", {})
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
exec_with_env(ssh, command, msg="Failed to start", env=env, sync=configs.get('sync', False))
self.processes[unique_id].start_command = start_command
self.processes[unique_id].args = args
    # For cases where the user passes it with the start command
if self.processes[unique_id].pid_file is None:
self.processes[unique_id].pid_file = pid_file
if 'delay' in configs:
time.sleep(configs['delay'])
def stop(self, unique_id, configs=None):
"""Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception
There are two configs that will be considered:
'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the
behavior if stop_command is None and not overridden)
'stop_command': overrides the default stop_command
:param unique_id:
:param configs:
:return:
"""
    # the following is necessary to set the configs for this function as the combination of the
    # default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
logger.debug("stopping " + unique_id)
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
logger.error("Can't stop {0}: process not known".format(unique_id))
raise DeploymentError("Can't stop {0}: process not known".format(unique_id))
if configs.get('terminate_only', False):
self.terminate(unique_id, configs)
else:
stop_command = configs.get('stop_command') or self.default_configs.get('stop_command')
env = configs.get("env", {})
if stop_command is not None:
install_path = self.processes[unique_id].install_path
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command),
msg="Failed to stop {0}".format(unique_id), env=env))
else:
self.terminate(unique_id, configs)
if 'delay' in configs:
time.sleep(configs['delay'])
def uninstall(self, unique_id, configs=None):
"""uninstall the service. If the deployer has not started a service with
`unique_id` this will raise a DeploymentError. This considers one config:
'additional_directories': a list of directories to remove in addition to those provided in the constructor plus
    the install path. This extends the list of directories to remove but does not override it
:param unique_id:
:param configs:
:return:
"""
    # the following is necessary to set the configs for this function as the combination of the
    # default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
logger.error("Can't uninstall {0}: process not known".format(unique_id))
raise DeploymentError("Can't uninstall {0}: process not known".format(unique_id))
install_path = self.processes[unique_id].install_path
    directories_to_remove = list(self.default_configs.get('directories_to_clean', []))
directories_to_remove.extend(configs.get('additional_directories', []))
if install_path not in directories_to_remove:
directories_to_remove.append(install_path)
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
for directory_to_remove in directories_to_remove:
log_output(better_exec_command(ssh, "rm -rf {0}".format(directory_to_remove),
"Failed to remove {0}".format(directory_to_remove)))
def get_pid(self, unique_id, configs=None):
"""Gets the pid of the process with `unique_id`. If the deployer does not know of a process
with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID
"""
RECV_BLOCK_SIZE = 16
    # the following is necessary to set the configs for this function as the combination of the
    # default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
return constants.PROCESS_NOT_RUNNING_PID
if self.processes[unique_id].start_command is None:
return constants.PROCESS_NOT_RUNNING_PID
if self.processes[unique_id].pid_file is not None:
with open_remote_file(hostname, self.processes[unique_id].pid_file,
username=runtime.get_username(), password=runtime.get_password()) as pid_file:
full_output = pid_file.read()
elif 'pid_file' in configs.keys():
with open_remote_file(hostname, configs['pid_file'],
username=runtime.get_username(), password=runtime.get_password()) as pid_file:
full_output = pid_file.read()
else:
pid_keyword = self.processes[unique_id].start_command
if self.processes[unique_id].args is not None:
pid_keyword = "{0} {1}".format(pid_keyword, ' '.join(self.processes[unique_id].args))
pid_keyword = configs.get('pid_keyword', pid_keyword)
# TODO(jehrlich): come up with a simpler approach to this
pid_command = "ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'".format(pid_keyword)
pid_command = configs.get('pid_command', pid_command)
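      # grep exits 1 when nothing matches; the wrapper below treats exit codes 0 and 1 as success
      # so an idle service reads as "not running" rather than as a failed command.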
non_failing_command = "{0}; if [ $? -le 1 ]; then true; else false; fi;".format(pid_command)
env = configs.get("env", {})
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
chan = exec_with_env(ssh, non_failing_command, msg="Failed to get PID", env=env)
output = chan.recv(RECV_BLOCK_SIZE)
full_output = output
while len(output) > 0:
output = chan.recv(RECV_BLOCK_SIZE)
full_output += output
if len(full_output) > 0:
pids = [int(pid_str) for pid_str in full_output.split('\n') if pid_str.isdigit()]
if len(pids) > 0:
return pids
return constants.PROCESS_NOT_RUNNING_PID
def get_host(self, unique_id):
"""Gets the host of the process with `unique_id`. If the deployer does not know of a process
    with `unique_id` a NameError is raised.
:Parameter unique_id: the name of the process
    :raises NameError: if the name is not a valid process
"""
if unique_id in self.processes:
return self.processes[unique_id].hostname
logger.error("{0} not a known process".format(unique_id))
raise NameError("{0} not a known process".format(unique_id))
def get_processes(self):
""" Gets all processes that have been started by this deployer
:Returns: A list of Processes
"""
return self.processes.values()
def kill_all_process(self):
""" Terminates all the running processes. By default it is set to false.
Users can set to true in config once the method to get_pid is done deterministically
either using pid_file or an accurate keyword
"""
if (runtime.get_active_config("cleanup_pending_process",False)):
for process in self.get_processes():
self.terminate(process.unique_id)
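# A minimal usage sketch (illustrative; the deployer class and its constructor are defined earlier in
# this module, so the class name and config keys below are assumptions, not the canonical API):
#
#   deployer = SSHDeployer("myservice", {
#       "executable": "http://example.com/myservice.tar.gz",
#       "install_path": "/opt/myservice",
#       "extract": True,
#       "start_command": "./bin/start.sh",
#       "stop_command": "./bin/stop.sh",
#   })
#   deployer.install("myservice-host1", {"hostname": "host1"})
#   deployer.start("myservice-host1", {"sync": False, "delay": 5})
#   pid = deployer.get_pid("myservice-host1")
#   deployer.stop("myservice-host1")
#   deployer.uninstall("myservice-host1", {"additional_directories": ["/var/log/myservice"]})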
|
|
# coding=utf-8
# Copyright 2022 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian process layers."""
from edward2.tensorflow import constraints
from edward2.tensorflow import generated_random_variables
from edward2.tensorflow import initializers
from edward2.tensorflow import regularizers
from edward2.tensorflow.layers import utils
import tensorflow as tf
import tensorflow_probability as tfp
class Zeros(object):
"""Function returning zeros tensor of same shape excluding the last dim."""
def __call__(self, inputs):
return tf.zeros(tf.shape(inputs)[:-1], inputs.dtype)
def get_config(self):
return {}
class ExponentiatedQuadratic(object):
"""Exponentiated quadratic kernel."""
def __init__(self, variance, lengthscale):
self.variance = variance
self.lengthscale = lengthscale
def __call__(self, x1, x2):
"""Computes exponentiated quadratic over all pairs of inputs.
Args:
x1: Tensor of shape [batch_x1, ...]. Slices along the batch axis denote an
individual input to be passed to the kernel. It is computed pairwise
with each input sliced from x2.
x2: Tensor of shape [batch_x2, ...]. Slices along the batch axis denote an
individual input passed to the kernel function. It is computed pairwise
with each input sliced from x1.
Returns:
Tensor of shape [batch_x1, batch_x2].
"""
size = tf.convert_to_tensor(x1).shape.ndims
if size > 2:
      raise NotImplementedError('Multiple feature dimensions are not yet '
'supported.')
x1 = x1 / self.lengthscale
x2 = x2 / self.lengthscale
x1_squared = tf.reduce_sum(tf.square(x1), list(range(1, len(x1.shape))))
x2_squared = tf.reduce_sum(tf.square(x2), list(range(1, len(x2.shape))))
square = (x1_squared[:, tf.newaxis] +
x2_squared[tf.newaxis, :] -
2 * tf.matmul(x1, x2, transpose_b=True))
return self.variance * tf.exp(-square / 2)
def get_config(self):
return {'variance': self.variance, 'lengthscale': self.lengthscale}
class LinearKernel(object):
"""Linear kernel, optionally on top of a feature extractor (e.g., encoder)."""
def __init__(self, variance, bias, encoder=tf.identity):
self.variance = variance
self.bias = bias
self.encoder = encoder
def __call__(self, x1, x2):
"""Computes scaled dot product of over all pairs of encoded inputs.
Args:
x1: Tensor of shape [batch_x1] + encoder domain. Slices along the batch
axis denote an individual input to be passed to the kernel. It is
computed pairwise with each input sliced from x2.
x2: Tensor of shape [batch_x2] + encoder domain. Slices along the batch
axis denote an individual input to be passed to the kernel. It is
computed pairwise with each input sliced from x1.
Returns:
Tensor of shape [batch_x1, batch_x2].
"""
encoded_x1 = self.encoder(x1)
encoded_x2 = self.encoder(x2)
dot_product = tf.matmul(encoded_x1, encoded_x2, transpose_b=True)
return self.variance * dot_product + self.bias
def get_config(self):
return {
'variance': self.variance,
'bias': self.bias,
'encoder': tf.keras.utils.serialize_keras_object(self.encoder),
}
class GaussianProcess(tf.keras.layers.Layer):
r"""Gaussian process layer.
The layer represents a distribution over functions, where a
stochastic forward pass appears as
```none
f ~ GP(f | conditional_inputs, conditional_outputs; mean_fn, covariance_fn)
outputs = f(inputs)
```
The optional arguments `conditional_inputs` and `conditional_outputs`
capture data that the GP "memorizes", i.e., it forms a posterior predictive
distribution. If left unspecified, the GP posits a prior predictive.
Given a call to `inputs`, an equivalent formulation in terms of function
outputs is
```none
outputs ~ \prod_{unit=1}^{units} MultivariateNormal(output[:, unit] |
mean = mean_fn(inputs) + Knm Kmm^{-1} (conditional_outputs[:, unit]-mean),
covariance = Knn - Knm Kmm^{-1} Kmn)
```
where Knm is the covariance function evaluated between all `inputs` and
`conditional_inputs`; Knn is between all `inputs`; Kmm is between all
`conditional_inputs`; and mean is the mean function evaluated on
`conditional_inputs`. The multivariate normal is correlated across input
dimensions and is independent across output dimensions.
"""
def __init__(
self,
units,
mean_fn=Zeros(),
covariance_fn=ExponentiatedQuadratic(variance=1., lengthscale=1.),
conditional_inputs=None,
conditional_outputs=None,
**kwargs):
"""Constructs layer.
Args:
units: integer, dimensionality of layer.
mean_fn: Mean function, a callable taking an inputs Tensor of shape
[batch, ...] and returning a Tensor of shape [batch].
covariance_fn: Covariance function, a callable taking two input Tensors
of shape [batch_x1, ...] and [batch_x2, ...] respectively, and returning
a positive semi-definite matrix of shape [batch_x1, batch_x2].
conditional_inputs: Tensor of shape [batch, ...], where batch must be the
same as conditional_outputs', and ellipses must match layer inputs.
conditional_outputs: Tensor of shape [batch, units], where batch must be
the same as conditional_inputs' and units is the layer's units size.
**kwargs: kwargs passed to parent class.
"""
super(GaussianProcess, self).__init__(**kwargs)
self.units = int(units)
self.mean_fn = mean_fn
self.covariance_fn = covariance_fn
self.conditional_inputs = conditional_inputs
self.conditional_outputs = conditional_outputs
self.supports_masking = True
self.input_spec = tf.keras.layers.InputSpec(min_ndim=2)
def build(self, input_shape=None):
# Don't track trainable variables such as in the kernel. The user should
# refer to any via, e.g., self.covariance_fn or the user environment.
self.built = True
def call(self, inputs):
if self.conditional_inputs is None and self.conditional_outputs is None:
covariance_matrix = self.covariance_fn(inputs, inputs)
# Tile locations so output has shape [units, batch_size]. Covariance will
# broadcast to [units, batch_size, batch_size], and we perform
# shape manipulations to get a random variable over [batch_size, units].
loc = self.mean_fn(inputs)
loc = tf.tile(loc[tf.newaxis], [self.units] + [1] * len(loc.shape))
else:
knn = self.covariance_fn(inputs, inputs)
knm = self.covariance_fn(inputs, self.conditional_inputs)
kmm = self.covariance_fn(self.conditional_inputs, self.conditional_inputs)
kmm = tf.linalg.set_diag(
kmm, tf.linalg.diag_part(kmm) + tf.keras.backend.epsilon())
kmm_tril = tf.linalg.cholesky(kmm)
kmm_tril_operator = tf.linalg.LinearOperatorLowerTriangular(kmm_tril)
knm_operator = tf.linalg.LinearOperatorFullMatrix(knm)
# TODO(trandustin): Vectorize linear algebra for multiple outputs. For
# now, we do each separately and stack to obtain a locations Tensor of
# shape [units, batch_size].
loc = []
for conditional_outputs_unit in tf.unstack(self.conditional_outputs,
axis=-1):
center = conditional_outputs_unit - self.mean_fn(
self.conditional_inputs)
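        # Knm Kmm^{-1} center, computed with the Cholesky factor L of Kmm as Knm (L^{-T} (L^{-1} center)).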
loc_unit = knm_operator.matvec(
kmm_tril_operator.solvevec(kmm_tril_operator.solvevec(center),
adjoint=True))
loc.append(loc_unit)
loc = tf.stack(loc) + self.mean_fn(inputs)[tf.newaxis]
covariance_matrix = knn
covariance_matrix -= knm_operator.matmul(
kmm_tril_operator.solve(
kmm_tril_operator.solve(knm, adjoint_arg=True), adjoint=True))
covariance_matrix = tf.linalg.set_diag(
covariance_matrix,
tf.linalg.diag_part(covariance_matrix) + tf.keras.backend.epsilon())
# Form a multivariate normal random variable with batch_shape units and
# event_shape batch_size. Then make it be independent across the units
# dimension. Then transpose its dimensions so it is [batch_size, units].
random_variable = (
generated_random_variables.MultivariateNormalFullCovariance(
loc=loc, covariance_matrix=covariance_matrix))
random_variable = generated_random_variables.Independent(
random_variable.distribution, reinterpreted_batch_ndims=1)
bijector = tfp.bijectors.Inline(
forward_fn=lambda x: tf.transpose(x, perm=[1, 0]),
inverse_fn=lambda y: tf.transpose(y, perm=[1, 0]),
forward_event_shape_fn=lambda input_shape: input_shape[::-1],
forward_event_shape_tensor_fn=lambda input_shape: input_shape[::-1],
inverse_log_det_jacobian_fn=lambda y: tf.cast(0, y.dtype),
forward_min_event_ndims=2)
random_variable = generated_random_variables.TransformedDistribution(
random_variable.distribution, bijector=bijector)
return random_variable
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
input_dim = input_shape[-1]
if input_dim is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
def get_config(self):
config = {
'units': self.units,
'mean_fn': tf.keras.utils.serialize_keras_object(self.mean_fn),
'covariance_fn': tf.keras.utils.serialize_keras_object(
self.covariance_fn),
'conditional_inputs': None, # don't serialize as it can be large
'conditional_outputs': None, # don't serialize as it can be large
}
base_config = super(GaussianProcess, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@utils.add_weight
class SparseGaussianProcess(GaussianProcess):
r"""Gaussian process layer with inducing input and output variables.
The layer represents a distribution over functions, where a
stochastic forward pass appears as
```none
f ~ GP(f | inducing_inputs, inducing_outputs; mean_fn, covariance_fn)
outputs = f(inputs)
```
The arguments `inducing_inputs` and `inducing_outputs`
capture data that the GP "memorizes", i.e., it forms a posterior predictive
distribution. Typically in a variational inference scheme (and by default),
the inducing outputs are normally distributed with learnable location and
scale parameters, and the inducing inputs are learnable parameters.
Given a call to `inputs` with these defaults, an equivalent formulation in
terms of function outputs is
```none
inducing_outputs ~ Normal(inducing_outputs | mean, stddev)
outputs ~ \prod_{unit=1}^{units} MultivariateNormal(output[:, unit] |
mean = mean_fn(inputs) + Knm Kmm^{-1} (inducing_outputs[:, unit]-mean),
covariance = Knn - Knm Kmm^{-1} Kmn)
```
where Knm is the covariance function evaluated between all `inputs` and
`inducing_inputs`; Knn is between all `inputs`; Kmm is between all
`inducing_inputs`; and mean is the mean function evaluated on
`inducing_inputs`. The multivariate normal is correlated across input
dimensions and is independent across output dimensions.
#### Examples
We demonstrate a three-layer deep GP with variational inference (Salimbeni and
Deisenroth, 2017; Damianou and Lawrence, 2013). The code snippet mirrors
Figure 5 of Bayesian Layers. We apply it for regression given batches of
spatial inputs and vector-valued outputs. We flatten inputs to use the
default squared exponential kernel; this naturally extends to pass in a
more sophisticated kernel function.
```python
  from edward2.tensorflow import layers
batch_size = 256
dataset_size = 10000
features, labels = load_spatial_data(batch_size)
model = tf.keras.Sequential([
tf.keras.layers.Flatten(),
layers.SparseGaussianProcess(256, num_inducing=512),
layers.SparseGaussianProcess(256, num_inducing=512),
layers.SparseGaussianProcess(10, num_inducing=512),
])
# Run training loop.
num_steps = 1000
for _ in range(num_steps):
with tf.GradientTape() as tape:
predictions = model(features)
  nll = tf.reduce_mean(tf.keras.losses.mean_squared_error(labels, predictions))
kl = sum(model.losses) / dataset_size
loss = nll + kl
gradients = tape.gradient(loss, model.variables) # use any optimizer here
```
"""
def __init__(
self,
units,
num_inducing,
mean_fn=Zeros(),
covariance_fn=ExponentiatedQuadratic(variance=1., lengthscale=1.),
inducing_inputs_initializer='random_normal',
inducing_outputs_initializer='trainable_normal',
inducing_inputs_regularizer=None,
inducing_outputs_regularizer='normal_kl_divergence',
inducing_inputs_constraint=None,
inducing_outputs_constraint=None,
**kwargs):
"""Constructs layer.
Args:
units: integer, dimensionality of layer.
num_inducing: integer, number of inducing points for the approximation.
mean_fn: Mean function, a callable taking an inputs Tensor of shape
[batch, ...] and returning a Tensor of shape [batch].
covariance_fn: Covariance function, a callable taking two input Tensors
of shape [batch_x1, ...] and [batch_x2, ...] respectively, and returning
a positive semi-definite matrix of shape [batch_x1, batch_x2].
inducing_inputs_initializer: Initializer for the inducing inputs.
inducing_outputs_initializer: Initializer for the inducing outputs.
inducing_inputs_regularizer: Regularizer function applied to the inducing
inputs.
inducing_outputs_regularizer: Regularizer function applied to the inducing
outputs.
inducing_inputs_constraint: Constraint function applied to the inducing
inputs.
inducing_outputs_constraint: Constraint function applied to the inducing
outputs.
**kwargs: kwargs passed to parent class.
"""
super(SparseGaussianProcess, self).__init__(
units=units,
mean_fn=mean_fn,
covariance_fn=covariance_fn,
conditional_inputs=None,
conditional_outputs=None,
**kwargs)
self.num_inducing = num_inducing
self.inducing_inputs_initializer = initializers.get(
inducing_inputs_initializer)
self.inducing_outputs_initializer = initializers.get(
inducing_outputs_initializer)
self.inducing_inputs_regularizer = regularizers.get(
inducing_inputs_regularizer)
self.inducing_outputs_regularizer = regularizers.get(
inducing_outputs_regularizer)
self.inducing_inputs_constraint = constraints.get(
inducing_inputs_constraint)
self.inducing_outputs_constraint = constraints.get(
inducing_outputs_constraint)
def build(self, input_shape=None):
input_shape = tf.TensorShape(input_shape)
input_dim = input_shape[-1]
self.conditional_inputs = self.add_weight(
shape=(self.num_inducing, input_dim),
name='inducing_inputs',
initializer=self.inducing_inputs_initializer,
regularizer=self.inducing_inputs_regularizer,
constraint=self.inducing_inputs_constraint)
self.conditional_outputs = self.add_weight(
shape=(self.num_inducing, self.units),
name='inducing_outputs',
initializer=self.inducing_outputs_initializer,
regularizer=self.inducing_outputs_regularizer,
constraint=self.inducing_outputs_constraint)
super(SparseGaussianProcess, self).build(input_shape)
def call_weights(self):
"""Calls any weights if the initializer is itself a layer."""
if isinstance(self.inducing_inputs_initializer, tf.keras.layers.Layer):
self.conditional_inputs = self.inducing_inputs_initializer(
self.conditional_inputs.shape, self.dtype)
if isinstance(self.inducing_outputs_initializer, tf.keras.layers.Layer):
self.conditional_outputs = self.inducing_outputs_initializer(
self.conditional_outputs.shape, self.dtype)
def call(self, inputs):
self.call_weights()
return super(SparseGaussianProcess, self).call(inputs)
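# A minimal usage sketch (illustrative, not part of the original module): draw one sample from a
# prior-predictive GaussianProcess layer on a small batch of scalar inputs.
if __name__ == '__main__':
  gp = GaussianProcess(units=3)
  x = tf.random.normal([5, 1])
  f = gp(x)  # Edward2 RandomVariable with event shape [batch_size=5, units=3].
  print(tf.convert_to_tensor(f).shape)  # Converting to a tensor draws a sample of shape (5, 3).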
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
import tempfile
import textwrap
import unittest
from collections import namedtuple
from contextlib import contextmanager
from twitter.common.collections import maybe_list
from pants.base.revision import Revision
from pants.java.distribution.distribution import Distribution
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import chmod_plus_x, safe_open, safe_rmtree, touch
EXE = namedtuple('Exe', ['relpath', 'contents'])
def exe(relpath, version=None):
contents = textwrap.dedent("""
#!/bin/sh
if [ $# -ne 3 ]; then
# Sanity check a classpath switch with a value plus the classname for main
echo "Expected 3 arguments, got $#: $@" >&2
exit 1
fi
echo "java.home=${{DIST_ROOT}}"
{}
""".format('echo "java.version={}"'.format(version) if version else '')).strip()
return EXE(relpath, contents=contents)
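# For reference (illustrative), exe('bin/java', version='1.7.0_25') renders the following stand-in
# launcher script, which echoes the java.home and java.version properties the validation code parses:
#
#   #!/bin/sh
#   if [ $# -ne 3 ]; then
#     # Sanity check a classpath switch with a value plus the classname for main
#     echo "Expected 3 arguments, got $#: $@" >&2
#     exit 1
#   fi
#   echo "java.home=${DIST_ROOT}"
#   echo "java.version=1.7.0_25"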
@contextmanager
def distribution(files=None, executables=None, java_home=None):
with temporary_dir() as dist_root:
with environment_as(DIST_ROOT=os.path.join(dist_root, java_home) if java_home else dist_root):
for f in maybe_list(files or ()):
touch(os.path.join(dist_root, f))
for executable in maybe_list(executables or (), expected_type=EXE):
path = os.path.join(dist_root, executable.relpath)
with safe_open(path, 'w') as fp:
fp.write(executable.contents or '')
chmod_plus_x(path)
yield dist_root
@contextmanager
def env(**kwargs):
environment = dict(JDK_HOME=None, JAVA_HOME=None, PATH=None)
environment.update(**kwargs)
with environment_as(**environment):
yield
class DistributionValidationTest(unittest.TestCase):
def test_validate_basic(self):
with distribution() as dist_root:
with self.assertRaises(ValueError):
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
with distribution(files='bin/java') as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
with distribution(executables=exe('bin/java')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin')).validate()
def test_validate_jre(self):
with distribution(executables=exe('bin/java')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=False).validate()
def test_validate_jdk(self):
with distribution(executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=True).validate()
with distribution(executables=[exe('bin/java'), exe('bin/javac')]) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), jdk=True).validate()
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre/bin'), jdk=True).validate()
def test_validate_version(self):
with distribution(executables=exe('bin/java', '1.7.0_25')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), minimum_version='1.7.0_45').validate()
with distribution(executables=exe('bin/java', '1.8.0_1')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin'), maximum_version='1.8').validate()
with distribution(executables=exe('bin/java', '1.7.0_25')) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin'), minimum_version='1.7.0_25').validate()
Distribution(bin_path=os.path.join(dist_root, 'bin'),
minimum_version=Revision.lenient('1.6')).validate()
Distribution(bin_path=os.path.join(dist_root, 'bin'),
minimum_version='1.7.0_25',
maximum_version='1.7.999').validate()
def test_validated_binary(self):
with distribution(files='bin/jar', executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).binary('jar')
with distribution(executables=[exe('bin/java'), exe('bin/jar')]) as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('bin/jar')],
java_home='jre') as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('bin/jar'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('jar')
with distribution(executables=[exe('jre/bin/java'), exe('jre/bin/java_vm'), exe('bin/javac')],
java_home='jre') as dist_root:
Distribution(bin_path=os.path.join(dist_root, 'jre', 'bin')).binary('java_vm')
def test_validated_library(self):
with distribution(executables=exe('bin/java')) as dist_root:
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.join(dist_root, 'bin')).find_libs(['tools.jar'])
with distribution(executables=exe('bin/java'), files='lib/tools.jar') as dist_root:
dist = Distribution(bin_path=os.path.join(dist_root, 'bin'))
self.assertEqual([os.path.join(dist_root, 'lib', 'tools.jar')],
dist.find_libs(['tools.jar']))
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
files=['lib/tools.jar', 'jre/lib/rt.jar'],
java_home='jre') as dist_root:
dist = Distribution(bin_path=os.path.join(dist_root, 'jre/bin'))
self.assertEqual([os.path.join(dist_root, 'lib', 'tools.jar'),
os.path.join(dist_root, 'jre', 'lib', 'rt.jar')],
dist.find_libs(['tools.jar', 'rt.jar']))
class BaseDistributionLocationTest(unittest.TestCase):
def make_tmp_dir(self):
tmpdir = tempfile.mkdtemp()
self.addCleanup(safe_rmtree, tmpdir)
return tmpdir
def set_up_no_linux_discovery(self):
orig_java_dist_dir = Distribution._JAVA_DIST_DIR
def restore_java_dist_dir():
Distribution._JAVA_DIST_DIR = orig_java_dist_dir
Distribution._JAVA_DIST_DIR = self.make_tmp_dir()
self.addCleanup(restore_java_dist_dir)
def set_up_no_osx_discovery(self):
osx_java_home_exe = Distribution._OSX_JAVA_HOME_EXE
def restore_osx_java_home_exe():
Distribution._OSX_JAVA_HOME_EXE = osx_java_home_exe
Distribution._OSX_JAVA_HOME_EXE = os.path.join(self.make_tmp_dir(), 'java_home')
self.addCleanup(restore_osx_java_home_exe)
class BaseDistributionLocationEnvOnlyTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_linux_discovery()
self.set_up_no_osx_discovery()
class DistributionEnvLocationTest(BaseDistributionLocationEnvOnlyTest):
def test_locate_none(self):
with env():
with self.assertRaises(Distribution.Error):
Distribution.locate()
def test_locate_java_not_executable(self):
with distribution(files='bin/java') as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.locate()
def test_locate_jdk_is_jre(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.locate(jdk=True)
def test_locate_version_to_low(self):
with distribution(executables=exe('bin/java', '1.6.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.locate(minimum_version='1.7.0')
def test_locate_version_to_high(self):
with distribution(executables=exe('bin/java', '1.8.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.locate(maximum_version='1.7.999')
def test_locate_invalid_jdk_home(self):
with distribution(executables=exe('java')) as dist_root:
with env(JDK_HOME=dist_root):
with self.assertRaises(Distribution.Error):
Distribution.locate()
def test_locate_invalid_java_home(self):
with distribution(executables=exe('java')) as dist_root:
with env(JAVA_HOME=dist_root):
with self.assertRaises(Distribution.Error):
Distribution.locate()
def test_locate_jre_by_path(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.locate()
def test_locate_jdk_by_path(self):
with distribution(executables=[exe('bin/java'), exe('bin/javac')]) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.locate(jdk=True)
def test_locate_jdk_via_jre_path(self):
with distribution(executables=[exe('jre/bin/java'), exe('bin/javac')],
java_home='jre') as dist_root:
with env(PATH=os.path.join(dist_root, 'jre', 'bin')):
Distribution.locate(jdk=True)
def test_locate_version_greater_then_or_equal(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.locate(minimum_version='1.6.0')
def test_locate_version_less_then_or_equal(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.locate(maximum_version='1.7.999')
def test_locate_version_within_range(self):
with distribution(executables=exe('bin/java', '1.7.0')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.locate(minimum_version='1.6.0', maximum_version='1.7.999')
def test_locate_via_jdk_home(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(JDK_HOME=dist_root):
Distribution.locate()
def test_locate_via_java_home(self):
with distribution(executables=exe('bin/java')) as dist_root:
with env(JAVA_HOME=dist_root):
Distribution.locate()
class DistributionLinuxLocationTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_osx_discovery()
@contextmanager
def java_dist_dir(self):
with distribution(executables=exe('bin/java', version='1')) as jdk1_home:
with distribution(executables=exe('bin/java', version='2')) as jdk2_home:
with temporary_dir() as java_dist_dir:
jdk1_home_link = os.path.join(java_dist_dir, 'jdk1_home')
jdk2_home_link = os.path.join(java_dist_dir, 'jdk2_home')
os.symlink(jdk1_home, jdk1_home_link)
os.symlink(jdk2_home, jdk2_home_link)
original_java_dist_dir = Distribution._JAVA_DIST_DIR
Distribution._JAVA_DIST_DIR = java_dist_dir
try:
yield jdk1_home_link, jdk2_home_link
finally:
Distribution._JAVA_DIST_DIR = original_java_dist_dir
def test_locate_jdk1(self):
with env():
with self.java_dist_dir() as (jdk1_home, _):
dist = Distribution.locate(maximum_version='1')
self.assertEqual(jdk1_home, dist.home)
def test_locate_jdk2(self):
with env():
with self.java_dist_dir() as (_, jdk2_home):
dist = Distribution.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_trumps_path(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as path_jdk:
with env(PATH=os.path.join(path_jdk, 'bin')):
dist = Distribution.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
dist = Distribution.locate(minimum_version='3')
self.assertEqual(path_jdk, dist.home)
def test_locate_jdk_home_trumps(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as jdk_home:
with env(JDK_HOME=jdk_home):
dist = Distribution.locate()
self.assertEqual(jdk_home, dist.home)
dist = Distribution.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = Distribution.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_java_home_trumps(self):
with self.java_dist_dir() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as java_home:
with env(JAVA_HOME=java_home):
dist = Distribution.locate()
self.assertEqual(java_home, dist.home)
dist = Distribution.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = Distribution.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
class DistributionOSXLocationTest(BaseDistributionLocationTest):
def setUp(self):
self.set_up_no_linux_discovery()
@contextmanager
def java_home_exe(self):
with distribution(executables=exe('bin/java', version='1')) as jdk1_home:
with distribution(executables=exe('bin/java', version='2')) as jdk2_home:
with temporary_dir() as tmpdir:
osx_java_home_exe = os.path.join(tmpdir, 'java_home')
with safe_open(osx_java_home_exe, 'w') as fp:
fp.write(textwrap.dedent("""
#!/bin/sh
echo '<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<array>
<dict>
<key>JVMHomePath</key>
<string>{jdk1_home}</string>
</dict>
<dict>
<key>JVMHomePath</key>
<string>{jdk2_home}</string>
</dict>
</array>
</plist>
'
""".format(jdk1_home=jdk1_home, jdk2_home=jdk2_home)).strip())
chmod_plus_x(osx_java_home_exe)
original_osx_java_home_exe = Distribution._OSX_JAVA_HOME_EXE
Distribution._OSX_JAVA_HOME_EXE = osx_java_home_exe
try:
yield jdk1_home, jdk2_home
finally:
Distribution._OSX_JAVA_HOME_EXE = original_osx_java_home_exe
def test_locate_jdk1(self):
with env():
with self.java_home_exe() as (jdk1_home, _):
dist = Distribution.locate()
self.assertEqual(jdk1_home, dist.home)
def test_locate_jdk2(self):
with env():
with self.java_home_exe() as (_, jdk2_home):
dist = Distribution.locate(minimum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_trumps_path(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as path_jdk:
with env(PATH=os.path.join(path_jdk, 'bin')):
dist = Distribution.locate()
self.assertEqual(jdk1_home, dist.home)
dist = Distribution.locate(minimum_version='3')
self.assertEqual(path_jdk, dist.home)
def test_locate_jdk_home_trumps(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as jdk_home:
with env(JDK_HOME=jdk_home):
dist = Distribution.locate()
self.assertEqual(jdk_home, dist.home)
dist = Distribution.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = Distribution.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
def test_locate_java_home_trumps(self):
with self.java_home_exe() as (jdk1_home, jdk2_home):
with distribution(executables=exe('bin/java', version='3')) as java_home:
with env(JAVA_HOME=java_home):
dist = Distribution.locate()
self.assertEqual(java_home, dist.home)
dist = Distribution.locate(maximum_version='1.1')
self.assertEqual(jdk1_home, dist.home)
dist = Distribution.locate(minimum_version='1.1', maximum_version='2')
self.assertEqual(jdk2_home, dist.home)
class DistributionCachedTest(BaseDistributionLocationEnvOnlyTest):
def setUp(self):
super(DistributionCachedTest, self).setUp()
# Save local cache and then flush so tests get a clean environment.
local_cache = Distribution._CACHE
def restore_cache():
Distribution._CACHE = local_cache
Distribution._CACHE = {}
self.addCleanup(restore_cache)
def test_cached_good_min(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.cached(minimum_version='1.7.0_25')
def test_cached_good_max(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.cached(maximum_version='1.7.0_50')
def test_cached_good_bounds(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
Distribution.cached(minimum_version='1.6.0_35', maximum_version='1.7.0_55')
def test_cached_too_low(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.cached(minimum_version='1.7.0_40')
def test_cached_too_high(self):
with distribution(executables=exe('bin/java', '1.7.0_83')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.cached(maximum_version='1.7.0_55')
def test_cached_low_fault(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.cached(minimum_version='1.7.0_35', maximum_version='1.7.0_55')
def test_cached_high_fault(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.cached(minimum_version='1.6.0_00', maximum_version='1.6.0_50')
def test_cached_conflicting(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(Distribution.Error):
Distribution.cached(minimum_version='1.7.0_00', maximum_version='1.6.0_50')
def test_cached_bad_input(self):
with distribution(executables=exe('bin/java', '1.7.0_33')) as dist_root:
with env(PATH=os.path.join(dist_root, 'bin')):
with self.assertRaises(ValueError):
Distribution.cached(minimum_version=1.7, maximum_version=1.8)
def exe_path(name):
process = subprocess.Popen(['which', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = process.communicate()
if process.returncode != 0:
return None
path = stdout.strip()
return path if os.path.exists(path) and os.access(path, os.X_OK) else None
class LiveDistributionTest(unittest.TestCase):
JAVA = exe_path('java')
JAVAC = exe_path('javac')
@unittest.skipIf(not JAVA, reason='No java executable on the PATH.')
def test_validate_live(self):
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='999.9.9').validate()
with self.assertRaises(Distribution.Error):
Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='0.0.1').validate()
Distribution(bin_path=os.path.dirname(self.JAVA)).validate()
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1').validate()
Distribution(bin_path=os.path.dirname(self.JAVA), maximum_version='999.999.999').validate()
Distribution(bin_path=os.path.dirname(self.JAVA), minimum_version='1.3.1',
maximum_version='999.999.999').validate()
Distribution.locate(jdk=False)
@unittest.skipIf(not JAVAC, reason='No javac executable on the PATH.')
def test_validate_live_jdk(self):
Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).validate()
Distribution(bin_path=os.path.dirname(self.JAVAC), jdk=True).binary('javap')
Distribution.locate(jdk=True)
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""To shuffle records (stable)."""
import math
import os
import struct
from typing import Iterator
import uuid
import six
import tensorflow as tf
from tensorflow_datasets.core import hashing
from tensorflow_datasets.core.utils import type_utils
# Approximately how much data to store in memory before writing to disk.
# If the amount of data to shuffle is < MAX_MEM_BUFFER_SIZE, no intermediary
# data is written to disk.
MAX_MEM_BUFFER_SIZE = 1000 << 20 # 1GB
# If the data to shuffle is too large for memory, records are split among 1K
# buckets stored on disk, then each bucket is sorted in memory.
# For a dataset split of about 1TB, each bucket is going to
# be about 1GB. Larger datasets will likely be handled by Beam.
#
# Increasing the number of buckets would decrease the size of each bucket.
# Current implementation relies on having one open file per bucket.
# Windows has a limit of ~2K open files per process (Linux ~32K); so increasing
# the number of buckets might warrant some changes in implementation.
BUCKETS_NUMBER = 1000 # Number of buckets to pre-sort and hold generated data.
HKEY_SIZE = 128 # Hash of keys is 128 bits (md5).
HKEY_SIZE_BYTES = HKEY_SIZE // 8
class DuplicatedKeysError(Exception):
def __init__(self, item1, item2):
super(DuplicatedKeysError, self).__init__()
self.item1 = item1
self.item2 = item2
def _hkey_to_bytes(hkey):
"""Converts 128 bits integer hkey to binary representation."""
max_int64 = 0xFFFFFFFFFFFFFFFF
return struct.pack('=QQ', (hkey >> 64) & max_int64, hkey & max_int64)
def _read_hkey(buff):
"""Reads from fobj and returns hkey (128 bites integer)."""
a, b = struct.unpack('=QQ', buff)
return (a << 64) | b
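# Round-trip property (illustrative): _read_hkey(_hkey_to_bytes(h)) == h for any 128-bit integer h,
# since the high and low 64-bit halves are packed and unpacked in the same order.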
def get_bucket_number(hkey, shards_number):
"""Returns bucket (shard) number (int) for given hashed key (int)."""
# We purposely do not use modulo (%) to keep global order across shards.
# floor(key * shards_number / HKEYS_NUMBER), with HKEYS_NUMBER = 2**HKEY_SIZE.
return math.trunc((hkey * shards_number) >> HKEY_SIZE)
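# For example (illustrative): with shards_number=BUCKETS_NUMBER=1000, a key hashed to the midpoint
# of the 128-bit range, hkey = 2**127, lands in bucket (2**127 * 1000) >> 128 = 500.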
class _Bucket(object):
"""Holds (key, binary value) tuples to disk, fast.
Bucket instances are designed to be used either:
1. Many buckets are written in parallel, then they are read one by one. When
reading, the data can be fully loaded in memory to be sorted.
This is how buckets are currently used in Shuffler.
2. Buckets are being written one at a time (or on different machines/jobs).
    Before writing the data, it is sorted in memory. Many buckets are read in
parallel.
    This is not currently used, but could be if we decide to parallelize the
writing of final sharded tfrecord files.
File format (assuming a key of 16 bytes):
key1 (16 bytes) | size1 (8 bytes) | data1 (size1 bytes) |
key2 (16 bytes) | size2 (8 bytes) | data2 (size2 bytes) |
...
"""
def __init__(self, path):
"""Initialize a _Bucket instance.
Args:
path (str): path to bucket file, where to write to or read from.
"""
self._path = path
self._fobj = None
self._length = 0
self._size = 0
@property
def size(self):
return self._size
def __len__(self):
return self._length
def add(self, key, data):
"""Adds (key, data) to bucket.
Args:
key (int): the key.
data (binary): the data.
"""
if not self._fobj:
tf.io.gfile.makedirs(os.path.dirname(self._path))
self._fobj = tf.io.gfile.GFile(self._path, mode='wb')
data_size = len(data)
self._fobj.write(_hkey_to_bytes(key))
# http://docs.python.org/3/library/struct.html#byte-order-size-and-alignment
# The equal sign ("=") is important here, has it guarantees the standard
# size (Q: 8 bytes) is used, as opposed to native size, which can differ
# from one platform to the other. This way we know exactly 8 bytes have been
    # written, and we can read back that same number of bytes later.
    # We do not specify endianness (platform dependent), but this is OK since the
# temporary files are going to be written and read by the same platform.
self._fobj.write(struct.pack('=Q', data_size))
self._fobj.write(data)
self._length += 1
self._size += data_size
def flush(self):
if self._fobj:
self._fobj.flush()
self._fobj.close()
def read_values(self):
"""Yields (hkey, data) tuples stored in bucket."""
self.flush()
path = self._path
if not tf.io.gfile.exists(path):
# In case bucket was created but nothing was ever added.
# This is likely to happen if the number of buckets is large compared to
# the number of generated examples.
return
with tf.io.gfile.GFile(path, 'rb') as fobj:
while True:
buff = fobj.read(HKEY_SIZE_BYTES)
if not buff:
break
hkey = _read_hkey(buff)
size_bytes = fobj.read(8)
size = struct.unpack('=Q', size_bytes)[0]
data = fobj.read(size)
yield hkey, data
def del_file(self):
if tf.io.gfile.exists(self._path):
tf.io.gfile.remove(self._path)
class Shuffler(object):
"""Stores data in temp buckets, restitute it shuffled."""
def __init__(self, dirpath, hash_salt, disable_shuffling: bool = False):
"""Initialize Shuffler.
Args:
dirpath (string): directory in which to store temporary files.
hash_salt (string or bytes): salt to hash keys.
      disable_shuffling (bool): if True, keys are not hashed, so records come back in key order rather than shuffled.
"""
grp_name = uuid.uuid4()
self._hasher = hashing.Hasher(hash_salt)
self._disable_shuffling = disable_shuffling
self._buckets = []
for i in range(BUCKETS_NUMBER):
bucket_name = 'bucket_%s_%03d.tmp' % (grp_name, i)
path = os.path.join(dirpath, bucket_name)
self._buckets.append(_Bucket(path))
self._read_only = False
self._total_bytes = 0
# To keep data in memory until enough data has been gathered.
self._in_memory = True
self._mem_buffer = []
@property
def size(self):
"""Return total size in bytes of records (not keys)."""
return self._total_bytes
@property
def bucket_lengths(self):
if self._in_memory:
return [len(self._mem_buffer)]
return [len(b) for b in self._buckets]
def _add_to_bucket(self, hkey, data):
bucket_number = get_bucket_number(hkey, BUCKETS_NUMBER)
self._buckets[bucket_number].add(hkey, data)
def _add_to_mem_buffer(self, hkey, data):
self._mem_buffer.append((hkey, data))
if self._total_bytes > MAX_MEM_BUFFER_SIZE:
for hkey, data in self._mem_buffer:
self._add_to_bucket(hkey, data)
self._mem_buffer = None
self._in_memory = False
def add(self, key, data):
"""Add (key, data) to shuffler."""
if self._read_only:
raise AssertionError('add() cannot be called after __iter__.')
if not isinstance(data, six.binary_type):
raise AssertionError('Only bytes (not %s) can be stored in Shuffler!' %
(type(data)))
if self._disable_shuffling:
hkey = key
else:
hkey = self._hasher.hash_key(key)
self._total_bytes += len(data)
if self._in_memory:
self._add_to_mem_buffer(hkey, data)
else:
self._add_to_bucket(hkey, data)
def __iter__(self) -> Iterator[type_utils.KeySerializedExample]:
self._read_only = True
previous_hkey = None
previous_data = None
iterator = self._iter_mem() if self._in_memory else self._iter_buckets()
for hkey, data in iterator:
if hkey == previous_hkey:
raise DuplicatedKeysError(data, previous_data)
previous_hkey = hkey
yield hkey, data
previous_data = data
def _iter_mem(self):
for hkey, data in sorted(self._mem_buffer):
yield hkey, data
def _iter_buckets(self):
for bucket in self._buckets:
bucket_data = sorted(bucket.read_values())
bucket.del_file()
for hkey, data in bucket_data:
yield hkey, data
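# A minimal usage sketch (illustrative, not part of the original module): shuffle a few records
# through a temporary directory and read them back in hashed-key order.
if __name__ == '__main__':
  import tempfile
  shuffler = Shuffler(dirpath=tempfile.mkdtemp(), hash_salt='demo-salt')
  for key, record in [('a', b'first'), ('b', b'second'), ('c', b'third')]:
    shuffler.add(key, record)
  for hkey, record in shuffler:  # Small data stays in memory; no bucket files are written.
    print(hkey, record)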
|
|
#!/usr/bin/python
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import types
import math
import random
class datum:
def __init__(self, value, name_info):
self.name_info = name_info
self.value = value
def __repr__(self):
return "datum(%s, %s)" % (self.value, self.name_info)
def __str__(self):
return self.__repr__()
def __lt__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value < other
else:
return self.value < other.value
def __le__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value <= other
else:
return self.value <= other.value
def __gt__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value > other
else:
return self.value > other.value
def __ge__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value >= other
else:
return self.value >= other.value
def __ne__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value != other
else:
return self.value != other.value
def __eq__(self, other ):
if type(other) == types.IntType or type(other) == types.FloatType:
return self.value == other
else:
return self.value == other.value
def mean( x ):
# x is [datum]
data = [r.value for r in x]
return datum( sum(data) / len(data), None )
def median( x ):
# x is [datum]
return x[ len(x)/2 ]
def stddev( ll, avg=0.0 ):
   # ll is [datum]
data = [r.value for r in ll]
val = 0.0
for x in data:
val += (x - avg) * (x - avg)
val /= len(data)
val = math.sqrt( val )
return datum( val, None )
def percentile( data, p=0.5 ):
# data is [datum]
off = int(len(data) * p)
names = [data[i].name_info for i in xrange(off, len(data))]
return datum( data[off].value, names )
exp_delim = "--------------------------------"
def read_block( lines, start ):
i = start + 1
while i < len(lines) and not lines[i].startswith(exp_delim):
i += 1
block = lines[start + 1: min(len(lines),i)]
name = " ".join( lines[start].split()[1:] )
return (name, block)
def parse_block( block ):
data = {}
for line in block:
parts = line.split()
if len(parts) == 0 or parts[0] != 'DATA':
continue
value = float(parts[-1])
key = " ".join( parts[1:-1] )
data[key] = value
#print data
return data
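# For reference (illustrative), an experiment output file parsed by read_block/parse_block looks like:
#
#   -------------------------------- step name
#   DATA some key 1.23
#   DATA another key 4.56
#   -------------------------------- next step
#   DATA some key 7.89
#
# parse_output() below returns {"step name": {"some key": 1.23, "another key": 4.56}, ...}.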
def parse_output( lines ):
if lines == None or len(lines) <= 0:
return None
i = 0
ret = {}
while True:
while i < len(lines) and not lines[i].startswith( exp_delim ):
i += 1
if i >= len(lines):
break
block_name, block_lines = read_block( lines, i )
if len(block_name) == 0:
break
i += len(block_lines) + 1
ret[block_name] = parse_block( block_lines )
return ret
def get_data( outputs_dir ):
import os
files = os.listdir( outputs_dir )
data = {}
for fname in files:
fd = open( os.path.join(outputs_dir,fname), "r" )
buf = fd.read()
fd.close()
lines = buf.split("\n")
for i in xrange(0,len(lines)):
lines[i] = lines[i].strip()
dat = parse_output(lines)
if dat != None:
data[fname] = dat
return data
def get_results( data ):
results = {}
for fname in data.keys():
record = data[fname]
for step in record.keys():
step_record = record[step]
if not results.has_key(step):
results[step] = {}
for k in step_record.keys():
if not results[step].has_key(k):
results[step][k] = []
results[step][k].append( datum(step_record[k], fname) )
for step in results.keys():
for key in results[step].keys():
results[step][key].sort()
return results
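# For reference (illustrative), get_results() returns sorted per-file samples keyed by step and key:
#   { "step name": { "some key": [datum(1.23, "host-0.txt"), datum(1.30, "host-1.txt"), ...] } }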
def default_fields( results ):
# default field names from results
fields = {}
for step in results:
rec = results[step]
fields[step] = []
for k in rec.keys():
fields[step].append( k )
return fields
def results_apply( results, func, func_args, fields=None ):
# apply a function (with arguments) on a set of fields and return a copy
if fields == None:
fields = default_fields( results )
ret = {}
for step in fields.keys():
ret[step] = {}
for key in fields[step]:
args = {}
try:
args = func_args[step][key]
except:
pass
ret[step][key] = func(results[step][key], **args)
return ret
def fields_value( fields, name, p ):
# generate a set of function arguments for results_apply, given fields, and given a value
ps = {}
for step in fields:
ps[step] = {}
for key in fields[step]:
ps[step][key] = {name: p}
return ps
def percentiles_closure( p ):
# generate a function that, when called with results and fields, will apply the given percentile to results
def percentiles_func( results, fields=None ):
if fields == None:
fields = default_fields(results)
ps = fields_value( fields, "p", p )
return results_apply( results, percentile, ps, fields )
return percentiles_func
def error_values_closure( results, function, input_data, fields=None ):
# apply a value function to results, given the percentile desired
def error_values( results, input_data, fields=None ):
return results_apply( results, function, input_data, fields )
return error_values
def means( results, fields=None ):
# apply a mean function to results
return results_apply( results, mean, {}, fields )
def stddevs( results, mean_data, fields=None ):
# apply a stddev function to results, given mean_data
md = {}
for step in mean_data.keys():
md[step] = {}
for key in mean_data[step].keys():
md[step][key] = {'avg': mean_data[step][key].value}
evc = error_values_closure( results, stddev, md, fields )
return evc( results, md, fields )
def zero_error( results, ignored, fields=None ):
evc = error_values_closure( results, lambda data: datum(0, None), {}, fields )
return evc( results, {}, fields )
def medians( results, fields=None ):
# apply median function to results
return results_apply( results, median, {}, fields )
def percentiles( results, p, fields=None ):
# apply a percentile function to results, given the percentile desired
pf = percentiles_closure( p )
return pf( results, fields )
def aggregate_data( results, fields=None, methods=[] ):
# apply many data methods to results to form an aggregate
# method signature: method( results, fields )
if fields == None:
      fields = default_fields( results )
# {step: {key: [method_result_1, method_result_2, ...], ...}, ...}
aggregate = {}
for method in methods:
data = method( results, fields )
for step in data.keys():
if not aggregate.has_key(step):
aggregate[step] = {}
for key in data[step]:
if not aggregate[step].has_key( key ):
aggregate[step][key] = []
aggregate[step][key].append( data[step][key] )
return aggregate
def aggregate_error( results, data, fields=None, methods=[] ):
# apply many error methods to results.
# method signature: method( results, data, fields )
if fields == None:
        fields = default_fields( results )
# {step: {key: [error_result_1, error_result_1, ...], ...}, ...}
aggregate = {}
i = 0
for method in methods:
# get the aggregated data corresponding to this error method
err_input = {}
for step in data.keys():
err_input[step] = {}
for key in data[step].keys():
err_input[step][key] = data[step][key][i]
i += 1
err = method( results, err_input, fields )
for step in err.keys():
if not aggregate.has_key( step ):
aggregate[step] = {}
for key in err[step]:
if not aggregate[step].has_key( key ):
aggregate[step][key] = []
aggregate[step][key].append( err[step][key] )
return aggregate
def aggregate_series( results, names, data_methods, error_methods ):
# generate data from results, given names = [(experiment_step, experiment_key), ...] and *_methods = [method, ...] and kwargs = {arguments to make_bars}
fields = {}
for (step, key) in names:
if not fields.has_key(step):
fields[step] = []
fields[step].append( key )
aggregated_data = None
aggregated_error = None
if data_methods != None and len(data_methods) > 0:
aggregated_data = aggregate_data( results, fields, data_methods )
if error_methods != None and len(error_methods) > 0:
aggregated_error = aggregate_error( results, aggregated_data, fields, error_methods )
return (aggregated_data, aggregated_error)
def graph_default_order( aggregate ):
order = []
# derive bar order from aggregate
for step in aggregate.keys():
for key in aggregate[step].keys():
order.append( (step, key) )
return order
def graph_legend_labels( order ):
legend_labels = []
for (step, key) in order:
legend_labels.append( "%s: %s" % (step, key) )
return legend_labels
def graph_series( aggregate, yerror_aggregate, order=None ):
# extract aggregate data and put it into 2D value arrays
data_series = []
yerror_series = []
if order == None:
order = graph_default_order( aggregate )
i = 0
for (step, key) in order:
data_series.append( [r.value for r in aggregate[step][key]] )
has_error = False
if yerror_aggregate != None:
if yerror_aggregate.has_key(step):
if yerror_aggregate[step].has_key(key):
if len(yerror_aggregate[step][key]) != len(aggregate[step][key]):
raise Exception ("Mismatched aggregate and yerror lengths (%d vs %d) for (%s, %s)" % (len(yerror_aggregate[step][key]), len(aggregate[step][key]), step, key))
yerror_series.append( [r.value for r in yerror_aggregate[step][key]] )
has_error = True
if not has_error:
yerror_series.append( [0] * len(aggregate[step][key]) )
return (data_series, yerror_series)
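# Illustrative plotting sketch (an assumption, not the original plotting code):
# aggregate_series()'s docstring refers to make_bars, which is not defined in
# this file, so the hypothetical helper below only shows how the
# (data_series, yerror_series) lists returned by graph_series() could be drawn
# as grouped bars with error bars using matplotlib. series_labels can come from
# graph_legend_labels() above.
def sketch_plot_series( data_series, yerror_series, series_labels=None ):
    import matplotlib.pyplot as plt
    num_series = len(data_series)
    num_groups = len(data_series[0]) if num_series > 0 else 0
    bar_width = 0.8 / max(1, num_series)
    for s in xrange(0, num_series):
        # offset each series so the bars of one group sit side by side
        xs = [g + s * bar_width for g in xrange(0, num_groups)]
        label = series_labels[s] if series_labels != None else None
        plt.bar( xs, data_series[s], bar_width, yerr=yerror_series[s], label=label )
    if series_labels != None:
        plt.legend()
    plt.show()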
def experiment_write_block( outputs, fname, step ):
import os
fpath = os.path.join( outputs, fname )
fd = open( fpath, "a" )
fd.write( exp_delim + " " + step + "\n" )
fd.close()
def experiment_write_data( outputs, fname, key, value ):
import os
fpath = os.path.join( outputs, fname )
fd = open( fpath, "a" )
fd.write("DATA %s %s\n" % (key, value) )
fd.close()
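# For reference, the mock output files written by the two helpers above look
# like the following (one block header per step from experiment_write_block,
# DATA lines from experiment_write_data; exp_delim is the delimiter constant
# defined earlier in this file, and a trailing empty-named block closes the
# last step). get_data()/parse_output() above read these files back into
# nested dicts keyed by step and key:
#
#   <exp_delim> step 1
#   DATA key 1 3.14
#   DATA key 2 2.71
#   <exp_delim> step 2
#   ...
#   <exp_delim>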
def mock_experiment( mock_outputs, step_count, key_count ):
    import os
    import random
try:
os.mkdir( mock_outputs )
except:
raise Exception("Mock experiment directory %s already exists. Remove it first" % mock_outputs )
mock_experiment_names = [ "host-%s.txt" % i for i in xrange(0,100) ]
series_names = []
for fname in mock_experiment_names:
        # do a mock experiment
for i in xrange(0,step_count):
step = "step %s" % (i + 1)
experiment_write_block( mock_outputs, fname, step )
for j in xrange(0,key_count):
key = "key %s" % (j + 1)
series_names.append( (step,key) )
experiment_write_data( mock_outputs, fname, key, float(random.randint(0,1000)) / 100.0 )
experiment_write_block( mock_outputs, fname, "" )
data = get_data( mock_outputs )
results = get_results( data )
aggregated_data, aggregated_error = aggregate_series( results, series_names, [means, medians, percentiles_closure(0.9), percentiles_closure(0.99)], [stddevs, zero_error, zero_error, zero_error] )
return (aggregated_data, aggregated_error)
if __name__ == "__main__":
import pprint
import sys
    import os
    import random
try:
mock_outputs = sys.argv[1]
except:
mock_outputs = ".mock_experiment"
try:
os.mkdir( mock_outputs )
except:
raise Exception("Mock experiment directory %s already exists. Remove it first" % mock_outputs )
mock_experiment_names = [ "host-%s.txt" % i for i in xrange(0,100) ]
series_names = []
for fname in mock_experiment_names:
        # do a mock experiment
for i in xrange(0,10):
step = "step %s" % (i + 1)
experiment_write_block( mock_outputs, fname, step )
for j in xrange(0,10):
key = "key %s" % (j + 1)
series_names.append( (step,key) )
experiment_write_data( mock_outputs, fname, key, float(random.randint(0,1000)) / 100.0 )
experiment_write_block( mock_outputs, fname, "" )
data = get_data( mock_outputs )
results = get_results( data )
pp = pprint.PrettyPrinter()
print "data"
pp.pprint( data )
print "results"
pp.pprint( results )
aggregated_data, aggregated_error = aggregate_series( results, series_names, [means, medians, percentiles_closure(0.9), percentiles_closure(0.99)], [stddevs, zero_error, zero_error, zero_error] )
print "aggregated data"
pp.pprint( aggregated_data )
print "aggregated error"
pp.pprint( aggregated_error )
graph_data, graph_error = graph_series( aggregated_data, aggregated_error )
print "graph data"
pp.pprint( graph_data )
print "error data"
    pp.pprint( graph_error )
|
|
import numpy as np
import matplotlib.pyplot as plt
import pylab
import joblib
import cPickle as cp
# t = np.arange(0, 5, 0.2)
# t2 = np.arange(0, 5, 0.02)
# def f(t):
# return np.exp(-t)*np.cos(2*np.pi*t)
def dice_np(y_true, y_pred):
y_true = y_true.reshape(y_true.shape[0], -1)
y_pred = y_pred.reshape(y_pred.shape[0], -1)
#y_true = np.reshape(y_true, -1)
#y_pred = np.reshape(y_pred, -1)
# y_true = y_true/np.max(np.max(y_true))
# y_pred = y_pred/np.max(np.max(y_pred))
#y_true[y_true > 0.0] = 1.0
#y_pred[y_pred > 0.0] = 1.0
print('Shapes : ', y_true.shape, y_pred.shape)
intersection = y_true*y_pred
#print('Int shape ', intersection.shape)
intersection = np.sum(intersection, axis = 1)
#print('Int shape new ', intersection.shape)
dr1 = np.sum(y_true, axis=1)
dr2 = np.sum(y_pred, axis=1)
#print('Dr ', dr1, dr2)
dr = dr1+dr2
nr = 2*intersection
x = nr/dr
return np.mean(x)
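# Hand-checkable sanity example (added for illustration; the arrays are made
# up, not taken from data.npz): two 1x4 masks that share 2 of their 3
# foreground pixels give Dice = 2*2 / (3 + 3) ~= 0.667.
_a = np.array([[1.0, 1.0, 1.0, 0.0]])
_b = np.array([[1.0, 1.0, 0.0, 1.0]])
print('dice_np sanity check (expect ~0.667): ', dice_np(_a, _b))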
results = np.load('data.npz')
print(results.files)
print(results['imgs_test_X'].shape, results['imgs_test_Y'].shape, results['imgs_test_Pred'].shape)
# for i in range(0,results['imgs_test_X'].shape[0]):
# plt.figure(1)
# #plt.axis('off')
# plt.title('%d"'%(i))
# plt.subplot(231)
# plt.imshow(results['imgs_test_X'][i][0])
# plt.subplot(232)
# plt.imshow(results['imgs_test_X'][i][0]*results['imgs_test_Y'][i][0], vmin = np.min(results['imgs_test_X'][i][0]), vmax = np.max(results['imgs_test_X'][i][0]))
# plt.subplot(233)
# plt.imshow(results['imgs_test_X'][i][0]*results['imgs_test_Pred'][i][0], vmin = np.min(results['imgs_test_X'][i][0]), vmax = np.max(results['imgs_test_X'][i][0]))
# plt.subplot(235)
# plt.imshow(results['imgs_test_Y'][i][0])
# plt.subplot(236)
# plt.imshow(results['imgs_test_Pred'][i][0])
# pylab.show()
#weight = joblib.load(('weights'))
weight = cp.load(open('filters.pkl'))
#weight.reshape(weight.shape[0], -1)
#print('Weights : ', weight.keys())
#plt.figure(3)
#plt.subplot(filters_per_layer)
for key in (weight):
if key.startswith('conv_') and key.endswith('_2'):
print(key, len(weight[key]))
for j in range(0, len(weight[key])):
#weight[key].reshape(6)
plt.figure(2)
plt.subplot(1,2,1)
plt.imshow(weight[key][0])
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(weight[key][1])
plt.axis('off')
plt.figure(3)
plt.subplot(1,2,1)
plt.imshow(weight[key][2])
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(weight[key][3])
plt.axis('off')
pylab.show()
def plot_3d(image, threshold=-300):
    # marching_cubes and Poly3DCollection are not imported at the top of this
    # script, so pull them in here for the mesh rendering below.
    from skimage import measure
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    # Position the scan upright,
    # so the head of the patient would be at the top facing the camera
    p = image.transpose(2,1,0)
p = p[:,:,::-1]
verts, faces = measure.marching_cubes(p, threshold)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=0.1)
face_color = [0.5, 0.5, 1]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.show()
#plots = np.zeros(results['imgs_test_Y'].shape[0],results['imgs_test_Y'].shape[2],results['imgs_test_Y'].shape[3])
#print(plots.shape)
#for i in range(0, results['imgs_test_Y'].shape[0]):
#plot_3d(results['imgs_test_Y'][])
####### PLOTTING
# plot_res = results['imgs_test_X'][0][0][0:200, 300:500]
# #*results['imgs_test_Y'][0][0][300:500]
# print('Shape : ', plot_res.shape)
ind = 19
# plt.figure(1)
# plt.title('Lung CT Scan')
# plt.imshow(results['imgs_test_X'][ind][0])
# plt.axis('off')
# #pylab.show()
# Im1 = results['imgs_test_Y'][ind][0]
# Im2 = results['imgs_test_Pred'][ind][0]
# print(dice_np(Im1, Im2))
# plt.figure(2)
# plt.title('Results')
# ax1 = plt.subplot(131)
# plt.imshow(results['imgs_test_X'][ind][0][150:350, 300:500], vmin = np.min(results['imgs_test_X'][ind][0]), vmax = np.max(results['imgs_test_X'][ind][0]))
# ax1.set_title('Region of Interest')
# plt.axis('off')
# ax1 = plt.subplot(132)
# plt.imshow(results['imgs_test_X'][ind][0][150:350, 300:500]*results['imgs_test_Y'][ind][0][150:350, 300:500], vmin = np.min(results['imgs_test_X'][ind][0]), vmax = np.max(results['imgs_test_X'][ind][0]))
# ax1.set_title('Gold Standard Mask')
# plt.axis('off')
# ax2 = plt.subplot(133)
# plt.imshow(results['imgs_test_X'][ind][0][150:350, 300:500]*results['imgs_test_Pred'][ind][0][150:350, 300:500], vmin = np.min(results['imgs_test_X'][ind][0]), vmax = np.max(results['imgs_test_X'][ind][0]))
# ax2.set_title('Predicted Mask')
# plt.axis('off')
# pylab.show()
###### PLOTTING END
# testMasks = np.load('testMasks.npy')
# trainedMasks = np.load('masksTestPredicted.npy')
# testIm = np.load('testImages.npy')
# trainIm = np.load('trainImages.npy')
# trainMasks = np.load('trainMasks.npy')
# print(testMasks.shape)
# print(trainedMasks.shape)
# print(testIm.shape)
# print(trainIm.shape)
# print(trainMasks.shape)
# for i in range(0,testIm.shape[0]):
# plt.figure(1)
# plt.subplot(231)
# plt.imshow(testIm[i][0])
# plt.subplot(232)
# plt.imshow(testMasks[i][0])
# plt.subplot(234)
# plt.imshow(testIm[i][0]*testMasks[i][0])
# plt.subplot(233)
# plt.imshow(trainedMasks[i][0])
# plt.subplot(235)
# plt.imshow(testIm[i][0]*trainedMasks[i][0])
# pylab.show()
# for i in range(0,trainIm.shape[0]):
# plt.figure(1)
# plt.subplot(131)
# plt.imshow(trainIm[i][0])
# plt.subplot(132)
# plt.imshow(trainMasks[i][0])
# plt.subplot(133)
# plt.imshow(trainMasks[i][0]*trainIm[i][0])
# pylab.show()
# print(np.sum(trainIm[0][0]), np.sum(trainIm[1][0]))
# for i in range(0,trainIm.shape[0]):
# plt.figure(2)
# plt.subplot(121)
# plt.imshow(trainIm[i][0])
# plt.subplot(122)
# plt.imshow(trainMasks[i][0]*trainIm[i][0])
# pylab.show()
# for i in range(0,trainedMasks.shape[0]):
# plt.figure(i+1)
# plt.imshow(trainedMasks[i][0])
# pylab.show()
# for i in range(0,testMasks.shape[0]):
# plt.figure(i+1)
# plt.imshow(testMasks[i][0])
# pylab.show()
# plt.figure(1)
# plt.subplot(211)
# plt.plot(t, f(t), 'bo', t2, f(t2), 'k')
# plt.subplot(212)
# plt.plot(t2, np.cos(2*np.pi*t2), 'k')
# pylab.show()
|
|
# http://docs.python.org/2/library/multiprocessing.html
from __future__ import print_function, division
import __common__
(print, print_, print_on, print_off,
rrr, profile, printDBG) = __common__.init(__name__, '[parallel]', DEBUG=False)
# Python
from itertools import izip
from os.path import exists, dirname, split
import multiprocessing
import os
import sys
# Hotspotter
import helpers as util
@profile
def _calculate(func, args):
printDBG('[parallel] * %s calculating...' % (multiprocessing.current_process().name,))
result = func(*args)
#arg_names = func.func_code.co_varnames[:func.func_code.co_argcount]
#arg_list = [n+'='+str(v) for n,v in izip(arg_names, args)]
#arg_str = '\n *** '+str('\n *** '.join(arg_list))
printDBG('[parallel] * %s finished:\n ** %s' %
(multiprocessing.current_process().name,
func.__name__))
return result
@profile
def _worker(input, output):
printDBG('[parallel] START WORKER input=%r output=%r' % (input, output))
for func, args in iter(input.get, 'STOP'):
printDBG('[parallel] worker will calculate %r' % (func))
result = _calculate(func, args)
printDBG('[parallel] worker has calculated %r' % (func))
output.put(result)
#printDBG('[parallel] worker put result in queue.')
#printDBG('[parallel] worker is done input=%r output=%r' % (input, output))
@profile
def parallel_compute(func=None, arg_list=[], num_procs=None, lazy=True, args=None,
common_args=[], output_dir=None):
if args is not None and num_procs is None:
num_procs = args.num_procs
elif num_procs is None:
num_procs = max(1, int(multiprocessing.cpu_count() / 2))
# Generate a list of tasks to send to the parallel processes
task_list = make_task_list(func, arg_list, lazy=lazy,
common_args=common_args, output_dir=output_dir)
nTasks = len(task_list)
if nTasks == 0:
print('[parallel] ... No %s tasks left to compute!' % func.func_name)
return None
# Do not execute small tasks in parallel
if nTasks < num_procs / 2 or nTasks == 1:
num_procs = 1
num_procs = min(num_procs, nTasks)
task_lbl = func.func_name + ': '
try:
ret = parallelize_tasks(task_list, num_procs, task_lbl)
except Exception as ex:
sys.stdout.flush()
print('[parallel!] Problem while parallelizing task: %r' % ex)
print('[parallel!] task_list: ')
for task in task_list:
print(' %r' % (task,))
break
print('[parallel!] common_args = %r' % common_args)
print('[parallel!] num_procs = %r ' % (num_procs,))
print('[parallel!] task_lbl = %r ' % (task_lbl,))
sys.stdout.flush()
raise
return ret
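# Minimal usage sketch (added for illustration, not part of the original
# module). The helper function, file paths and process count below are
# hypothetical; the call follows make_task_list's convention that arg_list[0]
# holds the inputs and arg_list[1] the output paths, zipped positionally into
# one argument tuple per task, with lazy=True skipping any task whose output
# file already exists.
def _example_double_to_file(value, out_fpath):
    # toy task: write value*2 to out_fpath (hypothetical example helper)
    with open(out_fpath, 'w') as fd:
        fd.write(str(value * 2))
    return out_fpath
def _example_parallel_usage():
    values = [1, 2, 3]
    out_fpaths = ['/tmp/ex_out_1.txt', '/tmp/ex_out_2.txt', '/tmp/ex_out_3.txt']
    return parallel_compute(_example_double_to_file, [values, out_fpaths], num_procs=2)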
def get_common_paths(output_fpath_list):
# Takes a list of paths and extracts the common relative paths
dir_list = [dirname(fpath) for fpath in output_fpath_list]
fname_list = [split(fpath)[1] for fpath in output_fpath_list]
unique_dirs = list(set(dir_list))
return unique_dirs, fname_list
@profile
def make_task_list(func, arg_list, lazy=True, common_args=[], output_dir=None):
'''
    The input should always be argument 1
The output should always be argument 2
'''
has_output = len(arg_list) >= 2
append_common = lambda _args: tuple(list(_args) + common_args)
if not (lazy and has_output):
        # does not check existence
task_list = [(func, append_common(_args)) for _args in izip(*arg_list)]
return task_list
if output_dir is None:
# Hackish way of getting an output dir for faster exists computation
output_fpath_list = arg_list[1]
unique_dirs, output_fname_list = get_common_paths(output_fpath_list)
if len(unique_dirs) == 1:
output_dir = unique_dirs[0]
else:
# Less hackish
output_fname_list = arg_list[1]
if output_dir is not None:
        # This is faster than checking for existence individually
# But all the files need to be in the same directory
fname_set = set(os.listdir(output_dir))
exist_list = [fname in fname_set for fname in output_fname_list]
argiter = izip(exist_list, izip(*arg_list))
arg_list2 = [append_common(_args) for bit, _args in argiter if not bit]
else:
        # check existence individually
arg_list2 = [append_common(_args) for _args in izip(*arg_list) if not exists(_args[1])]
task_list = [(func, _args) for _args in iter(arg_list2)]
nSkip = len(zip(*arg_list)) - len(arg_list2)
print('[parallel] Already computed %d %s tasks' % (nSkip, func.func_name))
return task_list
@profile
def parallelize_tasks(task_list, num_procs, task_lbl='', verbose=True):
'''
    Used for embarrassingly parallel tasks, which write output to disk
'''
nTasks = len(task_list)
msg = ('Distributing %d %s tasks to %d processes' % (nTasks, task_lbl, num_procs)
if num_procs > 1 else
'Executing %d %s tasks in serial' % (nTasks, task_lbl))
with util.Timer(msg=msg):
if num_procs > 1:
# Parallelize tasks
return _compute_in_parallel(task_list, num_procs, task_lbl, verbose)
else:
return _compute_in_serial(task_list, task_lbl, verbose)
@profile
def _compute_in_serial(task_list, task_lbl='', verbose=True):
# Serialize Tasks
result_list = []
nTasks = len(task_list)
if verbose:
mark_progress, end_prog = util.progress_func(nTasks, lbl=task_lbl)
# Compute each task
for count, (fn, args) in enumerate(task_list):
mark_progress(count)
#sys.stdout.flush()
result = fn(*args)
result_list.append(result)
end_prog()
else:
# Compute each task
for (fn, args) in iter(task_list):
result = fn(*args)
result_list.append(result)
print('[parallel] ... done')
return result_list
@profile
def _compute_in_parallel(task_list, num_procs, task_lbl='', verbose=True):
'''
Input: task list: [ (fn, args), ... ]
'''
task_queue = multiprocessing.Queue()
done_queue = multiprocessing.Queue()
nTasks = len(task_list)
# queue tasks
for task in iter(task_list):
task_queue.put(task)
# start processes
proc_list = []
for i in xrange(num_procs):
printDBG('[parallel] creating process %r' % (i,))
proc = multiprocessing.Process(target=_worker, args=(task_queue, done_queue))
proc.daemon = True
proc.start()
proc_list.append(proc)
# wait for results
printDBG('[parallel] waiting for results')
sys.stdout.flush()
result_list = []
if verbose:
mark_progress, end_prog = util.progress_func(nTasks, lbl=task_lbl, spacing=num_procs)
for count in xrange(len(task_list)):
mark_progress(count)
printDBG('[parallel] done_queue.get()')
result = done_queue.get()
result_list.append(result)
end_prog()
else:
for i in xrange(nTasks):
done_queue.get()
print('[parallel] ... done')
printDBG('[parallel] stopping children')
# stop children processes
for i in xrange(num_procs):
task_queue.put('STOP')
for proc in proc_list:
proc.join()
return result_list
#import time
#time.sleep(.01)
|
|
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for gsutil UI controller, UIThread and MainThreadUIQueue."""
from __future__ import absolute_import
from collections import deque
import Queue
import sys
import threading
import time
from gslib.metrics import LogPerformanceSummaryParams
from gslib.metrics import LogRetryableError
from gslib.parallelism_framework_util import ZERO_TASKS_TO_DO_ARGUMENT
from gslib.thread_message import FileMessage
from gslib.thread_message import FinalMessage
from gslib.thread_message import MetadataMessage
from gslib.thread_message import PerformanceSummaryMessage
from gslib.thread_message import ProducerThreadMessage
from gslib.thread_message import ProgressMessage
from gslib.thread_message import RetryableErrorMessage
from gslib.thread_message import SeekAheadMessage
from gslib.thread_message import StatusMessage
from gslib.util import DecimalShort
from gslib.util import HumanReadableWithDecimalPlaces
from gslib.util import MakeHumanReadable
from gslib.util import PrettyTime
class EstimationSource(object):
"""enum for total size source."""
# Integer to indicate total size came from the final ProducerThreadMessage.
# It has priority over all other total_size sources.
PRODUCER_THREAD_FINAL = 1
# Integer to indicate total size came from SeekAheadThread.
# It has priority over self.SEEK_AHEAD_THREAD and over
# self.INDIVIDUAL_MESSAGES.
SEEK_AHEAD_THREAD = 2
# Integer to indicate total size came from a ProducerThread estimation.
# It has priority over self.INDIVIDUAL_MESSAGES.
PRODUCER_THREAD_ESTIMATE = 3
# Stores the actual source from total_size. We start from FileMessages or
# MetadataMessages.
INDIVIDUAL_MESSAGES = 4
# Note: this priority based model was used in case we add new sources for
# total_size in the future. It also allows us to search for smaller numbers
# (larger priorities) rather than having to list those with higher priority.
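# Illustrative example (not from gsutil itself): because lower integers denote
# higher-priority sources, the managers below only overwrite an estimate when
# the incoming source is at least as authoritative as the current one, e.g.:
#
#   if self.total_size_source >= EstimationSource.SEEK_AHEAD_THREAD:
#     self.total_size_source = EstimationSource.SEEK_AHEAD_THREAD
#     self.total_size = status_message.size
#
# so a SeekAheadThread estimate replaces per-message tallies (priority 4) but
# never clobbers a final ProducerThread count (priority 1).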
def BytesToFixedWidthString(num_bytes, decimal_places=1):
"""Adjusts proper width for printing num_bytes in readable format.
Args:
num_bytes: The number of bytes we must display.
decimal_places: The standard number of decimal places.
Returns:
String of fixed width representing num_bytes.
"""
human_readable = HumanReadableWithDecimalPlaces(num_bytes,
decimal_places=decimal_places)
number_format = human_readable.split()
if int(round(float(number_format[0]))) >= 1000:
# If we are in the [1000:1024) range for the whole part of the number,
# we must remove the decimal part.
last_character = len(number_format[0]) - decimal_places - 1
number_format[0] = number_format[0][:last_character]
return '%9s' % (' '.join(number_format))
class StatusMessageManager(object):
"""General manager for common functions shared by data and metadata managers.
This subclass has the responsibility of having a common constructor and the
same handler for SeekAheadMessages and ProducerThreadMessages.
"""
class _ThroughputInformation(object):
"""Class that contains all information needed for throughput calculation.
This _ThroughputInformation is used to track progress and time at several
points of our operation.
"""
def __init__(self, progress, report_time):
"""Constructor of _ThroughputInformation.
Args:
progress: The current progress, in bytes/second or objects/second.
report_time: Float representing when progress was reported (seconds
since Epoch).
"""
self.progress = progress
self.time = report_time
def __init__(self, update_message_period=1, update_spinner_period=0.6,
sliding_throughput_period=5, first_throughput_latency=10,
quiet_mode=False, custom_time=None, verbose=False,
console_width=80):
"""Instantiates a StatusMessageManager.
Args:
update_message_period: Minimum period for refreshing and displaying
new information. A non-positive value will ignore
any time restrictions imposed by this field, but
it will affect throughput and time remaining
estimations.
update_spinner_period: Minimum period for refreshing and displaying the
spinner. A non-positive value will ignore
any time restrictions imposed by this field.
sliding_throughput_period: Sliding period for throughput calculation. A
non-positive value will make it impossible to
calculate the throughput.
first_throughput_latency: Minimum waiting time before actually displaying
throughput info. A non-positive value will
ignore any time restrictions imposed by this
field.
quiet_mode: If True, do not print status messages (but still process
them for analytics reporting as necessary).
custom_time: If a custom start_time is desired. Used for testing.
verbose: Tells whether or not the operation is on verbose mode.
console_width: Width to display on console. This should not adjust the
visual output, just the space padding. For proper
visualization, we recommend setting this field to at least
80.
"""
self.update_message_period = update_message_period
self.update_spinner_period = update_spinner_period
self.sliding_throughput_period = sliding_throughput_period
self.first_throughput_latency = first_throughput_latency
self.quiet_mode = quiet_mode
self.custom_time = custom_time
self.verbose = verbose
self.console_width = console_width
# Initial estimation source for number of objects and total size
# is through individual FileMessages or individual MetadataMessages,
# depending on the StatusMessageManager superclass.
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects = 0
# Only used on data operations. Will remain 0 for metadata operations.
self.total_size = 0
# Time at last info update displayed.
self.refresh_message_time = (self.custom_time if self.custom_time
else time.time())
self.start_time = self.refresh_message_time
# Time at last spinner update.
self.refresh_spinner_time = self.refresh_message_time
# Measured in objects/second or bytes/second, depending on the superclass.
self.throughput = 0.0
# Deque of _ThroughputInformation to help with throughput calculation.
self.old_progress = deque()
self.last_progress_time = 0
self.spinner_char_list = ['/', '-', '\\', '|']
self.current_spinner_index = 0
self.objects_finished = 0
self.num_objects = 0 # Number of objects being processed
# This overrides time constraints for updating and displaying
# important information, such as having finished to process an object.
self.object_report_change = False
self.final_message = False
def GetSpinner(self):
"""Returns the current spinner character.
Returns:
char_to_print: Char to be printed as the spinner
"""
return self.spinner_char_list[self.current_spinner_index]
def UpdateSpinner(self):
"""Updates the current spinner character."""
self.current_spinner_index = ((self.current_spinner_index + 1) %
len(self.spinner_char_list))
def _HandleProducerThreadMessage(self, status_message):
"""Handles a ProducerThreadMessage.
Args:
status_message: The ProducerThreadMessage to be processed.
"""
if status_message.finished:
# This means this was a final ProducerThreadMessage.
if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_FINAL:
self.num_objects_source = EstimationSource.PRODUCER_THREAD_FINAL
self.num_objects = status_message.num_objects
if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_FINAL and
status_message.size):
self.total_size_source = EstimationSource.PRODUCER_THREAD_FINAL
self.total_size = status_message.size
return
if self.num_objects_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE:
self.num_objects_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
self.num_objects = status_message.num_objects
if (self.total_size_source >= EstimationSource.PRODUCER_THREAD_ESTIMATE and
status_message.size):
self.total_size_source = EstimationSource.PRODUCER_THREAD_ESTIMATE
self.total_size = status_message.size
def _HandleSeekAheadMessage(self, status_message, stream):
"""Handles a SeekAheadMessage.
Args:
status_message: The SeekAheadMessage to be processed.
stream: Stream to print messages.
"""
estimate_message = ('Estimated work for this command: objects: %s' %
status_message.num_objects)
if status_message.size:
estimate_message += (', total size: %s' %
MakeHumanReadable(status_message.size))
if self.total_size_source >= EstimationSource.SEEK_AHEAD_THREAD:
self.total_size_source = EstimationSource.SEEK_AHEAD_THREAD
self.total_size = status_message.size
if self.num_objects_source >= EstimationSource.SEEK_AHEAD_THREAD:
self.num_objects_source = EstimationSource.SEEK_AHEAD_THREAD
self.num_objects = status_message.num_objects
estimate_message += '\n'
if not self.quiet_mode:
stream.write(estimate_message)
def _HandlePerformanceSummaryMessage(self, status_message):
"""Handles a PerformanceSummaryMessage.
Args:
status_message: The PerformanceSummaryMessage to be processed.
"""
LogPerformanceSummaryParams(uses_slice=status_message.uses_slice)
def ShouldTrackThroughput(self, cur_time):
"""Decides whether enough time has passed to start tracking throughput.
Args:
cur_time: current time.
Returns:
Whether or not we should track the throughput.
"""
return cur_time - self.start_time >= self.first_throughput_latency
def ShouldPrintProgress(self, cur_time):
"""Decides whether or not it is time for printing a new progress.
Args:
cur_time: current time.
Returns:
Whether or not we should print the progress.
"""
sufficient_time_elapsed = (
cur_time - self.refresh_message_time >= self.update_message_period)
# Don't report if we aren't actually going to do anything (for example,
# an rsync that will sync 0 objects).
nonzero_report = self.num_objects
return (sufficient_time_elapsed or self.object_report_change) and (
nonzero_report)
def ShouldPrintSpinner(self, cur_time):
"""Decides whether or not it is time for updating the spinner character.
Args:
cur_time: Current time.
Returns:
Whether or not we should update and print the spinner.
"""
return (cur_time - self.refresh_spinner_time >
self.update_spinner_period and self.total_size)
def PrintSpinner(self, stream=sys.stderr):
"""Prints a spinner character.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
self.UpdateSpinner()
if not self.quiet_mode:
stream.write(self.GetSpinner() + '\r')
def UpdateThroughput(self, cur_time, cur_progress):
"""Updates throughput if the required period for calculation has passed.
The throughput is calculated by taking all the progress (objects or bytes)
processed within the last sliding_throughput_period seconds, and dividing
that by the time period between the oldest progress time within that range
and the last progress measurement, which are defined by oldest_progress[1]
and last_progress_time, respectively. Among the pros of this approach,
a connection break or a sudden change in throughput is quickly noticeable.
Furthermore, using the last throughput measurement rather than the current
time allows us to have a better estimation of the actual throughput.
Args:
cur_time: Current time to check whether or not it is time for a new
throughput measurement.
cur_progress: The current progress, in number of objects finished or in
bytes.
"""
while (len(self.old_progress) > 1 and
cur_time - self.old_progress[0].time >
self.sliding_throughput_period):
self.old_progress.popleft()
if not self.old_progress:
return
oldest_progress = self.old_progress[0]
if self.last_progress_time == oldest_progress.time:
self.throughput = 0
return
# If old-progress is not empty and the time of oldest_progress does not
# match the last_progress_time, we can safely calculate the throughput.
self.throughput = ((cur_progress - oldest_progress.progress) /
(self.last_progress_time -
oldest_progress.time))
# Just to avoid -0.00 B/s.
self.throughput = max(0, self.throughput)
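  # Worked example (illustrative numbers only): with sliding_throughput_period
  # of 5 seconds, suppose old_progress holds a single _ThroughputInformation
  # entry (progress=100, time=10.0) and last_progress_time is 12.0. A call with
  # cur_time=12.5 and cur_progress=600 keeps that entry in the window
  # (12.5 - 10.0 <= 5) and yields
  #   throughput = (600 - 100) / (12.0 - 10.0) = 250.0 bytes (or objects) per second.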
def PrintFinalSummaryMessage(self, stream=sys.stderr):
"""Prints a final message to indicate operation succeeded.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
string_to_print = ('Operation completed over %s objects'
% DecimalShort(self.num_objects))
if self.total_size:
string_to_print += (
'/%s' % HumanReadableWithDecimalPlaces(self.total_size))
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(('\n' + string_to_print + '.' +
(max(remaining_width, 0) * ' ') + '\n'))
class MetadataManager(StatusMessageManager):
"""Manages shared state for metadata operations.
This manager is specific for metadata operations. Among its main functions,
it receives incoming StatusMessages, storing all necessary data
about the current and past states of the system necessary to display to the
UI. It also provides methods for calculating metrics such as throughput and
estimated time remaining. Finally, it provides methods for displaying messages
to the UI.
"""
def __init__(self, update_message_period=1, update_spinner_period=0.6,
sliding_throughput_period=5, first_throughput_latency=10,
quiet_mode=False, custom_time=None, verbose=False,
console_width=80):
# pylint: disable=g-doc-args
"""Instantiates a MetadataManager.
See argument documentation in StatusMessageManager base class.
"""
# pylint: enable=g-doc-args
super(MetadataManager, self).__init__(
update_message_period=update_message_period,
update_spinner_period=update_spinner_period,
sliding_throughput_period=sliding_throughput_period,
first_throughput_latency=first_throughput_latency,
quiet_mode=quiet_mode, custom_time=custom_time, verbose=verbose,
console_width=console_width)
def GetProgress(self):
"""Gets the progress for a MetadataManager.
Returns:
The number of finished objects.
"""
return self.objects_finished
def _HandleMetadataMessage(self, status_message):
"""Handles a MetadataMessage.
Args:
status_message: The MetadataMessage to be processed.
"""
self.objects_finished += 1
if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects += 1
# Ensures we print periodic progress, and that we send a final message.
self.object_report_change = True
self.last_progress_time = status_message.time
if (self.objects_finished == self.num_objects and
self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
self.final_message = True
def ProcessMessage(self, status_message, stream):
"""Processes a message from _MainThreadUIQueue or _UIThread.
Args:
status_message: The StatusMessage item to be processed.
stream: Stream to print messages.
"""
self.object_report_change = False
if isinstance(status_message, SeekAheadMessage):
self._HandleSeekAheadMessage(status_message, stream)
elif isinstance(status_message, ProducerThreadMessage):
self._HandleProducerThreadMessage(status_message)
elif isinstance(status_message, MetadataMessage):
self._HandleMetadataMessage(status_message)
elif isinstance(status_message, RetryableErrorMessage):
LogRetryableError(status_message)
elif isinstance(status_message, PerformanceSummaryMessage):
self._HandlePerformanceSummaryMessage(status_message)
self.old_progress.append(
self._ThroughputInformation(self.objects_finished, status_message.time))
def PrintProgress(self, stream=sys.stderr):
"""Prints progress and throughput/time estimation.
Prints total number of objects and number of finished objects with the
percentage of work done, potentially including the throughput
(in objects/second) and estimated time remaining.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
# Time to update all information
total_remaining = self.num_objects - self.objects_finished
if self.throughput:
time_remaining = total_remaining / self.throughput
else:
time_remaining = None
char_to_print = self.GetSpinner()
if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
# An example of objects_completed here would be ' [2/3 objects]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
DecimalShort(self.num_objects) + ' objects]')
if self.num_objects == self.objects_finished:
percentage = '100'
else:
percentage = ('%3d' % min(99, int(100 * float(self.objects_finished) /
self.num_objects)))
percentage_completed = percentage + '% Done'
else:
# An example of objects_completed here would be ' [2 objects]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) +
' objects]')
percentage_completed = ''
if (self.refresh_message_time - self.start_time >
self.first_throughput_latency):
# Should also include throughput.
# An example of throughput here would be '2 objects/s'
throughput = '%.2f objects/s' % self.throughput
if (self.num_objects_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
and self.throughput):
# Should also include time remaining.
# An example of time remaining would be ' ETA 00:00:11'.
time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
else:
time_remaining_str = ''
else:
throughput = ''
time_remaining_str = ''
format_str = ('{char_to_print} {objects_completed} {percentage_completed}'
' {throughput} {time_remaining_str}')
string_to_print = format_str.format(
char_to_print=char_to_print, objects_completed=objects_completed,
percentage_completed=percentage_completed, throughput=throughput,
time_remaining_str=time_remaining_str)
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')
def CanHandleMessage(self, status_message):
"""Determines whether this manager is suitable for handling status_message.
Args:
status_message: The StatusMessage object to be analyzed.
Returns:
True if this message can be properly handled by this manager,
False otherwise.
"""
if isinstance(status_message, (SeekAheadMessage, ProducerThreadMessage,
MetadataMessage, FinalMessage,
RetryableErrorMessage,
PerformanceSummaryMessage)):
return True
return False
class DataManager(StatusMessageManager):
"""Manages shared state for data operations.
This manager is specific for data operations. Among its main functions,
it receives incoming StatusMessages, storing all necessary data
about the current and past states of the system necessary to display to the
UI. It also provides methods for calculating metrics such as throughput and
estimated time remaining. Finally, it provides methods for displaying messages
to the UI.
"""
class _ProgressInformation(object):
"""Class that contains all progress information needed for a given file.
This _ProgressInformation is used as the value associated with a file_name
in the dict that stores the information about all processed files.
"""
def __init__(self, size):
"""Constructor of _ProgressInformation.
Args:
size: The total size of the file.
"""
# Sum of all progress obtained in this operation.
self.new_progress_sum = 0
# Sum of all progress from previous operations (mainly for resuming
# uploads or resuming downloads).
self.existing_progress_sum = 0
# Dict for tracking the progress for each individual component. Key is
# of the form (component_num, dst_url) and correspondent element is a
# tuple which stores the current progress obtained from this operation,
# and the progress obtained from previous operations.
self.dict = {}
# The total size for the file
self.size = size
def __init__(self, update_message_period=1, update_spinner_period=0.6,
sliding_throughput_period=5, first_throughput_latency=10,
quiet_mode=False, custom_time=None, verbose=False,
console_width=None):
# pylint: disable=g-doc-args
"""Instantiates a DataManager.
See argument documentation in StatusMessageManager base class.
"""
    # pylint: enable=g-doc-args
super(DataManager, self).__init__(
update_message_period=update_message_period,
update_spinner_period=update_spinner_period,
sliding_throughput_period=sliding_throughput_period,
first_throughput_latency=first_throughput_latency,
quiet_mode=quiet_mode, custom_time=custom_time, verbose=verbose,
console_width=console_width)
self.first_item = True
self.total_progress = 0 # Sum of progress for all threads.
self.new_progress = 0
self.existing_progress = 0
# Dict containing individual progress for each file. Key is filename
# (from src_url). It maps to a _ProgressInformation object.
self.individual_file_progress = {}
self.component_total = 0
self.finished_components = 0
self.existing_components = 0
def GetProgress(self):
"""Gets the progress for a DataManager.
Returns:
The number of processed bytes in this operation.
"""
return self.new_progress
def _HandleFileDescription(self, status_message):
"""Handles a FileMessage that describes a file.
Args:
status_message: the FileMessage to be processed.
"""
if not status_message.finished:
# File started.
if self.first_item and not self.custom_time:
# Set initial time.
self.refresh_message_time = status_message.time
self.start_time = self.refresh_message_time
self.last_throughput_time = self.refresh_message_time
self.first_item = False
# Gets file name (from src_url).
file_name = status_message.src_url.url_string
status_message.size = status_message.size if status_message.size else 0
# Creates a new entry on individual_file_progress.
self.individual_file_progress[file_name] = (
self._ProgressInformation(status_message.size))
if self.num_objects_source >= EstimationSource.INDIVIDUAL_MESSAGES:
# This ensures the file has not been counted on SeekAheadThread or
# in ProducerThread.
self.num_objects_source = EstimationSource.INDIVIDUAL_MESSAGES
self.num_objects += 1
if self.total_size_source >= EstimationSource.INDIVIDUAL_MESSAGES:
# This ensures the file size has not been counted on SeekAheadThread or
# in ProducerThread.
self.total_size_source = EstimationSource.INDIVIDUAL_MESSAGES
self.total_size += status_message.size
self.object_report_change = True
else:
# File finished.
self.objects_finished += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
total_bytes_transferred = (file_progress.new_progress_sum +
file_progress.existing_progress_sum)
# Ensures total_progress has the right value.
self.total_progress += file_progress.size - total_bytes_transferred
self.new_progress += file_progress.size - total_bytes_transferred
self.last_progress_time = status_message.time
# Deleting _ProgressInformation object to save memory.
del self.individual_file_progress[file_name]
self.object_report_change = True
if (self.objects_finished == self.num_objects and
self.num_objects_source == EstimationSource.PRODUCER_THREAD_FINAL):
self.final_message = True
def _IsFile(self, file_message):
"""Tells whether or not this FileMessage represent a file.
This is needed because FileMessage is used by both files and components.
Args:
file_message: The FileMessage to be analyzed.
Returns:
Whether or not this represents a file.
"""
message_type = file_message.message_type
return (message_type == FileMessage.FILE_DOWNLOAD or
message_type == FileMessage.FILE_UPLOAD or
message_type == FileMessage.FILE_CLOUD_COPY or
message_type == FileMessage.FILE_DAISY_COPY or
message_type == FileMessage.FILE_LOCAL_COPY or
message_type == FileMessage.FILE_REWRITE or
message_type == FileMessage.FILE_HASH)
def _HandleComponentDescription(self, status_message):
"""Handles a FileMessage that describes a component.
Args:
status_message: The FileMessage to be processed.
"""
if (status_message.message_type == FileMessage.EXISTING_COMPONENT and
not status_message.finished):
# Existing component: have to ensure total_progress accounts for it.
self.existing_components += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
key = (status_message.component_num, status_message.dst_url)
file_progress.dict[key] = (0, status_message.size)
file_progress.existing_progress_sum += status_message.size
self.total_progress += status_message.size
self.existing_progress += status_message.size
elif ((status_message.message_type == FileMessage.COMPONENT_TO_UPLOAD or
status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD)):
if not status_message.finished:
# Component started.
self.component_total += 1
if status_message.message_type == FileMessage.COMPONENT_TO_DOWNLOAD:
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
file_progress.existing_progress_sum += (
status_message.bytes_already_downloaded)
key = (status_message.component_num, status_message.dst_url)
file_progress.dict[key] = (0, status_message.bytes_already_downloaded)
self.total_progress += status_message.bytes_already_downloaded
self.existing_progress += status_message.bytes_already_downloaded
else:
# Component finished.
self.finished_components += 1
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
key = (status_message.component_num, status_message.dst_url)
last_update = (
file_progress.dict[key] if key in file_progress.dict else (0, 0))
self.total_progress += status_message.size - sum(last_update)
self.new_progress += status_message.size - sum(last_update)
self.last_progress_time = status_message.time
file_progress.new_progress_sum += (status_message.size -
sum(last_update))
file_progress.dict[key] = (status_message.size - last_update[1],
last_update[1])
def _HandleProgressMessage(self, status_message):
"""Handles a ProgressMessage that tracks progress of a file or component.
Args:
status_message: The ProgressMessage to be processed.
"""
# Retrieving index and dict for this file.
file_name = status_message.src_url.url_string
file_progress = self.individual_file_progress[file_name]
# Retrieves last update ((0,0) if no previous update) for this file or
# component. To ensure uniqueness (among components),
# we use a (component_num, dst_url) tuple as our key.
key = (status_message.component_num, status_message.dst_url)
last_update = (
file_progress.dict[key] if key in file_progress.dict else (0, 0))
status_message.processed_bytes -= last_update[1]
file_progress.new_progress_sum += (
status_message.processed_bytes - last_update[0])
# Updates total progress with new update from component.
self.total_progress += status_message.processed_bytes - last_update[0]
self.new_progress += status_message.processed_bytes - last_update[0]
# Updates file_progress.dict on component's key.
file_progress.dict[key] = (status_message.processed_bytes, last_update[1])
self.last_progress_time = status_message.time
def ProcessMessage(self, status_message, stream):
"""Processes a message from _MainThreadUIQueue or _UIThread.
Args:
status_message: The StatusMessage item to be processed.
stream: Stream to print messages. Here only for SeekAheadThread
"""
self.object_report_change = False
if isinstance(status_message, ProducerThreadMessage):
# ProducerThread info.
self._HandleProducerThreadMessage(status_message)
elif isinstance(status_message, SeekAheadMessage):
# SeekAheadThread info.
self._HandleSeekAheadMessage(status_message, stream)
elif isinstance(status_message, FileMessage):
if self._IsFile(status_message):
# File info.
self._HandleFileDescription(status_message)
else:
# Component info.
self._HandleComponentDescription(status_message)
LogPerformanceSummaryParams(file_message=status_message)
elif isinstance(status_message, ProgressMessage):
# Progress info.
self._HandleProgressMessage(status_message)
elif isinstance(status_message, RetryableErrorMessage):
LogRetryableError(status_message)
elif isinstance(status_message, PerformanceSummaryMessage):
self._HandlePerformanceSummaryMessage(status_message)
self.old_progress.append(
self._ThroughputInformation(self.new_progress, status_message.time))
def PrintProgress(self, stream=sys.stderr):
"""Prints progress and throughput/time estimation.
If a ProducerThreadMessage or SeekAheadMessage has been provided,
it outputs the number of files completed, number of total files,
the current progress, the total size, and the percentage it
represents.
If none of those have been provided, it only includes the number of files
completed, the current progress and total size (which might be updated),
with no percentage as we do not know if more files are coming.
It may also include time estimation (available only given
ProducerThreadMessage or SeekAheadMessage provided) and throughput. For that
to happen, there is an extra condition of at least first_throughput_latency
seconds having been passed since the UIController started, and that
either the ProducerThread or the SeekAheadThread have estimated total
number of files and total size.
Args:
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
"""
# Time to update all information.
total_remaining = self.total_size - self.total_progress
if self.throughput:
time_remaining = total_remaining / self.throughput
else:
time_remaining = None
char_to_print = self.GetSpinner()
if self.num_objects_source <= EstimationSource.SEEK_AHEAD_THREAD:
# An example of objects_completed here would be ' [2/3 files]'.
objects_completed = ('[' + DecimalShort(self.objects_finished) + '/' +
DecimalShort(self.num_objects) + ' files]')
else:
# An example of objects_completed here would be ' [2 files]'.
objects_completed = '[' + DecimalShort(self.objects_finished) + ' files]'
# An example of bytes_progress would be '[101.0 MiB/1.0 GiB]'.
bytes_progress = (
'[%s/%s]' % (BytesToFixedWidthString(self.total_progress),
BytesToFixedWidthString(self.total_size)))
if self.total_size_source <= EstimationSource.SEEK_AHEAD_THREAD:
if self.num_objects == self.objects_finished:
percentage = '100'
else:
percentage = ('%3d' % min(99, int(100 * float(self.total_progress) /
self.total_size)))
percentage_completed = percentage + '% Done'
else:
percentage_completed = ''
if (self.refresh_message_time - self.start_time >
self.first_throughput_latency):
# Should also include throughput.
# An example of throughput here would be ' 82.3 MiB/s'
throughput = BytesToFixedWidthString(self.throughput) + '/s'
if (self.total_size_source <= EstimationSource.PRODUCER_THREAD_ESTIMATE
and self.throughput):
# Should also include time remaining.
# An example of time remaining would be ' ETA 00:00:11'.
time_remaining_str = 'ETA ' + PrettyTime(time_remaining)
else:
time_remaining_str = ''
else:
throughput = ''
time_remaining_str = ''
format_str = ('{char_to_print} {objects_completed}{bytes_progress}'
' {percentage_completed} {throughput} {time_remaining_str}')
string_to_print = format_str.format(
char_to_print=char_to_print, objects_completed=objects_completed,
bytes_progress=bytes_progress,
percentage_completed=percentage_completed,
throughput=throughput, time_remaining_str=time_remaining_str)
remaining_width = self.console_width - len(string_to_print)
if not self.quiet_mode:
stream.write(string_to_print + (max(remaining_width, 0) * ' ') + '\r')
def CanHandleMessage(self, status_message):
"""Determines whether this manager is suitable for handling status_message.
Args:
status_message: The StatusMessage object to be analyzed.
Returns:
True if this message can be properly handled by this manager,
False otherwise.
"""
if isinstance(status_message, (SeekAheadMessage, ProducerThreadMessage,
FileMessage, ProgressMessage, FinalMessage,
RetryableErrorMessage,
PerformanceSummaryMessage)):
return True
return False
class UIController(object):
"""Controller UI class to integrate _MainThreadUIQueue and _UIThread.
  This class receives messages from _MainThreadUIQueue and _UIThread and sends
  them to an appropriate manager, which then processes and stores data about
  them.
"""
def __init__(self, update_message_period=1, update_spinner_period=0.6,
sliding_throughput_period=5, first_throughput_latency=10,
quiet_mode=False, custom_time=None, verbose=False,
dump_status_messages_file=None):
"""Instantiates a UIController.
Args:
update_message_period: Minimum period for refreshing and displaying
new information. A non-positive value will ignore any time
restrictions imposed by this field.
update_spinner_period: Minimum period for refreshing and displaying the
spinner. A non-positive value will ignore any time restrictions
imposed by this field.
sliding_throughput_period: Sliding period for throughput calculation. A
non-positive value will make it impossible to calculate the
throughput.
first_throughput_latency: Minimum waiting time before actually displaying
throughput info. A non-positive value will ignore any time
restrictions imposed by this field.
quiet_mode: If True, do not print status messages (but still process
them for analytics reporting as necessary).
custom_time: If a custom start_time is desired. Used for testing.
verbose: Tells whether or not the operation is on verbose mode.
dump_status_messages_file: File path for logging all received status
messages, for debugging purposes.
"""
self.verbose = verbose
self.update_message_period = update_message_period
self.update_spinner_period = update_spinner_period
self.sliding_throughput_period = sliding_throughput_period
self.first_throughput_latency = first_throughput_latency
self.manager = None
self.quiet_mode = quiet_mode
self.custom_time = custom_time
self.console_width = 80 # Console width. Passed to manager.
# List storing all estimation messages from SeekAheadThread or
# ProducerThread. This is used when we still do not know which manager to
# use.
self.early_estimation_messages = []
self.printed_final_message = False
self.dump_status_message_fp = None
if dump_status_messages_file:
self.dump_status_message_fp = open(dump_status_messages_file, 'ab')
def _HandleMessage(self, status_message, stream, cur_time=None):
"""Processes a message, updates throughput and prints progress.
Args:
status_message: Message to be processed. Could be None if UIThread cannot
retrieve message from status_queue.
stream: stream to print messages. Usually sys.stderr, but customizable
for testing.
cur_time: Message time. Used to determine if it is time to refresh
output, or calculate throughput.
"""
self.manager.ProcessMessage(status_message, stream)
if self.manager.ShouldPrintProgress(cur_time):
if self.manager.ShouldTrackThroughput(cur_time):
self.manager.UpdateThroughput(cur_time, self.manager.GetProgress())
self.manager.PrintProgress(stream)
self.manager.refresh_message_time = cur_time
if self.manager.ShouldPrintSpinner(cur_time):
self.manager.PrintSpinner(stream)
self.manager.refresh_spinner_time = cur_time
if ((isinstance(status_message, FinalMessage) or
self.manager.final_message)
and self.manager.num_objects
and not self.printed_final_message):
self.printed_final_message = True
LogPerformanceSummaryParams(
num_objects_transferred=self.manager.num_objects)
self.manager.PrintFinalSummaryMessage(stream)
def Call(self, status_message, stream, cur_time=None):
"""Coordinates UI manager and calls appropriate function to handle message.
Args:
status_message: Message to be processed. Could be None if UIThread cannot
retrieve message from status_queue.
stream: Stream to print messages. Usually sys.stderr, but customizable
for testing.
cur_time: Message time. Used to determine if it is time to refresh
output, or calculate throughput.
"""
if not isinstance(status_message, StatusMessage):
if status_message == ZERO_TASKS_TO_DO_ARGUMENT and not self.manager:
# Create a manager to handle early estimation messages before returning.
self.manager = (
DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode, custom_time=self.custom_time,
verbose=self.verbose, console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream,
cur_time=estimation_message.time)
return
if self.dump_status_message_fp:
# TODO: Add Unicode support to string methods on message classes.
      # Currently, dump will fail with a UnicodeEncodeError if the message
# class contains a Unicode attribute.
self.dump_status_message_fp.write(str(status_message))
self.dump_status_message_fp.write('\n')
if not cur_time:
cur_time = status_message.time
if not self.manager:
if (isinstance(status_message, SeekAheadMessage) or
isinstance(status_message, ProducerThreadMessage)):
self.early_estimation_messages.append(status_message)
return
elif isinstance(status_message, MetadataMessage):
self.manager = (
MetadataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode, custom_time=self.custom_time,
verbose=self.verbose, console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
else:
self.manager = (
DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
quiet_mode=self.quiet_mode, custom_time=self.custom_time,
verbose=self.verbose, console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
if not self.manager.CanHandleMessage(status_message):
if (isinstance(status_message, FileMessage) or
isinstance(status_message, ProgressMessage)):
# We have to create a DataManager to handle this data message. This is
# to avoid a possible race condition where MetadataMessages are sent
# before data messages. As such, this means that the DataManager has
# priority, and whenever a data message is received, we ignore the
# MetadataManager if one exists, and start a DataManager from scratch.
# This can be done because we do not need any MetadataMessages to
# properly handle a data operation. It could be useful to send the
# early estimation messages, if those are available.
self.manager = (
DataManager(
update_message_period=self.update_message_period,
update_spinner_period=self.update_spinner_period,
sliding_throughput_period=self.sliding_throughput_period,
first_throughput_latency=self.first_throughput_latency,
custom_time=self.custom_time, verbose=self.verbose,
console_width=self.console_width))
for estimation_message in self.early_estimation_messages:
self._HandleMessage(estimation_message, stream, cur_time)
else:
# No need to handle this message.
return
self._HandleMessage(status_message, stream, cur_time)
class MainThreadUIQueue(object):
"""Handles status display and processing in the main thread / master process.
This class emulates a queue to cover main-thread activity before or after
Apply, as well as for the single-threaded, single-process case, i.e.,
_SequentialApply. When multiple threads or processes are used during calls
to Apply, the main thread is waiting for work to complete, and this queue
must remain unused until Apply returns. Code producing arguments for
Apply (such as the NameExpansionIterator) must not post messages to this
queue to avoid race conditions with the UIThread.
This class sends the messages it receives to UIController, which
decides the correct course of action.
"""
def __init__(self, stream, ui_controller):
"""Instantiates a _MainThreadUIQueue.
Args:
stream: Stream for printing messages.
ui_controller: UIController to manage messages.
"""
super(MainThreadUIQueue, self).__init__()
self.ui_controller = ui_controller
self.stream = stream
# pylint: disable=invalid-name, unused-argument
def put(self, status_message, timeout=None):
self.ui_controller.Call(status_message, self.stream)
# pylint: enable=invalid-name, unused-argument
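# Illustrative sketch (not part of gsutil itself): because MainThreadUIQueue
# only emulates the queue interface, callers hand it to the same code paths
# that would otherwise receive a real status queue and simply call put(). The
# argument names below are hypothetical placeholders.
def _ExampleMainThreadUIQueueUsage(stream, ui_controller, status_message):
  queue_like = MainThreadUIQueue(stream, ui_controller)
  queue_like.put(status_message)  # Delegates straight to UIController.Call.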
class UIThread(threading.Thread):
"""Responsible for centralized printing across multiple processes/threads.
This class pulls status messages that are posted to the centralized status
queue and coordinates displaying status and progress to the user. It is
used only during calls to _ParallelApply, which in turn is called only when
multiple threads and/or processes are used.
This class sends the messages it receives to UIController, which
decides the correct course of action.
"""
def __init__(self, status_queue, stream, ui_controller, timeout=1):
"""Instantiates a _UIThread.
Args:
status_queue: Queue for reporting status updates.
stream: Stream for printing messages.
ui_controller: UI controller to manage messages.
timeout: Timeout for getting a message.
"""
super(UIThread, self).__init__()
self.status_queue = status_queue
self.stream = stream
self.timeout = timeout
self.ui_controller = ui_controller
self.start()
def run(self):
try:
while True:
try:
status_message = self.status_queue.get(timeout=self.timeout)
except Queue.Empty:
status_message = None
continue
self.ui_controller.Call(status_message, self.stream)
if status_message == ZERO_TASKS_TO_DO_ARGUMENT:
# Item from MainThread to indicate we are done.
break
except Exception as e:  # pylint:disable=broad-except
self.stream.write('Exception in UIThread: %s\n' % e)
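# Illustrative sketch (not part of gsutil itself): UIThread starts itself in
# __init__, so wiring it up amounts to construction plus posting the sentinel
# that tells it to stop. Queue and ZERO_TASKS_TO_DO_ARGUMENT are the names
# already used above; the arguments are hypothetical placeholders.
def _ExampleUIThreadUsage(stream, ui_controller):
  status_queue = Queue.Queue()  # Shared queue that worker processes post to.
  UIThread(status_queue, stream, ui_controller)
  status_queue.put(ZERO_TASKS_TO_DO_ARGUMENT)  # Signals the thread to exit.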
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
felix.endpoint
~~~~~~~~~~~~~~
Endpoint management.
"""
import logging
from calico.felix import devices, futils
from calico.felix.actor import actor_message
from calico.felix.futils import FailedSystemCall
from calico.felix.futils import IPV4
from calico.felix.refcount import ReferenceManager, RefCountedActor, RefHelper
from calico.felix.dispatch import DispatchChains
from calico.felix.profilerules import RulesManager
from calico.felix.frules import (
profile_to_chain_name, commented_drop_fragment, interface_to_suffix,
chain_names
)
_log = logging.getLogger(__name__)
class EndpointManager(ReferenceManager):
def __init__(self, config, ip_type,
iptables_updater,
dispatch_chains,
rules_manager):
super(EndpointManager, self).__init__(qualifier=ip_type)
# Configuration and version to use
self.config = config
self.ip_type = ip_type
self.ip_version = futils.IP_TYPE_TO_VERSION[ip_type]
# Peers/utility classes.
self.iptables_updater = iptables_updater
self.dispatch_chains = dispatch_chains
self.rules_mgr = rules_manager
# All endpoint dicts that are on this host.
self.endpoints_by_id = {}
# Dict that maps from interface name ("tap1234") to endpoint ID.
self.endpoint_id_by_iface_name = {}
# Set of endpoints that are live on this host. I.e. ones that we've
# increffed.
self.local_endpoint_ids = set()
def _create(self, combined_id):
"""
Overrides ReferenceManager._create()
"""
return LocalEndpoint(self.config,
combined_id,
self.ip_type,
self.iptables_updater,
self.dispatch_chains,
self.rules_mgr)
def _on_object_started(self, endpoint_id, obj):
"""
Callback from a LocalEndpoint to report that it has started.
Overrides ReferenceManager._on_object_started
"""
ep = self.endpoints_by_id.get(endpoint_id)
obj.on_endpoint_update(ep, async=True)
@actor_message()
def apply_snapshot(self, endpoints_by_id):
# Tell the dispatch chains about the local endpoints in advance so
# that we don't flap the dispatch chain at start-of-day.
local_iface_name_to_ep_id = {}
for ep_id, ep in endpoints_by_id.iteritems():
if ep and ep_id.host == self.config.HOSTNAME and ep.get("name"):
local_iface_name_to_ep_id[ep.get("name")] = ep_id
self.dispatch_chains.apply_snapshot(local_iface_name_to_ep_id.keys(),
async=True)
# Then update/create endpoints and work out which endpoints have been
# deleted.
missing_endpoints = set(self.endpoints_by_id.keys())
for endpoint_id, endpoint in endpoints_by_id.iteritems():
self.on_endpoint_update(endpoint_id, endpoint,
force_reprogram=True)
missing_endpoints.discard(endpoint_id)
self._maybe_yield()
for endpoint_id in missing_endpoints:
self.on_endpoint_update(endpoint_id, None)
self._maybe_yield()
@actor_message()
def on_endpoint_update(self, endpoint_id, endpoint, force_reprogram=False):
"""
Event to indicate that an endpoint has been updated (including
creation or deletion).
:param EndpointId endpoint_id: The endpoint ID in question.
:param dict[str]|NoneType endpoint: Dictionary of all endpoint
data or None if the endpoint is to be deleted.
"""
if endpoint_id.host != self.config.HOSTNAME:
_log.debug("Skipping endpoint %s; not on our host.", endpoint_id)
return
if self._is_starting_or_live(endpoint_id):
# Local endpoint thread is running; tell it of the change.
_log.info("Update for live endpoint %s", endpoint_id)
self.objects_by_id[endpoint_id].on_endpoint_update(
endpoint, force_reprogram=force_reprogram, async=True)
old_ep = self.endpoints_by_id.pop(endpoint_id, {})
# Interface name shouldn't change but popping it now is correct for
# deletes and we add it back in below on create/modify.
old_iface_name = old_ep.get("name")
self.endpoint_id_by_iface_name.pop(old_iface_name, None)
if endpoint is None:
# Deletion. Remove from the list.
_log.info("Endpoint %s deleted", endpoint_id)
if endpoint_id in self.local_endpoint_ids:
self.decref(endpoint_id)
self.local_endpoint_ids.remove(endpoint_id)
else:
# Creation or modification
_log.info("Endpoint %s modified or created", endpoint_id)
self.endpoints_by_id[endpoint_id] = endpoint
self.endpoint_id_by_iface_name[endpoint["name"]] = endpoint_id
if endpoint_id not in self.local_endpoint_ids:
# This will trigger _on_object_activated to pass the endpoint
# we just saved off to the endpoint.
self.local_endpoint_ids.add(endpoint_id)
self.get_and_incref(endpoint_id)
@actor_message()
def on_interface_update(self, name):
"""
Called when an interface is created or changes state.
The interface may be any interface on the host, not necessarily
one managed by any endpoint of this server.
"""
try:
endpoint_id = self.endpoint_id_by_iface_name[name]
except KeyError:
_log.debug("Update on interface %s that we do not care about",
name)
else:
_log.info("Endpoint %s received interface update for %s",
endpoint_id, name)
if self._is_starting_or_live(endpoint_id):
# LocalEndpoint is running, so tell it about the change.
ep = self.objects_by_id[endpoint_id]
ep.on_interface_update(async=True)
class LocalEndpoint(RefCountedActor):
def __init__(self, config, combined_id, ip_type, iptables_updater,
dispatch_chains, rules_manager):
"""
Controls a single local endpoint.
:param combined_id: EndpointId for this endpoint.
:param ip_type: IP type for this endpoint (IPv4 or IPv6)
:param iptables_updater: IptablesUpdater to use
:param dispatch_chains: DispatchChains to use
:param rules_manager: RulesManager to use
"""
super(LocalEndpoint, self).__init__(qualifier="%s(%s)" %
(combined_id.endpoint, ip_type))
assert isinstance(dispatch_chains, DispatchChains)
assert isinstance(rules_manager, RulesManager)
self.combined_id = combined_id
self.config = config
self.ip_type = ip_type
self.ip_version = futils.IP_TYPE_TO_VERSION[ip_type]
if self.ip_type == IPV4:
self.nets_key = "ipv4_nets"
else:
self.nets_key = "ipv6_nets"
self.iptables_updater = iptables_updater
self.dispatch_chains = dispatch_chains
self.rules_mgr = rules_manager
self.rules_ref_helper = RefHelper(self, rules_manager,
self._on_profiles_ready)
# Will be filled in as we learn about the OS interface and the
# endpoint config.
self.endpoint = None
self._mac = None
self._iface_name = None
self._suffix = None
# Track whether the last attempt to program the dataplane succeeded.
# We'll force a reprogram next time we get a kick.
self._failed = False
# And whether we've received an update since last time we programmed.
self._dirty = False
@actor_message()
def on_endpoint_update(self, endpoint, force_reprogram=False):
"""
Called when this endpoint has received an update.
:param dict[str] endpoint: endpoint parameter dictionary.
"""
_log.info("%s updated: %s", self, endpoint)
mac_changed = False
if not endpoint and not self.endpoint:
# First time we have been called, but it's a delete! Maybe some
# odd timing window, but we have nothing to tidy up.
return
if endpoint and endpoint['mac'] != self._mac:
# Either we have not seen this MAC before, or it has changed.
self._mac = endpoint['mac']
mac_changed = True
if endpoint and not self.endpoint:
# This is the first time we have seen the endpoint, so extract the
# interface name and endpoint ID.
self._iface_name = endpoint["name"]
self._suffix = interface_to_suffix(self.config,
self._iface_name)
was_ready = self._ready
# Activate the required profile IDs (and deactivate any that we no
# longer need).
if endpoint:
new_profile_ids = set(endpoint["profile_ids"])
else:
new_profile_ids = set()
# Note: we don't actually need to wait for the activation to finish
# due to the dependency management in the iptables layer.
self.rules_ref_helper.replace_all(new_profile_ids)
if endpoint != self.endpoint or force_reprogram:
self._dirty = True
# Store off the endpoint we were passed.
self.endpoint = endpoint
if endpoint:
# Configure the network interface; may fail if not there yet (in
# which case we'll just do it when the interface comes up).
self._configure_interface(mac_changed)
else:
# Remove the network programming.
self._deconfigure_interface()
self._maybe_update(was_ready)
_log.debug("%s finished processing update", self)
@actor_message()
def on_unreferenced(self):
"""
Overrides RefCountedActor:on_unreferenced.
"""
_log.info("%s now unreferenced, cleaning up", self)
assert not self._ready, "Should be deleted before being unreffed."
# Removing all profile refs should have been done already but be
# defensive.
self.rules_ref_helper.discard_all()
self._notify_cleanup_complete()
@actor_message()
def on_interface_update(self):
"""
Actor event to report that the interface is either up or changed.
"""
_log.info("Endpoint %s received interface kick", self.combined_id)
self._configure_interface()
@property
def _missing_deps(self):
"""
Returns a list of missing dependencies.
"""
missing_deps = []
if not self.endpoint:
missing_deps.append("endpoint")
elif self.endpoint.get("state", "active") != "active":
missing_deps.append("endpoint active")
elif not self.endpoint.get("profile_ids"):
missing_deps.append("profile")
return missing_deps
@property
def _ready(self):
"""
Returns True if this LocalEndpoint has no outstanding dependencies and
can therefore program its rules.
"""
return not self._missing_deps
def _maybe_update(self, was_ready):
"""
Update the relevant programming for this endpoint.
:param bool was_ready: Whether this endpoint has already been
successfully configured.
"""
is_ready = self._ready
if not is_ready:
_log.debug("%s not ready, waiting on %s", self, self._missing_deps)
if self._failed or self._dirty or is_ready != was_ready:
ifce_name = self._iface_name
if is_ready:
# We've got all the info and everything is active.
if self._failed:
_log.warn("Retrying programming after a failure")
self._failed = False # Ready to try again...
_log.info("%s became ready to program.", self)
self._update_chains()
self.dispatch_chains.on_endpoint_added(
self._iface_name, async=True)
else:
# We were active but now we're not, withdraw the dispatch rule
# and our chain. We must do this to allow iptables to remove
# the profile chain.
_log.info("%s became unready.", self)
self._failed = False # Don't care any more.
self.dispatch_chains.on_endpoint_removed(ifce_name,
async=True)
self._remove_chains()
self._dirty = False
def _update_chains(self):
updates, deps = _get_endpoint_rules(self.combined_id.endpoint,
self._suffix, self.endpoint["mac"],
self.endpoint["profile_ids"])
try:
self.iptables_updater.rewrite_chains(updates, deps, async=False)
except FailedSystemCall:
_log.exception("Failed to program chains for %s. Removing.", self)
self._failed = True
self._remove_chains()
def _remove_chains(self):
try:
self.iptables_updater.delete_chains(chain_names(self._suffix),
async=True)
except FailedSystemCall:
_log.exception("Failed to delete chains for %s", self)
self._failed = True
def _configure_interface(self, mac_changed=True):
"""
Applies sysctls and routes to the interface.
:param bool mac_changed: Has the MAC address changed since it was last
configured? If so, we reconfigure ARP for the interface in
IPv4 (ARP does not exist for IPv6, which uses neighbour
solicitation instead).
"""
try:
if self.ip_type == IPV4:
devices.configure_interface_ipv4(self._iface_name)
reset_arp = mac_changed
else:
ipv6_gw = self.endpoint.get("ipv6_gateway", None)
devices.configure_interface_ipv6(self._iface_name, ipv6_gw)
reset_arp = False
ips = set()
for ip in self.endpoint.get(self.nets_key, []):
ips.add(futils.net_to_ip(ip))
devices.set_routes(self.ip_type, ips,
self._iface_name,
self.endpoint["mac"],
reset_arp=reset_arp)
except (IOError, FailedSystemCall):
if not devices.interface_exists(self._iface_name):
_log.info("Interface %s for %s does not exist yet",
self._iface_name, self.combined_id)
elif not devices.interface_up(self._iface_name):
_log.info("Interface %s for %s is not up yet",
self._iface_name, self.combined_id)
else:
# Interface flapped back up after we failed?
_log.warning("Failed to configure interface %s for %s",
self._iface_name, self.combined_id)
def _deconfigure_interface(self):
"""
Removes routes from the interface.
"""
try:
devices.set_routes(self.ip_type, set(), self._iface_name, None)
except (IOError, FailedSystemCall):
if not devices.interface_exists(self._iface_name):
# Deleted under our feet - so the rules are gone.
_log.debug("Interface %s for %s deleted",
self._iface_name, self.combined_id)
else:
# An error deleting the rules. Log and continue.
_log.exception("Cannot delete rules for interface %s for %s",
self._iface_name, self.combined_id)
def _on_profiles_ready(self):
# We don't actually need to talk to the profiles, just log.
_log.info("Endpoint %s acquired all required profile references",
self.combined_id)
def __str__(self):
return ("Endpoint<%s,id=%s,iface=%s>" %
(self.ip_type, self.combined_id,
self._iface_name or "unknown"))
def _get_endpoint_rules(endpoint_id, suffix, mac, profile_ids):
to_chain_name, from_chain_name = chain_names(suffix)
to_chain, to_deps = _build_to_or_from_chain(
endpoint_id,
profile_ids,
to_chain_name,
"inbound"
)
from_chain, from_deps = _build_to_or_from_chain(
endpoint_id,
profile_ids,
from_chain_name,
"outbound",
expected_mac=mac,
)
updates = {to_chain_name: to_chain, from_chain_name: from_chain}
deps = {to_chain_name: to_deps, from_chain_name: from_deps}
return updates, deps
def _build_to_or_from_chain(endpoint_id, profile_ids, chain_name,
direction, expected_mac=None):
# Ensure the MARK is set to 0 when we start so that unmatched packets will
# be dropped.
chain = [
"--append %s --jump MARK --set-mark 0" % chain_name
]
if expected_mac:
_log.debug("Policing source MAC: %s", expected_mac)
chain.append('--append %s --match mac ! --mac-source %s --jump DROP '
'--match comment --comment "Incorrect source MAC"' %
(chain_name, expected_mac))
# Jump to each profile in turn. The profile will do one of the
# following:
# * DROP the packet; in which case we won't see it again.
# * RETURN the packet with MARK==1, indicating it accepted the packet. In
# which case, we RETURN and skip further profiles.
# * RETURN the packet with MARK==0, indicating it did not match the packet.
# In which case, we carry on and process the next profile.
deps = set()
for profile_id in profile_ids:
profile_chain = profile_to_chain_name(direction, profile_id)
deps.add(profile_chain)
chain.append("--append %s --jump %s" % (chain_name, profile_chain))
# If the profile accepted the packet, it sets MARK==1. Immediately
# RETURN the packet to signal that it's been accepted.
chain.append('--append %s --match mark --mark 1/1 '
'--match comment --comment "Profile accepted packet" '
'--jump RETURN' % chain_name)
# Default drop rule.
chain.append(
commented_drop_fragment(
chain_name,
"Default DROP if no match (endpoint %s):" % endpoint_id
)
)
return chain, deps
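# Illustrative sketch (not part of Felix): building the inbound chain for a
# hypothetical endpoint with two profiles. The endpoint ID, profile IDs and
# chain name are placeholders, not Felix's real naming scheme.
def _example_build_inbound_chain():
    chain, deps = _build_to_or_from_chain(
        "endpoint-1",              # endpoint_id (placeholder)
        ["prof_A", "prof_B"],      # profile_ids (placeholders)
        "example-to-chain",        # chain_name (placeholder)
        "inbound",
    )
    # 'chain' is the ordered list of iptables fragments: the mark reset, one
    # jump per profile each followed by the mark-1 RETURN check, then the
    # default DROP. 'deps' is the set of profile chains that must exist first.
    return chain, deps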
|
|
"""Query subclass used by Manager as default session query class.
"""
from math import ceil
from sqlalchemy import orm, and_, or_, inspect
from sqlalchemy.orm.strategy_options import Load
from pydash import py_
from ._compat import iteritems
__all__ = [
'Query',
'QueryModel',
'QueryProperty',
'Pagination',
'LoadOption'
]
class Query(orm.Query):
"""Extension of default Query class used in SQLAlchemy session queries.
"""
#: Default per_page argument for pagination when per_page not specified.
DEFAULT_PER_PAGE = 50
@property
def entities(self):
"""Return list of entity classes present in query."""
return [e.mapper.class_ for e in self._entities]
@property
def join_entities(self):
"""Return list of the joined entity classes present in query."""
return [e.mapper.class_ for e in self._join_entities]
@property
def all_entities(self):
"""Return list of entities + join_entities present in query."""
return self.entities + self.join_entities
def _join_eager(self, keys, use_outerjoin, **kargs):
"""Helper method for applying ``join()``/``outerjoin()` with
``contains_eager()``.
"""
alias = kargs.pop('alias', {})
options = kargs.pop('options', None)
if not isinstance(alias, dict):
alias = {keys[0]: alias}
join_args = [(alias.get(key), key) for key in keys]
load = orm.contains_eager(keys[0], alias=alias.get(keys[0]))
for key in keys[1:]:
load = load.contains_eager(key, alias=alias.get(key))
if options:
apply_load_options(load, options)
join = self.outerjoin if use_outerjoin else self.join
return join(*join_args).options(load)
def join_eager(self, *keys, **kargs):
"""Apply ``join`` + ``self.options(contains_eager())``.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
alias: Join alias or ``dict`` mapping key names to aliases.
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
"""
return self._join_eager(keys, False, **kargs)
def outerjoin_eager(self, *keys, **kargs):
"""Apply ``outerjoin`` + ``self.options(contains_eager())``.
Args:
keys (mixed): Either string keys or column references to join
path(s).
Keyword Args:
alias: Join alias or ``dict`` mapping key names to aliases.
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
"""
return self._join_eager(keys, True, **kargs)
def _join_load(self, keys, load_strategy, **kargs):
"""Helper method for returning load strategies."""
options = kargs.pop('options', None)
load = getattr(orm, load_strategy)(keys[0], **kargs)
for key in keys[1:]:
load = getattr(load, load_strategy)(key)
if options:
load = apply_load_options(load, options)
return self.options(load)
def joinedload(self, *keys, **kargs):
"""Apply ``joinedload()`` to `keys`.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
Note:
Additional keyword args will be passed to initial load creation.
"""
return self._join_load(keys, 'joinedload', **kargs)
def immediateload(self, *keys, **kargs):
"""Apply ``immediateload()`` to `keys`.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
Note:
Additional keyword args will be passed to initial load creation.
"""
return self._join_load(keys, 'immediateload', **kargs)
def lazyload(self, *keys, **kargs):
"""Apply ``lazyload()`` to `keys`.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
Note:
Additional keyword args will be passed to initial load creation.
"""
return self._join_load(keys, 'lazyload', **kargs)
def noload(self, *keys, **kargs):
"""Apply ``noload()`` to `keys`.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
Note:
Additional keyword args will be passed to initial load creation.
"""
return self._join_load(keys, 'noload', **kargs)
def subqueryload(self, *keys, **kargs):
"""Apply ``subqueryload()`` to `keys`.
Args:
keys (mixed): Either string or column references to join
path(s).
Keyword Args:
options (list): A list of :class:`LoadOption` to apply to the
overall load strategy, i.e., each :class:`LoadOption` will be
chained at the end of the load.
Note:
Additional keyword args will be passed to initial load creation.
"""
return self._join_load(keys, 'subqueryload', **kargs)
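    # Illustrative sketch (hypothetical relationship names): the load-strategy
    # helpers above chain through their keys, so
    #
    #     query.joinedload('category', 'images',
    #                      options=[LoadOption('noload', 'thumbnails')])
    #
    # is roughly equivalent to
    #
    #     query.options(orm.joinedload('category').joinedload('images')
    #                   .noload('thumbnails'))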
def load_only(self, *columns):
"""Apply ``load_only()`` to query."""
obj, columns = get_load_options(*columns)
return self.options(obj.load_only(*columns))
def defer(self, *columns):
"""Apply ``defer()`` to query."""
load, columns = get_load_options(*columns)
for column in columns:
load = load.defer(column)
return self.options(load)
def undefer(self, *columns):
"""Apply ``undefer()`` to query."""
load, columns = get_load_options(*columns)
for column in columns:
load = load.undefer(column)
return self.options(load)
def undefer_group(self, *names):
"""Apply ``undefer_group()`` to query."""
obj, names = get_load_options(*names)
return self.options(obj.undefer_group(names[0]))
def chain(self):
"""Return pydash chaining instance with items returned by
:meth:`all`.
See Also:
`pydash's <http://pydash.readthedocs.org/>`_ documentation on
`chaining <http://pydash.readthedocs.org/en/latest/chaining.html>`_
"""
return py_.chain(self.all())
def index_by(self, callback=None):
"""Index items returned by :meth:`all` using `callback`."""
return py_.index_by(self.all(), callback)
def map(self, callback=None):
"""Map `callback` to each item returned by :meth:`all`."""
return py_.map(self.all(), callback)
def reduce(self, callback=None, initial=None):
"""Reduce :meth:`all` using `callback`."""
return py_.reduce(self.all(), callback, initial)
def reduce_right(self, callback=None, initial=None):
"""Reduce reversed :meth:`all` using `callback`."""
return py_.reduce_right(self.all(), callback, initial)
def pluck(self, column):
"""Pluck `column` attribute values from :meth:`all` results and
return as list.
"""
return py_.pluck(self.all(), column)
def page(self, page=1, per_page=None):
"""Return query with limit and offset applied for page."""
if per_page is None:
per_page = self.DEFAULT_PER_PAGE
return self.limit(per_page).offset((page - 1) * per_page)
def paginate(self, page=1, per_page=None, error_out=True):
"""Return :class:`Pagination` instance using already defined query
parameters.
"""
if error_out and page < 1:
raise IndexError
if per_page is None:
per_page = self.DEFAULT_PER_PAGE
items = self.page(page, per_page).all()
if not items and page != 1 and error_out:
raise IndexError
# No need to count if we're on the first page and there are fewer items
# than we expected.
if page == 1 and len(items) < per_page:
total = len(items)
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items)
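# Illustrative sketch (hypothetical session/model names): paging versus
# paginating a query that uses this Query class.
def _example_pagination(session, Model):
    # page() only applies LIMIT/OFFSET; paginate() also executes the query and
    # wraps the results with total/prev/next metadata.
    second_page_query = session.query(Model).page(page=2, per_page=25)
    pagination = session.query(Model).paginate(page=2, per_page=25)
    return second_page_query.all(), pagination.total, pagination.has_next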
class QueryModel(Query):
"""Class used for default query property class for ``mymanager.query``,
``mymanager.session.query``, and ``MyModel.query``. Can be used in other
libraries/implementations when creating a session::
from sqlalchemy import orm
from alchy import QueryModel
# or if not using as query property
# from alchy import Query
session = orm.scoped_session(orm.sessionmaker())
session.configure(query_cls=QueryModel)
**NOTE:** If you don't plan to use the query class as a query property,
then you can use the :class:`Query` class instead since it won't include
features that only work within a query property context.
Attributes:
__search_filters__: All available search filter functions indexed by a
canonical name which will be referenced in advanced/simple search.
All filter functions should take a single value and return an
SQLAlchemy filter expression, i.e.,
``{key: lambda value: Model.column_name.contains(value)}``
__advanced_search__: Advanced search models search by named parameters.
Generally found on advanced search forms where each field maps to a
specific database field that will be queried against. If defined as
a list, each item should be a key from :attr:`__search_filters__`.
The matching :attr:`__search_filters__` function will be used in
the query. If defined as a dict, it should have the same format as
:attr:`__search_filters__`.
__simple_search__: Simple search models search by phrase (like Google
search). Defined like :attr:`__advanced_search__`.
__order_by__: Default order-by to use when
:attr:`alchy.model.ModelBase.query` is used.
"""
__search_filters__ = {}
__advanced_search__ = []
__simple_search__ = []
__order_by__ = None
@property
def Model(self):
"""Return primary entity model class."""
return self.entities[0]
def get_search_filters(self, keys):
"""Return :attr:`__search_filters__` filtered by keys."""
if isinstance(keys, dict):
return keys
else:
return dict([(key, self.__search_filters__[key]) for key in keys])
def advanced_filter(self, search_dict=None):
"""Return the compiled advanced search filter mapped to `search_dict`.
"""
if search_dict is None: # pragma: no cover
search_dict = {}
filter_funcs = self.get_search_filters(self.__advanced_search__)
term_filters = [filter_funcs[key](value)
for key, value in iteritems(search_dict)
if key in filter_funcs]
# All filters should match for an advanced search.
return and_(*term_filters)
def simple_filter(self, search_terms=None):
"""Return the compiled simple search filter mapped to `search_terms`.
"""
if search_terms is None: # pragma: no cover
search_terms = []
filter_funcs = self.get_search_filters(self.__simple_search__)
# Only support AND'ing search terms together. Apply each simple search
# filter to each search term and group them together.
term_filters = [[func(term) for func in filter_funcs.values()]
for term in search_terms]
# Each item in term_filters is the list of filters applied to one of the
# search terms contained in search_string. Within a single term, any one of
# its filters may match (OR); across terms, every term must have at least
# one matching filter (AND).
return and_(*[or_(*filters) for filters in term_filters])
def search(self, search_string=None, search_dict=None, **search_options):
"""Perform combination of simple/advanced searching with optional
limit/offset support.
"""
search_options.setdefault('limit', None)
search_options.setdefault('offset', None)
search_options.setdefault('order_by', self.__order_by__)
query = self
# Apply search filtering and pagination to the Model's primary keys so the
# result can be used as a subquery. Paginating via a subquery keeps the
# outer-level joins from excluding records when *-to-many relationships are
# involved. For example, if we were returning a query of user + user
# keywords (one-to-many), the first 25 users may correspond to more than 25
# joined rows, since each user can join to many records in the user keywords
# table.
original = (self.lazyload('*')
.load_only(*self.Model.primary_attrs())
.distinct())
# Use the original query so that we preserve joins and where
# statements.
model_query = original
if self.whereclause is not None:
# If our base query contains a whereclause, then we need to
# complete the "transfer" of the base query's where statements to
# model_query by wiping out the base query's criterion, i.e., we
# only want to maintain selects and froms in the base query and
# keep wheres in the model_query.
# Call a generative query method that won't modify its state. This
# is basically a no-op used to copy the query object and modify it
# below. NOTE: There may be a better way to do this.
query = query.filter()
# Remove existing filters since they were transferred to the
# model_query. This may seem kind of hacky but I don't know of a
# better way to nullify the query object's where clause.
query._criterion = None
if search_string is not None:
model_query = model_query.filter(
self.simple_filter(search_string.split()))
if search_dict is not None:
model_query = model_query.filter(
self.advanced_filter(search_dict))
if search_options['order_by'] is not None:
if not isinstance(search_options['order_by'], (list, tuple)):
search_options['order_by'] = [search_options['order_by']]
model_query = model_query.order_by(*search_options['order_by'])
if search_options['limit'] is not None:
model_query = model_query.limit(search_options['limit'])
if search_options['offset'] is not None:
model_query = model_query.offset(search_options['offset'])
if model_query != original:
subquery = model_query.subquery()
query = query.join(
subquery, join_subquery_on_columns(subquery,
self.Model.primary_keys()))
return query
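# Illustrative sketch (hypothetical filters and model): search() combines the
# simple and advanced filters and then rejoins the paginated primary keys as a
# subquery. 'query' is assumed to be a QueryModel instance whose class defines
# __search_filters__, __simple_search__ and __advanced_search__.
def _example_search(query):
    return query.search(search_string='alpha beta',
                        search_dict={'name': 'example'},
                        limit=25, offset=0).all()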
class QueryProperty(object):
"""Query property accessor which gives a model access to query capabilities
via :attr:`alchy.model.ModelBase.query` which is equivalent to
``session.query(Model)``.
"""
def __init__(self, session):
self.session = session
def __get__(self, model, Model):
mapper = orm.class_mapper(Model)
if mapper:
if not getattr(Model, 'query_class', None):
Model.query_class = QueryModel
query_property = Model.query_class(mapper, session=self.session())
return query_property
##
# Pagination class and usage adapted from Flask-SQLAlchemy:
# https://github.com/mitsuhiko/flask-sqlalchemy
##
class Pagination(object):
"""Internal helper class returned by :meth:`Query.paginate`. You can also
construct it from any other SQLAlchemy query object if you are working with
other libraries. Additionally, it is possible to pass ``None`` as the query
object, in which case :meth:`prev` and :meth:`next` will no longer work.
"""
def __init__(self, query, page, per_page, total, items):
#: The query object that was used to create this pagination object.
self.query = query
#: The current page number (1 indexed).
self.page = page
#: The number of items to be displayed on a page.
self.per_page = per_page
#: The total number of items matching the query.
self.total = total
#: The items for the current page.
self.items = items
if self.per_page == 0:
self.pages = 0
else:
#: The total number of pages.
self.pages = int(ceil(self.total / float(self.per_page)))
#: Number of the previous page.
self.prev_num = self.page - 1
#: True if a previous page exists.
self.has_prev = self.page > 1
#: Number of the next page.
self.next_num = self.page + 1
#: True if a next page exists.
self.has_next = self.page < self.pages
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert self.query is not None, \
'a query object is required for this method to work'
return self.query.paginate(self.page - 1, self.per_page, error_out)
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert self.query is not None, \
'a query object is required for this method to work'
return self.query.paginate(self.page + 1, self.per_page, error_out)
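# Illustrative sketch: walking every page using the metadata computed in
# Pagination.__init__ ('query' is a hypothetical Query instance).
def _example_iterate_pages(query, per_page=100):
    pagination = query.paginate(page=1, per_page=per_page)
    while True:
        for item in pagination.items:
            pass  # Process each item here.
        if not pagination.has_next:
            break
        pagination = pagination.next()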
class LoadOption(object):
"""Chained load option to apply to a load strategy when calling
:class:`Query` load methods.
Example usage: ::
qry = (db.session.query(Product)
.join_eager('category',
options=[LoadOption('noload', 'images')]))
This would result in the ``noload`` option being chained to the eager
option for ``Product.category`` and is equivalent to: ::
qry = (db.session.query(Product)
.join('category')
.options(contains_eager('category').noload('images')))
"""
def __init__(self, strategy, *args, **kargs):
self.strategy = strategy
self.args = args
self.kargs = kargs
def get_load_options(*columns):
"""Helper method that attempts to extract a sqlalchemy object from
`columns[0]` and return remaining columns to apply to a query load method.
"""
model_inspect = inspect(columns[0], raiseerr=False)
# return an obj which has loading API
if model_inspect and model_inspect.is_mapper:
obj = Load(columns[0])
columns = columns[1:]
elif isinstance(columns[0], Load):
obj = columns[0]
columns = columns[1:]
else:
obj = orm
return (obj, columns)
def apply_load_options(load, options):
"""Apply load `options` to base `load` object.
"""
for load_option in options:
load = getattr(load, load_option.strategy)(*load_option.args,
**load_option.kargs)
return load
def base_columns_from_subquery(subquery):
"""Return non-aliased, base columns from subquery."""
# base_columns is a set so we need to cast to list.
return [(column, list(column.base_columns))
for column in subquery.c.values()]
def join_subquery_on_columns(subquery, columns):
"""Return join-on condition which maps subquery's columns to columns."""
subquery_base_columns = base_columns_from_subquery(subquery)
join_on = []
for subquery_column, base_columns in subquery_base_columns:
# Don't support joining to subquery column with more than 1 base
# column.
if len(base_columns) == 1 and base_columns[0] in columns:
join_on.append(subquery_column == base_columns[0])
if join_on:
return and_(*join_on)
else: # pragma: no cover
return None
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# Imports the following modules so that the @RegisterGradient decorators get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import spectral_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
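# Illustrative sketch (not TensorFlow source): the modules above are imported
# purely for their side effects; each registers gradient functions with a
# pattern roughly like
#
#   @ops.RegisterGradient("SomeOp")      # hypothetical op name
#   def _SomeOpGrad(op, grad):
#     return grad
#
# so importing them here makes those gradients available to tf.gradients.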
from tensorflow.python.util.all_util import remove_undocumented
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import *
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import *
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *
from tensorflow.python.ops.variables import *
# pylint: enable=wildcard-import
#### For use in remove_undocumented below:
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import check_ops as _check_ops
from tensorflow.python.ops import clip_ops as _clip_ops
from tensorflow.python.ops import confusion_matrix as _confusion_matrix
from tensorflow.python.ops import control_flow_ops as _control_flow_ops
from tensorflow.python.ops import data_flow_ops as _data_flow_ops
from tensorflow.python.ops import functional_ops as _functional_ops
from tensorflow.python.ops import gradients as _gradients
from tensorflow.python.ops import histogram_ops as _histogram_ops
from tensorflow.python.ops import init_ops as _init_ops
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import linalg_ops as _linalg_ops
from tensorflow.python.ops import logging_ops as _logging_ops
from tensorflow.python.ops import manip_ops as _manip_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops import numerics as _numerics
from tensorflow.python.ops import parsing_ops as _parsing_ops
from tensorflow.python.ops import partitioned_variables as _partitioned_variables
from tensorflow.python.ops import random_ops as _random_ops
from tensorflow.python.ops import script_ops as _script_ops
from tensorflow.python.ops import session_ops as _session_ops
from tensorflow.python.ops import sparse_ops as _sparse_ops
from tensorflow.python.ops import special_math_ops as _special_math_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.ops import string_ops as _string_ops
from tensorflow.python.ops import template as _template
from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops
from tensorflow.python.ops import variable_scope as _variable_scope
from tensorflow.python.ops import variables as _variables
_allowed_symbols_math_ops = [
# TODO(drpng): decide if we want to reference these in the documentation.
"reduced_shape",
"sparse_segment_mean_grad",
"sparse_segment_sqrt_n_grad",
# Legacy: will be removed.
"arg_max",
"arg_min",
"lin_space",
"sparse_matmul", # Use tf.matmul.
# Deprecated (see versions.h):
"batch_fft",
"batch_fft2d",
"batch_fft3d",
"batch_ifft",
"batch_ifft2d",
"batch_ifft3d",
"mul", # use tf.multiply instead.
"neg", # use tf.negative instead.
"sub", # use tf.subtract instead.
# These are documented in nn.
# We are not importing nn because it would create a circular dependency.
"sigmoid",
"log_sigmoid",
"tanh",
]
_allowed_symbols_array_ops = [
# TODO(drpng): make sure they are documented.
# Scalars:
"NEW_AXIS",
"SHRINK_AXIS",
"newaxis",
# Documented in training.py.
# I do not import train, to avoid circular dependencies.
# TODO(drpng): this is defined in gen_array_ops, clearly not the right
# place.
"stop_gradient",
# See gen_docs_combined for tf.copy documentation.
"copy",
## TODO(drpng): make them inaccessible directly.
## TODO(drpng): Below, to-doc means that we need to find an appropriate
## documentation section to reference.
## For re-exporting to tf.*:
"constant",
"edit_distance", # to-doc
# From gen_array_ops:
"copy_host", # to-doc
"immutable_const", # to-doc
"invert_permutation", # to-doc
"quantize_and_dequantize", # to-doc
# TODO(drpng): legacy symbols to be removed.
"list_diff", # Use tf.listdiff instead.
"batch_matrix_diag",
"batch_matrix_band_part",
"batch_matrix_diag_part",
"batch_matrix_set_diag",
]
_allowed_symbols_partitioned_variables = [
"PartitionedVariable", # Requires doc link.
# Legacy.
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
_allowed_symbols_control_flow_ops = [
# TODO(drpng): Find a place in the documentation to reference these or
# remove.
"control_trigger",
"loop_cond",
"merge",
"switch",
]
_allowed_symbols_functional_ops = [
"nest", # Used by legacy code.
]
_allowed_symbols_gradients = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"AggregationMethod",
"gradients", # tf.gradients = gradients.gradients
"hessians",
]
_allowed_symbols_clip_ops = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"clip_by_average_norm",
"clip_by_global_norm",
"clip_by_norm",
"clip_by_value",
"global_norm",
]
_allowed_symbols_image_ops = [
# Documented in training.py.
# We are not importing training.py to avoid complex dependencies.
"audio_summary",
"histogram_summary",
"image_summary",
"merge_all_summaries",
"merge_summary",
"scalar_summary",
# TODO(drpng): link in training.py if it should be documented.
"get_summary_op",
]
_allowed_symbols_variable_scope_ops = [
"get_local_variable", # Documented in framework package.
]
_allowed_symbols_misc = [
"deserialize_many_sparse",
"parse_single_sequence_example",
"serialize_many_sparse",
"serialize_sparse",
"confusion_matrix",
]
_allowed_symbols = (_allowed_symbols_array_ops +
_allowed_symbols_clip_ops +
_allowed_symbols_control_flow_ops +
_allowed_symbols_functional_ops +
_allowed_symbols_image_ops +
_allowed_symbols_gradients +
_allowed_symbols_math_ops +
_allowed_symbols_variable_scope_ops +
_allowed_symbols_misc +
_allowed_symbols_partitioned_variables)
remove_undocumented(__name__, _allowed_symbols, [
_sys.modules[__name__],
_array_ops,
_check_ops,
_clip_ops,
_confusion_matrix,
_control_flow_ops,
_constant_op,
_data_flow_ops,
_functional_ops,
_gradients,
_histogram_ops,
_init_ops,
_io_ops,
_linalg_ops,
_logging_ops,
_manip_ops,
_math_ops,
_numerics,
_parsing_ops,
_partitioned_variables,
_random_ops,
_script_ops,
_session_ops,
_sparse_ops,
_special_math_ops,
_state_ops,
_string_ops,
_template,
_tensor_array_ops,
_variable_scope,
_variables,
])
|
|
import xml.dom.minidom
from PyQt5 import QtWidgets, QtCore, QtGui
from functions import button_functions
from ui.Ui_aboutwindow import Ui_aboutWindow
import logging
def create_eufar_xml(self, out_file_name):
logging.debug('eufar_metadata_xml.py - create_eufar_xml - starting...')
NAMESPACE_URI1 = "http://www.isotc211.org/2005/gmd"
NAMESPACE_URI2 = "http://www.isotc211.org/2005/gco"
NAMESPACE_URI3 = "http://www.opengis.net/gml"
NAMESPACE_URI4 = "http://www.w3.org/2001/XMLSchema-instance"
NAMESPACE_URI5 = "http://www.isotc211.org/2005/srv"
NAMESPACE_URI6 = "http://www.w3.org/1999/xlink"
doc = xml.dom.minidom.Document()
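    # Note: add_element() is defined or imported elsewhere in this module (not
    # shown in this excerpt). Judging from its use below, it appears to create
    # a child element with the given tag under the given parent and, when a
    # fourth argument is passed, attach that value as the element's text node.
    # A rough, hypothetical sketch of such a helper:
    #
    #   def add_element(doc, tag, parent, text=None):
    #       elem = doc.createElement(tag)
    #       if text is not None:
    #           elem.appendChild(doc.createTextNode(str(text)))
    #       parent.appendChild(elem)
    #       return elem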
doc_root = add_element(doc, "gmd:MD_Metadata", doc)
doc_root.setAttribute("xmlns:gmd", NAMESPACE_URI1)
doc_root.setAttribute("xmlns:gco", NAMESPACE_URI2)
doc_root.setAttribute("xmlns:gml", NAMESPACE_URI3)
doc_root.setAttribute("xmlns:xsi", NAMESPACE_URI4)
doc_root.setAttribute("xmlns:srv", NAMESPACE_URI5)
doc_root.setAttribute("xmlns:xlink", NAMESPACE_URI6)
############################
# Identification Info
############################
# Citation
########################
identificationIdent1 = add_element(doc, "gmd:identificationInfo", doc_root)
dataIdent1MD = add_element(doc, "gmd:MD_DataIdentification", identificationIdent1)
citationIdent1 = add_element(doc, "gmd:citation", dataIdent1MD)
citationIdent1CI = add_element(doc, "gmd:CI_Citation", citationIdent1)
titleIdent1 = add_element(doc, "gmd:title", citationIdent1CI)
add_element(doc, "gco:CharacterString", titleIdent1, self.id_resourceTitle_ln.text())
identifierIdent1 = add_element(doc, "gmd:identifier", citationIdent1CI)
identifierIdent1RS = add_element(doc, "gmd:RS_Identifier", identifierIdent1)
codeIdent = add_element(doc, "gmd:code", identifierIdent1RS)
add_element(doc, "gco:CharacterString", codeIdent, self.id_resourceIdent_ln.text())
dateIdent1 = add_element(doc, "gmd:date", citationIdent1CI)
dateIdent1CI = add_element(doc, "gmd:CI_Date", dateIdent1)
dateIdent2 = add_element(doc, "gmd:date", dateIdent1CI)
add_element(doc, "gco:Date", dateIdent2, QtCore.QDate.currentDate().toString(QtCore.Qt.ISODate))
dateTypeIdent1 = add_element(doc, "gmd:dateType", dateIdent1CI)
dateTypeCodeIdent1CI = add_element(doc, "gmd:CI_DateTypeCode", dateTypeIdent1, "publication")
dateTypeCodeIdent1CI.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableS"
+ "tandards/ISO_19139_Schemas/resources/codelist/ML_gmxCodeli"
+ "sts.xml#CI_DateTypeCode")
dateTypeCodeIdent1CI.setAttribute("codeListValue","publication")
dateIdent3 = add_element(doc, "gmd:date", citationIdent1CI)
dateIdent2CI = add_element(doc, "gmd:CI_Date", dateIdent3)
dateIdent4 = add_element(doc, "gmd:date", dateIdent2CI)
add_element(doc, "gco:Date", dateIdent4, self.tr_dateRevision_do2.date().toString(QtCore.Qt.ISODate))
dateTypeIdent2 = add_element(doc, "gmd:dateType", dateIdent2CI)
dateTypeCodeIdent2CI = add_element(doc, "gmd:CI_DateTypeCode", dateTypeIdent2, "revision")
dateTypeCodeIdent2CI.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableS"
+ "tandards/ISO_19139_Schemas/resources/codelist/ML_gmxCodeli"
+ "sts.xml#CI_DateTypeCode")
dateTypeCodeIdent2CI.setAttribute("codeListValue","revision")
dateIdent5 = add_element(doc, "gmd:date", citationIdent1CI)
dateIdent3CI = add_element(doc, "gmd:CI_Date", dateIdent5)
dateIdent6 = add_element(doc, "gmd:date", dateIdent3CI)
add_element(doc, "gco:Date", dateIdent6, self.tr_dateCreation_do3.date().toString(QtCore.Qt.ISODate))
dateTypeIdent3 = add_element(doc, "gmd:dateType", dateIdent3CI)
dateTypeCodeIdent3CI = add_element(doc, "gmd:CI_DateTypeCode", dateTypeIdent3, "creation")
dateTypeCodeIdent3CI.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableS"
+ "tandards/ISO_19139_Schemas/resources/codelist/ML_gmxCodeli"
+ "sts.xml#CI_DateTypeCode")
dateTypeCodeIdent3CI.setAttribute("codeListValue","creation")
########################
# Abstract
########################
abstractIdent = add_element(doc, "gmd:abstract", dataIdent1MD)
add_element(doc, "gco:CharacterString", abstractIdent, self.id_resourceAbstract_ta.toPlainText())
#######################
# Topics
#######################
for key, value in self.topic_dict.items():
if key.isChecked():
topicIdent = add_element(doc, "gmd:topicCategory", dataIdent1MD)
add_element(doc, "gmd:MD_TopicCategoryCode", topicIdent, value)
#######################
# Keywords
#######################
descriptiveKeywordIdent1 = add_element(doc, "gmd:descriptiveKeywords", dataIdent1MD)
keywordIdent1MD = add_element(doc, "gmd:MD_Keywords", descriptiveKeywordIdent1)
for key, value in self.ceos_science_keywords.items():
if key.isChecked():
keywordIdent1 = add_element(doc, "gmd:keyword", keywordIdent1MD)
add_element(doc, "gco:CharacterString", keywordIdent1, value)
thesaurusKeywordIdent1 = add_element(doc, "gmd:thesaurusName", keywordIdent1MD)
citationKeywordIdent1CI = add_element(doc, "gmd:CI_Citation", thesaurusKeywordIdent1)
titleKeywordIdent1 = add_element(doc, "gmd:title", citationKeywordIdent1CI)
add_element(doc, "gco:CharacterString", titleKeywordIdent1, "NASA/Global Change Master Director"
+ "y (GCMD) Earth Science Keywords. Version 8.0.0.0.0")
dateIdent7 = add_element(doc, "gmd:date", citationKeywordIdent1CI)
dateIdent4CI = add_element(doc, "gmd:CI_Date", dateIdent7)
dateIdent8 = add_element(doc, "gmd:date", dateIdent4CI)
add_element(doc, "gco:Date", dateIdent8, "2015-02-20")
dateTypeIdent4 = add_element(doc, "gmd:dateType", dateIdent4CI)
dateTypeCodeIdent4CI = add_element(doc, "gmd:CI_DateTypeCode", dateTypeIdent4, "revision")
dateTypeCodeIdent4CI.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableS"
+ "tandards/ISO_19139_Schemas/resources/codelist/ML_gmxCodeli"
+ "sts.xml#CI_DateTypeCode")
dateTypeCodeIdent4CI.setAttribute("codeListValue", "revision")
#######################
# Location Extent
#######################
extentIdent1 = add_element(doc, "gmd:extent", dataIdent1MD)
extentIdent1EX = add_element(doc, "gmd:EX_Extent", extentIdent1)
extentDescription = add_element(doc, "gmd:description", extentIdent1EX)
add_element(doc, "gco:CharacterString", extentDescription, self.gl_details_rl2.currentText())
geographicIdent1 = add_element(doc, "gmd:geographicElement", extentIdent1EX)
geographicIdent1EX = add_element(doc, "gmd:EX_GeographicBoundingBox", geographicIdent1)
westBoundIdent = add_element(doc, "gmd:westBoundLongitude", geographicIdent1EX)
add_element(doc, "gco:Decimal", westBoundIdent, self.gl_westBound_ln.text())
eastBoundIdent = add_element(doc, "gmd:eastBoundLongitude", geographicIdent1EX)
add_element(doc, "gco:Decimal", eastBoundIdent, self.gl_eastBound_ln.text())
northBoundIdent = add_element(doc, "gmd:northBoundLatitude", geographicIdent1EX)
add_element(doc, "gco:Decimal", northBoundIdent, self.gl_northBound_ln.text())
southBoundIdent = add_element(doc, "gmd:southBoundLatitude", geographicIdent1EX)
add_element(doc, "gco:Decimal", southBoundIdent, self.gl_southBound_ln.text())
#######################
# Temporal Extent
#######################
temporalIdent1 = add_element(doc, "gmd:temporalElement", extentIdent1EX)
temporalIdent1EX = add_element(doc, "gmd:EX_TemporalExtent", temporalIdent1)
extentIdent3 = add_element(doc, "gmd:extent", temporalIdent1EX)
periodIdent1 = add_element(doc, "gml:TimePeriod", extentIdent3)
periodIdent1.setAttribute("gml:id","extent0")
add_element(doc, "gml:beginPosition", periodIdent1, self.tr_dateStart_do4.date().
toString(QtCore.Qt.ISODate))
add_element(doc, "gml:endPosition", periodIdent1, self.tr_dateEnd_do5.date().
toString(QtCore.Qt.ISODate))
if self.tr_tpex > 0:
for i, _ in enumerate(self.tr_dtSt_1):
temporalIdent1 = add_element(doc, "gmd:temporalElement", extentIdent1EX)
temporalIdent1EX = add_element(doc, "gmd:EX_TemporalExtent", temporalIdent1)
extentIdent3 = add_element(doc, "gmd:extent", temporalIdent1EX)
periodIdent1 = add_element(doc, "gml:TimePeriod", extentIdent3)
periodIdent1.setAttribute("gml:id","extent" + str(i + 1))
add_element(doc, "gml:beginPosition", periodIdent1, self.tr_dtSt_1[i].date().
toString(QtCore.Qt.ISODate))
add_element(doc, "gml:endPosition", periodIdent1, self.tr_dtEd_1[i].date().
toString(QtCore.Qt.ISODate))
#######################
# Spatial Resolution
#######################
resolutionIdent1 = add_element(doc, "gmd:spatialResolution", dataIdent1MD)
resolutionIdent1MD = add_element(doc, "gmd:MD_Resolution", resolutionIdent1)
if self.gl_resolution_rl1.currentText() == 'Distance':
distanceIdent1 = add_element(doc, "gmd:distance", resolutionIdent1MD)
distanceIdent2 = add_element(doc, "gco:Distance", distanceIdent1, self.gl_resolution_ln.text())
if self.gl_unit_rl.currentText() == "Make a choice...":
distanceIdent2.setAttribute("uom", "")
else:
distanceIdent2.setAttribute("uom", self.unit_dict[self.gl_unit_rl.currentText()])
elif self.gl_resolution_rl1.currentText() == 'Scale':
scaleIdent1 = add_element(doc, "gmd:equivalentScale", resolutionIdent1MD)
fractionIdent1MD = add_element(doc, "gmd:MD_RepresentativeFraction", scaleIdent1)
denominatorIdent1 = add_element(doc, "gmd:denominator", fractionIdent1MD)
add_element(doc, "gco:Integer", denominatorIdent1, self.gl_resolution_ln.text())
########################
# Language
########################
languageIdent = add_element(doc, "gmd:language", dataIdent1MD)
languageIdent2 = add_element(doc, "gmd:LanguageCode", languageIdent, self.language_dict[self.id_resourceLang_rl2.currentText()])
languageIdent2.setAttribute("codeList", "http://www.loc.gov/standards/iso639-2/")
languageIdent2.setAttribute("codeListValue", self.language_dict[self.id_resourceLang_rl2.currentText()])
#######################
# Resource Constraints
#######################
constraintIdent1 = add_element(doc, "gmd:resourceConstraints", dataIdent1MD)
constraintIdent1MD = add_element(doc, "gmd:MD_Constraints", constraintIdent1)
useIdent1 = add_element(doc, "gmd:useLimitation", constraintIdent1MD)
add_element(doc, "gco:CharacterString", useIdent1, self.au_conditions_ta.toPlainText())
if self.au_wn_2 > 0:
for i, _ in enumerate(self.au_wn_con_ta):
useIdent1 = add_element(doc, "gmd:useLimitation", constraintIdent1MD)
add_element(doc, "gco:CharacterString", useIdent1, self.au_wn_con_ta[i].toPlainText())
constraintIdent2 = add_element(doc, "gmd:resourceConstraints", dataIdent1MD)
legalIdent1MD = add_element(doc, "gmd:MD_LegalConstraints", constraintIdent2)
accessIdent1 = add_element(doc, "gmd:accessConstraints", legalIdent1MD)
restrictionIdent1MD = add_element(doc, "gmd:MD_RestrictionCode", accessIdent1, "otherRestrictions")
restrictionIdent1MD.setAttribute("codeList","http://standards.iso.org/ittf/PubliclyAvailableSta"
+ "ndards/ISO_19139_Schemas/resources/Codelist/gmxCodelists.xm"
+ "l#MD_RestrictionCode")
restrictionIdent1MD.setAttribute("codeListValue","otherRestrictions")
accessIdent2 = add_element(doc, "gmd:otherConstraints", legalIdent1MD)
add_element(doc, "gco:CharacterString", accessIdent2, self.au_limitations_ta.toPlainText())
if self.au_wn_1 > 0:
for i, _ in enumerate(self.au_wn_lim_ta):
accessIdent2 = add_element(doc, "gmd:otherConstraints", legalIdent1MD)
add_element(doc, "gco:CharacterString", accessIdent2, self.au_wn_lim_ta[i].toPlainText())
#######################
# Resource Contacts
#######################
contactIdent1 = add_element(doc, "gmd:pointOfContact", dataIdent1MD)
responsibleIdent1CI = add_element(doc, "gmd:CI_ResponsibleParty", contactIdent1)
organisationIdent1 = add_element(doc, "gmd:organisationName", responsibleIdent1CI)
add_element(doc, "gco:CharacterString", organisationIdent1, self.ro_responsibleParty_ln.text())
contactIdent2 = add_element(doc, "gmd:contactInfo", responsibleIdent1CI)
responsibleIdent2CI = add_element(doc, "gmd:CI_Contact", contactIdent2)
addressIdent1 = add_element(doc, "gmd:address", responsibleIdent2CI)
addressIdent1CI = add_element(doc, "gmd:CI_Address", addressIdent1)
emailIdent1 = add_element(doc, "gmd:electronicMailAddress", addressIdent1CI)
add_element(doc, "gco:CharacterString", emailIdent1, self.ro_responsibleEmail_ln.text())
roleIdent1 = add_element(doc, "gmd:role", responsibleIdent1CI)
roleCodeIdent1 = add_element(doc, "gmd:CI_RoleCode", roleIdent1, self.role_dict[self.ro_responsibleRole_rl1.currentText()])
roleCodeIdent1.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableStandar"
+ "ds/ISO_19139_Schemas/resources/Codelist/gmxCodelists.xml#CI_Role"
+ "Code")
roleCodeIdent1.setAttribute("codeListValue", self.role_dict[self.ro_responsibleRole_rl1.currentText()])
if self.ro_roPy > 0:
for i, _ in enumerate(self.ro_rlPy_ln):
contactIdent1 = add_element(doc, "gmd:pointOfContact", dataIdent1MD)
responsibleIdent1CI = add_element(doc, "gmd:CI_ResponsibleParty", contactIdent1)
organisationIdent1 = add_element(doc, "gmd:organisationName", responsibleIdent1CI)
add_element(doc, "gco:CharacterString", organisationIdent1, self.ro_rlPy_ln[i].text())
contactIdent2 = add_element(doc, "gmd:contactInfo", responsibleIdent1CI)
responsibleIdent2CI = add_element(doc, "gmd:CI_Contact", contactIdent2)
addressIdent1 = add_element(doc, "gmd:address", responsibleIdent2CI)
addressIdent1CI = add_element(doc, "gmd:CI_Address", addressIdent1)
emailIdent1 = add_element(doc, "gmd:electronicMailAddress", addressIdent1CI)
add_element(doc, "gco:CharacterString", emailIdent1, self.ro_rlEm_ln[i].text())
roleIdent1 = add_element(doc, "gmd:role", responsibleIdent1CI)
roleCodeIdent1 = add_element(doc, "gmd:CI_RoleCode", roleIdent1, self.role_dict[self.ro_rlRl_ln[i].currentText()])
roleCodeIdent1.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailabl"
+ "eStandards/ISO_19139_Schemas/resources/Codelist/gmxCodel"
+ "ists.xml#CI_RoleCode")
roleCodeIdent1.setAttribute("codeListValue", self.role_dict[self.ro_rlRl_ln[i].currentText()])
############################
# Hierarchy Level
############################
hierarchyLevel1 = add_element(doc, "gmd:hierarchyLevel", doc_root)
        scopeLevel1MD = add_element(doc, "gmd:MD_ScopeCode", hierarchyLevel1,
                                    self.id_resourceType_rl1.currentText().lower())
scopeLevel1MD.setAttribute("codeList","http://standards.iso.org/ittf/PubliclyAvailableStandards"
+ "/ISO_19139_Schemas/resources/Codelist/gmxCodelists.xml#MD_ScopeCode")
scopeLevel1MD.setAttribute("codeListValue", self.id_resourceType_rl1.currentText().lower())
############################
# Distribution Info
############################
distributionInfo1 = add_element(doc, "gmd:distributionInfo", doc_root)
distributionInfo1MD = add_element(doc, "gmd:MD_Distribution", distributionInfo1)
transferInfo1 = add_element(doc, "gmd:transferOptions", distributionInfo1MD)
transferInfo1MD = add_element(doc, "gmd:MD_DigitalTransferOptions", transferInfo1)
onlineInfo1 = add_element(doc, "gmd:onLine", transferInfo1MD)
onlineInfo1CI = add_element(doc, "gmd:CI_OnlineResource", onlineInfo1)
linkageInfo1 = add_element(doc, "gmd:linkage", onlineInfo1CI)
add_element(doc, "gmd:URL", linkageInfo1, self.id_resourceLocator_ln.text())
############################
# Language Info
############################
languageInfo1 = add_element(doc, "gmd:language", doc_root)
languageInfo2 = add_element(doc, "gmd:LanguageCode", languageInfo1, self.language_dict[self.mm_language_rl1.currentText()])
languageInfo2.setAttribute("codeList","http://www.loc.gov/standards/iso639-2/")
languageInfo2.setAttribute("codeListValue", self.language_dict[self.mm_language_rl1.currentText()])
############################
# Data Quality
############################
stringBuilder = []
for i in range(len(self.qv_insitu_tab_1)):
stringBuilder.append(save_statement_insitu(self, i))
for i in range(len(self.qv_imagery_tab_1)):
stringBuilder.append(save_statement_imagery(self, i))
statement = ''.join(stringBuilder)
qualityInfo1 = add_element(doc, "gmd:dataQualityInfo", doc_root)
dataQuality1DQ = add_element(doc, "gmd:DQ_DataQuality", qualityInfo1)
lineageQuality1 = add_element(doc, "gmd:lineage", dataQuality1DQ)
lineageQuality1LI = add_element(doc, "gmd:LI_Lineage", lineageQuality1)
statementQuality1 = add_element(doc, "gmd:statement", lineageQuality1LI)
add_element(doc, "gco:CharacterString", statementQuality1, statement)
reportQuality1 = add_element(doc, "gmd:report", dataQuality1DQ)
domainConsistency1DQ = add_element(doc, "gmd:DQ_DomainConsistency", reportQuality1)
resultQuality1 = add_element(doc, "gmd:result", domainConsistency1DQ)
conformanceResult1DQ = add_element(doc, "gmd:DQ_ConformanceResult", resultQuality1)
specificationQuality1 = add_element(doc, "gmd:specification", conformanceResult1DQ)
citationQuality1CI = add_element(doc, "gmd:CI_Citation", specificationQuality1)
titleQuality1 = add_element(doc, "gmd:title", citationQuality1CI)
add_element(doc, "gco:CharacterString", titleQuality1, "COMMISSION REGULATION (EC) No 1205/2008"
+ " of 3 December 2008 implementing Directive 2007/2/EC of the European Parliament "
+ "and of the Council as regards metadata")
dateQuality1 = add_element(doc, "gmd:date", citationQuality1CI)
dateQuality1CI = add_element(doc, "gmd:CI_Date", dateQuality1)
dateQuality2 = add_element(doc, "gmd:date", dateQuality1CI)
add_element(doc, "gco:Date", dateQuality2, "2008-12-04")
dateType1 = add_element(doc, "gmd:dateType", dateQuality1CI)
dateTypeCode1 = add_element(doc, "gmd:CI_DateTypeCode", dateType1, "publication")
dateTypeCode1.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableStandard"
+ "s/ISO_19139_Schemas/resources/codelist/ML_gmxCodelists.xml#CI_Dat"
+ "eTypeCode")
dateTypeCode1.setAttribute("codeListValue", "publication")
passQuality1 = add_element(doc, "gmd:pass", conformanceResult1DQ)
add_element(doc, "gco:Boolean", passQuality1, "True")
############################
# Aircraft and Instruments
############################
acquisitionInfo1 = add_element(doc, "gmd:acquisitionInfo", doc_root)
for sublist in self.aircraft_list:
aircraftInfo1 = add_element(doc, "gmd:platformInfo", acquisitionInfo1)
aircraftInfo1AI = add_element(doc, "gmd:PI_PlatformInfo", aircraftInfo1)
aircraftManufacturer = add_element(doc, "gmd:platformManufacturer", aircraftInfo1AI)
add_element(doc, "gco:CharacterString", aircraftManufacturer, sublist[0])
aircraftType = add_element(doc, "gmd:platformType", aircraftInfo1AI)
add_element(doc, "gco:CharacterString", aircraftType, sublist[1])
aircraftOperator = add_element(doc, "gmd:platformOperator", aircraftInfo1AI)
add_element(doc, "gco:CharacterString", aircraftOperator, sublist[2])
aircraftCountry = add_element(doc, "gmd:platformCountry", aircraftInfo1AI)
add_element(doc, "gco:CharacterString", aircraftCountry, sublist[3])
aircraftRegistration = add_element(doc, "gmd:platformRegistration", aircraftInfo1AI)
add_element(doc, "gco:CharacterString", aircraftRegistration, sublist[4])
for i in range(0, len(self.instModel_list)):
instrumentInfo1 = add_element(doc, "gmd:instrumentInfo", acquisitionInfo1)
instrumentInfo1II = add_element(doc, "gmd:II_InstrumentInfo", instrumentInfo1)
instrumentManufacturer = add_element(doc, "gmd:instrumentManufacturer", instrumentInfo1II)
instrumentType = add_element(doc, "gmd:instrumentType", instrumentInfo1II)
add_element(doc, "gco:CharacterString", instrumentManufacturer, self.instManufacturer_list[i])
add_element(doc, "gco:CharacterString", instrumentType, self.instModel_list[i])
############################
# Contact Info
############################
contactContact1 = add_element(doc, "gmd:contact", doc_root)
responsiblePartyInfo1CI = add_element(doc, "gmd:CI_ResponsibleParty", contactContact1)
nameContact1 = add_element(doc, "gmd:organisationName", responsiblePartyInfo1CI)
add_element(doc, "gco:CharacterString", nameContact1, self.mm_contactName_ln.text())
infoContact1 = add_element(doc, "gmd:contactInfo", responsiblePartyInfo1CI)
infoContact1CI = add_element(doc, "gmd:CI_Contact", infoContact1)
addressContact1 = add_element(doc, "gmd:address", infoContact1CI)
addressContact1CI = add_element(doc, "gmd:CI_Address", addressContact1)
emailContact1 = add_element(doc, "gmd:electronicMailAddress", addressContact1CI)
add_element(doc, "gco:CharacterString", emailContact1, self.mm_contactEmail_ln.text())
roleContact1 = add_element(doc, "gmd:role", responsiblePartyInfo1CI)
roleCodeContact1 = add_element(doc, "gmd:CI_RoleCode", roleContact1, "pointOfContact")
roleCodeContact1.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvailableStand"
+ "ards/ISO_19139_Schemas/resources/Codelist/gmxCodelists.xml#CI_"
+ "RoleCode")
roleCodeContact1.setAttribute("codeListValue", "pointOfContact")
if self.mm_pofc > 0:
for i, _ in enumerate(self.mm_conName_ln):
contactContact1 = add_element(doc, "gmd:contact", doc_root)
responsiblePartyInfo1CI = add_element(doc, "gmd:CI_ResponsibleParty", contactContact1)
nameContact1 = add_element(doc, "gmd:organisationName", responsiblePartyInfo1CI)
add_element(doc, "gco:CharacterString", nameContact1, self.mm_conName_ln[i].text())
infoContact1 = add_element(doc, "gmd:contactInfo", responsiblePartyInfo1CI)
infoContact1CI = add_element(doc, "gmd:CI_Contact", infoContact1)
addressContact1 = add_element(doc, "gmd:address", infoContact1CI)
addressContact1CI = add_element(doc, "gmd:CI_Address", addressContact1)
emailContact1 = add_element(doc, "gmd:electronicMailAddress", addressContact1CI)
add_element(doc, "gco:CharacterString", emailContact1, self.mm_conEmail_ln[i].text())
roleContact1 = add_element(doc, "gmd:role", responsiblePartyInfo1CI)
roleCodeContact1 = add_element(doc, "gmd:CI_RoleCode", roleContact1, "pointOfContact")
roleCodeContact1.setAttribute("codeList", "http://standards.iso.org/ittf/PubliclyAvaila"
+ "bleStandards/ISO_19139_Schemas/resources/Codelist/gmxC"
+ "odelists.xml#CI_RoleCode")
roleCodeContact1.setAttribute("codeListValue", "pointOfContact")
############################
# Metadata Date
############################
dateStamp1 = add_element(doc, "gmd:dateStamp", doc_root)
add_element(doc, "gco:Date", dateStamp1, self.mm_date_do1.date().toString(QtCore.Qt.ISODate))
############################
# File Creation
############################
        with open(out_file_name, 'wb') as out_file:
            out_file.write(doc.toprettyxml(indent="    ", encoding="UTF-8"))
self.saved = True
self.modified = False
logging.debug('eufar_metadata_xml.py - create_eufar_xml - finished.')
def read_eufar_xml(self, in_file_name):
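        """Parse an EUFAR ISO 19139 XML file with xml.dom.minidom and push its
        content back into the form widgets, tab by tab; counterpart of
        create_eufar_xml()."""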
logging.debug('eufar_metadata_xml.py - read_eufar_xml - starting...')
currentIndex = self.tabWidget.currentIndex()
        with open(in_file_name, 'r') as in_file:
            doc = xml.dom.minidom.parse(in_file)
############################
# Identification Info
############################
# Citation
########################
doc_root = get_element(doc, "gmd:MD_Metadata")
identificationIdent1 = get_element(doc_root, "gmd:identificationInfo")
dataIdent1MD = get_element(identificationIdent1, "gmd:MD_DataIdentification")
citationIdent1 = get_element(dataIdent1MD, "gmd:citation")
citationIdent1CI = get_element(citationIdent1, "gmd:CI_Citation")
titleIdent1 = get_element(citationIdent1CI, "gmd:title")
set_text_value(self.id_resourceTitle_ln, titleIdent1, "gco:CharacterString")
self.id_resourceTitle_ln.setCursorPosition(0)
identifierIdent1 = get_element(citationIdent1CI, "gmd:identifier")
identifierIdent1RS = get_element(identifierIdent1, "gmd:RS_Identifier")
codeIdent = get_element(identifierIdent1RS, "gmd:code")
set_text_value(self.id_resourceIdent_ln, codeIdent, "gco:CharacterString")
self.id_resourceIdent_ln.setCursorPosition(0)
nodes = citationIdent1CI.getElementsByTagName("gmd:CI_Date")
elements = []
for node in nodes:
tmp1 = get_element(node, "gmd:date")
elements.append(get_element_value(tmp1, "gco:Date"))
self.tr_dateRevision_do2.setDate(QtCore.QDate.fromString(elements[1], QtCore.Qt.ISODate))
self.tr_dateCreation_do3.setDate(QtCore.QDate.fromString(elements[2], QtCore.Qt.ISODate))
########################
# Abstract
########################
abstractIdent = get_element(dataIdent1MD, "gmd:abstract")
set_plainText_value(self.id_resourceAbstract_ta, abstractIdent, "gco:CharacterString")
#######################
# Topics
#######################
nodes = doc.getElementsByTagName("gmd:topicCategory")
for node in nodes:
topic = get_element_value(node, "gmd:MD_TopicCategoryCode")
reverseDictionary(topic, self.topic_dict).setChecked(True)
#######################
# Temporal Extent
#######################
self.tabWidget.setCurrentIndex(5)
nodes = doc_root.getElementsByTagName("gml:TimePeriod")
for i, node in enumerate(nodes):
if i == 0:
self.tr_dateStart_do4.setDate(QtCore.QDate.fromString(get_element_value(node, "gml:beginPosition"),
QtCore.Qt.ISODate))
self.tr_dateEnd_do5.setDate(QtCore.QDate.fromString(get_element_value(node, "gml:endPosition"),
QtCore.Qt.ISODate))
else:
button_functions.plusButton_3_clicked(self)
self.tr_dtSt_1[i-1].setDate(QtCore.QDate.fromString(get_element_value(node, "gml:beginPosition"),
QtCore.Qt.ISODate))
self.tr_dtEd_1[i-1].setDate(QtCore.QDate.fromString(get_element_value(node, "gml:endPosition"),
QtCore.Qt.ISODate))
#######################
# Resource Constraints
#######################
self.tabWidget.setCurrentIndex(7)
nodes = doc_root.getElementsByTagName("gmd:useLimitation")
set_plainText_value(self.au_conditions_ta, nodes[0], "gco:CharacterString")
        for i, node in enumerate(nodes[1:]):
            button_functions.plusButton_5_clicked(self)
            set_plainText_value(self.au_wn_con_ta[i], node, "gco:CharacterString")
nodes = doc_root.getElementsByTagName("gmd:otherConstraints")
set_plainText_value(self.au_limitations_ta, nodes[0], "gco:CharacterString")
        for i, node in enumerate(nodes[1:]):
            button_functions.plusButton_6_clicked(self)
            set_plainText_value(self.au_wn_lim_ta[i], node, "gco:CharacterString")
#######################
# Resource Contacts
#######################
self.tabWidget.setCurrentIndex(8)
nodes = doc_root.getElementsByTagName("gmd:pointOfContact")
responsibleIdent1CI = get_element(nodes[0], "gmd:CI_ResponsibleParty")
organisationIdent1 = get_element(responsibleIdent1CI, "gmd:organisationName")
set_text_value(self.ro_responsibleParty_ln, organisationIdent1, "gco:CharacterString")
        contactIdent2 = get_element(responsibleIdent1CI, "gmd:contactInfo")
        responsibleIdent2CI = get_element(contactIdent2, "gmd:CI_Contact")
        addressIdent1 = get_element(responsibleIdent2CI, "gmd:address")
        addressIdent1CI = get_element(addressIdent1, "gmd:CI_Address")
        emailIdent1 = get_element(addressIdent1CI, "gmd:electronicMailAddress")
        set_text_value(self.ro_responsibleEmail_ln, emailIdent1, "gco:CharacterString")
        roleIdent1 = get_element(responsibleIdent1CI, "gmd:role")
combo_text = get_element_value(roleIdent1, "gmd:CI_RoleCode")
self.ro_responsibleRole_rl1.setCurrentIndex(self.ro_responsibleRole_rl1.findText(reverseDictionary(combo_text, self.role_dict)))
        for i, node in enumerate(nodes[1:]):
            button_functions.plusButton_7_clicked(self)
            responsibleIdent1CI = get_element(node, "gmd:CI_ResponsibleParty")
            organisationIdent1 = get_element(responsibleIdent1CI, "gmd:organisationName")
            set_text_value(self.ro_rlPy_ln[i], organisationIdent1, "gco:CharacterString")
            contactIdent2 = get_element(responsibleIdent1CI, "gmd:contactInfo")
            responsibleIdent2CI = get_element(contactIdent2, "gmd:CI_Contact")
            addressIdent1 = get_element(responsibleIdent2CI, "gmd:address")
            addressIdent1CI = get_element(addressIdent1, "gmd:CI_Address")
            emailIdent1 = get_element(addressIdent1CI, "gmd:electronicMailAddress")
            set_text_value(self.ro_rlEm_ln[i], emailIdent1, "gco:CharacterString")
            roleIdent1 = get_element(responsibleIdent1CI, "gmd:role")
            combo_text = get_element_value(roleIdent1, "gmd:CI_RoleCode")
            self.ro_rlRl_ln[i].setCurrentIndex(self.ro_rlRl_ln[i].findText(reverseDictionary(combo_text, self.role_dict)))
#######################
# Keywords
#######################
descriptiveKeywordIdent1 = get_element(dataIdent1MD, "gmd:descriptiveKeywords")
keywordIdent1MD = get_element(descriptiveKeywordIdent1, "gmd:MD_Keywords")
nodes = keywordIdent1MD.getElementsByTagName("gmd:keyword")
for node in nodes:
keyword = get_element_value(node, "gco:CharacterString")
reverseDictionary(keyword, self.ceos_science_keywords).setChecked(True)
#######################
# Location Extent
#######################
extentIdent1 = get_element(dataIdent1MD, "gmd:extent")
extentIdent1EX = get_element(extentIdent1, "gmd:EX_Extent")
extentDescription = get_element(extentIdent1EX, "gmd:description")
combo_text = get_element_value(extentDescription, "gco:CharacterString")
index = 0
if combo_text in self.new_country_code.keys():
index = 2
if combo_text in self.emc_continents:
index = 4
elif combo_text in self.emc_oceans:
index = 3
elif combo_text in self.emc_regions:
index = 4
self.gl_category_rl1.setCurrentIndex(index)
button_functions.gl_categoryRolebox_changed(self)
self.gl_details_rl2.setCurrentIndex(self.gl_details_rl2.findText(combo_text))
geographicIdent1 = get_element(extentIdent1EX, "gmd:geographicElement")
geographicIdent1EX = get_element(geographicIdent1, "gmd:EX_GeographicBoundingBox")
westBoundIdent = get_element(geographicIdent1EX, "gmd:westBoundLongitude")
set_text_value(self.gl_westBound_ln, westBoundIdent, "gco:Decimal")
eastBoundIdent = get_element(geographicIdent1EX, "gmd:eastBoundLongitude")
set_text_value(self.gl_eastBound_ln, eastBoundIdent, "gco:Decimal")
northBoundIdent = get_element(geographicIdent1EX, "gmd:northBoundLatitude")
set_text_value(self.gl_northBound_ln, northBoundIdent, "gco:Decimal")
southBoundIdent = get_element(geographicIdent1EX, "gmd:southBoundLatitude")
set_text_value(self.gl_southBound_ln, southBoundIdent, "gco:Decimal")
#######################
# Spatial Resolution
#######################
resolutionIdent1 = get_element(dataIdent1MD, "gmd:spatialResolution")
resolutionIdent1MD = get_element(resolutionIdent1, "gmd:MD_Resolution")
test = resolutionIdent1MD.getElementsByTagName("gmd:equivalentScale")
if test:
self.gl_resolution_rl1.setCurrentIndex(self.gl_resolution_rl1.findText("Scale"))
button_functions.gl_rolebox_changed(self)
distscaleIdent1 = get_element(resolutionIdent1MD, "gmd:equivalentScale")
fractionIdent1MD = get_element(distscaleIdent1, "gmd:MD_RepresentativeFraction")
denominatorIdent1 = get_element(fractionIdent1MD, "gmd:denominator")
set_text_value(self.gl_resolution_ln, denominatorIdent1, "gco:Integer")
else:
self.gl_resolution_rl1.setCurrentIndex(self.gl_resolution_rl1.findText("Distance"))
button_functions.gl_rolebox_changed(self)
distscaleIdent1 = get_element(resolutionIdent1MD, "gmd:distance")
set_text_value(self.gl_resolution_ln, distscaleIdent1, "gco:Distance")
tmp = distscaleIdent1.getElementsByTagName("gco:Distance")
distanceIdent2 = tmp[0]
dist_attr = distanceIdent2.attributes["uom"].value
self.gl_unit_rl.setCurrentIndex(self.gl_unit_rl.findText(reverseDictionary(dist_attr, self.unit_dict)))
########################
# Language
########################
nodes = doc_root.getElementsByTagName("gmd:language")
combo_text = get_element_value(nodes[0], "gmd:LanguageCode")
self.id_resourceLang_rl2.setCurrentIndex(self.id_resourceLang_rl2.findText(reverseDictionary(combo_text, self.language_dict)))
############################
# Hierarchy Level
############################
hierarchyLevel = get_element(doc_root, "gmd:hierarchyLevel")
combo_text = get_element_value(hierarchyLevel, "gmd:MD_ScopeCode")
self.id_resourceType_rl1.setCurrentIndex(self.id_resourceType_rl1.findText(combo_text.title()))
############################
# Distribution Info
############################
distributionInfo1 = get_element(doc_root, "gmd:distributionInfo")
distributionInfo1MD = get_element(distributionInfo1, "gmd:MD_Distribution")
transferInfo1 = get_element(distributionInfo1MD, "gmd:transferOptions")
transferInfo1MD = get_element(transferInfo1, "gmd:MD_DigitalTransferOptions")
onlineInfo1 = get_element(transferInfo1MD, "gmd:onLine")
onlineInfo1CI = get_element(onlineInfo1, "gmd:CI_OnlineResource")
linkageInfo1 = get_element(onlineInfo1CI, "gmd:linkage")
set_text_value(self.id_resourceLocator_ln, linkageInfo1, "gmd:URL")
self.id_resourceLocator_ln.setCursorPosition(0)
############################
# Language Info
############################
nodes = doc_root.getElementsByTagName("gmd:language")
combo_text = get_element_value(nodes[1], "gmd:LanguageCode")
self.mm_language_rl1.setCurrentIndex(self.mm_language_rl1.findText(reverseDictionary(combo_text, self.language_dict)))
############################
# Contact Info
############################
self.tabWidget.setCurrentIndex(9)
nodes = doc_root.getElementsByTagName("gmd:contact")
responsiblePartyInfo1CI = get_element(nodes[0], "gmd:CI_ResponsibleParty")
nameContact1 = get_element(responsiblePartyInfo1CI, "gmd:organisationName")
set_text_value(self.mm_contactName_ln, nameContact1, "gco:CharacterString")
infoContact1 = get_element(responsiblePartyInfo1CI, "gmd:contactInfo")
infoContact1CI = get_element(infoContact1, "gmd:CI_Contact")
addressContact1 = get_element(infoContact1CI, "gmd:address")
addressContact1CI = get_element(addressContact1, "gmd:CI_Address")
emailContact1 = get_element(addressContact1CI, "gmd:electronicMailAddress")
set_text_value(self.mm_contactEmail_ln, emailContact1, "gco:CharacterString")
        for i, node in enumerate(nodes[1:]):
            button_functions.plusButton_8_clicked(self)
            responsiblePartyInfo1CI = get_element(node, "gmd:CI_ResponsibleParty")
            nameContact1 = get_element(responsiblePartyInfo1CI, "gmd:organisationName")
            set_text_value(self.mm_conName_ln[i], nameContact1, "gco:CharacterString")
            infoContact1 = get_element(responsiblePartyInfo1CI, "gmd:contactInfo")
            infoContact1CI = get_element(infoContact1, "gmd:CI_Contact")
            addressContact1 = get_element(infoContact1CI, "gmd:address")
            addressContact1CI = get_element(addressContact1, "gmd:CI_Address")
            emailContact1 = get_element(addressContact1CI, "gmd:electronicMailAddress")
            set_text_value(self.mm_conEmail_ln[i], emailContact1, "gco:CharacterString")
############################
# Aircraft and Instruments
############################
self.tabWidget.setCurrentIndex(3)
nodes = doc_root.getElementsByTagName("gmd:platformInfo")
for node in nodes:
aircraftInfo11AI = get_element(node, "gmd:PI_PlatformInfo")
aircraftRegistration = get_element(aircraftInfo11AI, "gmd:platformRegistration")
aircraftManufacturer = get_element(aircraftInfo11AI, "gmd:platformManufacturer")
aircraftType = get_element(aircraftInfo11AI, "gmd:platformType")
aircraftOperator = get_element(aircraftInfo11AI, "gmd:platformOperator")
aircraftCountry = get_element(aircraftInfo11AI, "gmd:platformCountry")
manufacturer = get_element_value(aircraftManufacturer, "gco:CharacterString")
aircraft = get_element_value(aircraftType, "gco:CharacterString")
operator = get_element_value(aircraftOperator, "gco:CharacterString")
identification = get_element_value(aircraftRegistration, "gco:CharacterString")
country = get_element_value(aircraftCountry, "gco:CharacterString")
button_functions.plusButton_10_clicked(self, aircraft, operator, manufacturer, identification, country)
nodes = doc_root.getElementsByTagName("gmd:instrumentInfo")
for node in nodes:
instrument1AI = get_element(node, "gmd:II_InstrumentInfo")
instrumentManufacturer = get_element(instrument1AI, "gmd:instrumentManufacturer")
manufacturer = get_element_value(instrumentManufacturer, "gco:CharacterString")
instrumentModel = get_element(instrument1AI, "gmd:instrumentType")
model = get_element_value(instrumentModel, "gco:CharacterString")
button_functions.plusButton_4_clicked(self, manufacturer + " - " + model)
############################
# Data Quality
############################
old_format = 0
self.tabWidget.setCurrentIndex(6)
qualityInfo1 = get_element(doc_root, "gmd:dataQualityInfo")
dataQuality1DQ = get_element(qualityInfo1, "gmd:DQ_DataQuality")
lineageQuality1 = get_element(dataQuality1DQ, "gmd:lineage")
lineageQuality1LI = get_element(lineageQuality1, "gmd:LI_Lineage")
statementQuality1 = get_element(lineageQuality1LI, "gmd:statement")
statement = get_element_value(statementQuality1, "gco:CharacterString")
        if statement:
stringList = []
indexStart = statement.find("[", 0)
if indexStart != -1:
indexEnd = statement.find("]", 0)
stringList.append(statement[indexStart + 1 : indexEnd])
indexStart += 1
indexEnd += 1
while indexStart < len(statement):
indexStart = statement.find("[", indexStart)
if indexStart == -1:
break
indexEnd = statement.find("]", indexEnd)
stringList.append(statement[indexStart + 1 : indexEnd])
indexStart += 1
indexEnd += 1
indexInsitu = 0
indexImagery = 0
for string in stringList:
if "Atmospheric" in string:
button_functions.plusButton_12_clicked(self)
read_statement_insitu(self, string, indexInsitu)
indexInsitu += 1
elif "Earth" in string:
button_functions.plusButton_11_clicked(self)
read_statement_imagery(self, string, indexImagery)
indexImagery += 1
else:
old_format = 1
if "Atmospheric" in statement:
button_functions.plusButton_12_clicked(self)
read_statement_insitu_old(self, statement, 0)
elif "Earth" in statement:
button_functions.plusButton_11_clicked(self)
read_statement_imagery_old(self, statement, 0)
############################
# Metadata Date
############################
dateStamp1 = get_element(doc_root, "gmd:dateStamp")
date = get_element_value(dateStamp1, "gco:Date")
self.mm_date_do1.setDate(QtCore.QDate.fromString(date, QtCore.Qt.ISODate))
self.tabWidget.setCurrentIndex(currentIndex)
if old_format == 1:
self.aboutWindow = MyAbout()
x1, y1, w1, h1 = self.geometry().getRect()
_, _, w2, h2 = self.aboutWindow.geometry().getRect()
self.aboutWindow.setGeometry(x1 + w1/2 - w2/2, y1 + h1/2 - h2/2, w2, h2)
self.aboutWindow.setMinimumSize(QtCore.QSize(450, self.aboutWindow.sizeHint().height()))
self.aboutWindow.setMaximumSize(QtCore.QSize(450, self.aboutWindow.sizeHint().height()))
self.aboutWindow.exec_()
logging.debug('eufar_metadata_xml.py - read_eufar_xml - finished.')
def get_element(parent, element_name):
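    """Return the first descendant of `parent` whose tag matches `element_name`."""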
logging.debug('eufar_metadata_xml.py - get_element')
return parent.getElementsByTagName(element_name)[0]
def get_element_value(parent, element_name):
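    """Return the stripped text content of the first `element_name` descendant
    of `parent`, or None when the element or its text node is missing."""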
logging.debug('eufar_metadata_xml.py - get_element_value')
elements = parent.getElementsByTagName(element_name)
if elements:
element = elements[0]
nodes = element.childNodes
for node in nodes:
if node.nodeType == node.TEXT_NODE:
return node.data.strip()
def set_text_value(text_widget, parent, element_name):
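    """Copy the text content of `element_name` under `parent` into
    `text_widget` via setText(); the widget is left untouched when no value
    is found."""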
logging.debug('eufar_metadata_xml.py - set_text_value')
node_data = get_element_value(parent, element_name)
if node_data:
text_widget.setText(node_data)
def set_plainText_value(text_widget, parent, element_name):
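    """Same as set_text_value(), but for plain-text widgets (setPlainText())."""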
logging.debug('eufar_metadata_xml.py - set_plainText_value')
node_data = get_element_value(parent, element_name)
if node_data:
text_widget.setPlainText(node_data)
def add_element(doc, element_name, parent, value=None):
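    """Create an element named `element_name`, optionally append a text node
    `value` to it, attach it to `parent` and return the new element."""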
logging.debug('eufar_metadata_xml.py - add_element')
new_element = doc.createElement(element_name)
if value:
new_text = doc.createTextNode(value)
new_element.appendChild(new_text)
parent.appendChild(new_element)
return new_element
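# Illustrative sketch (not part of the original workflow): the helpers above
# are typically chained to build nested ISO 19139 fragments, e.g. assuming
# `doc` is an xml.dom.minidom Document and `parent` an existing element:
#     title = add_element(doc, "gmd:title", parent)
#     add_element(doc, "gco:CharacterString", title, "Some dataset title")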
def save_statement_imagery(self, index):
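    """Serialize the index-th 'Earth observation/Remote sensing data' quality
    form into one bracketed statement of the form
    "[<section> <n>|<label>:: <value>|<label>:: <value>|...]", which is stored
    in gmd:statement and parsed back by read_statement_imagery()."""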
logging.debug('eufar_metadata_xml.py - save_statement_imagery - index ' + str(index))
statement = "[Earth observation/Remote sensing data " + str(index + 1) + "|Instrument:: "
if self.qv_imagery_list_2[index].currentText() == "Make a choice...":
instrument = ""
else:
instrument = str(self.qv_imagery_list_2[index].currentText())
statement = statement + instrument + "|Name of calibration laboratory:: "
statement = statement + str(self.qv_imagery_line_1[index].text()) + "|Date of radiometric calibration:: "
statement = statement + str(self.qv_imagery_date_1[index].date().toString(QtCore.Qt.ISODate)) + "|Date of spectral calibration:: "
statement = statement + str(self.qv_imagery_date_2[index].date().toString(QtCore.Qt.ISODate)) + "|Number of spectral bands:: "
    statement = statement + str(self.qv_imagery_line_2[index].text()) + "|Overall heading / flight direction (dd):: "
statement = statement + str(self.qv_imagery_line_3[index].text()) + "|Overall altitude / average height ASL (m):: "
statement = statement + str(self.qv_imagery_line_4[index].text()) + "|Solar zenith (dd):: "
statement = statement + str(self.qv_imagery_line_5[index].text()) + "|Solar azimuth (dd):: "
statement = statement + str(self.qv_imagery_line_6[index].text()) + "|Report anomalies in data acquisition:: "
statement = statement + str(self.qv_imagery_line_7[index].text()) + "|Processing level:: "
if self.qv_imagery_list_1[index].currentText() == "Make a choice...":
choice = ""
else:
choice = self.qv_imagery_list_1[index].currentText()
statement = statement + choice + "|Dark current (DC) correction:: "
statement = statement + getAnswer(self.qv_imagery_check_1[index], self.qv_imagery_check_2[index])
statement = statement + "|Aggregated interpolated pixel mask:: "
statement = statement + getAnswer(self.qv_imagery_check_3[index], self.qv_imagery_check_4[index])
statement = statement + "|Aggregated bad pixel mask:: "
statement = statement + getAnswer(self.qv_imagery_check_5[index], self.qv_imagery_check_6[index])
statement = statement + "|Saturated pixels / overflow:: "
statement = statement + getAnswer(self.qv_imagery_check_7[index], self.qv_imagery_check_8[index])
statement = statement + "|Problems with affected by saturation in spatial/spectral neighbourhood:: "
statement = statement + getAnswer(self.qv_imagery_check_9[index], self.qv_imagery_check_10[index])
statement = statement + "|Problems with position information / Interpolated position information:: "
statement = statement + getAnswer(self.qv_imagery_check_11[index], self.qv_imagery_check_12[index])
statement = statement + "|Problems with attitude information / Interpolated attitude information:: "
statement = statement + getAnswer(self.qv_imagery_check_13[index], self.qv_imagery_check_14[index])
statement = statement + "|Synchronization problems:: "
statement = statement + getAnswer(self.qv_imagery_check_15[index], self.qv_imagery_check_16[index])
statement = statement + "|Interpolated pixels during geocoding:: "
statement = statement + getAnswer(self.qv_imagery_check_17[index], self.qv_imagery_check_18[index])
statement = statement + "|Failure of atmospheric correction:: "
statement = statement + getAnswer(self.qv_imagery_check_19[index], self.qv_imagery_check_20[index])
statement = statement + "|Cloud mask:: "
statement = statement + getAnswer(self.qv_imagery_check_21[index], self.qv_imagery_check_22[index])
statement = statement + "|Cloud shadow mask:: "
statement = statement + getAnswer(self.qv_imagery_check_23[index], self.qv_imagery_check_24[index])
statement = statement + "|Haze mask:: "
statement = statement + getAnswer(self.qv_imagery_check_25[index], self.qv_imagery_check_26[index])
statement = statement + "|Critical terrain correction based on DEM roughness measure:: "
statement = statement + getAnswer(self.qv_imagery_check_27[index], self.qv_imagery_check_28[index])
statement = statement + "|Critical terrain correction based on slope/local illumination angle:: "
statement = statement + getAnswer(self.qv_imagery_check_29[index], self.qv_imagery_check_30[index])
statement = statement + "|Critical BRDF geometry based on sun-sensor-terrain geometry:: "
statement = statement + getAnswer(self.qv_imagery_check_31[index], self.qv_imagery_check_32[index])
statement = statement + "]"
return statement
def save_statement_insitu(self, index):
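    """Serialize the index-th 'Atmospheric/In-situ measurements' quality form
    into one bracketed "label:: value" statement, parsed back by
    read_statement_insitu()."""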
logging.debug('eufar_metadata_xml.py - save_statement_insitu - index ' + str(index))
statement = "[Atmospheric/In-situ measurements " + str(index + 1) + "|Instrument:: "
if self.qv_insitu_list_1[index].currentText() == "Make a choice...":
instrument = ""
else:
instrument = str(self.qv_insitu_list_1[index].currentText())
statement = statement + instrument + "|Link to the procedure's description:: "
statement = statement + str(self.qv_insitu_line_1[index].text()) + "|Source of calibration constants:: "
statement = statement + str(self.qv_insitu_line_2[index].text()) + "|Source of calibration materials:: "
statement = statement + str(self.qv_insitu_line_3[index].text()) + "|Data converted to geophysical units:: "
statement = statement + getAnswer(self.qv_insitu_round_1[index], self.qv_insitu_round_2[index]) + "|Output format:: "
    format_list = []
    if self.qv_insitu_check_1[index].isChecked():
        format_list.append("NetCDF")
    if self.qv_insitu_check_3[index].isChecked():
        format_list.append("HDF")
    if self.qv_insitu_check_2[index].isChecked():
        format_list.append("NASA/Ames")
    if self.qv_insitu_check_4[index].isChecked():
        format_list.append("Other/" + self.qv_insitu_line_4[index].text())
    answer = "; ".join(format_list)
statement = statement + answer + "|Quality-control flagging applied to individual data points:: "
statement = statement + str(self.qv_insitu_area_1[index].toPlainText()) + "|Assumption:: "
statement = statement + str(self.qv_insitu_area_2[index].toPlainText()) + "]"
return statement
def read_statement_imagery(self, statement, index):
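    """Split one imagery quality statement on its "::" and "|" separators and
    push the recovered values into the index-th set of imagery widgets."""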
logging.debug('eufar_metadata_xml.py - read_statement_imagery - index ' + str(index))
stringList = []
indexStart = statement.find("::", 40)
indexEnd = statement.find("|", 40)
stringList.append(statement[indexStart + 3 : indexEnd])
indexStart += 1
indexEnd += 1
while indexStart < len(statement):
indexStart = statement.find("::", indexStart)
if indexStart == -1:
break
indexEnd = statement.find("|", indexEnd)
if indexEnd == -1:
stringList.append(statement[indexStart + 3 :])
else:
stringList.append(statement[indexStart + 3 : indexEnd])
indexStart += 1
indexEnd += 1
if self.qv_imagery_list_2[index].findText(stringList[0]) == -1:
self.qv_imagery_list_2[index].setCurrentIndex(0)
else:
self.qv_imagery_list_2[index].setCurrentIndex(self.qv_imagery_list_2[index].findText(stringList[0]))
self.qv_imagery_line_1[index].setText(stringList[1])
self.qv_imagery_date_1[index].setDate(QtCore.QDate.fromString(stringList[2], QtCore.Qt.ISODate))
self.qv_imagery_date_2[index].setDate(QtCore.QDate.fromString(stringList[3], QtCore.Qt.ISODate))
self.qv_imagery_line_2[index].setText(stringList[4])
self.qv_imagery_line_3[index].setText(stringList[5])
self.qv_imagery_line_4[index].setText(stringList[6])
self.qv_imagery_line_5[index].setText(stringList[7])
self.qv_imagery_line_6[index].setText(stringList[8])
self.qv_imagery_line_7[index].setText(stringList[9])
if self.qv_imagery_list_1[index].findText(stringList[10]) == -1:
self.qv_imagery_list_1[index].setCurrentIndex(0)
else:
self.qv_imagery_list_1[index].setCurrentIndex(self.qv_imagery_list_1[index].findText(stringList[10]))
pushAnswer(self.qv_imagery_check_1[index], self.qv_imagery_check_2[index], stringList[11])
pushAnswer(self.qv_imagery_check_3[index], self.qv_imagery_check_4[index], stringList[12])
pushAnswer(self.qv_imagery_check_5[index], self.qv_imagery_check_6[index], stringList[13])
pushAnswer(self.qv_imagery_check_7[index], self.qv_imagery_check_8[index], stringList[14])
pushAnswer(self.qv_imagery_check_9[index], self.qv_imagery_check_10[index], stringList[15])
pushAnswer(self.qv_imagery_check_11[index], self.qv_imagery_check_12[index], stringList[16])
pushAnswer(self.qv_imagery_check_13[index], self.qv_imagery_check_14[index], stringList[17])
pushAnswer(self.qv_imagery_check_15[index], self.qv_imagery_check_16[index], stringList[18])
pushAnswer(self.qv_imagery_check_17[index], self.qv_imagery_check_18[index], stringList[19])
pushAnswer(self.qv_imagery_check_19[index], self.qv_imagery_check_20[index], stringList[20])
pushAnswer(self.qv_imagery_check_21[index], self.qv_imagery_check_22[index], stringList[21])
pushAnswer(self.qv_imagery_check_23[index], self.qv_imagery_check_24[index], stringList[22])
pushAnswer(self.qv_imagery_check_25[index], self.qv_imagery_check_26[index], stringList[23])
pushAnswer(self.qv_imagery_check_27[index], self.qv_imagery_check_28[index], stringList[24])
pushAnswer(self.qv_imagery_check_29[index], self.qv_imagery_check_30[index], stringList[25])
pushAnswer(self.qv_imagery_check_31[index], self.qv_imagery_check_32[index], stringList[26])
def read_statement_imagery_old(self, statement, index):
logging.debug('eufar_metadata_xml.py - read_statement_imagery_old - index ' + str(index))
stringList = []
indexStart = statement.find(":", 40)
indexEnd = statement.find("|", 40)
stringList.append(statement[indexStart + 2 : indexEnd])
indexStart += 1
indexEnd += 1
while indexStart < len(statement):
indexStart = statement.find(":", indexStart)
if indexStart == -1:
break
indexEnd = statement.find("|", indexEnd)
if indexEnd == -1:
stringList.append(statement[indexStart + 2 :])
else:
stringList.append(statement[indexStart + 2 : indexEnd])
indexStart += 1
indexEnd += 1
self.qv_imagery_line_1[index].setText(stringList[0])
self.qv_imagery_date_1[index].setDate(QtCore.QDate.fromString(stringList[1], QtCore.Qt.ISODate))
self.qv_imagery_date_2[index].setDate(QtCore.QDate.fromString(stringList[2], QtCore.Qt.ISODate))
self.qv_imagery_line_2[index].setText(stringList[3])
self.qv_imagery_line_3[index].setText(stringList[4])
self.qv_imagery_line_4[index].setText(stringList[5])
self.qv_imagery_line_5[index].setText(stringList[6])
self.qv_imagery_line_6[index].setText(stringList[7])
self.qv_imagery_line_7[index].setText(stringList[8])
if self.qv_imagery_list_1[index].findText(stringList[9]) == -1:
self.qv_imagery_list_1[index].setCurrentIndex(0)
else:
self.qv_imagery_list_1[index].setCurrentIndex(self.qv_imagery_list_1[index].findText(stringList[9]))
pushAnswer(self.qv_imagery_check_1[index], self.qv_imagery_check_2[index], stringList[10])
pushAnswer(self.qv_imagery_check_3[index], self.qv_imagery_check_4[index], stringList[11])
pushAnswer(self.qv_imagery_check_5[index], self.qv_imagery_check_6[index], stringList[12])
pushAnswer(self.qv_imagery_check_7[index], self.qv_imagery_check_8[index], stringList[13])
pushAnswer(self.qv_imagery_check_9[index], self.qv_imagery_check_10[index], stringList[14])
pushAnswer(self.qv_imagery_check_11[index], self.qv_imagery_check_12[index], stringList[15])
pushAnswer(self.qv_imagery_check_13[index], self.qv_imagery_check_14[index], stringList[16])
pushAnswer(self.qv_imagery_check_15[index], self.qv_imagery_check_16[index], stringList[17])
pushAnswer(self.qv_imagery_check_17[index], self.qv_imagery_check_18[index], stringList[18])
pushAnswer(self.qv_imagery_check_19[index], self.qv_imagery_check_20[index], stringList[19])
pushAnswer(self.qv_imagery_check_21[index], self.qv_imagery_check_22[index], stringList[20])
pushAnswer(self.qv_imagery_check_23[index], self.qv_imagery_check_24[index], stringList[21])
pushAnswer(self.qv_imagery_check_25[index], self.qv_imagery_check_26[index], stringList[22])
pushAnswer(self.qv_imagery_check_27[index], self.qv_imagery_check_28[index], stringList[23])
pushAnswer(self.qv_imagery_check_29[index], self.qv_imagery_check_30[index], stringList[24])
pushAnswer(self.qv_imagery_check_31[index], self.qv_imagery_check_32[index], stringList[25])
def read_statement_insitu(self, statement, index):
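    """Split one in-situ quality statement on its "::" and "|" separators and
    push the recovered values into the index-th set of in-situ widgets."""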
logging.debug('eufar_metadata_xml.py - read_statement_insitu - index ' + str(index))
stringList = []
indexStart = statement.find("::", 35)
indexEnd = statement.find("|", 35)
stringList.append(statement[indexStart + 3 : indexEnd])
indexStart += 1
indexEnd += 1
while indexStart < len(statement):
indexStart = statement.find("::", indexStart)
if indexStart == -1:
break
indexEnd = statement.find("|", indexEnd)
if indexEnd == -1:
stringList.append(statement[indexStart + 3 :])
else:
stringList.append(statement[indexStart + 3 : indexEnd])
indexStart += 1
indexEnd += 1
if self.qv_insitu_list_1[index].findText(stringList[0]) == -1:
self.qv_insitu_list_1[index].setCurrentIndex(0)
else:
self.qv_insitu_list_1[index].setCurrentIndex(self.qv_insitu_list_1[index].findText(stringList[0]))
self.qv_insitu_line_1[index].setText(stringList[1])
self.qv_insitu_line_2[index].setText(stringList[2])
self.qv_insitu_line_3[index].setText(stringList[3])
pushAnswer(self.qv_insitu_round_1[index], self.qv_insitu_round_2[index], stringList[4])
if "NetCDF" in stringList[5]:
self.qv_insitu_check_1[index].setChecked(True)
if "HDF" in stringList[5]:
self.qv_insitu_check_3[index].setChecked(True)
if "NASA/Ames" in stringList[5]:
self.qv_insitu_check_2[index].setChecked(True)
if "Other" in stringList[5]:
self.qv_insitu_check_4[index].setChecked(True)
button_functions.qv_output_other(self, index)
findOther = stringList[5].find("Other")
self.qv_insitu_line_4[index].setText(stringList[5][findOther + 6 :])
self.qv_insitu_area_1[index].setPlainText(stringList[6])
self.qv_insitu_area_2[index].setPlainText(stringList[7])
def read_statement_insitu_old(self, statement, index):
logging.debug('eufar_metadata_xml.py - read_statement_insitu_old - index ' + str(index))
stringList = []
indexStart = statement.find(":", 35)
indexEnd = statement.find("|", 35)
stringList.append(statement[indexStart + 2 : indexEnd])
indexStart += 1
indexEnd += 1
while indexStart < len(statement):
indexStart = statement.find(":", indexStart)
if indexStart == -1:
break
indexEnd = statement.find("|", indexEnd)
if indexEnd == -1:
stringList.append(statement[indexStart + 2 :])
else:
stringList.append(statement[indexStart + 2 : indexEnd])
indexStart += 1
indexEnd += 1
self.qv_insitu_line_1[index].setText(stringList[0])
self.qv_insitu_line_2[index].setText(stringList[1])
self.qv_insitu_line_3[index].setText(stringList[2])
pushAnswer(self.qv_insitu_round_1[index], self.qv_insitu_round_2[index], stringList[3])
if "NetCDF" in stringList[4]:
self.qv_insitu_check_1[index].setChecked(True)
if "HDF" in stringList[4]:
self.qv_insitu_check_3[index].setChecked(True)
if "NASA/Ames" in stringList[4]:
self.qv_insitu_check_2[index].setChecked(True)
if "Other" in stringList[4]:
self.qv_insitu_check_4[index].setChecked(True)
button_functions.qv_output_other(self, index)
findOther = stringList[4].find("Other")
self.qv_insitu_line_4[index].setText(stringList[4][findOther + 6 :])
self.qv_insitu_area_1[index].setPlainText(stringList[5])
self.qv_insitu_area_2[index].setPlainText(stringList[6])
def getAnswer(radioButton1, radioButton2):
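    """Return "yes" or "no" depending on which radio button is checked, or an
    empty string when neither is."""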
logging.debug('eufar_metadata_xml.py - getAnswer')
answer = ""
    if radioButton1.isChecked():
        answer = "yes"
    elif radioButton2.isChecked():
        answer = "no"
return answer
def pushAnswer(radioButton1, radioButton2, answer):
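    """Check the first or second radio button for a "yes" or "no" answer;
    any other value leaves both buttons untouched."""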
logging.debug('eufar_metadata_xml.py - pushAnswer')
if answer == "yes":
radioButton1.setChecked(True)
elif answer == "no":
radioButton2.setChecked(True)
def reverseDictionary(string, dictionary):
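    """Reverse dictionary lookup: return the first key whose value equals
    `string`, or an empty string when there is no match."""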
logging.debug('eufar_metadata_xml.py - reverseDictionary - string ' + string)
answer = ''
for key, value in dictionary.items():
if value == string:
answer = key
break
return answer
class MyAbout(QtWidgets.QDialog, Ui_aboutWindow):
def __init__(self):
logging.debug('eufar_metadata_xml.py - MyAbout - __init__')
QtWidgets.QWidget.__init__(self)
self.setupUi(self)
aboutText = ("<html><head/><body><p align=justify>The EMC has detected that the Quality and "
+ "Validity section of your XML file has been produced with an old version of the EM"
+ "C. Since the version 1.1.0, the XML code for the Quality and Validity section has"
+ " been replaced to take into account multiple instruments and multiple forms.</p><"
+ "span align=justify>All Quality and Validity data in the XML file have been loaded"
+ " into a new form. In order to convert the old XML code to the new one, <b>after s"
+ "electing an instrument in your Quality and Validity form</b>, please save your do"
+ "cument to a new file.</span>")
self.aw_label_1.setText(aboutText)
self.aw_okButton.clicked.connect(self.closeWindow)
self.aw_okButton.setFocus(True)
self.aw_label_2.setPixmap(QtGui.QPixmap("icons/warning_popup_icon.svg"))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/warning_popup_icon.svg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
self.setWindowTitle("Old Quality & Validity section detected")
def closeWindow(self):
logging.debug('eufar_metadata_xml.py - MyAbout - closeWindow')
self.close()
from __future__ import print_function
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import GroupShuffleSplit
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import MidTermFeatures as aF
import sys
import numpy as np
import os
import glob
import pickle as cPickle
import csv
import ntpath
from scipy import linalg as la
from scipy.spatial import distance
import sklearn.svm
import sklearn.decomposition
import sklearn.ensemble
import plotly
import plotly.subplots
import plotly.graph_objs as go
import sklearn.metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../"))
shortTermWindow = 0.050
shortTermStep = 0.050
eps = 0.00000001
class Knn:
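    """Minimal k-nearest-neighbour classifier working on an already-normalized
    feature matrix; classify() returns the winning class index together with
    the per-class probabilities estimated from the k nearest training samples."""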
def __init__(self, features, labels, neighbors):
self.features = features
self.labels = labels
self.neighbors = neighbors
def classify(self, test_sample):
n_classes = np.unique(self.labels).shape[0]
y_dist = (distance.cdist(self.features,
test_sample.reshape(1, test_sample.shape[0]),
'euclidean')).T
i_sort = np.argsort(y_dist)
P = np.zeros((n_classes,))
for i in range(n_classes):
P[i] = np.nonzero(self.labels[i_sort[0]
[0:self.neighbors]] == i)[0].shape[0] / float(self.neighbors)
return np.argmax(P), P
def classifier_wrapper(classifier, classifier_type, test_sample):
"""
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- classifier: a classifier object of type sklearn.svm.SVC or
kNN (defined in this library) or sklearn.ensemble.
RandomForestClassifier or sklearn.ensemble.
GradientBoostingClassifier or
sklearn.ensemble.ExtraTreesClassifier
- classifier_type: "svm" or "knn" or "randomforests" or
"gradientboosting" or "extratrees"
- test_sample: a feature vector (np array)
RETURNS:
- R: class ID
- P: probability estimate
EXAMPLE (for some audio signal stored in array x):
        from pyAudioAnalysis import MidTermFeatures as aF
        from pyAudioAnalysis import audioTrainTest as aT
# load the classifier (here SVM, for kNN use load_model_knn instead):
[classifier, MEAN, STD, classNames, mt_win, mt_step, st_win, st_step] =
aT.load_model(model_name)
# mid-term feature extraction:
[mt_features, _, _] = aF.mid_feature_extraction(x, Fs, mt_win * Fs,
mt_step * Fs, round(Fs*st_win), round(Fs*st_step));
# feature normalization:
curFV = (mt_features[:, i] - MEAN) / STD;
# classification
        [Result, P] = classifier_wrapper(classifier, model_type, curFV)
"""
class_id = -1
probability = -1
if classifier_type == "knn":
class_id, probability = classifier.classify(test_sample)
elif classifier_type == "svm" or \
classifier_type == "randomforest" or \
classifier_type == "gradientboosting" or \
classifier_type == "extratrees" or \
classifier_type == "svm_rbf":
class_id = classifier.predict(test_sample.reshape(1, -1))[0]
probability = classifier.predict_proba(test_sample.reshape(1, -1))[0]
return class_id, probability
def regression_wrapper(model, model_type, test_sample):
"""
    This function is used as a wrapper to regression model prediction.
ARGUMENTS:
- model: regression model
- model_type: "svm" or "knn" (TODO)
- test_sample: a feature vector (np array)
RETURNS:
- R: regression result (estimated value)
EXAMPLE (for some audio signal stored in array x):
TODO
"""
if model_type == "svm" or model_type == "randomforest" or \
model_type == "svm_rbf":
return model.predict(test_sample.reshape(1, -1))[0]
# elif classifier_type == "knn":
# TODO
def train_knn(features, labels, neighbors):
"""
Train a kNN classifier.
ARGUMENTS:
- features: a feature matrix [n_samples x numOfDimensions]
- labels: a label matrix: [n_samples x 1]
- neighbors: parameter K
RETURNS:
- kNN: the trained kNN variable
"""
knn = Knn(features, labels, neighbors)
return knn
def train_svm(features, labels, c_param, kernel='linear'):
"""
    Train a multi-class probabilistic SVM classifier.
Note: This function is simply a wrapper to the sklearn functionality
for SVM training
See function trainSVM_feature() to use a wrapper on both the
feature extraction and the SVM training
(and parameter tuning) processes.
ARGUMENTS:
- features: a feature matrix [n_samples x numOfDimensions]
- labels: a label matrix: [n_samples x 1]
- c_param: SVM parameter C (cost of constraints violation)
RETURNS:
- svm: the trained SVM variable
NOTE:
This function trains a linear-kernel SVM for a given C value.
For a different kernel, other types of parameters should be provided.
"""
svm = sklearn.svm.SVC(C=c_param, kernel=kernel, probability=True,
gamma='auto')
svm.fit(features, labels)
return svm
def train_random_forest(features, labels, n_estimators):
"""
Train a multi-class random forest classifier.
Note: This function is simply a wrapper to the sklearn functionality
for model training.
See function extract_features_and_train() to use a wrapper on both
the feature extraction and the model training (and parameter
tuning) processes.
ARGUMENTS:
- features: a feature matrix [n_samples x numOfDimensions]
- labels: a label matrix: [n_samples x 1]
- n_estimators: number of trees in the forest
RETURNS:
- rf: the trained random forest
"""
rf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_estimators)
rf.fit(features, labels)
return rf
def train_gradient_boosting(features, labels, n_estimators):
"""
Train a gradient boosting classifier
Note: This function is simply a wrapper to the sklearn functionality
for model training.
See function extract_features_and_train() to use a wrapper on both
the feature extraction and the model training (and parameter
tuning) processes.
ARGUMENTS:
- features: a feature matrix [n_samples x numOfDimensions]
- labels: a label matrix: [n_samples x 1]
- n_estimators: number of trees in the forest
RETURNS:
- rf: the trained model
"""
rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=n_estimators)
rf.fit(features, labels)
return rf
def train_extra_trees(features, labels, n_estimators):
"""
Train an extra tree
Note: This function is simply a wrapper to the sklearn functionality
for model training.
See function extract_features_and_train() to use a wrapper on both
the feature extraction and the model training (and parameter
tuning) processes.
ARGUMENTS:
- features: a feature matrix [n_samples x numOfDimensions]
- labels: a label matrix: [n_samples x 1]
- n_estimators: number of trees in the forest
RETURNS:
- et: the trained model
"""
et = sklearn.ensemble.ExtraTreesClassifier(n_estimators=n_estimators)
et.fit(features, labels)
return et
def train_svm_regression(features, labels, c_param, kernel='linear'):
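    """Train an SVM regression model (wrapper around sklearn.svm.SVR); returns
    the model and its mean absolute error on the training data."""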
svm = sklearn.svm.SVR(C=c_param, kernel=kernel)
svm.fit(features, labels)
train_err = np.mean(np.abs(svm.predict(features) - labels))
return svm, train_err
def train_random_forest_regression(features, labels, n_estimators):
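    """Train a random forest regression model (wrapper around
    sklearn.ensemble.RandomForestRegressor); returns the model and its mean
    absolute error on the training data."""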
rf = sklearn.ensemble.RandomForestRegressor(n_estimators=n_estimators)
rf.fit(features, labels)
train_err = np.mean(np.abs(rf.predict(features) - labels))
return rf, train_err
def extract_features_and_train(paths, mid_window, mid_step, short_window,
short_step, classifier_type, model_name,
compute_beat=False, train_percentage=0.90,
dict_of_ids=None,
use_smote=False):
"""
This function is used as a wrapper to segment-based audio feature extraction
and classifier training.
ARGUMENTS:
paths: list of paths of directories. Each directory
                                    contains a single audio class whose samples
                                    are stored in separate WAV files.
mid_window, mid_step: mid-term window length and step
short_window, short_step: short-term window and step
classifier_type: "svm" or "knn" or "randomforest" or
"gradientboosting" or "extratrees"
model_name: name of the model to be saved
dict_of_ids: a dictionary which has as keys the full path of audio files and as values the respective group ids
RETURNS:
None. Resulting classifier along with the respective model
parameters are saved on files.
"""
# STEP A: Feature Extraction:
features, class_names, file_names = \
aF.multiple_directory_feature_extraction(paths, mid_window, mid_step,
short_window, short_step,
compute_beat=compute_beat)
file_names = [item for sublist in file_names for item in sublist]
if dict_of_ids:
list_of_ids = [dict_of_ids[file] for file in file_names]
else:
list_of_ids = None
if len(features) == 0:
print("trainSVM_feature ERROR: No data found in any input folder!")
return
n_feats = features[0].shape[1]
feature_names = ["features" + str(d + 1) for d in range(n_feats)]
for i, feat in enumerate(features):
if len(feat) == 0:
print("trainSVM_feature ERROR: " + paths[i] +
" folder is empty or non-existing!")
return
# STEP B: classifier Evaluation and Parameter Selection:
if classifier_type == "svm" or classifier_type == "svm_rbf":
classifier_par = np.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0, 20.0])
elif classifier_type == "randomforest":
classifier_par = np.array([10, 25, 50, 100, 200, 500])
elif classifier_type == "knn":
classifier_par = np.array([1, 3, 5, 7, 9, 11, 13, 15])
elif classifier_type == "gradientboosting":
classifier_par = np.array([10, 25, 50, 100, 200, 500])
elif classifier_type == "extratrees":
classifier_par = np.array([10, 25, 50, 100, 200, 500])
# get optimal classifier parameter:
temp_features = []
for feat in features:
temp = []
for i in range(feat.shape[0]):
temp_fv = feat[i, :]
if (not np.isnan(temp_fv).any()) and (not np.isinf(temp_fv).any()):
temp.append(temp_fv.tolist())
else:
print("NaN Found! Feature vector not used for training")
temp_features.append(np.array(temp))
features = temp_features
best_param = evaluate_classifier(features, class_names, classifier_type,
classifier_par, 1, list_of_ids, n_exp=-1,
train_percentage=train_percentage,
smote=use_smote)
print("Selected params: {0:.5f}".format(best_param))
# STEP C: Train and Save the classifier to file
    # Get features in the X, y format:
features, labels = features_to_matrix(features)
# Apply smote if necessary:
if use_smote:
sm = SMOTE(random_state=2)
features, labels = sm.fit_resample(features, labels)
# Use mean/std standard feature scaling:
scaler = StandardScaler()
features = scaler.fit_transform(features)
mean = scaler.mean_.tolist()
std = scaler.scale_.tolist()
# Then train the final classifier
if classifier_type == "svm":
classifier = train_svm(features, labels, best_param)
elif classifier_type == "svm_rbf":
classifier = train_svm(features, labels, best_param, kernel='rbf')
elif classifier_type == "randomforest":
classifier = train_random_forest(features, labels, best_param)
elif classifier_type == "gradientboosting":
classifier = train_gradient_boosting(features, labels, best_param)
elif classifier_type == "extratrees":
classifier = train_extra_trees(features, labels, best_param)
# And save the model to a file, along with
    # - the scaling (mean/std) vectors
# - the feature extraction parameters
if classifier_type == "knn":
feature_matrix = features.tolist()
labels = labels.tolist()
save_path = model_name
save_parameters(save_path, feature_matrix, labels, mean, std,
class_names, best_param, mid_window, mid_step,
short_window, short_step, compute_beat)
elif classifier_type == "svm" or classifier_type == "svm_rbf" or \
classifier_type == "randomforest" or \
classifier_type == "gradientboosting" or \
classifier_type == "extratrees":
with open(model_name, 'wb') as fid:
cPickle.dump(classifier, fid)
save_path = model_name + "MEANS"
save_parameters(save_path, mean, std, class_names, mid_window, mid_step,
short_window, short_step, compute_beat)
def save_parameters(path, *parameters):
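    """Pickle each parameter sequentially into `path`; the corresponding load
    routines (e.g. load_model_knn()) read them back in exactly the same order."""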
with open(path, 'wb') as file_handle:
for param in parameters:
cPickle.dump(param, file_handle, protocol=cPickle.HIGHEST_PROTOCOL)
def feature_extraction_train_regression(folder_name, mid_window, mid_step,
short_window, short_step, model_type,
model_name, compute_beat=False):
"""
This function is used as a wrapper to segment-based audio
feature extraction and classifier training.
ARGUMENTS:
folder_name: path of directory containing the WAV files
and Regression CSVs
mt_win, mt_step: mid-term window length and step
st_win, st_step: short-term window and step
model_type: "svm" or "knn" or "randomforest"
model_name: name of the model to be saved
RETURNS:
None. Resulting regression model along with the respective
model parameters are saved on files.
"""
# STEP A: Feature Extraction:
features, _, filenames = \
aF.multiple_directory_feature_extraction([folder_name], mid_window,
mid_step, short_window,
short_step,
compute_beat=compute_beat)
features = features[0]
filenames = [ntpath.basename(f) for f in filenames[0]]
f_final = []
# Read CSVs:
csv_files = glob.glob(folder_name + os.sep + "*.csv")
regression_labels = []
regression_names = []
f_final = []
for c in csv_files:
cur_regression_labels = []
f_temp = []
# open the csv file that contains the current target value's annotations
with open(c, 'rt') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in csv_reader:
if len(row) == 2:
# ... and if the current filename exists
# in the list of filenames
if row[0] in filenames:
index = filenames.index(row[0])
cur_regression_labels.append(float(row[1]))
f_temp.append(features[index, :])
else:
print("Warning: {} not found "
"in list of files.".format(row[0]))
else:
print("Warning: Row with unknown format in regression file")
f_final.append(np.array(f_temp))
# cur_regression_labels is the list of values
# for the current regression problem
regression_labels.append(np.array(cur_regression_labels))
# regression task name
regression_names.append(ntpath.basename(c).replace(".csv", ""))
if len(features) == 0:
print("ERROR: No data found in any input folder!")
return
# STEP B: classifier Evaluation and Parameter Selection:
if model_type == "svm" or model_type == "svm_rbf":
model_params = np.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5,
1.0, 5.0, 10.0])
elif model_type == "randomforest":
model_params = np.array([5, 10, 25, 50, 100])
errors = []
errors_base = []
best_params = []
for iRegression, r in enumerate(regression_names):
        # get optimal classifier parameter:
print("Regression task " + r)
bestParam, error, berror = evaluate_regression(f_final[iRegression],
regression_labels[
iRegression],
100, model_type,
model_params)
errors.append(error)
errors_base.append(berror)
best_params.append(bestParam)
print("Selected params: {0:.5f}".format(bestParam))
# scale the features (mean-std) and keep the mean/std parameters
# to be saved with the model
scaler = StandardScaler()
features_norm = scaler.fit_transform(f_final[iRegression])
mean = scaler.mean_.tolist()
std = scaler.scale_.tolist()
# STEP C: Save the model to file
if model_type == "svm":
classifier, _ = train_svm_regression(features_norm,
regression_labels[iRegression],
bestParam)
if model_type == "svm_rbf":
classifier, _ = train_svm_regression(features_norm,
regression_labels[iRegression],
bestParam, kernel='rbf')
if model_type == "randomforest":
classifier, _ = train_random_forest_regression(features_norm,
regression_labels[
iRegression],
bestParam)
# Save the model to a file, along with
        # - the scaling (mean/std) vectors
# - the feature extraction parameters
if model_type == "svm" or model_type == "svm_rbf" \
or model_type == "randomforest":
with open(model_name + "_" + r, 'wb') as fid:
cPickle.dump(classifier, fid)
save_path = model_name + "_" + r + "MEANS"
save_parameters(save_path, mean, std, mid_window, mid_step,
short_window, short_step, compute_beat)
return errors, errors_base, best_params
def load_model_knn(knn_model_name, is_regression=False):
with open(knn_model_name, "rb") as fo:
features = cPickle.load(fo)
labels = cPickle.load(fo)
mean = cPickle.load(fo)
std = cPickle.load(fo)
if not is_regression:
classes = cPickle.load(fo)
neighbors = cPickle.load(fo)
mid_window = cPickle.load(fo)
mid_step = cPickle.load(fo)
short_window = cPickle.load(fo)
short_step = cPickle.load(fo)
compute_beat = cPickle.load(fo)
features = np.array(features)
labels = np.array(labels)
mean = np.array(mean)
std = np.array(std)
classifier = Knn(features, labels, neighbors)
# Note: a direct call to the kNN constructor is used here
if is_regression:
return classifier, mean, std, mid_window, mid_step, short_window, \
short_step, compute_beat
else:
return classifier, mean, std, classes, mid_window, mid_step, \
short_window, short_step, compute_beat
def load_model(model_name, is_regression=False):
"""
    This function loads a trained model (e.g. svm, randomforest) either for
    classification or regression.
    ARGUMENTS:
        - model_name:    the path of the model to be loaded
        - is_regression: a flag indicating whether the model is a
          regression model or not
"""
with open(model_name + "MEANS", "rb") as fo:
mean = cPickle.load(fo)
std = cPickle.load(fo)
if not is_regression:
classNames = cPickle.load(fo)
mid_window = cPickle.load(fo)
mid_step = cPickle.load(fo)
short_window = cPickle.load(fo)
short_step = cPickle.load(fo)
compute_beat = cPickle.load(fo)
mean = np.array(mean)
std = np.array(std)
with open(model_name, 'rb') as fid:
svm_model = cPickle.load(fid)
if is_regression:
return svm_model, mean, std, mid_window, mid_step, short_window, \
short_step, compute_beat
else:
return svm_model, mean, std, classNames, mid_window, mid_step, \
short_window, short_step, compute_beat
def group_split(X, y, train_indeces, test_indeces, split_id):
"""
    This function splits the data into train and test sets according to the
    train/test indices produced by LeaveOneGroupOut
ARGUMENTS:
X: array-like of shape (n_samples, n_features)
y: array-like of shape (n_samples,)
train_indeces: The training set indices
test_indeces: The testing set indices
split_id: the split number
RETURNS:
List containing train-test split of inputs.
"""
train_index = train_indeces[split_id]
test_index = test_indeces[split_id]
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
return X_train, X_test, y_train, y_test
def evaluate_classifier(features, class_names, classifier_name, params,
parameter_mode, list_of_ids=None, n_exp=-1,
train_percentage=0.90,
smote=False):
"""
ARGUMENTS:
        features: a list ([numOfClasses x 1]) whose elements contain
np matrices of features. Each matrix features[i] of
class i is [n_samples x numOfDimensions]
class_names: list of class names (strings)
classifier_name: svm or knn or randomforest
params: list of classifier parameters (for parameter
tuning during cross-validation)
parameter_mode: 0: choose parameters that lead to maximum overall
classification ACCURACY
1: choose parameters that lead to maximum overall
f1 MEASURE
n_exp: number of cross-validation experiments
(use -1 for auto calculation based on the num of samples)
train_percentage: percentage of training (vs validation) data
default 0.90
RETURNS:
bestParam: the value of the input parameter that optimizes the
selected performance measure
"""
# transcode list of feature matrices to X, y (sklearn)
X, y = features_to_matrix(features)
# features_norm = features;
n_classes = len(features)
ac_all = []
f1_all = []
f1_std_all = []
pre_class_all = []
rec_classes_all = []
f1_classes_all = []
cms_all = []
# dynamically compute total number of samples:
    # (so that if the number of samples is large, only a few train-val
    # repetitions are performed)
n_samples_total = X.shape[0]
if n_exp == -1:
n_exp = int(50000 / n_samples_total) + 1
if list_of_ids:
train_indeces, test_indeces = [], []
gss = GroupShuffleSplit(n_splits=n_exp, train_size=.8)
for train_index, test_index in gss.split(X, y, list_of_ids):
train_indeces.append(train_index)
test_indeces.append(test_index)
for Ci, C in enumerate(params):
# for each param value
cm = np.zeros((n_classes, n_classes))
f1_per_exp = []
y_pred_all = []
y_test_all = []
for e in range(n_exp):
y_pred = []
# for each cross-validation iteration:
print("Param = {0:.5f} - classifier Evaluation "
"Experiment {1:d} of {2:d}".format(C, e+1, n_exp))
# split features:
if list_of_ids:
X_train, X_test, y_train, y_test = group_split(
X, y, train_indeces, test_indeces, e)
else:
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=1-train_percentage)
# mean/std scale the features:
scaler = StandardScaler()
if smote:
sm = SMOTE(random_state=2)
#sm = RandomUnderSampler(random_state=0)
X_train, y_train = sm.fit_resample(X_train, y_train)
scaler.fit(X_train)
X_train = scaler.transform(X_train)
# train multi-class svms:
if classifier_name == "svm":
classifier = train_svm(X_train, y_train, C)
elif classifier_name == "svm_rbf":
classifier = train_svm(X_train, y_train, C, kernel='rbf')
elif classifier_name == "knn":
classifier = train_knn(X_train, y_train, C)
elif classifier_name == "randomforest":
classifier = train_random_forest(X_train, y_train, C)
elif classifier_name == "gradientboosting":
classifier = train_gradient_boosting(X_train, y_train, C)
elif classifier_name == "extratrees":
classifier = train_extra_trees(X_train, y_train, C)
            # get predictions and compute the current confusion matrix
cmt = np.zeros((n_classes, n_classes))
X_test = scaler.transform(X_test)
for i_test_sample in range(X_test.shape[0]):
y_pred.append(classifier_wrapper(classifier,
classifier_name,
X_test[i_test_sample, :])[0])
# current confusion matrices and F1:
cmt = sklearn.metrics.confusion_matrix(y_test, y_pred)
f1t = sklearn.metrics.f1_score(y_test, y_pred, average='macro')
# aggregated predicted and ground truth labels
# (used for the validation of final F1)
y_pred_all += y_pred
y_test_all += y_test.tolist()
f1_per_exp.append(f1t)
if cmt.size != cm.size:
all_classes = set(y)
split_classes = set(y_test.tolist() + y_pred)
missing_classes = all_classes.difference(split_classes)
missing_classes = list(missing_classes)
missing_classes = [int(x) for x in missing_classes]
for mm in missing_classes:
cmt = np.insert(cmt, mm, 0, axis=0)
for mm in missing_classes:
cmt = np.insert(cmt, mm, 0, axis=1)
cm = cm + cmt
cm = cm + 0.0000000010
rec = np.array([cm[ci, ci] / np.sum(cm[ci, :])
for ci in range(cm.shape[0])])
pre = np.array([cm[ci, ci] / np.sum(cm[:, ci])
for ci in range(cm.shape[0])])
pre_class_all.append(pre)
rec_classes_all.append(rec)
f1 = 2 * rec * pre / (rec + pre)
# this is just for debugging (it should be equal to f1)
f1_b = sklearn.metrics.f1_score(y_test_all, y_pred_all,
average='macro')
        # Note: np.mean(f1_per_exp) will not be exactly equal to the
        # overall f1 (i.e. f1 and f1_b), because the latter are calculated
        # on a per-sample basis
f1_std = np.std(f1_per_exp)
#print(np.mean(f1), f1_b, f1_std)
f1_classes_all.append(f1)
ac_all.append(np.sum(np.diagonal(cm)) / np.sum(cm))
cms_all.append(cm)
f1_all.append(np.mean(f1))
f1_std_all.append(f1_std)
print("\t\t", end="")
for i, c in enumerate(class_names):
if i == len(class_names)-1:
print("{0:s}\t\t".format(c), end="")
else:
print("{0:s}\t\t\t".format(c), end="")
print("OVERALL")
print("\tC", end="")
for c in class_names:
print("\tPRE\tREC\tf1", end="")
print("\t{0:s}\t{1:s}".format("ACC", "f1"))
best_ac_ind = np.argmax(ac_all)
best_f1_ind = np.argmax(f1_all)
for i in range(len(pre_class_all)):
print("\t{0:.3f}".format(params[i]), end="")
for c in range(len(pre_class_all[i])):
print("\t{0:.1f}\t{1:.1f}\t{2:.1f}".format(100.0 *
pre_class_all[i][c],
100.0 *
rec_classes_all[i][c],
100.0 *
f1_classes_all[i][c]),
end="")
print("\t{0:.1f}\t{1:.1f}".format(100.0 * ac_all[i], 100.0 * f1_all[i]),
end="")
if i == best_f1_ind:
print("\t best f1", end="")
if i == best_ac_ind:
print("\t best Acc", end="")
print("")
if parameter_mode == 0:
# keep parameters that maximize overall classification accuracy:
print("Confusion Matrix:")
print_confusion_matrix(cms_all[best_ac_ind], class_names)
return params[best_ac_ind]
elif parameter_mode == 1:
# keep parameters that maximize overall f1 measure:
print("Confusion Matrix:")
print_confusion_matrix(cms_all[best_f1_ind], class_names)
print(f"Best macro f1 {100 * f1_all[best_f1_ind]:.1f}")
print(f"Best macro f1 std {100 * f1_std_all[best_f1_ind]:.1f}")
return params[best_f1_ind]
def evaluate_regression(features, labels, n_exp, method_name, params):
"""
ARGUMENTS:
features: np matrices of features [n_samples x numOfDimensions]
labels: list of sample labels
n_exp: number of cross-validation experiments
        method_name: "svm", "svm_rbf" or "randomforest"
params: list of classifier params to be evaluated
RETURNS:
bestParam: the value of the input parameter that optimizes
the selected performance measure
"""
# mean/std feature scaling:
scaler = StandardScaler()
features_norm = scaler.fit_transform(features)
n_samples = labels.shape[0]
per_train = 0.9
errors_all = []
er_train_all = []
er_base_all = []
for Ci, C in enumerate(params): # for each param value
errors = []
errors_train = []
errors_baseline = []
for e in range(n_exp): # for each cross-validation iteration:
# split features:
randperm = np.random.permutation(range(n_samples))
n_train = int(round(per_train * n_samples))
f_train = [features_norm[randperm[i]]
for i in range(n_train)]
f_test = [features_norm[randperm[i+n_train]]
for i in range(n_samples - n_train)]
l_train = [labels[randperm[i]] for i in range(n_train)]
l_test = [labels[randperm[i + n_train]]
for i in range(n_samples - n_train)]
# train multi-class svms:
f_train = np.array(f_train)
if method_name == "svm":
classifier, train_err = \
train_svm_regression(f_train, l_train, C)
elif method_name == "svm_rbf":
classifier, train_err = \
train_svm_regression(f_train, l_train, C,
kernel='rbf')
elif method_name == "randomforest":
classifier, train_err = \
train_random_forest_regression(f_train, l_train, C)
error_test = []
error_test_baseline = []
for itest, fTest in enumerate(f_test):
R = regression_wrapper(classifier, method_name, fTest)
Rbaseline = np.mean(l_train)
error_test.append((R - l_test[itest]) *
(R - l_test[itest]))
error_test_baseline.append((Rbaseline - l_test[itest]) *
(Rbaseline - l_test[itest]))
error = np.array(error_test).mean()
error_baseline = np.array(error_test_baseline).mean()
errors.append(error)
errors_train.append(train_err)
errors_baseline.append(error_baseline)
errors_all.append(np.array(errors).mean())
er_train_all.append(np.array(errors_train).mean())
er_base_all.append(np.array(errors_baseline).mean())
best_ind = np.argmin(errors_all)
print("{0:s}\t\t{1:s}\t\t{2:s}\t\t{3:s}".format("Param", "MSE",
"T-MSE", "R-MSE"))
for i in range(len(errors_all)):
print("{0:.4f}\t\t{1:.2f}\t\t{2:.2f}\t\t{3:.2f}".format(params[i],
errors_all[i],
er_train_all[i],
er_base_all[i]),
end="")
if i == best_ind:
print("\t\t best", end="")
print("")
return params[best_ind], errors_all[best_ind], er_base_all[best_ind]
def print_confusion_matrix(cm, class_names):
"""
This function prints a confusion matrix for a particular classification task.
ARGUMENTS:
cm: a 2-D np array of the confusion matrix
(cm[i,j] is the number of times a sample from class i
was classified in class j)
class_names: a list that contains the names of the classes
"""
if cm.shape[0] != len(class_names):
print("printConfusionMatrix: Wrong argument sizes\n")
return
for c in class_names:
if len(c) > 4:
c = c[0:3]
print("\t{0:s}".format(c), end="")
print("")
for i, c in enumerate(class_names):
if len(c) > 4:
c = c[0:3]
print("{0:s}".format(c), end="")
for j in range(len(class_names)):
print("\t{0:.2f}".format(100.0 * cm[i][j] / np.sum(cm)), end="")
print("")
def features_to_matrix(features):
"""
features_to_matrix(features)
This function takes a list of feature matrices as argument and returns
a single concatenated feature matrix and the respective class labels.
ARGUMENTS:
- features: a list of feature matrices
RETURNS:
- feature_matrix: a concatenated matrix of features
- labels: a vector of class indices
"""
labels = np.array([])
feature_matrix = np.array([])
for i, f in enumerate(features):
if i == 0:
feature_matrix = f
labels = i * np.ones((len(f), 1))
else:
feature_matrix = np.vstack((feature_matrix, f))
labels = np.append(labels, i * np.ones((len(f), 1)))
return feature_matrix, labels
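# Minimal illustrative example (made-up data): for two classes with 3 and 2
# samples of 4 features each,
#
#   f = [np.zeros((3, 4)), np.ones((2, 4))]
#   X, y = features_to_matrix(f)
#
# X has shape (5, 4) and y is array([0., 0., 0., 1., 1.]).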
def pca_wrapper(features, dimensions):
features, labels = features_to_matrix(features)
pca = sklearn.decomposition.PCA(n_components=dimensions)
pca.fit(features)
coeff = pca.components_
coeff = coeff[:, 0:dimensions]
features_transformed = []
for f in features:
ft = f.copy()
# ft = pca.transform(ft, k=nDims)
ft = np.dot(f, coeff)
features_transformed.append(ft)
return features_transformed, coeff
def compute_class_rec_pre_f1(c_mat):
"""
Gets recall, precision and f1 PER CLASS, given the confusion matrix
:param c_mat: the [n_class x n_class] confusion matrix
:return: rec, pre and f1 for each class
"""
n_class = c_mat.shape[0]
rec, pre, f1 = [], [], []
for i in range(n_class):
rec.append(float(c_mat[i, i]) / np.sum(c_mat[i, :]))
pre.append(float(c_mat[i, i]) / np.sum(c_mat[:, i]))
f1.append(2 * rec[-1] * pre[-1] / (rec[-1] + pre[-1]))
return rec, pre, f1
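# Minimal illustrative example (hypothetical 2-class confusion matrix):
#
#   c_mat = np.array([[8, 2],
#                     [1, 9]])
#   rec, pre, f1 = compute_class_rec_pre_f1(c_mat)
#
# gives rec = [0.8, 0.9], pre = [8/9, 9/11] and the corresponding per-class
# f1 values.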
def evaluate_model_for_folders(input_test_folders, model_name, model_type,
positive_class, plot=True):
"""
evaluate_model_for_folders(input_test_folders, model_name, model_type)
This function evaluates a model by computing the confusion matrix, the
per class performance metrics and by generating a ROC and Precision / Recall
diagrams (for a particular class of interest), for a given test dataset.
The dataset needs to be organized in folders (one folder per audio class),
exactly like in extract_features_and_train()
:param input_test_folders: list of folders (each folder represents a
separate audio class)
:param model_name: path to the model to be tested
:param model_type: type of the model
    :param positive_class: name of the positive class
    :param plot: (default True) whether to plot 2 diagrams with plotly
    :return: cm, thr_prre, pre, rec (confusion matrix, thresholds and
             precision / recall values),
             thr_roc, fpr, tpr (thresholds, false positive and
             true positive rates)
    Usage example:
    from pyAudioAnalysis import audioTrainTest as aT
    cm, thr_prre, pre, rec, thr_roc, fpr, tpr = aT.evaluate_model_for_folders(
        ["4_classes_small/speech", "4_classes_small/music"],
        "data/models/svm_rbf_4class", "svm_rbf", "speech")
"""
class_names = []
y_true_binary = []
y_true = []
y_pred = []
probs_positive = []
for i, d in enumerate(input_test_folders):
if d[-1] == os.sep:
class_names.append(d.split(os.sep)[-2])
else:
class_names.append(d.split(os.sep)[-1])
types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(d, files)))
# get list of audio files for current folder and run classifier
for w in wav_file_list:
c, p, probs_names = file_classification(w, model_name, model_type)
y_pred.append(c)
y_true.append(probs_names.index(class_names[i]))
if i == probs_names.index(positive_class):
y_true_binary.append(1)
else:
y_true_binary.append(0)
prob_positive = p[probs_names.index(positive_class)]
probs_positive.append(prob_positive)
pre, rec, thr_prre = sklearn.metrics.precision_recall_curve(y_true_binary,
probs_positive)
fpr, tpr, thr_roc = sklearn.metrics.roc_curve(
y_true_binary, probs_positive)
cm = sklearn.metrics.confusion_matrix(y_true, y_pred)
rec_c, pre_c, f1_c = compute_class_rec_pre_f1(cm)
f1 = (sklearn.metrics.f1_score(y_true, y_pred, average='macro'))
acc = (sklearn.metrics.accuracy_score(y_true, y_pred))
print(cm)
print(rec_c, pre_c, f1_c, f1, acc)
if plot:
titles = ["Confusion matrix, acc = {0:.1f}%, "
" F1 (macro): {1:.1f}%".format(100 * acc, 100 * f1),
"Class-wise Performance measures",
"Pre vs Rec for " + positive_class,
"ROC for " + positive_class]
figs = plotly.subplots.make_subplots(rows=2, cols=2,
subplot_titles=titles)
heatmap = go.Heatmap(z=np.flip(cm, axis=0), x=class_names,
y=list(reversed(class_names)),
colorscale=[[0, '#4422ff'], [1, '#ff4422']],
                             name="confusion matrix", showscale=False)
mark_prop1 = dict(color='rgba(80, 220, 150, 0.5)',
line=dict(color='rgba(80, 220, 150, 1)', width=2))
mark_prop2 = dict(color='rgba(80, 150, 220, 0.5)',
line=dict(color='rgba(80, 150, 220, 1)', width=2))
mark_prop3 = dict(color='rgba(250, 150, 150, 0.5)',
line=dict(color='rgba(250, 150, 150, 1)', width=3))
b1 = go.Bar(x=class_names, y=rec_c, name="Recall", marker=mark_prop1)
b2 = go.Bar(x=class_names, y=pre_c,
name="Precision", marker=mark_prop2)
b3 = go.Bar(x=class_names, y=f1_c, name="F1", marker=mark_prop3)
figs.append_trace(heatmap, 1, 1)
figs.append_trace(b1, 1, 2)
figs.append_trace(b2, 1, 2)
figs.append_trace(b3, 1, 2)
figs.append_trace(go.Scatter(x=thr_prre, y=pre, name="Precision",
marker=mark_prop1), 2, 1)
figs.append_trace(go.Scatter(x=thr_prre, y=rec, name="Recall",
marker=mark_prop2), 2, 1)
figs.append_trace(go.Scatter(x=fpr, y=tpr, showlegend=False), 2, 2)
figs.update_xaxes(title_text="threshold", row=2, col=1)
figs.update_xaxes(title_text="false positive rate", row=2, col=2)
figs.update_yaxes(title_text="true positive rate", row=2, col=2)
plotly.offline.plot(figs, filename="temp.html", auto_open=True)
return cm, thr_prre, pre, rec, thr_roc, fpr, tpr
def file_classification(input_file, model_name, model_type):
# Load classifier:
if not os.path.isfile(model_name):
print("fileClassification: input model_name not found!")
return -1, -1, -1
if isinstance(input_file, str) and not os.path.isfile(input_file):
print("fileClassification: wav file not found!")
return -1, -1, -1
if model_type == 'knn':
classifier, mean, std, classes, mid_window, mid_step, short_window, \
short_step, compute_beat = load_model_knn(model_name)
else:
classifier, mean, std, classes, mid_window, mid_step, short_window, \
short_step, compute_beat = load_model(model_name)
# read audio file and convert to mono
sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
signal = audioBasicIO.stereo_to_mono(signal)
if sampling_rate == 0:
# audio file IO problem
return -1, -1, -1
if signal.shape[0] / float(sampling_rate) < mid_window:
mid_window = signal.shape[0] / float(sampling_rate)
# feature extraction:
mid_features, s, _ = \
aF.mid_feature_extraction(signal, sampling_rate,
mid_window * sampling_rate,
mid_step * sampling_rate,
round(sampling_rate * short_window),
round(sampling_rate * short_step))
# long term averaging of mid-term statistics
mid_features = mid_features.mean(axis=1)
if compute_beat:
beat, beat_conf = aF.beat_extraction(s, short_step)
mid_features = np.append(mid_features, beat)
mid_features = np.append(mid_features, beat_conf)
feature_vector = (mid_features - mean) / std # normalization
# classification
class_id, probability = classifier_wrapper(classifier, model_type,
feature_vector)
return class_id, probability, classes
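# Illustrative usage (assumes a classifier previously trained with
# extract_features_and_train and saved under the hypothetical name
# "svm_demo_model"):
#
#   class_id, probabilities, classes = \
#       file_classification("sample.wav", "svm_demo_model", "svm")
#   print(classes[int(class_id)], probabilities)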
def file_regression(input_file, model_name, model_type):
# Load classifier:
if not os.path.isfile(input_file):
        print("fileRegression: wav file not found!")
return -1, -1, -1
regression_models = glob.glob(model_name + "_*")
regression_models2 = []
for r in regression_models:
if r[-5::] != "MEANS":
regression_models2.append(r)
regression_models = regression_models2
regression_names = []
for r in regression_models:
regression_names.append(r[r.rfind("_")+1::])
# FEATURE EXTRACTION
# LOAD ONLY THE FIRST MODEL (for mt_win, etc)
if model_type == 'svm' or model_type == "svm_rbf" or \
model_type == 'randomforest':
_, _, _, mid_window, mid_step, short_window, short_step, compute_beat \
= load_model(regression_models[0], True)
# read audio file and convert to mono
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    signal = audioBasicIO.stereo_to_mono(signal)
    # feature extraction:
    mid_features, s, _ = \
        aF.mid_feature_extraction(signal, sampling_rate,
                                  mid_window * sampling_rate,
                                  mid_step * sampling_rate,
                                  round(sampling_rate * short_window),
                                  round(sampling_rate * short_step))
# long term averaging of mid-term statistics
mid_features = mid_features.mean(axis=1)
if compute_beat:
beat, beat_conf = aF.beat_extraction(s, short_step)
mid_features = np.append(mid_features, beat)
mid_features = np.append(mid_features, beat_conf)
# REGRESSION
R = []
for ir, r in enumerate(regression_models):
if not os.path.isfile(r):
            print("fileRegression: input model_name not found!")
return (-1, -1, -1)
if model_type == 'svm' or model_type == "svm_rbf" \
or model_type == 'randomforest':
model, mean, std, _, _, _, _, _ = load_model(r, True)
curFV = (mid_features - mean) / std # normalization
# classification
R.append(regression_wrapper(model, model_type, curFV))
return R, regression_names
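# Illustrative usage (assumes regression models previously trained with
# feature_extraction_train_regression using the hypothetical prefix
# "svm_demo_reg"):
#
#   values, task_names = file_regression("sample.wav", "svm_demo_reg", "svm")
#   for name, value in zip(task_names, values):
#       print(name, value)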
def lda(data, labels, red_dim):
# Centre data
data -= data.mean(axis=0)
n_data = np.shape(data)[0]
n_dim = np.shape(data)[1]
Sw = np.zeros((n_dim, n_dim))
C = np.cov((data.T))
# Loop over classes
classes = np.unique(labels)
for i in range(len(classes)):
# Find relevant datapoints
indices = (np.where(labels == classes[i]))
d = np.squeeze(data[indices, :])
classcov = np.cov((d.T))
Sw += float(np.shape(indices)[0])/n_data * classcov
Sb = C - Sw
# Now solve for W
# Compute eigenvalues, eigenvectors and sort into order
evals, evecs = la.eig(Sw, Sb)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
w = evecs[:, :red_dim]
new_data = np.dot(data, w)
return new_data, w
def train_speaker_models():
"""
This script is used to train the speaker-related models
(NOTE: data paths are hard-coded and NOT included in the library,
the models are, however included)
    from pyAudioAnalysis import audioTrainTest as aT
    aT.train_speaker_models()
"""
mt_win = 2.0
mt_step = 2.0
st_win = 0.020
st_step = 0.020
dir_name = "DIARIZATION_ALL/all"
list_of_dirs = [os.path.join(dir_name, name)
for name in os.listdir(dir_name)
if os.path.isdir(os.path.join(dir_name, name))]
extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win, st_step,
"knn", "data/knnSpeakerAll",
compute_beat=False, train_percentage=0.50)
dir_name = "DIARIZATION_ALL/female_male"
list_of_dirs = [os.path.join(dir_name, name)
for name in os.listdir(dir_name)
if os.path.isdir(os.path.join(dir_name, name))]
extract_features_and_train(list_of_dirs, mt_win, mt_step, st_win, st_step,
"knn", "data/knnSpeakerFemaleMale",
compute_beat=False, train_percentage=0.50)
def main(argv):
return 0
if __name__ == '__main__':
main(sys.argv)
|
|
"""
Tests for Chandrasekhar recursions
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
import pandas as pd
from .results import results_varmax
from statsmodels.tsa.statespace import sarimax, varmax
from statsmodels.tsa.statespace.kalman_filter import (
MEMORY_CONSERVE, MEMORY_NO_LIKELIHOOD)
from numpy.testing import assert_allclose
import pytest
def check_output(res_chand, res_orig, memory_conserve=False):
# Test loglike
params = res_orig.params
assert_allclose(res_chand.llf, res_orig.llf)
assert_allclose(res_chand.model.score_obs(params),
res_orig.model.score_obs(params), atol=1e-10)
# Test state space representation matrices
for name in res_chand.model.ssm.shapes:
if name == 'obs':
continue
assert_allclose(getattr(res_chand.filter_results, name),
getattr(res_orig.filter_results, name))
# Test filter / smoother output
filter_attr = ['predicted_state', 'filtered_state', 'forecasts',
'forecasts_error']
# Can only check kalman gain if we didn't use memory conservation
if not memory_conserve:
filter_attr += ['kalman_gain']
for name in filter_attr:
actual = getattr(res_chand.filter_results, name)
desired = getattr(res_orig.filter_results, name)
assert_allclose(actual, desired, atol=1e-12)
filter_attr_burn = ['predicted_state_cov', 'filtered_state_cov']
# Can only check kalman gain if we didn't use memory conservation
if not memory_conserve:
filter_attr += ['standardized_forecasts_error', 'tmp1', 'tmp2', 'tmp3',
'tmp4']
for name in filter_attr_burn:
actual = getattr(res_chand.filter_results, name)
desired = getattr(res_orig.filter_results, name)
assert_allclose(actual, desired, atol=1e-12)
if not memory_conserve:
smoothed_attr = ['smoothed_state', 'smoothed_state_cov',
'smoothed_state_autocov',
'smoothed_state_disturbance',
'smoothed_state_disturbance_cov',
'smoothed_measurement_disturbance',
'smoothed_measurement_disturbance_cov',
'scaled_smoothed_estimator',
'scaled_smoothed_estimator_cov', 'smoothing_error',
'smoothed_forecasts', 'smoothed_forecasts_error',
'smoothed_forecasts_error_cov']
for name in smoothed_attr:
actual = getattr(res_chand.filter_results, name)
desired = getattr(res_orig.filter_results, name)
assert_allclose(actual, desired, atol=1e-12)
# Test prediction output
nobs = res_chand.model.nobs
if not memory_conserve:
pred_chand = res_chand.get_prediction(start=10, end=nobs + 50,
dynamic=40)
        pred_orig = res_orig.get_prediction(start=10, end=nobs + 50,
                                            dynamic=40)
else:
# In the memory conservation case, we can't do dynamic prediction
pred_chand = res_chand.get_prediction(start=10, end=nobs + 50)
        pred_orig = res_orig.get_prediction(start=10, end=nobs + 50)
assert_allclose(pred_chand.predicted_mean, pred_orig.predicted_mean)
assert_allclose(pred_chand.se_mean, pred_orig.se_mean)
def check_univariate_chandrasekhar(filter_univariate=False, **kwargs):
# Test that Chandrasekhar recursions don't change the output
index = pd.date_range('1960-01-01', '1982-10-01', freq='QS')
dta = pd.DataFrame(results_varmax.lutkepohl_data,
columns=['inv', 'inc', 'consump'], index=index)
endog = np.log(dta['inv']).diff().loc['1960-04-01':'1978-10-01']
mod_orig = sarimax.SARIMAX(endog, **kwargs)
mod_chand = sarimax.SARIMAX(endog, **kwargs)
mod_chand.ssm.filter_chandrasekhar = True
params = mod_orig.start_params
mod_orig.ssm.filter_univariate = filter_univariate
mod_chand.ssm.filter_univariate = filter_univariate
res_chand = mod_chand.smooth(params)
    # Non-concentrated model smoothing
res_orig = mod_orig.smooth(params)
check_output(res_chand, res_orig)
def check_multivariate_chandrasekhar(filter_univariate=False,
gen_obs_cov=False, memory_conserve=False,
**kwargs):
# Test that Chandrasekhar recursions don't change the output
index = pd.date_range('1960-01-01', '1982-10-01', freq='QS')
dta = pd.DataFrame(results_varmax.lutkepohl_data,
columns=['inv', 'inc', 'consump'], index=index)
dta['dln_inv'] = np.log(dta['inv']).diff()
dta['dln_inc'] = np.log(dta['inc']).diff()
dta['dln_consump'] = np.log(dta['consump']).diff()
endog = dta.loc['1960-04-01':'1978-10-01', ['dln_inv', 'dln_inc']]
mod_orig = varmax.VARMAX(endog, **kwargs)
mod_chand = varmax.VARMAX(endog, **kwargs)
mod_chand.ssm.filter_chandrasekhar = True
params = mod_orig.start_params
mod_orig.ssm.filter_univariate = filter_univariate
mod_chand.ssm.filter_univariate = filter_univariate
if gen_obs_cov:
mod_orig['obs_cov'] = np.array([[1., 0.5],
[0.5, 1.]])
mod_chand['obs_cov'] = np.array([[1., 0.5],
[0.5, 1.]])
if memory_conserve:
mod_orig.ssm.set_conserve_memory(
MEMORY_CONSERVE & ~ MEMORY_NO_LIKELIHOOD)
mod_chand.ssm.set_conserve_memory(
MEMORY_CONSERVE & ~ MEMORY_NO_LIKELIHOOD)
res_chand = mod_chand.filter(params)
res_orig = mod_orig.filter(params)
else:
res_chand = mod_chand.smooth(params)
res_orig = mod_orig.smooth(params)
check_output(res_chand, res_orig, memory_conserve=memory_conserve)
def test_chandrasekhar_conventional():
check_univariate_chandrasekhar(filter_univariate=False)
check_univariate_chandrasekhar(filter_univariate=False,
concentrate_scale=True)
check_multivariate_chandrasekhar(filter_univariate=False)
check_multivariate_chandrasekhar(filter_univariate=False,
measurement_error=True)
check_multivariate_chandrasekhar(filter_univariate=False,
error_cov_type='diagonal')
check_multivariate_chandrasekhar(filter_univariate=False,
gen_obs_cov=True)
check_multivariate_chandrasekhar(filter_univariate=False,
gen_obs_cov=True, memory_conserve=True)
def test_chandrasekhar_univariate():
check_univariate_chandrasekhar(filter_univariate=True)
check_univariate_chandrasekhar(filter_univariate=True,
concentrate_scale=True)
check_multivariate_chandrasekhar(filter_univariate=True)
check_multivariate_chandrasekhar(filter_univariate=True,
measurement_error=True)
check_multivariate_chandrasekhar(filter_univariate=True,
error_cov_type='diagonal')
check_multivariate_chandrasekhar(filter_univariate=True,
gen_obs_cov=True)
check_multivariate_chandrasekhar(filter_univariate=True,
gen_obs_cov=True, memory_conserve=True)
def test_invalid():
# Tests that trying to use the Chandrasekhar recursions in invalid
# situations raises an error
# Missing values
endog = np.zeros(10)
endog[1] = np.nan
mod = sarimax.SARIMAX(endog)
mod.ssm.filter_chandrasekhar = True
with pytest.raises(RuntimeError, match=('Cannot use Chandrasekhar'
' recursions with missing data.')):
mod.filter([0.5, 1.0])
# Alternative timing
endog = np.zeros(10)
mod = sarimax.SARIMAX(endog)
mod.ssm.filter_chandrasekhar = True
mod.ssm.timing_init_filtered = True
with pytest.raises(RuntimeError, match=('Cannot use Chandrasekhar'
' recursions with filtered'
' timing.')):
mod.filter([0.5, 1.0])
# Time-varying matrices
endog = np.zeros(10)
mod = sarimax.SARIMAX(endog)
mod.ssm.filter_chandrasekhar = True
mod['obs_cov'] = np.ones((1, 1, 10))
with pytest.raises(RuntimeError, match=('Cannot use Chandrasekhar'
' recursions with time-varying'
r' system matrices \(except for'
r' intercept terms\).')):
mod.filter([0.5, 1.0])
|
|
import numpy as np
from pymoab import core
from pymoab import types
from pymoab import topo_util
class StructuredMultiscaleMesh:
""" Defines a structured multiscale mesh representation.
Parameters
----------
coarse_ratio: List or array of integers
List or array containing three values indicating the coarsening ratio
of the mesh in x, y and z.
mesh_size: List or array of integers
List or array containing three values indicating the mesh size
(number of fine elements) of the mesh in x, y and z.
    block_size: List or array of floats
List or array containing three values indicating the constant
increments of vertex coordinates in x, y and z.
"""
def __init__(self, coarse_ratio, mesh_size, block_size):
self.coarse_ratio = coarse_ratio
self.mesh_size = mesh_size
self.block_size = block_size
self.verts = None # Array containing MOAB vertex entities
self.elems = [] # List containing MOAB volume entities
self.primals = {} # Mapping from tuples (idx, idy, idz) to Meshsets
self.primal_ids = []
self.primal_centroid_ijk = {}
self.primal_adj = {}
# MOAB boilerplate
# self.mb = core.Core()
# self.root_set = self.mb.get_root_set()
# self.mesh_topo_util = topo_util.MeshTopoUtil(self.mb)
def set_moab(self, moab):
self.mb = moab
def calculate_primal_ids(self):
for dim in range(0, 3):
self.primal_ids.append(
[i // (self.coarse_ratio[dim]) for i in range(
self.mesh_size[dim])])
new_primal = []
for dim in range(0, 3):
new_primal.append(
self.primal_ids[dim][(
self.mesh_size[dim] // self.coarse_ratio[dim]) *
self.coarse_ratio[dim]:])
if len(new_primal[dim]) < (self.mesh_size[dim] // 2):
new_primal[dim] = np.repeat(
max(self.primal_ids[dim])-1, len(new_primal[dim])).tolist()
self.primal_ids[dim] = (
self.primal_ids[dim]
[:self.mesh_size[dim] //
self.coarse_ratio[dim] *
self.coarse_ratio[dim]]+new_primal[dim])
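    # Worked example (illustrative): with coarse_ratio = [3, 3, 3] and
    # mesh_size = [10, 10, 10], the ids along one dimension start as
    # [0, 0, 0, 1, 1, 1, 2, 2, 2, 3]; the single leftover fine cell is then
    # merged into the last full coarse block, giving
    # [0, 0, 0, 1, 1, 1, 2, 2, 2, 2].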
def create_fine_vertices(self):
max_mesh_size = max(
self.mesh_size[2]*self.block_size[2],
self.mesh_size[1]*self.block_size[1],
self.mesh_size[0]*self.block_size[0])
coords = np.array([(i, j, k)
for k in (
np.arange(
self.mesh_size[2]+1, dtype='float64') *
self.block_size[2]/max_mesh_size)
for j in (
np.arange(
self.mesh_size[1]+1, dtype='float64') *
self.block_size[1]/max_mesh_size)
for i in (
np.arange(
self.mesh_size[0]+1, dtype='float64') *
self.block_size[0]/max_mesh_size)
], dtype='float64')
self.verts = self.mb.create_vertices(coords.flatten())
def create_tags(self):
self.gid_tag = self.mb.tag_get_handle(
"GLOBAL_ID", 1, types.MB_TYPE_INTEGER, types.MB_TAG_DENSE, True)
self.primal_id_tag = self.mb.tag_get_handle(
"PRIMAL_ID", 1, types.MB_TYPE_INTEGER, types.MB_TAG_SPARSE, True)
self.fine_to_primal_tag = self.mb.tag_get_handle(
"FINE_TO_PRIMAL", 1, types.MB_TYPE_HANDLE,
types.MB_TAG_SPARSE, True)
self.primal_adj_tag = self.mb.tag_get_handle(
"PRIMAL_ADJ", 1, types.MB_TYPE_HANDLE,
types.MB_TAG_SPARSE, True)
self.collocation_point_tag = self.mb.tag_get_handle(
"COLLOCATION_POINT", 1, types.MB_TYPE_HANDLE,
types.MB_TAG_SPARSE, True)
    def _create_hexa(self, i, j, k):
        # Vertex linear index: idx = i + j*nx + k*nx*ny, where nx and ny are
        # the numbers of vertices along x and y (mesh_size + 1).
        nx = self.mesh_size[0] + 1
        nxy = nx * (self.mesh_size[1] + 1)
        hexa = [self.verts[i + j * nx + k * nxy],               # (i, j, k)
                self.verts[(i + 1) + j * nx + k * nxy],         # (i+1, j, k)
                self.verts[(i + 1) + (j + 1) * nx + k * nxy],   # (i+1, j+1, k)
                self.verts[i + (j + 1) * nx + k * nxy],         # (i, j+1, k)
                self.verts[i + j * nx + (k + 1) * nxy],         # (i, j, k+1)
                self.verts[(i + 1) + j * nx + (k + 1) * nxy],   # (i+1, j, k+1)
                self.verts[(i + 1) + (j + 1) * nx + (k + 1) * nxy],  # (i+1, j+1, k+1)
                self.verts[i + (j + 1) * nx + (k + 1) * nxy]]   # (i, j+1, k+1)
        return hexa
def create_fine_blocks_and_primal(self):
cur_id = 0
# Create fine grid
for k, idz in zip(range(self.mesh_size[2]),
self.primal_ids[2]):
print("{0} / {1}".format(k, self.mesh_size[2]))
for j, idy in zip(range(self.mesh_size[1]),
self.primal_ids[1]):
for i, idx in zip(range(self.mesh_size[0]),
self.primal_ids[0]):
hexa = self._create_hexa(i, j, k)
el = self.mb.create_element(types.MBHEX, hexa)
self.mb.tag_set_data(self.gid_tag, el, cur_id)
cur_id += 1
self.elems.append(el)
# Create primal coarse grid
try:
primal = self.primals[(idx, idy, idz)]
self.mb.add_entities(primal, [el])
self.mb.tag_set_data(
self.fine_to_primal_tag, el, primal)
except KeyError:
primal = self.mb.create_meshset()
self.primals[(idx, idy, idz)] = primal
self.mb.add_entities(primal, [el])
self.mb.tag_set_data(
self.fine_to_primal_tag, el, primal)
primal_id = 0
for primal in self.primals.values():
self.mb.tag_set_data(self.primal_id_tag, primal, primal_id)
primal_id += 1
def store_primal_adj(self):
min_coarse_ids = np.array([0, 0, 0])
max_coarse_ids = np.array([max(self.primal_ids[0]),
max(self.primal_ids[1]),
max(self.primal_ids[2])])
for primal_id, primal in self.primals.items():
adj = self.mb.create_meshset()
adj_ids = []
for i in np.arange(-1, 2):
for j in np.arange(-1, 2):
for k in np.arange(-1, 2):
coord_inc = np.array([i, j, k])
adj_id = primal_id + coord_inc
if any(adj_id != primal_id) and \
(sum(coord_inc == [0, 0, 0]) == 2) and \
all(adj_id >= min_coarse_ids) and \
all(adj_id <= max_coarse_ids):
self.mb.add_entities(
adj, [self.primals[tuple(adj_id)]])
adj_ids.append(tuple(adj_id))
self.mb.tag_set_data(self.primal_adj_tag, primal, adj)
self.primal_adj[primal_id] = adj_ids
    def _primal_centroid(self, setid):
        # The centroid is the (integer) average of the eight corner positions
        # of the coarse block, each scaled back to fine-grid ijk coordinates.
        coarse_sums = np.array(
            [[0, 0, 0],
             [0, 0, 1],
             [0, 1, 0],
             [0, 1, 1],
             [1, 0, 0],
             [1, 0, 1],
             [1, 1, 0],
             [1, 1, 1]]
        )
        ratio = np.array([self.coarse_ratio[0],
                          self.coarse_ratio[1],
                          self.coarse_ratio[2]])
        primal_centroid = sum((np.asarray(setid) + corner) * ratio
                              for corner in coarse_sums)
        primal_centroid = primal_centroid // 8
        return primal_centroid
def _get_block_by_ijk(self, i, j, k, n_i, n_j):
"""
Track down the block from its (i,j,k) position.
"""
block = (k)*n_i*n_j+((i)+(j)*n_i)
return block
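    # Illustrative example: for a fine grid with n_i = 4 and n_j = 3, the
    # block at (i, j, k) = (1, 2, 0) maps to linear index 0*4*3 + 1 + 2*4 = 9.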
def _get_elem_by_ijk(self, ijk):
block_id = self._get_block_by_ijk(
ijk[0], ijk[1], ijk[2], self.mesh_size[0], self.mesh_size[1])
elem = self.elems[block_id]
return elem
def _generate_sector_bounding_box(self, primal_id, sector):
bbox = []
for sector_primal in sector:
try:
bbox.append(
self.primal_centroid_ijk[tuple(primal_id - sector_primal)])
except KeyError:
pass
return np.array(bbox)
def _get_bbox_limit_coords(self, bbox):
# Max coords is +1 so that it's possible to do a
# np.arange(min_coords, max_coords) directly and INCLUDE the last coord
max_coords = np.array(
[bbox[:, 0].max(), bbox[:, 1].max(), bbox[:, 2].max()]) + 1
min_coords = np.array(
[bbox[:, 0].min(), bbox[:, 1].min(), bbox[:, 2].min()])
return [max_coords, min_coords]
def _generate_dual_faces(self, bbox):
max_coords, min_coords = self._get_bbox_limit_coords(bbox)
faces_sets = []
for idx in (min_coords[0], max_coords[0]-1):
face_set = self.mb.create_meshset()
faces_sets.append(face_set)
for idy in np.arange(min_coords[1], max_coords[1]):
for idz in np.arange(min_coords[2], max_coords[2]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(face_set, [elem])
# Generate edges
for idy in (min_coords[1], max_coords[1]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idz in np.arange(min_coords[2], max_coords[2]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idz in (min_coords[2], max_coords[2]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
for idz in (min_coords[2], max_coords[2]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idy in np.arange(min_coords[1], max_coords[1]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idy in (min_coords[1], max_coords[1]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
for idy in (min_coords[1], max_coords[1]-1):
face_set = self.mb.create_meshset()
faces_sets.append(face_set)
for idx in np.arange(min_coords[0], max_coords[0]):
for idz in np.arange(min_coords[2], max_coords[2]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(face_set, [elem])
# Generate edges
for idx in (min_coords[0], max_coords[0]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idz in np.arange(min_coords[2], max_coords[2]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idz in (min_coords[2], max_coords[2]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
for idz in (min_coords[2], max_coords[2]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idx in np.arange(min_coords[0], max_coords[0]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idx in (min_coords[0], max_coords[0]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
for idz in (min_coords[2], max_coords[2]-1):
face_set = self.mb.create_meshset()
faces_sets.append(face_set)
for idx in np.arange(min_coords[0], max_coords[0]):
for idy in np.arange(min_coords[1], max_coords[1]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(face_set, [elem])
# Generate edges
for idx in (min_coords[0], max_coords[0]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idy in np.arange(min_coords[1], max_coords[1]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idy in (min_coords[1], max_coords[1]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
for idy in (min_coords[1], max_coords[1]-1):
edge_set = self.mb.create_meshset()
self.mb.add_child_meshset(face_set, edge_set)
for idx in np.arange(min_coords[0], max_coords[0]):
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(edge_set, [elem])
# Generate vertices
for idx in (min_coords[0], max_coords[0]-1):
vertex_set = self.mb.create_meshset()
self.mb.add_child_meshset(edge_set, vertex_set)
elem = self._get_elem_by_ijk((idx, idy, idz))
self.mb.add_entities(vertex_set, [elem])
return faces_sets
def _generate_dual_volume(self, bbox):
max_coords, min_coords = self._get_bbox_limit_coords(bbox)
dual_volume_set = self.mb.create_meshset()
for fine_block_i in np.arange(min_coords[0], max_coords[0]):
for fine_block_j in np.arange(min_coords[1], max_coords[1]):
for fine_block_k in np.arange(min_coords[2], max_coords[2]):
fine_block_ijk = (fine_block_i, fine_block_j, fine_block_k)
elem = self._get_elem_by_ijk(fine_block_ijk)
self.mb.add_entities(dual_volume_set, [elem])
for face_set in self._generate_dual_faces(bbox):
self.mb.add_child_meshset(dual_volume_set, face_set)
return dual_volume_set
def generate_dual(self):
min_coarse_ids = np.array([0, 0, 0])
max_coarse_ids = np.array([max(self.primal_ids[0]),
max(self.primal_ids[1]),
max(self.primal_ids[2])])
i = 0
for primal_id, primal in self.primals.items():
print("{0} / {1}".format(i, len(self.primals.keys())))
i += 1
# Generate dual corners (or primal centroids)
            # The same centroid computation applies everywhere; primals on the
            # mesh boundary simply get their centroid snapped to the boundary.
            primal_centroid = self._primal_centroid(primal_id)
            for dim in range(0, 3):
                if primal_id[dim] in (0, max_coarse_ids[dim]):
                    multiplier = 1 if primal_id[dim] != 0 else 0
                    primal_centroid[dim] = (multiplier *
                                            (self.mesh_size[dim] - 1))
self.primal_centroid_ijk[primal_id] = primal_centroid
# There are up to eight sectors that include each primal
primal_adjs_sectors = np.array([
# First sector
[[0, 0, 0], [0, 1, 0], [-1, 1, 0], [-1, 0, 0],
[0, 0, 1], [0, 1, 1], [-1, 1, 1], [-1, 0, 1]],
# Second sector
[[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]],
# Third sector
[[0, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0],
[0, 0, 1], [0, -1, 1], [-1, -1, 1], [-1, 0, 1]],
# Fourth sector
[[0, 0, 0], [0, -1, 0], [1, -1, 0], [1, 0, 0],
[0, 0, 1], [0, -1, 1], [1, -1, 1], [1, 0, 1]],
# Now the same for the bottom-most sectors
[[0, 0, 0], [0, 1, 0], [-1, 1, 0], [-1, 0, 0],
[0, 0, -1], [0, 1, -1], [-1, 1, -1], [-1, 0, -1]],
[[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0],
[0, 0, -1], [0, 1, -1], [1, 1, -1], [1, 0, -1]],
[[0, 0, 0], [0, -1, 0], [-1, -1, 0], [-1, 0, 0],
[0, 0, -1], [0, -1, -1], [-1, -1, -1], [-1, 0, -1]],
[[0, 0, 0], [0, -1, 0], [1, -1, 0], [1, 0, 0],
[0, 0, -1], [0, -1, -1], [1, -1, -1], [1, 0, -1]],
])
i = 0
for primal_id, primal in self.primals.items():
print("{0} / {1}".format(i, len(self.primals.keys())))
i += 1
collocation_point = self._get_elem_by_ijk(
self.primal_centroid_ijk[primal_id])
collocation_point_root_ms = self.mb.create_meshset()
self.mb.add_entities(
collocation_point_root_ms, [collocation_point])
for sector in primal_adjs_sectors:
bbox = self._generate_sector_bounding_box(primal_id, sector)
# Check if the sector's bounding box has 8 points (this may
# not be the case for all sectors of a corner collocation
# point)
if len(bbox) != 8:
continue
volume_set = self._generate_dual_volume(bbox)
self.mb.add_child_meshset(
collocation_point_root_ms, volume_set)
self.mb.tag_set_data(
self.collocation_point_tag,
collocation_point_root_ms,
collocation_point)
|
|
## @file
# This file is used to define common parser functions for meta-data
#
# Copyright (c) 2008 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
import Common.LongFilePathOs as os
from CommonDataClass.DataClass import *
from EccToolError import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
import EccGlobalData
import re
## Get the include path list for a source file
#
# 1. Find which inf file the source file belongs to
# 2. Find the inf's package
# 3. Return the include path list of the package
#
def GetIncludeListOfFile(WorkSpace, Filepath, Db):
IncludeList = []
Filepath = os.path.normpath(Filepath)
SqlCommand = """
select Value1, FullPath from Inf, File where Inf.Model = %s and Inf.BelongsToFile in(
select distinct B.BelongsToFile from File as A left join Inf as B
where A.ID = B.BelongsToFile and B.Model = %s and (A.Path || '%s' || B.Value1) = '%s')
and Inf.BelongsToFile = File.ID""" \
% (MODEL_META_DATA_PACKAGE, MODEL_EFI_SOURCE_FILE, '\\', Filepath)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
DecFullPath = os.path.normpath(mws.join(WorkSpace, Record[0]))
InfFullPath = os.path.normpath(mws.join(WorkSpace, Record[1]))
(DecPath, DecName) = os.path.split(DecFullPath)
(InfPath, InfName) = os.path.split(InfFullPath)
SqlCommand = """select Value1 from Dec where BelongsToFile =
(select ID from File where FullPath = '%s') and Model = %s""" \
% (DecFullPath, MODEL_EFI_INCLUDE)
NewRecordSet = Db.TblDec.Exec(SqlCommand)
if InfPath not in IncludeList:
IncludeList.append(InfPath)
for NewRecord in NewRecordSet:
IncludePath = os.path.normpath(os.path.join(DecPath, NewRecord[0]))
if IncludePath not in IncludeList:
IncludeList.append(IncludePath)
return IncludeList
## Get the file list
#
# Search table file and find all specific type files
#
def GetFileList(FileModel, Db):
FileList = []
SqlCommand = """select FullPath from File where Model = %s""" % str(FileModel)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
FileList.append(Record[0])
return FileList
## Get the table list
#
# Search table file and find all small tables
#
def GetTableList(FileModelList, Table, Db):
TableList = []
SqlCommand = """select ID from File where Model in %s""" % str(FileModelList)
RecordSet = Db.TblFile.Exec(SqlCommand)
for Record in RecordSet:
TableName = Table + str(Record[0])
TableList.append(TableName)
return TableList
## ParseHeaderCommentSection
#
# Parse Header comment section lines, extract Abstract, Description, Copyright
# , License lines
#
# @param CommentList: List of (Comment, LineNumber)
# @param FileName: FileName of the comment
#
def ParseHeaderCommentSection(CommentList, FileName = None):
Abstract = ''
Description = ''
Copyright = ''
License = ''
EndOfLine = "\n"
STR_HEADER_COMMENT_START = "@file"
#
# used to indicate the state of processing header comment section of dec,
# inf files
#
HEADER_COMMENT_NOT_STARTED = -1
HEADER_COMMENT_STARTED = 0
HEADER_COMMENT_FILE = 1
HEADER_COMMENT_ABSTRACT = 2
HEADER_COMMENT_DESCRIPTION = 3
HEADER_COMMENT_COPYRIGHT = 4
HEADER_COMMENT_LICENSE = 5
HEADER_COMMENT_END = 6
#
# first find the last copyright line
#
Last = 0
HeaderCommentStage = HEADER_COMMENT_NOT_STARTED
for Index in xrange(len(CommentList)-1, 0, -1):
Line = CommentList[Index][0]
if _IsCopyrightLine(Line):
Last = Index
break
for Item in CommentList:
Line = Item[0]
LineNo = Item[1]
if not Line.startswith('#') and Line:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'Comment must start with #'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
Comment = CleanString2(Line)[1]
Comment = Comment.strip()
#
        # If there are blank lines within the License or Description blocks,
        # keep them, as they indicate separate blocks; a blank line in the
        # position where the Abstract should be is also kept, as it indicates
        # that there is no abstract
#
if not Comment and HeaderCommentStage not in [HEADER_COMMENT_LICENSE, \
HEADER_COMMENT_DESCRIPTION, HEADER_COMMENT_ABSTRACT]:
continue
if HeaderCommentStage == HEADER_COMMENT_NOT_STARTED:
if Comment.startswith(STR_HEADER_COMMENT_START):
HeaderCommentStage = HEADER_COMMENT_ABSTRACT
else:
License += Comment + EndOfLine
else:
if HeaderCommentStage == HEADER_COMMENT_ABSTRACT:
#
# in case there is no abstract and description
#
if not Comment:
Abstract = ''
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Abstract += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_DESCRIPTION
elif HeaderCommentStage == HEADER_COMMENT_DESCRIPTION:
#
# in case there is no description
#
if _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_COPYRIGHT
else:
Description += Comment + EndOfLine
elif HeaderCommentStage == HEADER_COMMENT_COPYRIGHT:
if _IsCopyrightLine(Comment):
Copyright += Comment + EndOfLine
else:
#
                    # Contents after the last copyright line are license text;
                    # non-copyright lines in between copyright lines are discarded
#
if LineNo > Last:
if License:
License += EndOfLine
License += Comment + EndOfLine
HeaderCommentStage = HEADER_COMMENT_LICENSE
else:
if not Comment and not License:
continue
License += Comment + EndOfLine
if not Copyright.strip():
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'Header comment section must have copyright information'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
if not License.strip():
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'Header comment section must have license information'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
if not Abstract.strip() or Abstract.find('Component description file') > -1:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FileName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'Header comment section must have Abstract information.'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
return Abstract.strip(), Description.strip(), Copyright.strip(), License.strip()
## _IsCopyrightLine
# check whether the current line is a copyright line; the criterion is whether there is the case-insensitive keyword "Copyright"
# followed by zero or more white space characters followed by a "(" character
#
# @param LineContent: the line need to be checked
# @return: True if the current line is a copyright line, False otherwise
#
def _IsCopyrightLine (LineContent):
LineContent = LineContent.upper()
Result = False
ReIsCopyrightRe = re.compile(r"""(^|\s)COPYRIGHT *\(""", re.DOTALL)
if ReIsCopyrightRe.search(LineContent):
Result = True
return Result
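## Illustrative examples (not part of the original code):
#
# _IsCopyrightLine("Copyright (c) 2014, Intel Corporation.") -> True
# _IsCopyrightLine("copyright (C) 2014") -> True
# _IsCopyrightLine("All rights reserved.") -> False
#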
## CleanString2
#
# Split comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content,
# default is DataType.TAB_COMMENT_SPLIT
#
def CleanString2(Line, CommentCharacter='#', AllowCppStyleComment=False):
#
# remove whitespace
#
Line = Line.strip()
#
# Replace EDK1's comment character
#
if AllowCppStyleComment:
Line = Line.replace('//', CommentCharacter)
#
# separate comments and statements
#
LineParts = Line.split(CommentCharacter, 1)
#
# remove whitespace again
#
Line = LineParts[0].strip()
if len(LineParts) > 1:
Comment = LineParts[1].strip()
#
# Remove prefixed and trailing comment characters
#
Start = 0
End = len(Comment)
while Start < End and Comment.startswith(CommentCharacter, Start, End):
Start += 1
while End >= 0 and Comment.endswith(CommentCharacter, Start, End):
End -= 1
Comment = Comment[Start:End]
Comment = Comment.strip()
else:
Comment = ''
return Line, Comment
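## Illustrative example (not part of the original code; the input is a
## hypothetical DEC-style line):
#
# CleanString2("gEfiDemoTokenGuid # GUID for the demo token ##") returns
# ('gEfiDemoTokenGuid', 'GUID for the demo token')
#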
|
|
import json
import socket
import docker
import falcon
class CreateR(object):
"""
This endpoint is for creating a new filter
"""
def on_post(self, req, resp):
"""
Send a POST request with id/nic/interval/filter/iters and it will start
a container for collection with those specifications
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# verify payload is in the correct format
# default to no filter
payload = {}
if req.content_length:
try:
payload = json.load(req.stream)
except Exception as e: # pragma: no cover
resp.body = "(False, 'malformed payload')"
return
else:
resp.body = "(False, 'malformed payload')"
return
if 'filter' not in payload:
payload['filter'] = ''
# payload should have the following fields:
# - id
# - nic
# - interval
# - filter
# - iters
        # This should spin up a tcpdump container, attached to the specified
        # nic, that writes out pcap files based on the filter. If iters is -1
        # it loops until killed, otherwise it completes iters captures (and
        # creates that many pcap files). It should keep track of the container
        # id, the container name, and the id of the filter.
# verify payload has necessary information
if 'nic' not in payload:
resp.body = "(False, 'payload missing nic')"
return
if 'id' not in payload:
resp.body = "(False, 'payload missing id')"
return
if 'interval' not in payload:
resp.body = "(False, 'payload missing interval')"
return
if 'iters' not in payload:
resp.body = "(False, 'payload missing iters')"
return
# connect to docker
c = None
try:
c = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# spin up container with payload specifications
if c:
tool_d = {'network': 'host',
'environment': ['PYTHONUNBUFFERED=1', 'rabbit=true', 'external_host=0.0.0.0'],
'volumes_from': [socket.gethostname()]}
cmd = '/tmp/run.sh ' + payload['nic'] + ' ' + payload['interval']
cmd += ' ' + payload['id'] + ' ' + payload['iters'] + ' "'
cmd += payload['filter'] + '"'
try:
container = c.containers.run(image='cyberreboot/vent-ncapture:master',
command=cmd, remove=True, detach=True, **tool_d)
resp.body = "(True, 'successfully created and started filter: " + \
str(payload['id']) + ' on container: ' + \
str(container.id) + "')"
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to start container because: " + str(e) + "')"
return
return
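# A minimal sketch (not part of the service) of the JSON body CreateR.on_post
# expects; the values shown are hypothetical. 'filter' is optional and defaults
# to an empty string, the other fields are required.
def _example_create_payload():
    return json.dumps({
        'id': 'capture-1',    # identifier for this capture/filter
        'nic': 'eth0',        # interface to attach tcpdump to
        'interval': '60',     # seconds per capture file
        'iters': '3',         # number of captures; '-1' loops until killed
        'filter': 'port 80',  # optional tcpdump filter expression
    })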
class DeleteR(object):
"""
This endpoint is for deleting a network tap filter container
"""
def on_post(self, req, resp):
"""
Send a POST request with a docker container ID and it will be deleted.
Example input: {'id': "12345"}, {'id': ["123", "456"]}
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# verify user input
payload = {}
if req.content_length:
try:
payload = json.load(req.stream)
except Exception as e: # pragma: no cover
resp.body = "(False, 'malformed payload')"
return
else:
resp.body = "(False, 'malformed payload')"
return
# verify payload has a container ID
if 'id' not in payload:
resp.body = "(False, 'payload missing id')"
return
        # connect to docker and delete the given container(s)
c = None
try:
c = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# delete containers chosen from CLI
try:
for container_id in payload['id']:
c.containers.get(container_id).remove()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to delete containers because: " + str(e) + "')"
return
resp.body = '(True, ' + str(payload['id']) + ')'
return
class InfoR(object):
"""
This endpoint is for returning info about this service
"""
def on_get(self, req, resp):
resp.body = json.dumps({'version': 'v0.1.0'})
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
return
class ListR(object):
"""
This endpoint is for listing all filter containers
"""
def on_get(self, req, resp):
"""
Send a GET request to get the list of all of the filter containers
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# connect to docker
try:
containers = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# search for all docker containers and grab ncapture containers
container_list = []
try:
for c in containers.containers.list(all=True):
# TODO: maybe find a way to not have to hard code image name
if c.attrs['Config']['Image'] == \
'cyberreboot/vent-ncapture:master':
# the core container is not what we want
if 'core' not in c.attrs['Config']['Labels']['vent.groups']:
lst = {}
lst['id'] = c.attrs['Id'][:12]
lst['status'] = c.attrs['State']['Status']
lst['args'] = c.attrs['Args']
container_list.append(lst)
except Exception as e: # pragma: no cover
resp.body = "(False, 'Failure because: " + str(e) + "')"
return
resp.body = json.dumps(container_list)
return
class NICsR(object):
"""
This endpoint is for listing all available network interfaces
"""
def on_get(self, req, resp):
"""
Send a GET request to get the list of all available network interfaces
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# connect to docker
try:
d_client = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# start container to get network interfaces
nics = ''
try:
nics = d_client.containers.run('cyberreboot/gonet',
network_mode='host', remove=True)
            resp.body = '(True, ' + str(nics) + ')'
except Exception as e: # pragma: no cover
resp.body = "(False, 'Failure because: " + str(e) + "')"
return
return
class StartR(object):
"""
This endpoint is for starting a network tap filter container
"""
def on_post(self, req, resp):
"""
Send a POST request with a docker container ID and it will be started.
Example input: {'id': "12345"}, {'id': ["123", "456"]}
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# verify user input
payload = {}
if req.content_length:
try:
payload = json.load(req.stream)
except Exception as e: # pragma: no cover
resp.body = "(False, 'malformed payload')"
return
else:
resp.body = "(False, 'malformed payload')"
return
# verify payload has a container ID
if 'id' not in payload:
resp.body = "(False, 'payload missing container id')"
return
        # connect to docker and start the given container(s)
c = None
try:
c = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# start containers chosen from CLI
try:
for container_id in payload['id']:
c.containers.get(container_id).start()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to start list of containers because: " + str(e) + "')"
return
resp.body = '(True, ' + str(payload['id']) + ')'
return
class StopR(object):
"""
This endpoint is for stopping a network tap filter container
"""
def on_post(self, req, resp):
"""
Send a POST request with a docker container ID and it will be stopped.
        Example input: {'id': "12345"}, {'id': ["123", "456"]}
"""
resp.content_type = falcon.MEDIA_TEXT
resp.status = falcon.HTTP_200
# verify user input
payload = {}
if req.content_length:
try:
payload = json.load(req.stream)
except Exception as e: # pragma: no cover
resp.body = "(False, 'malformed payload')"
return
else:
resp.body = "(False, 'malformed payload')"
return
# verify payload has a container ID
if 'id' not in payload:
resp.body = "(False, 'payload missing container id')"
return
# connect to docker and stop the given container
c = None
try:
c = docker.from_env()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to connect to docker because: " + str(e) + "')"
return
# stop containers chosen from CLI
try:
for container_id in payload['id']:
c.containers.get(container_id).stop()
except Exception as e: # pragma: no cover
resp.body = "(False, 'unable to stop list of containers because: " + str(e) + "')"
return
resp.body = '(True, ' + str(payload['id']) + ')'
return
|
|
import random
#helpers to decode the compact card/tile strings: a card is <color><value>,
#a tile is <kind><hex cost><hex wall bitmask>
card_color = lambda c: c[0]
card_value = lambda c: int(c[1])
tile_kind = lambda t: t[0]
tile_cost = lambda t: int(t[1],16)
tile_wall = lambda t: int(t[2],16) #1 = north, 2 = east, 4 = south, 8 = west
#'f' = fountain = starting tile (no scoring)
scoring = [{'f':[], 'p':[1], 's':[2], 'a':[3], 'c':[4], 'g':[5], 't':[6]},
{'f':[], 'p':[8,1], 's':[9,2], 'a':[10,3], 'c':[11,4], 'g':[12,5], 't':[13,6]},
           {'f':[], 'p':[16,8,1], 's':[17,9,2], 'a':[18,10,3], 'c':[19,11,4], 'g':[20,12,5], 't':[21,13,6]}]
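#illustration only (not used by the game logic): how the accessor lambdas above
#decode the compact strings; 'g5' is a 'g'-color card of value 5, and 'a6c' is
#an 'a'-kind tile costing 0x6 = 6 with wall bits 0xc = 12 (south + west)
def _example_decoding():
    assert card_color('g5') == 'g' and card_value('g5') == 5
    assert tile_kind('a6c') == 'a'
    assert tile_cost('a6c') == 6
    assert tile_wall('a6c') == 12
    return True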
class Player:
def __init__(self, name):
self.name = name
self.score = 0
self.tiles = []
self.cards = []
def make_cost(self, cost, avail, used=None):
if not used: used = []
max_s = sum(avail)
if max_s < cost: return None #impossible
        if max_s == cost: return used + avail #use all remaining cards if necessary
        #use single card if available
        if cost in avail:
            used.append(cost)
            return used
        #now we're looking for some combo of lower cost cards
        avail = [v for v in avail if v<cost]
        for i,v in enumerate(avail):
            navail = avail[::]
            del navail[i]
            nused = used[::]
            nused.append(v)
            result = self.make_cost(cost-v, navail, nused)
            if result:
                return result
        #no combination of the remaining cards covers the cost
        return None
def take_turn(self, bought, table_cards):
taken = []
#buy exact if possible, and only exact
for (col,t),used in bought.iteritems():
cards_in_col = [card_value(c) for c in self.cards if card_color(c) == col]
used = self.make_cost(tile_cost(t), cards_in_col)
bought[(col,t)] = ["%s%d" % (col,c) for c in used] if used else []
#take new cards at random
taken_sum = 0
while table_cards:
next_card = random.choice(table_cards)
taken_sum += card_value(next_card)
if taken and taken_sum>5:
break
table_cards.remove(next_card)
taken.append(next_card)
return bought,taken
class Game:
def __init__(self, players):
self.p = players
for cp in self.p:
cp.tiles.append('f00')
#component setup
self.tiles = ['p2b', 'p3c', 'p46', 'p59', 'p61', 'p72', 'p80',
's3e', 's43', 's5c', 's66', 's78', 's84', 's90',
'a47', 'a59', 'a63', 'a6c', 'a76', 'a81', 'a82', 'a90', 'aa0',
'c5d', 'c66', 'c73', 'c7c', 'c89', 'c94', 'c98', 'ca0', 'cb0',
'g6e', 'g7d', 'g83', 'g8c', 'g89', 'g92', 'ga0', 'ga1', 'ga8', 'gb0', 'gc4',
't7b', 't87', 't96', 't93', 't99', 'ta8', 'tb1', 'tb4', 'tb0', 'tc0', 'td2']
random.shuffle(self.tiles)
self.cards = [c+str(i) for c in ['o','y','g','b'] for i in range(1,10) for j in range(3)]
random.shuffle(self.cards)
self.discard = []
self.table_tiles = dict((c,self.tiles.pop()) for c in ['o','y','g','b'])
#deal cards
low_sum, low_count = 1000, 25
self.turn = -1
for i,cp in enumerate(self.p):
cur_sum, cur_count = 0, 0
while cur_sum < 20:
cp.cards.append(self.cards.pop())
cur_sum += card_value(cp.cards[-1])
cur_count += 1
if cur_sum < low_sum or (cur_sum==low_sum and cur_count<low_count):
self.turn = i-1 #turn incremented at beginning of loop
low_sum, low_count = cur_sum, cur_count
self.table_cards = [self.cards.pop() for i in range(4)]
#insert None to represent scoring cards
n = len(self.cards)
self.cards.insert(random.randint(int(.2*n),int(.4*n)), None)
self.cards.insert(random.randint(int(.6*n),int(.8*n))+1, None)
self.play()
def score(self):
#point values for current round scoring
c_scoring = scoring[self.rnd-1]
#for each kind of building, count how many buildings each player has
counts = dict((k,[[0,i] for i in range(len(self.p))]) for k in c_scoring)
for i,cp in enumerate(self.p):
for t in cp.tiles:
counts[tile_kind(t)][i][0] += 1
#score one kind of building at a time
for k,pcount in counts.iteritems():
#i tracks how many players have been awarded points so far for this kind of building
i = 0
while i<len(c_scoring[k]):
ith_place = []
max_count = max(pcount)[0]
#no points if nobody has this kind of tile
if max_count == 0: break
#store indices for all players tied for ith place
for cp in pcount:
if cp[0] == max_count:
ith_place.append(cp[1])
cp[0] = 0 #if you score for 1st place you can't score again for 2nd place
#split the points among all tied for ith place, round down
points = sum(c_scoring[k][i:i+len(ith_place)])/len(ith_place)
for pi in ith_place:
self.p[pi].score += points
i += len(ith_place)
self.rnd += 1
def play(self):
self.rnd = 1
#take turns until a replacement tile can no longer be drawn
while self.tiles or None not in self.table_tiles.values():
self.turn += 1
cp = self.p[self.turn%len(self.p)]
#decide turn
bought,taken = cp.take_turn(dict(((k,v),[]) for k,v in self.table_tiles.iteritems()), self.table_cards[::])
#execute turn
for (col,tile),cards_used in bought.iteritems():
                if self.table_tiles[col] != tile:
                    print "ERROR! Tried to buy %s %s but the available tiles are %s" % (col,tile,self.table_tiles)
return
if cards_used:
for c in cards_used:
cp.cards.remove(c)
self.discard.append(c)
cp.tiles.append(self.table_tiles[col])
print "%s bought tile %s using %s" % (cp.name, self.table_tiles[col], cards_used)
self.table_tiles[col] = self.tiles.pop() if self.tiles else None
if len(taken) > 1 and sum(card_value(c) for c in taken) > 5:
print "ERROR! Tried to take %s, which add up to >5." % taken
return
for c in taken:
self.table_cards.remove(c)
cp.cards.append(c)
print "%s took card(s) %s" % (cp.name, taken)
#replenish table cards
while len(self.table_cards) < 4:
if not self.cards:
random.shuffle(self.discard)
self.cards += self.discard
self.discard = []
c = self.cards.pop()
if not c: #scoring card
self.score()
print "SCORING ROUND %d:" % (self.rnd-1)
for cp2 in self.p:
print " %s\t%d" % (cp2.name, cp2.score)
c = self.cards.pop()
self.table_cards.append(c)
#award remaining tiles to player with highest card sum in that color, no ties
for col,t in self.table_tiles.iteritems():
if not t: continue
player_sums = [sum(card_value(c) for c in cp.cards if card_color(c) == col) for cp in self.p]
max_sum = max(player_sums)
max_sum_players = [pi for pi,s in enumerate(player_sums) if s == max_sum]
if len(max_sum_players) == 1:
self.p[max_sum_players[0]].tiles.append(self.table_tiles[col])
print "%s won tile %s with a sum of %s %s" % (self.p[max_sum_players[0]].name, self.table_tiles[col], col, max_sum)
self.table_tiles[col] = None
self.score()
winning_score = max(cp.score for cp in self.p)
print "FINAL SCORES:"
for cp2 in self.p:
print "%s %s\t%d" % ('(W)' if cp2.score == winning_score else ' ', cp2.name, cp2.score)
for i in range(100):
g = Game([Player(p) for p in ['Tristan','Brian','Jason','Mark','Andrew','Kyle']])
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A script that adds claims to Wikidata items based on a list of pages.
------------------------------------------------------------------------------
Usage:
python pwb.py claimit [pagegenerators] P1 Q2 P123 Q456
You can use any typical pagegenerator (like categories) to provide with a
list of pages. Then list the property-->target pairs to add.
------------------------------------------------------------------------------
For geographic coordinates:
python pwb.py claimit [pagegenerators] P625 [lat-dec],[long-dec],[prec]
[lat-dec] and [long-dec] represent the latitude and longitude respectively,
and [prec] represents the precision. All values are in decimal degrees,
not DMS. If [prec] is omitted, the default precision is 0.0001 degrees.
Example:
python pwb.py claimit [pagegenerators] P625 -23.3991,-52.0910,0.0001
------------------------------------------------------------------------------
By default, claimit.py does not add a claim if one with the same property
already exists on the page. To override this behavior, use the 'exists' option:
python pwb.py claimit [pagegenerators] P246 "string example" -exists:p
Suppose the claim you want to add has the same property as an existing claim
and the "-exists:p" argument is used. Now, claimit.py will not add the claim
if it has the same target, source, and/or the existing claim has qualifiers.
To override this behavior, add 't' (target), 's' (sources), or 'q' (qualifiers)
to the 'exists' argument.
For instance, to add the claim to each page even if one with the same
property and target and some qualifiers already exists:
python pwb.py claimit [pagegenerators] P246 "string example" -exists:ptq
Note that the ordering of the letters in the 'exists' argument does not matter,
but 'p' must be included.
"""
#
# (C) Legoktm, 2013
# (C) Pywikibot team, 2013-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import pagegenerators, WikidataBot
# This is required for the text that is shown when you run this script
# with the parameter -help or without parameters.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
class ClaimRobot(WikidataBot):
"""A bot to add Wikidata claims."""
def __init__(self, generator, claims, exists_arg=''):
"""
Constructor.
Arguments:
* generator - A generator that yields Page objects.
* claims - A list of wikidata claims
* exists_arg - String specifying how to handle duplicate claims
"""
self.availableOptions['always'] = True
super(ClaimRobot, self).__init__(use_from_page=None)
self.generator = generator
self.claims = claims
self.exists_arg = ''.join(x for x in exists_arg.lower() if x in 'pqst')
self.cacheSources()
if self.exists_arg:
pywikibot.output("'exists' argument set to '%s'" % self.exists_arg)
def treat(self, page, item):
"""Treat each page."""
self.current_page = page
# The generator might yield pages from multiple sites
source = self.getSource(page.site)
for claim in self.claims:
# Existing claims on page of same property
for existing in item.claims.get(claim.getID(), []):
# If claim with same property already exists...
if 'p' not in self.exists_arg:
pywikibot.log(
'Skipping %s because claim with same property already exists'
% (claim.getID(),))
pywikibot.log(
'Use -exists:p option to override this behavior')
break
# If some attribute of the claim being added
# matches some attribute in an existing claim of
# the same property, skip the claim, unless the
# 'exists' argument overrides it.
if (existing.target_equals(claim.getTarget()) and
't' not in self.exists_arg):
pywikibot.log(
'Skipping %s because claim with same target already exists'
% (claim.getID(),))
pywikibot.log(
"Append 't' to -exists argument to override this behavior")
break
if 'q' not in self.exists_arg and not existing.qualifiers:
pywikibot.log(
'Skipping %s because claim without qualifiers already exists'
% (claim.getID(),))
pywikibot.log(
"Append 'q' to -exists argument to override this behavior")
break
if ('s' not in self.exists_arg or not source) and not existing.sources:
pywikibot.log(
'Skipping %s because claim without source already exists'
% (claim.getID(),))
pywikibot.log(
"Append 's' to -exists argument to override this behavior")
break
if ('s' not in self.exists_arg and source and
any(source.getID() in ref and
all(snak.target_equals(source.getTarget())
for snak in ref[source.getID()])
for ref in existing.sources)):
pywikibot.log(
'Skipping %s because claim with the same source already exists'
% (claim.getID(),))
pywikibot.log(
"Append 's' to -exists argument to override this behavior")
break
else:
self.user_add_claim(item, claim, page.site)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
@rtype: bool
"""
exists_arg = ''
commandline_claims = list()
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
gen = pagegenerators.GeneratorFactory()
for arg in local_args:
# Handle args specifying how to handle duplicate claims
if arg.startswith('-exists:'):
exists_arg = arg.split(':')[1]
continue
# Handle page generator args
if gen.handleArg(arg):
continue
commandline_claims.append(arg)
if len(commandline_claims) % 2:
pywikibot.error('Incomplete command line property-value pair.')
return False
claims = list()
repo = pywikibot.Site().data_repository()
for i in range(0, len(commandline_claims), 2):
claim = pywikibot.Claim(repo, commandline_claims[i])
if claim.type == 'wikibase-item':
target = pywikibot.ItemPage(repo, commandline_claims[i + 1])
elif claim.type == 'string':
target = commandline_claims[i + 1]
elif claim.type == 'globe-coordinate':
coord_args = [float(c) for c in commandline_claims[i + 1].split(',')]
if len(coord_args) >= 3:
precision = coord_args[2]
else:
precision = 0.0001 # Default value (~10 m at equator)
target = pywikibot.Coordinate(coord_args[0], coord_args[1], precision=precision)
else:
raise NotImplementedError(
"%s datatype is not yet supported by claimit.py"
% claim.type)
claim.setTarget(target)
claims.append(claim)
generator = gen.getCombinedGenerator()
if not generator:
pywikibot.bot.suggest_help(missing_generator=True)
return False
bot = ClaimRobot(generator, claims, exists_arg)
bot.run()
return True
if __name__ == "__main__":
main()
|
|
"""
ui.turn.menu
Defines helper functions to build up the turn editor menu.
:author: Brandon Arrendondo
:license: MIT, see LICENSE.txt for more details.
"""
from PySide.QtGui import QAction
from PySide.QtGui import QActionGroup
def build_action_map(main_window):
action_map = {}
action_table = [
{
"action_name": '&New...',
"shortcut": 'Ctrl+N',
"status_tip": 'Create new game.',
"trigger": main_window.handle_new_game,
"map_name": 'new'
},
{
"action_name": '&Open...',
"shortcut": 'Ctrl+O',
"status_tip": 'Open a previously saved game.',
"trigger": main_window.handle_open_game,
"map_name": 'open'
},
{
"action_name": '&Save',
"shortcut": 'Ctrl+S',
"status_tip": 'Save the current game changes.',
"trigger": main_window.handle_save_game,
"map_name": 'save'
},
{
"action_name": 'Custom &Race Wizard...',
"shortcut": None,
"status_tip": 'Build a custom race.',
"trigger": main_window.handle_custom_race_wizard,
"map_name": 'custom_race'
},
{
"action_name": '&Close',
"shortcut": None,
"status_tip": 'Close the current game and return to the main \
screen.',
"trigger": main_window.handle_close_game,
"map_name": 'close'
},
{
"action_name": 'E&xit',
"shortcut": None,
"status_tip": 'Exit the game.',
"trigger": main_window.handle_exit_game,
"map_name": 'exit'
},
{
"action_name": '&Toolbar',
"shortcut": None,
"status_tip": 'Toggle the toolbar.',
"trigger": main_window.handle_toggle_toolbar,
"map_name": 'toggle_toolbar'
},
{
"action_name": '&Find...',
"shortcut": 'Ctrl+F',
"status_tip": 'Find a planet or fleet.',
"trigger": main_window.handle_find,
"map_name": 'find'
},
{
"action_name": '25%',
"shortcut": None,
"status_tip": 'Zoom the space window to 25%.',
"trigger": main_window.handle_zoom_25,
"map_name": 'zoom_25'
},
{
"action_name": '38%',
"shortcut": None,
"status_tip": 'Zoom the space window to 38%.',
"trigger": main_window.handle_zoom_38,
"map_name": 'zoom_38'
},
{
"action_name": '50%',
"shortcut": None,
"status_tip": 'Zoom the space window to 50%.',
"trigger": main_window.handle_zoom_50,
"map_name": 'zoom_50'
},
{
"action_name": '100%',
"shortcut": None,
"status_tip": 'Zoom the space window to 100%.',
"trigger": main_window.handle_zoom_100,
"map_name": 'zoom_100'
},
{
"action_name": '125%',
"shortcut": None,
"status_tip": 'Zoom the space window to 125%.',
"trigger": main_window.handle_zoom_125,
"map_name": 'zoom_125'
},
{
"action_name": '150%',
"shortcut": None,
"status_tip": 'Zoom the space window to 150%.',
"trigger": main_window.handle_zoom_150,
"map_name": 'zoom_150'
},
{
"action_name": '200%',
"shortcut": None,
"status_tip": 'Zoom the space window to 200%.',
"trigger": main_window.handle_zoom_200,
"map_name": 'zoom_200'
},
{
"action_name": '400%',
"shortcut": None,
"status_tip": 'Zoom the space window to 400%.',
"trigger": main_window.handle_zoom_400,
"map_name": 'zoom_400'
},
{
"action_name": "&Race...",
"shortcut": 'F8',
"status_tip": "View your current race.",
"trigger": main_window.handle_current_race_wizard,
"map_name": 'current_race'
},
{
"action_name": "&Game Parameters...",
"shortcut": None,
"status_tip": "View the current game paramters.",
"trigger": main_window.handle_view_game_parameters,
"map_name": "game_parameters"
},
{
"action_name": "&Generate",
"shortcut": "F9",
"status_tip": "Generate the next turn.",
"trigger": main_window.handle_generate,
"map_name": "generate"
},
{
"action_name": "&Ship Design...",
"shortcut": "F4",
"status_tip": "Build custom ship designs.",
"trigger": main_window.handle_ship_design,
"map_name": "ship_design"
},
{
"action_name": "&Research...",
"shortcut": "F5",
"status_tip": "Set research levels.",
"trigger": main_window.handle_research,
"map_name": "research"
},
{
"action_name": "&Battle Plans...",
"shortcut": "F6",
"status_tip": "Create battle orders for ships and fleets.",
"trigger": main_window.handle_battle_plans,
"map_name": "battle_plans"
},
{
"action_name": "&Planets...",
"shortcut": None,
"status_tip": "Show a report of colonized planets.",
"trigger": main_window.handle_planets_report,
"map_name": "planets"
},
{
"action_name": "&Fleets...",
"shortcut": "Show a report of your fleets.",
"status_tip": None,
"trigger": main_window.handle_fleets_report,
"map_name": "fleets"
},
{
"action_name": "&Other Fleets...",
"shortcut": None,
"status_tip": "Show a report of fleets that are not your own.",
"trigger": main_window.handle_other_fleets_report,
"map_name": "other_fleets"
},
{
"action_name": "&Battles...",
"shortcut": None,
"status_tip": "Show a report of battles you have been involved in \
this turn",
"trigger": main_window.handle_battle_report,
"map_name": "battles"
},
{
"action_name": "&Score...",
"shortcut": "F10",
"status_tip": "Show the current game score",
"trigger": main_window.handle_score,
"map_name": "score"
},
{
"action_name": "&Universe Information",
"shortcut": None,
"status_tip": "Write all known universe information to a file.",
"trigger": main_window.handle_dump_universe,
"map_name": "dump_universe"
},
{
"action_name": "&Planet Information",
"shortcut": None,
"status_tip": "Write all known planet information to a file.",
"trigger": main_window.handle_dump_planets,
"map_name": "dump_planets"
},
{
"action_name": "&Fleet Information",
"shortcut": None,
"status_tip": "Write all known fleet information to a file.",
"trigger": main_window.handle_dump_fleets,
"map_name": "dump_fleets"
},
{
"action_name": "&Introduction...",
"shortcut": None,
"status_tip": "Introduction to this game.",
"trigger": main_window.handle_introduction,
"map_name": "introduction"
},
{
"action_name": "&Player's Guide...",
"shortcut": "F1",
"status_tip": "The player's reference manual for this game.",
"trigger": main_window.handle_guide,
"map_name": "guide"
},
{
"action_name": "Technology &Browser...",
"shortcut": "F2",
"status_tip": "View components available via technology advances.",
"trigger": main_window.handle_tech_browser,
"map_name": "tech_browser"
},
{
"action_name": "&Tutorial",
"shortcut": None,
"status_tip": "Begin the tutorial.",
"trigger": main_window.handle_tutorial,
"map_name": "tutorial"
},
{
"action_name": "&About...",
"shortcut": None,
"status_tip": "View details of this game.",
"trigger": main_window.handle_about,
"map_name": "about"
}
]
for action in action_table:
built_action = build_action(main_window,
action["action_name"],
action["shortcut"],
action["status_tip"],
action["trigger"])
action_map.update({action["map_name"]: built_action})
return action_map
def build_main_menu(main_window):
menu_bar = main_window.menuBar()
action_map = main_window.action_map
menu_map = {}
# File Menu
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(action_map["new"])
file_menu.addAction(action_map["open"])
file_menu.addAction(action_map["save"])
file_menu.addSeparator()
file_menu.addAction(action_map["custom_race"])
file_menu.addSeparator()
file_menu.addAction(action_map["close"])
file_menu.addAction(action_map["exit"])
menu_map.update({"file": file_menu})
# View Menu
view_menu = menu_bar.addMenu('&View')
view_menu.addAction(action_map["toggle_toolbar"])
action_map["toggle_toolbar"].setCheckable(True)
view_menu.addSeparator()
view_menu.addAction(action_map["find"])
zoom_menu = view_menu.addMenu("&Zoom")
zoom_levels = QActionGroup(main_window)
zoom_levels.addAction(action_map["zoom_25"])
zoom_levels.addAction(action_map["zoom_38"])
zoom_levels.addAction(action_map["zoom_50"])
zoom_levels.addAction(action_map["zoom_100"])
zoom_levels.addAction(action_map["zoom_125"])
zoom_levels.addAction(action_map["zoom_150"])
zoom_levels.addAction(action_map["zoom_200"])
zoom_levels.addAction(action_map["zoom_400"])
for action in zoom_levels.actions():
action.setCheckable(True)
zoom_menu.addAction(action)
zoom_levels.actions()[main_window.view_options.zoom_level].setChecked(True)
menu_map.update({"zoom": zoom_menu})
view_menu.addSeparator()
view_menu.addAction(action_map["current_race"])
view_menu.addAction(action_map["game_parameters"])
menu_map.update({"view": view_menu})
# Turn Menu
turn_menu = menu_bar.addMenu('&Turn')
turn_menu.addAction(action_map["generate"])
menu_map.update({"turn": turn_menu})
# Commands Menu
commands_menu = menu_bar.addMenu('&Commands')
commands_menu.addAction(action_map["ship_design"])
commands_menu.addAction(action_map["research"])
commands_menu.addAction(action_map["battle_plans"])
menu_map.update({"commands": commands_menu})
# Report
report_menu = menu_bar.addMenu('&Report')
report_menu.addAction(action_map["planets"])
report_menu.addAction(action_map["fleets"])
report_menu.addAction(action_map["other_fleets"])
report_menu.addSeparator()
report_menu.addAction(action_map["battles"])
report_menu.addSeparator()
report_menu.addAction(action_map["score"])
report_menu.addSeparator()
dump_menu = report_menu.addMenu("&Dump to Text File")
dump_menu.addAction(action_map["dump_universe"])
dump_menu.addAction(action_map["dump_planets"])
dump_menu.addAction(action_map["dump_fleets"])
menu_map.update({"report": report_menu})
menu_map.update({"dump": dump_menu})
# Help Menu
help_menu = menu_bar.addMenu('&Help')
help_menu.addAction(action_map["introduction"])
help_menu.addAction(action_map["guide"])
help_menu.addSeparator()
help_menu.addAction(action_map["tech_browser"])
help_menu.addAction(action_map["tutorial"])
help_menu.addSeparator()
help_menu.addAction(action_map["about"])
menu_map.update({"help": help_menu})
return menu_map
def build_action(main_window, action_name, shortcut, status_tip, trigger):
"""
Builds a QAction from its component parts.
"""
built_action = QAction(action_name, main_window)
    if shortcut is not None:
built_action.setShortcut(shortcut)
built_action.setStatusTip(status_tip)
built_action.triggered.connect(trigger)
return built_action
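# A hedged example (not wired into any menu above): using build_action to create
# one standalone QAction. main_window.handle_quit is an assumed slot that this
# module does not define.
def build_example_quit_action(main_window):
    return build_action(main_window, '&Quit', 'Ctrl+Q',
                        'Quit the application.', main_window.handle_quit)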
|
|
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""protoc plugin to create C++ reader/writer for JSON-encoded protobufs
The reader/writer use Chrome's base::Values.
"""
import os
import sys
from util import plugin_protos, types, writer
class CppConverterWriter(writer.CodeWriter):
def WriteProtoFile(self, proto_file, output_dir):
err = proto_file.CheckSupported()
if err:
self.AddError(err)
return
self.WriteCStyleHeader()
self.Output('#include "{output_dir}{generated_pb_h}"',
output_dir=output_dir + '/' if output_dir else '',
generated_pb_h=proto_file.CppBaseHeader())
self.Output('')
# import is not supported
assert [] == proto_file.GetDependencies()
self.Output('// base dependencies')
self.Output('#include "base/values.h"')
self.Output('')
self.Output('#include <memory>')
self.Output('#include <string>')
self.Output('#include <utility>')
self.Output('')
namespaces = proto_file.ProtoNamespaces() + ['json']
for name in namespaces:
self.Output('namespace {name} {{', name=name)
self.IncreaseIndent()
for message in proto_file.GetMessages():
self.WriteMessage(message)
# Nothing to do for enums
for name in namespaces:
self.DecreaseIndent()
self.Output('}}')
def WriteMessage(self, message):
self.Output('class {class_name} {{',
class_name=message.CppConverterClassName())
self.Output(' public:')
with self.AddIndent():
for nested_class in message.GetMessages():
self.WriteMessage(nested_class)
generated_class_name = message.QualifiedTypes().cpp_base
# Nothing to write for enums.
self.Output(
'static bool ReadFromValue(const base::Value& dict, {generated_class_name}* message) {{\n'
' if (!dict.is_dict()) goto error;\n'
'',
generated_class_name=generated_class_name)
with self.AddIndent():
for field_proto in message.GetFields():
self.WriteFieldRead(field_proto)
self.Output(
' return true;\n'
'\n'
'error:\n'
' return false;\n'
'}}\n'
'\n'
'static base::Value WriteToValue(const {generated_class_name}& message) {{\n'
' base::Value dict(base::Value::Type::DICTIONARY);\n'
'',
generated_class_name=generated_class_name)
with self.AddIndent():
for field_proto in message.GetFields():
self.FieldWriteToValue(field_proto)
self.Output(
' return dict;\n'
'',
generated_class_name=generated_class_name)
self.Output('}}')
self.Output('}};')
self.Output('')
def FieldWriteToValue(self, field):
if field.IsRepeated():
self.Output('{{')
else:
self.Output('if (message.has_{field_name}()) {{\n', field_name=field.name)
with self.AddIndent():
if field.IsRepeated():
self.RepeatedMemberFieldWriteToValue(field)
else:
self.OptionalMemberFieldWriteToValue(field)
self.Output('}}')
def RepeatedMemberFieldWriteToValue(self, field):
if field.IsClassType():
self.Output(
'const auto& repeated_field = message.{field_name}();\n'
'base::Value::ListStorage field_list;\n'
'field_list.reserve(repeated_field.size());\n'
'for (const auto& element : repeated_field) {{\n'
' field_list.push_back(\n'
' {inner_class_converter}::WriteToValue(element));\n'
'}}\n'
'dict.SetKey("{field_number}",\n'
' base::Value(std::move(field_list)));\n',
field_number=field.JavascriptIndex(),
field_name=field.name,
inner_class_converter=field.CppConverterType()
)
else:
self.Output(
'const auto& repeated_field = message.{field_name}();\n'
'base::Value::ListStorage field_list(\n'
' repeated_field.begin(), repeated_field.end());\n'
'dict.SetKey("{field_number}",\n'
' base::Value(std::move(field_list)));\n',
field_number=field.JavascriptIndex(),
field_name=field.name
)
def OptionalMemberFieldWriteToValue(self, field):
if field.IsClassType():
self.Output(
'dict.SetKey("{field_number}",\n'
' {inner_class_converter}::WriteToValue(\n'
' message.{field_name}()));\n',
field_number=field.JavascriptIndex(),
field_name=field.name,
inner_class_converter=field.CppConverterType()
)
else:
self.Output(
'dict.Set{value_type}Key("{field_number}", message.{field_name}());\n',
field_number=field.JavascriptIndex(),
field_name=field.name,
value_type=field.CppValueType()
)
def WriteFieldRead(self, field):
self.Output('if (const auto* value = dict.FindKey("{field_number}")) {{',
field_number=field.JavascriptIndex())
with self.AddIndent():
if field.IsRepeated():
self.RepeatedMemberFieldRead(field)
else:
self.OptionalMemberFieldRead(field)
self.Output('}}')
def RepeatedMemberFieldRead(self, field):
self.Output(
'if (!value->is_list()) {{\n'
' goto error;\n'
'}}\n'
'for (const auto& element : value->GetList()) {{\n'
)
with self.AddIndent():
if field.IsClassType():
self.Output(
'if (!{inner_class_parser}::ReadFromValue(element, message->add_{field_name}())) {{\n'
' goto error;\n'
'}}\n',
field_name=field.name,
inner_class_parser=field.CppConverterType()
)
else:
self.Output(
'if (!{predicate}) {{\n'
' goto error;\n'
'}}\n'
'message->add_{field_name}(element.Get{value_type}());\n',
field_name=field.name,
value_type=field.CppValueType(),
predicate=field.CppValuePredicate('element')
)
self.Output('}}\n')
def OptionalMemberFieldRead(self, field):
if field.IsClassType():
self.Output(
'if (!{inner_class_parser}::ReadFromValue(*value, message->mutable_{field_name}())) {{\n'
' goto error;\n'
'}}\n',
field_number=field.JavascriptIndex(),
field_name=field.name,
inner_class_parser=field.CppConverterType()
)
else:
self.Output(
'if (!{predicate}) {{\n'
' goto error;\n'
'}}\n'
'message->set_{field_name}(value->Get{value_type}());\n',
field_name=field.name,
value_type=field.CppValueType(),
predicate=field.CppValuePredicate('(*value)')
)
def Indented(s, indent=2):
return '\n'.join((' ' * indent) + p for p in s.rstrip('\n').split('\n'))
def SetBinaryStdio():
import platform
if platform.system() == 'Windows':
import msvcrt
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def ReadRequestFromStdin():
stream = sys.stdin if sys.version_info[0] < 3 else sys.stdin.buffer
data = stream.read()
return plugin_protos.PluginRequestFromString(data)
def main():
SetBinaryStdio()
request = ReadRequestFromStdin()
response = plugin_protos.PluginResponse()
output_dir = request.GetArgs().get('output_dir', '')
for proto_file in request.GetAllFiles():
types.RegisterProtoFile(proto_file)
cppwriter = CppConverterWriter()
cppwriter.WriteProtoFile(proto_file, output_dir)
converter_filename = proto_file.CppConverterFilename()
if output_dir:
converter_filename = os.path.join(output_dir,
os.path.split(converter_filename)[1])
response.AddFileWithContent(converter_filename, cppwriter.GetValue())
if cppwriter.GetErrors():
response.AddError('\n'.join(cppwriter.GetErrors()))
response.WriteToStdout()
if __name__ == '__main__':
main()
|
|
# Copyright 2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date, datetime
from decimal import Decimal
import unittest
from uuid import UUID
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.usertype import UserType
from cassandra.cqlengine import columns
from cassandra.cqlengine.management import sync_table, sync_type, create_keyspace_simple, drop_keyspace
from tests.integration import get_server_versions
from tests.integration.cqlengine.base import BaseCassEngTestCase
class UserDefinedTypeTests(BaseCassEngTestCase):
@classmethod
    def setUpClass(cls):
if get_server_versions()[0] < (2, 1, 0):
raise unittest.SkipTest("UDTs require Cassandra 2.1 or greater")
def test_can_create_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
# Add a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
self.assertEqual(42, user.age)
self.assertEqual("John", user.name)
self.assertEqual("male", user.gender)
# Remove a field
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
user = User(age=42, name="John", gender="male")
with self.assertRaises(AttributeError):
user.gender
def test_can_insert_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
self.assertEqual(1, UserModel.objects.count())
john = UserModel.objects().first()
self.assertEqual(0, john.id)
self.assertTrue(type(john.info) is User)
self.assertEqual(42, john.info.age)
self.assertEqual("John", john.info.name)
def test_can_update_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
created_user = UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
created_user.info = User(age=22, name="Mary")
created_user.save()
mary_info = UserModel.objects().first().info
self.assertEqual(22, mary_info.age)
self.assertEqual("Mary", mary_info.name)
def test_can_create_same_udt_different_keyspaces(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
sync_type("cqlengine_test", User)
create_keyspace_simple("simplex", 1)
sync_type("simplex", User)
drop_keyspace("simplex")
def test_can_insert_partial_udts(self):
class User(UserType):
age = columns.Integer()
name = columns.Text()
gender = columns.Text()
class UserModel(Model):
id = columns.Integer(primary_key=True)
info = columns.UserDefinedType(User)
sync_table(UserModel)
user = User(age=42, name="John")
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertEqual("John", john_info.name)
self.assertIsNone(john_info.gender)
user = User(age=42)
UserModel.create(id=0, info=user)
john_info = UserModel.objects().first().info
self.assertEqual(42, john_info.age)
self.assertIsNone(john_info.name)
self.assertIsNone(john_info.gender)
def test_can_insert_nested_udts(self):
class Depth_0(UserType):
age = columns.Integer()
name = columns.Text()
class Depth_1(UserType):
value = columns.UserDefinedType(Depth_0)
class Depth_2(UserType):
value = columns.UserDefinedType(Depth_1)
class Depth_3(UserType):
value = columns.UserDefinedType(Depth_2)
class DepthModel(Model):
id = columns.Integer(primary_key=True)
v_0 = columns.UserDefinedType(Depth_0)
v_1 = columns.UserDefinedType(Depth_1)
v_2 = columns.UserDefinedType(Depth_2)
v_3 = columns.UserDefinedType(Depth_3)
sync_table(DepthModel)
udts = [Depth_0(age=42, name="John")]
udts.append(Depth_1(value=udts[0]))
udts.append(Depth_2(value=udts[1]))
udts.append(Depth_3(value=udts[2]))
DepthModel.create(id=0, v_0=udts[0], v_1=udts[1], v_2=udts[2], v_3=udts[3])
output = DepthModel.objects().first()
self.assertEqual(udts[0], output.v_0)
self.assertEqual(udts[1], output.v_1)
self.assertEqual(udts[2], output.v_2)
self.assertEqual(udts[3], output.v_3)
def test_can_insert_udts_with_nulls(self):
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.Date()
f = columns.DateTime()
g = columns.Decimal()
h = columns.Float(double_precision=False)
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a=None, b=None, c=None, d=None, e=None, f=None, g=None, h=None, i=None, j=None, k=None, l=None, m=None, n=None)
AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
self.assertEqual(input, output)
def test_can_insert_udts_with_all_datatypes(self):
class AllDatatypes(UserType):
a = columns.Ascii()
b = columns.BigInt()
c = columns.Blob()
d = columns.Boolean()
e = columns.Date()
f = columns.DateTime()
g = columns.Decimal()
h = columns.Double()
i = columns.Inet()
j = columns.Integer()
k = columns.Text()
l = columns.TimeUUID()
m = columns.UUID()
n = columns.VarInt()
class AllDatatypesModel(Model):
id = columns.Integer(primary_key=True)
data = columns.UserDefinedType(AllDatatypes)
sync_table(AllDatatypesModel)
input = AllDatatypes(a='ascii', b=2 ** 63 - 1, c=bytearray(b'hello world'), d=True, e=date(1970, 1, 1),
f=datetime.utcfromtimestamp(872835240),
g=Decimal('12.3E+7'), h=3.4028234663852886e+38, i='123.123.123.123', j=2147483647,
k='text', l= UUID('FE2B4360-28C6-11E2-81C1-0800200C9A66'),
m=UUID('067e6162-3b6f-4ae2-a171-2470b63dff00'), n=int(str(2147483647) + '000'))
alldata = AllDatatypesModel.create(id=0, data=input)
self.assertEqual(1, AllDatatypesModel.objects.count())
output = AllDatatypesModel.objects().first().data
for i in range(ord('a'), ord('a') + 14):
self.assertEqual(input[chr(i)], output[chr(i)])
|
|
#!/usr/bin/env vpython
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import datetime
import hashlib
import logging
import sys
import unittest
from parameterized import parameterized
import test_env
test_env.setup_test_env()
from google.protobuf import struct_pb2
from google.protobuf import timestamp_pb2
from google.appengine.api import memcache
from google.appengine.ext import ndb
from components import utils
from test_support import test_case
from proto.api import swarming_pb2 # pylint: disable=no-name-in-module
from server import bq_state
from server import bot_management
from server import config
from server import task_queues
_VERSION = unicode(hashlib.sha256().hexdigest())
def _bot_event(bot_id=None,
external_ip='8.8.4.4',
authenticated_as=None,
dimensions=None,
state=None,
version=_VERSION,
quarantined=False,
maintenance_msg=None,
task_id=None,
task_name=None,
**kwargs):
"""Calls bot_management.bot_event with default arguments."""
if not bot_id:
bot_id = u'id1'
if not dimensions:
dimensions = {
u'id': [bot_id],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
}
if not authenticated_as:
authenticated_as = u'bot:%s.domain' % bot_id
register_dimensions = kwargs.get('event_type').startswith('request_')
return bot_management.bot_event(
bot_id=bot_id,
external_ip=external_ip,
authenticated_as=authenticated_as,
dimensions=dimensions,
state=state or {'ram': 65},
version=version,
quarantined=quarantined,
maintenance_msg=maintenance_msg,
task_id=task_id,
task_name=task_name,
register_dimensions=register_dimensions,
**kwargs)
def _ensure_bot_info(bot_id=u'id1', **kwargs):
_bot_event(bot_id=bot_id, event_type='request_sleep', **kwargs)
return bot_management.get_info_key(bot_id).get()
def _gen_bot_info(**kwargs):
out = {
'authenticated_as': u'bot:id1.domain',
'composite': [
bot_management.BotInfo.NOT_IN_MAINTENANCE,
bot_management.BotInfo.ALIVE,
bot_management.BotInfo.HEALTHY,
bot_management.BotInfo.IDLE,
],
'dimensions': {
u'id': [u'id1'],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
},
'external_ip': u'8.8.4.4',
'first_seen_ts': utils.utcnow(),
'id': 'id1',
'idle_since_ts': None,
'is_dead': False,
'last_seen_ts': utils.utcnow(),
'lease_id': None,
'lease_expiration_ts': None,
'leased_indefinitely': None,
'machine_lease': None,
'machine_type': None,
'quarantined': False,
'maintenance_msg': None,
'state': {
u'ram': 65
},
'task_id': None,
'task_name': None,
'version': _VERSION,
}
out.update(kwargs)
return out
def _gen_bot_event(**kwargs):
out = {
'authenticated_as': u'bot:id1.domain',
'dimensions': {
u'id': [u'id1'],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
},
'external_ip': u'8.8.4.4',
'idle_since_ts': None,
'last_seen_ts': None,
'lease_id': None,
'lease_expiration_ts': None,
'leased_indefinitely': None,
'machine_lease': None,
'machine_type': None,
'message': None,
'quarantined': False,
'maintenance_msg': None,
'state': {
u'ram': 65
},
'task_id': None,
'ts': utils.utcnow(),
'version': _VERSION,
}
out.update(kwargs)
return out
class BotManagementTest(test_case.TestCase):
APP_DIR = test_env.APP_DIR
def setUp(self):
super(BotManagementTest, self).setUp()
self.now = datetime.datetime(2010, 1, 2, 3, 4, 5, 6)
self.mock_now(self.now)
def test_all_apis_are_tested(self):
actual = frozenset(i[5:] for i in dir(self) if i.startswith('test_'))
# Contains the list of all public APIs.
expected = frozenset(
i for i in dir(bot_management)
if i[0] != '_' and hasattr(getattr(bot_management, i), 'func_name'))
missing = expected - actual
self.assertFalse(missing)
def test_BotEvent_proto_empty(self):
# Assert that it doesn't throw on empty entity.
actual = swarming_pb2.BotEvent()
bot_management.BotEvent().to_proto(actual)
self.assertEqual(swarming_pb2.BotEvent(), actual)
def test_BotEvent_proto_events(self):
# Ensures all bot event states can be converted to a proto.
dimensions = {
u'id': [u'id1'],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
}
for name in bot_management.BotEvent.ALLOWED_EVENTS:
event_key = _bot_event(
event_type=name, bot_id=u'id1', dimensions=dimensions)
if name == u'task_update':
# TODO(maruel): Store request_sleep IFF the state changed.
self.assertIsNone(event_key, name)
continue
# Just asserts it doesn't crash.
actual = swarming_pb2.BotEvent()
event_key.get().to_proto(actual)
def test_BotEvent_proto_maintenance(self):
# Also test a misconfigured bot not in a pool.
event_key = _bot_event(
event_type=u'bot_connected',
bot_id=u'id1',
dimensions={u'id': [u'id1']},
maintenance_msg=u'Too hot')
actual = swarming_pb2.BotEvent()
event_key.get().to_proto(actual)
expected = swarming_pb2.BotEvent(
event=swarming_pb2.BOT_NEW_SESSION,
bot=swarming_pb2.Bot(
bot_id=u'id1',
dimensions=[
swarming_pb2.StringListPair(key=u'id', values=[u'id1']),
],
status=swarming_pb2.OVERHEAD_MAINTENANCE_EXTERNAL,
status_msg=u'Too hot',
info=swarming_pb2.BotInfo(
supplemental=struct_pb2.Struct(fields={
u'ram': struct_pb2.Value(number_value=65),
}),
version=_VERSION,
external_ip=u'8.8.4.4',
authenticated_as=u'bot:id1.domain',
),
),
)
expected.event_time.FromDatetime(self.now)
self.assertEqual(unicode(expected), unicode(actual))
def test_BotEvent_proto_quarantine(self):
# Also test that a bot can belong to two pools.
event_key = _bot_event(
event_type=u'bot_connected',
bot_id=u'id1',
dimensions={
u'id': [u'id1'],
u'pool': [u'next', u'previous']
},
state={
u'ram': 65,
u'quarantined': u'sad bot'
},
quarantined=True)
actual = swarming_pb2.BotEvent()
event_key.get().to_proto(actual)
expected = swarming_pb2.BotEvent(
event=swarming_pb2.BOT_NEW_SESSION,
bot=swarming_pb2.Bot(
bot_id=u'id1',
pools=[u'next', u'previous'],
dimensions=[
swarming_pb2.StringListPair(key=u'id', values=[u'id1']),
swarming_pb2.StringListPair(
key=u'pool', values=[u'next', u'previous']),
],
status=swarming_pb2.QUARANTINED_BY_BOT,
status_msg=u'sad bot',
info=swarming_pb2.BotInfo(
supplemental=struct_pb2.Struct(
fields={
u'quarantined':
struct_pb2.Value(string_value=u'sad bot'),
u'ram':
struct_pb2.Value(number_value=65),
}),
version=_VERSION,
external_ip=u'8.8.4.4',
authenticated_as=u'bot:id1.domain',
),
),
)
expected.event_time.FromDatetime(self.now)
self.assertEqual(unicode(expected), unicode(actual))
def test_bot_event(self):
# connected.
d = {
u'id': [u'id1'],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
}
event = 'request_sleep'
_bot_event(event_type=event, bot_id='id1', dimensions=d)
expected = _gen_bot_info(idle_since_ts=self.now)
self.assertEqual(
expected, bot_management.get_info_key('id1').get().to_dict())
@parameterized.expand([
(u'task_completed', True, False),
(u'task_error', True, False),
(u'task_killed', True, False),
(u'request_sleep', True, True),
(u'task_update', False, True),
])
def test_bot_event_reset_task(self, event, reset_task, skip_store_event):
bot_id = u'id1'
task_id = u'12311'
task_name = u'yo'
d = {
u'id': [u'id1'],
u'os': [u'Ubuntu', u'Ubuntu-16.04'],
u'pool': [u'default'],
}
bot_info = _ensure_bot_info(
bot_id=bot_id, dimensions=d, task_id=task_id, task_name=task_name)
_bot_event(
event_type=event,
bot_id=bot_id,
dimensions=d,
task_id=task_id,
task_name=task_name)
# check bot_info
composite = [
bot_management.BotInfo.NOT_IN_MAINTENANCE,
bot_management.BotInfo.ALIVE,
bot_management.BotInfo.HEALTHY,
]
if event == 'request_sleep':
composite += [bot_management.BotInfo.IDLE]
idle_since_ts = self.now
else:
composite += [bot_management.BotInfo.BUSY]
idle_since_ts = None
if reset_task:
# bot_info.task_id and bot_info.task_name should be reset
expected = _gen_bot_info(
composite=composite,
id=bot_id,
task_name=None,
idle_since_ts=idle_since_ts)
else:
# bot_info.task_id and bot_info.task_name should be kept
expected = _gen_bot_info(
composite=composite,
id=bot_id,
task_id=task_id,
task_name=task_name,
idle_since_ts=None)
self.assertEqual(expected, bot_info.key.get().to_dict())
# bot_event should have task_id
if not skip_store_event:
expected_event = _gen_bot_event(event_type=event, task_id=task_id)
last_event = bot_management.get_events_query(bot_id, True).get()
self.assertEqual(expected_event, last_event.to_dict())
def test_get_events_query(self):
_bot_event(event_type='bot_connected')
expected = [_gen_bot_event(event_type=u'bot_connected')]
self.assertEqual(
expected,
[i.to_dict() for i in bot_management.get_events_query('id1', True)])
def test_bot_event_poll_sleep(self):
_bot_event(event_type='request_sleep')
# Assert that BotInfo was updated too.
expected = _gen_bot_info(
idle_since_ts=self.now,
composite=[
bot_management.BotInfo.NOT_IN_MAINTENANCE,
bot_management.BotInfo.ALIVE,
bot_management.BotInfo.HEALTHY,
bot_management.BotInfo.IDLE,
])
bot_info = bot_management.get_info_key('id1').get()
self.assertEqual(expected, bot_info.to_dict())
# BotEvent is registered for poll when BotInfo creates
expected_event = _gen_bot_event(event_type=u'request_sleep')
bot_events = bot_management.get_events_query('id1', True)
self.assertEqual([expected_event], [e.to_dict() for e in bot_events])
# flush bot events
ndb.delete_multi(e.key for e in bot_events)
# BotEvent is not registered for poll when no dimensions change
_bot_event(event_type='request_sleep')
self.assertEqual([], bot_management.get_events_query('id1', True).fetch())
# BotEvent is registered for poll when dimensions change
dims = {u'foo': [u'bar']}
_bot_event(event_type='request_sleep', dimensions=dims)
expected_event['dimensions'] = dims
bot_events = bot_management.get_events_query('id1', True).fetch()
self.assertEqual([expected_event], [e.to_dict() for e in bot_events])
def test_bot_event_busy(self):
_bot_event(event_type='bot_connected')
_bot_event(event_type='request_task', task_id='12311', task_name='yo')
expected = _gen_bot_info(
composite=[
bot_management.BotInfo.NOT_IN_MAINTENANCE,
bot_management.BotInfo.ALIVE,
bot_management.BotInfo.HEALTHY,
bot_management.BotInfo.BUSY,
],
task_id=u'12311',
task_name=u'yo')
bot_info = bot_management.get_info_key('id1').get()
self.assertEqual(expected, bot_info.to_dict())
expected = [
_gen_bot_event(event_type=u'request_task', task_id=u'12311'),
_gen_bot_event(event_type=u'bot_connected'),
]
self.assertEqual(
expected,
[e.to_dict() for e in bot_management.get_events_query('id1', True)])
def test_bot_event_update_dimensions(self):
bot_id = 'id1'
bot_info_key = bot_management.get_info_key(bot_id)
# bot dimensions generated without injected bot_config.py.
dimensions_invalid = {'id': ['id1'], 'os': ['Ubuntu'], 'pool': ['default']}
# 'bot_connected' event creates BotInfo only with id and pool dimensions.
_bot_event(
bot_id=bot_id,
event_type='bot_connected',
dimensions=dimensions_invalid)
self.assertEqual(bot_info_key.get().dimensions_flat,
[u'id:id1', u'pool:default'])
# 'bot_hook_log' event does not register dimensions other than id and pool.
_bot_event(
bot_id=bot_id, event_type='bot_hook_log', dimensions=dimensions_invalid)
self.assertEqual(bot_info_key.get().dimensions_flat,
[u'id:id1', u'pool:default'])
# 'request_sleep' registers given dimensions to BotInfo.
_bot_event(
bot_id=bot_id,
event_type='request_sleep',
dimensions={
'id': ['id1'],
'os': ['Android'],
'pool': ['default']
})
self.assertEqual(bot_info_key.get().dimensions_flat,
[u'id:id1', u'os:Android', u'pool:default'])
# 'bot_connected' doesn't update dimensions since bot_config isn't injected.
_bot_event(
bot_id=bot_id,
event_type='bot_connected',
dimensions=dimensions_invalid)
self.assertEqual(bot_info_key.get().dimensions_flat,
[u'id:id1', u'os:Android', u'pool:default'])
def test_get_info_key(self):
self.assertEqual(
ndb.Key(bot_management.BotRoot, 'foo', bot_management.BotInfo, 'info'),
bot_management.get_info_key('foo'))
def test_get_root_key(self):
self.assertEqual(
ndb.Key(bot_management.BotRoot, 'foo'),
bot_management.get_root_key('foo'))
def test_get_settings_key(self):
expected = ndb.Key(
bot_management.BotRoot, 'foo', bot_management.BotSettings, 'settings')
self.assertEqual(expected, bot_management.get_settings_key('foo'))
def test_get_aggregation_key(self):
expected = ndb.Key(bot_management.DimensionAggregation, 'foo')
self.assertEqual(expected, bot_management.get_aggregation_key('foo'))
def test_has_capacity(self):
# The bot can service this dimensions.
d = {u'pool': [u'default'], u'os': [u'Ubuntu-16.04']}
# The bot can service one of 'or' dimensions.
or_dimensions = {
u'pool': [u'default'],
u'os': [u'Ubuntu-14.04|Ubuntu-16.04'],
}
# By default, nothing has capacity.
self.assertEqual(False, bot_management.has_capacity(d))
# By default, nothing has capacity.
self.assertEqual(False, bot_management.has_capacity(or_dimensions))
# A bot comes online. There's some capacity now.
_bot_event(
event_type='request_sleep',
dimensions={'id': ['id1'], 'pool': ['default'], 'os': ['Ubuntu',
'Ubuntu-16.04']})
self.assertEqual(1, bot_management.BotInfo.query().count())
self.assertEqual(True, bot_management.has_capacity(d))
self.assertEqual(True, bot_management.has_capacity(or_dimensions))
# Disable the memcache code path to confirm the DB based behavior.
self.mock(task_queues, 'probably_has_capacity', lambda *_: None)
self.assertEqual(True, bot_management.has_capacity(d))
self.assertEqual(True, bot_management.has_capacity(or_dimensions))
def test_has_capacity_BotEvent(self):
# Disable the memcache code path to confirm the DB based behavior.
self.mock(task_queues, 'probably_has_capacity', lambda *_: None)
d = {u'pool': [u'default'], u'os': [u'Ubuntu-16.04']}
botid = 'id1'
_bot_event(
event_type='request_sleep',
dimensions={'id': [botid], 'pool': ['default'], 'os': ['Ubuntu',
'Ubuntu-16.04']})
self.assertEqual(True, bot_management.has_capacity(d))
or_dimensions = {
u'pool': [u'default'],
u'os': [u'Ubuntu-14.04|Ubuntu-16.04'],
}
# Delete the BotInfo, so the bot will disappear.
bot_management.get_info_key(botid).delete()
# The capacity is still found due to a recent BotEvent with this dimension.
self.assertEqual(True, bot_management.has_capacity(d))
self.assertEqual(True, bot_management.has_capacity(or_dimensions))
self.mock_now(self.now, config.settings().bot_death_timeout_secs-1)
self.assertEqual(True, bot_management.has_capacity(d))
self.assertEqual(True, bot_management.has_capacity(or_dimensions))
self.mock_now(self.now, config.settings().bot_death_timeout_secs)
self.assertEqual(False, bot_management.has_capacity(d))
self.assertEqual(False, bot_management.has_capacity(or_dimensions))
def test_get_pools_from_dimensions_flat(self):
pools = bot_management.get_pools_from_dimensions_flat(
['id:id1', 'os:Linux', 'pool:pool1', 'pool:pool2'])
self.assertEqual(pools, ['pool1', 'pool2'])
def test_cron_update_bot_info(self):
    # Create two bots; one becomes dead. Running cron_update_bot_info fixes its
    # composite.
timeout = bot_management.config.settings().bot_death_timeout_secs
def check_dead(bots):
q = bot_management.filter_availability(
bot_management.BotInfo.query(), quarantined=None, in_maintenance=None,
is_dead=True, is_busy=None)
self.assertEqual(bots, [t.to_dict() for t in q])
def check_alive(bots):
q = bot_management.filter_availability(
bot_management.BotInfo.query(),
quarantined=None,
in_maintenance=None,
is_dead=False,
is_busy=None)
self.assertEqual(bots, [t.to_dict() for t in q])
_bot_event(event_type='request_sleep')
# One second before the timeout value.
then = self.mock_now(self.now, timeout-1)
_bot_event(
event_type='request_sleep',
bot_id='id2',
external_ip='8.8.4.4', authenticated_as='bot:id2.domain',
dimensions={'id': ['id2'], 'foo': ['bar']})
bot1_alive = _gen_bot_info(
first_seen_ts=self.now, idle_since_ts=self.now, last_seen_ts=self.now)
bot1_dead = _gen_bot_info(
first_seen_ts=self.now,
last_seen_ts=self.now,
composite=[
bot_management.BotInfo.NOT_IN_MAINTENANCE,
bot_management.BotInfo.DEAD,
bot_management.BotInfo.HEALTHY,
bot_management.BotInfo.BUSY,
],
is_dead=True)
bot2_alive = _gen_bot_info(
authenticated_as=u'bot:id2.domain',
dimensions={
u'foo': [u'bar'],
u'id': [u'id2']
},
first_seen_ts=then,
id='id2',
idle_since_ts=then,
last_seen_ts=then)
check_dead([])
check_alive([bot1_alive, bot2_alive])
self.assertEqual(0, bot_management.cron_update_bot_info())
check_dead([])
check_alive([bot1_alive, bot2_alive])
# Just stale enough to trigger the dead logic.
then = self.mock_now(self.now, timeout)
# The cron job didn't run yet, so it still has ALIVE bit.
check_dead([])
check_alive([bot1_alive, bot2_alive])
self.assertEqual(1, bot_management.cron_update_bot_info())
# The cron job ran, so it's now correct.
check_dead([bot1_dead])
check_alive([bot2_alive])
# the last event should be bot_missing
events = list(bot_management.get_events_query('id1', order=True))
event = events[0]
bq_event = swarming_pb2.BotEvent()
event.to_proto(bq_event)
self.assertEqual(event.event_type, 'bot_missing')
self.assertEqual(event.last_seen_ts, bot1_dead['last_seen_ts'])
self.assertEqual(bq_event.event, swarming_pb2.BOT_MISSING)
self.assertEqual(bq_event.bot.status, swarming_pb2.MISSING)
last_seen_ts = timestamp_pb2.Timestamp()
last_seen_ts.FromDatetime(bot1_dead['last_seen_ts'])
self.assertEqual(bq_event.bot.info.last_seen_ts, last_seen_ts)
def test_cron_delete_old_bot_events(self):
    # Create an old BotEvent right at the cron job cut off, and another event
    # one second later (which will be kept).
_bot_event(event_type='bot_connected')
now = self.now
self.mock_now(now, 1)
event_key = _bot_event(event_type='bot_connected')
self.mock_now(now + bot_management._OLD_BOT_EVENTS_CUT_OFF, 0)
self.assertEqual(1, bot_management.cron_delete_old_bot_events())
actual = bot_management.BotEvent.query().fetch(keys_only=True)
self.assertEqual([event_key], actual)
def test_cron_delete_old_bot(self):
# Create a Bot with no BotEvent and another bot with one.
event_key = _bot_event(bot_id=u'id1', event_type='request_sleep')
# Delete the BotEvent entity.
_bot_event(bot_id=u'id2', event_type='request_sleep').delete()
# BotRoot + BotInfo.
self.assertEqual(2, bot_management.cron_delete_old_bot())
actual = bot_management.BotEvent.query().fetch(keys_only=True)
self.assertEqual([event_key], actual)
self.assertEqual(
[u'id1'],
[k.string_id() for k in
bot_management.BotRoot.query().fetch(keys_only=True)])
def test_cron_aggregate_dimensions(self):
_ensure_bot_info(
bot_id='id1', dimensions={
'pool': ['p1', 'p2'],
'foo1': ['bar1']
})
_ensure_bot_info(
bot_id='id2', dimensions={
'pool': ['p3'],
'foo2': ['bar2']
})
bot_management.cron_aggregate_dimensions()
# dimensions of all pools.
dims_all = bot_management.get_aggregation_key('all').get().dimensions
self.assertEqual(dims_all, [
bot_management.DimensionValues(dimension='foo1', values=['bar1']),
bot_management.DimensionValues(dimension='foo2', values=['bar2']),
bot_management.DimensionValues(
dimension='pool', values=['p1', 'p2', 'p3']),
])
# dimensions of p1.
dims_p1 = bot_management.get_aggregation_key('p1').get().dimensions
self.assertEqual(dims_p1, [
bot_management.DimensionValues(dimension='foo1', values=['bar1']),
bot_management.DimensionValues(dimension='pool', values=['p1', 'p2']),
])
# dimensions of p2.
    dims_p2 = bot_management.get_aggregation_key('p2').get().dimensions
    self.assertEqual(dims_p2, [
        bot_management.DimensionValues(dimension='foo1', values=['bar1']),
        bot_management.DimensionValues(dimension='pool', values=['p1', 'p2']),
    ])
    # dimensions of p3.
    dims_p3 = bot_management.get_aggregation_key('p3').get().dimensions
    self.assertEqual(dims_p3, [
        bot_management.DimensionValues(dimension='foo2', values=['bar2']),
        bot_management.DimensionValues(dimension='pool', values=['p3']),
    ])
def test_filter_dimensions(self):
pass # Tested in handlers_endpoints_test
def test_filter_availability(self):
pass # Tested in handlers_endpoints_test
def test_task_bq_empty(self):
# Empty, nothing is done.
start = utils.utcnow()
end = start+datetime.timedelta(seconds=60)
self.assertEqual(0, bot_management.task_bq_events(start, end))
def test_task_bq_events(self):
payloads = []
def send_to_bq(table_name, rows):
self.assertEqual('bot_events', table_name)
payloads.append(rows)
self.mock(bq_state, 'send_to_bq', send_to_bq)
# Generate a few events.
start = self.mock_now(self.now, 10)
_bot_event(bot_id=u'id1', event_type='bot_connected')
self.mock_now(self.now, 11)
_bot_event(event_type='request_sleep') # stored
self.mock_now(self.now, 12)
_bot_event(event_type='request_sleep') # not stored
self.mock_now(self.now, 13)
_bot_event(event_type='request_sleep', quarantined=True) # stored
self.mock_now(self.now, 14)
_bot_event(event_type='request_sleep', quarantined=True) # not stored
self.mock_now(self.now, 15)
_bot_event(event_type='request_sleep') # stored
self.mock_now(self.now, 16)
_bot_event(event_type='request_sleep') # not stored
self.mock_now(self.now, 17)
_bot_event(event_type='request_sleep', maintenance_msg='foo') # stored
self.mock_now(self.now, 18)
_bot_event(event_type='request_sleep', maintenance_msg='bar') # not stored
self.mock_now(self.now, 19)
_bot_event(event_type='request_sleep') # stored
self.mock_now(self.now, 20)
_bot_event(event_type='request_sleep') # not stored
self.mock_now(self.now, 21)
_bot_event(event_type='request_task', task_id='12311', task_name='yo')
self.mock_now(self.now, 22)
_bot_event(event_type='task_update', task_id='12311') # not stored
self.mock_now(self.now, 23)
_bot_event(event_type='task_completed', task_id='12311')
self.mock_now(self.now, 24)
_bot_event(event_type='request_sleep') # stored
end = self.mock_now(self.now, 25)
# normal request_sleep is not streamed.
bot_management.task_bq_events(start, end)
self.assertEqual(1, len(payloads))
actual_rows = payloads[0]
expected = [
(r[0], bot_management.BotEvent._MAPPING[r[1]]) for r in [
# (bq_key, event)
('id1:2010-01-02T03:04:15.000006Z', 'bot_connected'),
('id1:2010-01-02T03:04:16.000006Z',
'request_sleep'), # dimensions update
('id1:2010-01-02T03:04:18.000006Z',
'request_sleep'), # quarantine start
('id1:2010-01-02T03:04:20.000006Z', 'request_sleep'), # recovered
('id1:2010-01-02T03:04:22.000006Z',
'request_sleep'), # maintenance start
('id1:2010-01-02T03:04:24.000006Z', 'request_sleep'), # recovered
('id1:2010-01-02T03:04:26.000006Z', 'request_task'),
('id1:2010-01-02T03:04:28.000006Z', 'task_completed'),
('id1:2010-01-02T03:04:29.000006Z', 'request_sleep'), # first idle
]
]
self.assertEqual(expected, [(r[0], r[1].event) for r in actual_rows])
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR)
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
|
|
# Copyright 2014 Ahmed El-Hassany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define base (mostly abstract) entities used by sts.
"""
import abc
import logging
from functools import partial
from pox.openflow.libopenflow_01 import ofp_phy_port
from sts.util.procutils import popen_filtered
from sts.util.convenience import object_fullname
from sts.util.convenience import class_fullname
from sts.util.convenience import load_class
from sts.util.convenience import get_json_attr
def serialize_ofp_phy_port(port):
"""
Serializes OpenFlow physical port to JSON Dict
"""
attrs = ['port_no', 'hw_addr', 'name', 'config', 'state', 'curr',
'advertised', 'supported', 'peer']
json_dict = {'__type__': object_fullname(port)}
for attr in attrs:
value = getattr(port, attr, None)
if hasattr(value, 'toStr'):
value = value.toStr()
json_dict[attr] = value
return json_dict
def deserialize_ofp_phy_port(cls, json_dict):
"""
De-Serializes JSON Dict to OpenFlow physical port
"""
assert json_dict['__type__'] == class_fullname(cls)
json_dict.pop('__type__')
port = cls(**json_dict)
return port
# Monkey patching
ofp_phy_port.to_json = lambda self: serialize_ofp_phy_port(self)
ofp_phy_port.from_json = classmethod(deserialize_ofp_phy_port)
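# A minimal round-trip sketch for the JSON helpers patched in above. It is
# illustrative only (not called anywhere in sts) and assumes, like
# deserialize_ofp_phy_port itself, that the ofp_phy_port constructor accepts
# the serialized fields as keyword arguments.
def _example_ofp_phy_port_json_roundtrip():
  port = ofp_phy_port(port_no=1, name='eth0')
  json_dict = port.to_json()
  assert json_dict['__type__'] == object_fullname(port)
  # Pass a copy because from_json pops '__type__' from the dict.
  restored = ofp_phy_port.from_json(dict(json_dict))
  assert restored.port_no == port.port_no
  return restored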
class DirectedLinkAbstractClass(object):
"""
A directed network link
"""
__metaclass__ = abc.ABCMeta
def __init__(self, start_node, start_port, end_node, end_port):
"""
Init new directed link.
start_port has to be member of start_node, likewise for end_port
"""
if hasattr(start_node, 'has_port'):
assert start_node.has_port(start_node)
if hasattr(end_node, 'has_port'):
assert end_node.has_port(end_node)
self._start_node = start_node
self._start_port = start_port
self._end_node = end_node
self._end_port = end_port
@property
def start_node(self):
"""The starting node"""
return self._start_node
@property
def start_port(self):
"""The starting port"""
return self._start_port
@property
def end_node(self):
"""The destination node"""
return self._end_node
@property
def end_port(self):
"""The destination port"""
return self._end_port
def __eq__(self, other):
return (self.start_node == getattr(other, 'start_node', None) and
self.start_port == getattr(other, 'start_port', None) and
self.end_node == getattr(other, 'end_node', None) and
self.end_port == getattr(other, 'end_port', None))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "(%s:%s) -> (%s:%s)" % (self.start_node, self.start_port,
self.end_node, self.end_port)
def create_reversed_link(self):
"""Create a Link that is in the opposite direction of this Link."""
return DirectedLinkAbstractClass(self.end_node, self.end_port,
self.start_node, self.start_port)
def to_json(self):
"""Serialize to JSON dict"""
return {'__type__': object_fullname(self),
'start_node': get_json_attr(self.start_node),
'start_port': get_json_attr(self.start_port),
'end_node': get_json_attr(self.end_node),
'end_port': get_json_attr(self.end_port)}
@classmethod
def from_json(cls, json_dict):
assert class_fullname(cls) == json_dict['__type__']
start_node = json_dict['start_node']
start_port = json_dict['start_port']
end_node = json_dict['end_node']
end_port = json_dict['end_port']
if isinstance(start_node, dict) and start_node.get('__type__', None):
start_node = load_class(start_node['__type__']).from_json(start_node)
if isinstance(start_port, dict) and start_port.get('__type__', None):
start_port = load_class(start_port['__type__']).from_json(start_port)
if isinstance(end_node, dict) and end_node.get('__type__', None):
end_node = load_class(end_node['__type__']).from_json(end_node)
if isinstance(end_port, dict) and end_port.get('__type__', None):
end_port = load_class(end_port['__type__']).from_json(end_port)
return cls(start_node, start_port, end_node, end_port)
class BiDirectionalLinkAbstractClass(object):
"""
  A bidirectional network link
"""
__metaclass__ = abc.ABCMeta
def __init__(self, node1, port1, node2, port2):
if hasattr(node1, 'has_port'):
assert node1.has_port(port1)
if hasattr(node2, 'has_port'):
assert node2.has_port(port2)
self._node1 = node1
self._port1 = port1
self._node2 = node2
self._port2 = port2
@property
def node1(self):
return self._node1
@property
def port1(self):
return self._port1
@property
def node2(self):
return self._node2
@property
def port2(self):
return self._port2
def __eq__(self, other):
return ((self.node1 == getattr(other, 'node1', None) and
self.port1 == getattr(other, 'port1', None) and
self.node2 == getattr(other, 'node2', None) and
self.port2 == getattr(other, 'port2', None)) or
(self.node1 == getattr(other, 'node2', None) and
self.port1 == getattr(other, 'port2', None) and
self.node2 == getattr(other, 'node1', None) and
self.port2 == getattr(other, 'port1', None)))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "(%s:%s) <-> (%s:%s)" % (self.node1, self.port1,
self.node2, self.port2)
def to_json(self):
"""Serialize to JSON dict"""
return {'__type__': object_fullname(self),
'node1': get_json_attr(self.node1),
'port1': get_json_attr(self.port1),
'node2': get_json_attr(self.node2),
'port2': get_json_attr(self.port2)}
@classmethod
def from_json(cls, json_dict):
assert class_fullname(cls) == json_dict['__type__']
node1 = json_dict['node1']
port1 = json_dict['port1']
node2 = json_dict['node2']
port2 = json_dict['port2']
if isinstance(node1, dict) and node1.get('__type__', None):
node1 = load_class(node1['__type__']).from_json(node1)
if isinstance(port1, dict) and port1.get('__type__', None):
port1 = load_class(port1['__type__']).from_json(port1)
if isinstance(node2, dict) and node2.get('__type__', None):
node2 = load_class(node2['__type__']).from_json(node2)
if isinstance(port2, dict) and port2.get('__type__', None):
port2 = load_class(port2['__type__']).from_json(port2)
return cls(node1, port1, node2, port2)
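# A minimal sketch of the link JSON round trip above, using plain strings and
# ints as stand-in nodes and ports. It assumes get_json_attr passes such plain
# values through unchanged; illustrative only, not called anywhere in sts.
def _example_link_json_roundtrip():
  link = BiDirectionalLinkAbstractClass('s1', 1, 's2', 2)
  json_dict = link.to_json()
  restored = BiDirectionalLinkAbstractClass.from_json(json_dict)
  assert restored == link
  return restored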
class SSHEntity(object):
"""
Controls an entity via ssh.
"""
def __init__(self, host, port=22, username=None, password=None,
key_filename=None, cwd=None, label=None, redirect_output=False,
block=False):
"""
    If username, password, and key_filename are None, SSH will use the
    default ssh key loaded into the system, and the connection will work if
    the destination host is configured to accept that key.
Args:
host: the server address to connect to.
port: the server port to connect to (default 22)
username: the username to authenticate as (default local username)
password: password to authenticate or to unlock the private key
key_filename: private key for authentication
cwd: working dir for commands
      label: human readable label to associate with output
      redirect_output: if True, remote stdout & stderr are redirected to stdout
      block: if True, execute_command will block until the command is complete
"""
self._host = host
self._port = port
self._username = username
self._password = password
self._key_filename = key_filename
self._ssh_client = None
self._ssh_cls = None
self.redirect_output = redirect_output
self.block = block
self.cwd = cwd
self.label = label or ""
if self._ssh_cls is None:
try:
import paramiko
except ImportError:
raise RuntimeError('''Must install paramiko to use ssh: \n'''
''' $ sudo pip install paramiko ''')
# Suppress normal SSH messages
logging.getLogger("paramiko").setLevel(logging.WARN)
self._ssh_cls = paramiko.SSHClient
self.log = logging.getLogger("SSHEntity")
@property
def host(self):
"""The server address to connect to"""
return self._host
@property
def port(self):
"""The server port to connect to"""
return self._port
@property
def username(self):
"""The username to authenticate as (default local username)"""
return self._username
@property
def password(self):
"""Password to authenticate or to unlock the private key."""
return self._password
@property
def key_filename(self):
"""Private key for authentication"""
return self._key_filename
@property
def ssh_cls(self):
"""
Returns reference to the SSH Client class
"""
return self._ssh_cls
@property
def check_key_policy(self):
"""
    Returns the policy for missing host keys
Default: accept all keys
"""
try:
import paramiko
except ImportError:
raise RuntimeError('''Must install paramiko to use ssh: \n'''
''' $ sudo pip install paramiko ''')
return paramiko.AutoAddPolicy()
@property
def ssh_client(self):
"""Returns instance of the ssh client
Will connect to the host if not already connected.
"""
if self._ssh_client is None:
self._ssh_client = self.ssh_cls()
      # Ignore host identity check
self._ssh_client.set_missing_host_key_policy(self.check_key_policy)
self._ssh_client.connect(hostname=self.host, port=self.port,
username=self.username, password=self.password,
key_filename=self.key_filename)
return self._ssh_client
def get_new_session(self):
"""Return new ssh session handler to the host"""
ssh = self.ssh_client
transport = ssh.get_transport()
session = transport.open_channel(kind='session')
return session
def execute_command(self, cmd):
"""
Execute command remotely and return the stdout results
"""
# procutils was meant to be a leaf dependency
from sts.util.procutils import _prefix_thread
from sts.util.procutils import color_normal
from sts.util.procutils import color_error
if self.cwd is not None:
cmd = "cd " + self.cwd + " ;" + cmd
r_stdin, r_stdout, r_stderr = self.ssh_client.exec_command(cmd)
if self.redirect_output:
stdout_thread = _prefix_thread(r_stdout,
partial(color_normal, label=self.label))
stderr_thread = _prefix_thread(r_stderr,
partial(color_error, label=self.label))
if self.block:
channel = r_stdout.channel
while True:
if channel.recv_ready() is False and channel.exit_status_ready():
break
return ""
else:
# dealing directly with the channel makes it easier to detect exit status
reply = ""
channel = r_stdout.channel
while True:
if channel.recv_ready():
reply += channel.recv(100) # arbitrary
elif channel.recv_ready() is False and channel.exit_status_ready():
break
channel.close()
return reply
def __del__(self):
if self._ssh_client:
try:
self._ssh_client.close()
except Exception as exp:
self.log.warn("Error at closing ssh connection: '%s'" % exp)
class LocalEntity(object):
"""
Controls an entity via local unix command.
"""
def __init__(self, cwd=None, label=None, redirect_output=False):
"""
Args:
cwd: working dir for commands
      label: human readable label to associate with output
      redirect_output: if True, remote stdout & stderr are redirected to stdout
"""
self.cwd = cwd
self.label = label or ""
self.redirect_output = redirect_output
self.log = logging.getLogger("LocalEntity")
def execute_command(self, cmd):
"""
Execute command locally and return the stdout results
"""
process = popen_filtered("[%s]" % self.label, cmd, self.cwd,
shell=True, redirect_output=self.redirect_output)
output = ""
if self.redirect_output:
return ''
while True:
recv = process.stdout.read(100) # arbitrary
output += recv
if recv == '' and process.poll() is not None:
break
return output
|
|
"""Implements actions used by doit tasks
"""
import subprocess, sys
import six
from six import StringIO
import inspect
from threading import Thread
from .exceptions import InvalidTask, TaskFailed, TaskError
def normalize_callable(ref):
"""return a list with (callabe, *args, **kwargs)
ref can be a simple callable or a tuple
"""
if isinstance(ref, tuple):
return list(ref)
return [ref, (), {}]
# Actions
class BaseAction(object):
"""Base class for all actions"""
# must implement:
# def execute(self, out=None, err=None)
@staticmethod
def _prepare_kwargs(task, func, args, kwargs):
"""
Prepare keyword arguments (targets, dependencies, changed,
cmd line options)
Inspect python callable and add missing arguments:
- that the callable expects
- have not been passed (as a regular arg or as keyword arg)
- are available internally through the task object
"""
        # Return just what was passed in the task generator
        # dictionary if the task isn't available
if not task:
return kwargs
try:
argspec = inspect.getargspec(func)
except TypeError: # a callable object, not a function
argspec = inspect.getargspec(func.__call__)
# use task meta information as extra_args
meta_args = {
'task': task,
'targets': task.targets,
'dependencies': task.file_dep,
'changed': task.dep_changed,
}
extra_args = dict(meta_args)
# tasks parameter options
extra_args.update(task.options)
if task.pos_arg is not None:
extra_args[task.pos_arg] = task.pos_arg_val
kwargs = kwargs.copy()
for key in six.iterkeys(extra_args):
# check key is a positional parameter
if key in argspec.args:
arg_pos = argspec.args.index(key)
                # it is forbidden to use default values for these arguments
                # because the user might be unaware of this magic.
if (key in meta_args and argspec.defaults and
len(argspec.defaults) > (len(argspec.args) - (arg_pos+1))):
msg = ("Task %s, action %s(): The argument '%s' is not "
"allowed to have a default value (reserved by doit)"
% (task.name, func.__name__, key))
raise InvalidTask(msg)
# if not over-written by value passed in *args use extra_arg
overwritten = arg_pos < len(args)
if not overwritten:
kwargs[key] = extra_args[key]
# if function has **kwargs include extra_arg on it
elif argspec.keywords and key not in kwargs:
kwargs[key] = extra_args[key]
return kwargs
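# A small sketch of the keyword injection performed by _prepare_kwargs: a
# python action callable that names 'targets' or 'dependencies' as parameters
# receives them from the task. _FakeTask is a stand-in exposing only the
# attributes read above; illustrative only, not used by doit itself.
def _example_prepare_kwargs():
    class _FakeTask(object):
        name = 'example'
        targets = ['out.txt']
        file_dep = ['in.txt']
        dep_changed = ['in.txt']
        options = {}
        pos_arg = None
        pos_arg_val = None

    def build(targets, dependencies):
        return {'targets': targets, 'dependencies': dependencies}

    kwargs = BaseAction._prepare_kwargs(_FakeTask(), build, (), {})
    return build(**kwargs)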
class CmdAction(BaseAction):
"""
Command line action. Spawns a new process.
@ivar action(str,list,callable): subprocess command string or string list,
see subprocess.Popen first argument.
It may also be a callable that generates the command string.
Strings may contain python mappings with the keys: dependencies,
changed and targets. ie. "zip %(targets)s %(changed)s"
@ivar task(Task): reference to task that contains this action
@ivar save_out: (str) name used to save output in `values`
@ivar shell: use shell to execute command
see subprocess.Popen `shell` attribute
@ivar pkwargs: Popen arguments except 'stdout' and 'stderr'
"""
def __init__(self, action, task=None, save_out=None, shell=True,
**pkwargs): #pylint: disable=W0231
for forbidden in ('stdout', 'stderr'):
if forbidden in pkwargs:
msg = "CmdAction can't take param named '{0}'."
raise InvalidTask(msg.format(forbidden))
self._action = action
self.task = task
self.out = None
self.err = None
self.result = None
self.values = {}
self.save_out = save_out
self.shell = shell
self.pkwargs = pkwargs
@property
def action(self):
if isinstance(self._action, (six.string_types, list)):
return self._action
else:
# action can be a callable that returns a string command
ref, args, kw = normalize_callable(self._action)
kwargs = self._prepare_kwargs(self.task, ref, args, kw)
return ref(*args, **kwargs)
def _print_process_output(self, process, input_, capture, realtime):
"""read 'input_' untill process is terminated
write 'input_' content to 'capture' and 'realtime' streams
"""
if realtime:
if hasattr(realtime, 'encoding'):
encoding = realtime.encoding or 'utf-8'
else: # pragma: no cover
encoding = 'utf-8'
while True:
# line buffered
try:
line = input_.readline().decode('utf-8')
except:
process.terminate()
input_.read()
raise
if not line:
break
capture.write(line)
if realtime:
if sys.version > '3': # pragma: no cover
realtime.write(line)
else:
realtime.write(line.encode(encoding))
def execute(self, out=None, err=None):
"""
Execute command action
both stdout and stderr from the command are captured and saved
on self.out/err. Real time output is controlled by parameters
@param out: None - no real time output
a file like object (has write method)
@param err: idem
@return failure:
- None: if successful
- TaskError: If subprocess return code is greater than 125
- TaskFailed: If subprocess return code isn't zero (and
not greater than 125)
"""
try:
action = self.expand_action()
except Exception as exc:
return TaskError("CmdAction Error creating command string", exc)
# spawn task process
process = subprocess.Popen(
action, shell=self.shell,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**self.pkwargs)
output = StringIO()
errput = StringIO()
t_out = Thread(target=self._print_process_output,
args=(process, process.stdout, output, out))
t_err = Thread(target=self._print_process_output,
args=(process, process.stderr, errput, err))
t_out.start()
t_err.start()
t_out.join()
t_err.join()
self.out = output.getvalue()
self.err = errput.getvalue()
self.result = self.out + self.err
# make sure process really terminated
process.wait()
# task error - based on:
# http://www.gnu.org/software/bash/manual/bashref.html#Exit-Status
        # it doesn't make much difference whether we return Error or Failed anyway
if process.returncode > 125:
return TaskError("Command error: '%s' returned %s" %
(action,process.returncode))
# task failure
if process.returncode != 0:
return TaskFailed("Command failed: '%s' returned %s" %
(action,process.returncode))
# save stdout in values
if self.save_out:
self.values[self.save_out] = self.out
def expand_action(self):
"""expand action string using task meta informations
@returns (string) - expanded string after substitution
"""
if not self.task:
return self.action
        # can't expand keywords if action is a list of strings
if isinstance(self.action, list):
return self.action
subs_dict = {'targets' : " ".join(self.task.targets),
'dependencies': " ".join(self.task.file_dep)}
        # only include 'changed' if it is set
if self.task.dep_changed is not None:
subs_dict['changed'] = " ".join(self.task.dep_changed)
# task option parameters
subs_dict.update(self.task.options)
        # convert positional parameters from a list to a space-separated string
if self.task.pos_arg:
subs_dict[self.task.pos_arg] = ' '.join(self.task.pos_arg_val)
return self.action % subs_dict
def __str__(self):
return "Cmd: %s" % self._action
def __repr__(self):
return "<CmdAction: '%s'>" % str(self._action)
class Writer(object):
"""write to many streams"""
def __init__(self, *writers):
"""@param writers - file stream like objects"""
self.writers = []
self._isatty = True
for writer in writers:
self.add_writer(writer)
def add_writer(self, stream, isatty=None):
"""adds a stream to the list of writers
@param isatty: (bool) if specified overwrites real isatty from stream
"""
self.writers.append(stream)
isatty = stream.isatty() if (isatty is None) else isatty
self._isatty = self._isatty and isatty
def write(self, text):
"""write 'text' to all streams"""
for stream in self.writers:
stream.write(text)
def flush(self):
"""flush all streams"""
for stream in self.writers:
stream.flush()
def isatty(self):
return self._isatty
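# A small sketch of Writer fanning one write out to several streams; the
# isatty override mirrors how the actions below capture output while
# preserving the original stream's isatty(). Illustrative only.
def _example_writer():
    capture = StringIO()
    writer = Writer(capture)
    writer.add_writer(sys.stderr, isatty=False)
    writer.write("building...\n")
    writer.flush()
    return capture.getvalue()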
class PythonAction(BaseAction):
"""Python action. Execute a python callable.
@ivar py_callable: (callable) Python callable
@ivar args: (sequence) Extra arguments to be passed to py_callable
@ivar kwargs: (dict) Extra keyword arguments to be passed to py_callable
@ivar task(Task): reference to task that contains this action
"""
def __init__(self, py_callable, args=None, kwargs=None, task=None):
#pylint: disable=W0231
self.py_callable = py_callable
self.task = task
self.out = None
self.err = None
self.result = None
self.values = {}
if args is None:
self.args = []
else:
self.args = args
if kwargs is None:
self.kwargs = {}
else:
self.kwargs = kwargs
# check valid parameters
if not hasattr(self.py_callable, '__call__'):
msg = "%r PythonAction must be a 'callable' got %r."
raise InvalidTask(msg % (self.task, self.py_callable))
if inspect.isclass(self.py_callable):
msg = "%r PythonAction can not be a class got %r."
raise InvalidTask(msg % (self.task, self.py_callable))
if inspect.isbuiltin(self.py_callable):
msg = "%r PythonAction can not be a built-in got %r."
raise InvalidTask(msg % (self.task, self.py_callable))
if type(self.args) is not tuple and type(self.args) is not list:
msg = "%r args must be a 'tuple' or a 'list'. got '%s'."
raise InvalidTask(msg % (self.task, self.args))
if type(self.kwargs) is not dict:
msg = "%r kwargs must be a 'dict'. got '%s'"
raise InvalidTask(msg % (self.task, self.kwargs))
def _prepare_kwargs(self):
return BaseAction._prepare_kwargs(self.task, self.py_callable,
self.args, self.kwargs)
def execute(self, out=None, err=None):
"""Execute command action
both stdout and stderr from the command are captured and saved
on self.out/err. Real time output is controlled by parameters
@param out: None - no real time output
a file like object (has write method)
@param err: idem
@return failure: see CmdAction.execute
"""
# set std stream
old_stdout = sys.stdout
output = StringIO()
out_writer = Writer()
# capture output but preserve isatty() from original stream
out_writer.add_writer(output, old_stdout.isatty())
if out:
out_writer.add_writer(out)
sys.stdout = out_writer
old_stderr = sys.stderr
errput = StringIO()
err_writer = Writer()
err_writer.add_writer(errput, old_stderr.isatty())
if err:
err_writer.add_writer(err)
sys.stderr = err_writer
kwargs = self._prepare_kwargs()
# execute action / callable
try:
returned_value = self.py_callable(*self.args, **kwargs)
except Exception as exception:
return TaskError("PythonAction Error", exception)
finally:
# restore std streams /log captured streams
sys.stdout = old_stdout
sys.stderr = old_stderr
self.out = output.getvalue()
self.err = errput.getvalue()
        # if the callable returns False, the task failed
if returned_value is False:
return TaskFailed("Python Task failed: '%s' returned %s" %
(self.py_callable, returned_value))
elif returned_value is True or returned_value is None:
pass
elif isinstance(returned_value, six.string_types):
self.result = returned_value
elif isinstance(returned_value, dict):
self.values = returned_value
self.result = returned_value
else:
return TaskError("Python Task error: '%s'. It must return:\n"
"False for failed task.\n"
"True, None, string or dict for successful task\n"
"returned %s (%s)" %
(self.py_callable, returned_value,
type(returned_value)))
def __str__(self):
# get object description excluding runtime memory address
return "Python: %s"% str(self.py_callable)[1:].split(' at ')[0]
def __repr__(self):
return "<PythonAction: '%s'>"% (repr(self.py_callable))
def create_action(action, task_ref):
"""
Create action using proper constructor based on the parameter type
@param action: Action to be created
@type action: L{BaseAction} subclass object, str, tuple or callable
@raise InvalidTask: If action parameter type isn't valid
"""
if isinstance(action, BaseAction):
action.task = task_ref
return action
if isinstance(action, six.string_types):
return CmdAction(action, task_ref, shell=True)
if isinstance(action, list):
return CmdAction(action, task_ref, shell=False)
if isinstance(action, tuple):
if len(action) > 3:
msg = "Task '%s': invalid 'actions' tuple length. got:%r %s"
raise InvalidTask(msg % (task_ref.name, action, type(action)))
py_callable, args, kwargs = (list(action) + [None]*(3-len(action)))
return PythonAction(py_callable, args, kwargs, task_ref)
if hasattr(action, '__call__'):
return PythonAction(action, task=task_ref)
msg = "Task '%s': invalid 'actions' type. got:%r %s"
raise InvalidTask(msg % (task_ref.name, action, type(action)))
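# A brief sketch of the dispatch above: strings become shell CmdActions,
# string lists become non-shell CmdActions, and callables (optionally as a
# (callable, args, kwargs) tuple) become PythonActions. task_ref is only
# attached to the action here, so None suffices for illustration.
def _example_create_action_dispatch():
    shell_cmd = create_action("echo hi", None)        # CmdAction, shell=True
    argv_cmd = create_action(["echo", "hi"], None)    # CmdAction, shell=False
    py_plain = create_action(lambda: True, None)      # PythonAction
    py_tuple = create_action((lambda **kw: kw, [], {'x': 1}), None)
    return shell_cmd, argv_cmd, py_plain, py_tuple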
|
|
#!/usr/bin/env python
import re
import struct
import logging
try:
import hashlib as md5
except ImportError:
import md5
try:
from Crypto.Cipher import ARC4
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
except ImportError:
AES = SHA256 = None
from . import arcfour as ARC4
from .psparser import PSEOF
from .psparser import literal_name
from .psparser import LIT
from .psparser import KWD
from .psparser import STRICT
from .pdftypes import PDFException
from .pdftypes import PDFTypeError
from .pdftypes import PDFStream
from .pdftypes import PDFObjectNotFound
from .pdftypes import decipher_all
from .pdftypes import int_value
from .pdftypes import bytes_value
from .pdftypes import list_value
from .pdftypes import dict_value
from .pdftypes import stream_value
from .pdfparser import PDFSyntaxError
from .pdfparser import PDFStreamParser
from .utils import choplist
from .utils import nunpack
from .utils import decode_text
## Exceptions
##
class PDFNoValidXRef(PDFSyntaxError):
pass
class PDFNoOutlines(PDFException):
pass
class PDFDestinationNotFound(PDFException):
pass
class PDFEncryptionError(PDFException):
pass
class PDFPasswordIncorrect(PDFEncryptionError):
pass
class PDFTextExtractionNotAllowed(PDFEncryptionError):
pass
# some predefined literals and keywords.
LITERAL_OBJSTM = LIT('ObjStm')
LITERAL_XREF = LIT('XRef')
LITERAL_CATALOG = LIT('Catalog')
## XRefs
##
class PDFBaseXRef:
debug = False
def get_trailer(self):
raise NotImplementedError
def get_objids(self):
return []
# Must return
# (strmid, index, genno)
# or (None, pos, genno)
def get_pos(self, objid):
raise KeyError(objid)
## PDFXRef
##
class PDFXRef(PDFBaseXRef):
def __init__(self):
self.offsets = {}
self.trailer = {}
return
def __repr__(self):
return '<PDFXRef: offsets=%r>' % (self.offsets.keys())
def load(self, parser):
while 1:
try:
(pos, line) = parser.nextline()
if not line.strip():
continue
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
if not line:
raise PDFNoValidXRef('Premature eof: %r' % parser)
if line.startswith(b'trailer'):
parser.seek(pos)
break
f = line.strip().split(b' ')
if len(f) != 2:
raise PDFNoValidXRef('Trailer not found: %r: line=%r' % (parser, line))
try:
(start, nobjs) = map(int, f)
except ValueError:
raise PDFNoValidXRef('Invalid line: %r: line=%r' % (parser, line))
for objid in range(start, start+nobjs):
try:
(_, line) = parser.nextline()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
f = line.strip().split(b' ')
if len(f) != 3:
raise PDFNoValidXRef('Invalid XRef format: %r, line=%r' % (parser, line))
(pos, genno, use) = f
if use != b'n':
continue
self.offsets[objid] = (None, int(pos), int(genno))
if self.debug: logging.info('xref objects: %r' % self.offsets)
self.load_trailer(parser)
return
KEYWORD_TRAILER = KWD(b'trailer')
def load_trailer(self, parser):
try:
(_, kwd) = parser.nexttoken()
assert kwd is self.KEYWORD_TRAILER
(_, dic) = parser.nextobject()
except PSEOF:
x = parser.pop(1)
if not x:
raise PDFNoValidXRef('Unexpected EOF - file corrupted')
(_, dic) = x[0]
self.trailer.update(dict_value(dic))
return
def get_trailer(self):
return self.trailer
def get_objids(self):
return self.offsets.keys()
def get_pos(self, objid):
try:
return self.offsets[objid]
except KeyError:
raise
## PDFXRefFallback
##
class PDFXRefFallback(PDFXRef):
def __repr__(self):
return '<PDFXRefFallback: offsets=%r>' % (self.offsets.keys())
PDFOBJ_CUE = re.compile(br'^(\d+)\s+(\d+)\s+obj\b')
def load(self, parser):
parser.seek(0)
while 1:
try:
(pos, line) = parser.nextline()
except PSEOF:
break
if line.startswith(b'trailer'):
parser.seek(pos)
self.load_trailer(parser)
if self.debug: logging.info('trailer: %r' % self.get_trailer())
break
m = self.PDFOBJ_CUE.match(line)
if not m:
continue
(objid, genno) = m.groups()
objid = int(objid)
genno = int(genno)
self.offsets[objid] = (None, pos, genno)
# expand ObjStm.
parser.seek(pos)
(_, obj) = parser.nextobject()
if isinstance(obj, PDFStream) and obj.get('Type') is LITERAL_OBJSTM:
stream = stream_value(obj)
try:
n = stream['N']
except KeyError:
if STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
parser1 = PDFStreamParser(stream.get_data())
objs = []
try:
while 1:
(_, obj) = parser1.nextobject()
objs.append(obj)
except PSEOF:
pass
n = min(n, len(objs)//2)
for index in range(n):
objid1 = objs[index*2]
self.offsets[objid1] = (objid, index, 0)
return
## PDFXRefStream
##
class PDFXRefStream(PDFBaseXRef):
debug = False
def __init__(self):
self.data = None
self.entlen = None
self.fl1 = self.fl2 = self.fl3 = None
self.ranges = []
return
def __repr__(self):
return '<PDFXRefStream: ranges=%r>' % (self.ranges)
def load(self, parser):
(_, objid) = parser.nexttoken() # ignored
(_, genno) = parser.nexttoken() # ignored
(_, kwd) = parser.nexttoken()
(_, stream) = parser.nextobject()
if not isinstance(stream, PDFStream) or stream['Type'] is not LITERAL_XREF:
raise PDFNoValidXRef('Invalid PDF stream spec.')
size = stream['Size']
index_array = stream.get('Index', (0, size))
if len(index_array) % 2 != 0:
raise PDFSyntaxError('Invalid index number')
self.ranges.extend(choplist(2, index_array))
(self.fl1, self.fl2, self.fl3) = stream['W']
self.data = stream.get_data()
self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.attrs
if self.debug:
logging.info('xref stream: objid=%s, fields=%d,%d,%d' %
(', '.join(map(repr, self.ranges)),
self.fl1, self.fl2, self.fl3))
return
def get_trailer(self):
return self.trailer
def get_objids(self):
for (start, nobjs) in self.ranges:
for i in range(nobjs):
offset = self.entlen * i
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
if f1 == 1 or f1 == 2:
yield start+i
return
def get_pos(self, objid):
index = 0
for (start, nobjs) in self.ranges:
if start <= objid and objid < start+nobjs:
index += objid - start
break
else:
index += nobjs
else:
raise KeyError(objid)
offset = self.entlen * index
ent = self.data[offset:offset+self.entlen]
f1 = nunpack(ent[:self.fl1], 1)
f2 = nunpack(ent[self.fl1:self.fl1+self.fl2])
f3 = nunpack(ent[self.fl1+self.fl2:])
if f1 == 1:
return (None, f2, f3)
elif f1 == 2:
return (f2, f3, 0)
else:
# this is a free object
raise KeyError(objid)
## PDFSecurityHandler
##
class PDFStandardSecurityHandler:
PASSWORD_PADDING = (b'(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08'
b'..\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz')
supported_revisions = (2, 3)
def __init__(self, docid, param, password=b''):
self.docid = docid
self.param = param
self.password = password
self.init()
return
def init(self):
self.init_params()
if self.r not in self.supported_revisions:
raise PDFEncryptionError('Unsupported revision: param=%r' % self.param)
self.init_key()
return
def init_params(self):
self.v = int_value(self.param.get('V', 0))
self.r = int_value(self.param['R'])
self.p = int_value(self.param['P'])
self.o = bytes_value(self.param['O'])
self.u = bytes_value(self.param['U'])
self.length = int_value(self.param.get('Length', 40))
return
def init_key(self):
self.key = self.authenticate(self.password)
if self.key is None:
raise PDFPasswordIncorrect
return
def is_printable(self):
return bool(self.p & 4)
def is_modifiable(self):
return bool(self.p & 8)
def is_extractable(self):
return bool(self.p & 16)
def compute_u(self, key):
if self.r == 2:
# Algorithm 3.4
return ARC4.new(key).encrypt(self.PASSWORD_PADDING) # 2
else:
# Algorithm 3.5
hash = md5.md5(self.PASSWORD_PADDING) # 2
hash.update(self.docid[0]) # 3
result = ARC4.new(key).encrypt(hash.digest()) # 4
for i in range(1, 20): # 5
k = bytes( (c ^ i) for c in key )
result = ARC4.new(k).encrypt(result)
result += result # 6
return result
def compute_encryption_key(self, password):
# Algorithm 3.2
password = (password + self.PASSWORD_PADDING)[:32] # 1
hash = md5.md5(password) # 2
hash.update(self.o) # 3
hash.update(struct.pack('<l', self.p)) # 4
hash.update(self.docid[0]) # 5
if self.r >= 4:
if not self.encrypt_metadata:
hash.update(b'\xff\xff\xff\xff')
result = hash.digest()
n = 5
if self.r >= 3:
n = self.length // 8
for _ in range(50):
result = md5.md5(result[:n]).digest()
return result[:n]
def authenticate(self, password):
key = self.authenticate_user_password(password)
if key is None:
key = self.authenticate_owner_password(password)
return key
def authenticate_user_password(self, password):
key = self.compute_encryption_key(password)
if self.verify_encryption_key(key):
return key
else:
return None
def verify_encryption_key(self, key):
# Algorithm 3.6
u = self.compute_u(key)
if self.r == 2:
return u == self.u
return u[:16] == self.u[:16]
def authenticate_owner_password(self, password):
# Algorithm 3.7
password = (password + self.PASSWORD_PADDING)[:32]
hash = md5.md5(password)
if self.r >= 3:
for _ in range(50):
hash = md5.md5(hash.digest())
n = 5
if self.r >= 3:
n = self.length // 8
key = hash.digest()[:n]
if self.r == 2:
user_password = ARC4.new(key).decrypt(self.o)
else:
user_password = self.o
for i in range(19, -1, -1):
k = bytes( (c ^ i) for c in key )
user_password = ARC4.new(k).decrypt(user_password)
return self.authenticate_user_password(user_password)
def decrypt(self, objid, genno, data, attrs=None):
return self.decrypt_rc4(objid, genno, data)
def decrypt_rc4(self, objid, genno, data):
key = self.key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2]
hash = md5.md5(key)
key = hash.digest()[:min(len(key), 16)]
return ARC4.new(key).decrypt(data)
class PDFStandardSecurityHandlerV4(PDFStandardSecurityHandler):
supported_revisions = (4,)
def init_params(self):
super(PDFStandardSecurityHandlerV4, self).init_params()
self.length = 128
self.cf = dict_value(self.param.get('CF'))
self.stmf = literal_name(self.param['StmF'])
self.strf = literal_name(self.param['StrF'])
self.encrypt_metadata = bool(self.param.get('EncryptMetadata', True))
if self.stmf != self.strf:
raise PDFEncryptionError('Unsupported crypt filter: param=%r' % self.param)
self.cfm = {}
for k, v in self.cf.items():
f = self.get_cfm(literal_name(v['CFM']))
if f is None:
raise PDFEncryptionError('Unknown crypt filter method: param=%r' % self.param)
self.cfm[k] = f
self.cfm['Identity'] = self.decrypt_identity
if self.strf not in self.cfm:
raise PDFEncryptionError('Undefined crypt filter: param=%r' % self.param)
return
def get_cfm(self, name):
if name == 'V2':
return self.decrypt_rc4
elif name == 'AESV2':
return self.decrypt_aes128
else:
return None
def decrypt(self, objid, genno, data, attrs=None, name=None):
if not self.encrypt_metadata and attrs is not None:
t = attrs.get('Type')
if t is not None and literal_name(t) == 'Metadata':
return data
if name is None:
name = self.strf
return self.cfm[name](objid, genno, data)
def decrypt_identity(self, objid, genno, data):
return data
def decrypt_aes128(self, objid, genno, data):
key = self.key + struct.pack('<L', objid)[:3] + struct.pack('<L', genno)[:2] + b'sAlT'
hash = md5.md5(key)
key = hash.digest()[:min(len(key), 16)]
return AES.new(key, mode=AES.MODE_CBC, IV=data[:16]).decrypt(data[16:])
class PDFStandardSecurityHandlerV5(PDFStandardSecurityHandlerV4):
supported_revisions = (5,)
def init_params(self):
super(PDFStandardSecurityHandlerV5, self).init_params()
self.length = 256
self.oe = bytes_value(self.param['OE'])
self.ue = bytes_value(self.param['UE'])
self.o_hash = self.o[:32]
self.o_validation_salt = self.o[32:40]
self.o_key_salt = self.o[40:]
self.u_hash = self.u[:32]
self.u_validation_salt = self.u[32:40]
self.u_key_salt = self.u[40:]
return
def get_cfm(self, name):
if name == 'AESV3':
return self.decrypt_aes256
else:
return None
def authenticate(self, password):
password = password[:127]
hash = SHA256.new(password)
hash.update(self.o_validation_salt)
hash.update(self.u)
if hash.digest() == self.o_hash:
hash = SHA256.new(password)
hash.update(self.o_key_salt)
hash.update(self.u)
return AES.new(hash.digest(), mode=AES.MODE_CBC, IV=b'\x00' * 16).decrypt(self.oe)
hash = SHA256.new(password)
hash.update(self.u_validation_salt)
if hash.digest() == self.u_hash:
hash = SHA256.new(password)
hash.update(self.u_key_salt)
return AES.new(hash.digest(), mode=AES.MODE_CBC, IV=b'\x00' * 16).decrypt(self.ue)
return None
def decrypt_aes256(self, objid, genno, data):
return AES.new(self.key, mode=AES.MODE_CBC, IV=data[:16]).decrypt(data[16:])
## PDFDocument
##
class PDFDocument:
"""PDFDocument object represents a PDF document.
Since a PDF file can be very big, normally it is not loaded at
    once. So a PDF document has to cooperate with a PDF parser in order to
dynamically import the data as processing goes.
Typical usage:
doc = PDFDocument(parser, password)
obj = doc.getobj(objid)
"""
security_handler_registry = {
1: PDFStandardSecurityHandler,
2: PDFStandardSecurityHandler,
}
if AES is not None:
security_handler_registry[4] = PDFStandardSecurityHandlerV4
if SHA256 is not None:
security_handler_registry[5] = PDFStandardSecurityHandlerV5
debug = 0
def __init__(self, parser, password=b'', caching=True, fallback=True):
"Set the document to use a given PDFParser object."
self.caching = caching
self.xrefs = []
self.info = []
self.catalog = None
self.encryption = None
self.decipher = None
self._parser = None
self._cached_objs = {}
self._parsed_objs = {}
self._parser = parser
self._parser.set_document(self)
self.is_printable = self.is_modifiable = self.is_extractable = True
# Retrieve the information of each header that was appended
# (maybe multiple times) at the end of the document.
try:
pos = self.find_xref(parser)
self.read_xref_from(parser, pos, self.xrefs)
except PDFNoValidXRef:
fallback = True
if fallback:
parser.fallback = True
xref = PDFXRefFallback()
xref.load(parser)
self.xrefs.append(xref)
for xref in self.xrefs:
trailer = xref.get_trailer()
if not trailer:
continue
# If there's an encryption info, remember it.
if 'Encrypt' in trailer:
#assert not self.encryption
self.encryption = (list_value(trailer['ID']),
dict_value(trailer['Encrypt']))
self._initialize_password(password)
if 'Info' in trailer:
self.info.append(dict_value(trailer['Info']))
if 'Root' in trailer:
# Every PDF file must have exactly one /Root dictionary.
self.catalog = dict_value(trailer['Root'])
break
else:
raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
if self.catalog.get('Type') is not LITERAL_CATALOG:
if STRICT:
raise PDFSyntaxError('Catalog not found!')
return
# _initialize_password(password=b'')
# Perform the initialization with a given password.
def _initialize_password(self, password=b''):
(docid, param) = self.encryption
if literal_name(param.get('Filter')) != 'Standard':
raise PDFEncryptionError('Unknown filter: param=%r' % param)
v = int_value(param.get('V', 0))
factory = self.security_handler_registry.get(v)
if factory is None:
raise PDFEncryptionError('Unknown algorithm: param=%r' % param)
handler = factory(docid, param, password)
self.decipher = handler.decrypt
self.is_printable = handler.is_printable()
self.is_modifiable = handler.is_modifiable()
self.is_extractable = handler.is_extractable()
self._parser.fallback = False # need to read streams with exact length
return
def _getobj_objstm(self, stream, index, objid):
if stream.objid in self._parsed_objs:
(objs, n) = self._parsed_objs[stream.objid]
else:
(objs, n) = self._get_objects(stream)
if self.caching:
self._parsed_objs[stream.objid] = (objs, n)
i = n*2+index
try:
obj = objs[i]
except IndexError:
raise PDFSyntaxError('index too big: %r' % index)
return obj
def _get_objects(self, stream):
if stream.get('Type') is not LITERAL_OBJSTM:
if STRICT:
raise PDFSyntaxError('Not a stream object: %r' % stream)
try:
n = stream['N']
except KeyError:
if STRICT:
raise PDFSyntaxError('N is not defined: %r' % stream)
n = 0
parser = PDFStreamParser(stream.get_data())
parser.set_document(self)
objs = []
try:
while 1:
(_, obj) = parser.nextobject()
objs.append(obj)
except PSEOF:
pass
return (objs, n)
KEYWORD_OBJ = KWD(b'obj')
def _getobj_parse(self, pos, objid):
self._parser.seek(pos)
(_, objid1) = self._parser.nexttoken() # objid
if objid1 != objid:
raise PDFSyntaxError('objid mismatch: %r=%r' % (objid1, objid))
(_, genno) = self._parser.nexttoken() # genno
(_, kwd) = self._parser.nexttoken()
if kwd is not self.KEYWORD_OBJ:
raise PDFSyntaxError('Invalid object spec: offset=%r' % pos)
(_, obj) = self._parser.nextobject()
return obj
# can raise PDFObjectNotFound
def getobj(self, objid):
assert objid != 0
if not self.xrefs:
raise PDFException('PDFDocument is not initialized')
if self.debug:
logging.debug('getobj: objid=%r' % objid)
if objid in self._cached_objs:
(obj, genno) = self._cached_objs[objid]
else:
for xref in self.xrefs:
try:
(strmid, index, genno) = xref.get_pos(objid)
except KeyError:
continue
try:
if strmid is not None:
stream = stream_value(self.getobj(strmid))
obj = self._getobj_objstm(stream, index, objid)
else:
obj = self._getobj_parse(index, objid)
if self.decipher:
obj = decipher_all(self.decipher, objid, genno, obj)
if isinstance(obj, PDFStream):
obj.set_objid(objid, genno)
break
except (PSEOF, PDFSyntaxError):
continue
else:
raise PDFObjectNotFound(objid)
if self.debug:
logging.debug('register: objid=%r: %r' % (objid, obj))
if self.caching:
self._cached_objs[objid] = (obj, genno)
return obj
def get_outlines(self):
if 'Outlines' not in self.catalog:
raise PDFNoOutlines
def search(entry, level):
entry = dict_value(entry)
if 'Title' in entry:
if 'A' in entry or 'Dest' in entry:
title = decode_text(bytes_value(entry['Title']))
dest = entry.get('Dest')
action = entry.get('A')
se = entry.get('SE')
yield (level, title, dest, action, se)
if 'First' in entry and 'Last' in entry:
for x in search(entry['First'], level+1):
yield x
if 'Next' in entry:
for x in search(entry['Next'], level):
yield x
return
return search(self.catalog['Outlines'], 0)
def lookup_name(self, cat, key):
try:
names = dict_value(self.catalog['Names'])
except (PDFTypeError, KeyError):
raise KeyError((cat, key))
# may raise KeyError
d0 = dict_value(names[cat])
def lookup(d):
if 'Limits' in d:
(k1, k2) = list_value(d['Limits'])
if key < k1 or k2 < key:
return None
if 'Names' in d:
objs = list_value(d['Names'])
names = dict(choplist(2, objs))
return names[key]
if 'Kids' in d:
for c in list_value(d['Kids']):
v = lookup(dict_value(c))
if v:
return v
raise KeyError((cat, key))
return lookup(d0)
def get_dest(self, name):
try:
# PDF-1.2 or later
obj = self.lookup_name('Dests', name)
except KeyError:
# PDF-1.1 or prior
if 'Dests' not in self.catalog:
raise PDFDestinationNotFound(name)
d0 = dict_value(self.catalog['Dests'])
if name not in d0:
raise PDFDestinationNotFound(name)
obj = d0[name]
return obj
# find_xref
def find_xref(self, parser):
"""Internal function used to locate the first XRef."""
# search the last xref table by scanning the file backwards.
prev = None
for line in parser.revreadlines():
line = line.strip()
if self.debug:
logging.debug('find_xref: %r' % line)
if line == b'startxref':
break
if line:
prev = line
else:
raise PDFNoValidXRef('Unexpected EOF')
if self.debug:
logging.info('xref found: pos=%r' % prev)
return int(prev)
# read xref table
def read_xref_from(self, parser, start, xrefs):
"""Reads XRefs from the given location."""
parser.seek(start)
parser.reset()
try:
(pos, token) = parser.nexttoken()
except PSEOF:
raise PDFNoValidXRef('Unexpected EOF')
if self.debug:
logging.info('read_xref_from: start=%d, token=%r' % (start, token))
if isinstance(token, int):
# XRefStream: PDF-1.5
parser.seek(pos)
parser.reset()
xref = PDFXRefStream()
xref.load(parser)
else:
if token is parser.KEYWORD_XREF:
parser.nextline()
xref = PDFXRef()
xref.load(parser)
xrefs.append(xref)
trailer = xref.get_trailer()
if self.debug:
logging.info('trailer: %r' % trailer)
if 'XRefStm' in trailer:
pos = int_value(trailer['XRefStm'])
self.read_xref_from(parser, pos, xrefs)
if 'Prev' in trailer:
# find previous xref
pos = int_value(trailer['Prev'])
self.read_xref_from(parser, pos, xrefs)
return
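# A minimal usage sketch expanding the "Typical usage" note in the PDFDocument
# docstring. It assumes the companion PDFParser class lives in .pdfparser
# alongside PDFStreamParser, as in stock pdfminer; adjust the import if the
# local package layout differs. Illustrative only.
def _example_open_document(path, password=b''):
    from .pdfparser import PDFParser
    with open(path, 'rb') as fp:
        parser = PDFParser(fp)
        doc = PDFDocument(parser, password)
        if not doc.is_extractable:
            raise PDFTextExtractionNotAllowed('Extraction not allowed: %r' % path)
        # Objects are pulled lazily through the xref tables; for example the
        # outline tree hanging off the catalog:
        outlines = list(doc.get_outlines()) if 'Outlines' in doc.catalog else []
        return doc.catalog, outlines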
|
|
"""
Technical Analysis Factors
--------------------------
"""
from __future__ import division
from numpy import (
abs,
average,
clip,
diff,
dstack,
inf,
)
from numexpr import evaluate
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nanmin,
)
from zipline.utils.numpy_utils import rolling_window
from .basic import exponential_weights
from .basic import ( # noqa reexport
# These are re-exported here for backwards compatibility with the old
# definition site.
LinearWeightedMovingAverage,
MaxDrawdown,
SimpleMovingAverage,
VWAP,
WeightedAverageValue
)
class RSI(SingleInputMixin, CustomFactor):
"""
Relative Strength Index
**Default Inputs**: :data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length**: 15
"""
window_length = 15
inputs = (EquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (EquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
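# A short usage sketch, assuming the standard zipline Pipeline API: the three
# outputs declared above are exposed as attributes of the factor instance and
# can be requested as separate pipeline columns. Illustrative only.
def _example_bollinger_pipeline(window_length=20, k=2):
    from zipline.pipeline import Pipeline

    bbands = BollingerBands(window_length=window_length, k=k)
    return Pipeline(columns={
        'bb_lower': bbands.lower,
        'bb_middle': bbands.middle,
        'bb_upper': bbands.upper,
    })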
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator
    **Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
""" # noqa
inputs = (EquityPricing.low, EquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
        The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
        The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
""" # noqa
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
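# A minimal usage sketch for the factor above; the helper name is only
# illustrative. The parameters may be overridden at construction time, and
# _validate enforces that each one stays <= window_length.
def _example_ichimoku():
    """Construct IchimokuKinkoHyo with explicit (default-equivalent) params."""
    ichi = IchimokuKinkoHyo(
        window_length=52,
        tenkan_sen_length=9,
        kijun_sen_length=26,
        chikou_span_length=26,
    )
    # Each named output is itself usable as a pipeline term.
    return ichi.tenkan_sen, ichi.senkou_span_a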
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, equals window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
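# Worked example of the ROC formula above (illustrative): with prev_close =
# 100.0 and today_close = 110.0 over the lookback window,
# ROC = ((110.0 - 100.0) / 100.0) * 100 = 10.0, i.e. a 10% gain.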
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
EquityPricing.high,
EquityPricing.low,
EquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (EquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
# Convenience aliases.
MACDSignal = MovingAverageConvergenceDivergenceSignal
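# A minimal usage sketch: wiring a few of the factors above into a Pipeline.
# Assumes `zipline.pipeline.Pipeline` is importable, as in a standard zipline
# installation; the function name is only illustrative.
def _example_technical_pipeline():
    """Return a Pipeline computing MACDSignal and both Aroon outputs."""
    from zipline.pipeline import Pipeline  # local import: sketch only
    macd = MACDSignal(fast_period=12, slow_period=26, signal_period=9)
    aroon = Aroon(window_length=25)
    return Pipeline(columns={
        'macd_signal': macd,
        'aroon_up': aroon.up,
        'aroon_down': aroon.down,
    })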
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is selfserve-make-lists.py - process ML queue and make new lists.
Runbook:
- Should run every 2-4 hours as a cron job, owned by apmail
- only accepts requests >= 12 hours old (grace period)
- validates the JSON request (valid fqdn, list name, muopts, mods etc)
- checks if the fqdn (foo.apache.org) has been set up in ezmlm,
if not, create it and update qmail too.
- runs makelist-apache.sh with the supplied args (after whitelisting)
- notifies infra and $project about new list
- on error, notifies infra with what/why, halts queue
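Example crontab entry for the apmail user (illustrative only; the interpreter
and script paths are hypothetical):
  0 */4 * * *  /usr/local/bin/python /home/apmail/selfserve-make-lists.py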
"""
import sys
import json
import os
import re
import urllib3
urllib3.disable_warnings() # Mute urllib3 on fbsd - it gets loud!
import requests
import subprocess
import smtplib
import time
import email.utils
execfile("common.conf")
# Define some vars
QUEUE_URL = "https://selfserve.apache.org/cgi-bin/queue.cgi"
DEBUG = False # Set to true to not actually make lists, just process
# queue and remove items after fake-processing them.
ROOT_DOMAIN = "apache.org" # Our root domain, don't accept if no match.
INFRAML = '[email protected]' # Infra ML
# Valid moderation settings
MUDICT = {
"mu": "Allow subscribers to post, moderate all others",
"Mu": "Allow subscribers to post, reject all others",
"mU": "Moderate all posts"
}
# I thought of using ezt here, but it's just one message, so...
ML_CREATED_TMPL = """
As requested by %s, the following mailing list has been created:
List name: %s@%s
Moderators: %s
Settings: %s
Reply-To: %s
%s
---
The list will start accepting mail in 60 minutes from now. If it's a public
list, it will appear on https://lists.apache.org/ within a few minutes of
the first post to it.
"""
def sendemail(rcpt, subject, message):
"""Simple email helper function"""
sender = "ASF Self-Service Platform <[email protected]>"
receivers = [rcpt]
if isinstance(rcpt, list):
receivers = rcpt
# Weed out infra, we're adding that explicitly to every email.
receivers = [k for k in receivers if k != INFRAML]
receivers.append("ASF Infrastructure <%s>"% INFRAML)
msgid = email.utils.make_msgid()
msg = """From: %s
Message-ID: %s
To: %s
Reply-To: ASF Infrastructure <[email protected]>
Subject: %s
%s
With regards,
ASF Self-Service Platform, https://selfserve.apache.org
For inquiries, please contact: [email protected]
""" % (sender, msgid, ", ".join(receivers), subject, message)
msg = msg.encode('ascii', errors='replace')
smtpObj = smtplib.SMTP("mail.apache.org:2025")
smtpObj.sendmail(sender, receivers, msg)
def process_request(entry):
# Gather data
eid = entry['id']
requester = entry['requester']
fqdn = entry['domain']
project = fqdn.split('.')[0] # get first part before a dot
listname = entry['list']
mods = ",".join(entry['mods'])
muopts = entry['muopts']
private = entry.get('private', False)
trailer = entry.get('trailer', None)
# This list should hopefully remain empty!
errors = []
# FQDN must be foo.apache.org or just apache.org, reject all others
if not re.match(r"^([-.a-z0-9]+\.)?apache\.org$", fqdn):
errors.append("Invalid FQDN")
# Project must be valid [a-z0-9] name, but can be omitted
if re.search(r"[^a-z0-9]", project):
errors.append("Invalid apache project requested")
# Listname must exist and be valid [a-z0-9](-[a-z0-9]) name
# like foo or foo-chat
if not listname or not re.match(r"^[a-z0-9]+(?:-[a-z0-9]+)?$", listname):
errors.append("Invalid or missing list name")
# No bad chars in the mod addresses
if re.search(r"[&;<>!\"\s\?\\]", mods) or len(mods) < 6:
errors.append("Invalid or missing moderator list")
# muopts must be one of three recognized options
if muopts not in ['mu', 'Mu', 'mU']:
errors.append("Invalid muopts. Must be mu, Mu or mU")
# Mailing list can't already exist
if os.path.exists("LISTS_DIR/%s/%s" % (fqdn, listname)):
errors.append("This mailing list appears to already exist!")
if errors:
return errors
# Make the list if all the above proved to be valid.
print("Preparing to create %s@%s..." % (listname, fqdn))
# Make sure parent fqdn exists, and if not, make it.
# NOTE: This does NOT set up DNS entries for new podlings.
    # We'll have to do that manually still, and wait for an automated
# solution later on.
if not os.path.exists("LISTS_DIR/%s" % fqdn):
print(" - %s seems to be a new FQDN, setting up parent dir first" % fqdn)
try:
if not DEBUG:
os.mkdir("LISTS_DIR/%s" % fqdn)
os.chmod("LISTS_DIR/%s" % fqdn, 0o755)
print(" - adding %s to rcpthosts" % fqdn)
if not DEBUG:
open("/var/qmail/control/rcpthosts", "a").write("%s\n" % fqdn)
print(" - adding %s to virtualdomains" % fqdn)
if not DEBUG:
open("/var/qmail/control/virtualdomains", "a").write("%s:apmail-%s\n" % (fqdn, project))
print(" - all done, you may have to run the following (I can't do that!): pkill -HUP qmail-send")
except Exception as err:
reason = "Could not set up ezmlm/qmail, aborting: %s" % err
print(reason)
# Just bail out now
return [ reason ]
# Now construct the args for makelist-apache.sh:
# Basic bash binary and script name
args = ['/usr/local/bin/bash', 'BIN_DIR/makelist-apache.sh']
# muopts (-mu, -Mu or -mU)
args.append('-' + muopts)
print(" - muopts set to %s: %s" % (muopts, MUDICT[muopts]))
# Trailer?
if trailer and trailer == 't':
args.append('-t')
# Moderators (-m [email protected],[email protected])
args.extend(['-m', mods])
print(" - moderators: %s" % mods)
# -v listname, if not [email protected]
if project and fqdn != ROOT_DOMAIN:
args.extend(['-v', project])
print(" - this is a 3rd level domain (%s), adding -v flag" % fqdn)
else:
print(" - this is a 2nd level domain (%s), not adding -v" % fqdn)
# Make sure private@ and security@ are always private
if listname in ['private', 'security']:
print(" - this is %s@, forcing private flag" % listname)
private = True
# Prefix list name with . if private
ptxt = "This list is public."
if private:
args.append('.' + listname)
        ptxt = "This list is private."
        print(" - prefixing list name with . to signify privacy")
else:
args.append(listname)
print(" - this is a public list")
# If commits|cvs|svn|notif*|issue*@, then note reply-to is dev@
rto = "%s@%s" % (listname, fqdn)
if re.match(r"(commits|cvs|svn|notif.*|issue.*)$", listname):
rto = "dev@%s (forced)" % fqdn
# Run makelist-apache.sh with args
print("Going to create %s@%s now..." % (listname, fqdn))
if not DEBUG:
print("Running: %s" % " ".join(args))
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
reason = "makelist returned an error: %s" % err.output
print("Bork: %s" % reason)
# Just bail out now
return [ reason ]
else:
print("[DBG] Would run: %s" % " ".join(args))
notify = [ "%[email protected]" % requester ]
# If there exists a private list already, notify it
if listname != "private" and os.path.exists("LISTS_DIR/%s/private" % fqdn):
notify.append('private@%s' % fqdn)
# Notify [email protected] of all new security list.
if listname == 'security':
notify.append('[email protected]')
sendemail(notify,
"[NOTICE] List created: %s@%s" % (listname, fqdn),
ML_CREATED_TMPL % (requester, listname, fqdn, mods,
MUDICT[muopts], rto, ptxt))
print("Done, removing %s from queue" % eid)
requests.get("%s?rm=%s" % (QUEUE_URL, eid))
return [ ]
def main():
# Fetch queue
rv = requests.get(QUEUE_URL)
queue = rv.json()
# Keep score of queue size and time
processed = 0
now = int(time.time())
# Go through queue
for entry in queue:
# We only handle mailing lists!
if entry.get('type') != "mailinglist":
continue
# Make sure this request is old enough, >= 12h
if entry['requested'] > (now - (3600 * 12)):
print("skipping %s, request too new!" % entry['id'])
continue
processed += 1
        # Do all the work for this ML request
errors = process_request(entry)
# If we found a buggo, ABORT!
if errors:
print("Request was not valid: \n%s" % ", ".join(errors))
print("Notifying %s" % INFRAML)
            sendemail(INFRAML, "Creation of mailing list FAILED!",
                      "As a precaution, the queue has been suspended. Will retry in 4 hours!\n"
                      "Output from program was: %s\n\nJSON input was: \n%s\n"
                      % ("\n".join(errors), json.dumps(entry)))
break
print("All done for now, processed %u list requests" % processed)
if __name__ == '__main__':
main()
|
|
# source: http://stackoverflow.com/questions/2758159/how-to-embed-a-python-interpreter-in-a-pyqt-widget
import sys, os, re
import traceback, platform
from PyQt4 import QtCore
from PyQt4 import QtGui
from electrum_arg import util
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class Console(QtGui.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtGui.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QtGui.QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QtGui.QFont(MONOSPACE_FONT, 10, QtGui.QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run':self.run_script})
self.set_json(False)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
result = eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QtGui.QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = unicode(doc.findBlockByLineNumber(doc.lineCount() - 1).text())
self.moveCursor(QtGui.QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QtGui.QTextCursor.Left, QtGui.QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QtGui.QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = map(lambda x: x.split('.')[-1], completions)
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
l = self.completions_end - self.completions_pos
for x in range(l): c.deleteChar()
self.moveCursor(QtGui.QTextCursor.End)
self.completions_visible = False
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
if command and command[-1] == (':'):
self.construct.append(command)
return ''
else:
return command
def getHistory(self):
return self.history
def setHisory(self, history):
self.history = history
def addToHistory(self, command):
if command[0:1] == ' ':
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QtGui.QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QtGui.QTextCursor.Right)
def register_command(self, c, func):
methods = { c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def flush(self):
pass
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
if type(self.namespace.get(command)) == type(lambda:None):
self.appendPlainText("'%s' is a function. Type '%s()' to use it in the Python console."%(command, command))
self.newPrompt()
return
sys.stdout = stdoutProxy(self.appendPlainText)
try:
try:
# eval is generally considered bad practice. use it wisely!
result = eval(command, self.namespace, self.namespace)
if result != None:
if self.is_json:
util.print_msg(util.json_encode(result))
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
exec command in self.namespace
except SystemExit:
self.close()
except Exception:
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3,2,1,-1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
def completions(self):
cmd = self.getCommand()
lastword = re.split(' |\(|\)',cmd)[-1]
beginning = cmd[0:-len(lastword)]
path = lastword.split('.')
ns = self.namespace.keys()
if len(path) == 1:
ns = ns
prefix = ''
else:
obj = self.namespace.get(path[0])
prefix = path[0] + '.'
ns = dir(obj)
completions = []
for x in ns:
if x[0] == '_':continue
xx = prefix + x
if xx.startswith(lastword):
completions.append(xx)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p)>len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
console.updateNamespace({'myVar1' : app, 'myVar2' : 1234})
console.show();
sys.exit(app.exec_())
|
|
import os
import shutil
import tempfile
import warnings
import numpy as np
from vtk.numpy_interface import dataset_adapter as dsa
# VTK imports:
from vtk.util import numpy_support as nps
import PVGeo
from base import TestBase
# Functionality to test:
from PVGeo.ubc import (
GravObsReader,
TensorMeshAppender,
TensorMeshReader,
TopoMeshAppender,
TopoReader,
WriteImageDataToUBC,
WriteRectilinearGridToUBC,
)
discretize_available = False
try:
with PVGeo._helpers.HiddenPrints():
import discretize
except ImportError:
warnings.warn('`discretize` is NOT available. Be sure to install it.')
else:
discretize_available = True
from PVGeo.ubc import OcTreeAppender, OcTreeReader
RTOL = 0.000001
###############################################################################
class ubcMeshTesterBase(TestBase):
def _check_shape(self, grid):
self.assertEqual(grid.GetExtent(), self.extent)
self.assertEqual(
grid.GetNumberOfCells(), self.extent[1] * self.extent[3] * self.extent[5]
)
return
def _check_data(self, grid, data):
arr = nps.vtk_to_numpy(grid.GetCellData().GetArray(0))
self.assertTrue(np.allclose(data, arr, rtol=RTOL))
return
def _check_spatial_reference(self, grid):
bounds = grid.GetBounds()
corner = (bounds[0], bounds[2], bounds[5])
self.assertEqual(corner, self.origin)
return
###############################################################################
class Test3DTensorMesh(ubcMeshTesterBase):
"""
Test the `TensorMeshReader`, `TensorMeshAppender`, `TopoMeshAppender`,
`WriteRectilinearGridToUBC`, and `WriteImageDataToUBC` for 3D data
"""
def _write_mesh(self):
filename = os.path.join(self.test_dir, 'test.msh')
with open(filename, 'w') as f:
f.write('%d %d %d\n' % self.shape)
f.write('%d %d %d\n' % self.origin)
f.write('%s\n' % self.xCells)
f.write('%s\n' % self.yCells)
f.write('%s\n' % self.zCells)
return filename
def _write_model(self, filename='test.mod'):
filename = os.path.join(self.test_dir, filename)
model = np.random.random(self.n)
np.savetxt(filename, model, delimiter=' ', comments='! ')
model = np.reshape(model, self.shape)
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :] # Note it is in Fortran ordering
model = model.flatten()
return filename, model
def _write_model_multi(self, filename='test.fld'):
"""writes a multi component model"""
filename = os.path.join(self.test_dir, filename)
model = np.random.random((self.n, 3))
np.savetxt(filename, model, delimiter=' ', comments='! ')
shp = self.shape
model = np.reshape(model, (shp[0], shp[1], shp[2], 3))
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :, :] # Note it is in Fortran ordering
model = np.reshape(model, (shp[0] * shp[1] * shp[2], 3))
return filename, model
def setUp(self):
TestBase.setUp(self)
# Create a temporary directory
self.test_dir = tempfile.mkdtemp()
self.origin = (-350, -400, 0)
self.xCells = '200 100 50 20*50.0 50 100 200'
self.yCells = '200 100 50 21*50.0 50 100 200'
self.zCells = '20*25.0 50 100 200'
self.shape = (26, 27, 23)
self.n = self.shape[0] * self.shape[1] * self.shape[2]
self.extent = (0, self.shape[0], 0, self.shape[1], 0, self.shape[2])
self.data_name = 'foo'
##### Now generate output for testing ####
# Produce data and write out files:
self.meshname = self._write_mesh()
self.modname, self.data = self._write_model()
self.modname_multi, self.data_multi = self._write_model_multi()
# Set up the reader:
reader = TensorMeshReader()
reader.set_mesh_filename(self.meshname)
# Get and test output:
reader.Update() # Read only mesh upfront
reader.add_model_file_name(self.modname)
reader.set_data_name(self.data_name)
reader.Update() # Read models upfront
self.GRID = reader.GetOutput()
#### Now read mesh with multi component data
# Set up the reader:
reader = TensorMeshReader()
reader.set_mesh_filename(self.meshname)
# Get and test output:
reader.Update() # Read only mesh upfront
reader.add_model_file_name(self.modname_multi)
reader.set_data_name(self.data_name)
reader.Update() # Read models upfront
self.GRID_MULTI = reader.GetOutput()
def tearDown(self):
# Remove the test data directory after the test
shutil.rmtree(self.test_dir)
TestBase.tearDown(self)
###########################################
def test_grid_spatial_reference(self):
"""`TensorMeshReader` 3D: Spatial reference"""
self._check_spatial_reference(self.GRID)
self._check_spatial_reference(self.GRID_MULTI)
def test_grid_shape(self):
"""`TensorMeshReader` 3D: Shape of output grid"""
self._check_shape(self.GRID)
self._check_shape(self.GRID_MULTI)
def test_grid_data(self):
"""`TensorMeshReader` 3D: Data fidelity"""
self._check_data(self.GRID, self.data)
self._check_data(self.GRID_MULTI, self.data_multi)
def test_grid_data_name(self):
"""`TensorMeshReader` 3D: Data array name"""
self.assertEqual(self.GRID.GetCellData().GetArrayName(0), self.data_name)
self.assertEqual(self.GRID_MULTI.GetCellData().GetArrayName(0), self.data_name)
def test_model_appender(self):
"""`TensorMeshAppender` 3D: Data array name"""
modname, appdata = self._write_model('testApp.mod')
f = TensorMeshAppender()
f.SetInputDataObject(self.GRID)
f.add_model_file_name(modname)
f.set_data_name('appended')
f.Update()
output = f.GetOutput()
self.assertEqual(output.GetCellData().GetNumberOfArrays(), 2)
self.assertEqual(output.GetCellData().GetArrayName(1), 'appended')
    def test_topo_appender(self):
        """`TopoMeshAppender` 3D: Test topography appender"""
indices = np.array(
[
[0, 0, 1],
[0, 1, 1],
[0, 2, 1],
[1, 0, 1],
[1, 1, 1],
[1, 2, 1],
[2, 0, 1],
[2, 1, 1],
[2, 2, 2],
],
dtype=int,
)
filename = os.path.join(self.test_dir, 'disc-topo.txt')
np.savetxt(filename, X=indices, fmt='%d', comments='', header='3 3')
# Create input grid
grid = PVGeo.model_build.CreateTensorMesh(
xcellstr='1.0 1.0 1.0', ycellstr='1.0 1.0 1.0', zcellstr='1.0 1.0 1.0'
).apply()
# run the filter
f = TopoMeshAppender()
f.SetInputDataObject(grid)
f.set_topo_filename(filename)
f.Update()
output = f.GetOutput()
# TODO: check output
self.assertIsNotNone(output)
    def test_writer(self):
        """`WriteRectilinearGridToUBC`: Test data integrity across I/O"""
# Write known data back out using the writer:
writer = WriteRectilinearGridToUBC()
filename = os.path.join(self.test_dir, 'test-writer.msh')
writer.SetFileName(filename)
writer.Write(self.GRID)
# Now read in the data again and compare!
reader = TensorMeshReader()
reader.set_mesh_filename(filename)
modname = os.path.join(self.test_dir, '%s.mod' % self.data_name)
reader.add_model_file_name(modname)
reader.set_data_name(self.data_name)
reader.Update()
test = reader.GetOutput()
# Compare the data
self._check_shape(test)
self._check_spatial_reference(test)
self._check_data(test, self.data)
self.assertEqual(test.GetCellData().GetArrayName(0), self.data_name)
return
def test_chile_example(self):
"""`TensorMeshReader`: Test Chile mesh example"""
meshfile = os.path.join(
os.path.dirname(__file__), 'data/Craig-Chile/craig_chile.msh'
)
modfile = os.path.join(os.path.dirname(__file__), 'data/Craig-Chile/Lpout.mod')
reader = TensorMeshReader()
reader.set_mesh_filename(meshfile)
# Get and test output:
reader.Update() # Read only mesh upfront
reader.add_model_file_name(modfile)
reader.set_data_name('Lpout')
mesh = reader.apply()
self.assertTrue(mesh.n_cells > 0)
self.assertTrue(mesh.n_arrays > 0)
###############################################################################
class Test2DTensorMeshReader(ubcMeshTesterBase):
"""
Test the `TensorMeshReader` and `TensorMeshAppender` for 2D data
"""
def _write_mesh(self):
filename = os.path.join(self.test_dir, 'test.msh')
with open(filename, 'w') as f:
f.write(self.mesh)
return filename
def _write_model(self, filename='test.mod'):
filename = os.path.join(self.test_dir, filename)
model = np.random.random((self.nz, self.nx))
with open(filename, 'w') as f:
f.write('%d %d\n' % (self.nx, self.nz))
for k in range(self.nz):
for i in range(self.nx):
f.write('%.6e ' % model[k, i])
f.write('\n')
f.close()
model = np.reshape(model.flatten(order='F'), self.shape)
model = np.swapaxes(model, 0, 1)
model = np.swapaxes(model, 0, 2)
# Now reverse Z axis
model = model[::-1, :, :] # Note it is in Fortran ordering
model = model.flatten()
return filename, model
def setUp(self):
TestBase.setUp(self)
# Create a temporary directory
self.test_dir = tempfile.mkdtemp()
self.mesh = """9
-300.0 -180.0 1
-130.0 1
-110.0 1
-100.0 1
100.0 40
110.0 1
130.0 1
180.0 1
300.0 1
14
-10.0 10.0 5
22.0 4
42.0 5
57.0 3
63.0 1
71.0 1
81.0 1
95.0 1
115.0 1
140.0 1
170.0 1
205.0 1
245.0 1
300.0 1
"""
self.origin = (-300, 0, 10)
self.nx = 48
self.nz = 27
self.shape = (self.nx, 1, self.nz)
self.extent = (0, self.shape[0], 0, self.shape[1], 0, self.shape[2])
self.data_name = 'foo'
##### Now generate output for testing ####
# Produce data and write out files:
meshname = self._write_mesh()
modname, self.data = self._write_model()
# Set up the reader:
reader = TensorMeshReader()
reader.set_mesh_filename(meshname)
# Get and test output:
reader.Update() # Test the read up front for the mesh
reader.add_model_file_name(modname)
reader.set_data_name(self.data_name)
reader.Update() # Now read the models upfront
self.GRID = reader.GetOutput()
return
def tearDown(self):
# Remove the test data directory after the test
shutil.rmtree(self.test_dir)
TestBase.tearDown(self)
###########################################
def test_grid_spatial_reference(self):
"""`TensorMeshReader` 2D: Spatial reference"""
self._check_spatial_reference(self.GRID)
def test_grid_shape(self):
"""`TensorMeshReader` 2D: Shape of output grid"""
self._check_shape(self.GRID)
def test_grid_data(self):
"""`TensorMeshReader` 2D: Data fidelity"""
self._check_data(self.GRID, self.data)
def test_grid_data_name(self):
"""`TensorMeshReader` 2D: Data array name"""
self.assertEqual(self.GRID.GetCellData().GetArrayName(0), self.data_name)
def test_model_appender(self):
"""`TensorMeshAppender` 2D: Data array name"""
modname, appdata = self._write_model('testApp.mod')
f = TensorMeshAppender()
f.SetInputDataObject(self.GRID)
f.add_model_file_name(modname)
f.set_data_name('appended')
f.Update()
output = f.GetOutput()
self.assertEqual(output.GetCellData().GetNumberOfArrays(), 2)
self.assertEqual(output.GetCellData().GetArrayName(1), 'appended')
###############################################################################
if discretize_available:
class TestOcTreeMeshReader(ubcMeshTesterBase):
"""
Test the `OcTreeReader`
"""
def setUp(self):
TestBase.setUp(self)
self.test_dir = tempfile.mkdtemp()
treeMesh = """16 16 16
0.0000 0.0000 48.0000
1.000 2.000 3.000
29
1 1 1 8
9 1 1 8
1 9 1 8
9 9 1 8
1 1 9 4
5 1 9 4
9 1 9 8
1 5 9 4
5 5 9 4
1 9 9 8
9 9 9 8
1 1 13 2
3 1 13 2
5 1 13 4
1 3 13 2
3 3 13 2
1 5 13 4
5 5 13 4
1 1 15 1
2 1 15 1
3 1 15 2
1 2 15 1
2 2 15 1
1 3 15 2
3 3 15 2
1 1 16 1
2 1 16 1
1 2 16 1
2 2 16 1
"""
# Write out mesh file
filename = os.path.join(self.test_dir, 'octree.msh')
self.meshFileName = filename
with open(filename, 'w') as f:
f.write(treeMesh)
# write out model file(s)
self.nt = 5
self.modelFileNames = ['model%d.mod' % i for i in range(self.nt)]
self.modelFileNames = [
os.path.join(self.test_dir, self.modelFileNames[i])
for i in range(self.nt)
]
self.arrs = [None] * self.nt
for i in range(self.nt):
self.arrs[i] = np.random.random(29)
np.savetxt(
self.modelFileNames[i], self.arrs[i], delimiter=' ', comments='! '
)
return
def tearDown(self):
# Remove the test data directory after the test
shutil.rmtree(self.test_dir)
TestBase.tearDown(self)
def reshapeArrs(self, mesh):
for i in range(self.nt):
ind_reorder = nps.vtk_to_numpy(
mesh.GetCellData().GetArray('index_cell_corner')
)
self.arrs[i] = self.arrs[i][ind_reorder]
def test_simple_octree(self):
"""`OcTreeReader`: simple octree mesh file"""
reader = OcTreeReader()
reader.set_mesh_filename(self.meshFileName)
reader.Update()
tree = reader.GetOutput()
self.assertIsNotNone(tree)
self.assertEqual(tree.GetNumberOfCells(), 29)
self.assertEqual(tree.GetNumberOfPoints(), 84)
def test_simple_octree_models(self):
"""`OcTreeReader`: simple octree mesh with models"""
reader = OcTreeReader()
reader.set_mesh_filename(self.meshFileName)
reader.add_model_file_name(self.modelFileNames)
reader.set_data_name('foo')
reader.Update() # Check that normal update works
tree = reader.GetOutputDataObject(0)
self.assertIsNotNone(tree)
self.assertEqual(tree.GetNumberOfCells(), 29)
self.assertEqual(tree.GetNumberOfPoints(), 84)
self.reshapeArrs(tree)
wtree = dsa.WrapDataObject(tree)
# Now check time series
for i in range(self.nt):
reader.UpdateTimeStep(i)
arr = wtree.CellData['foo']
self.assertTrue(np.allclose(arr, self.arrs[i], rtol=RTOL))
return
def test_model_appender(self):
"""`OcTreeAppender` 2D: Data array name"""
            # Create a tree mesh to append
reader = OcTreeReader()
reader.set_mesh_filename(self.meshFileName)
reader.add_model_file_name(self.modelFileNames[0])
reader.set_data_name('Initial Data')
reader.Update()
tree = reader.GetOutput()
self.assertIsNotNone(tree)
self.assertEqual(tree.GetNumberOfCells(), 29)
self.assertEqual(tree.GetNumberOfPoints(), 84)
# Now use the model appender
f = OcTreeAppender()
f.SetInputDataObject(tree)
f.add_model_file_name(self.modelFileNames[1::])
f.set_data_name('Appended Data')
f.Update()
output = f.GetOutput()
            # remember that 2 arrays are added by the reader
self.assertEqual(output.GetCellData().GetNumberOfArrays(), 4)
self.assertEqual(
output.GetCellData().GetArrayName(3),
os.path.basename(self.modelFileNames[1]),
) # use file as name
self.assertEqual(len(f.get_time_step_values()), self.nt - 1)
return
###############################################################################
class TestGravObsReader(TestBase):
"""
Test the `GravObsReader`
"""
def setUp(self):
TestBase.setUp(self)
self.filename = os.path.join(
os.path.dirname(__file__), 'data/Craig-Chile/LdM_grav_obs.grv'
)
def test(self):
"""`GravObsReader`: Test reader for UBC Gravity Observations"""
reader = GravObsReader()
data = reader.apply(self.filename)
self.assertIsNotNone(data)
# Check shapes of the surfer grid
self.assertTrue(data.n_points > 0)
return
###############################################################################
class TestTopoReader(TestBase):
"""
Test the `TopoReader`
"""
def setUp(self):
TestBase.setUp(self)
self.filename = os.path.join(
os.path.dirname(__file__), 'data/Craig-Chile/LdM_topo.topo'
)
def test(self):
"""`TopoReader`: Test reader for UBC topography"""
reader = TopoReader()
data = reader.apply(self.filename)
self.assertIsNotNone(data)
# Check shapes of the surfer grid
self.assertTrue(data.n_points > 0)
return
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
import unittest
unittest.main()
###############################################################################
###############################################################################
###############################################################################
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from abc import abstractmethod
from hashlib import sha1
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.validation import assert_list
from pants.util.meta import AbstractClass
def stable_json_dumps(obj):
return json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True)
def stable_json_sha1(obj):
return sha1(stable_json_dumps(obj)).hexdigest()
def combine_hashes(hashes):
"""A simple helper function to combine other hashes. Sorts the hashes before rolling them in."""
hasher = sha1()
for h in sorted(hashes):
hasher.update(h)
return hasher.hexdigest()
class PayloadField(AbstractClass):
"""An immutable, hashable structure to be mixed into Payload instances."""
_fingerprint_memo = None
def fingerprint(self):
"""A memoized sha1 hexdigest hashing the contents of this PayloadField
The fingerprint returns either a bytestring or None. If the return is None, consumers of the
fingerprint may choose to elide this PayloadField from their combined hash computation.
"""
if self._fingerprint_memo is None:
self._fingerprint_memo = self._compute_fingerprint()
return self._fingerprint_memo
@abstractmethod
def _compute_fingerprint(self):
"""This method will be called and the result memoized for ``PayloadField.fingerprint``."""
pass
@property
def value(self):
return self
class FingerprintedMixin(object):
"""Mixin this class to make your class suitable for passing to FingerprintedField."""
def fingerprint(self):
"""Override this method to implement a fingerprint for your class.
:returns: a sha1 hexdigest hashing the contents of this structure."""
raise NotImplementedError()
class FingerprintedField(PayloadField):
"""Use this field to fingerprint any class that mixes in FingerprintedMixin.
The caller must ensure that the class properly implements fingerprint()
to hash the contents of the object.
"""
def __init__(self, value):
self._value = value
def _compute_fingerprint(self):
return self._value.fingerprint()
@property
def value(self):
return self._value
class SourcesField(PayloadField):
"""A PayloadField encapsulating specified sources."""
def __init__(self, sources_rel_path, sources, ref_address=None, filespec=None):
"""
:param sources_rel_path: path that sources parameter may be relative to
:param sources: list of strings representing relative file paths
:param ref_address: optional address spec of target that provides these sources
:param filespec: glob and exclude data that generated this set of sources
"""
self._rel_path = sources_rel_path
self._source_paths = assert_list(sources)
self._ref_address = ref_address
self._filespec = filespec
@property
def filespec(self):
return self._filespec
@property
def rel_path(self):
return self._rel_path
@property
def source_paths(self):
return self._source_paths
@property
  def address(self):
    """Returns the address this sources field refers to (used by some derived classes)"""
return self._ref_address
@property
def num_chunking_units(self):
"""For tasks that require chunking, this is the number of chunk units this field represents.
By default, this is just the number of sources. Other heuristics might consider the number
of bytes or lines in the combined source files.
"""
if self._source_paths:
return len(self._source_paths)
return 1
def has_sources(self, extension=None):
if not self._source_paths:
return False
return any(source.endswith(extension) for source in self._source_paths)
def relative_to_buildroot(self):
"""All sources joined with ``self.rel_path``."""
return [os.path.join(self.rel_path, source) for source in self.source_paths]
def _compute_fingerprint(self):
hasher = sha1()
hasher.update(self._rel_path)
for source in sorted(self.relative_to_buildroot()):
hasher.update(source)
with open(os.path.join(get_buildroot(), source), 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
class DeferredSourcesField(SourcesField):
""" A SourcesField that isn't populated immediately when the graph is constructed.
You must subclass this and provide a fingerprint implementation. Requires a task
to call populate() to provide its contents later during processing. For example,
if sources are in an archive, you might use the fingerprint of the archive. If they
are from an external artifact, you might take a fingerprint of the name and version of
the artifact.
"""
class AlreadyPopulatedError(Exception):
"""Raised when a DeferredSourcesField has already been populated."""
pass
class NotPopulatedError(Exception):
""" Raised when the PayloadField has not been populated yet."""
def __init__(self):
super(Exception, self).__init__(
"Field requires a call to populate() before this method can be called.")
def __init__(self, ref_address):
self._populated = False
super(DeferredSourcesField, self).__init__(sources_rel_path=None, sources=[],
ref_address=ref_address)
def populate(self, sources, rel_path=None):
"""Call this method to set the list of files represented by the target.
Intended to be invoked by the DeferredSourcesMapper task.
:param list sources: strings representing absolute paths of files to be included in the source set
:param string rel_path: common prefix for files.
"""
if self._populated:
raise self.AlreadyPopulatedError("Called with rel_path={rel_path} sources={sources}"
.format(rel_path=rel_path, sources=sources))
self._rel_path = rel_path
self._source_paths = assert_list(sources)
self._populated = True
@property
def source_paths(self):
if not self._populated:
raise self.NotPopulatedError()
return self._source_paths
def _compute_fingerprint(self):
"""A subclass must provide an implementation of _compute_fingerprint that can return a valid
fingerprint even if the sources aren't unpacked yet.
"""
if not self._populated:
raise self.NotPopulatedError()
return super(DeferredSourcesField, self)._compute_fingerprint()
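# A minimal sketch of the pattern suggested in the DeferredSourcesField
# docstring above: fingerprint an archive by its coordinates before the
# sources are unpacked. The class name and constructor arguments are
# hypothetical.
class ArchiveDeferredSourcesField(DeferredSourcesField):
  """Defers sources that a task will later unpack from a named archive."""
  def __init__(self, ref_address, archive_name, archive_version):
    super(ArchiveDeferredSourcesField, self).__init__(ref_address=ref_address)
    self._archive_name = archive_name
    self._archive_version = archive_version
  def _compute_fingerprint(self):
    # Stable even before populate() is called: hash the archive coordinates.
    return stable_json_sha1((self._archive_name, self._archive_version))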
class PythonRequirementsField(frozenset, PayloadField):
"""A frozenset subclass that mixes in PayloadField.
Must be initialized with an iterable of PythonRequirement instances.
"""
def _compute_fingerprint(self):
def fingerprint_iter():
for req in self:
# TODO(pl): See PythonRequirement note about version_filter
hash_items = (
repr(req._requirement),
req._repository,
req._name,
req._use_2to3,
req.compatibility,
)
yield stable_json_sha1(hash_items)
return combine_hashes(fingerprint_iter())
def hash_bundle(bundle):
hasher = sha1()
hasher.update(bundle._rel_path)
for abs_path in sorted(bundle.filemap.keys()):
buildroot_relative_path = os.path.relpath(abs_path, get_buildroot())
hasher.update(buildroot_relative_path)
hasher.update(bundle.filemap[abs_path])
with open(abs_path, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
class BundleField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of Bundle instances.
"""
def _compute_fingerprint(self):
return combine_hashes(map(hash_bundle, self))
class ExcludesField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of Excludes instances.
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(repr(exclude) for exclude in self))
class ConfigurationsField(OrderedSet, PayloadField):
"""An OrderedSet subclass that mixes in PayloadField.
Must be initialized with an iterable of strings.
"""
def _compute_fingerprint(self):
return combine_hashes(sha1(s).hexdigest() for s in self)
class JarsField(tuple, PayloadField):
"""A tuple subclass that mixes in PayloadField.
Must be initialized with an iterable of JarDependency instances.
"""
def _compute_fingerprint(self):
return stable_json_sha1(tuple(jar.cache_key() for jar in self))
class PrimitiveField(PayloadField):
"""A general field for primitive types.
As long as the contents are JSON representable, their hash can be stably inferred.
"""
def __init__(self, underlying=None):
self._underlying = underlying
@property
def value(self):
return self._underlying
def _compute_fingerprint(self):
return stable_json_sha1(self._underlying)
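# A minimal sketch showing that PrimitiveField fingerprints do not depend on
# key ordering; the helper name is only illustrative.
def _primitive_field_example():
  a = PrimitiveField({'a': 1, 'b': [2, 3]})
  b = PrimitiveField({'b': [2, 3], 'a': 1})
  # stable_json_dumps sorts keys, so logically-equal values hash identically.
  assert a.fingerprint() == b.fingerprint()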
|
|
"""Graphviz's dot language parser.
The dotparser parses Graphviz files in the DOT language and transforms them
into a class representation defined by pydot.
Author: Michael Krause <[email protected]>
Fixes by: Ero Carrera <[email protected]>
"""
from __future__ import division
from __future__ import print_function
import sys
from pyparsing import (
nestedExpr, Literal, CaselessLiteral,
Word, OneOrMore,
Forward,
Group, Optional, Combine,
restOfLine, cStyleComment, nums, alphanums,
printables,
ParseException, ParseResults, CharsNotIn,
QuotedString)
import pydot
__author__ = ['Michael Krause', 'Ero Carrera']
__license__ = 'MIT'
PY3 = sys.version_info >= (3, 0, 0)
if PY3:
str_type = str
else:
str_type = basestring
class P_AttrList(object):
def __init__(self, toks):
self.attrs = {}
i = 0
while i < len(toks):
attrname = toks[i]
if i+2 < len(toks) and toks[i+1] == '=':
attrvalue = toks[i+2]
i += 3
else:
attrvalue = None
i += 1
self.attrs[attrname] = attrvalue
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.attrs)
class DefaultStatement(P_AttrList):
def __init__(self, default_type, attrs):
self.default_type = default_type
self.attrs = attrs
def __repr__(self):
return "%s(%s, %r)" % (self.__class__.__name__,
self.default_type, self.attrs)
top_graphs = list()
def push_top_graph_stmt(str, loc, toks):
attrs = {}
g = None
for element in toks:
if (isinstance(element, (ParseResults, tuple, list)) and
len(element) == 1 and
isinstance(element[0], str_type)):
element = element[0]
if element == 'strict':
attrs['strict'] = True
elif element in ['graph', 'digraph']:
attrs = {}
g = pydot.Dot(graph_type=element, **attrs)
attrs['type'] = element
top_graphs.append( g )
elif isinstance( element, str_type):
g.set_name( element )
elif isinstance(element, pydot.Subgraph):
g.obj_dict['attributes'].update( element.obj_dict['attributes'] )
g.obj_dict['edges'].update( element.obj_dict['edges'] )
g.obj_dict['nodes'].update( element.obj_dict['nodes'] )
g.obj_dict['subgraphs'].update( element.obj_dict['subgraphs'] )
g.set_parent_graph(g)
elif isinstance(element, P_AttrList):
attrs.update(element.attrs)
elif isinstance(element, (ParseResults, list)):
add_elements(g, element)
else:
raise ValueError(
'Unknown element statement: {s}'.format(s=element))
for g in top_graphs:
update_parent_graph_hierarchy(g)
if len( top_graphs ) == 1:
return top_graphs[0]
return top_graphs
def update_parent_graph_hierarchy(g, parent_graph=None, level=0):
if parent_graph is None:
parent_graph = g
for key_name in ('edges',):
if isinstance(g, pydot.frozendict):
item_dict = g
else:
item_dict = g.obj_dict
if key_name not in item_dict:
continue
for key, objs in item_dict[key_name].items():
for obj in objs:
if ('parent_graph' in obj and
obj['parent_graph'].get_parent_graph()==g):
if obj['parent_graph'] is g:
pass
else:
obj['parent_graph'].set_parent_graph(parent_graph)
if key_name == 'edges' and len(key) == 2:
for idx, vertex in enumerate( obj['points'] ):
if isinstance( vertex,
(pydot.Graph,
pydot.Subgraph, pydot.Cluster)):
vertex.set_parent_graph(parent_graph)
if isinstance( vertex, pydot.frozendict):
if vertex['parent_graph'] is g:
pass
else:
vertex['parent_graph'].set_parent_graph(
parent_graph)
def add_defaults(element, defaults):
d = element.__dict__
for key, value in defaults.items():
if not d.get(key):
d[key] = value
def add_elements(g, toks, defaults_graph=None,
defaults_node=None, defaults_edge=None):
if defaults_graph is None:
defaults_graph = {}
if defaults_node is None:
defaults_node = {}
if defaults_edge is None:
defaults_edge = {}
for elm_idx, element in enumerate(toks):
if isinstance(element, (pydot.Subgraph, pydot.Cluster)):
add_defaults(element, defaults_graph)
g.add_subgraph(element)
elif isinstance(element, pydot.Node):
add_defaults(element, defaults_node)
g.add_node(element)
elif isinstance(element, pydot.Edge):
add_defaults(element, defaults_edge)
g.add_edge(element)
elif isinstance(element, ParseResults):
for e in element:
add_elements(g, [e], defaults_graph,
defaults_node, defaults_edge)
elif isinstance(element, DefaultStatement):
if element.default_type == 'graph':
default_graph_attrs = pydot.Node('graph', **element.attrs)
g.add_node(default_graph_attrs)
elif element.default_type == 'node':
default_node_attrs = pydot.Node('node', **element.attrs)
g.add_node(default_node_attrs)
elif element.default_type == 'edge':
default_edge_attrs = pydot.Node('edge', **element.attrs)
g.add_node(default_edge_attrs)
defaults_edge.update(element.attrs)
else:
raise ValueError(
'Unknown DefaultStatement: {s}'.format(
s=element.default_type))
elif isinstance(element, P_AttrList):
g.obj_dict['attributes'].update(element.attrs)
else:
raise ValueError(
'Unknown element statement: {s}'.format(s=element))
def push_graph_stmt(str, loc, toks):
g = pydot.Subgraph('')
add_elements(g, toks)
return g
def push_subgraph_stmt(str, loc, toks):
g = pydot.Subgraph('')
for e in toks:
if len(e)==3:
e[2].set_name(e[1])
if e[0] == 'subgraph':
e[2].obj_dict['show_keyword'] = True
return e[2]
else:
if e[0] == 'subgraph':
e[1].obj_dict['show_keyword'] = True
return e[1]
return g
def push_default_stmt(str, loc, toks):
# The pydot class instances should be marked as
# default statements to be inherited by actual
# graphs, nodes and edges.
#
default_type = toks[0][0]
if len(toks) > 1:
attrs = toks[1].attrs
else:
attrs = {}
if default_type in ['graph', 'node', 'edge']:
return DefaultStatement(default_type, attrs)
else:
raise ValueError(
'Unknown default statement: {s}'.format(s=toks))
def push_attr_list(str, loc, toks):
p = P_AttrList(toks)
return p
def get_port(node):
if len(node)>1:
if isinstance(node[1], ParseResults):
if len(node[1][0])==2:
if node[1][0][0]==':':
return node[1][0][1]
return None
def do_node_ports(node):
node_port = ''
if len(node) > 1:
node_port = ''.join( [str(a)+str(b) for a,b in node[1] ] )
return node_port
def push_edge_stmt(str, loc, toks):
tok_attrs = [a for a in toks if isinstance(a, P_AttrList)]
attrs = {}
for a in tok_attrs:
attrs.update(a.attrs)
e = []
if isinstance(toks[0][0], pydot.Graph):
n_prev = pydot.frozendict(toks[0][0].obj_dict)
else:
n_prev = toks[0][0] + do_node_ports( toks[0] )
if isinstance(toks[2][0], ParseResults):
n_next_list = [[n.get_name(),] for n in toks[2][0] ]
for n_next in [n for n in n_next_list]:
n_next_port = do_node_ports(n_next)
e.append(pydot.Edge(n_prev, n_next[0]+n_next_port, **attrs))
elif isinstance(toks[2][0], pydot.Graph):
e.append(pydot.Edge(n_prev,
pydot.frozendict(toks[2][0].obj_dict),
**attrs))
elif isinstance(toks[2][0], pydot.Node):
node = toks[2][0]
if node.get_port() is not None:
name_port = node.get_name() + ":" + node.get_port()
else:
name_port = node.get_name()
e.append(pydot.Edge(n_prev, name_port, **attrs))
# if the target of this edge is the name of a node
elif isinstance(toks[2][0], str_type):
for n_next in [n for n in tuple(toks)[2::2]]:
if (isinstance(n_next, P_AttrList) or
not isinstance(n_next[0], str_type)):
continue
n_next_port = do_node_ports( n_next )
e.append(pydot.Edge(n_prev, n_next[0]+n_next_port, **attrs))
n_prev = n_next[0]+n_next_port
else:
raise Exception(
'Edge target {r} with type {s} unsupported.'.format(
r=toks[2][0], s=type(toks[2][0])))
return e
def push_node_stmt(s, loc, toks):
if len(toks) == 2:
attrs = toks[1].attrs
else:
attrs = {}
node_name = toks[0]
if isinstance(node_name, list) or isinstance(node_name, tuple):
if len(node_name)>0:
node_name = node_name[0]
n = pydot.Node(str(node_name), **attrs)
return n
graphparser = None
def graph_definition():
global graphparser
if not graphparser:
# punctuation
colon = Literal(":")
lbrace = Literal("{")
rbrace = Literal("}")
lbrack = Literal("[")
rbrack = Literal("]")
lparen = Literal("(")
rparen = Literal(")")
equals = Literal("=")
comma = Literal(",")
dot = Literal(".")
slash = Literal("/")
bslash = Literal("\\")
star = Literal("*")
semi = Literal(";")
at = Literal("@")
minus = Literal("-")
# keywords
strict_ = CaselessLiteral("strict")
graph_ = CaselessLiteral("graph")
digraph_ = CaselessLiteral("digraph")
subgraph_ = CaselessLiteral("subgraph")
node_ = CaselessLiteral("node")
edge_ = CaselessLiteral("edge")
# token definitions
identifier = Word(alphanums + "_." ).setName("identifier")
double_quoted_string = QuotedString(
'"', multiline=True, unquoteResults=False, escChar='\\') # dblQuotedString
noncomma = "".join([c for c in printables if c != ","])
alphastring_ = OneOrMore(CharsNotIn(noncomma + ' '))
def parse_html(s, loc, toks):
return '<%s>' % ''.join(toks[0])
opener = '<'
closer = '>'
html_text = nestedExpr( opener, closer,
( CharsNotIn( opener + closer ) )
).setParseAction(parse_html).leaveWhitespace()
ID = ( identifier | html_text |
double_quoted_string | #.setParseAction(strip_quotes) |
alphastring_ ).setName("ID")
float_number = Combine(Optional(minus) +
OneOrMore(Word(nums + "."))).setName("float_number")
righthand_id = (float_number | ID ).setName("righthand_id")
port_angle = (at + ID).setName("port_angle")
port_location = (OneOrMore(Group(colon + ID)) |
Group(colon + lparen +
ID + comma + ID + rparen)).setName("port_location")
port = (Group(port_location + Optional(port_angle)) |
Group(port_angle + Optional(port_location))).setName("port")
node_id = (ID + Optional(port))
a_list = OneOrMore(ID + Optional(equals + righthand_id) +
Optional(comma.suppress())).setName("a_list")
attr_list = OneOrMore(lbrack.suppress() + Optional(a_list) +
rbrack.suppress()).setName("attr_list")
attr_stmt = (Group(graph_ | node_ | edge_) +
attr_list).setName("attr_stmt")
edgeop = (Literal("--") | Literal("->")).setName("edgeop")
stmt_list = Forward()
graph_stmt = Group(lbrace.suppress() + Optional(stmt_list) +
rbrace.suppress() +
Optional(semi.suppress())).setName("graph_stmt")
edge_point = Forward()
edgeRHS = OneOrMore(edgeop + edge_point)
edge_stmt = edge_point + edgeRHS + Optional(attr_list)
subgraph = Group(
subgraph_ + Optional(ID) + graph_stmt).setName("subgraph")
edge_point << Group(
subgraph | graph_stmt | node_id).setName('edge_point')
node_stmt = (
node_id + Optional(attr_list) +
Optional(semi.suppress())).setName("node_stmt")
assignment = (ID + equals + righthand_id).setName("assignment")
stmt = (assignment | edge_stmt | attr_stmt |
subgraph | graph_stmt | node_stmt).setName("stmt")
stmt_list << OneOrMore(stmt + Optional(semi.suppress()))
graphparser = OneOrMore(
(Optional(strict_) + Group((graph_ | digraph_)) +
Optional(ID) + graph_stmt).setResultsName("graph"))
singleLineComment = Group(
"//" + restOfLine) | Group("#" + restOfLine)
# actions
graphparser.ignore(singleLineComment)
graphparser.ignore(cStyleComment)
assignment.setParseAction(push_attr_list)
a_list.setParseAction(push_attr_list)
edge_stmt.setParseAction(push_edge_stmt)
node_stmt.setParseAction(push_node_stmt)
attr_stmt.setParseAction(push_default_stmt)
subgraph.setParseAction(push_subgraph_stmt)
graph_stmt.setParseAction(push_graph_stmt)
graphparser.setParseAction(push_top_graph_stmt)
return graphparser
def parse_dot_data(s):
"""Parse DOT description in (unicode) string `s`."""
global top_graphs
top_graphs = list()
try:
graphparser = graph_definition()
graphparser.parseWithTabs()
tokens = graphparser.parseString(s)
return list(tokens)
except ParseException as err:
        print(err.line)
        print(" " * (err.column - 1) + "^")
        print(err)
return None
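# A minimal usage sketch, assuming a well-formed DOT string; the helper name
# is only illustrative.
def _parse_example():
    """Parse a tiny DOT graph and return the resulting pydot graph objects."""
    dot_source = 'digraph G { a -> b [label="ab"]; b -> c; }'
    # parse_dot_data returns a list of graphs, or None on a parse error.
    return parse_dot_data(dot_source)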
|
|
"""Support for Google Calendar event device sensors."""
from __future__ import annotations
from datetime import timedelta
import logging
import re
from typing import cast, final
from aiohttp import web
from homeassistant.components import http
from homeassistant.const import HTTP_BAD_REQUEST, STATE_OFF, STATE_ON
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
time_period_str,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.util import dt
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "calendar"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup(hass, config):
"""Track states and offer events for calendars."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
hass.http.register_view(CalendarListView(component))
hass.http.register_view(CalendarEventView(component))
hass.components.frontend.async_register_built_in_panel(
"calendar", "calendar", "hass:calendar"
)
await component.async_setup(config)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
def get_date(date):
"""Get the dateTime from date or dateTime as a local."""
if "date" in date:
return dt.start_of_local_day(
dt.dt.datetime.combine(dt.parse_date(date["date"]), dt.dt.time.min)
)
return dt.as_local(dt.parse_datetime(date["dateTime"]))
def normalize_event(event):
"""Normalize a calendar event."""
normalized_event = {}
start = event.get("start")
end = event.get("end")
start = get_date(start) if start is not None else None
end = get_date(end) if end is not None else None
normalized_event["dt_start"] = start
normalized_event["dt_end"] = end
start = start.strftime(DATE_STR_FORMAT) if start is not None else None
end = end.strftime(DATE_STR_FORMAT) if end is not None else None
normalized_event["start"] = start
normalized_event["end"] = end
# cleanup the string so we don't have a bunch of double+ spaces
summary = event.get("summary", "")
normalized_event["message"] = re.sub(" +", "", summary).strip()
normalized_event["location"] = event.get("location", "")
normalized_event["description"] = event.get("description", "")
normalized_event["all_day"] = "date" in event["start"]
return normalized_event
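# For example, an all-day payload such as
# {"summary": "Trip", "start": {"date": "2021-06-01"}, "end": {"date": "2021-06-02"}}
# normalizes to a dict whose "all_day" flag is True, whose "message" is "Trip",
# and whose "start"/"end" are local-time strings formatted with DATE_STR_FORMAT.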
def calculate_offset(event, offset):
"""Calculate event offset.
Return the updated event with the offset_time included.
"""
summary = event.get("summary", "")
# check if we have an offset tag in the message
# time is HH:MM or MM
reg = f"{offset}([+-]?[0-9]{{0,2}}(:[0-9]{{0,2}})?)"
search = re.search(reg, summary)
if search and search.group(1):
time = search.group(1)
if ":" not in time:
if time[0] == "+" or time[0] == "-":
time = f"{time[0]}0:{time[1:]}"
else:
time = f"0:{time}"
offset_time = time_period_str(time)
summary = (summary[: search.start()] + summary[search.end() :]).strip()
event["summary"] = summary
else:
offset_time = dt.dt.timedelta() # default it
event["offset_time"] = offset_time
return event
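# For example, with offset "!!" a summary of "Dentist !!-15" is rewritten to
# "Dentist" and event["offset_time"] should become a timedelta of minus 15
# minutes; a summary without an offset tag gets a zero timedelta.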
def is_offset_reached(event):
"""Have we reached the offset time specified in the event title."""
start = get_date(event["start"])
if start is None or event["offset_time"] == dt.dt.timedelta():
return False
return start + event["offset_time"] <= dt.now(start.tzinfo)
class CalendarEventDevice(Entity):
"""Base class for calendar event entities."""
@property
def event(self):
"""Return the next upcoming event."""
raise NotImplementedError()
@final
@property
def state_attributes(self):
"""Return the entity state attributes."""
event = self.event
if event is None:
return None
event = normalize_event(event)
return {
"message": event["message"],
"all_day": event["all_day"],
"start_time": event["start"],
"end_time": event["end"],
"location": event["location"],
"description": event["description"],
}
@property
def state(self):
"""Return the state of the calendar event."""
event = self.event
if event is None:
return STATE_OFF
event = normalize_event(event)
start = event["dt_start"]
end = event["dt_end"]
if start is None or end is None:
return STATE_OFF
now = dt.now()
if start <= now < end:
return STATE_ON
return STATE_OFF
async def async_get_events(self, hass, start_date, end_date):
"""Return calendar events within a datetime range."""
raise NotImplementedError()
class CalendarEventView(http.HomeAssistantView):
"""View to retrieve calendar content."""
url = "/api/calendars/{entity_id}"
name = "api:calendars:calendar"
def __init__(self, component: EntityComponent) -> None:
"""Initialize calendar view."""
self.component = component
async def get(self, request, entity_id):
"""Return calendar events."""
entity = self.component.get_entity(entity_id)
start = request.query.get("start")
end = request.query.get("end")
if None in (start, end, entity):
return web.Response(status=HTTP_BAD_REQUEST)
try:
start_date = dt.parse_datetime(start)
end_date = dt.parse_datetime(end)
except (ValueError, AttributeError):
return web.Response(status=HTTP_BAD_REQUEST)
event_list = await entity.async_get_events(
request.app["hass"], start_date, end_date
)
return self.json(event_list)
class CalendarListView(http.HomeAssistantView):
"""View to retrieve calendar list."""
url = "/api/calendars"
name = "api:calendars"
def __init__(self, component: EntityComponent) -> None:
"""Initialize calendar view."""
self.component = component
async def get(self, request: web.Request) -> web.Response:
"""Retrieve calendar list."""
hass = request.app["hass"]
calendar_list: list[dict[str, str]] = []
for entity in self.component.entities:
state = hass.states.get(entity.entity_id)
calendar_list.append({"name": state.name, "entity_id": entity.entity_id})
return self.json(sorted(calendar_list, key=lambda x: cast(str, x["name"])))
|
|
import math
import re
import api.fake
from fuzzywuzzy import process
from flask import Blueprint, request, jsonify, render_template
from flask_weasyprint import HTML, render_pdf
from api.core import db
from api.utils import get_part_by_id, build_part_properties, is_number,\
check_addtl_info, get_items, get_item, delete_item, get_categories,\
engineering_notation
from api.decorators import requires_login, requires_roles, requires_keys,\
requires_debug
from api.models.parts import Part, IC, Manufacturer, ChipType,\
PartProperty, Unit, Kit
blueprint = Blueprint('parts', __name__, url_prefix='/parts')
FUZZ_THRESHOLD = 65
@blueprint.route('/', methods=['GET'])
def get_all():
return jsonify(get_items('parts', Part, request))
@blueprint.route('/<int:id>/', methods=['GET'])
def get(id):
return jsonify(get_item(id, 'part', Part))
@blueprint.route('/<int:id>/', methods=['DELETE'])
@requires_login
@requires_roles('parts_manager')
def delete_part(id):
return jsonify(delete_item(id, Part))
# We still don't have a better method for this!
@blueprint.route('/category/names/', methods=['GET'])
def get_part_cats():
return jsonify(success=True, cats=get_categories())
def frexp10(x):
exp = int(math.log10(x))
return x / 10**exp, exp
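# For example, frexp10(4700.0) is approximately (4.7, 3) and frexp10(47000.0)
# is approximately (4.7, 4); part_num_search below treats values with the same
# mantissa as matches regardless of magnitude.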
def part_num_search(n, category):
tmp = PartProperty.query.join(Part)\
.filter(PartProperty.primary.is_(True))\
.filter(PartProperty.value.isnot(None))\
.filter(Part.category == category)\
.all()
tmp = [prop for prop in tmp if
math.fabs(n - prop.value) / prop.value < 0.1 or
frexp10(n)[0] == frexp10(prop.value)[0]]
return [Part.query.get(prop.part_id).serialize for prop in tmp]
def part_attr_search(s, category):
tmp = process.extract(s, [i.name for i in Unit.query.all()])
tmp = [Unit.query.join(PartProperty).join(Part)
.filter(Part.category == category)
.filter(Unit.name == pair[0]).first() for
pair in tmp if pair[1] > FUZZ_THRESHOLD]
return [Part.query.join(PartProperty)
.filter(PartProperty.unit == u).first().serialize for
u in tmp if u]
def ic_attr_search(s, model, attr):
tmp = process.extract(s, [getattr(i, attr) for i in model.query.all()])
return [model.query.filter_by(**{attr: pair[0]}).first().serialize for
pair in tmp if pair[1] > FUZZ_THRESHOLD]
@blueprint.route('/search/', methods=['POST'])
@requires_keys('category', 'query')
def search():
errors = []
json = request.get_json(force=True)
parts = []
if not json['category'] in get_categories() or not json['query']:
errors.append('Bad Query!')
if not errors and json['category'] == 'IC':
parts += ic_attr_search(json['query'], IC, 'mftr_part_no')
parts += ic_attr_search(json['query'], IC, 'name')
parts += ic_attr_search(json['query'], IC, 'addtlInfo')
elif not errors:
parts += part_attr_search(json['query'], json['category'])
n = re.sub('[^0-9]', '', json['query'])
n = float(n) if is_number(n) else None
if n:
parts += part_num_search(n, json['category'])
return jsonify(success=not errors, errors=errors, parts=parts)
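# Illustrative request sketch (not part of the original blueprint): a client
# might POST {"category": "RESISTOR", "query": "4.7k"} to /parts/search/; the
# category name is hypothetical and must exist in get_categories(), and the
# digits of the query feed the numeric lookup in part_num_search above.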
@requires_keys('name', 'mftr', 'part_no', 'chip_type', 'location', 'stock')
def new_ic():
errors = []
json = request.get_json(force=True)
part_id = None
mftr = Manufacturer.query.filter_by(name=json['mftr']).first()
chip_type = ChipType.query.filter_by(name=json['chip_type']).first()
if not mftr:
errors.append('Manufacturer does not exist')
if not chip_type:
errors.append('Chip Type does not exist')
if not errors:
ic = IC(
name=json['name'],
stock=json['stock'],
price=json['price'],
location=json['location'],
mftr_id=mftr.id,
mftr_part_no=json['part_no'],
chip_type_id=chip_type.id,
addtlInfo=json['main_info']['name']
)
db.session.add(ic)
db.session.commit()
part_id = ic.id + IC.IC_START_ID
part = Part(category='IC', price=ic.price, id=part_id)
ic.part_id = part_id
db.session.add(part)
db.session.commit()
return jsonify(success=not errors, errors=errors, id=part_id)
@blueprint.route('/', methods=['POST'])
@requires_login
@requires_roles('parts_manager')
@requires_keys('category', 'price', 'main_info')
def new():
errors = []
json = request.get_json(force=True)
part_id = None
if not is_number(json['price']) or int(json['price']) <= 0:
errors.append('Price must be a positive number.')
if not json['category'] or json['category'] == 'Choose...':
errors.append('Invalid category.')
if not errors and json['category'] == 'IC':
return new_ic()
elif not errors:
if not check_addtl_info([json['main_info']]):
errors.append('Bad main_info structure')
elif 'addtl_info' in json.keys() and \
not check_addtl_info(json['addtl_info']):
errors.append('Bad addtl_info structure')
if not errors:
part = Part(json['category'], json['price'])
db.session.add(part)
db.session.commit()
part_id = part.id
build_part_properties(part, [json['main_info']], True)
if 'addtl_info' in json.keys():
build_part_properties(part, json['addtl_info'])
db.session.commit()
return jsonify(success=not errors, errors=errors, id=part_id)
@blueprint.route('/fake/', methods=['POST'])
@requires_debug
@requires_keys('count', 'type')
def fake():
json = request.get_json(force=True)
n = json['count']
if json['type'] == 'IC':
ics = [api.fake.ic() for _ in xrange(n)]
return jsonify(success=True, errors=[],
ids=[i.id + IC.IC_START_ID for i in ics])
else:
parts = [api.fake.static_part(json['type']) for _ in xrange(n)]
return jsonify(success=True, errors=[],
ids=[i.id for i in parts])
@blueprint.route('/init/', methods=['POST'])
@requires_debug
def init():
for _ in xrange(50):
api.fake.static_part('RESISTOR')
for _ in xrange(30):
api.fake.static_part('CAPACITOR')
for _ in xrange(5):
api.fake.static_part('DIODE')
for _ in xrange(5):
api.fake.static_part('POT')
for _ in xrange(50):
api.fake.ic()
return jsonify(success=True, errors=[])
@requires_keys('name', 'mftr', 'part_no', 'chip_type', 'location', 'stock')
def edit_ic(id):
errors = []
json = request.get_json(force=True)
ic = IC.query.get(id - IC.IC_START_ID)
ic.name = json['name']
ic.stock = json['stock']
ic.price = json['price']
ic.location = json['location']
ic.mftr_id = Manufacturer.query.filter_by(name=json['mftr']).first().id
ic.mftr_part_no = json['part_no']
ic.chip_type_id = ChipType.query.\
filter_by(name=json['chip_type']).first().id
ic.addtlInfo = json['main_info']['name']
ic.part.price = json['price']
db.session.add(ic)
db.session.commit()
return jsonify(success=not errors, errors=errors, id=id)
@blueprint.route('/<int:id>/', methods=['PUT'])
@requires_login
@requires_roles('parts_manager')
@requires_keys('category', 'price', 'main_info')
def edit_part(id):
errors = []
json = request.get_json(force=True)
part = get_part_by_id(id)
if part is None:
errors.append('Does not exist.')
if not is_number(json['price']) or int(json['price']) <= 0:
errors.append('Price must be a positive number.')
if not errors and json['category'] == 'IC':
return edit_ic(id)
elif not errors:
part.category = json['category']
part.price = json['price']
# db.session.delete(part.properties)
PartProperty.query.filter_by(part_id=part.id).delete()
db.session.add(part)
db.session.commit()
build_part_properties(part, [json['main_info']], True)
if 'addtl_info' in json.keys():
build_part_properties(part, json['addtl_info'])
db.session.commit()
return jsonify(success=not errors, errors=errors, id=id)
@blueprint.route('/catalog.pdf')
@requires_login
@requires_roles('officer')
def catalog():
cats = [i for i in get_categories()
if i not in ('IC', 'RESISTOR', 'CAPACITOR')]
parts = {}
# Sort resistors and capacitors by their primary value e.g. Ohm or F
parts['RESISTOR'] = [i.serialize for i in
Part.query.join(PartProperty)
.filter(Part.category == 'RESISTOR')
.filter(PartProperty.primary)
.order_by(PartProperty.value)]
parts['CAPACITOR'] = [i.serialize for i in
Part.query.join(PartProperty)
.filter(Part.category == 'CAPACITOR')
.filter(PartProperty.primary)
.order_by(PartProperty.value)]
for cat in cats:
parts[cat] = [i.serialize for i in
Part.query.filter_by(category=cat)
.order_by(Part.id).all()]
return render_pdf(HTML(string=render_template(
'catalog.html',
parts=parts,
ics=[i.serialize for i in IC.query.order_by(IC.id).all()],
kits=[i.serialize for i in Kit.query.order_by(Kit.id).all()],
engineering_notation=engineering_notation
)))
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from bs4 import BeautifulSoup
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.http import QueryDict
from cms.api import add_plugin
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.plugins import build_plugin_tree
from cmsplugin_cascade.models import CascadeElement, CascadeClipboard
from cmsplugin_cascade.bootstrap3.container import (BootstrapContainerPlugin, BootstrapRowPlugin,
BootstrapRowForm, BootstrapColumnPlugin, BS3_BREAKPOINT_KEYS)
from .test_base import CascadeTestCase
class ClipboardPluginTest(CascadeTestCase):
maxDiff = None
identifier = "Test saved clipboard"
placeholder_data = {'plugins': [['BootstrapContainerPlugin', {
'glossary': {'media_queries': {'md': ['(min-width: 992px)'], 'sm': ['(max-width: 992px)']},
'container_max_widths': {'md': 970, 'sm': 750}, 'fluid': '',
'breakpoints': ['sm', 'md']}},
[['BootstrapRowPlugin', {'glossary': {}}, [
['BootstrapColumnPlugin',
{'glossary': {'sm-responsive-utils': '',
'md-column-offset': '',
'sm-column-width': 'col-sm-3',
'md-responsive-utils': '',
'md-column-ordering': '',
'sm-column-ordering': '',
'sm-column-offset': 'col-sm-offset-1',
'container_max_widths': {'md': 212.5,
'sm': 157.5},
'md-column-width': ''}}, []],
['BootstrapColumnPlugin', {
'glossary': {'sm-responsive-utils': 'hidden-sm',
'md-column-offset': '',
'sm-column-width': 'col-sm-4',
'md-responsive-utils': '',
'md-column-ordering': '',
'sm-column-ordering': '',
'sm-column-offset': '',
'container_max_widths': {'md': 293.33,
'sm': 220.0},
'md-column-width': ''}}, []],
['BootstrapColumnPlugin', {
'glossary': {
'container_max_widths': {
'md': 293.33,
'sm': 220.0},
'sm-column-width': 'col-sm-4'
}},
[]]]]]]]}
def setUp(self):
super(ClipboardPluginTest, self).setUp()
UserModel = get_user_model()
self.admin_user = UserModel.objects.get(username='admin')
# add a Bootstrap Container Plugin
container_model = add_plugin(self.placeholder, BootstrapContainerPlugin, 'en',
glossary={'breakpoints': BS3_BREAKPOINT_KEYS})
self.assertIsInstance(container_model, CascadeElement)
container_plugin = container_model.get_plugin_class_instance(self.admin_site)
self.assertIsInstance(container_plugin, BootstrapContainerPlugin)
ModelForm = container_plugin.get_form(self.request, container_model)
post_data = QueryDict('', mutable=True)
post_data.setlist('breakpoints', ['sm', 'md'])
form = ModelForm(post_data, None, instance=container_model)
html = form.as_p()
self.assertInHTML(
'<input id="id_glossary_breakpoints_0" name="breakpoints" type="checkbox" value="xs" />',
html)
self.assertInHTML(
'<input checked="checked" id="id_glossary_breakpoints_2" name="breakpoints" type="checkbox" value="md" />',
html)
self.assertInHTML('<input id="id_glossary_fluid" name="fluid" type="checkbox" />', html)
container_plugin.save_model(self.request, container_model, form, False)
self.assertListEqual(container_model.glossary['breakpoints'], ['sm', 'md'])
self.assertTrue('fluid' in container_model.glossary)
self.assertEqual(str(container_model), 'for tablets, laptops')
# add a RowPlugin with 3 Columns
row_model = add_plugin(self.placeholder, BootstrapRowPlugin, 'en', target=container_model)
row_plugin = row_model.get_plugin_class_instance()
row_change_form = BootstrapRowForm({'num_children': 3})
row_change_form.full_clean()
row_plugin.save_model(self.request, row_model, row_change_form, False)
self.assertDictEqual(row_model.glossary, {})
self.assertIsInstance(row_model, CascadeElement)
self.assertEqual(str(row_model), 'with 3 columns')
plugin_list = [container_model, row_model]
columns_qs = CascadeElement.objects.filter(parent_id=row_model.id)
self.assertEqual(columns_qs.count(), 3)
row_data = []
for column_model in columns_qs:
self.assertIsInstance(column_model, CascadeElement)
column_plugin = column_model.get_plugin_class_instance()
self.assertIsInstance(column_plugin, BootstrapColumnPlugin)
self.assertEqual(column_model.parent.id, row_model.id)
self.assertEqual(str(column_model), 'default width: 4 units')
plugin_list.append(column_model)
row_data.append(['BootstrapColumnPlugin', {'glossary': column_model.glossary}, []])
# container_data = ['BootstrapRowPlugin', {'glossary': row_model.glossary}, row_data]
# Render the Container Plugin with all of its children
build_plugin_tree(plugin_list)
html = self.get_html(container_model, self.get_request_context())
self.assertHTMLEqual(html, '<div class="container"><div class="row">' +
'<div class="col-sm-4"></div><div class="col-sm-4"></div>' +
'<div class="col-sm-4"></div>' +
'</div></div>')
# change data inside the first column
column_model = columns_qs[0]
delattr(column_model, '_inst')
column_plugin = column_model.get_plugin_class_instance(self.admin_site)
column_plugin.cms_plugin_instance = column_model
post_data = QueryDict('', mutable=True)
post_data.update({'sm-column-offset': 'col-sm-offset-1', 'sm-column-width': 'col-sm-3'})
ModelForm = column_plugin.get_form(self.request, column_model)
form = ModelForm(post_data, None, instance=column_model)
self.assertTrue(form.is_valid())
column_plugin.save_model(self.request, column_model, form, True)
# change data inside the second column
column_model = columns_qs[1]
delattr(column_model, '_inst')
column_plugin = column_model.get_plugin_class_instance(self.admin_site)
column_plugin.cms_plugin_instance = column_model
post_data = QueryDict('', mutable=True)
post_data.update({'sm-responsive-utils': 'hidden-sm', 'sm-column-width': 'col-sm-4'})
ModelForm = column_plugin.get_form(self.request, column_model)
form = ModelForm(post_data, None, instance=column_model)
self.assertTrue(form.is_valid())
column_plugin.save_model(self.request, column_model, form, False)
html = self.get_html(container_model, self.get_request_context())
self.assertHTMLEqual(html, '<div class="container"><div class="row">' +
'<div class="col-sm-3 col-sm-offset-1"></div>' +
'<div class="col-sm-4 hidden-sm"></div><div class="col-sm-4"></div>' +
'</div></div>')
def test_save_clipboard(self):
with self.login_user_context(self.admin_user):
request = self.get_request('/')
request.toolbar = CMSToolbar(request)
self.assertIsNotNone(request.toolbar.clipboard)
data = {'source_placeholder_id': self.placeholder.pk, 'source_plugin_id': '',
'source_language': 'en', 'target_plugin_id': '',
'target_placeholder_id': request.toolbar.clipboard.pk, 'target_language': 'en'}
# check that clipboard is empty
self.assertEqual(request.toolbar.clipboard.cmsplugin_set.count(), 0)
# copy plugins from placeholder to clipboard
copy_plugins_url = reverse('admin:cms_page_copy_plugins') # + '?cms_path=%2Fen%2F'
response = self.client.post(copy_plugins_url, data)
self.assertEqual(response.status_code, 200)
# serialize and persist clipboard content
add_clipboard_url = reverse('admin:cmsplugin_cascade_cascadeclipboard_add')
data = {'identifier': self.identifier, 'save_clipboard': 'Save', 'data': {}}
response = self.client.post(add_clipboard_url, data)
self.assertEqual(response.status_code, 302)
change_clipboard_url = response['location']
response = self.client.get(change_clipboard_url, data)
soup = BeautifulSoup(response.content, 'html.parser')
ul = soup.find('ul', class_='messagelist')
self.assertEqual(ul.li.text, 'The Persited Clipboard Content "Test saved clipboard" was added successfully. You may edit it again below.')
self.assertEqual(CascadeClipboard.objects.all().count(), 1)
# now examine the serialized data in the clipboard
cascade_clipboard = CascadeClipboard.objects.get(identifier=self.identifier)
self.remove_primary_keys(cascade_clipboard.data['plugins'])
self.assertDictEqual(cascade_clipboard.data, self.placeholder_data)
def test_restore_clipboard(self):
with self.login_user_context(self.admin_user):
cascade_clipboard = CascadeClipboard.objects.create(identifier=self.identifier, data=self.placeholder_data)
cascade_clipboard.save()
request = self.get_request('/')
request.toolbar = CMSToolbar(request)
self.assertIsNotNone(request.toolbar.clipboard)
# check that clipboard is empty
self.assertEqual(request.toolbar.clipboard.cmsplugin_set.count(), 0)
# copy plugins from CascadeClipboard to CMS clipboard
change_clipboard_url = reverse('admin:cmsplugin_cascade_cascadeclipboard_change', args=(cascade_clipboard.pk,))
data = {'identifier': self.identifier, 'restore_clipboard': 'Restore', 'data': json.dumps(self.placeholder_data)}
response = self.client.post(change_clipboard_url, data)
self.assertEqual(response.status_code, 302)
change_clipboard_url = response['location']
response = self.client.get(change_clipboard_url, data)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
ul = soup.find('ul', class_='messagelist')
self.assertEqual(ul.li.text, 'The Persited Clipboard Content "Test saved clipboard" was changed successfully. You may edit it again below.')
# check if clipboard has been populated with plugins from serialized data
ref_plugin = request.toolbar.clipboard.get_plugins().first()
self.assertEqual(ref_plugin.plugin_type, 'PlaceholderPlugin')
inst = ref_plugin.get_plugin_instance()[0]
plugins = inst.placeholder_ref.get_plugins()
self.assertEqual(plugins.count(), 5)
self.assertEqual(plugins[0].plugin_type, 'BootstrapContainerPlugin')
self.assertEqual(plugins[1].plugin_type, 'BootstrapRowPlugin')
self.assertEqual(plugins[2].plugin_type, 'BootstrapColumnPlugin')
def remove_primary_keys(self, plugin_data):
for plugin_type, data, children_data in plugin_data:
data.pop('pk', None)
self.remove_primary_keys(children_data)
|
|
"""
Functions for identifying peaks in signals.
"""
import numpy as np
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
def _boolrelextrema(data, comparator,
axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
comparator(data[n],data[n+1:n+order+1]) = True.
Parameters
----------
data: ndarray
comparator: function
function to use to compare two data points.
Should take 2 numbers as arguments
axis: int, optional
axis over which to select from `data`
order: int, optional
How many points on each side to require
a `comparator`(n,n+x) = True.
mode: string, optional
How the edges of the vector are treated.
'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema: ndarray
Indices of the extrema, as boolean array
of same shape as data. True for an extrema,
False else.
See also
--------
argrelmax,argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative minima of `data`.
See also
--------
argrelextrema,argrelmax
"""
return argrelextrema(data, np.less, axis, order, mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative maxima of `data`.
See also
--------
argrelextrema,argrelmin
"""
return argrelextrema(data, np.greater, axis, order, mode)
def argrelextrema(data, comparator,
axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`
Returns
-------
extrema: ndarray
Indices of the extrema, as an array
of integers (same format as argmin, argmax).
See also
--------
argrelmin, argrelmax
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
if ~results.any():
return (np.array([]),) * 2
else:
return np.where(results)
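# Illustrative sketch (not part of the original module): relative extrema of a
# small 1-D array using the wrappers above.
if __name__ == '__main__':
    _y = np.array([1, 3, 2, 5, 4])
    print(argrelmax(_y))  # expected: (array([1, 3]),)
    print(argrelmin(_y))  # expected: (array([2]),)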
def _identify_ridge_lines(matr, max_distances, gap_thresh):
"""
Identify ridges in the 2D matrix. Expect that the width of
the wavelet feature increases with increasing row number.
Parameters
----------
matr: 2-D ndarray
Matrix in which to identify ridge lines.
max_distances: 1-D sequence
At each row, a ridge line is only connected
if the relative max at row[n] is within
`max_distances`[n] from the relative max at row[n+1].
gap_thresh: int
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if
there are more than `gap_thresh` points without connecting
a new relative maximum.
Returns
-------
ridge_lines: tuple
tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the ii-th
ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none found.
Each ridge-line will be sorted by row (increasing), but the order
of the ridge lines is not specified
References
----------
Bioinformatics (2006) 22 (17): 2059-2065.
doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> data = np.random.rand(5,5)
>>> ridge_lines = _identify_ridge_lines(data, np.ones(5), 1)
Notes
-----
This function is intended to be used in conjunction with `cwt`
as part of find_peaks_cwt.
"""
if(len(max_distances) < matr.shape[0]):
raise ValueError('Max_distances must have at least as many rows as matr')
all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
#Highest row for which there are any relative maxima
has_relmax = np.where(all_max_cols.any(axis=1))[0]
if(len(has_relmax) == 0):
return []
start_row = has_relmax[-1]
#Each ridge line is a 3-tuple:
#rows, cols,Gap number
ridge_lines = [[[start_row],
[col],
0] for col in np.where(all_max_cols[start_row])[0]]
final_lines = []
rows = np.arange(start_row - 1, -1, -1)
cols = np.arange(0, matr.shape[1])
for row in rows:
this_max_cols = cols[all_max_cols[row]]
#Increment gap number of each line,
#set it to zero later if appropriate
for line in ridge_lines:
line[2] += 1
#XXX These should always be all_max_cols[row]
#But the order might be different. Might be an efficiency gain
#to make sure the order is the same and avoid this iteration
prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
#Look through every relative maximum found at current row
#Attempt to connect them with existing ridge lines.
new_lines = []
for ind, col in enumerate(this_max_cols):
"""
If there is a previous ridge line within
the max_distance to connect to, do so.
Otherwise start a new one.
"""
line = None
if(len(prev_ridge_cols) > 0):
diffs = np.abs(col - prev_ridge_cols)
closest = np.argmin(diffs)
if diffs[closest] <= max_distances[row]:
line = ridge_lines[closest]
if(line is not None):
#Found a point close enough, extend current ridge line
line[1].append(col)
line[0].append(row)
line[2] = 0
else:
new_line = [[row],
[col],
0]
ridge_lines.append(new_line)
#Remove the ridge lines with gap_number too high
#XXX Modifying a list while iterating over it.
#Should be safe, since we iterate backwards, but
#still tacky.
for ind in xrange(len(ridge_lines) - 1, -1, -1):
line = ridge_lines[ind]
if line[2] > gap_thresh:
final_lines.append(line)
del ridge_lines[ind]
out_lines = []
for line in (final_lines + ridge_lines):
sortargs = np.array(np.argsort(line[0]))
rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
rows[sortargs] = line[0]
cols[sortargs] = line[1]
out_lines.append([rows, cols])
return out_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
-------------
cwt : 2-D ndarray
Continuous wavelet transform from which
the ridge_lines were defined
ridge_lines: 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively)
window_size: int, optional
Size of window to use to calculate noise floor.
Default is `cwt`.shape[1]/20
min_length: int, optional
Minimum length a ridge line needs to be acceptable.
Default is `cwt`.shape[0]/4, i.e., 1/4th the number of widths.
min_snr: float, optional
Minimum SNR. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (`cwt`[0,loc]), the noise is
the `noise_perc`th percentile of datapoints contained within
a window of `window_size` around `cwt`[0,loc]
noise_perc: float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
hf_window = window_size / 2
#Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window = np.arange(max([ind - hf_window, 0]), min([ind + hf_window, num_points]))
window = window.astype(int)
noises[ind] = scoreatpercentile(row_one[window], per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return filter(filt_func, ridge_lines)
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, gap_thresh=None,
min_length=None, min_snr=1, noise_perc=10):
"""
Attempt to find the peaks in the given 1-D array `vector`.
The general approach is to smooth `vector` by convolving it with `wavelet(width)`
for each width in `widths`. Relative maxima which appear at enough length scales,
and with sufficiently high SNR, are accepted.
Parameters
----------
vector: 1-D ndarray
widths: 1-D sequence
Widths to use for calculating the CWT matrix. In general,
this range should cover the expected width of peaks of interest.
wavelet: function
Should take a single variable and return a 1d array to convolve
with `vector`. Should be normalized to unit area. Default
is the ricker wavelet
max_distances: 1-D ndarray, optional
Default `widths`/4. See identify_ridge_lines
gap_thresh: float, optional
Default 2. See identify_ridge_lines
min_length: int, optional
Default None. See filter_ridge_lines
min_snr: float, optional
Default 1. See filter_ridge_lines
noise_perc: float, optional
Default 10. See filter_ridge_lines
Notes
---------
This approach was designed for finding sharp peaks among noisy data, however
with proper parameter selection it should function well for different
peak shapes.
The algorithm is as follows:
1. Perform a continuous wavelet transform on `vector`, for the supplied
`widths`. This is a convolution of `vector` with `wavelet(width)` for
each width in `widths`. See `cwt`
2. Identify "ridge lines" in the cwt matrix. These are relative maxima
at each row, connected across adjacent rows. See identify_ridge_lines
3. Filter the ridge_lines using filter_ridge_lines.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
Examples
--------
>>> xs = np.arange(0, np.pi, 0.05)
>>> data = np.sin(xs)
>>> peakind = find_peaks_cwt(data, np.arange(1,10))
>>> peakind, xs[peakind],data[peakind]
([32], array([ 1.6]), array([ 0.9995736]))
"""
if gap_thresh is None:
gap_thresh = np.ceil(widths[0])
if max_distances is None:
max_distances = widths / 4.0
if wavelet is None:
wavelet = ricker
cwt_dat = cwt(vector, wavelet, widths)
ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
min_snr=min_snr, noise_perc=noise_perc)
max_locs = map(lambda x: x[1][0], filtered)
return sorted(max_locs)
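# Practical note (not from the original docstring): `widths` should roughly
# bracket the expected peak width in samples, e.g. np.arange(5, 60) for peaks a
# few dozen samples wide, and raising `min_snr` above 1 discards more of the
# weaker ridge lines found in noisy traces.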
|
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a spatial analysis against an arbitrary library.
To use, build the 'binary_size_tool' target. Then run this tool, passing
in the location of the library to be analyzed along with any other options
you desire.
"""
import collections
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import binary_size_utils
# This path change is not beautiful. Temporary (I hope) measure until
# the chromium project has figured out a proper way to organize the
# library of python tools. http://crbug.com/375725
elf_symbolizer_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'..',
'build',
'android',
'pylib'))
sys.path.append(elf_symbolizer_path)
import symbols.elf_symbolizer as elf_symbolizer # pylint: disable=F0401
# Node dictionary keys. These are output in json read by the webapp so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'
# The display name of the bucket where we put symbols without path.
NAME_NO_PATH_BUCKET = '(No Path)'
# Try to keep data buckets smaller than this to avoid killing the
# graphing lib.
BIG_BUCKET_LIMIT = 3000
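# For example, AddSymbolIntoFileNode below turns a 128-byte code symbol into a
# leaf node of roughly the form {'n': 'Foo::Bar()', 'k': 's', 't': 't',
# 'value': 128} using the keys above; the symbol name shown is hypothetical.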
# TODO(andrewhayden): Only used for legacy reports. Delete.
def FormatBytes(byte_count):
"""Pretty-print a number of bytes."""
if byte_count > 1e6:
byte_count = byte_count / 1.0e6
return '%.1fm' % byte_count
if byte_count > 1e3:
byte_count = byte_count / 1.0e3
return '%.1fk' % byte_count
return str(byte_count)
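# For example, FormatBytes(1536) returns '1.5k', FormatBytes(2500000) returns
# '2.5m', and values of 1000 bytes or less come back as plain decimal strings.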
# TODO(andrewhayden): Only used for legacy reports. Delete.
def SymbolTypeToHuman(symbol_type):
"""Convert a symbol type as printed by nm into a human-readable name."""
return {'b': 'bss',
'd': 'data',
'r': 'read-only data',
't': 'code',
'w': 'weak symbol',
'v': 'weak symbol'}[symbol_type]
def _MkChild(node, name):
child = node[NODE_CHILDREN_KEY].get(name)
if child is None:
child = {NODE_NAME_KEY: name,
NODE_CHILDREN_KEY: {}}
node[NODE_CHILDREN_KEY][name] = child
return child
def SplitNoPathBucket(node):
"""NAME_NO_PATH_BUCKET can be too large for the graphing lib to
handle. Split it into sub-buckets in that case."""
root_children = node[NODE_CHILDREN_KEY]
if NAME_NO_PATH_BUCKET in root_children:
no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
old_children = no_path_bucket[NODE_CHILDREN_KEY]
count = 0
for symbol_type, symbol_bucket in old_children.iteritems():
count += len(symbol_bucket[NODE_CHILDREN_KEY])
if count > BIG_BUCKET_LIMIT:
new_children = {}
no_path_bucket[NODE_CHILDREN_KEY] = new_children
current_bucket = None
index = 0
for symbol_type, symbol_bucket in old_children.iteritems():
for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].iteritems():
if index % BIG_BUCKET_LIMIT == 0:
group_no = (index / BIG_BUCKET_LIMIT) + 1
current_bucket = _MkChild(no_path_bucket,
'%s subgroup %d' % (NAME_NO_PATH_BUCKET,
group_no))
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
node[NODE_TYPE_KEY] = 'p' # p for path
index += 1
symbol_size = value[NODE_SYMBOL_SIZE_KEY]
AddSymbolIntoFileNode(current_bucket, symbol_type,
symbol_name, symbol_size)
def MakeChildrenDictsIntoLists(node):
largest_list_len = 0
if NODE_CHILDREN_KEY in node:
largest_list_len = len(node[NODE_CHILDREN_KEY])
child_list = []
for child in node[NODE_CHILDREN_KEY].itervalues():
child_largest_list_len = MakeChildrenDictsIntoLists(child)
if child_largest_list_len > largest_list_len:
largest_list_len = child_largest_list_len
child_list.append(child)
node[NODE_CHILDREN_KEY] = child_list
return largest_list_len
def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
"""Puts symbol into the file path node |node|.
Returns the number of levels added to the tree (always 2)."""
# 'node' is the file node and first step is to find its symbol-type bucket.
node[NODE_LAST_PATH_ELEMENT_KEY] = True
node = _MkChild(node, symbol_type)
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'b'
node[NODE_SYMBOL_TYPE_KEY] = symbol_type
node[NODE_TYPE_KEY] = 'b' # b for bucket
# 'node' is now the symbol-type bucket. Make the child entry.
node = _MkChild(node, symbol_name)
if NODE_CHILDREN_KEY in node:
if node[NODE_CHILDREN_KEY]:
logging.warning('A container node is used as a symbol for %s.' % symbol_name)
# This is going to be used as a leaf so no use for child list.
del node[NODE_CHILDREN_KEY]
node[NODE_SYMBOL_SIZE_KEY] = symbol_size
node[NODE_SYMBOL_TYPE_KEY] = symbol_type
node[NODE_TYPE_KEY] = 's' # s for symbol
return 2 # Depth of the added subtree.
def MakeCompactTree(symbols):
result = {NODE_NAME_KEY: '/',
NODE_CHILDREN_KEY: {},
NODE_TYPE_KEY: 'p',
NODE_MAX_DEPTH_KEY: 0}
seen_symbol_with_path = False
for symbol_name, symbol_type, symbol_size, file_path in symbols:
if 'vtable for ' in symbol_name:
symbol_type = '@' # hack to categorize these separately
# Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz']
if file_path:
file_path = os.path.normpath(file_path)
seen_symbol_with_path = True
else:
file_path = NAME_NO_PATH_BUCKET
if file_path.startswith('/'):
file_path = file_path[1:]
path_parts = file_path.split('/')
# Find pre-existing node in tree, or update if it already exists
node = result
depth = 0
while len(path_parts) > 0:
path_part = path_parts.pop(0)
if len(path_part) == 0:
continue
depth += 1
node = _MkChild(node, path_part)
assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
node[NODE_TYPE_KEY] = 'p' # p for path
depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size)
result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)
if not seen_symbol_with_path:
logging.warning('Symbols lack paths. Data will not be structured.')
# The (no path) bucket can be extremely large if we failed to get
# path information. Split it into subgroups if needed.
SplitNoPathBucket(result)
largest_list_len = MakeChildrenDictsIntoLists(result)
if largest_list_len > BIG_BUCKET_LIMIT:
logging.warning('There are sections with %d nodes. '
'Results might be unusable.' % largest_list_len)
return result
# TODO(andrewhayden): Only used for legacy reports. Delete.
def TreeifySymbols(symbols):
"""Convert symbols into a path-based tree, calculating size information
along the way.
The result is a dictionary that contains two kinds of nodes:
1. Leaf nodes, representing source code locations (e.g., c++ files)
These nodes have the following dictionary entries:
sizes: a dictionary whose keys are categories (such as code, data,
vtable, etc.) and whose values are the size, in bytes, of
those categories;
size: the total size, in bytes, of all the entries in the sizes dict
2. Non-leaf nodes, representing directories
These nodes have the following dictionary entries:
children: a dictionary whose keys are names (path entries; either
directory or file names) and whose values are other nodes;
size: the total size, in bytes, of all the leaf nodes that are
contained within the children dict (recursively expanded)
The result object is itself a dictionary that represents the common ancestor
of all child nodes, e.g. a path to which all other nodes beneath it are
relative. The 'size' attribute of this dict yields the sum of the size of all
leaf nodes within the data structure.
"""
dirs = {'children': {}, 'size': 0}
for sym, symbol_type, size, path in symbols:
dirs['size'] += size
if path:
path = os.path.normpath(path)
if path.startswith('/'):
path = path[1:]
parts = None
if path:
parts = path.split('/')
if parts:
assert path
file_key = parts.pop()
tree = dirs
try:
# Traverse the tree to the parent of the file node, creating as needed
for part in parts:
assert part != ''
if part not in tree['children']:
tree['children'][part] = {'children': {}, 'size': 0}
tree = tree['children'][part]
tree['size'] += size
# Get (creating if necessary) the node for the file
# This node doesn't have a 'children' attribute
if file_key not in tree['children']:
tree['children'][file_key] = {'sizes': collections.defaultdict(int),
'size': 0}
tree = tree['children'][file_key]
tree['size'] += size
# Accumulate size into a bucket within the file
symbol_type = symbol_type.lower()
if 'vtable for ' in sym:
tree['sizes']['[vtable]'] += size
elif 'r' == symbol_type:
tree['sizes']['[rodata]'] += size
elif 'd' == symbol_type:
tree['sizes']['[data]'] += size
elif 'b' == symbol_type:
tree['sizes']['[bss]'] += size
elif 't' == symbol_type:
# 'text' in binary parlance means 'code'.
tree['sizes']['[code]'] += size
elif 'w' == symbol_type:
tree['sizes']['[weak]'] += size
else:
tree['sizes']['[other]'] += size
except:
print >> sys.stderr, sym, parts, file_key
raise
else:
key = 'symbols without paths'
if key not in dirs['children']:
dirs['children'][key] = {'sizes': collections.defaultdict(int),
'size': 0}
tree = dirs['children'][key]
subkey = 'misc'
if (sym.endswith('::__FUNCTION__') or
sym.endswith('::__PRETTY_FUNCTION__')):
subkey = '__FUNCTION__'
elif sym.startswith('CSWTCH.'):
subkey = 'CSWTCH'
elif '::' in sym:
subkey = sym[0:sym.find('::') + 2]
tree['sizes'][subkey] = tree['sizes'].get(subkey, 0) + size
tree['size'] += size
return dirs
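# For example, a single 24-byte code symbol located at src/foo/bar.cc produces
# nested 'src' -> 'foo' -> 'bar.cc' children, each carrying size 24, with the
# leaf's sizes dict holding {'[code]': 24}.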
# TODO(andrewhayden): Only used for legacy reports. Delete.
def JsonifyTree(tree, name):
"""Convert TreeifySymbols output to a JSON treemap.
The format is very similar, with the notable exceptions being
lists of children instead of maps and some different attribute names."""
children = []
css_class_map = {
'[vtable]': 'vtable',
'[rodata]': 'read-only_data',
'[data]': 'data',
'[bss]': 'bss',
'[code]': 'code',
'[weak]': 'weak_symbol'
}
if 'children' in tree:
# Non-leaf node. Recurse.
for child_name, child in tree['children'].iteritems():
children.append(JsonifyTree(child, child_name))
else:
# Leaf node; dump per-file stats as entries in the treemap
for kind, size in tree['sizes'].iteritems():
child_json = {'name': kind + ' (' + FormatBytes(size) + ')',
'data': { '$area': size }}
css_class = css_class_map.get(kind)
if css_class is not None:
child_json['data']['$symbol'] = css_class
children.append(child_json)
# Sort children by size, largest to smallest.
children.sort(key=lambda child: -child['data']['$area'])
# For leaf nodes, the 'size' attribute is the size of the leaf;
# Non-leaf nodes don't really have a size, but their 'size' attribute is
# the sum of the sizes of all their children.
return {'name': name + ' (' + FormatBytes(tree['size']) + ')',
'data': { '$area': tree['size'] },
'children': children }
def DumpCompactTree(symbols, outfile):
tree_root = MakeCompactTree(symbols)
with open(outfile, 'w') as out:
out.write('var tree_data = ')
json.dump(tree_root, out)
print('Writing %d bytes json' % os.path.getsize(outfile))
# TODO(andrewhayden): Only used for legacy reports. Delete.
def DumpTreemap(symbols, outfile):
dirs = TreeifySymbols(symbols)
out = open(outfile, 'w')
try:
out.write('var kTree = ' + json.dumps(JsonifyTree(dirs, '/')))
finally:
out.flush()
out.close()
# TODO(andrewhayden): Only used for legacy reports. Delete.
def DumpLargestSymbols(symbols, outfile, n):
# a list of (sym, symbol_type, size, path); sort by size.
symbols = sorted(symbols, key=lambda x: -x[2])
dumped = 0
out = open(outfile, 'w')
try:
out.write('var largestSymbols = [\n')
for sym, symbol_type, size, path in symbols:
if symbol_type in ('b', 'w'):
continue # skip bss and weak symbols
if path is None:
path = ''
entry = {'size': FormatBytes(size),
'symbol': sym,
'type': SymbolTypeToHuman(symbol_type),
'location': path }
out.write(json.dumps(entry))
out.write(',\n')
dumped += 1
if dumped >= n:
return
finally:
out.write('];\n')
out.flush()
out.close()
def MakeSourceMap(symbols):
sources = {}
for _sym, _symbol_type, size, path in symbols:
key = None
if path:
key = os.path.normpath(path)
else:
key = '[no path]'
if key not in sources:
sources[key] = {'path': path, 'symbol_count': 0, 'size': 0}
record = sources[key]
record['size'] += size
record['symbol_count'] += 1
return sources
# TODO(andrewhayden): Only used for legacy reports. Delete.
def DumpLargestSources(symbols, outfile, n):
source_map = MakeSourceMap(symbols)
sources = sorted(source_map.values(), key=lambda x: -x['size'])
dumped = 0
out = open(outfile, 'w')
try:
out.write('var largestSources = [\n')
for record in sources:
entry = {'size': FormatBytes(record['size']),
'symbol_count': str(record['symbol_count']),
'location': record['path']}
out.write(json.dumps(entry))
out.write(',\n')
dumped += 1
if dumped >= n:
return
finally:
out.write('];\n')
out.flush()
out.close()
# TODO(andrewhayden): Only used for legacy reports. Delete.
def DumpLargestVTables(symbols, outfile, n):
vtables = []
for symbol, _type, size, path in symbols:
if 'vtable for ' in symbol:
vtables.append({'symbol': symbol, 'path': path, 'size': size})
vtables = sorted(vtables, key=lambda x: -x['size'])
dumped = 0
out = open(outfile, 'w')
try:
out.write('var largestVTables = [\n')
for record in vtables:
entry = {'size': FormatBytes(record['size']),
'symbol': record['symbol'],
'location': record['path']}
out.write(json.dumps(entry))
out.write(',\n')
dumped += 1
if dumped >= n:
return
finally:
out.write('];\n')
out.flush()
out.close()
# Regex for parsing "nm" output. A sample line looks like this:
# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
#
# The fields are: address, size, type, name, source location
# Regular expression explained ( see also: https://xkcd.com/208 ):
# ([0-9a-f]{8,})  The address
# [\s]+           Whitespace separator
# ([0-9a-f]{8,})  The size. From here on out it's all optional.
# [\s]*           Whitespace separator
# (\S?)           The symbol type, which is any non-whitespace char
# [\s*]           Whitespace separator
# ([^\t]*)        Symbol name, any non-tab character (spaces ok!)
# [\t]?           Tab separator
# (.*)            The location (filename[:linenum|?][ (discriminator n)])
sNmPattern = re.compile(
r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')
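# For example, with a tab before the location (as RunElfSymbolizer writes it
# below), the sample line above should yield the groups
# ('0167b39c', '00000018', 't', 'ACCESS_DESCRIPTION_free', '/path/file.c:95').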
class Progress():
def __init__(self):
self.count = 0
self.skip_count = 0
self.collisions = 0
self.time_last_output = time.time()
self.count_last_output = 0
def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs):
nm_output = RunNm(library, nm_binary)
nm_output_lines = nm_output.splitlines()
nm_output_lines_len = len(nm_output_lines)
address_symbol = {}
progress = Progress()
def map_address_symbol(symbol, addr):
progress.count += 1
if addr in address_symbol:
# 'Collision between %s and %s.' % (str(symbol.name),
# str(address_symbol[addr].name))
progress.collisions += 1
else:
address_symbol[addr] = symbol
progress_chunk = 100
if progress.count % progress_chunk == 0:
time_now = time.time()
time_spent = time_now - progress.time_last_output
if time_spent > 1.0:
# Only output at most once per second.
progress.time_last_output = time_now
chunk_size = progress.count - progress.count_last_output
progress.count_last_output = progress.count
if time_spent > 0:
speed = chunk_size / time_spent
else:
speed = 0
progress_percent = (100.0 * (progress.count + progress.skip_count) /
nm_output_lines_len)
print('%.1f%%: Looked up %d symbols (%d collisions) - %.1f lookups/s.' %
(progress_percent, progress.count, progress.collisions, speed))
symbolizer = elf_symbolizer.ELFSymbolizer(library, addr2line_binary,
map_address_symbol,
max_concurrent_jobs=jobs)
user_interrupted = False
try:
for line in nm_output_lines:
match = sNmPattern.match(line)
if match:
location = match.group(5)
if not location:
addr = int(match.group(1), 16)
size = int(match.group(2), 16)
if addr in address_symbol: # Already looked up, shortcut
# ELFSymbolizer.
map_address_symbol(address_symbol[addr], addr)
continue
elif size == 0:
# Save time by not looking up empty symbols (do they even exist?)
print('Empty symbol: ' + line)
else:
symbolizer.SymbolizeAsync(addr, addr)
continue
progress.skip_count += 1
except KeyboardInterrupt:
user_interrupted = True
print('Interrupting - killing subprocesses. Please wait.')
try:
symbolizer.Join()
except KeyboardInterrupt:
# Don't want to abort here since we will be finished in a few seconds.
user_interrupted = True
print('Patience you must have my young padawan.')
if user_interrupted:
print('Skipping the rest of the file mapping. '
'Output will not be fully classified.')
with open(outfile, 'w') as out:
for line in nm_output_lines:
match = sNmPattern.match(line)
if match:
location = match.group(5)
if not location:
addr = int(match.group(1), 16)
symbol = address_symbol.get(addr)
if symbol is not None:
path = '??'
if symbol.source_path is not None:
path = symbol.source_path
line_number = 0
if symbol.source_line is not None:
line_number = symbol.source_line
out.write('%s\t%s:%d\n' % (line, path, line_number))
continue
out.write('%s\n' % line)
print('%d symbols in the results.' % len(address_symbol))
def RunNm(binary, nm_binary):
print('Starting nm')
cmd = [nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort',
binary]
nm_process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(process_output, err_output) = nm_process.communicate()
if nm_process.returncode != 0:
if err_output:
raise Exception(err_output)
else:
raise Exception(process_output)
print('Finished nm')
return process_output
def GetNmSymbols(nm_infile, outfile, library, jobs, verbose,
addr2line_binary, nm_binary):
if nm_infile is None:
if outfile is None:
outfile = tempfile.NamedTemporaryFile(delete=False).name
if verbose:
print 'Running parallel addr2line, dumping symbols to ' + outfile
RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs)
nm_infile = outfile
elif verbose:
print 'Using nm input from ' + nm_infile
with file(nm_infile, 'r') as infile:
return list(binary_size_utils.ParseNm(infile))
def _find_in_system_path(binary):
"""Locate the full path to binary in the system path or return None
if not found."""
system_path = os.environ["PATH"].split(os.pathsep)
for path in system_path:
binary_path = os.path.join(path, binary)
if os.path.isfile(binary_path):
return binary_path
return None
def CheckDebugFormatSupport(library, addr2line_binary):
"""Kills the program if debug data is in an unsupported format.
There are two common versions of the DWARF debug formats and
since we are right now transitioning from DWARF2 to newer formats,
it's possible to have a mix of tools that are not compatible. Detect
that and abort rather than produce meaningless output."""
tool_output = subprocess.check_output([addr2line_binary, '--version'])
version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M)
parsed_output = version_re.match(tool_output)
major = int(parsed_output.group(1))
minor = int(parsed_output.group(2))
supports_dwarf4 = major > 2 or major == 2 and minor > 22
if supports_dwarf4:
return
print('Checking version of debug information in %s.' % library)
debug_info = subprocess.check_output(['readelf', '--debug-dump=info',
'--dwarf-depth=1', library])
dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
version = int(parsed_dwarf_format_output.group(1))
if version > 2:
print('The supplied tools only support DWARF2 debug data but the binary\n' +
'uses DWARF%d. Update the tools or compile the binary\n' % version +
'with -gdwarf-2.')
sys.exit(1)
def main():
usage = """%prog [options]
Runs a spatial analysis on a given library, looking up the source locations
of its symbols and calculating how much space each directory, source file,
and so on is taking. The result is a report that can be used to pinpoint
sources of large portions of the binary, and so on.
Under normal circumstances, you only need to pass two arguments, thusly:
%prog --library /path/to/library --destdir /path/to/output
In this mode, the program will dump the symbols from the specified library
and map those symbols back to source locations, producing a web-based
report in the specified output directory.
Other options are available via '--help'.
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--nm-in', metavar='PATH',
help='if specified, use nm input from <path> instead of '
'generating it. Note that source locations should be '
'present in the file; i.e., no addr2line symbol lookups '
'will be performed when this option is specified. '
'Mutually exclusive with --library.')
parser.add_option('--destdir', metavar='PATH',
help='write output to the specified directory. An HTML '
'report is generated here along with supporting files; '
'any existing report will be overwritten.')
parser.add_option('--library', metavar='PATH',
help='if specified, process symbols in the library at '
'the specified path. Mutually exclusive with --nm-in.')
parser.add_option('--nm-binary',
help='use the specified nm binary to analyze library. '
'This is to be used when the nm in the path is not for '
'the right architecture or of the right version.')
parser.add_option('--addr2line-binary',
help='use the specified addr2line binary to analyze '
'library. This is to be used when the addr2line in '
'the path is not for the right architecture or '
'of the right version.')
parser.add_option('--jobs', type='int',
help='number of jobs to use for the parallel '
'addr2line processing pool; defaults to 1. More '
'jobs greatly improve throughput but eat RAM like '
'popcorn, and take several gigabytes each. Start low '
'and ramp this number up until your machine begins to '
'struggle with RAM. '
'This argument is only valid when using --library.')
parser.add_option('-v', dest='verbose', action='store_true',
help='be verbose, printing lots of status information.')
parser.add_option('--nm-out', metavar='PATH',
help='keep the nm output file, and store it at the '
'specified path. This is useful if you want to see the '
'fully processed nm output after the symbols have been '
'mapped to source locations. By default, a tempfile is '
'used and is deleted when the program terminates. '
'This argument is only valid when using --library.')
parser.add_option('--legacy', action='store_true',
help='emit legacy binary size report instead of modern')
opts, _args = parser.parse_args()
if ((not opts.library) and (not opts.nm_in)) or (opts.library and opts.nm_in):
parser.error('exactly one of --library or --nm-in is required')
if (opts.nm_in):
if opts.jobs:
print >> sys.stderr, ('WARNING: --jobs has no effect '
'when used with --nm-in')
if not opts.destdir:
parser.error('--destdir is required argument')
if not opts.jobs:
# Use the number of processors but cap between 2 and 4 since raw
# CPU power isn't the limiting factor. It's I/O limited, memory
# bus limited and available-memory-limited. Too many processes and
# the computer will run out of memory and it will be slow.
opts.jobs = max(2, min(4, multiprocessing.cpu_count()))
if opts.addr2line_binary:
assert os.path.isfile(opts.addr2line_binary)
addr2line_binary = opts.addr2line_binary
else:
addr2line_binary = _find_in_system_path('addr2line')
assert addr2line_binary, 'Unable to find addr2line in the path. '\
'Use --addr2line-binary to specify location.'
if opts.nm_binary:
assert os.path.isfile(opts.nm_binary)
nm_binary = opts.nm_binary
else:
nm_binary = _find_in_system_path('nm')
assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
'to specify location.'
print('addr2line: %s' % addr2line_binary)
print('nm: %s' % nm_binary)
CheckDebugFormatSupport(opts.library, addr2line_binary)
symbols = GetNmSymbols(opts.nm_in, opts.nm_out, opts.library,
opts.jobs, opts.verbose is True,
addr2line_binary, nm_binary)
if not os.path.exists(opts.destdir):
os.makedirs(opts.destdir, 0755)
if opts.legacy: # legacy report
DumpTreemap(symbols, os.path.join(opts.destdir, 'treemap-dump.js'))
DumpLargestSymbols(symbols,
os.path.join(opts.destdir, 'largest-symbols.js'), 100)
DumpLargestSources(symbols,
os.path.join(opts.destdir, 'largest-sources.js'), 100)
DumpLargestVTables(symbols,
os.path.join(opts.destdir, 'largest-vtables.js'), 100)
treemap_out = os.path.join(opts.destdir, 'webtreemap')
if not os.path.exists(treemap_out):
os.makedirs(treemap_out, 0755)
treemap_src = os.path.join('third_party', 'webtreemap', 'src')
shutil.copy(os.path.join(treemap_src, 'COPYING'), treemap_out)
shutil.copy(os.path.join(treemap_src, 'webtreemap.js'), treemap_out)
shutil.copy(os.path.join(treemap_src, 'webtreemap.css'), treemap_out)
shutil.copy(os.path.join('tools', 'binary_size', 'legacy_template',
'index.html'), opts.destdir)
else: # modern report
DumpCompactTree(symbols, os.path.join(opts.destdir, 'data.js'))
d3_out = os.path.join(opts.destdir, 'd3')
if not os.path.exists(d3_out):
os.makedirs(d3_out, 0755)
d3_src = os.path.join(os.path.dirname(__file__),
'..',
'..',
'third_party', 'd3', 'src')
template_src = os.path.join(os.path.dirname(__file__),
'template')
shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out)
shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out)
shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir)
shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir)
print 'Report saved to ' + opts.destdir + '/index.html'
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for the LSTM.
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
from collections import defaultdict
class Indexer:
def __init__(self, symbols = ["<pad>","<unk>","<s>","</s>"]):
self.vocab = defaultdict(int)
self.PAD = symbols[0]
self.UNK = symbols[1]
self.BOS = symbols[2]
self.EOS = symbols[3]
self.d = {self.PAD: 0, self.UNK: 1, self.BOS: 2, self.EOS: 3}
self.idx2word = {}
def add_w(self, ws):
for w in ws:
if w not in self.d:
self.d[w] = len(self.d)
def convert(self, w):
return self.d[w] if w in self.d else self.d[self.UNK]
def convert_sequence(self, ls):
return [self.convert(l) for l in ls]
def write(self, outfile):
out = open(outfile, "w")
items = [(v, k) for k, v in self.d.items()]
items.sort()
for v, k in items:
out.write(" ".join([k, str(v)]) + "\n")
out.close()
def prune_vocab(self, k, cnt = False):
vocab_list = [(word, count) for word, count in self.vocab.items()]
if cnt:
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list if pair[1] > k}
else:
vocab_list.sort(key = lambda x: x[1], reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list[:k]}
for word in self.pruned_vocab:
if word not in self.d:
self.d[word] = len(self.d)
for word, idx in self.d.items():
self.idx2word[idx] = word
def load_vocab(self, vocab_file):
self.d = {}
for line in open(vocab_file, 'r'):
v, k = line.strip().split()
self.d[v] = int(k)
for word, idx in self.d.items():
self.idx2word[idx] = word
def pad(ls, length, symbol):
if len(ls) >= length:
return ls[:length]
return ls + [symbol] * (length - len(ls))
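# Illustrative sketch (not part of the original pipeline): how Indexer and pad
# combine to turn a raw sentence into padded word indices. The toy vocabulary
# below is hypothetical.
def _example_indexing():
    indexer = Indexer(["<pad>", "<unk>", "<s>", "</s>"])
    indexer.add_w(["the", "cat", "sat"])       # assigned indices 4, 5, 6
    sent = [indexer.BOS] + "the cat sat".split() + [indexer.EOS]
    sent_pad = pad(sent, 8, indexer.PAD)       # pad to a fixed length of 8
    return indexer.convert_sequence(sent_pad)  # [2, 4, 5, 6, 3, 0, 0, 0]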
def get_data(args):
indexer = Indexer(["<pad>","<unk>","<s>","</s>"])
def make_vocab(textfile, seqlength, train=1):
num_sents = 0
for sent in open(textfile, 'r'):
sent = sent.strip().split()
if len(sent) > seqlength or len(sent) < 1:
continue
num_sents += 1
if train == 1:
for word in sent:
indexer.vocab[word] += 1
return num_sents
def convert(textfile, batchsize, seqlength, outfile, num_sents, max_sent_l=0, shuffle=0):
newseqlength = seqlength + 2 #add 2 for EOS and BOS
sents = np.zeros((num_sents, newseqlength), dtype=int)
sent_lengths = np.zeros((num_sents,), dtype=int)
dropped = 0
sent_id = 0
for sent in open(textfile, 'r'):
sent = [indexer.BOS] + sent.strip().split() + [indexer.EOS]
max_sent_l = max(len(sent), max_sent_l)
if len(sent) > seqlength + 2 or len(sent) < 3:
dropped += 1
continue
sent_pad = pad(sent, newseqlength, indexer.PAD)
sents[sent_id] = np.array(indexer.convert_sequence(sent_pad), dtype=int)
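# PAD maps to index 0, so counting nonzero entries below gives the unpadded length (including BOS/EOS)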
sent_lengths[sent_id] = (sents[sent_id] != 0).sum()
sent_id += 1
if sent_id % 100000 == 0:
print("{}/{} sentences processed".format(sent_id, num_sents))
print(sent_id, num_sents)
if shuffle == 1:
rand_idx = np.random.permutation(sent_id)
sents = sents[rand_idx]
sent_lengths = sent_lengths[rand_idx]
#break up batches based on source lengths
sent_lengths = sent_lengths[:sent_id]
sent_sort = np.argsort(sent_lengths)
sents = sents[sent_sort]
sent_l = sent_lengths[sent_sort]
curr_l = 1
l_location = [] #idx where sent length changes
for j,i in enumerate(sent_sort):
if sent_lengths[i] > curr_l:
curr_l = sent_lengths[i]
l_location.append(j)
l_location.append(len(sents))
#get batch sizes
curr_idx = 0
batch_idx = [0]
nonzeros = []
batch_l = []
batch_w = []
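# Cut each same-length bucket into chunks of at most batchsize sentences, so
# every batch contains sentences of a single length; batch_l records the batch
# size and batch_w the (unpadded) sentence length of each batch.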
for i in range(len(l_location)-1):
while curr_idx < l_location[i+1]:
curr_idx = min(curr_idx + batchsize, l_location[i+1])
batch_idx.append(curr_idx)
for i in range(len(batch_idx)-1):
batch_l.append(batch_idx[i+1] - batch_idx[i])
batch_w.append(sent_l[batch_idx[i]])
# Write output
f = h5py.File(outfile, "w")
f["source"] = sents
f["batch_l"] = np.array(batch_l, dtype=int)
f["source_l"] = np.array(batch_w, dtype=int)
f["sents_l"] = np.array(sent_l, dtype = int)
f["batch_idx"] = np.array(batch_idx[:-1], dtype=int)
f["vocab_size"] = np.array([len(indexer.d)])
print("Saved {} sentences (dropped {} due to length/unk filter)".format(
len(f["source"]), dropped))
f.close()
return max_sent_l
print("First pass through data to get vocab...")
num_sents_train = make_vocab(args.trainfile, args.seqlength)
print("Number of sentences in training: {}".format(num_sents_train))
num_sents_valid = make_vocab(args.valfile, args.seqlength, 0)
print("Number of sentences in valid: {}".format(num_sents_valid))
num_sents_test = make_vocab(args.testfile, args.seqlength, 0)
print("Number of sentences in test: {}".format(num_sents_test))
if args.vocabminfreq >= 0:
indexer.prune_vocab(args.vocabminfreq, True)
else:
indexer.prune_vocab(args.vocabsize, False)
if args.vocabfile != '':
print('Loading pre-specified source vocab from ' + args.vocabfile)
indexer.load_vocab(args.vocabfile)
indexer.write(args.outputfile + ".dict")
print("Vocab size: Original = {}, Pruned = {}".format(len(indexer.vocab),
len(indexer.d)))
max_sent_l = 0
max_sent_l = convert(args.valfile, args.batchsize, args.seqlength,
args.outputfile + "-val.hdf5", num_sents_valid,
max_sent_l, args.shuffle)
max_sent_l = convert(args.testfile, args.batchsize, args.seqlength,
args.outputfile + "-test.hdf5", num_sents_test,
max_sent_l, args.shuffle)
max_sent_l = convert(args.trainfile, args.batchsize, args.seqlength,
args.outputfile + "-train.hdf5", num_sents_train,
max_sent_l, args.shuffle)
print("Max sent length (before dropping): {}".format(max_sent_l))
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vocabsize', help="Size of source vocabulary, constructed "
"by taking the top X most frequent words. "
" Rest are replaced with special UNK tokens.",
type=int, default=70000)
parser.add_argument('--vocabminfreq', help="Minimum frequency for vocab, if using frequency cutoff",
type=int, default=-1)
parser.add_argument('--trainfile', help="Path to training data.", required=True)
parser.add_argument('--valfile', help="Path validation data.", required=True)
parser.add_argument('--testfile', help="Path to test data.", required=True)
parser.add_argument('--batchsize', help="Size of each minibatch.", type=int, default=32)
parser.add_argument('--seqlength', help="Maximum source sequence length. Sequences longer "
"than this are dropped.", type=int, default=200)
parser.add_argument('--outputfile', help="Prefix of the output file names. ", type=str)
parser.add_argument('--vocabfile', help="If working with a preset vocab, "
"then including this will ignore vocabsize and use the "
"vocab provided here.",
type = str, default='')
parser.add_argument('--shuffle', help="If = 1, shuffle sentences before sorting (based on "
"source length).",
type = int, default = 1)
args = parser.parse_args(arguments)
get_data(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
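# Transliteration strings, apparently for the U+30xx Unicode block (CJK symbols
# and punctuation, Hiragana, Katakana): each entry is indexed by the low byte of
# the code point, and '[?]' marks characters with no ASCII approximation.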
data = (
' ', # 0x00
', ', # 0x01
'. ', # 0x02
'"', # 0x03
'[JIS]', # 0x04
'"', # 0x05
'/', # 0x06
'0', # 0x07
'<', # 0x08
'> ', # 0x09
'<<', # 0x0a
'>> ', # 0x0b
'[', # 0x0c
'] ', # 0x0d
'{', # 0x0e
'} ', # 0x0f
'[(', # 0x10
')] ', # 0x11
'@', # 0x12
'X ', # 0x13
'[', # 0x14
'] ', # 0x15
'[[', # 0x16
']] ', # 0x17
'((', # 0x18
')) ', # 0x19
'[[', # 0x1a
']] ', # 0x1b
'~ ', # 0x1c
'``', # 0x1d
'\'\'', # 0x1e
',,', # 0x1f
'@', # 0x20
'1', # 0x21
'2', # 0x22
'3', # 0x23
'4', # 0x24
'5', # 0x25
'6', # 0x26
'7', # 0x27
'8', # 0x28
'9', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'~', # 0x30
'+', # 0x31
'+', # 0x32
'+', # 0x33
'+', # 0x34
'', # 0x35
'@', # 0x36
' // ', # 0x37
'+10+', # 0x38
'+20+', # 0x39
'+30+', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'a', # 0x41
'a', # 0x42
'i', # 0x43
'i', # 0x44
'u', # 0x45
'u', # 0x46
'e', # 0x47
'e', # 0x48
'o', # 0x49
'o', # 0x4a
'ka', # 0x4b
'ga', # 0x4c
'ki', # 0x4d
'gi', # 0x4e
'ku', # 0x4f
'gu', # 0x50
'ke', # 0x51
'ge', # 0x52
'ko', # 0x53
'go', # 0x54
'sa', # 0x55
'za', # 0x56
'shi', # 0x57
'zi', # 0x58
'su', # 0x59
'zu', # 0x5a
'se', # 0x5b
'ze', # 0x5c
'so', # 0x5d
'zo', # 0x5e
'ta', # 0x5f
'da', # 0x60
'chi', # 0x61
'di', # 0x62
'tsu', # 0x63
'tsu', # 0x64
'du', # 0x65
'te', # 0x66
'de', # 0x67
'to', # 0x68
'do', # 0x69
'na', # 0x6a
'ni', # 0x6b
'nu', # 0x6c
'ne', # 0x6d
'no', # 0x6e
'ha', # 0x6f
'ba', # 0x70
'pa', # 0x71
'hi', # 0x72
'bi', # 0x73
'pi', # 0x74
'hu', # 0x75
'bu', # 0x76
'pu', # 0x77
'he', # 0x78
'be', # 0x79
'pe', # 0x7a
'ho', # 0x7b
'bo', # 0x7c
'po', # 0x7d
'ma', # 0x7e
'mi', # 0x7f
'mu', # 0x80
'me', # 0x81
'mo', # 0x82
'ya', # 0x83
'ya', # 0x84
'yu', # 0x85
'yu', # 0x86
'yo', # 0x87
'yo', # 0x88
'ra', # 0x89
'ri', # 0x8a
'ru', # 0x8b
're', # 0x8c
'ro', # 0x8d
'wa', # 0x8e
'wa', # 0x8f
'wi', # 0x90
'we', # 0x91
'wo', # 0x92
'n', # 0x93
'vu', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'"', # 0x9d
'"', # 0x9e
'[?]', # 0x9f
'[?]', # 0xa0
'a', # 0xa1
'a', # 0xa2
'i', # 0xa3
'i', # 0xa4
'u', # 0xa5
'u', # 0xa6
'e', # 0xa7
'e', # 0xa8
'o', # 0xa9
'o', # 0xaa
'ka', # 0xab
'ga', # 0xac
'ki', # 0xad
'gi', # 0xae
'ku', # 0xaf
'gu', # 0xb0
'ke', # 0xb1
'ge', # 0xb2
'ko', # 0xb3
'go', # 0xb4
'sa', # 0xb5
'za', # 0xb6
'shi', # 0xb7
'zi', # 0xb8
'su', # 0xb9
'zu', # 0xba
'se', # 0xbb
'ze', # 0xbc
'so', # 0xbd
'zo', # 0xbe
'ta', # 0xbf
'da', # 0xc0
'chi', # 0xc1
'di', # 0xc2
'tsu', # 0xc3
'tsu', # 0xc4
'du', # 0xc5
'te', # 0xc6
'de', # 0xc7
'to', # 0xc8
'do', # 0xc9
'na', # 0xca
'ni', # 0xcb
'nu', # 0xcc
'ne', # 0xcd
'no', # 0xce
'ha', # 0xcf
'ba', # 0xd0
'pa', # 0xd1
'hi', # 0xd2
'bi', # 0xd3
'pi', # 0xd4
'hu', # 0xd5
'bu', # 0xd6
'pu', # 0xd7
'he', # 0xd8
'be', # 0xd9
'pe', # 0xda
'ho', # 0xdb
'bo', # 0xdc
'po', # 0xdd
'ma', # 0xde
'mi', # 0xdf
'mu', # 0xe0
'me', # 0xe1
'mo', # 0xe2
'ya', # 0xe3
'ya', # 0xe4
'yu', # 0xe5
'yu', # 0xe6
'yo', # 0xe7
'yo', # 0xe8
'ra', # 0xe9
'ri', # 0xea
'ru', # 0xeb
're', # 0xec
'ro', # 0xed
'wa', # 0xee
'wa', # 0xef
'wi', # 0xf0
'we', # 0xf1
'wo', # 0xf2
'n', # 0xf3
'vu', # 0xf4
'ka', # 0xf5
'ke', # 0xf6
'va', # 0xf7
'vi', # 0xf8
've', # 0xf9
'vo', # 0xfa
'', # 0xfb
'', # 0xfc
'"', # 0xfd
'"', # 0xfe
)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest.mock
from datetime import datetime
import pytest
from itsdangerous import URLSafeSerializer
from parameterized import parameterized
from airflow import DAG
from airflow.api_connexion.exceptions import EXCEPTIONS_LINK_MAP
from airflow.configuration import conf
from airflow.models import DagBag, DagModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.dummy import DummyOperator
from airflow.security import permissions
from airflow.utils.session import provide_session
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
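# The API exposes each DAG's file location as a signed, URL-safe token; precompute
# the token for this test module so expected payloads can assert on file_token.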
SERIALIZER = URLSafeSerializer(conf.get('webserver', 'secret_key'))
FILE_TOKEN = SERIALIZER.dumps(__file__)
DAG_ID = "test_dag"
TASK_ID = "op1"
DAG2_ID = "test_dag2"
DAG3_ID = "test_dag3"
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
app = minimal_app_for_api
create_user(
app, # type: ignore
username="test",
role_name="Test",
permissions=[
(permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
],
)
create_user(app, username="test_no_permissions", role_name="TestNoPermissions") # type: ignore
create_user(app, username="test_granular_permissions", role_name="TestGranularDag") # type: ignore
app.appbuilder.sm.sync_perm_for_dag( # type: ignore
"TEST_DAG_1",
access_control={'TestGranularDag': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
)
with DAG(
DAG_ID,
start_date=datetime(2020, 6, 15),
doc_md="details",
params={"foo": 1},
tags=['example'],
) as dag:
DummyOperator(task_id=TASK_ID)
with DAG(DAG2_ID, start_date=datetime(2020, 6, 15)) as dag2: # no doc_md
DummyOperator(task_id=TASK_ID)
with DAG(DAG3_ID) as dag3: # DAG start_date set to None
DummyOperator(task_id=TASK_ID, start_date=datetime(2019, 6, 12))
dag_bag = DagBag(os.devnull, include_examples=False)
dag_bag.dags = {dag.dag_id: dag, dag2.dag_id: dag2, dag3.dag_id: dag3}
app.dag_bag = dag_bag
yield app
delete_user(app, username="test") # type: ignore
delete_user(app, username="test_no_permissions") # type: ignore
delete_user(app, username="test_granular_permissions") # type: ignore
class TestDagEndpoint:
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app) -> None:
self.clean_db()
self.app = configured_app
self.client = self.app.test_client() # type:ignore
self.dag_id = DAG_ID
self.dag2_id = DAG2_ID
self.dag3_id = DAG3_ID
def teardown_method(self) -> None:
self.clean_db()
@provide_session
def _create_dag_models(self, count, session=None):
for num in range(1, count + 1):
dag_model = DagModel(
dag_id=f"TEST_DAG_{num}",
fileloc=f"/tmp/dag_{num}.py",
schedule_interval="2 2 * * *",
is_active=True,
is_paused=False,
)
session.add(dag_model)
@provide_session
def _create_deactivated_dag(self, session=None):
dag_model = DagModel(
dag_id="TEST_DAG_DELETED_1",
fileloc="/tmp/dag_del_1.py",
schedule_interval="2 2 * * *",
is_active=False,
)
session.add(dag_model)
class TestGetDag(TestDagEndpoint):
@conf_vars({("webserver", "secret_key"): "mysecret"})
def test_should_respond_200(self):
self._create_dag_models(1)
response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
"is_paused": False,
"is_active": True,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {"__type": "CronExpression", "value": "2 2 * * *"},
"tags": [],
} == response.json
@conf_vars({("webserver", "secret_key"): "mysecret"})
def test_should_respond_200_with_schedule_interval_none(self, session):
dag_model = DagModel(
dag_id="TEST_DAG_1",
fileloc="/tmp/dag_1.py",
schedule_interval=None,
is_paused=False,
)
session.add(dag_model)
session.commit()
response = self.client.get("/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": 'Ii90bXAvZGFnXzEucHki.EnmIdPaUPo26lHQClbWMbDFD1Pk',
"is_paused": False,
"is_active": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": None,
"tags": [],
} == response.json
def test_should_respond_200_with_granular_dag_access(self):
self._create_dag_models(1)
response = self.client.get(
"/api/v1/dags/TEST_DAG_1", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
def test_should_respond_404(self):
response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
def test_should_raises_401_unauthenticated(self):
self._create_dag_models(1)
response = self.client.get("/api/v1/dags/TEST_DAG_1")
assert_401(response)
def test_should_raise_403_forbidden(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test_no_permissions"}
)
assert response.status_code == 403
def test_should_respond_403_with_granular_access_for_different_dag(self):
self._create_dag_models(3)
response = self.client.get(
"/api/v1/dags/TEST_DAG_2", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 403
class TestGetDagDetails(TestDagEndpoint):
def test_should_respond_200(self):
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
"catchup": True,
"concurrency": 16,
"max_active_tasks": 16,
"dag_id": "test_dag",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": "details",
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_active": None,
"is_subdag": False,
"orientation": "LR",
"owners": ['airflow'],
"params": {
"foo": {
'__class': 'airflow.models.param.Param',
'value': 1,
'description': None,
'schema': {},
}
},
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": [{'name': 'example'}],
"timezone": "Timezone('UTC')",
}
assert response.json == expected
def test_should_response_200_with_doc_md_none(self):
response = self.client.get(
f"/api/v1/dags/{self.dag2_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
"catchup": True,
"concurrency": 16,
"max_active_tasks": 16,
"dag_id": "test_dag2",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": None,
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_active": None,
"is_subdag": False,
"orientation": "LR",
"owners": ['airflow'],
"params": {},
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": [],
"timezone": "Timezone('UTC')",
}
assert response.json == expected
def test_should_response_200_for_null_start_date(self):
response = self.client.get(
f"/api/v1/dags/{self.dag3_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
"catchup": True,
"concurrency": 16,
"max_active_tasks": 16,
"dag_id": "test_dag3",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": None,
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_active": None,
"is_subdag": False,
"orientation": "LR",
"owners": ['airflow'],
"params": {},
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": None,
"tags": [],
"timezone": "Timezone('UTC')",
}
assert response.json == expected
def test_should_respond_200_serialized(self):
# Get the dag out of the dagbag before we patch it to an empty one
SerializedDagModel.write_dag(self.app.dag_bag.get_dag(self.dag_id))
# Create empty app with empty dagbag to check if DAG is read from db
dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
patcher = unittest.mock.patch.object(self.app, 'dag_bag', dag_bag)
patcher.start()
expected = {
"catchup": True,
"concurrency": 16,
"max_active_tasks": 16,
"dag_id": "test_dag",
"dag_run_timeout": None,
"default_view": "tree",
"description": None,
"doc_md": "details",
"fileloc": __file__,
"file_token": FILE_TOKEN,
"is_paused": None,
"is_active": None,
"is_subdag": False,
"orientation": "LR",
"owners": ['airflow'],
"params": {
"foo": {
'__class': 'airflow.models.param.Param',
'value': 1,
'description': None,
'schema': {},
}
},
"schedule_interval": {
"__type": "TimeDelta",
"days": 1,
"microseconds": 0,
"seconds": 0,
},
"start_date": "2020-06-15T00:00:00+00:00",
"tags": [{'name': 'example'}],
"timezone": "Timezone('UTC')",
}
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
assert response.json == expected
patcher.stop()
response = self.client.get(
f"/api/v1/dags/{self.dag_id}/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 200
expected = {
'catchup': True,
'concurrency': 16,
'max_active_tasks': 16,
'dag_id': 'test_dag',
'dag_run_timeout': None,
'default_view': 'tree',
'description': None,
'doc_md': 'details',
'fileloc': __file__,
"file_token": FILE_TOKEN,
'is_paused': None,
"is_active": None,
'is_subdag': False,
'orientation': 'LR',
'owners': ['airflow'],
"params": {
"foo": {
'__class': 'airflow.models.param.Param',
'value': 1,
'description': None,
'schema': {},
}
},
'schedule_interval': {'__type': 'TimeDelta', 'days': 1, 'microseconds': 0, 'seconds': 0},
'start_date': '2020-06-15T00:00:00+00:00',
'tags': [{'name': 'example'}],
'timezone': "Timezone('UTC')",
}
assert response.json == expected
def test_should_raises_401_unauthenticated(self):
response = self.client.get(f"/api/v1/dags/{self.dag_id}/details")
assert_401(response)
def test_should_raise_404_when_dag_is_not_found(self):
response = self.client.get(
"/api/v1/dags/non_existing_dag_id/details", environ_overrides={'REMOTE_USER': "test"}
)
assert response.status_code == 404
assert response.json == {
'detail': 'The DAG with dag_id: non_existing_dag_id was not found',
'status': 404,
'title': 'DAG not found',
'type': EXCEPTIONS_LINK_MAP[404],
}
class TestGetDags(TestDagEndpoint):
@provide_session
def test_should_respond_200(self, session):
self._create_dag_models(2)
self._create_deactivated_dag()
dags_query = session.query(DagModel).filter(~DagModel.is_subdag)
assert len(dags_query.all()) == 3
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
file_token2 = SERIALIZER.dumps("/tmp/dag_2.py")
assert response.status_code == 200
assert {
"dags": [
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": file_token,
"is_paused": False,
"is_active": True,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
{
"dag_id": "TEST_DAG_2",
"description": None,
"fileloc": "/tmp/dag_2.py",
"file_token": file_token2,
"is_paused": False,
"is_active": True,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
],
"total_entries": 2,
} == response.json
def test_only_active_true_returns_active_dags(self):
self._create_dag_models(1)
self._create_deactivated_dag()
response = self.client.get("api/v1/dags?only_active=True", environ_overrides={'REMOTE_USER': "test"})
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
assert response.status_code == 200
assert {
"dags": [
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": file_token,
"is_paused": False,
"is_active": True,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
}
],
"total_entries": 1,
} == response.json
def test_only_active_false_returns_all_dags(self):
self._create_dag_models(1)
self._create_deactivated_dag()
response = self.client.get("api/v1/dags?only_active=False", environ_overrides={'REMOTE_USER': "test"})
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
file_token_2 = SERIALIZER.dumps("/tmp/dag_del_1.py")
assert response.status_code == 200
assert {
"dags": [
{
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": file_token,
"is_paused": False,
"is_active": True,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
{
"dag_id": "TEST_DAG_DELETED_1",
"description": None,
"fileloc": "/tmp/dag_del_1.py",
"file_token": file_token_2,
"is_paused": False,
"is_active": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
},
],
"total_entries": 2,
} == response.json
@parameterized.expand(
[
("api/v1/dags?tags=t1", ['TEST_DAG_1', 'TEST_DAG_3']),
("api/v1/dags?tags=t2", ['TEST_DAG_2', 'TEST_DAG_3']),
("api/v1/dags?tags=t1,t2", ["TEST_DAG_1", "TEST_DAG_2", "TEST_DAG_3"]),
("api/v1/dags", ["TEST_DAG_1", "TEST_DAG_2", "TEST_DAG_3", "TEST_DAG_4"]),
]
)
def test_filter_dags_by_tags_works(self, url, expected_dag_ids):
# test filter by tags
dag1 = DAG(dag_id="TEST_DAG_1", tags=['t1'])
dag2 = DAG(dag_id="TEST_DAG_2", tags=['t2'])
dag3 = DAG(dag_id="TEST_DAG_3", tags=['t1', 't2'])
dag4 = DAG(dag_id="TEST_DAG_4")
dag1.sync_to_db()
dag2.sync_to_db()
dag3.sync_to_db()
dag4.sync_to_db()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_ids = [dag["dag_id"] for dag in response.json["dags"]]
assert expected_dag_ids == dag_ids
@parameterized.expand(
[
("api/v1/dags?dag_id_pattern=DAG_1", {'TEST_DAG_1', 'SAMPLE_DAG_1'}),
("api/v1/dags?dag_id_pattern=SAMPLE_DAG", {'SAMPLE_DAG_1', 'SAMPLE_DAG_2'}),
(
"api/v1/dags?dag_id_pattern=_DAG_",
{"TEST_DAG_1", "TEST_DAG_2", 'SAMPLE_DAG_1', 'SAMPLE_DAG_2'},
),
]
)
def test_filter_dags_by_dag_id_works(self, url, expected_dag_ids):
# test filter by tags
dag1 = DAG(dag_id="TEST_DAG_1")
dag2 = DAG(dag_id="TEST_DAG_2")
dag3 = DAG(dag_id="SAMPLE_DAG_1")
dag4 = DAG(dag_id="SAMPLE_DAG_2")
dag1.sync_to_db()
dag2.sync_to_db()
dag3.sync_to_db()
dag4.sync_to_db()
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_ids = {dag["dag_id"] for dag in response.json["dags"]}
assert expected_dag_ids == dag_ids
def test_should_respond_200_with_granular_dag_access(self):
self._create_dag_models(3)
response = self.client.get(
"/api/v1/dags", environ_overrides={'REMOTE_USER': "test_granular_permissions"}
)
assert response.status_code == 200
assert len(response.json['dags']) == 1
assert response.json['dags'][0]['dag_id'] == 'TEST_DAG_1'
@parameterized.expand(
[
("api/v1/dags?limit=1", ["TEST_DAG_1"]),
("api/v1/dags?limit=2", ["TEST_DAG_1", "TEST_DAG_10"]),
(
"api/v1/dags?offset=5",
["TEST_DAG_5", "TEST_DAG_6", "TEST_DAG_7", "TEST_DAG_8", "TEST_DAG_9"],
),
(
"api/v1/dags?offset=0",
[
"TEST_DAG_1",
"TEST_DAG_10",
"TEST_DAG_2",
"TEST_DAG_3",
"TEST_DAG_4",
"TEST_DAG_5",
"TEST_DAG_6",
"TEST_DAG_7",
"TEST_DAG_8",
"TEST_DAG_9",
],
),
("api/v1/dags?limit=1&offset=5", ["TEST_DAG_5"]),
("api/v1/dags?limit=1&offset=1", ["TEST_DAG_10"]),
("api/v1/dags?limit=2&offset=2", ["TEST_DAG_2", "TEST_DAG_3"]),
]
)
def test_should_respond_200_and_handle_pagination(self, url, expected_dag_ids):
self._create_dag_models(10)
response = self.client.get(url, environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
dag_ids = [dag["dag_id"] for dag in response.json["dags"]]
assert expected_dag_ids == dag_ids
assert 10 == response.json["total_entries"]
def test_should_respond_200_default_limit(self):
self._create_dag_models(101)
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 200
assert 100 == len(response.json["dags"])
assert 101 == response.json["total_entries"]
def test_should_raises_401_unauthenticated(self):
response = self.client.get("api/v1/dags")
assert_401(response)
def test_should_respond_403_unauthorized(self):
self._create_dag_models(1)
response = self.client.get("api/v1/dags", environ_overrides={'REMOTE_USER': "test_no_permissions"})
assert response.status_code == 403
class TestPatchDag(TestDagEndpoint):
file_token = SERIALIZER.dumps("/tmp/dag_1.py")
def test_should_respond_200_on_patch_is_paused(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 200
expected_response = {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": self.file_token,
"is_paused": False,
"is_active": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
}
assert response.json == expected_response
def test_should_respond_200_on_patch_with_granular_dag_access(self):
self._create_dag_models(1)
response = self.client.patch(
"/api/v1/dags/TEST_DAG_1",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test_granular_permissions"},
)
assert response.status_code == 200
def test_should_respond_400_on_invalid_request(self):
patch_body = {
"is_paused": True,
"schedule_interval": {
"__type": "CronExpression",
"value": "1 1 * * *",
},
}
dag_model = self._create_dag_model()
response = self.client.patch(f"/api/v1/dags/{dag_model.dag_id}", json=patch_body)
assert response.status_code == 400
assert response.json == {
'detail': "Property is read-only - 'schedule_interval'",
'status': 400,
'title': 'Bad Request',
'type': EXCEPTIONS_LINK_MAP[400],
}
def test_should_respond_404(self):
response = self.client.get("/api/v1/dags/INVALID_DAG", environ_overrides={'REMOTE_USER': "test"})
assert response.status_code == 404
@provide_session
def _create_dag_model(self, session=None):
dag_model = DagModel(
dag_id="TEST_DAG_1", fileloc="/tmp/dag_1.py", schedule_interval="2 2 * * *", is_paused=True
)
session.add(dag_model)
return dag_model
def test_should_raises_401_unauthenticated(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
)
assert_401(response)
def test_should_respond_200_with_update_mask(self):
dag_model = self._create_dag_model()
payload = {
"is_paused": False,
}
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}?update_mask=is_paused",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 200
expected_response = {
"dag_id": "TEST_DAG_1",
"description": None,
"fileloc": "/tmp/dag_1.py",
"file_token": self.file_token,
"is_paused": False,
"is_active": False,
"is_subdag": False,
"owners": [],
"root_dag_id": None,
"schedule_interval": {
"__type": "CronExpression",
"value": "2 2 * * *",
},
"tags": [],
}
assert response.json == expected_response
@parameterized.expand(
[
(
{
"is_paused": True,
},
"update_mask=description",
"Only `is_paused` field can be updated through the REST API",
),
(
{
"is_paused": True,
},
"update_mask=schedule_interval, description",
"Only `is_paused` field can be updated through the REST API",
),
]
)
def test_should_respond_400_for_invalid_fields_in_update_mask(self, payload, update_mask, error_message):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}?{update_mask}",
json=payload,
environ_overrides={'REMOTE_USER': "test"},
)
assert response.status_code == 400
assert response.json['detail'] == error_message
def test_should_respond_403_unauthorized(self):
dag_model = self._create_dag_model()
response = self.client.patch(
f"/api/v1/dags/{dag_model.dag_id}",
json={
"is_paused": False,
},
environ_overrides={'REMOTE_USER': "test_no_permissions"},
)
assert response.status_code == 403
|
|
# -*- coding:utf8 -*-
from __future__ import print_function
import os
import shutil
import android_tools
from build_commands import CompileCommand, IncAaptCommand, IncJavacCommand, IncDexCommand
from builder import IncrementalBuilder, Builder
from gradle_tools import get_project_info, GradleDirectoryFinder, GradleSyncClient, GradleSyncTask, \
GradleCleanCacheTask, GradleMergeDexTask, get_sync_native_file_path, fix_package_name, DataBindingProcessor, \
DatabindingDirectoryLookUp
from task import find_root_tasks, find_last_tasks, Task
from utils import get_file_content, write_file_content, is_windows_system, cexec, load_json_cache, get_md5, \
write_json_cache
from tracing import Tracing
from exceptions import FreelineException
class GradleIncBuilder(IncrementalBuilder):
def __init__(self, changed_files, config, task_engine, project_info=None):
IncrementalBuilder.__init__(self, changed_files, config, task_engine, builder_name="gradle_inc_builder")
self._project_info = project_info
self._tasks_dictionary = {}
self._module_dependencies = {}
self._all_modules = []
self._is_art = False
self._module_dir_map = {}
self._has_res_changed = self.__is_any_modules_have_res_changed()
self._changed_modules = self.__changed_modules()
self._original_changed_files = dict(changed_files)
def check_build_environment(self):
if not self._project_info:
self._project_info = get_project_info(self._config)
self._all_modules = self._project_info.keys()
for item in self._project_info.values():
self._module_dir_map[item['name']] = item['relative_dir']
for key, value in self._project_info.iteritems():
self._module_dependencies[key] = [item for item in value['local_module_dep']]
self._is_art = android_tools.get_device_sdk_version_by_adb(Builder.get_adb(self._config)) > 20
# merge all resources modified files to main resources
self.__merge_res_files()
self.__merge_native_files()
def generate_sorted_build_tasks(self):
"""
sort build tasks according to the module's dependency
:return: None
"""
for module in self._all_modules:
task = android_tools.AndroidIncrementalBuildTask(module, self.__setup_inc_command(module))
self._tasks_dictionary[module] = task
for module in self._all_modules:
task = self._tasks_dictionary[module]
for dep in self._module_dependencies[module]:
task.add_parent_task(self._tasks_dictionary[dep])
def __setup_inc_command(self, module):
return GradleCompileCommand(module, self.__setup_invoker(module))
def __setup_invoker(self, module):
return GradleIncBuildInvoker(module, self._project_info[module]['path'], self._config,
self._changed_files['projects'][module], self._project_info[module], self._is_art,
all_module_info=self._project_info, module_dir_map=self._module_dir_map,
is_any_modules_have_res_changed=self._has_res_changed,
changed_modules=self._changed_modules)
def __merge_res_files(self):
main_res = self._changed_files['projects'][self._config['main_project_name']]
for module, file_dict in self._changed_files['projects'].iteritems():
if module == self._config['main_project_name']:
continue
for key, files in file_dict.iteritems():
if key == 'res' or key == 'assets':
main_res[key].extend(files)
self._changed_files['projects'][self._config['main_project_name']] = main_res
def __merge_native_files(self):
so_files = []
for module, file_dict in self._changed_files['projects'].iteritems():
for key, files in file_dict.iteritems():
if key == 'so':
for so_file in files:
self.debug('append {} to native queue'.format(so_file))
so_files.append(so_file)
if len(so_files) > 0:
from zipfile import ZipFile
with ZipFile(get_sync_native_file_path(self._config['build_cache_dir']), "w") as nativeZip:
for so_file in so_files:
nativeZip.write(so_file)
def __is_any_modules_have_res_changed(self):
for key, value in self._changed_files['projects'].iteritems():
if len(value['res']) > 0:
self.debug('found res changes in module {}'.format(key))
return True
return False
def __changed_modules(self):
modules = []
for module, file_dict in self._changed_files['projects'].iteritems():
if len(file_dict['src']) > 0 or len(file_dict['res']) > 0 or len(file_dict['assets']) > 0:
modules.append(module)
return modules
def incremental_build(self):
merge_dex_task = GradleMergeDexTask(self._config['build_cache_dir'], self._all_modules, self._project_info)
aapt_task = GradleAaptTask(self.__setup_invoker(self._config['main_project_name']),
self._original_changed_files, self._changed_files)
task_list = self._tasks_dictionary.values()
last_tasks = find_last_tasks(task_list)
for rtask in find_root_tasks(task_list):
aapt_task.add_child_task(rtask)
clean_cache_task = GradleCleanCacheTask(self._config['build_cache_dir'], self._project_info)
sync_client = GradleSyncClient(self._is_art, self._config, self._project_info, self._all_modules)
connect_task = android_tools.ConnectDeviceTask(sync_client)
sync_task = GradleSyncTask(sync_client, self._config['build_cache_dir'])
update_stat_task = android_tools.UpdateStatTask(self._config, self._changed_files['projects'])
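# Wire the task graph: aapt_task -> per-module compile tasks -> merge_dex_task ->
# sync_task -> clean_cache_task -> update_stat_task, with connect_task feeding
# sync_task in parallel so device connection overlaps with building.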
map(lambda task: task.add_child_task(merge_dex_task), last_tasks)
connect_task.add_child_task(sync_task)
merge_dex_task.add_child_task(sync_task)
sync_task.add_child_task(clean_cache_task)
clean_cache_task.add_child_task(update_stat_task)
# self._task_engine.add_root_task(find_root_tasks(task_list))
self._task_engine.add_root_task(aapt_task)
self._task_engine.add_root_task(connect_task)
self._task_engine.start()
class GradleAaptTask(Task):
def __init__(self, invoker, original_changed_files, changed_files):
Task.__init__(self, 'gradle_aapt_task')
self._invoker = invoker
self._original_changed_files = original_changed_files
self._changed_files_ref = changed_files
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
with Tracing("generate_id_keeper_files"):
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
with Tracing("incremental_databinding_process"):
self._invoker.process_databinding(self._original_changed_files, self._changed_files_ref)
with Tracing("run_incremental_aapt_task"):
self._invoker.run_aapt_task()
with Tracing("check_other_modules_resources"):
self._invoker.check_other_modules_resources()
self._invoker.recover_original_file_path()
class GradleCompileCommand(CompileCommand):
def __init__(self, module, invoker):
self._module = module
CompileCommand.__init__(self, 'gradle_{}_compile_command'.format(module), invoker)
def _setup(self):
# self.add_command(GradleIncAaptCommand(self._module, self._invoker))
self.add_command(GradleIncJavacCommand(self._module, self._invoker))
self.add_command(GradleIncDexCommand(self._module, self._invoker))
def execute(self):
map(lambda command: command.execute(), self.command_list)
class GradleIncAaptCommand(IncAaptCommand):
def __init__(self, module_name, invoker):
IncAaptCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_res_task = self._invoker.check_res_task()
if not should_run_res_task:
self.debug('no need to execute')
return
self.debug('start to execute aapt command...')
self._invoker.fill_dependant_jars()
self._invoker.check_ids_change()
self._invoker.generate_r_file()
# self._invoker.backup_res_files()
self._invoker.run_aapt_task()
class GradleIncJavacCommand(IncJavacCommand):
def __init__(self, module_name, invoker):
IncJavacCommand.__init__(self, module_name, invoker)
def execute(self):
self._invoker.check_r_md5() # check if R.java has changed
# self._invoker.check_other_modules_resources()
should_run_javac_task = self._invoker.check_javac_task()
if not should_run_javac_task:
self.debug('no need to execute')
return
self.debug('start to execute javac command...')
self._invoker.append_r_file()
self._invoker.fill_classpaths()
self._invoker.fill_extra_javac_args()
self._invoker.clean_dex_cache()
self._invoker.run_apt_only()
self._invoker.run_javac_task()
self._invoker.run_retrolambda()
class GradleIncDexCommand(IncDexCommand):
def __init__(self, module_name, invoker):
IncDexCommand.__init__(self, module_name, invoker)
def execute(self):
should_run_dex_task = self._invoker.check_dex_task()
if not should_run_dex_task:
self.debug('no need to execute')
return
self.debug('start to execute dex command...')
self._invoker.run_dex_task()
class GradleIncBuildInvoker(android_tools.AndroidIncBuildInvoker):
def __init__(self, module_name, path, config, changed_files, module_info, is_art, all_module_info=None,
module_dir_map=None, is_any_modules_have_res_changed=False, changed_modules=None):
android_tools.AndroidIncBuildInvoker.__init__(self, module_name, path, config, changed_files, module_info,
is_art=is_art)
self._all_module_info = all_module_info
self._module_dir_map = module_dir_map
self._is_any_modules_have_res_changed = is_any_modules_have_res_changed
self._changed_modules = changed_modules
self._merged_res_paths = []
self._merged_res_paths.append(self._finder.get_backup_res_dir())
self._replace_mapper = {}
self._is_retrolambda_enabled = 'retrolambda' in self._config and self._name in self._config['retrolambda'] \
and self._config['retrolambda'][self._name]['enabled']
self._is_databinding_enabled = 'databinding_modules' in self._config and self._name in self._config[
'databinding_modules']
self._is_dagger_enabled = 'apt_libraries' in self._config and self._config['apt_libraries']['dagger']
self._apt_output_dir = None
for mname in self._all_module_info.keys():
if mname in self._config['project_source_sets']:
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_res_directory'])
self._merged_res_paths.extend(self._config['project_source_sets'][mname]['main_assets_directory'])
def before_execute(self):
self._finder = GradleDirectoryFinder(self._name, self._module_path, self._cache_dir,
package_name=self._module_info['packagename'], config=self._config)
def check_res_task(self):
if self._name != self._config['main_project_name']:
self.debug('skip {} aapt task'.format(self._name))
return False
return android_tools.AndroidIncBuildInvoker.check_res_task(self)
def fill_dependant_jars(self):
self._res_dependencies = self._module_info['dep_jar_path']
def process_databinding(self, original_changed_files, changed_files_ref):
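# For each databinding-enabled module with changed res files: re-run the
# databinding processor into the cached output directories, swap the changed
# res paths for their generated copies (recording the mapping so
# recover_original_file_path can undo it), and queue DataBindingInfo.java plus
# related generated sources as 'apt' changes so the javac step picks them up.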
if 'databinding' in self._config:
if len(self._config['databinding_modules']) == 0:
self.debug('no modules for processing databinding')
return
databinding_config = self._config['databinding']
DatabindingDirectoryLookUp.load_path_map(self._config['build_cache_dir'])
processor = DataBindingProcessor(self._config)
for module_config in databinding_config:
module_name = module_config['name']
if module_name in original_changed_files['projects']:
resources_files = original_changed_files['projects'][module_config['name']]['res']
if len(resources_files) == 0:
self.debug('module {} has no resources files changed'.format(module_name))
continue
changed_files_map = {}
res_dirs = self._config['project_source_sets'][module_name]['main_res_directory']
# TODO: detect matches missing issue
for path in resources_files:
for rdir in res_dirs:
if path.startswith(rdir):
if rdir in changed_files_map:
changed_files_map[rdir].append(path)
else:
changed_files_map[rdir] = [path]
break
for rdir in changed_files_map.keys():
output_res_dir = DatabindingDirectoryLookUp.find_target_res_path(rdir)
output_java_dir = DatabindingDirectoryLookUp.find_target_java_path(rdir)
output_layoutinfo_dir = DatabindingDirectoryLookUp.get_merged_layoutinfo_dir(self._cache_dir)
if output_res_dir and output_java_dir and output_layoutinfo_dir:
changed_files_list = changed_files_map[rdir]
processor.process_module_databinding(module_config, rdir, output_res_dir,
output_layoutinfo_dir, output_java_dir,
self._config['sdk_directory'],
changed_files=changed_files_list)
# replace file path
for path in changed_files_list:
new_path = path.replace(rdir, output_res_dir)
self._merged_res_paths.append(output_res_dir) # append new path prefix
self.debug('replace {} with output path: {}'.format(path, new_path))
self._replace_mapper[new_path] = path
self._changed_files['res'].remove(path)
self._changed_files['res'].append(new_path)
# mark java compiler
if os.path.exists(output_layoutinfo_dir):
has_layoutinfo = False
for name in os.listdir(output_layoutinfo_dir):
if name.endswith('.xml'):
has_layoutinfo = True
break
if has_layoutinfo:
info_file = os.path.join(output_java_dir, 'android', 'databinding', 'layouts',
'DataBindingInfo.java')
if os.path.exists(info_file):
append_files = [info_file]
append_files.extend(processor.extract_related_java_files(module_name,
output_layoutinfo_dir))
if 'apt' not in changed_files_ref['projects'][module_name]:
changed_files_ref['projects'][module_name]['apt'] = []
for fpath in append_files:
self.debug('add {} to {} module'.format(fpath, module_name))
changed_files_ref['projects'][module_name]['apt'].append(fpath)
if not android_tools.is_src_changed(self._config['build_cache_dir']):
android_tools.mark_src_changed(self._config['build_cache_dir'])
def _get_aapt_args(self):
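# Assemble the incremental aapt invocation: -M for the fixed manifest, -I for
# android.jar, -S for every resource directory (databinding-remapped where
# applicable), -A for asset directories, -J/--custom-package for R.java
# generation into the backup dir, -F for the output resource pack, plus
# freeline's incremental flags when changed files exist and the device runs ART.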
aapt_args = [self._aapt, 'package', '-f', '-I',
os.path.join(self._config['compile_sdk_directory'], 'android.jar'),
'-M', fix_package_name(self._config, self._finder.get_dst_manifest_path())]
for rdir in self._config['project_source_sets'][self._name]['main_res_directory']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for rdir in self._module_info['local_dep_res_path']:
if os.path.exists(rdir):
aapt_args.append('-S')
aapt_args.append(DatabindingDirectoryLookUp.find_target_res_path(rdir))
for resdir in self._module_info['dep_res_path']:
if os.path.exists(resdir):
aapt_args.append('-S')
aapt_args.append(resdir)
if 'extra_dep_res_paths' in self._config and self._config['extra_dep_res_paths'] is not None:
arr = self._config['extra_dep_res_paths']
for path in arr:
path = path.strip()
if os.path.isdir(path):
aapt_args.append('-S')
aapt_args.append(path)
aapt_args.append('-S')
aapt_args.append(self._finder.get_backup_res_dir())
freeline_assets_dir = os.path.join(self._config['build_cache_dir'], 'freeline-assets')
aapt_args.append('-A')
aapt_args.append(freeline_assets_dir)
for adir in self._config['project_source_sets'][self._name]['main_assets_directory']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['local_dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
for adir in self._module_info['dep_assets_path']:
if os.path.exists(adir):
aapt_args.append('-A')
aapt_args.append(adir)
gen_path = self._finder.get_backup_dir()
aapt_args.append('--custom-package')
aapt_args.append(self._config['package'])
aapt_args.append('-m')
aapt_args.append('-J')
aapt_args.append(gen_path)
aapt_args.append('--auto-add-overlay')
aapt_args.append('-P')
aapt_args.append(self._finder.get_public_xml_path())
final_changed_list = self._parse_changed_list()
if is_windows_system():
final_changed_list = [fpath.replace('\\', '/') for fpath in final_changed_list]
final_changed_list_chain = ':'.join(final_changed_list)
aapt_args.append('-F')
aapt_args.append(self._finder.get_dst_res_pack_path(self._name))
aapt_args.append('--debug-mode')
aapt_args.append('--auto-add-overlay')
aapt_args.append('--no-version-vectors')
if len(final_changed_list_chain) > 0 and self._is_art:
aapt_args.append('--buildIncrement')
aapt_args.append(final_changed_list_chain)
aapt_args.append('--resoucres-md5-cache-path')
aapt_args.append(os.path.join(self._cache_dir, "arsc_cache.dat"))
aapt_args.append('--ignore-assets')
aapt_args.append('public_id.xml:public.xml:*.bak:.*')
if 'ignore_resource_ids' in self._config and len(self._config['ignore_resource_ids']) > 0 and not is_windows_system():
aapt_args.append('--ignore-ids')
aapt_args.append(':'.join(self._config['ignore_resource_ids']))
return aapt_args, final_changed_list
def recover_original_file_path(self):
copylist = list(self._changed_files['res'])
for fpath in copylist:
if fpath in self._replace_mapper:
self._changed_files['res'].remove(fpath)
self._changed_files['res'].append(self._replace_mapper[fpath])
def check_other_modules_resources(self):
if self._name == self._config['main_project_name'] and self._all_module_info is not None:
changed_modules = self._changed_modules
if len(changed_modules) > 0:
self.__modify_main_r()
for module in changed_modules:
fpath = self.__modify_other_modules_r(self._all_module_info[module]['packagename'])
self.debug('modify {}'.format(fpath))
def __modify_main_r(self):
main_r_fpath = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
self.debug('modify {}'.format(main_r_fpath))
buf = GradleIncBuildInvoker.remove_final_tag(get_file_content(main_r_fpath))
buf = android_tools.fix_unicode_parse_error(buf, main_r_fpath)
write_file_content(main_r_fpath, buf)
target_main_r_dir = os.path.join(self.__get_freeline_backup_r_dir(),
self._module_info['packagename'].replace('.', os.sep))
if not os.path.exists(target_main_r_dir):
os.makedirs(target_main_r_dir)
target_main_r_path = os.path.join(target_main_r_dir, 'R.java')
self.debug('copy {} to {}'.format(main_r_fpath, target_main_r_path))
shutil.copy(main_r_fpath, target_main_r_path)
def append_r_file(self):
if self._name != self._config['main_project_name']:
backupdir = self.__get_freeline_backup_r_dir()
main_r_path = os.path.join(backupdir, self._config['package'].replace('.', os.sep), 'R.java')
# main_r_path existence means that resource modification exists, so that need to add R.java to classpath
if os.path.exists(main_r_path):
pns = [self._config['package'], self._module_info['packagename']]
for m in self._module_info['local_module_dep']:
pns.append(self._all_module_info[m]['packagename'])
for pn in pns:
rpath = os.path.join(backupdir, pn.replace('.', os.sep), 'R.java')
if os.path.exists(rpath) and rpath not in self._changed_files['src']:
self._changed_files['src'].append(rpath)
self.debug('add R.java to changed list: ' + rpath)
elif pn == self._module_info['packagename']:
fpath = self.__modify_other_modules_r(pn)
self.debug('modify {}'.format(fpath))
if fpath and os.path.exists(fpath):
self._changed_files['src'].append(fpath)
self.debug('add R.java to changed list: ' + fpath)
else:
if is_windows_system():
main_r_path = os.path.join(self._finder.get_backup_dir(),
self._module_info['packagename'].replace('.', os.sep), 'R.java')
if os.path.exists(main_r_path):
content = android_tools.fix_unicode_parse_error(get_file_content(main_r_path), main_r_path)
write_file_content(main_r_path, content)
def fill_classpaths(self):
# classpaths:
# 1. patch classes
# 2. dependent modules' patch classes
# 3. android.jar
# 4. third party jars
# 5. generated classes in build directory
patch_classes_cache_dir = self._finder.get_patch_classes_cache_dir()
self._classpaths.append(patch_classes_cache_dir)
self._classpaths.append(self._finder.get_dst_classes_dir())
for module in self._module_info['local_module_dep']:
finder = GradleDirectoryFinder(module, self._module_dir_map[module], self._cache_dir)
self._classpaths.append(finder.get_patch_classes_cache_dir())
# add main module classes dir to classpath to generate databinding files
main_module_name = self._config['main_project_name']
if self._name != main_module_name and self._is_databinding_enabled:
finder = GradleDirectoryFinder(main_module_name, self._module_dir_map[main_module_name], self._cache_dir,
config=self._config)
self._classpaths.append(finder.get_dst_classes_dir())
self._classpaths.append(os.path.join(self._config['compile_sdk_directory'], 'android.jar'))
self._classpaths.extend(self._module_info['dep_jar_path'])
# clean up the patch classes cache: drop stale R classes when R.java changed, and drop classes whose java source no longer exists
srcdirs = self._config['project_source_sets'][self._name]['main_src_directory']
for dirpath, dirnames, files in os.walk(patch_classes_cache_dir):
for fn in files:
if self._is_r_file_changed and self._module_info['packagename'] + '.R.' in fn:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
if fn.endswith('.class') and '$' not in fn and 'R.' not in fn and 'Manifest.' not in fn:
cp = os.path.join(dirpath, fn)
java_src = cp.replace('.class', '.java').split('classes' + os.path.sep)[1]
existence = False
for src_dir in srcdirs:
if os.path.exists(os.path.join(src_dir, java_src)):
existence = True
break
if not existence:
android_tools.delete_class(dirpath, fn.replace('.class', ''))
def fill_extra_javac_args(self):
if 'apt' in self._config and self._name in self._config['apt'] and self._config['apt'][self._name]['enabled']:
apt_config = self._config['apt'][self._name]
self._apt_output_dir = apt_config['aptOutput']
apt_args = ['-s', apt_config['aptOutput']]
if apt_config['processor']:
apt_args.append('-processor')
apt_args.append(apt_config['processor'])
if not apt_config['disableDiscovery']:
apt_args.append('-processorpath')
apt_args.append(apt_config['processorPath'])
apt_args.extend(apt_config['aptArgs'])
self._extra_javac_args.extend(apt_args)
elif self._is_databinding_enabled:
if self._name == self._config['main_project_name']:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt',
self._config['product_flavor'], 'debug')
else:
apt_output = os.path.join(self._config['build_directory'], 'generated', 'source', 'apt', 'release')
self._apt_output_dir = apt_output
if not os.path.exists(apt_output):
os.makedirs(apt_output)
if self._config['databinding_compiler_jar'] != '':
self.debug('add compiler jar to classpath: {}'.format(self._config['databinding_compiler_jar']))
self._module_info['dep_jar_path'].append(self._config['databinding_compiler_jar'])
apt_args = ['-s', apt_output, '-processorpath', os.pathsep.join(self._module_info['dep_jar_path'])]
self._extra_javac_args.extend(apt_args)
def run_apt_only(self):
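# When databinding needs annotation processing (DataBindingInfo.java changed),
# run a javac pass with the apt/databinding arguments so binding sources are
# regenerated, then diff the apt output directory against the cached md5
# snapshot and add new or changed generated files to the javac change list.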
if self._is_databinding_enabled and self._should_run_databinding_apt():
apt_args = self._generate_java_compile_args(extra_javac_args_enabled=True)
self.debug('apt exec: ' + ' '.join(apt_args))
output, err, code = cexec(apt_args, callback=None)
if code != 0:
raise FreelineException('apt compile failed.', '{}\n{}'.format(output, err))
if self._apt_output_dir and os.path.exists(self._apt_output_dir):
apt_cache_path = os.path.join(self._config['build_cache_dir'], 'apt_files_stat_cache.json')
if os.path.exists(apt_cache_path):
apt_cache = load_json_cache(apt_cache_path)
for dirpath, dirnames, files in os.walk(self._apt_output_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if apt_cache and self._name in apt_cache:
if fpath in apt_cache[self._name]:
new_md5 = get_md5(fpath)
if new_md5 != apt_cache[self._name][fpath]['md5']:
self.debug('detect new md5 value, add apt file to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('find new apt file, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
else:
self.debug('apt cache not found, add to change list: {}'.format(fpath))
self._changed_files['src'].append(fpath)
def run_javac_task(self):
if self._is_only_r_changed() and not self._is_other_modules_has_src_changed:
self._is_need_javac = False
android_tools.clean_src_changed_flag(self._cache_dir)
self.debug('apt process do not generate new files, ignore javac task.')
return
extra_javac_args_enabled = not (self._is_databinding_enabled and self._should_run_databinding_apt())
javacargs = self._generate_java_compile_args(extra_javac_args_enabled=extra_javac_args_enabled)
self.debug('javac exec: ' + ' '.join(javacargs))
output, err, code = cexec(javacargs, callback=None)
if code != 0:
raise FreelineException('incremental javac compile failed.', '{}\n{}'.format(output, err))
else:
if self._is_r_file_changed:
old_r_file = self._finder.get_dst_r_path(config=self._config)
new_r_file = android_tools.DirectoryFinder.get_r_file_path(self._finder.get_backup_dir())
if old_r_file and new_r_file:
shutil.copyfile(new_r_file, old_r_file)
self.debug('copy {} to {}'.format(new_r_file, old_r_file))
def _should_run_databinding_apt(self):
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
if fpath.endswith('DataBindingInfo.java'):
return True
return False
def _generate_java_compile_args(self, extra_javac_args_enabled=False):
javacargs = [self._javac]
arguments = ['-encoding', 'UTF-8', '-g']
if not self._is_retrolambda_enabled:
arguments.extend(['-target', '1.7', '-source', '1.7'])
arguments.append('-cp')
arguments.append(os.pathsep.join(self._classpaths))
for fpath in self._changed_files['src']:
arguments.append(fpath)
if extra_javac_args_enabled:
if 'apt' in self._changed_files:
for fpath in self._changed_files['apt']:
arguments.append(fpath)
filter_tags = []
if self._is_databinding_enabled:
filter_tags.extend(['BindingAdapter', 'BindingConversion', 'Bindable'])
if self._is_dagger_enabled:
filter_tags.extend(['DaggerComponent', 'DaggerModule'])
files = self._get_apt_related_files(filter_tags=filter_tags)
for fpath in files:
if fpath and os.path.exists(fpath) and fpath not in self._changed_files['src']:
if 'apt' in self._changed_files and fpath in self._changed_files['apt']:
continue
self.debug('add apt related file: {}'.format(fpath))
arguments.append(fpath)
arguments.extend(self._extra_javac_args)
arguments.append('-d')
arguments.append(self._finder.get_patch_classes_cache_dir())
# ref: https://support.microsoft.com/en-us/kb/830473
if is_windows_system():
arguments_length = sum(map(len, arguments))
if arguments_length > 8000:
argument_file_path = os.path.join(self._finder.get_module_cache_dir(), 'javac_args_file')
self.debug('arguments length: {} > 8000, save args to {}'.format(arguments_length, argument_file_path))
if os.path.exists(argument_file_path):
os.remove(argument_file_path)
arguments_content = ' '.join(arguments)
self.debug('javac arguments: ' + arguments_content)
write_file_content(argument_file_path, arguments_content)
arguments = ['@{}'.format(argument_file_path)]
javacargs.extend(arguments)
return javacargs
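    # Note on the argument-file fallback in _generate_java_compile_args above: on
    # Windows the whole command line must stay under the CreateProcess limit covered
    # by the KB article referenced there, so once the javac arguments exceed 8000
    # characters they are written to a file and javac is invoked with a single
    # @-argument, which javac expands back into the original list. Illustrative
    # command only:
    #
    #   javac @<module_cache_dir>/javac_args_file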
def _get_apt_related_files(self, filter_tags=None):
path = self._get_apt_related_files_cache_path()
if os.path.exists(path):
return load_json_cache(path)
else:
info_path = os.path.join(self._cache_dir, 'freeline_annotation_info.json')
if os.path.exists(info_path):
info_cache = load_json_cache(info_path)
related_files = []
for anno, files in info_cache.iteritems():
if filter_tags and anno not in filter_tags:
self.debug('ignore annotation: {}'.format(anno))
continue
for info in files:
if info['module'] == self._name or info['module'] in self._module_info['local_module_dep']:
if 'java_path' in info and info['java_path']:
related_files.append(info['java_path'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
return related_files
return []
def _append_new_related_files(self):
related_files = self._get_apt_related_files()
def append_files(file_list):
for fpath in file_list:
if fpath and fpath not in related_files:
self.debug('add new related file: {}'.format(fpath))
related_files.append(fpath)
append_files(self._changed_files['src'])
append_files(self._changed_files['apt'])
write_json_cache(self._get_apt_related_files_cache_path(), related_files)
def _get_apt_related_files_cache_path(self):
return os.path.join(self._cache_dir, 'apt_related_files_cache.json')
def run_retrolambda(self):
if self._is_need_javac and self._is_retrolambda_enabled:
lambda_config = self._config['retrolambda'][self._name]
target_dir = self._finder.get_patch_classes_cache_dir()
jar_args = [Builder.get_java(self._config),
'-Dretrolambda.inputDir={}'.format(target_dir),
'-Dretrolambda.outputDir={}'.format(target_dir)]
if lambda_config['supportIncludeFiles']:
files_stat_path = os.path.join(self._cache_dir, self._name, 'lambda_files_stat.json')
include_files = []
if os.path.exists(files_stat_path):
files_stat = load_json_cache(files_stat_path)
else:
files_stat = {}
for dirpath, dirnames, files in os.walk(target_dir):
for fn in files:
fpath = os.path.join(dirpath, fn)
if fpath not in files_stat:
include_files.append(fpath)
self.debug('incremental build new lambda file: {}'.format(fpath))
else:
if os.path.getmtime(fpath) > files_stat[fpath]['mtime']:
include_files.append(fpath)
self.debug('incremental build lambda file: {}'.format(fpath))
include_files_param = os.pathsep.join(include_files)
if len(include_files_param) > 3496:
include_files_path = os.path.join(self._cache_dir, self._name, 'retrolambda_inc.list')
self.__save_parms_to_file(include_files_path, include_files)
jar_args.append('-Dretrolambda.includedFile={}'.format(include_files_path))
else:
jar_args.append('-Dretrolambda.includedFiles={}'.format(include_files_param))
lambda_classpaths = [target_dir, lambda_config['rtJar']]
lambda_classpaths.extend(self._classpaths)
param = os.pathsep.join(lambda_classpaths)
if lambda_config['supportIncludeFiles'] and len(param) > 3496:
classpath_file = os.path.join(self._cache_dir, self._name, 'retrolambda_classpaths.path')
self.__save_parms_to_file(classpath_file, lambda_classpaths)
jar_args.append('-Dretrolambda.classpathFile={}'.format(classpath_file))
else:
jar_args.append('-Dretrolambda.classpath={}'.format(param))
jar_args.append('-cp')
jar_args.append(lambda_config['targetJar'])
jar_args.append(lambda_config['mainClass'])
self.debug('retrolambda exec: ' + ' '.join(jar_args))
output, err, code = cexec(jar_args, callback=None)
if code != 0:
raise FreelineException('retrolambda compile failed.', '{}\n{}'.format(output, err))
if lambda_config['supportIncludeFiles']:
for fpath in include_files:
if fpath not in files_stat:
files_stat[fpath] = {}
files_stat[fpath]['mtime'] = os.path.getmtime(fpath)
write_json_cache(files_stat_path, files_stat)
self.debug('save lambda files stat to {}'.format(files_stat_path))
def __save_parms_to_file(self, path, params):
if os.path.exists(path):
os.remove(path)
content = ''
for param in params:
content += param + '\n'
write_file_content(path, content)
self.debug('save retrolambda params to {}'.format(path))
def _get_res_incremental_dst_path(self, fpath):
if 'assets' + os.sep in fpath:
return os.path.join(self._finder.get_base_gen_dir(), 'assets', 'debug', fpath.split('assets' + os.sep)[1])
elif 'res' + os.sep in fpath:
return os.path.join(self._finder.get_res_dir(), fpath.split('res' + os.sep)[1])
def _parse_changed_list(self):
changed_list = []
for rfile in self._changed_files['res']:
if rfile not in changed_list:
changed_list.append(self._get_res_relative_path(rfile))
for afile in self._changed_files['assets']:
if afile not in changed_list:
changed_list.append(self._get_res_relative_path(afile))
return changed_list
def _get_res_relative_path(self, res):
if res.startswith('res') or res.startswith('AndroidManifest.xml'):
return res
def path_fix(path):
return path if path.endswith(os.sep) else path + os.sep
for respath in self._merged_res_paths:
respath = path_fix(respath)
if res.startswith(respath):
index = respath.strip(os.sep).rfind(os.sep)
if index >= 0:
res_dir_name = respath[index + 1:].strip(os.sep)
relative_path = os.path.join(res_dir_name, res.replace(respath, ''))
self.debug("find relative path: {}".format(relative_path))
return relative_path
self.debug('relative path not found: {}'.format(res))
return None
def __get_freeline_backup_r_dir(self):
dirpath = os.path.join(self._cache_dir, 'freeline-backup-r')
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return dirpath
def __modify_other_modules_r(self, package_name, finder=None):
if not finder:
finder = self._finder
r_path = android_tools.find_r_file(finder.get_dst_r_dir(), package_name=package_name)
if r_path and os.path.exists(r_path):
target_dir = os.path.join(self.__get_freeline_backup_r_dir(), package_name.replace('.', os.sep))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_path = os.path.join(target_dir, 'R.java')
if not os.path.exists(target_path):
self.debug('copy {} to {}'.format(r_path, target_path))
shutil.copy(r_path, target_path)
content = get_file_content(target_path)
content = GradleIncBuildInvoker.remove_final_tag(content)
content = GradleIncBuildInvoker.extend_main_r(content, self._config['package'])
content = android_tools.fix_unicode_parse_error(content, target_path)
write_file_content(target_path, content)
return target_path
def __find_res_in_which_module(self, res_path):
for module in self._all_module_info.keys():
# rdir = android_tools.get_res_dir(module)
res_dirs = self._config['project_source_sets'][module]['main_res_directory']
for rdir in res_dirs:
if rdir is not None:
if res_path.startswith(rdir) or rdir in res_path:
return module
return None
@staticmethod
def remove_final_tag(content):
content = content.replace('public final class', 'public class').replace('public static final class',
'public static class')
return content
@staticmethod
def extend_main_r(content, main_package_name):
import re
result = re.findall(r'''public static class (.*) \{''', content)
for tag in result:
content = content.replace('class ' + tag + ' {',
'class ' + tag + ' extends ' + main_package_name + '.R.' + tag + ' {')
return content
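# Minimal illustration (not part of Freeline itself) of what the two static helpers
# above do to a dependent module's R.java; the package name below is made up.
def _example_r_rewrite():
    source = ('public final class R {\n'
              '    public static final class string {\n'
              '    }\n'
              '}\n')
    patched = GradleIncBuildInvoker.remove_final_tag(source)
    patched = GradleIncBuildInvoker.extend_main_r(patched, 'com.example.app')
    # The 'final' modifiers are stripped so the classes can be subclassed, and each
    # inner class now extends the main project's R, e.g.:
    #   public static class string extends com.example.app.R.string {
    return patched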
|
|
"""
Module for a subscription object, which manages a podcast URL, name, and information about how
many episodes of the podcast we have.
"""
import collections
import datetime
import enum
import logging
import os
import platform
import time
from typing import Any, Dict, List, Mapping, Optional, Tuple, MutableSequence
import drewtilities as util
import eyed3
import feedparser
import magic
import requests
from eyed3.id3 import Genre
import puckfetcher.constants as constants
import puckfetcher.error as error
DATE_FORMAT_STRING = "%Y%m%dT%H:%M:%S.%f"
HEADERS = {"User-Agent": constants.USER_AGENT}
MAX_RECURSIVE_ATTEMPTS = 10
SUMMARY_LIMIT = 15
LOG = logging.getLogger("root")
# TODO describe field members, function parameters in docstrings.
class Subscription(object):
"""Object describing a podcast subscription."""
def __init__(self, url: str=None, name: str=None, directory: str=None) -> None:
"""
Object constructor for subscription.
:param url: URL of subscription. Required.
        :param name: Name of subscription, used as identifier. Required.
:param directory: Storage directory for subscription. Will otherwise be based on name.
"""
# Maintain separate data members for originally provided URL, and URL we may change due to
# redirects.
if url is None or url == "":
msg = f"URL '{url}' is None or empty - can't create subscription."
raise error.MalformedSubscriptionError(msg)
# Maintain name of podcast.
if name is None or name == "":
msg = f"Name '{name}' is None or empty - can't create subscription."
raise error.MalformedSubscriptionError(msg)
# Temporary storage for swapping around urls.
self.temp_url: str = ""
LOG.debug(f"Storing provided url '{url}'.")
self.url = url
self.original_url = url
LOG.debug(f"Storing provided name '{name}'.")
self.metadata = {
"name": name,
"artist": "",
"album": name,
"album_artist": "",
}
# Our file downloader.
self.downloader = util.generate_downloader(HEADERS, self.metadata["name"])
feedparser.USER_AGENT = constants.USER_AGENT
# Our wrapper around feedparser's parse for rate limiting.
self.parser = _generate_feedparser(self.metadata["name"])
# Store feed state, including etag/last_modified.
self.feed_state = _FeedState()
self.directory = _process_directory(directory)
self.settings: Dict[str, Any] = {
"use_title_as_filename": None,
"backlog_limit": 0,
"set_tags": False,
"overwrite_title": False,
}
@classmethod
def decode_subscription(cls, sub_dictionary: Mapping[str, Any]) -> "Subscription":
"""
Decode subscription from dictionary.
:param sub_dictionary: Dictionary from JSON to use to decode object.
:returns: Subscription object built from dictionary.
"""
url = sub_dictionary.get("url", None)
if url is None:
msg = "URL in subscription to decode is null. Cannot decode."
raise error.MalformedSubscriptionError(msg)
original_url = sub_dictionary.get("original_url", None)
directory = sub_dictionary.get("directory", None)
feed_state = _FeedState(feedstate_dict=sub_dictionary.get("feed_state", None))
name = sub_dictionary.get("name", None)
if name is None:
msg = "Name in subscription to decode is null. Cannot decode."
raise error.MalformedSubscriptionError(msg)
sub = Subscription(url=url, name=name, directory=directory)
sub.original_url = original_url
sub.feed_state = feed_state
if "settings" in sub_dictionary.keys():
sub.settings = sub_dictionary["settings"]
else:
sub.settings = {
"use_title_as_filename": sub_dictionary.get("use_title_as_filename", False),
"backlog_limit": sub_dictionary.get("backlog_limit", 0),
"set_tags": sub_dictionary.get("set_tags", False),
"overwrite_title": sub_dictionary.get("overwrite_title", False),
}
if "metadata" in sub_dictionary.keys():
sub.metadata = sub_dictionary["metadata"]
else:
sub.metadata = {
"name": name,
"artist": sub_dictionary.get("artist", ""),
"album": sub_dictionary.get("album", name),
"album_artist": sub_dictionary.get("album_artist", ""),
}
# Generate data members that shouldn't/won't be cached.
sub.downloader = util.generate_downloader(HEADERS, sub.metadata["name"])
return sub
@classmethod
def encode_subscription(cls, sub: "Subscription") -> Mapping[str, Any]:
"""
Encode subscription to dictionary.
:param sub: Subscription object to turn into a dictionary.
:returns: A dictionary that can be written to JSON or passed around.
"""
return {
"__type__": "subscription",
"__version__": constants.VERSION,
"url": sub.url,
"original_url": sub.original_url,
"directory": sub.directory,
"settings": sub.settings,
"feed_state": sub.feed_state.as_dict(),
"metadata": sub.metadata,
"name": sub.metadata["name"],
}
@staticmethod
def parse_from_user_yaml(
sub_yaml: Mapping[str, Any],
defaults: Mapping[str, Any],
) -> "Subscription":
"""
Parse YAML user-provided subscription into a subscription object, using config-provided
options as defaults.
:param sub_yaml: Dictionary from user config file to use to build subscription.
:param defaults: Default values for all fields to substitute for what user didn't provide.
:returns: Subscription object based on user config and defaults.
"""
if "name" not in sub_yaml:
msg = "No name provided in config file. Cannot create subscription."
raise error.MalformedSubscriptionError(msg)
if "url" not in sub_yaml:
msg = "No URL provided in config file. Cannot create subscription."
raise error.MalformedSubscriptionError(msg)
name = sub_yaml["name"]
url = sub_yaml["url"]
directory = sub_yaml.get("directory", os.path.join(defaults["directory"], name))
sub = Subscription(url=url, name=name, directory=directory)
sub.original_url = sub_yaml["url"]
sub.settings["use_title_as_filename"] = sub_yaml.get("use_title_as_filename",
defaults["use_title_as_filename"])
sub.settings["backlog_limit"] = sub_yaml.get("backlog_limit", defaults["backlog_limit"])
sub.settings["set_tags"] = sub_yaml.get("set_tags", defaults["set_tags"])
sub.settings["overwrite_title"] = sub_yaml.get("overwrite_title", False)
sub.metadata["name"] = name
sub.metadata["artist"] = sub_yaml.get("artist", "")
sub.metadata["album"] = sub_yaml.get("album", name)
sub.metadata["album_artist"] = sub_yaml.get("album_artist", "")
return sub
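    # For reference, the per-subscription YAML block consumed above looks roughly
    # like this (field names taken from the lookups in parse_from_user_yaml; the
    # values are made up):
    #
    #   name: Example Podcast
    #   url: https://example.com/feed.rss
    #   directory: ~/podcasts/example     # optional, defaults to <defaults directory>/<name>
    #   use_title_as_filename: false      # optional, falls back to the config default
    #   backlog_limit: 5                  # optional, falls back to the config default
    #   set_tags: false                   # optional
    #   overwrite_title: false            # optional
    #   artist: Example Host              # optional metadata, like album/album_artist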
# "Public" functions.
def latest(self) -> int:
"""
Return latest entry number.
:returns: Latest entry number for this subscription.
"""
return self.feed_state.latest_entry_number
def attempt_update(self) -> bool:
"""
Attempt to download new entries for a subscription.
:returns: Whether update succeeded or failed.
"""
# Attempt to populate self.feed_state from subscription URL.
feed_get_result = self.get_feed()
if feed_get_result not in (UpdateResult.SUCCESS, UpdateResult.UNNEEDED):
return False
LOG.info(f"Subscription {self.metadata['name']} got updated feed.")
# Only consider backlog if we don't have a latest entry number already.
number_feeds = len(self.feed_state.entries)
if self.latest() is None:
if self.settings["backlog_limit"] is None:
self.feed_state.latest_entry_number = 0
LOG.info(f"Interpreting 'None' backlog limit as 'No Limit' and downloading full "
f"backlog ({number_feeds} entries).")
elif self.settings["backlog_limit"] < 0:
LOG.error(f"Invalid backlog limit {self.settings['backlog_limit']}, "
f"downloading nothing.")
return False
elif self.settings["backlog_limit"] > 0:
LOG.info(f"Backlog limit provided as '{self.settings['backlog_limit']}'")
self.settings["backlog_limit"] = util.max_clamp(self.settings["backlog_limit"],
number_feeds)
LOG.info(f"Backlog limit clamped to '{self.settings['backlog_limit']}'")
self.feed_state.latest_entry_number = number_feeds - self.settings["backlog_limit"]
else:
self.feed_state.latest_entry_number = number_feeds
LOG.info(f"Download backlog for {self.metadata['name']} is zero."
f"\nNot downloading backlog but setting number downloaded to "
f"{self.latest()}.")
if self.latest() >= number_feeds:
LOG.info(f"Num downloaded for {self.metadata['name']} matches feed "
f"entry count {number_feeds}."
f"\nNothing to do.")
return True
number_to_download = number_feeds - self.latest()
LOG.info(f"Number of downloaded feeds for {self.metadata['name']} is {self.latest()}, "
f"{number_to_download} less than feed entry count {number_feeds}."
f"\nDownloading {number_to_download} entries.")
# Queuing feeds in order of age makes the most sense for RSS feeds, so we do that.
for i in range(self.latest(), number_feeds):
self.feed_state.queue.append(i + 1)
self.download_queue()
return True
def download_queue(self) -> None:
"""
Download feed enclosure(s) for all entries in the queue.
"""
LOG.info(f"Queue for sub {self.metadata['name']} "
f"has {len(self.feed_state.queue)} entries.")
try:
while self.feed_state.queue:
# Pull index from queue, transform from one-indexing to zero-indexing.
one_indexed_entry_num = self.feed_state.queue.popleft()
entry_num = one_indexed_entry_num - 1
# Fetch matching entry.
num_entries = len(self.feed_state.entries)
# Do a bounds check in case we accidentally let something bad into the queue.
if entry_num < 0 or entry_num >= num_entries:
LOG.debug(f"Invalid num {one_indexed_entry_num} in queue - skipping.")
continue
entry_age = num_entries - (one_indexed_entry_num)
entry = self.feed_state.entries[entry_age]
# Don't overwrite files if we have the matching entry downloaded already, according
# to records.
if self.feed_state.entries_state_dict.get(entry_num, False):
LOG.info(f"SKIPPING entry number {one_indexed_entry_num} (age {entry_age}) "
f"for '{self.metadata['name']}' - it's recorded as downloaded.")
else:
urls = entry["urls"]
num_entry_files = len(urls)
LOG.info(f"Trying to download entry number {one_indexed_entry_num}"
f"(age {entry_age}) for '{self.metadata['name']}'.")
# Create directory just for enclosures for this entry if there are many.
directory = self.directory
if num_entry_files > 1:
directory = os.path.join(directory, entry["title"])
msg = f"Creating directory to store {num_entry_files} enclosures."
LOG.info(msg)
for i, url in enumerate(urls):
if num_entry_files > 1:
LOG.info(f"Downloading enclosure {i+1} of {num_entry_files}.")
LOG.debug(f"Extracted url {url} from enclosure.")
# TODO catch errors? What if we try to save to a nonsense file?
dest = self._get_dest(url=url, title=entry["title"], directory=directory)
self.downloader(url=url, dest=dest)
self.check_tag_edit_safe(dest, entry)
if one_indexed_entry_num > self.feed_state.latest_entry_number:
self.feed_state.latest_entry_number = one_indexed_entry_num
LOG.info(
f"Have {one_indexed_entry_num} entries for "
f"{self.metadata['name']}.")
# Update various things now that we've downloaded a new entry.
self.feed_state.entries_state_dict[entry_num] = True
self.feed_state.summary_queue.append(
{
"number": one_indexed_entry_num,
"name": entry["title"],
"is_this_session": True,
})
except KeyboardInterrupt:
self.feed_state.queue.appendleft(entry_num)
def enqueue(self, nums: List[int]) -> List[int]:
"""
Add entries to this subscription's download queue.
:param nums: List of episode numbers (numbered from 1 in the RSS feed) to add to queue.
:returns: List of episode numbers added to queue.
Duplicates and items already in queue will be ignored,
as will numbers out-of-bounds.
"""
actual_nums = _filter_nums(nums=nums, max_lim=len(self.feed_state.entries))
for one_indexed_num in actual_nums:
if one_indexed_num not in self.feed_state.queue:
self.feed_state.queue.append(one_indexed_num)
LOG.info(f"New queue for {self.metadata['name']}: {list(self.feed_state.queue)}")
return actual_nums
def mark(self, nums: List[int]) -> List[int]:
"""
Mark entries as downloaded for this subscription. Do not download or do anything else.
:param nums: List of episode numbers (numbered from 1 in the RSS feed) to mark as
downloaded.
:returns: List of episode numbers marked as downloaded.
Items out-of-bounds will be ignored.
"""
actual_nums = _filter_nums(nums=nums, max_lim=len(self.feed_state.entries))
for one_indexed_num in actual_nums:
num = one_indexed_num - 1
self.feed_state.entries_state_dict[num] = True
LOG.info(f"Items marked as downloaded for {self.metadata['name']}: {actual_nums}.")
return actual_nums
def unmark(self, nums: List[int]) -> List[int]:
"""
Mark entries as not downloaded for this subscription.
Do not download or do anything else.
:param nums: List of episode numbers (numbered from 1 in the RSS feed) to unmark as
downloaded.
:returns: List of episode numbers unmarked as downloaded.
Items out-of-bounds will be ignored.
"""
actual_nums = _filter_nums(nums=nums, max_lim=len(self.feed_state.entries))
for one_indexed_num in actual_nums:
num = one_indexed_num - 1
self.feed_state.entries_state_dict[num] = False
LOG.info(f"Items marked as not downloaded for {self.metadata['name']}: {actual_nums}.")
return actual_nums
def update(
self,
*,
directory: str=None,
config_dir: Any=None,
url: str=None,
set_original: bool=False,
name: str=None,
settings: Mapping[str, Any]=None,
metadata: Mapping[str, Any]=None,
) -> None:
"""
Update values for this subscription.
If a value is not provided,
the corresponding property will not be updated.
:param directory: New directory for subscription.
:param config_dir: New config_dir for subscription.
:param url: New url for subscription.
:param set_original: When provided with new url,
will update the original_url property as well.
:param name: New name for subscription.
:param settings: New settings to merge with current settings.
:param metadata: New metadata to merge with current metadata.
"""
if directory == "":
directory = None
if config_dir is None:
config_dir = "."
if directory is not None:
d = util.expand(directory)
if self.directory != d:
if os.path.isabs(d):
self.directory = d
else:
self.directory = os.path.join(config_dir, d)
util.ensure_dir(self.directory)
if url is not None:
self.url = url
if set_original:
self.original_url = url
if settings is not None:
self.settings = {**self.settings, **settings}
if metadata is not None:
self.metadata = {**self.metadata, **metadata}
def default_missing_fields(self, settings: Mapping[str, Any]) -> None:
"""
Set default values for any fields that are None (ones that were never set).
:param settings: New settings to override unset ones.
"""
# NOTE - directory is set separately, because we'll want to create it.
# The options set here are just plain options.
if self.settings["backlog_limit"] is None:
self.settings["backlog_limit"] = settings["backlog_limit"]
if self.settings["use_title_as_filename"] is None:
self.settings["use_title_as_filename"] = settings["use_title_as_filename"]
if not hasattr(self, "feed_state") or self.feed_state is None:
self.feed_state = _FeedState()
self.downloader = util.generate_downloader(HEADERS, self.metadata["name"])
self.parser = _generate_feedparser(self.metadata["name"])
def get_status(self, index: int, total_subs: int) -> str:
"""
Provide status of subscription.
:param index: Index of this subscription,
used for display purposes.
:param total_subs: Total number of subscriptions,
used for display purposes.
:returns: String with display numbers,
name,
and latest entry number.
"""
one_indexed_indent = index + 1
pad_num = len(str(total_subs))
padded_cur_num = str(one_indexed_indent).zfill(pad_num)
if self.latest() is not None:
one_indexed_entry_num = self.latest() + 1
else:
one_indexed_entry_num = self.latest()
return f"{padded_cur_num}/{total_subs} - '{self.metadata['name']}' " + \
f"|{one_indexed_entry_num}|"
def get_details(self, index: int, total_subs: int) -> None:
"""
Provide multiline summary of subscription state.
:param index: Index of this subscription,
used for display purposes.
:param total_subs: Total number of subscriptions,
used for display purposes.
:returns: Multiline string containing oneline status,
this subscription's queue,
and the state of this subscription's entries.
"""
detail_lines = []
detail_lines.append(self.get_status(index, total_subs))
num_entries = len(self.feed_state.entries)
pad_num = len(str(num_entries))
detail_lines.append("Status of podcast queue:")
detail_lines.append(f"{repr(self.feed_state.queue)}")
detail_lines.append("")
detail_lines.append("Status of podcast entries:")
entry_indicators = []
for entry in range(num_entries):
if self.feed_state.entries_state_dict.get(entry, False):
indicator = "+"
else:
indicator = "-"
entry_indicators.append(f"{str(entry+1).zfill(pad_num)}{indicator}")
detail_lines.append(" ".join(entry_indicators))
details = "\n".join(detail_lines)
LOG.info(details)
def get_feed(self, attempt_count: int=0) -> "UpdateResult":
"""
Get latest RSS structure for this subscription.
Return status code indicating result.
:param attempt_count: Number of times to attempt.
:returns: UpdateResult status code.
"""
res = None
if attempt_count > MAX_RECURSIVE_ATTEMPTS:
LOG.debug(f"Too many recursive attempts ({attempt_count}) to get feed for sub"
f"{self.metadata['name']}, canceling.")
res = UpdateResult.FAILURE
elif self.url is None or self.url == "":
LOG.debug(f"URL {self.url} is empty , cannot get feed for sub "
f"{self.metadata['name']}.")
res = UpdateResult.FAILURE
if res is not None:
return res
else:
LOG.info(f"Getting entries (attempt {attempt_count}) for {self.metadata['name']} "
f"from {self.url}.")
(parsed, code) = self._feedparser_parse_with_options()
if code == UpdateResult.UNNEEDED:
LOG.info("We have the latest feed, nothing to do.")
return code
elif code != UpdateResult.SUCCESS:
LOG.info(f"Feedparser parse failed ({code}), aborting.")
return code
LOG.debug("Feedparser parse succeeded.")
# Detect some kinds of HTTP status codes signaling failure.
code = self._handle_http_codes(parsed)
if code == UpdateResult.ATTEMPT_AGAIN:
LOG.debug("Transient HTTP error, attempting again.")
temp = self.temp_url
code = self.get_feed(attempt_count=attempt_count + 1)
if temp is not None:
self.url = temp
elif code != UpdateResult.SUCCESS:
LOG.debug(f"Ran into HTTP error ({code}), aborting.")
else:
self.feed_state.load_rss_info(parsed)
return code
def session_summary(self) -> List[str]:
"""
Provide items downloaded in this session in convenient form.
:returns: List of name/number strings for items downloaded this session.
"""
return [f"{item['name']} (#{item['number']})"
for item in self.feed_state.summary_queue
if item["is_this_session"]]
def full_summary(self) -> List[str]:
"""
Provide items downloaded recently in convenient form.
:returns: List of name/number strings for items in the summary queue.
"""
return [f"{item['name']} (#{item['number']})" for item in self.feed_state.summary_queue]
def check_tag_edit_safe(self, dest: str, entry: Mapping[str, Any]) -> None:
"""
Check if we can safely edit ID3v2 tags.
Will do nothing if this is not an MP3 file.
:param dest: File to change tags on.
:param entry: Entry from RSS feed to use,
alongside metadata,
to populate ID3v2 tags.
"""
# need to test this is an MP3 file first.
magic_res = magic.from_file(dest)
# love 2 parse files
# we're fairly conservative here, because I would rather not put tags in
# than accidentally break a file.
mp3_signifiers = [
"MPEG ADTS, layer III",
"Audio file with ID3",
]
file_is_mp3 = False
for signifier in mp3_signifiers:
if signifier in magic_res:
file_is_mp3 = True
break
if not file_is_mp3:
LOG.info(
f"Skipping adding tags for {dest}, "
"because it doesn't seem to be an mp3 file."
)
return
LOG.info(f"Editing tags for {dest}.")
self.process_tags(dest, entry)
def process_tags(self, dest: str, entry: Mapping[str, Any]) -> None:
"""
Set ID3v2 tags on downloaded MP3 file.
:param dest: File to change tags on.
:param entry: Entry from RSS feed to use,
alongside metadata,
to populate ID3v2 tags.
"""
audiofile = eyed3.load(dest)
# Process tags. If set to set_tags and tags are empty, write tags.
# Pull tags into sub metadata if it's not set.
# Pull tags into entry unless they're empty, and then try sub.
# TODO do this cleaner, but we shouldn't crash out if tags can't be set.
try:
LOG.info(f"Artist tag is '{audiofile.tag.artist}'.")
if audiofile.tag.artist == "" and self.settings["set_tags"]:
LOG.info(f"Setting artist tag to '{self.metadata['artist']}'.")
audiofile.tag.artist = self.metadata["artist"]
if self.metadata["artist"] == "":
self.metadata["artist"] = audiofile.tag.artist
if audiofile.tag.artist != "":
entry["metadata"]["artist"] = audiofile.tag.artist
else:
entry["metadata"]["artist"] = self.metadata["artist"]
LOG.info(f"Album tag is '{audiofile.tag.album}'.")
if audiofile.tag.album == "":
LOG.info(f"Setting album tag to '{self.metadata['album']}'.")
audiofile.tag.album = self.metadata["album"]
if self.metadata["album"] == "":
self.metadata["album"] = audiofile.tag.album
if audiofile.tag.album != "":
entry["metadata"]["album"] = audiofile.tag.album
else:
entry["metadata"]["album"] = self.metadata["album"]
LOG.info(f"Album Artist tag is '{audiofile.tag.album_artist}'.")
if audiofile.tag.album_artist == "":
LOG.info(f"Setting album_artist tag to '{self.metadata['album_artist']}'.")
audiofile.tag.album_artist = self.metadata["album_artist"]
if self.metadata["album_artist"] == "":
self.metadata["album_artist"] = audiofile.tag.album_artist
if audiofile.tag.album_artist != "":
entry["metadata"]["album_artist"] = audiofile.tag.album_artist
else:
entry["metadata"]["album_artist"] = self.metadata["album_artist"]
LOG.info(f"Title tag is '{audiofile.tag.title}'.")
LOG.info(f"Overwrite setting is set to '{self.settings['overwrite_title']}'.")
if audiofile.tag.title == "" or self.settings["overwrite_title"]:
LOG.info(f"Setting title tag to '{entry['title']}'.")
audiofile.tag.title = entry["title"]
# Store some extra tags on the entry.
            # Doesn't matter if they're empty; they're empty on the entry too.
# If the genre tag is not set,
# default it to the Podcast genre.
# Genre id list can be found at:
# https://eyed3.readthedocs.io/en/latest/plugins/genres_plugin.html?highlight=genre
if audiofile.tag.genre is None:
audiofile.tag.genre = Genre(id=186)
entry["metadata"]["genre"] = audiofile.tag.genre.name
entry["metadata"]["date"] = str(audiofile.tag.getBestDate(prefer_recording_date=True))
# TODO this is catching "Unable to write ID3 v2.2" error.
            # should be able to handle this more gracefully.
try:
audiofile.tag.save()
except NotImplementedError as ef:
LOG.warning(f"Caught NotImplementedError {ef}. Skipping tag setting.")
return
# TODO this is catching tag not being present at all, I think, which should be fixable.
except AttributeError as e:
LOG.warning(f"Caught AttributeError {e}. Skipping tag setting.")
return
def as_config_yaml(self) -> Mapping[str, Any]:
"""
Return self as config file YAML-ready dictionary.
:returns: Dict of parameters ready for YAML.
"""
return {
"url": self.original_url,
"name": self.metadata["name"],
"artist": self.metadata["artist"],
"album": self.metadata["album"],
"album_artist": self.metadata["album_artist"],
"backlog_limit": self.settings["backlog_limit"],
"set_tags": self.settings["set_tags"],
"overwrite_title": self.settings["overwrite_title"],
"directory": self.directory
}
# "Private" class functions (messy internals).
def _feedparser_parse_with_options(self) -> Tuple[feedparser.FeedParserDict, "UpdateResult"]:
"""
Perform a feedparser parse, providing arguments (like etag) we might want it to use.
Don't provide etag/last_modified if the last get was unsuccessful.
"""
if self.feed_state.last_modified is not None:
last_mod = self.feed_state.last_modified.timetuple()
else:
last_mod = None
# NOTE - this naming is a bit confusing here - parser is really a thing you call with
# arguments to get a feedparser result.
# Maybe better called parser-generator, or parse-performer or something?
parsed = self.parser(self.url, self.feed_state.etag, last_mod)
self.feed_state.etag = parsed.get("etag", self.feed_state.etag)
self.feed_state.store_last_modified(parsed.get("modified_parsed", None))
# Detect bozo errors (malformed RSS/ATOM feeds).
if "status" not in parsed and parsed.get("bozo", None) == 1:
# NOTE: Feedparser documentation indicates that you can always call getMessage, but
# it's possible for feedparser to spit out a URLError, which doesn't have getMessage.
# Catch this case.
            if hasattr(parsed.bozo_exception, "getMessage"):
msg = parsed.bozo_exception.getMessage()
else:
msg = repr(parsed.bozo_exception)
LOG.info(f"Unable to retrieve feed for {self.metadata['name']} from {self.url}.")
LOG.debug(f"Update failed because bozo exception {msg} occurred.")
return (None, UpdateResult.FAILURE)
elif parsed.get("status") == requests.codes["NOT_MODIFIED"]:
LOG.debug("No update to feed, nothing to do.")
return (None, UpdateResult.UNNEEDED)
else:
return (parsed, UpdateResult.SUCCESS)
def _handle_http_codes(self, parsed: feedparser.FeedParserDict) -> "UpdateResult":
"""
Given feedparser parse result, determine if parse succeeded, and what to do about that.
"""
# feedparser gives no status if you feedparse a local file.
if "status" not in parsed:
LOG.debug("Saw status 200 - OK, all is well.")
return UpdateResult.SUCCESS
status = parsed.get("status", 200)
result = UpdateResult.SUCCESS
if status == requests.codes["NOT_FOUND"]:
LOG.error(f"Saw status {status}, unable to retrieve feed text for "
f"{self.metadata['name']}."
f"\nStored URL {self.url} for {self.metadata['name']} will be preserved"
f"and checked again on next attempt.")
result = UpdateResult.FAILURE
elif status in [requests.codes["UNAUTHORIZED"], requests.codes["GONE"]]:
LOG.error(f"Saw status {status}, unable to retrieve feed text for "
f"{self.metadata['name']}."
f"\nClearing stored URL {self.url} for {self.metadata['name']}."
f"\nPlease provide new URL and authorization for subscription "
f"{self.metadata['name']}.")
self.url = ""
result = UpdateResult.FAILURE
# handle redirecting errors
elif status in [requests.codes["MOVED_PERMANENTLY"], requests.codes["PERMANENT_REDIRECT"]]:
LOG.warning(f"Saw status {status} indicating permanent URL change."
f"\nChanging stored URL {self.url} for {self.metadata['name']} to "
f"{parsed.get('href')} and attempting get with new URL.")
self.url = parsed.get("href")
result = UpdateResult.ATTEMPT_AGAIN
elif status in [requests.codes["FOUND"], requests.codes["SEE_OTHER"],
requests.codes["TEMPORARY_REDIRECT"]]:
LOG.warning(f"Saw status {status} indicating temporary URL change."
f"\nAttempting with new URL {parsed.get('href')}."
f"\nStored URL {self.url} for {self.metadata['name']} will be unchanged.")
self.temp_url = self.url
self.url = parsed.get("href")
result = UpdateResult.ATTEMPT_AGAIN
elif status != 200:
LOG.warning(f"Saw '{status}'. Retrying retrieve for {self.metadata['name']} "
f"at {self.url}.")
result = UpdateResult.ATTEMPT_AGAIN
else:
LOG.debug("Saw status 200. Success!")
return result
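    # Quick reference for the status handling above (status -> action):
    #   404                     -> FAILURE, stored URL kept for the next attempt
    #   401, 410                -> FAILURE, stored URL cleared (new URL/auth needed)
    #   301, 308                -> stored URL replaced with the redirect target, ATTEMPT_AGAIN
    #   302, 303, 307           -> redirect target tried once, original URL restored
    #                              afterwards, ATTEMPT_AGAIN
    #   any other non-200       -> ATTEMPT_AGAIN with the same URL
    #   200, or no status field -> SUCCESS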
def _get_dest(self, url: str, title: str, directory: str) -> str:
"""
Process url and title and base directory into a full filename,
suitable for writing to.
"""
# url example: "https://www.example.com/foo.mp3?test=1"
# cut everything but filename and (possibly) query params.
url_end = url.split("/")[-1]
# url_end example: "foo.mp3?test=1"
# cut query params.
# I think I could assume there's only one '?' after the file extension,
# but after being surprised by query parameters,
# I want to be extra careful.
url_filename = url_end.split("?")[0]
# url_filename example: "foo.mp3"
# this is one of the parts of the project intended to make it not MP3 specific.
if self.settings["use_title_as_filename"]:
ext = os.path.splitext(url_filename)[1][1:]
filename = f"{title}.{ext}" # RIP owl
# remove characters we can't allow in filenames.
filename = util.sanitize(filename)
else:
filename = url_filename
return os.path.join(directory, filename)
def __eq__(self, rhs: Any) -> bool:
return isinstance(rhs, Subscription) and repr(self) == repr(rhs)
def __ne__(self, rhs: Any) -> bool:
return not self.__eq__(rhs)
def __str__(self) -> str:
return str(Subscription.encode_subscription(self))
def __repr__(self) -> str:
return str(self)
class _FeedState(object):
def __init__(self, feedstate_dict: Mapping[str, Any]=None) -> None:
if feedstate_dict is not None:
LOG.debug("Successfully loaded feed state dict.")
self.feed = feedstate_dict.get("feed", {})
self.entries = feedstate_dict.get("entries", [])
self.entries_state_dict = feedstate_dict.get("entries_state_dict", {})
self.queue = collections.deque(feedstate_dict.get("queue", []))
# Store the most recent SUMMARY_LIMIT items we've downloaded.
temp_list = feedstate_dict.get("summary_queue", [])
self.summary_queue: MutableSequence[Dict[str, Any]] = collections.deque(
[],
SUMMARY_LIMIT,
)
# When we load from the cache file, mark all of the items in the summary queue as not
# being from the current session.
for elem in temp_list:
elem["is_this_session"] = False
self.summary_queue.append(elem)
last_modified = feedstate_dict.get("last_modified", None)
self.store_last_modified(last_modified)
self.etag: str = feedstate_dict.get("etag", "")
self.latest_entry_number = feedstate_dict.get("latest_entry_number", None)
else:
LOG.debug("Did not successfully load feed state dict.")
LOG.debug("Creating blank dict.")
self.feed = {}
self.entries = []
self.entries_state_dict = {}
self.queue = collections.deque([])
self.summary_queue = collections.deque([], SUMMARY_LIMIT)
self.last_modified: Any = None
self.etag = ""
self.latest_entry_number = None
def load_rss_info(self, parsed: feedparser.FeedParserDict) -> None:
"""
Load some RSS subscription elements into this feed state."""
self.entries = []
for entry in parsed.get("entries"):
new_entry = {}
new_entry["title"] = entry["title"]
new_entry["urls"] = []
new_entry["metadata"] = {}
for enclosure in entry["enclosures"]:
new_entry["urls"].append(enclosure["href"])
self.entries.append(new_entry)
def as_dict(self) -> Dict[str, Any]:
"""
Return dictionary of this feed state object.
:returns: Dictionary of this object's state.
"""
return {"entries": self.entries,
"entries_state_dict": self.entries_state_dict,
"queue": list(self.queue),
"latest_entry_number": self.latest_entry_number,
"summary_queue": list(self.summary_queue),
"last_modified": None,
"etag": self.etag,
}
def store_last_modified(self, last_modified: Any) -> None:
"""
Store last_modified as a datetime.
:param last_modified: Last-modified time as time.struct_time.
"""
if isinstance(last_modified, time.struct_time):
LOG.debug("Updated last_modified.")
self.last_modified = datetime.datetime.fromtimestamp(time.mktime(last_modified))
else:
LOG.debug("Unhandled 'last_modified' type, ignoring.")
self.last_modified = None
def __str__(self) -> str:
return str(self.as_dict())
def __repr__(self) -> str:
return str(self)
# "Private" file functions (messy internals).
def _process_directory(d: Optional[str]) -> str:
"""Assign directory if none was given, and create directory if necessary."""
if d is None:
LOG.debug(f"No directory provided, creating a default one.")
return util.expand(constants.APPDIRS.user_data_dir)
directory = util.expand(d)
LOG.debug(f"Using directory {directory}.")
util.ensure_dir(directory)
return directory
def _filter_nums(*, nums: List[int], min_lim: int=0, max_lim: int) -> List[int]:
"""Given two limits, remove elements from the list that aren't in that range."""
return [num for num in nums if num > min_lim and num <= max_lim]
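# For example, with a ten-entry feed, _filter_nums(nums=[0, 3, 7, 99], max_lim=10)
# returns [3, 7]: episode numbers are one-indexed, so 0 and anything above max_lim
# are dropped.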
def _generate_feedparser(name: str) -> Any:
"""Perform rate-limited parse with feedparser."""
@util.rate_limited(120, name)
def _rate_limited_parser(url: str, etag: str, last_modified: Any) -> feedparser.FeedParserDict:
# pylint: disable=no-member
return feedparser.parse(url, etag=etag, modified=last_modified)
return _rate_limited_parser
class UpdateResult(enum.Enum):
"""Enum describing possible results of trying to update a subscription."""
SUCCESS = 0
UNNEEDED = -1
FAILURE = -2
ATTEMPT_AGAIN = -3
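# Minimal usage sketch, not part of the library proper: build one subscription by
# hand and attempt a single update. The feed URL is a placeholder, and running this
# performs real network access and creates a download directory, so it only runs
# when the module is executed directly.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    _demo_sub = Subscription(url="https://example.com/feed.rss",
                             name="example",
                             directory="example_downloads")
    _demo_sub.update(settings={"backlog_limit": 1})
    if _demo_sub.attempt_update():
        LOG.info("Downloaded this session: %s", ", ".join(_demo_sub.session_summary()))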
|
|
# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Daniel Strohmeier <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from ..source_estimate import SourceEstimate, VolSourceEstimate
from ..source_space import _ensure_src
from ..utils import check_random_state, logger
from ..externals.six.moves import zip
def select_source_in_label(src, label, random_state=None):
"""Select source positions using a label
Parameters
----------
src : list of dict
The source space
label : Label
the label (read with mne.read_label)
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
lh_vertno : list
selected source coefficients on the left hemisphere
rh_vertno : list
selected source coefficients on the right hemisphere
"""
lh_vertno = list()
rh_vertno = list()
rng = check_random_state(random_state)
if label.hemi == 'lh':
src_sel_lh = np.intersect1d(src[0]['vertno'], label.vertices)
idx_select = rng.randint(0, len(src_sel_lh), 1)
lh_vertno.append(src_sel_lh[idx_select][0])
else:
src_sel_rh = np.intersect1d(src[1]['vertno'], label.vertices)
idx_select = rng.randint(0, len(src_sel_rh), 1)
rh_vertno.append(src_sel_rh[idx_select][0])
return lh_vertno, rh_vertno
def simulate_sparse_stc(src, n_dipoles, times,
data_fun=lambda t: 1e-7 * np.sin(20 * np.pi * t),
labels=None, random_state=None):
"""Generate sparse (n_dipoles) sources time courses from data_fun
This function randomly selects n_dipoles vertices in the whole cortex
or one single vertex in each label if labels is not None. It uses data_fun
to generate waveforms for each vertex.
Parameters
----------
src : instance of SourceSpaces
The source space.
n_dipoles : int
Number of dipoles to simulate.
times : array
Time array
data_fun : callable
Function to generate the waveforms. The default is a 100 nAm, 10 Hz
sinusoid as ``1e-7 * np.sin(20 * pi * t)``. The function should take
as input the array of time samples in seconds and return an array of
the same length containing the time courses.
labels : None | list of Labels
The labels. The default is None, otherwise its size must be n_dipoles.
random_state : None | int | np.random.RandomState
To specify the random generator state.
Returns
-------
stc : SourceEstimate
The generated source time courses.
Notes
-----
.. versionadded:: 0.10.0
"""
rng = check_random_state(random_state)
src = _ensure_src(src, verbose=False)
data = np.zeros((n_dipoles, len(times)))
for i_dip in range(n_dipoles):
data[i_dip, :] = data_fun(times)
if labels is None:
# can be vol or surface source space
offsets = np.linspace(0, n_dipoles, len(src) + 1).astype(int)
n_dipoles_ss = np.diff(offsets)
# don't use .choice b/c not on old numpy
vs = [s['vertno'][np.sort(rng.permutation(np.arange(s['nuse']))[:n])]
for n, s in zip(n_dipoles_ss, src)]
datas = data
else:
if n_dipoles != len(labels):
logger.warning('The number of labels is different from the number '
'of dipoles. %s dipole(s) will be generated.'
% min(n_dipoles, len(labels)))
labels = labels[:n_dipoles] if n_dipoles < len(labels) else labels
vertno = [[], []]
lh_data = [np.empty((0, data.shape[1]))]
rh_data = [np.empty((0, data.shape[1]))]
for i, label in enumerate(labels):
lh_vertno, rh_vertno = select_source_in_label(src, label, rng)
vertno[0] += lh_vertno
vertno[1] += rh_vertno
if len(lh_vertno) != 0:
lh_data.append(data[i][np.newaxis])
elif len(rh_vertno) != 0:
rh_data.append(data[i][np.newaxis])
else:
raise ValueError('No vertno found.')
vs = [np.array(v) for v in vertno]
datas = [np.concatenate(d) for d in [lh_data, rh_data]]
# need to sort each hemi by vertex number
for ii in range(2):
order = np.argsort(vs[ii])
vs[ii] = vs[ii][order]
if len(order) > 0: # fix for old numpy
datas[ii] = datas[ii][order]
datas = np.concatenate(datas)
tmin, tstep = times[0], np.diff(times[:2])[0]
assert datas.shape == data.shape
cls = SourceEstimate if len(vs) == 2 else VolSourceEstimate
stc = cls(datas, vertices=vs, tmin=tmin, tstep=tstep)
return stc
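# Illustrative helper, not used by the functions above: a custom ``data_fun`` for
# simulate_sparse_stc only has to map an array of time samples (in seconds) to an
# array of the same length. For instance, a 10 Hz, 100 nAm sinusoid with an
# exponential decay could be passed as ``data_fun=_example_damped_sine`` alongside a
# source space and times array defined elsewhere.
def _example_damped_sine(times):
    """Return a 10 Hz, 100 nAm sinusoid damped with a 50 ms time constant."""
    return 1e-7 * np.sin(20 * np.pi * times) * np.exp(-times / 0.05)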
def simulate_stc(src, labels, stc_data, tmin, tstep, value_fun=None):
"""Simulate sources time courses from waveforms and labels
This function generates a source estimate with extended sources by
filling the labels with the waveforms given in stc_data.
By default, the vertices within a label are assigned the same waveform.
The waveforms can be scaled for each vertex by using the label values
and value_fun. E.g.,
# create a source label where the values are the distance from the center
labels = circular_source_labels('sample', 0, 10, 0)
# sources with decaying strength (x will be the distance from the center)
fun = lambda x: exp(- x / 10)
    stc = simulate_stc(src, labels, stc_data, tmin, tstep, fun)
Parameters
----------
src : list of dict
The source space
labels : list of Labels
The labels
stc_data : array (shape: len(labels) x n_times)
The waveforms
tmin : float
The beginning of the timeseries
tstep : float
The time step (1 / sampling frequency)
value_fun : function
Function to apply to the label values
Returns
-------
stc : SourceEstimate
The generated source time courses.
"""
if len(labels) != len(stc_data):
raise ValueError('labels and stc_data must have the same length')
vertno = [[], []]
stc_data_extended = [[], []]
hemi_to_ind = {'lh': 0, 'rh': 1}
for i, label in enumerate(labels):
hemi_ind = hemi_to_ind[label.hemi]
src_sel = np.intersect1d(src[hemi_ind]['vertno'],
label.vertices)
if value_fun is not None:
idx_sel = np.searchsorted(label.vertices, src_sel)
values_sel = np.array([value_fun(v) for v in
label.values[idx_sel]])
data = np.outer(values_sel, stc_data[i])
else:
data = np.tile(stc_data[i], (len(src_sel), 1))
vertno[hemi_ind].append(src_sel)
stc_data_extended[hemi_ind].append(np.atleast_2d(data))
# format the vertno list
for idx in (0, 1):
if len(vertno[idx]) > 1:
vertno[idx] = np.concatenate(vertno[idx])
elif len(vertno[idx]) == 1:
vertno[idx] = vertno[idx][0]
vertno = [np.array(v) for v in vertno]
# the data is in the order left, right
data = list()
if len(vertno[0]) != 0:
idx = np.argsort(vertno[0])
vertno[0] = vertno[0][idx]
data.append(np.concatenate(stc_data_extended[0])[idx])
if len(vertno[1]) != 0:
idx = np.argsort(vertno[1])
vertno[1] = vertno[1][idx]
data.append(np.concatenate(stc_data_extended[1])[idx])
data = np.concatenate(data)
stc = SourceEstimate(data, vertices=vertno, tmin=tmin, tstep=tstep)
return stc
|
|
"""
Implement the random and np.random module functions.
"""
from __future__ import print_function, absolute_import, division
import math
import os
import random
import numpy as np
from llvmlite import ir
from numba.extending import overload
from numba.targets.imputils import (Registry, impl_ret_untracked,
impl_ret_new_ref)
from numba.typing import signature
from numba import _helperlib, cgutils, types, jit
registry = Registry()
lower = registry.lower
int32_t = ir.IntType(32)
int64_t = ir.IntType(64)
def const_int(x):
return ir.Constant(int32_t, x)
double = ir.DoubleType()
N = 624
N_const = ir.Constant(int32_t, N)
_pid = None
def random_init():
"""
Initialize the random states with system entropy.
"""
global _pid
if _pid != os.getpid():
b = os.urandom(N * 4)
for n in ('py_random_state', 'np_random_state'):
_helperlib.rnd_seed(_helperlib.c_helpers[n], b)
_pid = os.getpid()
# This is the same struct as rnd_state_t in _helperlib.c.
rnd_state_t = ir.LiteralStructType(
[int32_t, ir.ArrayType(int32_t, N),
int32_t, double])
rnd_state_ptr_t = ir.PointerType(rnd_state_t)
# Accessors
def get_index_ptr(builder, state_ptr):
return cgutils.gep_inbounds(builder, state_ptr, 0, 0)
def get_array_ptr(builder, state_ptr):
return cgutils.gep_inbounds(builder, state_ptr, 0, 1)
def get_has_gauss_ptr(builder, state_ptr):
return cgutils.gep_inbounds(builder, state_ptr, 0, 2)
def get_gauss_ptr(builder, state_ptr):
return cgutils.gep_inbounds(builder, state_ptr, 0, 3)
def get_rnd_shuffle(builder):
"""
    Get the internal function to shuffle the Mersenne Twister (MT) state.
"""
fnty = ir.FunctionType(ir.VoidType(), (rnd_state_ptr_t,))
fn = builder.function.module.get_or_insert_function(fnty, "numba_rnd_shuffle")
fn.args[0].add_attribute("nocapture")
return fn
def get_next_int32(context, builder, state_ptr):
"""
Get the next int32 generated by the PRNG at *state_ptr*.
"""
idxptr = get_index_ptr(builder, state_ptr)
idx = builder.load(idxptr)
need_reshuffle = builder.icmp_unsigned('>=', idx, N_const)
with cgutils.if_unlikely(builder, need_reshuffle):
fn = get_rnd_shuffle(builder)
builder.call(fn, (state_ptr,))
builder.store(const_int(0), idxptr)
idx = builder.load(idxptr)
array_ptr = get_array_ptr(builder, state_ptr)
y = builder.load(cgutils.gep_inbounds(builder, array_ptr, 0, idx))
idx = builder.add(idx, const_int(1))
builder.store(idx, idxptr)
# Tempering
y = builder.xor(y, builder.lshr(y, const_int(11)))
y = builder.xor(y, builder.and_(builder.shl(y, const_int(7)),
const_int(0x9d2c5680)))
y = builder.xor(y, builder.and_(builder.shl(y, const_int(15)),
const_int(0xefc60000)))
y = builder.xor(y, builder.lshr(y, const_int(18)))
return y
def get_next_double(context, builder, state_ptr):
"""
Get the next double generated by the PRNG at *state_ptr*.
"""
# a = rk_random(state) >> 5, b = rk_random(state) >> 6;
a = builder.lshr(get_next_int32(context, builder, state_ptr), const_int(5))
b = builder.lshr(get_next_int32(context, builder, state_ptr), const_int(6))
# return (a * 67108864.0 + b) / 9007199254740992.0;
a = builder.uitofp(a, double)
b = builder.uitofp(b, double)
return builder.fdiv(
builder.fadd(b, builder.fmul(a, ir.Constant(double, 67108864.0))),
ir.Constant(double, 9007199254740992.0))
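# Pure-Python reference for the arithmetic lowered above (illustration only, never
# called by the compiled code): two 32-bit draws are cut down to 27 and 26 bits and
# combined into a uniform double with 53 bits of precision.
def _py_next_double(next_int32):
    """*next_int32* is any callable returning uniform 32-bit unsigned integers."""
    a = next_int32() >> 5  # keep the top 27 bits
    b = next_int32() >> 6  # keep the top 26 bits
    return (a * 67108864.0 + b) / 9007199254740992.0  # (a * 2**26 + b) / 2**53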
def get_next_int(context, builder, state_ptr, nbits):
"""
Get the next integer with width *nbits*.
"""
c32 = ir.Constant(nbits.type, 32)
def get_shifted_int(nbits):
shift = builder.sub(c32, nbits)
y = get_next_int32(context, builder, state_ptr)
return builder.lshr(y, builder.zext(shift, y.type))
ret = cgutils.alloca_once_value(builder, ir.Constant(int64_t, 0))
is_32b = builder.icmp_unsigned('<=', nbits, c32)
with builder.if_else(is_32b) as (ifsmall, iflarge):
with ifsmall:
low = get_shifted_int(nbits)
builder.store(builder.zext(low, int64_t), ret)
with iflarge:
# XXX This assumes nbits <= 64
low = get_next_int32(context, builder, state_ptr)
high = get_shifted_int(builder.sub(nbits, c32))
total = builder.add(
builder.zext(low, int64_t),
builder.shl(builder.zext(high, int64_t), ir.Constant(int64_t, 32)))
builder.store(total, ret)
return builder.load(ret)
def get_py_state_ptr(context, builder):
return context.get_c_value(builder, rnd_state_t,
"numba_py_random_state")
def get_np_state_ptr(context, builder):
return context.get_c_value(builder, rnd_state_t,
"numba_np_random_state")
def get_state_ptr(context, builder, name):
return {
"py": get_py_state_ptr,
"np": get_np_state_ptr,
}[name](context, builder)
def _fill_defaults(context, builder, sig, args, defaults):
"""
    Assuming a homogeneous signature (same type for result and all arguments),
fill in the *defaults* if missing from the arguments.
"""
ty = sig.return_type
llty = context.get_data_type(ty)
args = tuple(args) + tuple(ir.Constant(llty, d) for d in defaults[len(args):])
sig = signature(*(ty,) * (len(args) + 1))
return sig, args
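# For example, np.random.normal() and np.random.normal(loc) both reach np_gauss_impl
# below with fewer than two arguments; _fill_defaults pads the missing ones with the
# literal defaults (0.0, 1.0), so every call form flows through the same two-argument
# _gauss_impl path.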
@lower("random.seed", types.uint32)
def seed_impl(context, builder, sig, args):
res = _seed_impl(context, builder, sig, args, get_state_ptr(context,
builder, "py"))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.seed", types.uint32)
def seed_impl(context, builder, sig, args):
res = _seed_impl(context, builder, sig, args, get_state_ptr(context,
builder, "np"))
return impl_ret_untracked(context, builder, sig.return_type, res)
def _seed_impl(context, builder, sig, args, state_ptr):
seed_value, = args
fnty = ir.FunctionType(ir.VoidType(), (rnd_state_ptr_t, int32_t))
fn = builder.function.module.get_or_insert_function(fnty, "numba_rnd_init")
builder.call(fn, (state_ptr, seed_value))
return context.get_constant(types.none, None)
@lower("random.random")
def random_impl(context, builder, sig, args):
state_ptr = get_state_ptr(context, builder, "py")
res = get_next_double(context, builder, state_ptr)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.random")
def random_impl(context, builder, sig, args):
state_ptr = get_state_ptr(context, builder, "np")
res = get_next_double(context, builder, state_ptr)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.gauss", types.Float, types.Float)
@lower("random.normalvariate", types.Float, types.Float)
def gauss_impl(context, builder, sig, args):
res = _gauss_impl(context, builder, sig, args, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.standard_normal")
@lower("np.random.normal")
@lower("np.random.normal", types.Float)
@lower("np.random.normal", types.Float, types.Float)
def np_gauss_impl(context, builder, sig, args):
sig, args = _fill_defaults(context, builder, sig, args, (0.0, 1.0))
res = _gauss_impl(context, builder, sig, args, "np")
return impl_ret_untracked(context, builder, sig.return_type, res)
def _gauss_pair_impl(_random):
def compute_gauss_pair():
"""
Compute a pair of numbers on the normal distribution.
"""
while True:
x1 = 2.0 * _random() - 1.0
x2 = 2.0 * _random() - 1.0
r2 = x1*x1 + x2*x2
if r2 < 1.0 and r2 != 0.0:
break
# Box-Muller transform
f = math.sqrt(-2.0 * math.log(r2) / r2)
return f * x1, f * x2
return compute_gauss_pair
def _gauss_impl(context, builder, sig, args, state):
# The type for all computations (either float or double)
ty = sig.return_type
llty = context.get_data_type(ty)
state_ptr = get_state_ptr(context, builder, state)
_random = {"py": random.random,
"np": np.random.random}[state]
ret = cgutils.alloca_once(builder, llty, name="result")
gauss_ptr = get_gauss_ptr(builder, state_ptr)
has_gauss_ptr = get_has_gauss_ptr(builder, state_ptr)
has_gauss = cgutils.is_true(builder, builder.load(has_gauss_ptr))
with builder.if_else(has_gauss) as (then, otherwise):
with then:
# if has_gauss: return it
builder.store(builder.load(gauss_ptr), ret)
builder.store(const_int(0), has_gauss_ptr)
with otherwise:
# if not has_gauss: compute a pair of numbers using the Box-Muller
# transform; keep one and return the other
pair = context.compile_internal(builder,
_gauss_pair_impl(_random),
signature(types.UniTuple(ty, 2)),
())
first, second = cgutils.unpack_tuple(builder, pair, 2)
builder.store(first, gauss_ptr)
builder.store(second, ret)
builder.store(const_int(1), has_gauss_ptr)
mu, sigma = args
return builder.fadd(mu,
builder.fmul(sigma, builder.load(ret)))
@lower("random.getrandbits", types.Integer)
def getrandbits_impl(context, builder, sig, args):
nbits, = args
too_large = builder.icmp_unsigned(">=", nbits, const_int(65))
too_small = builder.icmp_unsigned("==", nbits, const_int(0))
with cgutils.if_unlikely(builder, builder.or_(too_large, too_small)):
msg = "getrandbits() limited to 64 bits"
context.call_conv.return_user_exc(builder, OverflowError, (msg,))
state_ptr = get_state_ptr(context, builder, "py")
res = get_next_int(context, builder, state_ptr, nbits)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _randrange_impl(context, builder, start, stop, step, state):
state_ptr = get_state_ptr(context, builder, state)
ty = stop.type
zero = ir.Constant(ty, 0)
one = ir.Constant(ty, 1)
nptr = cgutils.alloca_once(builder, ty, name="n")
# n = stop - start
builder.store(builder.sub(stop, start), nptr)
with builder.if_then(builder.icmp_signed('<', step, zero)):
# n = (n + step + 1) // step
w = builder.add(builder.add(builder.load(nptr), step), one)
n = builder.sdiv(w, step)
builder.store(n, nptr)
with builder.if_then(builder.icmp_signed('>', step, one)):
# n = (n + step - 1) // step
w = builder.sub(builder.add(builder.load(nptr), step), one)
n = builder.sdiv(w, step)
builder.store(n, nptr)
n = builder.load(nptr)
with cgutils.if_unlikely(builder, builder.icmp_signed('<=', n, zero)):
# n <= 0
msg = "empty range for randrange()"
context.call_conv.return_user_exc(builder, ValueError, (msg,))
fnty = ir.FunctionType(ty, [ty, cgutils.true_bit.type])
fn = builder.function.module.get_or_insert_function(fnty, "llvm.ctlz.%s" % ty)
nbits = builder.trunc(builder.call(fn, [n, cgutils.true_bit]), int32_t)
nbits = builder.sub(ir.Constant(int32_t, ty.width), nbits)
bbwhile = builder.append_basic_block("while")
bbend = builder.append_basic_block("while.end")
builder.branch(bbwhile)
builder.position_at_end(bbwhile)
r = get_next_int(context, builder, state_ptr, nbits)
r = builder.trunc(r, ty)
too_large = builder.icmp_signed('>=', r, n)
builder.cbranch(too_large, bbwhile, bbend)
builder.position_at_end(bbend)
return builder.add(start, builder.mul(r, step))
@lower("random.randrange", types.Integer)
def randrange_impl_1(context, builder, sig, args):
stop, = args
start = ir.Constant(stop.type, 0)
step = ir.Constant(stop.type, 1)
res = _randrange_impl(context, builder, start, stop, step, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.randrange", types.Integer, types.Integer)
def randrange_impl_2(context, builder, sig, args):
start, stop = args
step = ir.Constant(start.type, 1)
res = _randrange_impl(context, builder, start, stop, step, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.randrange", types.Integer,
types.Integer, types.Integer)
def randrange_impl_3(context, builder, sig, args):
start, stop, step = args
res = _randrange_impl(context, builder, start, stop, step, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.randint", types.Integer, types.Integer)
def randint_impl_1(context, builder, sig, args):
start, stop = args
step = ir.Constant(start.type, 1)
stop = builder.add(stop, step)
res = _randrange_impl(context, builder, start, stop, step, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.randint", types.Integer)
def randint_impl_2(context, builder, sig, args):
stop, = args
start = ir.Constant(stop.type, 0)
step = ir.Constant(stop.type, 1)
res = _randrange_impl(context, builder, start, stop, step, "np")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.randint", types.Integer, types.Integer)
def randint_impl_3(context, builder, sig, args):
start, stop = args
step = ir.Constant(start.type, 1)
res = _randrange_impl(context, builder, start, stop, step, "np")
return impl_ret_untracked(context, builder, sig.return_type, res)
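# Note: the two decorated wrappers below share the name `uniform_impl` with
# the five-argument helper defined after them; the call inside each wrapper
# resolves to that helper at run time through Python's late binding of
# global names.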
@lower("random.uniform", types.Float, types.Float)
def uniform_impl(context, builder, sig, args):
res = uniform_impl(context, builder, sig, args, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.uniform", types.Float, types.Float)
def uniform_impl(context, builder, sig, args):
res = uniform_impl(context, builder, sig, args, "np")
return impl_ret_untracked(context, builder, sig.return_type, res)
def uniform_impl(context, builder, sig, args, state):
state_ptr = get_state_ptr(context, builder, state)
a, b = args
width = builder.fsub(b, a)
r = get_next_double(context, builder, state_ptr)
return builder.fadd(a, builder.fmul(width, r))
@lower("random.triangular", types.Float, types.Float)
def triangular_impl_2(context, builder, sig, args):
fltty = sig.return_type
low, high = args
state_ptr = get_state_ptr(context, builder, "py")
randval = get_next_double(context, builder, state_ptr)
def triangular_impl_2(randval, low, high):
u = randval
c = 0.5
if u > c:
u = 1.0 - u
low, high = high, low
return low + (high - low) * math.sqrt(u * c)
res = context.compile_internal(builder, triangular_impl_2,
signature(*(fltty,) * 4),
(randval, low, high))
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.triangular", types.Float,
types.Float, types.Float)
def triangular_impl_3(context, builder, sig, args):
low, high, mode = args
res = _triangular_impl_3(context, builder, sig, low, high, mode, "py")
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.triangular", types.Float,
types.Float, types.Float)
def triangular_impl_3(context, builder, sig, args):
low, mode, high = args
res = _triangular_impl_3(context, builder, sig, low, high, mode, "np")
return impl_ret_untracked(context, builder, sig.return_type, res)
def _triangular_impl_3(context, builder, sig, low, high, mode, state):
fltty = sig.return_type
state_ptr = get_state_ptr(context, builder, state)
randval = get_next_double(context, builder, state_ptr)
def triangular_impl_3(randval, low, high, mode):
if high == low:
return low
u = randval
c = (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * math.sqrt(u * c)
return context.compile_internal(builder, triangular_impl_3,
signature(*(fltty,) * 5),
(randval, low, high, mode))
@lower("random.gammavariate",
types.Float, types.Float)
def gammavariate_impl(context, builder, sig, args):
res = _gammavariate_impl(context, builder, sig, args, random.random)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.standard_gamma", types.Float)
@lower("np.random.gamma", types.Float)
@lower("np.random.gamma", types.Float, types.Float)
def gammavariate_impl(context, builder, sig, args):
sig, args = _fill_defaults(context, builder, sig, args, (None, 1.0))
res = _gammavariate_impl(context, builder, sig, args, np.random.random)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _gammavariate_impl(context, builder, sig, args, _random):
_exp = math.exp
_log = math.log
_sqrt = math.sqrt
_e = math.e
TWOPI = 2.0 * math.pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
def gammavariate_impl(alpha, beta):
"""Gamma distribution. Taken from CPython.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError('gammavariate: alpha and beta must be > 0.0')
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = _random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - _random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = _random()
while u <= 1e-7:
u = _random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = _random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = _random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
return context.compile_internal(builder, gammavariate_impl,
sig, args)
@lower("random.betavariate",
types.Float, types.Float)
def betavariate_impl(context, builder, sig, args):
res = _betavariate_impl(context, builder, sig, args,
random.gammavariate)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.beta",
types.Float, types.Float)
def betavariate_impl(context, builder, sig, args):
res = _betavariate_impl(context, builder, sig, args,
np.random.gamma)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _betavariate_impl(context, builder, sig, args, gamma):
def betavariate_impl(alpha, beta):
"""Beta distribution. Taken from CPython.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = gamma(alpha, 1.)
if y == 0.0:
return 0.0
else:
return y / (y + gamma(beta, 1.))
return context.compile_internal(builder, betavariate_impl,
sig, args)
@lower("random.expovariate",
types.Float)
def expovariate_impl(context, builder, sig, args):
_random = random.random
_log = math.log
def expovariate_impl(lambd):
"""Exponential distribution. Taken from CPython.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
# we use 1-random() instead of random() to preclude the
# possibility of taking the log of zero.
return -_log(1.0 - _random()) / lambd
res = context.compile_internal(builder, expovariate_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.exponential", types.Float)
def exponential_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def exponential_impl(scale):
return -_log(1.0 - _random()) * scale
res = context.compile_internal(builder, exponential_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.standard_exponential")
@lower("np.random.exponential")
def exponential_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def exponential_impl():
return -_log(1.0 - _random())
res = context.compile_internal(builder, exponential_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.lognormal")
@lower("np.random.lognormal", types.Float)
@lower("np.random.lognormal", types.Float, types.Float)
def np_lognormal_impl(context, builder, sig, args):
sig, args = _fill_defaults(context, builder, sig, args, (0.0, 1.0))
res = _lognormvariate_impl(context, builder, sig, args,
np.random.normal)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.lognormvariate",
types.Float, types.Float)
def lognormvariate_impl(context, builder, sig, args):
res = _lognormvariate_impl(context, builder, sig, args, random.gauss)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _lognormvariate_impl(context, builder, sig, args, _gauss):
_exp = math.exp
def lognormvariate_impl(mu, sigma):
return _exp(_gauss(mu, sigma))
return context.compile_internal(builder, lognormvariate_impl,
sig, args)
@lower("random.paretovariate", types.Float)
def paretovariate_impl(context, builder, sig, args):
_random = random.random
def paretovariate_impl(alpha):
"""Pareto distribution. Taken from CPython."""
# Jain, pg. 495
u = 1.0 - _random()
return 1.0 / u ** (1.0/alpha)
res = context.compile_internal(builder, paretovariate_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.pareto", types.Float)
def pareto_impl(context, builder, sig, args):
_random = np.random.random
def pareto_impl(alpha):
# Same as paretovariate() - 1.
u = 1.0 - _random()
return 1.0 / u ** (1.0/alpha) - 1
res = context.compile_internal(builder, pareto_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.weibullvariate",
types.Float, types.Float)
def weibullvariate_impl(context, builder, sig, args):
_random = random.random
_log = math.log
def weibullvariate_impl(alpha, beta):
"""Weibull distribution. Taken from CPython."""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - _random()
return alpha * (-_log(u)) ** (1.0/beta)
res = context.compile_internal(builder, weibullvariate_impl,
sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.weibull", types.Float)
def weibull_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def weibull_impl(beta):
# Same as weibullvariate(1.0, beta)
u = 1.0 - _random()
return (-_log(u)) ** (1.0/beta)
res = context.compile_internal(builder, weibull_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.vonmisesvariate",
types.Float, types.Float)
def vonmisesvariate_impl(context, builder, sig, args):
res = _vonmisesvariate_impl(context, builder, sig, args, random.random)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.vonmises",
types.Float, types.Float)
def vonmisesvariate_impl(context, builder, sig, args):
res = _vonmisesvariate_impl(context, builder, sig, args, np.random.random)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _vonmisesvariate_impl(context, builder, sig, args, _random):
_exp = math.exp
_sqrt = math.sqrt
_cos = math.cos
_acos = math.acos
_pi = math.pi
TWOPI = 2.0 * _pi
def vonmisesvariate_impl(mu, kappa):
"""Circular data distribution. Taken from CPython.
Note the algorithm in Python 2.6 and Numpy is different:
http://bugs.python.org/issue17141
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
if kappa <= 1e-6:
return TWOPI * _random()
s = 0.5 / kappa
r = s + _sqrt(1.0 + s * s)
while 1:
u1 = _random()
z = _cos(_pi * u1)
d = z / (r + z)
u2 = _random()
if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
break
q = 1.0 / r
f = (q + z) / (1.0 + q * z)
u3 = _random()
if u3 > 0.5:
theta = (mu + _acos(f)) % TWOPI
else:
theta = (mu - _acos(f)) % TWOPI
return theta
return context.compile_internal(builder, vonmisesvariate_impl,
sig, args)
@lower("np.random.binomial", types.Integer, types.Float)
def binomial_impl(context, builder, sig, args):
intty = sig.return_type
_random = np.random.random
def binomial_impl(n, p):
"""
Binomial distribution. Numpy's variant of the BINV algorithm
is used.
(Numpy uses BTPE for n*p >= 30, though)
"""
if n < 0:
raise ValueError("binomial(): n <= 0")
if not (0.0 <= p <= 1.0):
raise ValueError("binomial(): p outside of [0, 1]")
if p == 0.0:
return 0
if p == 1.0:
return n
flipped = p > 0.5
if flipped:
p = 1.0 - p
q = 1.0 - p
niters = 1
qn = q ** n
while qn <= 1e-308:
# Underflow => split into several iterations
# Note this is much slower than Numpy's BTPE
niters <<= 2
n >>= 2
qn = q ** n
assert n > 0
np = n * p
bound = min(n, np + 10.0 * math.sqrt(np * q + 1))
finished = False
total = 0
while niters > 0:
X = 0
U = _random()
px = qn
while X <= bound:
if U <= px:
total += n - X if flipped else X
niters -= 1
break
U -= px
X += 1
px = ((n - X + 1) * p * px) / (X * q)
return total
res = context.compile_internal(builder, binomial_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.chisquare", types.Float)
def chisquare_impl(context, builder, sig, args):
def chisquare_impl(df):
return 2.0 * np.random.standard_gamma(df / 2.0)
res = context.compile_internal(builder, chisquare_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.f", types.Float, types.Float)
def f_impl(context, builder, sig, args):
def f_impl(num, denom):
return ((np.random.chisquare(num) * denom) /
(np.random.chisquare(denom) * num))
res = context.compile_internal(builder, f_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.geometric", types.Float)
def geometric_impl(context, builder, sig, args):
_random = np.random.random
intty = sig.return_type
def geometric_impl(p):
# Numpy's algorithm.
if p <= 0.0 or p > 1.0:
raise ValueError("geometric(): p outside of (0, 1]")
q = 1.0 - p
if p >= 0.333333333333333333333333:
X = intty(1)
sum = prod = p
U = _random()
while U > sum:
prod *= q
sum += prod
X += 1
return X
else:
return math.ceil(math.log(1.0 - _random()) / math.log(q))
res = context.compile_internal(builder, geometric_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.gumbel", types.Float, types.Float)
def gumbel_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def gumbel_impl(loc, scale):
U = 1.0 - _random()
return loc - scale * _log(-_log(U))
res = context.compile_internal(builder, gumbel_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.hypergeometric", types.Integer,
types.Integer, types.Integer)
def hypergeometric_impl(context, builder, sig, args):
_random = np.random.random
_floor = math.floor
def hypergeometric_impl(ngood, nbad, nsamples):
"""Numpy's algorithm for hypergeometric()."""
d1 = nbad + ngood - nsamples
d2 = float(min(nbad, ngood))
Y = d2
K = nsamples
while Y > 0.0 and K > 0:
Y -= _floor(_random() + Y / (d1 + K))
K -= 1
Z = int(d2 - Y)
if ngood > nbad:
return nsamples - Z
else:
return Z
res = context.compile_internal(builder, hypergeometric_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.laplace")
@lower("np.random.laplace", types.Float)
@lower("np.random.laplace", types.Float, types.Float)
def laplace_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def laplace_impl(loc, scale):
U = _random()
if U < 0.5:
return loc + scale * _log(U + U)
else:
return loc - scale * _log(2.0 - U - U)
sig, args = _fill_defaults(context, builder, sig, args, (0.0, 1.0))
res = context.compile_internal(builder, laplace_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.logistic")
@lower("np.random.logistic", types.Float)
@lower("np.random.logistic", types.Float, types.Float)
def logistic_impl(context, builder, sig, args):
_random = np.random.random
_log = math.log
def logistic_impl(loc, scale):
U = _random()
return loc + scale * _log(U / (1.0 - U))
sig, args = _fill_defaults(context, builder, sig, args, (0.0, 1.0))
res = context.compile_internal(builder, logistic_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.logseries", types.Float)
def logseries_impl(context, builder, sig, args):
intty = sig.return_type
_random = np.random.random
_log = math.log
_exp = math.exp
def logseries_impl(p):
"""Numpy's algorithm for logseries()."""
if p <= 0.0 or p > 1.0:
raise ValueError("logseries(): p outside of (0, 1]")
r = _log(1.0 - p)
while 1:
V = _random()
if V >= p:
return 1
U = _random()
q = 1.0 - _exp(r * U)
if V <= q * q:
# XXX what if V == 0.0 ?
return intty(1.0 + _log(V) / _log(q))
elif V >= q:
return 1
else:
return 2
res = context.compile_internal(builder, logseries_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.negative_binomial", types.int64, types.Float)
def negative_binomial_impl(context, builder, sig, args):
_gamma = np.random.gamma
_poisson = np.random.poisson
def negative_binomial_impl(n, p):
if n <= 0:
raise ValueError("negative_binomial(): n <= 0")
if p < 0.0 or p > 1.0:
raise ValueError("negative_binomial(): p outside of [0, 1]")
Y = _gamma(n, (1.0 - p) / p)
return _poisson(Y)
res = context.compile_internal(builder, negative_binomial_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
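# np.random.poisson is lowered in two parts: for lam >= 10.0 the C helper
# numba_poisson_ptrs is called directly through the LLVM builder, otherwise
# control falls through to the compiled Knuth-style multiplication loop below.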
@lower("np.random.poisson")
@lower("np.random.poisson", types.Float)
def poisson_impl(context, builder, sig, args):
state_ptr = get_np_state_ptr(context, builder)
retptr = cgutils.alloca_once(builder, int64_t, name="ret")
bbcont = builder.append_basic_block("bbcont")
bbend = builder.append_basic_block("bbend")
if len(args) == 1:
lam, = args
big_lam = builder.fcmp_ordered('>=', lam, ir.Constant(double, 10.0))
with builder.if_then(big_lam):
# For lambda >= 10.0, we switch to a more accurate
# algorithm (see _helperlib.c).
fnty = ir.FunctionType(int64_t, (rnd_state_ptr_t, double))
fn = builder.function.module.get_or_insert_function(fnty,
"numba_poisson_ptrs")
ret = builder.call(fn, (state_ptr, lam))
builder.store(ret, retptr)
builder.branch(bbend)
builder.branch(bbcont)
builder.position_at_end(bbcont)
_random = np.random.random
_exp = math.exp
def poisson_impl(lam):
"""Numpy's algorithm for poisson() on small *lam*.
This method is invoked only if the parameter lambda of the
distribution is small ( < 10 ). The algorithm used is described
in "Knuth, D. 1969. 'Seminumerical Algorithms. The Art of
Computer Programming' vol 2.
"""
if lam < 0.0:
raise ValueError("poisson(): lambda < 0")
if lam == 0.0:
return 0
enlam = _exp(-lam)
X = 0
prod = 1.0
while 1:
U = _random()
prod *= U
if prod <= enlam:
return X
X += 1
if len(args) == 0:
sig = signature(sig.return_type, types.float64)
args = (ir.Constant(double, 1.0),)
ret = context.compile_internal(builder, poisson_impl, sig, args)
builder.store(ret, retptr)
builder.branch(bbend)
builder.position_at_end(bbend)
res = builder.load(retptr)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.power", types.Float)
def power_impl(context, builder, sig, args):
def power_impl(a):
if a <= 0.0:
raise ValueError("power(): a <= 0")
return math.pow(1 - math.exp(-np.random.standard_exponential()),
1./a)
res = context.compile_internal(builder, power_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.rayleigh")
@lower("np.random.rayleigh", types.Float)
def rayleigh_impl(context, builder, sig, args):
_random = np.random.random
def rayleigh_impl(mode):
if mode <= 0.0:
raise ValueError("rayleigh(): mode <= 0")
return mode * math.sqrt(-2.0 * math.log(1.0 - _random()))
sig, args = _fill_defaults(context, builder, sig, args, (1.0,))
res = context.compile_internal(builder, rayleigh_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.standard_cauchy")
def cauchy_impl(context, builder, sig, args):
_gauss = np.random.standard_normal
def cauchy_impl():
return _gauss() / _gauss()
res = context.compile_internal(builder, cauchy_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.standard_t", types.Float)
def standard_t_impl(context, builder, sig, args):
def standard_t_impl(df):
N = np.random.standard_normal()
G = np.random.standard_gamma(df / 2.0)
X = math.sqrt(df / 2.0) * N / math.sqrt(G)
return X
res = context.compile_internal(builder, standard_t_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.wald", types.Float, types.Float)
def wald_impl(context, builder, sig, args):
def wald_impl(mean, scale):
if mean <= 0.0:
raise ValueError("wald(): mean <= 0")
if scale <= 0.0:
raise ValueError("wald(): scale <= 0")
mu_2l = mean / (2.0 * scale)
Y = np.random.standard_normal()
Y = mean * Y * Y
X = mean + mu_2l * (Y - math.sqrt(4 * scale * Y + Y * Y))
U = np.random.random()
if U <= mean / (mean + X):
return X
else:
return mean * mean / X
res = context.compile_internal(builder, wald_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.zipf", types.Float)
def zipf_impl(context, builder, sig, args):
_random = np.random.random
intty = sig.return_type
def zipf_impl(a):
if a <= 1.0:
raise ValueError("zipf(): a <= 1")
am1 = a - 1.0
b = 2.0 ** am1
while 1:
U = 1.0 - _random()
V = _random()
X = intty(math.floor(U ** (-1.0 / am1)))
T = (1.0 + 1.0 / X) ** am1
if X >= 1 and V * X * (T - 1.0) / (b - 1.0) <= (T / b):
return X
res = context.compile_internal(builder, zipf_impl, sig, args)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("random.shuffle", types.Buffer)
def shuffle_impl(context, builder, sig, args):
res = _shuffle_impl(context, builder, sig, args, random.randrange)
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("np.random.shuffle", types.Buffer)
def shuffle_impl(context, builder, sig, args):
res = _shuffle_impl(context, builder, sig, args, np.random.randint)
return impl_ret_untracked(context, builder, sig.return_type, res)
def _shuffle_impl(context, builder, sig, args, _randrange):
def shuffle_impl(arr):
i = arr.shape[0] - 1
while i > 0:
j = _randrange(i + 1)
arr[i], arr[j] = arr[j], arr[i]
i -= 1
return context.compile_internal(builder, shuffle_impl, sig, args)
# ------------------------------------------------------------------------
# Array-producing variants of scalar random functions
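# Each entry below pairs a typing key with the arity of its array-returning
# overload (the scalar parameters plus the trailing size argument). The
# lowering strips that last argument to build the scalar signature, allocates
# an empty array of the requested shape, and fills it by invoking the scalar
# implementation once per element.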
for typing_key, arity in [
("np.random.beta", 3),
("np.random.binomial", 3),
("np.random.chisquare", 2),
("np.random.exponential", 2),
("np.random.f", 3),
("np.random.gamma", 3),
("np.random.geometric", 2),
("np.random.gumbel", 3),
("np.random.hypergeometric", 4),
("np.random.laplace", 3),
("np.random.logistic", 3),
("np.random.lognormal", 3),
("np.random.logseries", 2),
("np.random.negative_binomial", 3),
("np.random.normal", 3),
("np.random.pareto", 2),
("np.random.poisson", 2),
("np.random.power", 2),
("np.random.random", 1),
("np.random.randint", 3),
("np.random.rayleigh", 2),
("np.random.standard_cauchy", 1),
("np.random.standard_exponential", 1),
("np.random.standard_gamma", 2),
("np.random.standard_normal", 1),
("np.random.standard_t", 2),
("np.random.triangular", 4),
("np.random.uniform", 3),
("np.random.vonmises", 3),
("np.random.wald", 3),
("np.random.weibull", 2),
("np.random.zipf", 2),
]:
@lower(typing_key, *(types.Any,) * arity)
def random_arr(context, builder, sig, args, typing_key=typing_key):
from . import arrayobj
arrty = sig.return_type
dtype = arrty.dtype
scalar_sig = signature(dtype, *sig.args[:-1])
scalar_args = args[:-1]
# Allocate array...
shapes = arrayobj._parse_shape(context, builder, sig.args[-1], args[-1])
arr = arrayobj._empty_nd_impl(context, builder, arrty, shapes)
# ... and populate it in natural order
scalar_impl = context.get_function(typing_key, scalar_sig)
with cgutils.for_range(builder, arr.nitems) as loop:
val = scalar_impl(builder, scalar_args)
ptr = cgutils.gep(builder, arr.data, loop.index)
arrayobj.store_item(context, builder, arrty, val, ptr)
return impl_ret_new_ref(context, builder, sig.return_type, arr._getvalue())
# ------------------------------------------------------------------------
# Irregular aliases: np.random.rand, np.random.randn
@overload(np.random.rand)
def rand(*size):
if len(size) == 0:
# Scalar output
def rand_impl():
return np.random.random()
else:
# Array output
def rand_impl(*size):
return np.random.random(size)
return rand_impl
@overload(np.random.randn)
def randn(*size):
if len(size) == 0:
# Scalar output
def randn_impl():
return np.random.standard_normal()
else:
# Array output
def randn_impl(*size):
return np.random.standard_normal(size)
return randn_impl
# ------------------------------------------------------------------------
# np.random.choice
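# np.random.choice is specialized at compile time: an Array population keeps
# its own dtype and item access, while an Integer population acts as an
# implied arange(a). The replace=False path permutes a copy of the source with
# a partial Fisher-Yates shuffle rather than rejection sampling.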
@overload(np.random.choice)
def choice(a, size=None, replace=True):
if isinstance(a, types.Array):
# choice() over an array population
assert a.ndim == 1
dtype = a.dtype
@jit(nopython=True)
def get_source_size(a):
return len(a)
@jit(nopython=True)
def copy_source(a):
return a.copy()
@jit(nopython=True)
def getitem(a, a_i):
return a[a_i]
elif isinstance(a, types.Integer):
# choice() over an implied arange() population
dtype = np.intp
@jit(nopython=True)
def get_source_size(a):
return a
@jit(nopython=True)
def copy_source(a):
return np.arange(a)
@jit(nopython=True)
def getitem(a, a_i):
return a_i
else:
raise TypeError("np.random.choice() first argument should be "
"int or array, got %s" % (a,))
if size in (None, types.none):
def choice_impl(a, size=None, replace=True):
"""
choice() implementation returning a single sample
(note *replace* is ignored)
"""
n = get_source_size(a)
i = np.random.randint(0, n)
return getitem(a, i)
else:
def choice_impl(a, size=None, replace=True):
"""
choice() implementation returning an array of samples
"""
n = get_source_size(a)
if replace:
out = np.empty(size, dtype)
fl = out.flat
for i in range(len(fl)):
j = np.random.randint(0, n)
fl[i] = getitem(a, j)
return out
else:
# Note we have to construct the array to compute out.size
# (`size` can be an arbitrary int or tuple of ints)
out = np.empty(size, dtype)
if out.size > n:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
# Get a contiguous copy of the source so as to permute it
src = copy_source(a)
fl = out.flat
for i in range(len(fl)):
j = np.random.randint(i, n)
fl[i] = src[j]
# Move away selected element
src[j] = src[i]
return out
return choice_impl
# ------------------------------------------------------------------------
# np.random.multinomial
@overload(np.random.multinomial)
def multinomial(n, pvals, size=None):
dtype = np.intp
@jit(nopython=True)
def multinomial_inner(n, pvals, out):
# Numpy's algorithm for multinomial()
fl = out.flat
sz = out.size
plen = len(pvals)
for i in range(0, sz, plen):
# Loop body: take a set of n experiments and fill up
# fl[i:i + plen] with the distribution of results.
# Current sum of outcome probabilities
p_sum = 1.0
# Current remaining number of experiments
n_experiments = n
# For each possible outcome `j`, compute the number of results
# with this outcome. This is done by considering the
# conditional probability P(X=j | X>=j) and running a binomial
# distribution over the remaining number of experiments.
for j in range(0, plen - 1):
p_j = pvals[j]
n_j = fl[i + j] = np.random.binomial(n_experiments, p_j / p_sum)
n_experiments -= n_j
if n_experiments <= 0:
# Note the output was initialized to zero
break
p_sum -= p_j
if n_experiments > 0:
# The remaining experiments end up in the last bucket
fl[i + plen - 1] = n_experiments
if not isinstance(n, types.Integer):
raise TypeError("np.random.multinomial(): n should be an "
"integer, got %s" % (n,))
if not isinstance(pvals, (types.Sequence, types.Array)):
raise TypeError("np.random.multinomial(): pvals should be an "
"array or sequence, got %s" % (pvals,))
if size in (None, types.none):
def multinomial_impl(n, pvals, size=None):
"""
multinomial(..., size=None)
"""
out = np.zeros(len(pvals), dtype)
multinomial_inner(n, pvals, out)
return out
elif isinstance(size, types.Integer):
def multinomial_impl(n, pvals, size=None):
"""
multinomial(..., size=int)
"""
out = np.zeros((size, len(pvals)), dtype)
multinomial_inner(n, pvals, out)
return out
elif isinstance(size, types.BaseTuple):
def multinomial_impl(n, pvals, size=None):
"""
multinomial(..., size=tuple)
"""
out = np.zeros(size + (len(pvals),), dtype)
multinomial_inner(n, pvals, out)
return out
else:
raise TypeError("np.random.multinomial(): size should be int or "
"tuple or None, got %s" % (size,))
return multinomial_impl
|
|
import database_hotel
import time
from date_type import *
import os
from prettytable import PrettyTable
import clients
clear = lambda: os.system('cls')
def menu(username):
"""
    This function makes the actual reservations.
It searches for free rooms during a given period,
calculates every added extra and every discount.
@type username: string
@param username: the username of the user
"""
print("\nChoose carefully!")
print("Keep in mind that the reservations are for the current year only!")
print("When do you want your vacation to start?")
begin_month = input("Month: ")
begin_month = int(begin_month)
begin_day = input("Day: ")
begin_day = form_a_day(begin_day)
print("\nWhen do you want your vacation to end?")
end_month = input("Month: ")
end_month = int(end_month)
end_day = input("Day: ")
end_day = form_a_day(end_day)
today_year = database_hotel.today_define()[0:4]
begin_date = "{}-{}-{}".format(today_year, begin_month, begin_day)
end_date = "{}-{}-{}".format(today_year, end_month, end_day)
if begin_date >= end_date:
print("Wrong input. End date can't be earlier than the begin date!")
time.sleep(2)
clear()
menu(username)
if not date_validate(begin_month, begin_day) or\
not date_validate(end_month, end_day):
print("Wrong input. Try again!")
time.sleep(2)
clear()
        menu(username)
        return
num_primary = input("How many adults: ")
num_primary = int(num_primary)
num_extra = input("How many kids: ")
num_extra = int(num_extra)
if num_extra:
first, second, third, fourth = age_define(num_extra)
total_people = num_primary + num_extra
if total_people <= 3:
rooms = find_for_three(begin_date, end_date)
elif total_people == 4:
rooms = find_for_four(begin_date, end_date)
elif total_people == 5:
rooms = find_for_five(begin_date, end_date)
else:
print("We can't fit that many people in one room.")
print("Try again.")
time.sleep(2)
clear()
        menu(username)
        return
if not rooms:
print("Looks like we are all out of rooms! :(")
time.sleep(2)
return
available_extras =\
list(set([database_hotel.get_room_extras(room) for room in rooms]))
available = PrettyTable(["Option",
"Extras"])
count = 0
for extra in available_extras:
count += 1
available.add_row((count, extra))
print(available)
print("\nChoose an option")
option = input("Option: ")
option = int(option)
while option > len(available_extras) or option < 1:
print("Wrong option! Try again.")
print("\nChoose an option")
option = input("Option: ")
option = int(option)
for room in rooms:
if database_hotel.get_room_extras(room) == available_extras[option-1]:
reserved_room = room
break
extras = available_extras[option-1].split(sep=', ')
print("\nHow do you want to be fed?\n")
while True:
print("1. BB")
print("2. HB")
print("3. FB")
feed = input("Option: ")
feed = int(feed)
if feed == 1:
feed = 'BB'
break
elif feed == 2:
feed = 'HB'
break
elif feed == 3:
feed = 'FB'
break
else:
print("Wrong input")
price_person = database_hotel.get_price_per_person(begin_date,
end_date, feed)
if num_extra:
first_price = price_person -\
price_person*database_hotel.get_children_discount(first)/100.0
second_price = price_person -\
price_person*database_hotel.get_children_discount(second)/100.0
third_price = price_person -\
price_person*database_hotel.get_children_discount(third)/100.0
fourth_price = price_person -\
price_person*database_hotel.get_children_discount(fourth)/100.0
extras_price =\
sum([database_hotel.get_extra_price(extra) for extra in extras])
bar = input("Do you want a mini bar? It is priced seperately!(y/n): ")
if bar == 'y':
extras_price += database_hotel.get_extra_price("Mini bar")
if num_extra:
children_price =\
child_pricing(num_extra, first_price, second_price,
third_price, fourth_price)
else:
children_price = 0
price = num_primary*price_person + children_price + extras_price
visits = database_hotel.get_client_visits(username)
final_price = price -\
price*database_hotel.get_visits_discount(visits)/100.0
print("\nYour final price for the reservation is {}".format(final_price))
choice = input("Do you still want to make it?(y/n): ")
if choice != 'y':
exit()
if database_hotel.client_or_admin_login(username) == 'client':
id_client = database_hotel.get_client_id(username)
database_hotel.add_reservation(id_client, begin_date, end_date,
num_primary, num_extra, feed,
available_extras[option-1],
final_price, reserved_room)
database_hotel.update_client_visits(username)
res_id = database_hotel.get_reservation_id(id_client, reserved_room)
email = database_hotel.get_client_email(username)
text = """Hello,\n
You just made a reservation in our hotel.\n
        Its identification number is {}\n
        Its price is {}\n
        You have to pay it in full one week before the reservation.\n
""".format(res_id, final_price)
clients.send_email(email, text)
else:
id_admin = database_hotel.get_admin_id(username)
res_id = database_hotel.get_reservation_id(id_admin, reserved_room)
database_hotel.add_reservation(id_admin, begin_date, end_date,
num_primary, num_extra, feed,
available_extras[option-1],
final_price, reserved_room)
database_hotel.pay_reservation(res_id)
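# The three finder functions below form a fallback chain: if no triple room is
# free, find_for_three() falls back to the quadruple rooms, which in turn fall
# back to the apartments, so smaller groups can still get a larger room.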
def find_for_three(begin, end_):
"""
    This function searches for rooms fit for 3 or fewer people.
    It returns a list of the available room numbers.
    @type begin: string
    @param begin: the beginning of the searched period
@type end_: string
@param end_: the ending of the searched period
@rtype: list
@return: a list of available room numbers
"""
triple_rooms = database_hotel.get_triple_rooms()
occupied = set(database_hotel.get_occupied_smallest_rooms(begin, end_))
for num in occupied:
if num in triple_rooms:
triple_rooms.remove(num)
if not triple_rooms:
return find_for_four(begin, end_)
return triple_rooms
def find_for_four(begin, end_):
"""
    This function searches for rooms fit for 4 people.
    It returns a list of the available room numbers.
    @type begin: string
    @param begin: the beginning of the searched period
@type end_: string
@param end_: the ending of the searched period
@rtype: list
@return: a list of available room numbers
"""
quadruple_rooms = database_hotel.get_quadruple_rooms()
occupied = set(database_hotel.get_occupied_bigger_rooms(begin, end_, 4))
for num in occupied:
if num in quadruple_rooms:
quadruple_rooms.remove(num)
if not quadruple_rooms:
return find_for_five(begin, end_)
return quadruple_rooms
def find_for_five(begin, end_):
"""
    This function searches for rooms fit for 5 people.
    It returns a list of the available room numbers.
    @type begin: string
    @param begin: the beginning of the searched period
@type end_: string
@param end_: the ending of the searched period
@rtype: list
@return: a list of available room numbers
"""
apartment_rooms = database_hotel.get_apartment_rooms()
occupied = set(database_hotel.get_occupied_bigger_rooms(begin, end_, 5))
for num in occupied:
if num in apartment_rooms:
apartment_rooms.remove(num)
if not apartment_rooms:
return []
return apartment_rooms
def child_pricing(num, first, second, third, fourth):
"""
    This function helps calculate the children's price
    for the reservation.
@type num: integer
@param num: the number of kids
@type first: float
@param first: the price for the first child
@type second: float
@param second: the price for the second child
@type third: float
@param third: the price for the third child
@type fourth: float
@param fourth: the price for the fourth child
"""
if num == 1:
return first
if num == 2:
return first + second
if num == 3:
return first + second + third
if num == 4:
return first + second + third + fourth
def age_define(num):
"""
    This function fills in the age of each child,
    even when it isn't needed.
    @type num: integer
    @param num: the number of kids attending
    @rtype: tuple
    @return: returns the ages of the children
"""
if num == 1:
first = input("How old is the first child?: ")
first = int(first)
second = 20
third = 20
fourth = 20
elif num == 2:
first = input("How old is the first child?: ")
first = int(first)
second = input("How old is the second child?: ")
second = int(second)
third = 20
fourth = 20
elif num == 3:
first = input("How old is the first child?: ")
first = int(first)
second = input("How old is the second child?: ")
second = int(second)
third = input("How old is the third child?: ")
third = int(third)
fourth = 20
else:
first = input("How old is the first child?: ")
first = int(first)
second = input("How old is the second child?: ")
second = int(second)
third = input("How old is the third child?: ")
third = int(third)
fourth = input("How old is the fourth child?: ")
fourth = int(fourth)
return first, second, third, fourth
def form_a_day(day):
"""
    Makes a one-digit number into a two-digit string
    with a '0' in front
@type day: string
@param day: some number to be converted
@rtype: string
@return: returns the converted number in str type
"""
day = int(day)
if day in range(1, 10):
day = '0' + str(day)
return day
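# Illustrative examples (not executed anywhere): form_a_day("7") returns "07",
# while form_a_day("14") returns the integer 14; both render correctly when
# formatted into the reservation date strings.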
|
|
import redis
import simplejson
import time
import operator
from cryptokit import bits_to_difficulty
from gevent.event import Event
from powerpool.lib import loop
from powerpool.jobmanagers import Jobmanager
from binascii import hexlify
class MonitorNetworkMulti(Jobmanager):
defaults = config = dict(jobmanagers=None,
profit_poll_int=1,
redis={},
margin_switch=1.2,
exchange_manager={})
def __init__(self, config):
self._configure(config)
# Since some MonitorNetwork objs are polling and some aren't....
self.gl_methods = ['update_profit']
# Child jobmanagers
self.jobmanagers = {}
self.price_data = {}
self.profit_data = {}
self.next_network = None
self.current_network = None
# Currently active jobs keyed by their unique ID
self.jobs = {}
self.new_job = Event()
self.redis = redis.Redis(**self.config['redis'])
@property
def latest_job(self):
""" Proxy the jobmanager we're currently mining ons job """
return self.jobmanagers[self.current_network].latest_job
@property
def status(self):
""" For display in the http monitor """
return dict(price_data=self.price_data,
profit_data=self.profit_data,
next_network=self.next_network,
current_network=self.current_network)
@loop(interval='profit_poll_int')
def update_profit(self):
""" Continually check redis for new profit information """
        # Accessing Redis can cause greenlet switches because of new jobs. We don't
# want to potentially switch jobs multiple times quickly, so we update
# the profitability information all at once after the loop to avoid
# multiple network switches
new_price_data = {}
for manager in self.jobmanagers.itervalues():
currency = manager.config['currency']
pscore = self.redis.get("{}_profit".format(currency))
# Deserialize
if pscore:
try:
pscore = simplejson.loads(pscore, use_decimal=True)
except Exception:
self.logger.warn(
"Error parsing profit score for {}! Setting it to 0.."
.format(currency))
                    pscore = 0
# If no score was grabbed, pass a 0 value score
else:
self.logger.warn("Unable to grab profit info for {}!"
.format(currency))
pscore = 0
ratio = self.redis.get("{}_ratio".format(currency)) or 1.0
ratio = float(ratio)
# Only set updated if it actually changed
if self.price_data[currency][0] != pscore or self.price_data[currency][1] != ratio:
new_price_data[currency] = (pscore, ratio, time.time())
# If we have some new information, adjust accordingly
if new_price_data:
self.logger.info("Updated price information for {}"
.format(new_price_data.keys()))
# Atomic update in gevent
self.price_data.update(new_price_data)
# Update all the profit info. No preemption, just maths
for currency in self.jobmanagers.iterkeys():
self.update_profitability(currency)
self.logger.debug(
"Re-checking best network after new price data for {}"
.format(new_price_data.keys()))
self.check_best()
def check_best(self):
""" Assuming that `profit_data` is completely up to date, evaluate the
most profitable network and switch immediately if there's a big enough
difference. Otherwise set it to be changed at next block notification.
"""
# Get the most profitable network based on our current data
new_best = max(self.profit_data.iteritems(),
key=operator.itemgetter(1))[0]
if self.current_network is None:
self.logger.info(
"No active network, so switching to {} with profit of {:,.4f}"
.format(new_best, self.profit_data[new_best]))
self.next_network = new_best
self.switch_network()
return
        # If the network queued up to switch to next is more profitable than
        # the network we're mining on by the configured margin_switch factor
        # (1.2, i.e. 120%, by default), switch immediately
margin_switch = self.config['margin_switch']
if (margin_switch and
self.profit_data[self.next_network] >
(self.profit_data[self.current_network] * margin_switch)):
self.logger.info(
"Network {} {:,.4f} now more profitable than current network "
"{} {:,.4f} by a fair margin. Switching NOW."
.format(new_best, self.profit_data[new_best], self.current_network,
self.profit_data[self.current_network]))
self.next_network = new_best
self.switch_network()
return
if new_best != self.next_network:
self.logger.info(
"Network {} {:,.4f} now more profitable than current best "
"{} {:,.4f}. Switching on next block from current network {}."
.format(new_best, self.profit_data[new_best], self.next_network,
self.profit_data[self.next_network], self.current_network))
self.next_network = new_best
return
self.logger.debug("Network {} {:,.4f} still most profitable"
.format(new_best, self.profit_data[new_best]))
def switch_network(self):
""" Pushes a network change to the user if it's needed """
if self.next_network != self.current_network:
job = self.jobmanagers[self.next_network].latest_job
if job is None:
self.logger.error(
"Tried to switch network to {} that has no job!"
.format(self.next_network))
return
if self.current_network:
self.logger.info(
"Switching from {} {:,.4f} -> {} {:,.4f} and pushing job NOW"
.format(self.current_network, self.profit_data[self.current_network],
self.next_network, self.profit_data[self.next_network]))
self.current_network = self.next_network
job.type = 0
self.new_job.job = job
self.new_job.set()
self.new_job.clear()
return True
return False
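    # Rough sketch of the metric computed in update_profitability(): block
    # reward (in whole coins) times the price score, divided by the network
    # difficulty, then scaled by the externally supplied ratio and a constant
    # 1e6 so the values are easier to read.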
def update_profitability(self, currency):
""" Recalculates the profitability for a specific currency """
jobmanager = self.jobmanagers[currency]
last_job = jobmanager.latest_job
pscore, ratio, _ = self.price_data[currency]
# We can't update if we don't have a job and profit data
if last_job is None or pscore is None:
return False
max_blockheight = jobmanager.config['max_blockheight']
if max_blockheight is not None and last_job.block_height >= max_blockheight:
self.profit_data[currency] = 0
self.logger.debug(
"{} height {} is >= the configured maximum blockheight of {}, "
"setting profitability to 0."
.format(currency, last_job.block_height, max_blockheight))
return True
block_value = last_job.total_value / 100000000.0
diff = bits_to_difficulty(hexlify(last_job.bits))
self.profit_data[currency] = (block_value * float(pscore) / diff) * ratio * 1000000
self.logger.debug(
"Updating {} profit data;\n\tblock_value {};\n\tavg_price {:,.8f}"
";\n\tdiff {};\n\tratio {};\n\tresult {}"
.format(currency, block_value, float(pscore), diff,
ratio, self.profit_data[currency]))
self.manager.log_event("{name}.profitability.{curr}:{metric}|g"
.format(name=self.manager.config['procname'],
curr=currency,
metric=self.profit_data[currency]))
return True
def new_job_notif(self, event):
currency = event.job.currency
flush = event.job.type == 0
if currency == self.current_network:
self.logger.info("Recieved new job on most profitable network {}"
.format(currency))
# See if we need to switch now that we're done with that block. If
# not, push a new job on this network
if not self.switch_network():
self.new_job.job = event.job
self.new_job.set()
self.new_job.clear()
        # If we're receiving a new block then diff has changed, so update the
# network profit and recompute best network
if flush and self.update_profitability(currency):
self.logger.debug("Re-checking best network after new job from {}"
.format(currency))
self.check_best()
def start(self):
Jobmanager.start(self)
self.config['jobmanagers'] = set(self.config['jobmanagers'])
found_managers = set()
for manager in self.manager.component_types['Jobmanager']:
if manager.key in self.config['jobmanagers']:
currency = manager.config['currency']
self.jobmanagers[currency] = manager
self.profit_data[currency] = 0
self.price_data[currency] = (None, None, None)
found_managers.add(manager.key)
manager.new_job.rawlink(self.new_job_notif)
for monitor in self.config['jobmanagers'] - found_managers:
self.logger.error("Unable to locate Jobmanager(s) '{}'".format(monitor))
|
|
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = '[email protected] (Wesley Chun)'
from datetime import datetime, timedelta, time as timed
import json
import os
import time
import logging
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import Session
from models import SessionForm
from models import SessionForms
from models import SpeakerForm
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
DEFAULTS = {
"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": [ "Default", "Topic" ],
}
OPERATORS = {
'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='
}
FIELDS = {
'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees',
}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
ConferenceForm,
websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1, required=True),
typeOfSession=messages.StringField(2)
)
SESSION_POST_REQUEST = endpoints.ResourceContainer(
SessionForm,
websafeConferenceKey=messages.StringField(1, required=True),
)
SPEAKER_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
speaker=messages.StringField(1, required=True),
)
WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeSessionKey=messages.StringField(1, required=True),
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _getUserId():
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
for i in range(3):
resp = urlfetch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
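# Note: _getUserId() retries the tokeninfo lookup up to three times with a
# growing delay, and switches to the access_token form of the query when the
# id_token is reported as invalid.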
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _createConferenceObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = _getUserId()
if not request.name:
raise endpoints.BadRequestException("Conference 'name' field required")
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# add default values for those missing (both data model & outbound Message)
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# convert dates from strings to Date objects; set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
# set seatsAvailable to be same as maxAttendees on creation
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
# generate Profile Key based on user ID and Conference
# ID based on Profile key get Conference key from ID
p_key = ndb.Key(Profile, user_id)
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# create Conference, send email to organizer confirming
# creation of Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email'
)
return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = _getUserId()
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(ConferenceForm, ConferenceForm, path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
@endpoints.method(CONF_POST_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='PUT', name='updateConference')
def updateConference(self, request):
"""Update conference w/provided fields & return w/updated info."""
return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
# get Conference object from request; bail if not found
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, _getUserId()))
prof = ndb.Key(Profile, _getUserId()).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
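    # The single-inequality restriction enforced above mirrors the Datastore
    # rule that a query may apply inequality filters to at most one property,
    # and that property must also be the first sort order (see _getQuery).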
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST',
name='queryConferences')
def queryConferences(self, request):
"""Query for conferences."""
conferences = self._getQuery(request)
# need to fetch organizer displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in \
conferences]
)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='GET', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Given a conference, returns all sessions."""
# copy the ConferenceForm/ProtoRPC Message into a dict object
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# get the existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check to see if that conference exists
if not conf:
raise endpoints.NotFoundException(
                'Sorry, but no conference was found with key: %s' % request.websafeConferenceKey)
# create query for all key matches for this conference
sessions = Session.query(ancestor=ndb.Key(Conference, conf.key.id()))
# return SessionForm objects per Session
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
@endpoints.method(SESSION_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions/by_type/{typeOfSession}',
http_method='GET', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""return all sessions of a specified type"""
# copy the ConferenceForm/ProtoRPC Message into a dict object
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
typeOfSession = data['typeOfSession']
# get the existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check to see if that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# create query for all key matches for this conference
sessions = Session.query(Session.typeOfSession == typeOfSession, ancestor=ndb.Key(Conference, conf.key.id()))
        # return SessionForm objects per Session
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
http_method='GET', name='getConferenceSessionFeed')
def getConferenceSessionFeed(self, request):
"""Returns a conference's sorted feed of sessions occurring same day and later."""
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# fetch existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
sessions = Session.query(ancestor=ndb.Key(Conference, conf.key.id()))\
.filter(Session.date >= datetime.now()-timedelta(1))\
.order(Session.date, Session.startTime)
# return set of SessionForm objects per Session
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
# - - - Session objects - - - - - - - - - - - - - - - - - - -
def _createSessionObject(self, request):
"""Create or update Conference object, returning ConferenceForm/request."""
# load necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = _getUserId()
if not request.name:
raise endpoints.BadRequestException("Session 'name' field required")
# get and check conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check to see if that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is the owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can add sessions.')
# copy SessionForm/ProtoRPC Message into a dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
# convert dates from strings to Dates
if data['date']:
data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
# convert time from strings to Times
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'][:5], "%H:%M").time()
# make key based off of relationship
p_key = ndb.Key(Conference, conf.key.id())
c_id = Session.allocate_ids(size=1, parent=p_key)[0]
c_key = ndb.Key(Session, c_id, parent=p_key)
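        # Sessions are allocated under the Conference key (parent=p_key), making
        # each Session a child of its Conference; that parent relationship is
        # what lets the ancestor queries elsewhere in this API find them.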
data['key'] = c_key
data['organizerUserId'] = user_id
del data['websafeConferenceKey']
del data['websafeKey']
Session(**data).put()
        # queue a task to check whether this speaker has other sessions;
        # if they do, they can be cached as the featured speaker
taskqueue.add(params={'speaker': data['speaker']}, url='/tasks/set_featured_speaker')
        # copy the request data back into a SessionForm and return it
return self._copySessionToForm(request)
def _copySessionToForm(self, session):
"""Copy relevant fields from Session to SessionForm."""
sf = SessionForm()
for field in sf.all_fields():
if hasattr(session, field.name):
# convert Date and Time to date string; just copy others
if field.name in ['startTime', 'date']:
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, session.key.urlsafe())
sf.check_initialized()
return sf
@endpoints.method(SessionForm, SessionForm,
path='sessions',
http_method='POST', name='createSession')
def createSession(self, request):
"""Open to the organizer of the conference"""
return self._createSessionObject(request)
@endpoints.method(SPEAKER_GET_REQUEST, SessionForms,
path='sessions/{speaker}',
http_method='GET', name='getSessionsBySpeaker')
def getSessionsBySpeaker(self, request):
"""Given a speaker, return all sessions given by him/her across all conferences"""
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
speaker = data['speaker']
# query sessions by speaker
sessions = Session.query(Session.speaker == speaker)
        # return SessionForm objects per Session
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
@endpoints.method(message_types.VoidMessage, SessionForms,
http_method='GET', name='getTBDSessions')
def getTBDSessions(self, request):
"""Returns sessions missing time/date information"""
sessions = Session.query(ndb.OR(
Session.duration == None,
Session.startTime == None,
Session.date == None
))
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
@endpoints.method(message_types.VoidMessage, SessionForms,
http_method='GET', name='getEarlyNonWorkshopSessions')
def getEarlyNonWorkshopSessions(self, request):
"""Returns non-workshop sessions occurring before 7pm"""
sessions = Session.query(ndb.AND(
Session.startTime != None,
            Session.startTime <= time(hour=19)  # datetime.time; assumes `time` is imported from datetime at module top
))
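        # The workshop exclusion below happens in Python rather than in the
        # query: Datastore allows inequality filters on only one property per
        # query, and startTime already uses that slot, so a second inequality
        # on typeOfSession (!= 'workshop') would be rejected.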
filtered_sessions = []
for session in sessions:
if 'workshop' in session.typeOfSession:
continue
else:
filtered_sessions.append(session)
return SessionForms(
items=[self._copySessionToForm(session) for session in filtered_sessions]
)
@staticmethod
def _cacheFeaturedSpeaker(speaker):
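        """Check whether a speaker has more than one session; if so, cache a
        featured-speaker announcement string in memcache and return it."""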
sessions = Session.query(Session.speaker == speaker).fetch()
# If more than one session is returned:
if len(sessions) > 1:
# Update string for the new speaker.
featSpeak = (SPEAKER_TPL % speaker) + ' ' + 'Sessions:'
# Set Memcache with update.
for session in sessions:
featSpeak += ' ' + session.name
memcache.set(MEMCACHE_SPEAKER_KEY, featSpeak)
# Otherwise set featSpeak equal to previous value
else:
featSpeak = (memcache.get(MEMCACHE_SPEAKER_KEY) or "")
# Return featured speaker.
return featSpeak
@endpoints.method(message_types.VoidMessage, SpeakerForm,
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Returns the sessions of the featured speaker"""
# attempt to get data from memcache
data = memcache.get('featured_speaker')
from pprint import pprint
pprint(data)
sessions = []
sessionNames = []
speaker = None
if data and data.has_key('speaker') and data.has_key('sessionNames'):
speaker = data['speaker']
sessionNames = data['sessionNames']
# if memcache fails or is empty, pull speaker from upcoming session
else:
upcoming_session = Session.query(Session.date >= datetime.now())\
.order(Session.date, Session.startTime).get()
if upcoming_session:
speaker = upcoming_session.speaker
sessions = Session.query(Session.speaker == speaker)
sessionNames = [session.name for session in sessions]
# populate speaker form
sf = SpeakerForm()
for field in sf.all_fields():
if field.name == 'sessionNames':
setattr(sf, field.name, sessionNames)
elif field.name == 'speaker':
setattr(sf, field.name, speaker)
sf.check_initialized()
return sf
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
# copy relevant fields from Profile to ProfileForm
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# convert t-shirt string to Enum; just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
# make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get Profile from datastore
user_id = _getUserId()
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# get user Profile
prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
#if field == 'teeShirtSize':
# setattr(prof, field, str(val).upper())
#else:
# setattr(prof, field, val)
prof.put()
# return ProfileForm
return self._copyProfileToForm(prof)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile', http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile', http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Update & return user profile."""
return self._doProfile(request)
# - - - Wishlists - - - - - - - - - - - - - - - - -
@endpoints.method(WISHLIST_POST_REQUEST, SessionForm,
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Saves a session to a users wishlist"""
# get necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get and check session
session = ndb.Key(urlsafe=request.websafeSessionKey).get()
# check to see if that session exists
if not session:
raise endpoints.NotFoundException(
'No session found with key: %s' % request.websafeSessionKey)
# get profile
prof = self._getProfileFromUser()
# check if that session was already added to wishlist
if session.key in prof.sessionsToAttend:
raise endpoints.BadRequestException(
'This session was already saved to wishlist: %s' % request.websafeSessionKey)
        # add to the user profile's wishlist
prof.sessionsToAttend.append(session.key)
prof.put()
return self._copySessionToForm(session)
@endpoints.method(message_types.VoidMessage, SessionForms,
http_method='POST', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Returns a user's wishlist of sessions"""
# get necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# get profile and wishlist
prof = self._getProfileFromUser()
session_keys = prof.sessionsToAttend
sessions = [session_key.get() for session_key in session_keys]
# return set
return SessionForms(
items=[self._copySessionToForm(session) for session in sessions]
)
# Delete session in user wishlist
    @endpoints.method(WISHLIST_POST_REQUEST, BooleanMessage,
            http_method='DELETE', name='deleteSessionFromWishlist')
    def deleteSessionFromWishlist(self, request):
        """Delete Session from user wishlist."""
        prof = self._getProfileFromUser()
        # sessionsToAttend stores ndb Keys, so convert the websafe key first
        s_key = ndb.Key(urlsafe=request.websafeSessionKey)
        if s_key in prof.sessionsToAttend:
            prof.sessionsToAttend.remove(s_key)
retval = True
else:
retval = False
prof.put()
return BooleanMessage(data=retval)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/put',
http_method='GET', name='putAnnouncement')
def putAnnouncement(self, request):
"""Put Announcement into memcache"""
return StringMessage(data=self._cacheAnnouncement())
# - - - Registration - - - - - - - - - - - - - - - - - - - -
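    # Registration updates both the user's Profile and the Conference, which
    # live in different entity groups, hence the cross-group (xg=True)
    # transaction on the helper below.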
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser() # get user Profile
# check if conf exists given websafeConfKey
# get conference; check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# register
if reg:
# check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# check if seats avail
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# unregister
else:
# check if user already registered
if wsck in prof.conferenceKeysToAttend:
# unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
raise endpoints.UnauthorizedException('You are not authorized to remove this')
prof.put()
conf.put()
return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferenceKeysToAttend]
conferences = ndb.get_multi(conf_keys)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])\
for conf in conferences]
)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='DELETE', name='unregisterFromConference')
def unregisterFromConference(self, request):
"""Unregister user for selected conference."""
return self._conferenceRegistration(request, reg=False)
api = endpoints.api_server([ConferenceApi]) # register API
|
|
import json
import pytest
import os
from typing import ClassVar, Dict, List, Sequence, Tuple, Union
from kat.harness import sanitize, variants, Query, Runner
from abstract_tests import AmbassadorTest, HTTP, AHTTP
from abstract_tests import MappingTest, OptionTest, ServiceType, Node, Test
# The phase that we should wait until before performing test checks. Normally
# this would be phase 2, which is 10 seconds after the first wave of queries,
# but we increase it to phase 3 here to make sure that Zipkin and other tracers
# have _plenty_ of time to receive traces from Envoy and index them for retrieval
# through the API. We've seen this test flake when the check is performed in phase
# 2, so the hope is that phase 3 reduces the likelihood of the test flaking again.
check_phase = 3
class TracingTest(AmbassadorTest):
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin
spec:
selector:
app: zipkin
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin
spec:
selector:
matchLabels:
app: zipkin
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping
prefix: /target/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: TracingService
name: tracing
service: zipkin:9411
driver: zipkin
tag_headers:
- "x-watsup"
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
for i in range(100):
yield Query(self.url("target/"), headers={'x-watsup':'nothin'}, phase=1)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkin:9411/api/v2/services", phase=check_phase)
yield Query("http://zipkin:9411/api/v2/spans?serviceName=tracingtest-default", phase=check_phase)
yield Query("http://zipkin:9411/api/v2/traces?serviceName=tracingtest-default", phase=check_phase)
# The diagnostics page should load properly
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
for i in range(100):
assert self.results[i].backend.name == self.target.path.k8s
print(f"self.results[100] = {self.results[100]}")
assert self.results[100].backend is not None and self.results[100].backend.name == "raw", \
f"unexpected self.results[100] = {self.results[100]}"
assert len(self.results[100].backend.response) == 1
assert self.results[100].backend.response[0] == 'tracingtest-default'
assert self.results[101].backend.name == "raw"
tracelist = { x: True for x in self.results[101].backend.response }
assert 'router cluster_tracingtest_http_default egress' in tracelist
# Look for the host that we actually queried, since that's what appears in the spans.
assert self.results[0].backend.request.host in tracelist
# Ensure we generate 128-bit traceids by default
trace = self.results[102].json[0][0]
traceId = trace['traceId']
assert len(traceId) == 32
for t in self.results[102].json[0]:
if t.get('tags', {}).get('node_id') == 'test-id':
assert 'x-watsup' in t['tags']
assert t['tags']['x-watsup'] == 'nothin'
class TracingTestLongClusterName(AmbassadorTest):
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkinservicenamewithoversixtycharacterstoforcenamecompression
spec:
selector:
app: zipkin-longclustername
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-longclustername
spec:
selector:
matchLabels:
app: zipkin-longclustername
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-longclustername
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping_longclustername
prefix: /target/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: TracingService
name: tracing-longclustername
service: zipkinservicenamewithoversixtycharacterstoforcenamecompression:9411
driver: zipkin
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkinservicenamewithoversixtycharacterstoforcenamecompression:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
for i in range(100):
yield Query(self.url("target/"), phase=1)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkinservicenamewithoversixtycharacterstoforcenamecompression:9411/api/v2/services", phase=check_phase)
yield Query("http://zipkinservicenamewithoversixtycharacterstoforcenamecompression:9411/api/v2/spans?serviceName=tracingtestlongclustername-default", phase=check_phase)
yield Query("http://zipkinservicenamewithoversixtycharacterstoforcenamecompression:9411/api/v2/traces?serviceName=tracingtestlongclustername-default", phase=check_phase)
# The diagnostics page should load properly, even though our Tracing Service
# has a long cluster name https://github.com/datawire/ambassador/issues/3021
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
for i in range(100):
assert self.results[i].backend.name == self.target.path.k8s
print(f"self.results[100] = {self.results[100]}")
assert self.results[100].backend is not None and self.results[100].backend.name == "raw", \
f"unexpected self.results[100] = {self.results[100]}"
assert len(self.results[100].backend.response) == 1
assert self.results[100].backend.response[0] == 'tracingtestlongclustername-default'
assert self.results[101].backend.name == "raw"
tracelist = { x: True for x in self.results[101].backend.response }
assert 'router cluster_tracingtestlongclustername_http_default egress' in tracelist
# Look for the host that we actually queried, since that's what appears in the spans.
assert self.results[0].backend.request.host in tracelist
# Ensure we generate 128-bit traceids by default
trace = self.results[102].json[0][0]
traceId = trace['traceId']
assert len(traceId) == 32
class TracingTestShortTraceId(AmbassadorTest):
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin-64
spec:
selector:
app: zipkin-64
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-64
spec:
selector:
matchLabels:
app: zipkin-64
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-64
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping_64
prefix: /target-64/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, """
---
apiVersion: getambassador.io/v2
kind: TracingService
name: tracing-64
service: zipkin-64:9411
driver: zipkin
config:
trace_id_128bit: false
"""
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin-64:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
yield Query(self.url("target-64/"), phase=1)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkin-64:9411/api/v2/traces", phase=check_phase)
# The diagnostics page should load properly
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
# Ensure we generated 64-bit traceids
trace = self.results[1].json[0][0]
traceId = trace['traceId']
assert len(traceId) == 16
# This test asserts that the external authorization server receives the proper tracing
# headers when Ambassador is configured with an HTTP AuthService.
class TracingExternalAuthTest(AmbassadorTest):
def init(self):
self.target = HTTP()
self.auth = AHTTP(name="auth")
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin-auth
spec:
selector:
app: zipkin-auth
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-auth
spec:
selector:
matchLabels:
app: zipkin-auth
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-auth
spec:
containers:
- name: zipkin-auth
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping
prefix: /target/
service: {self.target.path.fqdn}
""")
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: TracingService
name: tracing-auth
service: zipkin-auth:9411
driver: zipkin
""")
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: AuthService
name: {self.auth.path.k8s}
auth_service: "{self.auth.path.fqdn}"
path_prefix: "/extauth"
allowed_headers:
- Requested-Status
- Requested-Header
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin-auth:9411/api/v2/services"))
def queries(self):
yield Query(self.url("target/"), headers={"Requested-Status": "200"}, expected=200)
def check(self):
extauth_res = json.loads(self.results[0].headers["Extauth"][0])
request_headers = self.results[0].backend.request.headers
assert self.results[0].status == 200
assert self.results[0].headers["Server"] == ["envoy"]
assert extauth_res["request"]["headers"]["x-b3-parentspanid"] == request_headers["x-b3-parentspanid"]
assert extauth_res["request"]["headers"]["x-b3-sampled"] == request_headers["x-b3-sampled"]
assert extauth_res["request"]["headers"]["x-b3-spanid"] == request_headers["x-b3-spanid"]
assert extauth_res["request"]["headers"]["x-b3-traceid"] == request_headers["x-b3-traceid"]
assert extauth_res["request"]["headers"]["x-request-id"] == request_headers["x-request-id"]
class TracingTestSampling(AmbassadorTest):
"""
Test for the "sampling" in TracingServices
"""
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin-65
spec:
selector:
app: zipkin-65
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-65
spec:
selector:
matchLabels:
app: zipkin-65
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-65
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping_65
prefix: /target-65/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, """
---
apiVersion: getambassador.io/v2
kind: TracingService
name: tracing-65
service: zipkin-65:9411
driver: zipkin
sampling:
overall: 10
"""
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin-65:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
for i in range(0, 100):
yield Query(self.url("target-65/"), phase=1, ignore_result=True)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkin-65:9411/api/v2/traces?limit=10000", phase=check_phase)
# The diagnostics page should load properly
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
traces = self.results[100].json
print("%d traces obtained" % len(traces))
#import json
#print(json.dumps(traces, indent=4, sort_keys=True))
# We constantly find that Envoy's RNG isn't exactly predictable with small sample
# sizes, so even though 10% of 100 is 10, we'll make this pass as long as we don't
# go over 50 or under 1.
assert 1 <= len(traces) <= 50
class TracingTestZipkinV2(AmbassadorTest):
"""
Test for the "collector_endpoint_version" Zipkin config in TracingServices
"""
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin-v2
spec:
selector:
app: zipkin-v2
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-v2
spec:
selector:
matchLabels:
app: zipkin-v2
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-v2
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping
prefix: /target/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, self.format("""
---
apiVersion: ambassador/v2
kind: TracingService
name: tracing
service: zipkin-v2:9411
driver: zipkin
config:
collector_endpoint: /api/v2/spans
collector_endpoint_version: HTTP_JSON
collector_hostname: zipkin-v2
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin-v2:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
for i in range(100):
yield Query(self.url("target/"), phase=1)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkin-v2:9411/api/v2/services", phase=check_phase)
yield Query("http://zipkin-v2:9411/api/v2/spans?serviceName=tracingtestzipkinv2-default", phase=check_phase)
yield Query("http://zipkin-v2:9411/api/v2/traces?serviceName=tracingtestzipkinv2-default", phase=check_phase)
# The diagnostics page should load properly
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
for i in range(100):
assert self.results[i].backend.name == self.target.path.k8s
print(f"self.results[100] = {self.results[100]}")
assert self.results[100].backend is not None and self.results[100].backend.name == "raw", \
f"unexpected self.results[100] = {self.results[100]}"
assert len(self.results[100].backend.response) == 1
assert self.results[100].backend.response[0] == 'tracingtestzipkinv2-default'
assert self.results[101].backend.name == "raw"
tracelist = { x: True for x in self.results[101].backend.response }
assert 'router cluster_tracingtestzipkinv2_http_default egress' in tracelist
# Look for the host that we actually queried, since that's what appears in the spans.
assert self.results[0].backend.request.host in tracelist
# Ensure we generate 128-bit traceids by default
trace = self.results[102].json[0][0]
traceId = trace['traceId']
assert len(traceId) == 32
class TracingTestZipkinV1(AmbassadorTest):
"""
Test for the "collector_endpoint_version" Zipkin config in TracingServices
"""
def init(self):
if os.environ.get('KAT_USE_ENVOY_V3', '') != '':
self.skip_node = True
self.target = HTTP()
def manifests(self) -> str:
return """
---
apiVersion: v1
kind: Service
metadata:
name: zipkin-v1
spec:
selector:
app: zipkin-v1
ports:
- port: 9411
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zipkin-v1
spec:
selector:
matchLabels:
app: zipkin-v1
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: zipkin-v1
spec:
containers:
- name: zipkin
image: openzipkin/zipkin:2.17
ports:
- name: http
containerPort: 9411
""" + super().manifests()
def config(self):
# Use self.target here, because we want this mapping to be annotated
# on the service, not the Ambassador.
yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tracing_target_mapping
prefix: /target/
service: {self.target.path.fqdn}
""")
# Configure the TracingService.
yield self, self.format("""
---
apiVersion: ambassador/v2
kind: TracingService
name: tracing
service: zipkin-v1:9411
driver: zipkin
config:
collector_endpoint: /api/v1/spans
collector_endpoint_version: HTTP_JSON_V1
collector_hostname: zipkin-v1
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query("http://zipkin-v1:9411/api/v2/services"))
def queries(self):
# Speak through each Ambassador to the traced service...
for i in range(100):
yield Query(self.url("target/"), phase=1)
# ...then ask the Zipkin for services and spans. Including debug=True in these queries
# is particularly helpful.
yield Query("http://zipkin-v1:9411/api/v2/services", phase=check_phase)
yield Query("http://zipkin-v1:9411/api/v2/spans?serviceName=tracingtestzipkinv1-default", phase=check_phase)
yield Query("http://zipkin-v1:9411/api/v2/traces?serviceName=tracingtestzipkinv1-default", phase=check_phase)
# The diagnostics page should load properly
yield Query(self.url("ambassador/v0/diag/"), phase=check_phase)
def check(self):
for i in range(100):
assert self.results[i].backend.name == self.target.path.k8s
print(f"self.results[100] = {self.results[100]}")
assert self.results[100].backend is not None and self.results[100].backend.name == "raw", \
f"unexpected self.results[100] = {self.results[100]}"
assert len(self.results[100].backend.response) == 1
assert self.results[100].backend.response[0] == 'tracingtestzipkinv1-default'
assert self.results[101].backend.name == "raw"
tracelist = { x: True for x in self.results[101].backend.response }
assert 'router cluster_tracingtestzipkinv1_http_default egress' in tracelist
# Look for the host that we actually queried, since that's what appears in the spans.
assert self.results[0].backend.request.host in tracelist
# Ensure we generate 128-bit traceids by default
trace = self.results[102].json[0][0]
traceId = trace['traceId']
assert len(traceId) == 32
|
|
#!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
width = 0.97 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 13}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
# tried to have a single y label but did not work
#fig,axes = plt.subplots(sharey=True)
def plot_bars_with_stdev_MO(SRMeans, SRStd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(223)
ax = plt.subplot2grid((3,2),(0, 0), colspan=2)
#rects1 = ax.bar(ind, SRMeans, width, color='m', hatch="//", yerr=SRStd, label = 'Monthly Avg SR')
rects1 = ax.bar(ind, SRMeans, width, color='m', \
align='center', yerr=SRStd, linewidth=0, \
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
# add some text for labels, title and axes ticks
#ax.set_ylabel('Avg SR')
ax.set_title('Whole network')
ax.set_xticks(ind)
ax.set_xticklabels(('Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov'))
ax.set_ylim([-0.1, 0.35])
ax.set_yticks((-0.1,0.1,0.3))
#plt.legend(loc=2, frameon=False)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.3f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
return plt
def plot_bars_with_stdev_2(DeletionMeans, DeletionStd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(322)
ax = plt.subplot2grid((3,2),(1, 1))
#rects1 = ax.bar(ind, DeletionMeans, width, color='c', hatch='*', yerr=DeletionStd, label = 'decommission')
rects1 = ax.bar(ind, DeletionMeans, width, color='c', \
align='center', yerr=DeletionStd, linewidth=0, \
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
# add some text for labels, title and axes ticks
#ax.set_ylabel('Avg SR')
ax.set_title('Interaction decommission')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.1, 0.35])
ax.set_yticks((-0.1,0.1,0.3))
#plt.legend(frameon=False)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.3f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
return plt
def plot_bars_with_stdev_1(DeletionMeans, DeletionStd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((3,2),(1, 0))
#rects1 = ax.bar(ind, DeletionMeans, width, color='darkred', hatch='x', yerr=DeletionStd, label = 'Activation')
rects1 = ax.bar(ind, DeletionMeans, width, color='darkred', \
align='center', yerr=DeletionStd, linewidth=0, \
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
# add some text for labels, title and axes ticks
#ax.set_ylabel('Avg SR')
ax.set_title('Interaction activation')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At activation', 'After'))
ax.set_ylim([-0.1, 0.35])
ax.set_yticks((-0.1,0.1,0.3))
#plt.legend(frameon=False)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.3f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
return plt
def plot_bars_with_stdev_7(formationDeletionMeans, formationDeletionStd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
ax = plt.subplot2grid((4,2),(2, 0), colspan=2)
#rects1 = ax.bar(ind, formationDeletionMeans, width, color='y', hatch='+', yerr=formationDeletionStd, label = 'Activation and decommission')
rects1 = ax.bar(ind, formationDeletionMeans, width, color='y', \
align='center', yerr=formationDeletionStd, linewidth=0, \
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.set_ylim([-0.1, 0.35])
# add some text for labels, title and axes ticks
#ax.set_ylabel('Avg SR')
ax.set_title('Non-persisting interactions')
ax.set_xticks(ind)
ax.set_xticklabels(('Before', 'At activation', 'At Mid', 'At decommission', 'After'))
#ax.set_xlim([])
ax.set_yticks((-0.1,0.1,0.3))
#plt.legend(frameon=False)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.3f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
return plt
# this is meant to show 3 analyses in one figure:
# 1) edge formation: those that are deleted and those that are not
# 2) edge deletion: those that are formed in our dataset and before
# 3) 6 months SR change
###################################################################################
# edge formation two version
# V3
N = 3
# PERSISTING LINKS
#formationNodeletionMeans = (0.0121447496769, 0.110398018511, 0.10617694085)
#formationNodeletionStd = (0.0539546551446, 0.192962632767, 0.1715092024)
#processed 13492 edges
#Average SR 0.019252 and stdev 0.066896 before, at the time 0.097444, 0.176203 and after 0.070327, 0.138657 edges formation
formationMeans = (0.0192521818311, 0.0974437259893, 0.07032720813)
formationStd = (0.0668960682412, 0.176202988944, 0.138657262854)
#plt3 = plot_bars_with_stdev_3(formationMeans, formationStd, formationNodeletionMeans, formationNodeletionStd)
#plt3.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/monthly_SR_change_list_of_users/edges_SR_change_v2.png", dpi=440)
plt1 = plot_bars_with_stdev_1(formationMeans, formationStd)
###################################################################################
# edge deletion two versions
# v4 # V22 final
N = 3
# PERSISTING LINKS
deletionNoformationMeans = (0.0727004565385, 0.0680258988703, 0.0229713363971)
deletionNoformationStd = (0.123489476215, 0.140276874039, 0.0746578827396)
#processed 10080 edges
# Average SR 0.038934 and stdev 0.090531 before, at the time 0.083006, 0.157389 and after 0.038228, 0.101566 edges deletion
deletionMeans = (0.038933802188, 0.0830056870558, 0.0382280669398)
deletionStd = (0.0905313753995, 0.157388968035, 0.101565579395)
#plt4 = plot_bars_with_stdev_4(deletionMeans, deletionStd, deletionNoformationMeans, deletionNoformationStd)
# this final, no need for 2 types of deletion
plt2 = plot_bars_with_stdev_2(deletionMeans, deletionStd)
###################################################################################
# MONTHLY
# 0.008100, 0.017923, 0.025976, 0.037767, 0.048156, 0.054721, 0.029074
# 0.053316, 0.077368, 0.094393, 0.111137, 0.126394, 0.136750, 0.107575
N = 5
#SRMeans = (0.017923, 0.025976, 0.037767, 0.048156, 0.054721)
#SRStd = (0.077368, 0.094393, 0.111137, 0.126394, 0.136750)
# improved to discount for edges not present at the MO
#SRMeans = (0.050, 0.053, 0.058, 0.061, 0.065)
#SRStd = (0.123, 0.130, 0.134, 0.14, 0.147)
# but i should not discount for edges not present
# but as always with formation and deletion etc
# consider the same set of edges as below
"""
Monthly edges 69960
6 mention network SR: $\mu=0.018$, $\sigma= 0.077$
Monthly edges 69960
7 mention network SR: $\mu=0.026$, $\sigma= 0.094$
Monthly edges 69960
8 mention network SR: $\mu=0.038$, $\sigma= 0.111$
Monthly edges 69960
9 mention network SR: $\mu=0.048$, $\sigma= 0.126$
Monthly edges 69960
10 mention network SR: $\mu=0.055$, $\sigma= 0.137$
"""
SRMeans = (0.018, 0.026, 0.038, 0.048, 0.055)
SRStd = (0.077, 0.094, 0.111, 0.126, 0.137)
plt3 = plot_bars_with_stdev_MO(SRMeans, SRStd)
#plt6.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/monthly_SR_change_list_of_users/edges_monthly_SR_change.png", dpi=440)
#plt.show()
#plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/monthly_SR_change_list_of_users/ALL_SR_change.png", dpi=1000)
###################################################################################################
# SR of persisting edges
def plot_bars_with_stdev_MO_2(SRmeans, SRStd):
ind = np.arange(N) # the x locations for the groups
#ax = plt.subplot(111)
ax = plt.subplot2grid((3,2),(2, 0), colspan=2)
#rects1 = ax.bar(ind, SRmeans, width, color='r', hatch='O', yerr=SRStd, label = 'Monthly Avg SR')
rects1 = ax.bar(ind, SRmeans, width, color='r', \
align='center', yerr=SRStd, linewidth=0, \
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
# add some text for labels, title and axes ticks
#ax.set_ylabel('Avg SR')
ax.set_title('Persisting interactions')
ax.set_xticks(ind)
ax.set_xticklabels(('Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov'))
ax.set_ylim([-0.1, 0.35])
ax.set_yticks((-0.1,0.1,0.3))
#plt.legend(loc=2,frameon=False)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.3f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
#processed 3076 edges
#Average SR, stdev 0.077627, 0.158114, at the time 6
#Average SR, stdev 0.073275, 0.160656, at the time 7
#Average SR, stdev 0.069833, 0.151127, at the time 8
#Average SR, stdev 0.064159, 0.149817, at the time 9
#Average SR, stdev 0.073046, 0.155852, at the time 10
###################################################################################
# edge formation deletion separate
N = 5
#formationDeletionMeans = (0.023888, 0.088995, 0.087686, 0.086517, 0.009626)
#formationDeletionStd = (0.073761, 0.163803, 0.156189, 0.160936, 0.039921)
#plt7 = plot_bars_with_stdev_7(formationDeletionMeans, formationDeletionStd)
###################################################################################
N = 5
SRmeans = (0.077627, 0.073275, 0.069833, 0.064159, 0.073046)
SRStd = (0.158114, 0.160656, 0.151127, 0.149817, 0.155852)
plt4 = plot_bars_with_stdev_MO_2(SRmeans, SRStd)
#plt.show()
#for ax7 in axes:
# ax7.set_ylabel('Common y-label')
plt.tight_layout()
fig = plt.gcf()
plt.tight_layout()
fig.set_size_inches(8.3,6.5)
plt.tight_layout()
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/monthly_SR_change_list_of_users/temporal_SR_persisting_formations_deletions7s7s.eps", dpi=710)
|
|
# Copyright 2012 Managed I.T.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_messaging.notify import notifier
from designate.tests.test_api import ApiTestCase
from designate import context
from designate import exceptions
from designate import rpc
from designate.api import middleware
class FakeRequest(object):
def __init__(self):
self.headers = {}
self.environ = {}
self.params = {}
def get_response(self, app):
return "FakeResponse"
class KeystoneContextMiddlewareTest(ApiTestCase):
def test_process_request(self):
app = middleware.KeystoneContextMiddleware({})
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
}
# Process the request
app.process_request(request)
self.assertIn('context', request.environ)
context = request.environ['context']
self.assertFalse(context.is_admin)
self.assertEqual('AuthToken', context.auth_token)
self.assertEqual('UserID', context.user)
self.assertEqual('TenantID', context.tenant)
self.assertEqual(['admin', 'Member'], context.roles)
def test_process_request_invalid_keystone_token(self):
app = middleware.KeystoneContextMiddleware({})
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': 'TenantID',
'X-Roles': 'admin,Member',
'X-Identity-Status': 'Invalid'
}
# Process the request
response = app(request)
self.assertEqual(response.status_code, 401)
def test_process_unscoped_token(self):
app = middleware.KeystoneContextMiddleware({})
request = FakeRequest()
request.headers = {
'X-Auth-Token': 'AuthToken',
'X-User-ID': 'UserID',
'X-Tenant-ID': None,
'X-Roles': 'admin,Member',
}
# Process the request
response = app(request)
self.assertEqual(response.status_code, 401)
class NoAuthContextMiddlewareTest(ApiTestCase):
def test_process_request(self):
app = middleware.NoAuthContextMiddleware({})
request = FakeRequest()
# Process the request
app.process_request(request)
self.assertIn('context', request.environ)
ctxt = request.environ['context']
self.assertIsNone(ctxt.auth_token)
self.assertEqual('noauth-user', ctxt.user)
self.assertEqual('noauth-project', ctxt.tenant)
self.assertEqual(['admin'], ctxt.roles)
class MaintenanceMiddlewareTest(ApiTestCase):
def test_process_request_disabled(self):
self.config(maintenance_mode=False, group='service:api')
request = FakeRequest()
app = middleware.MaintenanceMiddleware({})
# Process the request
response = app(request)
# Ensure request was not blocked
self.assertEqual(response, 'FakeResponse')
def test_process_request_enabled_reject(self):
self.config(maintenance_mode=True, maintenance_mode_role='admin',
group='service:api')
request = FakeRequest()
request.environ['context'] = context.DesignateContext(roles=['user'])
app = middleware.MaintenanceMiddleware({})
# Process the request
response = app(request)
# Ensure request was blocked
self.assertEqual(response.status_code, 503)
def test_process_request_enabled_reject_no_roles(self):
self.config(maintenance_mode=True, maintenance_mode_role='admin',
group='service:api')
request = FakeRequest()
request.environ['context'] = context.DesignateContext(roles=[])
app = middleware.MaintenanceMiddleware({})
# Process the request
response = app(request)
# Ensure request was blocked
self.assertEqual(response.status_code, 503)
def test_process_request_enabled_reject_no_context(self):
self.config(maintenance_mode=True, maintenance_mode_role='admin',
group='service:api')
request = FakeRequest()
app = middleware.MaintenanceMiddleware({})
# Process the request
response = app(request)
# Ensure request was blocked
self.assertEqual(response.status_code, 503)
def test_process_request_enabled_bypass(self):
self.config(maintenance_mode=True, maintenance_mode_role='admin',
group='service:api')
request = FakeRequest()
request.environ['context'] = context.DesignateContext(roles=['admin'])
app = middleware.MaintenanceMiddleware({})
# Process the request
response = app(request)
# Ensure request was not blocked
self.assertEqual(response, 'FakeResponse')
class NormalizeURIMiddlewareTest(ApiTestCase):
    def test_strip_trailing_slashes(self):
request = FakeRequest()
request.environ['PATH_INFO'] = 'resource/'
app = middleware.NormalizeURIMiddleware({})
# Process the request
app(request)
# Ensure request's PATH_INFO had the trailing slash removed.
self.assertEqual(request.environ['PATH_INFO'], 'resource')
    def test_strip_trailing_slashes_multiple(self):
request = FakeRequest()
request.environ['PATH_INFO'] = 'resource///'
app = middleware.NormalizeURIMiddleware({})
# Process the request
app(request)
# Ensure request's PATH_INFO had the trailing slash removed.
self.assertEqual(request.environ['PATH_INFO'], 'resource')
class FaultMiddlewareTest(ApiTestCase):
@mock.patch.object(notifier.Notifier, "error")
def test_notify_of_fault(self, mock_notifier):
self.config(notify_api_faults=True)
rpc.init(cfg.CONF)
app = middleware.FaultWrapperMiddleware({})
class RaisingRequest(FakeRequest):
def get_response(self, request):
raise exceptions.DuplicateDomain()
request = RaisingRequest()
ctxt = context.DesignateContext()
ctxt.request_id = 'one'
request.environ['context'] = ctxt
# Process the request
app(request)
self.assertEqual(mock_notifier.call_count, 1)
mock_notifier.call_args(
ctxt,
'dns.api.fault',
{"url": None, "status": 409, "exception": ""})
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import test
from tensorflow.tensorboard.backend.event_processing import reservoir
class ReservoirTest(test.TestCase):
def testEmptyReservoir(self):
r = reservoir.Reservoir(1)
self.assertFalse(r.Keys())
def testRespectsSize(self):
r = reservoir.Reservoir(42)
self.assertEqual(r._buckets['meaning of life']._max_size, 42)
def testItemsAndKeys(self):
r = reservoir.Reservoir(42)
r.AddItem('foo', 4)
r.AddItem('bar', 9)
r.AddItem('foo', 19)
self.assertItemsEqual(r.Keys(), ['foo', 'bar'])
self.assertEqual(r.Items('foo'), [4, 19])
self.assertEqual(r.Items('bar'), [9])
def testExceptions(self):
with self.assertRaises(ValueError):
reservoir.Reservoir(-1)
with self.assertRaises(ValueError):
reservoir.Reservoir(13.3)
r = reservoir.Reservoir(12)
with self.assertRaises(KeyError):
r.Items('missing key')
def testDeterminism(self):
"""Tests that the reservoir is deterministic."""
key = 'key'
r1 = reservoir.Reservoir(10)
r2 = reservoir.Reservoir(10)
for i in xrange(100):
r1.AddItem('key', i)
r2.AddItem('key', i)
self.assertEqual(r1.Items(key), r2.Items(key))
def testBucketDeterminism(self):
"""Tests that reservoirs are deterministic at a bucket level.
    This means that only the order in which elements are added within a bucket matters.
"""
separate_reservoir = reservoir.Reservoir(10)
interleaved_reservoir = reservoir.Reservoir(10)
for i in xrange(100):
separate_reservoir.AddItem('key1', i)
for i in xrange(100):
separate_reservoir.AddItem('key2', i)
for i in xrange(100):
interleaved_reservoir.AddItem('key1', i)
interleaved_reservoir.AddItem('key2', i)
for key in ['key1', 'key2']:
self.assertEqual(
separate_reservoir.Items(key), interleaved_reservoir.Items(key))
def testUsesSeed(self):
"""Tests that reservoirs with different seeds keep different samples."""
key = 'key'
r1 = reservoir.Reservoir(10, seed=0)
r2 = reservoir.Reservoir(10, seed=1)
for i in xrange(100):
r1.AddItem('key', i)
r2.AddItem('key', i)
self.assertNotEqual(r1.Items(key), r2.Items(key))
def testFilterItemsByKey(self):
r = reservoir.Reservoir(100, seed=0)
for i in xrange(10):
r.AddItem('key1', i)
r.AddItem('key2', i)
self.assertEqual(len(r.Items('key1')), 10)
self.assertEqual(len(r.Items('key2')), 10)
self.assertEqual(r.FilterItems(lambda x: x <= 7, 'key2'), 2)
self.assertEqual(len(r.Items('key2')), 8)
self.assertEqual(len(r.Items('key1')), 10)
self.assertEqual(r.FilterItems(lambda x: x <= 3, 'key1'), 6)
self.assertEqual(len(r.Items('key1')), 4)
self.assertEqual(len(r.Items('key2')), 8)
class ReservoirBucketTest(test.TestCase):
def testEmptyBucket(self):
b = reservoir._ReservoirBucket(1)
self.assertFalse(b.Items())
def testFillToSize(self):
b = reservoir._ReservoirBucket(100)
for i in xrange(100):
b.AddItem(i)
self.assertEqual(b.Items(), list(xrange(100)))
self.assertEqual(b._num_items_seen, 100)
def testDoesntOverfill(self):
b = reservoir._ReservoirBucket(10)
for i in xrange(1000):
b.AddItem(i)
self.assertEqual(len(b.Items()), 10)
self.assertEqual(b._num_items_seen, 1000)
def testMaintainsOrder(self):
b = reservoir._ReservoirBucket(100)
for i in xrange(10000):
b.AddItem(i)
items = b.Items()
prev = -1
for item in items:
self.assertTrue(item > prev)
prev = item
def testKeepsLatestItem(self):
b = reservoir._ReservoirBucket(5)
for i in xrange(100):
b.AddItem(i)
last = b.Items()[-1]
self.assertEqual(last, i)
def testSizeOneBucket(self):
b = reservoir._ReservoirBucket(1)
for i in xrange(20):
b.AddItem(i)
self.assertEqual(b.Items(), [i])
self.assertEqual(b._num_items_seen, 20)
def testSizeZeroBucket(self):
b = reservoir._ReservoirBucket(0)
for i in xrange(20):
b.AddItem(i)
self.assertEqual(b.Items(), list(range(i + 1)))
self.assertEqual(b._num_items_seen, 20)
def testSizeRequirement(self):
with self.assertRaises(ValueError):
reservoir._ReservoirBucket(-1)
with self.assertRaises(ValueError):
reservoir._ReservoirBucket(10.3)
def testRemovesItems(self):
b = reservoir._ReservoirBucket(100)
for i in xrange(10):
b.AddItem(i)
self.assertEqual(len(b.Items()), 10)
self.assertEqual(b._num_items_seen, 10)
self.assertEqual(b.FilterItems(lambda x: x <= 7), 2)
self.assertEqual(len(b.Items()), 8)
self.assertEqual(b._num_items_seen, 8)
def testRemovesItemsWhenItemsAreReplaced(self):
b = reservoir._ReservoirBucket(100)
for i in xrange(10000):
b.AddItem(i)
self.assertEqual(b._num_items_seen, 10000)
# Remove items
num_removed = b.FilterItems(lambda x: x <= 7)
self.assertGreater(num_removed, 92)
self.assertEqual([], [item for item in b.Items() if item > 7])
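# FilterItems rescales _num_items_seen by the fraction of sampled items that
# survived the filter, which is what the expected value below encodes.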
self.assertEqual(b._num_items_seen,
int(round(10000 * (1 - float(num_removed) / 100))))
def testLazyFunctionEvaluationAndAlwaysKeepLast(self):
class FakeRandom(object):
def randint(self, a, b): # pylint:disable=unused-argument
return 999
class Incrementer(object):
def __init__(self):
self.n = 0
def increment_and_double(self, x):
self.n += 1
return x * 2
# We've mocked the randomness generator, so that once it is full, the last
# item will never get durable reservoir inclusion. Since always_keep_last is
# false, the function should only get invoked 100 times while filling up
# the reservoir. This laziness property is an essential performance
# optimization.
b = reservoir._ReservoirBucket(100, FakeRandom(), always_keep_last=False)
incrementer = Incrementer()
for i in xrange(1000):
b.AddItem(i, incrementer.increment_and_double)
self.assertEqual(incrementer.n, 100)
self.assertEqual(b.Items(), [x * 2 for x in xrange(100)])
# This time, we will always keep the last item, meaning that the function
# should get invoked once for every item we add.
b = reservoir._ReservoirBucket(100, FakeRandom(), always_keep_last=True)
incrementer = Incrementer()
for i in xrange(1000):
b.AddItem(i, incrementer.increment_and_double)
self.assertEqual(incrementer.n, 1000)
self.assertEqual(b.Items(), [x * 2 for x in xrange(99)] + [999 * 2])
class ReservoirBucketStatisticalDistributionTest(test.TestCase):
def setUp(self):
self.total = 1000000
self.samples = 10000
self.n_buckets = 100
self.total_per_bucket = self.total // self.n_buckets
self.assertEqual(self.total % self.n_buckets, 0, 'total must be evenly '
'divisible by the number of buckets')
self.assertTrue(self.total > self.samples, 'need to have more items '
'than samples')
def AssertBinomialQuantity(self, measured):
p = 1.0 * self.n_buckets / self.samples
mean = p * self.samples
variance = p * (1 - p) * self.samples
error = measured - mean
# Given that the buckets were actually binomially distributed, this
# fails with probability ~2E-9
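# (error**2 <= 36 * variance is a six-sigma test; the two-sided normal tail
# beyond six standard deviations is roughly 2e-9.)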
passed = error * error <= 36.0 * variance
self.assertTrue(passed, 'found a bucket with measured %d '
'too far from expected %d' % (measured, mean))
def testBucketReservoirSamplingViaStatisticalProperties(self):
# The 'buckets' here are the histogram bins we sort samples into to test the
# shape of the distribution; they are unrelated to _ReservoirBucket.
b = reservoir._ReservoirBucket(_max_size=self.samples)
# add one extra item because we always keep the most recent item, which
# would skew the distribution; we can just slice it off the end instead.
for i in xrange(self.total + 1):
b.AddItem(i)
divbins = [0] * self.n_buckets
modbins = [0] * self.n_buckets
# Slice off the last item when we iterate.
for item in b.Items()[0:-1]:
divbins[item // self.total_per_bucket] += 1
modbins[item % self.n_buckets] += 1
for bucket_index in xrange(self.n_buckets):
divbin = divbins[bucket_index]
modbin = modbins[bucket_index]
self.AssertBinomialQuantity(divbin)
self.AssertBinomialQuantity(modbin)
if __name__ == '__main__':
test.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import threading
import time
import tensorflow as tf
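# Requests a stop on the coordinator after sleeping for n_secs seconds.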
def StopInN(coord, n_secs):
time.sleep(n_secs)
coord.request_stop()
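# Sleeps for n_secs, raises ex, and reports it to the coordinator either as an
# exception object or as sys.exc_info(), depending on report_exception.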
def RaiseInN(coord, n_secs, ex, report_exception):
try:
time.sleep(n_secs)
raise ex
# Catch every exception type so the non-RuntimeError cases exercised by the
# tests (e.g. OutOfRangeError, ValueError) are also reported to the coordinator.
except Exception as e: # pylint: disable=broad-except
if report_exception:
coord.request_stop(e)
else:
coord.request_stop(sys.exc_info())
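# Same as RaiseInN, but relies on the coordinator's stop_on_exception() context
# manager to report the exception.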
def RaiseInNUsingContextHandler(coord, n_secs, ex):
with coord.stop_on_exception():
time.sleep(n_secs)
raise ex
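# Sleeps for n_secs; if a coordinator is given, registers the current thread
# with it first.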
def SleepABit(n_secs, coord=None):
if coord:
coord.register_thread(threading.current_thread())
time.sleep(n_secs)
class CoordinatorTest(tf.test.TestCase):
def testStopAPI(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
coord.request_stop()
self.assertTrue(coord.should_stop())
self.assertTrue(coord.wait_for_stop(0.01))
def testStopAsync(self):
coord = tf.train.Coordinator()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.1))
threading.Thread(target=StopInN, args=(coord, 0.02)).start()
self.assertFalse(coord.should_stop())
self.assertFalse(coord.wait_for_stop(0.01))
self.assertTrue(coord.wait_for_stop(0.05))
self.assertTrue(coord.should_stop())
def testJoin(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01,)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01,))]
for t in threads:
t.start()
coord.join(threads)
for t in threads:
self.assertFalse(t.is_alive())
def testJoinAllRegistered(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02, coord)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
coord.join()
for t in threads:
self.assertFalse(t.is_alive())
def testJoinSomeRegistered(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=SleepABit, args=(0.01, coord)),
threading.Thread(target=SleepABit, args=(0.02,)),
threading.Thread(target=SleepABit, args=(0.01, coord))]
for t in threads:
t.start()
# threads[1] is not registered, so we must pass it in explicitly.
coord.join(threads[1:2])
for t in threads:
self.assertFalse(t.is_alive())
def testJoinGraceExpires(self):
def TestWithGracePeriod(stop_grace_period):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=StopInN, args=(coord, 0.01)),
threading.Thread(target=SleepABit, args=(10.0,))]
for t in threads:
t.daemon = True
t.start()
with self.assertRaisesRegexp(RuntimeError, "threads still running"):
coord.join(threads, stop_grace_period_secs=stop_grace_period)
TestWithGracePeriod(1e-10)
TestWithGracePeriod(0.002)
TestWithGracePeriod(1.0)
def testJoinRaiseReportExcInfo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), False)),
threading.Thread(target=RaiseInN,
args=(coord, 0.05, RuntimeError("Too late"), False))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinRaiseReportException(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
threading.Thread(target=RaiseInN,
args=(coord, 0.05, RuntimeError("Too late"), True))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testJoinIgnoresOutOfRange(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01,
tf.errors.OutOfRangeError(None, None, "First"),
True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinIgnoresMyExceptionType(self):
coord = tf.train.Coordinator(clean_stop_exception_types=(ValueError,))
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, ValueError("Clean stop"), True))
]
for t in threads:
t.start()
coord.join(threads)
def testJoinRaiseReportExceptionUsingHandler(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.01, RuntimeError("First"))),
threading.Thread(target=RaiseInNUsingContextHandler,
args=(coord, 0.05, RuntimeError("Too late")))]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
def testClearStopClearsExceptionToo(self):
coord = tf.train.Coordinator()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("First"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "First"):
coord.join(threads)
coord.clear_stop()
threads = [
threading.Thread(target=RaiseInN,
args=(coord, 0.01, RuntimeError("Second"), True)),
]
for t in threads:
t.start()
with self.assertRaisesRegexp(RuntimeError, "Second"):
coord.join(threads)
def testRequestStopRaisesIfJoined(self):
coord = tf.train.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError as e:
reported = True
coord.request_stop(e)
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError as e:
coord.request_stop(e)
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
def testRequestStopRaisesIfJoined_ExcInfo(self):
# Same as testRequestStopRaisesIfJoined but using sys.exc_info().
coord = tf.train.Coordinator()
# Join the coordinator right away.
coord.join([])
reported = False
with self.assertRaisesRegexp(RuntimeError, "Too late"):
try:
raise RuntimeError("Too late")
except RuntimeError:
reported = True
coord.request_stop(sys.exc_info())
self.assertTrue(reported)
# If we clear_stop the exceptions are handled normally.
coord.clear_stop()
try:
raise RuntimeError("After clear")
except RuntimeError:
coord.request_stop(sys.exc_info())
with self.assertRaisesRegexp(RuntimeError, "After clear"):
coord.join([])
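# Loop target: decrements the counter n[0] on each call and requests a stop
# once it reaches zero.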
def _StopAt0(coord, n):
if n[0] == 0:
coord.request_stop()
else:
n[0] -= 1
class LooperTest(tf.test.TestCase):
def testTargetArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord, n))
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetKwargs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
kwargs={"coord": coord, "n": n})
coord.join([thread])
self.assertEqual(0, n[0])
def testTargetMixedArgs(self):
n = [3]
coord = tf.train.Coordinator()
thread = tf.train.LooperThread.loop(coord, 0, target=_StopAt0,
args=(coord,), kwargs={"n": n})
coord.join([thread])
self.assertEqual(0, n[0])
if __name__ == "__main__":
tf.test.main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from website.models import Question
from website.views.data_migration import migrate_organizationMember
class Migration(SchemaMigration):
def forwards(self, orm):
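# Forward pass is a pure data migration: it delegates to the
# migrate_organizationMember helper; the 'models' dict below is only South's
# frozen ORM snapshot.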
migrate_organizationMember()
def backwards(self, orm):
# No reverse action needed for this data migration.
pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.actiontutorial': {
'Meta': {'object_name': 'ActionTutorial'},
'action_identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.application': {
'Meta': {'object_name': 'Application'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'current_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True', 'blank': 'True'})
},
'website.applicationanswer': {
'Meta': {'object_name': 'ApplicationAnswer'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicationhistory': {
'Meta': {'object_name': 'ApplicationHistory'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Application']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'status_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.document': {
'Meta': {'object_name': 'Document'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'file_path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.documentcategory': {
'Meta': {'object_name': 'DocumentCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Region']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.person': {
'Meta': {'object_name': 'Person'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'phone_mobile': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'phone_secondary': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.personaddress': {
'Meta': {'object_name': 'PersonAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'address_type': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Person']", 'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.questiondependency': {
'Meta': {'object_name': 'QuestionDependency'},
'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'question1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question1'", 'to': "orm['website.Question']"}),
'question2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_questionDependency_question2'", 'to': "orm['website.Question']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'strength': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.region': {
'Meta': {'object_name': 'Region'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.tutorialpage': {
'Meta': {'object_name': 'TutorialPage'},
'display_order': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'selector': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'tip': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userreward': {
'Meta': {'object_name': 'UserReward'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RewardCategory']", 'null': 'True', 'blank': 'True'}),
'reward_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usertutorialhistory': {
'Meta': {'object_name': 'UserTutorialHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.usertutorialpagehistory': {
'Meta': {'object_name': 'UserTutorialPageHistory'},
'checked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.TutorialPage']", 'null': 'True', 'blank': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Tutorial']", 'null': 'True', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
|
|
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Federate class.
"""
import numpy as np
import re
import logging
from .controller import Controller
from .operations import Operations
from .algorithms import list2dict
class Federate(Controller):
def __init__(self, name=None, initialCash=0, elements=None,
contracts=None, operations=Operations()):
"""
@param name: the name of this federate
@type name: L{str}
@param initialCash: the initial cash for this federate
@type initialCash: L{float}
@param elements: the elements controlled by this federate
@type elements: L{list}
@param contracts: the contracts owned by this federate
@type contracts: L{list}
@param operations: the operations model of this federate
@type operations: L{Operations}
"""
Controller.__init__(self, name=name)
self.initialCash = initialCash
self.cash = self.initialCash
if elements is None:
self._initElements = []
else:
self._initElements = elements[:]
self.elements = self._initElements
if contracts is None:
self._initContracts = []
else:
self._initContracts = contracts[:]
self.contracts = self._initContracts
self.operations = operations
self.costDic = {}
# self.contractSignals = {}
# self.demandSignals = {}
self.thirdContract = {}
self.receivedDemand = {}
self.issuedDemand = {}
self.costHistory = {}
self.groundSections = None
self.name = name
def getElements(self):
"""
Gets the elements controlled by this controller.
@return L{list}
"""
return self.elements[:]
def getFederates(self):
"""
Gets the federates controlled by this controller.
@return L{list}
"""
return [self]
def getContracts(self):
"""
Gets the contracts controlled by this controller.
@return L{list}
"""
return self.contracts[:]
def design(self, element):
"""
Designs an element for this federate.
@param element: the element to design
@type element: L{Element}
@return L{bool}
"""
if element.getContentsSize() > element.capacity:
            logging.warning('{0} contents exceed capacity.'
.format(element.name))
elif element.getDesignCost() > self.cash:
            logging.warning('{0} design cost exceeds cash.'
.format(element.name))
else:
self.elements.append(element)
cost = element.getDesignCost()
self.cash -= cost
logging.info('{0} designed {1} for {2}'
.format(self.name, element.name, cost))
self.trigger('design', self, element, cost)
return True
return False
def commission(self, element, location, context):
"""
Commissions an element at a location.
@param element: the element to commission
@type element: L{Element}
@param location: the location at which to commission
@type location: L{Location}
@param context: the context
@type context: L{Context}
@return: L{bool}
"""
if element not in self.getElements():
logging.warning('{0} does not control {1}.'
.format(self.name, element.name))
elif element.getCommissionCost(location, context) > self.cash:
logging.warning('{0} commission cost exceeds cash.'
.format(element.name))
elif element.commission(location, context):
logging.info('{0} commissioned {1} for {2}.'
.format(self.name, element.name,
element.getCommissionCost(location, context)))
cost = element.getCommissionCost(location, context)
self.cash -= cost
self.trigger('commission', self, element, location, cost)
return True
else:
logging.warning('{0} could not commission {1}.'
.format(self.name, element.name))
return False
def decommission(self, element):
"""
Decommissions an element.
@param element: the element to decommission
@type element: L{Element}
@return: L{bool}
"""
if element not in self.getElements():
logging.info('{0} could not decommission {1}.'.format(
self.name, element.name))
else:
self.elements.remove(element)
# self.cash += element.getDecommissionValue()
logging.info('{0} decommissioned {1} for {2}.'.format(
self.name, element.name, element.getDecommissionValue()))
self.trigger('decommission', self, element)
return True
return False
def init(self, sim):
"""
Initializes this federate in a simulation.
@param sim: the simulator
"""
super(Federate, self).init(sim)
self.cash = self.initialCash
self.elements = self._initElements[:]
for element in self.elements:
element.init(sim)
self.contracts = self._initContracts[:]
for contract in self.contracts:
contract.init(sim)
def tick(self, sim):
"""
Ticks this federate in a simulation.
@param sim: the simulator
"""
super(Federate, self).tick(sim)
for element in self.elements:
element.tick(sim)
for contract in self.contracts:
contract.tick(sim)
# print "Tick cash: ", self.cash
def tock(self):
"""
Tocks this federate in a simulation.
"""
super(Federate, self).tock()
for element in self.elements:
element.tock()
for contract in self.contracts:
contract.tock()
# print "Tock cash: ", self.cash
def setCost(self, protocol, cost):
self.costDic[protocol] = cost
def getCost(self, protocol, federate=None):
# key = '{}-{}'.format(federate, protocol)
# return self.costDic[protocol] if key not in self.costDic else self.costDic[key]
name_dic = {'P1': 300, 'P2': 300, 'P3': 300}
# c = 200*np.round(10*np.random.normal()) +
# mutual_cost = []
# for k, v in self.receivedDemand.items():
# if protocol not in k or federate not in k:
# continue
#
# g = re.search(r'.+_(\w+)_(\d+)', k).groups()
# cost = int(g[1])
# mutual_cost.append()
# if protocol not in self.costHistory:
# self.costHistory[protocol] = []
#
# self.costHistory[protocol].append(c)
return name_dic[self.name]
# return
# def addContractSignal(self, issuer, protocol, cost):
# """
# :param cType: contract type
# :param issuer: the issuer of the contract
# """
# k = '{0}_{1}_{2}'.format(issuer, protocol, cost)
# self.contractSignals[k] = self.contractSignals[k]+1 if k in self.contractSignals else 1
#
# def addDemandSignal(self, issuer, protocol, cost):
# """
# :param cType: contract type
# :param issuer: the issuer of the contract
# """
# k = '{0}_{1}_{2}'.format(issuer, protocol, cost)
# self.demandSignals[k] = self.demandSignals[k]+1 if k in self.demandSignals else 1
def addThirdContract(self, sender, protocol, cost):
"""
:param cType: contract type
:param issuer: the issuer of the contract
"""
k = '{0}_{1}_{2}'.format(sender, protocol, cost)
self.thirdContract[k] = self.thirdContract[k]+1 if k in self.thirdContract else 1
def addThirdDemand(self, sender, protocol, cost):
"""
:param cType: contract type
:param issuer: the issuer of the contract
"""
k = '{0}_{1}_{2}'.format(sender, protocol, cost)
self.receivedDemand[k] = self.receivedDemand[k] + 1 if k in self.receivedDemand else 1
# def updateCost(self):
def addIssueDemand(self, receiver, protocol, cost):
k = '{0}_{1}_{2}'.format(receiver, protocol, cost)
self.issuedDemand[k] = self.issuedDemand[k] + 1 if k in self.issuedDemand else 1
def getthirdcontractsdemands(self):
return (self.thirdContract, self.receivedDemand)
def getCostRewards(self):
woncontracts = {}
# print self.receivedDemand
for k, v in self.receivedDemand.items():
g = re.search(r'.+_(\w+)_(\d+)', k).groups()
protocol = g[0]
cost = int(g[1])
            if protocol not in woncontracts:
                woncontracts[protocol] = []
            for i in range(v):
                woncontracts[protocol].append(cost)
for k in woncontracts:
woncontracts[k] = list2dict(woncontracts[k])
# print k
# print "The won count:", list2dict(woncontracts[k])
# print "Offer Count:", list2dict(self.costHistory[k])
# costrewards = {}
return woncontracts
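    # Note: getCostRewards() relies on the list2dict helper imported from
    # .algorithms. As a rough sketch of the assumed behavior (not the actual
    # implementation), it acts as a simple value counter:
    #     def list2dict(values):
    #         counts = {}
    #         for v in values:
    #             counts[v] = counts.get(v, 0) + 1
    #         return counts
    # e.g. list2dict([300, 300, 600]) -> {300: 2, 600: 1}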
|
|
#!/usr/bin/python
import numpy as np
from numpy import sin, cos, deg2rad, rad2deg
from numpy.linalg import norm
from kepler import julian_date, rvToElements, mu_sun, mu_earth
def dmsToDeg(dms):
"""Converts from deg:min:sec to decimal degree
:dms: list or tuple with degree, minute, second
:returns: angle in degrees
"""
dmsDeg = sum([abs(e)/60.**i for i, e in enumerate(dms)])
dmsDeg *= np.sign(dms[0])
return dmsDeg
def hmsToDeg(hms):
"""Converts RA in hour:min:sec to decimal degree
    :hms: list or tuple with hour, minute, second
:returns: angle in degrees
"""
hmsToDecHour = sum([e/60.**i for i, e in enumerate(hms)])
hmsDeg = 360*hmsToDecHour/24
return hmsDeg
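# Quick sanity checks for the angle conversions (illustrative values only):
#   dmsToDeg((-12, 30, 0)) -> -12.5 deg (sign taken from the degree field)
#   hmsToDeg((6, 0, 0))    -> 90.0 deg  (6 h of right ascension = 90 deg)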
def __cosineVector__(ra, dec):
"""Generates matrix for transformation from perifocal
coordinate system to geocentric-equatorial coordinate
system.
Note: All angles MUST be in DEGREES
:i: inclination
:dec: longitude of ascending node
:ra: argument of periapsis
:returns: Transformation Matrix
"""
ra = deg2rad(ra)
dec = deg2rad(dec)
return np.array([cos(dec)*cos(ra), cos(dec)*sin(ra), sin(dec)])
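# Illustrative example: an object at ra = 90 deg, dec = 0 deg lies along the
# inertial y-axis, so __cosineVector__(90, 0) ~ [0, 1, 0].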
def __celestialTransform__(e, juliandate):
"""Transforms unit position vector from ECI to HCI
:e: cosine position vector of celestial object
:juliandate: Julian date for transform
:returns: transformed cosine vector of celestial object
"""
# Julian centuries Terestial Time since J2000 TT
J2000 = 2451545.0
Jcentury = 36525.0
T = (juliandate-J2000)/Jcentury
eps = deg2rad(23.43929111 + (-46.8150*T - 0.00059*T**2 + 0.001813*T**3)/3600)
Rx = np.zeros((3,3))
Rx[0, 0] = 1.0
Rx[1, 1] = cos(eps)
Rx[1, 2] = -sin(eps)
Rx[2, 1] = sin(eps)
Rx[2, 2] = cos(eps)
e_new = Rx.dot(e)
return e_new
def __sectorToTriangle__(juliandates, rvec_a, rvec_b, ea, mu):
"""Determines the sector to triangle ratio from times and position vectors.
:juliandates: Julian dates of observations
:rvec_a: position vector for first observation
:rvec_b: position vector for second observation
:mu: mu for centric body (defaults to sun)
:returns: orbital elements for orbiting body
"""
tau = np.sqrt(mu)*np.diff(juliandates)[0]*24*3600
ra = norm(rvec_a)
rb = norm(rvec_b)
r0 = norm(rvec_b - rvec_b.dot(ea)*ea)
delta = 0.5*ra*r0
m = tau**2/np.sqrt(2*(ra*rb+rvec_a.dot(rvec_b)))**3
l = (ra+rb)/(2*np.sqrt(2*(ra*rb+rvec_a.dot(rvec_b)))) - 0.5
g = lambda w: 2*np.arcsin(np.sqrt(w))
W = lambda w: (2*g(w) - sin(2*g(w)))/sin(g(w))**3
f = lambda eta: 1 - eta + (m/eta**2)*W(m/eta**2-l)
eta0 = (12 + 10*np.sqrt(1+44*m/(9*(l+5./6.))))/22
eta1 = eta0 + 0.1
eta2 = eta0
err = 1
while abs(err) > 1e-10:
eta3 = eta2 - f(eta2)*(eta2-eta1)/(f(eta2)-f(eta1))
eta1, eta2 = eta2, eta3
err = eta2 - eta1
return eta3
def twoPositions(times, positions, mu=mu_earth):
"""Determines orbital elements from two position vectors using Gauss' method. Can
either be used for Earth orbiting satellite, in which case positions are from center
of Earth to surface observation location, or for heliocentric orbit, in which case
positions are from Sun to Earth at particular times.
    :times: times for the two observations in (YYYY, MM, DD, hh, mm, ss) format
    :positions: position vectors from the inertial reference frame to the observation points
    :mu: gravitational parameter of the central body (defaults to mu_earth)
    :returns: orbital elements (a, e, i, Omega, omega, nu)
"""
# Convert times from YYYY, MM, DD, hh:mm:ss to Julian date
juliandates = np.array([julian_date(*time) for time in times])
tau = np.sqrt(mu)*np.diff(juliandates)[0]*24*3600
rvec_a, rvec_b = [np.array(r) for r in positions]
ra = norm(rvec_a)
rb = norm(rvec_b)
ea = rvec_a/ra
rvec_0 = rvec_b - rvec_b.dot(ea)*ea
r0 = norm(rvec_0)
e0 = rvec_0/r0
# area of triangle from two position vectors
delta = 0.5*ra*r0
eta = __sectorToTriangle__(juliandates, rvec_a, rvec_b, ea, mu)
W = np.cross(ea, e0)
# argument of latitude
u = np.arctan2(rvec_a[-1], -np.cross(rvec_a, W)[-1])
# semi-latus rectum
p = (2*delta*eta/tau)**2
# eccentricity and true anomaly
e_cos_nu = p/ra - 1
e_sin_nu = ((p/ra - 1)*rvec_b.dot(ea)/rb - (p/rb - 1))/(r0/rb)
e = np.sqrt(e_cos_nu**2 + e_sin_nu**2)
nu = np.arctan2(e_sin_nu, e_cos_nu)
# inclination
i = np.arctan2(norm(W[:2]), W[2])
# eccentric anomaly and mean anomaly
E = np.arctan2(np.sqrt(1-e**2)*sin(nu), cos(nu)+e)
M = E-e*sin(E)
# right ascension of the ascending node
Omega = np.arctan2(W[0], -W[1])
# conversions from radians to degrees
i = rad2deg(i) % 360
Omega = rad2deg(Omega) % 360
nu = rad2deg(nu) % 360
u = rad2deg(u) % 360
E = rad2deg(E) % 360
M = rad2deg(M) % 360
# argument of perigee
omega = (u - nu) % 360
# semi-major axis
a = p/(1-e**2)
return a, e, i, Omega, omega, nu
def threeAngles(times, positions, angles, heliocentric=False):
"""Determines orbital elements from three separate observations using Gauss' method.
Can either be used for Earth orbiting satellite, in which case positions are from
center of Earth to surface observation location, or for heliocentric orbit, in which
case positions are from Sun to Earth at particular times.
:times: times for three observations in (YYYY, MM, DD, hh, mm, ss) format
    :positions: position vectors from the inertial reference frame to the observation sites
:angles: right ascension (hms) and declination (dms) for the observations
:heliocentric: flag to indicate heliocentric (True) or geocentric (False) orbit
:returns: position vectors for observations
"""
# Convert times from YYYY, MM, DD, hh:mm:ss to Julian date
juliandates = np.array([julian_date(*time) for time in times])
# Convert angles from HMS and DMS to decimal degree
angles = [(hmsToDeg(angle[0]), dmsToDeg(angle[1])) for angle in angles]
# Determine cosine position vector
e = [__cosineVector__(angle[0], angle[1]) for angle in angles]
if heliocentric:
# Transform to celestial coordinates
e = [__celestialTransform__(ei, jd) for ei, jd in zip(e, juliandates)]
mu = mu_sun
else:
mu = mu_earth
# Auxiliary unit vectors (Bucerius, 1950)
d = [np.cross(e[(i+1)%3], e[(i+2)%3]) for i in xrange(3)]
D0 = np.average([ei.dot(di) for ei, di in zip(e, d)])
D = np.array([[di.dot(Rj) for Rj in positions] for di in d])
# D = np.array([[di.dot(Rj) for di in d] for Rj in positions]).T()
tau = np.array([(juliandates[(i+2)%3]-juliandates[(i+1)%3]) /
(juliandates[-1]-juliandates[0]) for i in xrange(3)])
eta = np.ones(3)
errors = np.ones(3)
while max(abs(errors)) >1e-10:
eta_old = eta.copy()
n = eta[1]*tau/eta
rho = abs(D.dot(n)/(n*D0))
rhovec = [ri*ei for ri, ei in zip(rho, e)]
rvec = [R + rho for R, rho in zip(positions, rhovec)]
eta[0] = __sectorToTriangle__(juliandates[:2], rvec[0], rvec[1], e[0], mu)
eta[1] = __sectorToTriangle__((juliandates[1:]), rvec[1], rvec[2], e[1], mu)
eta[2] = __sectorToTriangle__(juliandates[[0, 2]], rvec[0], rvec[2], e[0], mu)
errors = eta - eta_old
return rvec
def main():
times = ((2016, 11, 28, 23, 25, 0),
(2016, 11, 29, 01, 05, 0),
(2016, 11, 29, 02, 50, 0))
RA = ((21, 31, 31),
(18, 22, 25),
(21, 24, 21))
dec = ((+44, 07, 40),
(+ 8, 22, 25),
(-24, 36, 11))
Re = 6378.137 # km
lam = -85.483127
phi = 32.605763
H = 200.601 # m
f = 1/298.256421867
lam = np.deg2rad(lam)
phi = np.deg2rad(phi)
    # geodetic site coordinates; e^2 = 2f - f^2 and H is converted from m to km
    xc = (Re/np.sqrt(1-(2*f-f**2)*sin(phi)**2) + H/1000)*cos(phi)
    zc = (Re*(1-(2*f-f**2))/np.sqrt(1-(2*f-f**2)*sin(phi)**2) + H/1000)*sin(phi)
J2000 = julian_date(2000, 1, 1, 12, 0, 0)
lams = [280.4606 + 360.9856473*(julian_date(*time)-J2000) for time in times]
positions = [(xc*cos(lam), xc*sin(lam), zc) for lam in lams]
print positions
angles = zip(RA, dec)
rvec = threeAngles(times, positions, angles)
print rvec
times = times[0], times[-1]
rvec = rvec[0], rvec[-1]
return twoPositions(times, rvec, mu_earth)
if __name__ == "__main__":
a, e, i, Omega, omega, nu = main()
# times = ((1999, 04, 02, 00, 30, 00.0),
# (1999, 04, 02, 03, 00, 00.0))
# positions = ((11959.978, -16289.478, -5963.827),
# (39863.390, -13730.547, -4862.350))
# a, e, i, Omega, omega, nu = twoPositions(times, positions, mu=mu_earth)
print 'a: ', a
print 'e: ', e
print 'i: ', i
print 'Omega: ', Omega
print 'omega: ', omega
print 'nu: ', nu
|
|
# Copyright (C) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import threading
import time
from unittest import skip
import fixtures
from nodepool import jobs
from nodepool import tests
from nodepool import nodedb
import nodepool.fakeprovider
import nodepool.nodepool
class TestNodepool(tests.DBTestCase):
log = logging.getLogger("nodepool.TestNodepool")
def test_db(self):
db = nodedb.NodeDatabase(self.dburi)
with db.getSession() as session:
session.getNodes()
def test_node(self):
"""Test that an image and node are created"""
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_disabled_label(self):
"""Test that an image and node are not created"""
configfile = self.setup_config('node_disabled_label.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
def test_node_net_name(self):
"""Test that a node is created with a net name"""
configfile = self.setup_config('node_net_name.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_node_vhd_image(self):
"""Test that a image and node are created vhd image"""
configfile = self.setup_config('node_vhd.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_node_vhd_and_qcow2(self):
"""Test label provided by vhd and qcow2 images builds"""
configfile = self.setup_config('node_vhd_and_qcow2.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider1', 'fake-image')
self.waitForImage('fake-provider2', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
def test_dib_upload_fail(self):
"""Test that an image upload failure is contained."""
configfile = self.setup_config('node_upload_fail.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider2', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
def test_subnodes(self):
"""Test that an image and node are created"""
configfile = self.setup_config('subnodes.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
nodes = session.getNodes(provider_name='fake-provider',
label_name='multi-fake',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.subnodes), 2)
for subnode in node.subnodes:
self.assertEqual(subnode.state, nodedb.READY)
def test_subnode_deletion_success(self):
"""Test that subnodes are deleted with parent node"""
configfile = self.setup_config('subnodes.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
subnode_ids = []
node_ids = []
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='multi-fake',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 2)
for node in nodes:
self.assertEqual(len(node.subnodes), 2)
for subnode in node.subnodes:
self.assertEqual(subnode.state, nodedb.READY)
subnode_ids.append(subnode.id)
node_ids.append(node.id)
for node_id in node_ids:
pool.deleteNode(node_id)
self.wait_for_threads()
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
for subnode_id in subnode_ids:
s = session.getSubNode(subnode_id)
self.assertIsNone(s)
def test_node_az(self):
"""Test that an image and node are created with az specified"""
configfile = self.setup_config('node_az.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].az, 'az1')
def test_node_ipv6(self):
"""Test that a node is created w/ or w/o ipv6 preferred flag"""
configfile = self.setup_config('node_ipv6.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider1', 'fake-image')
self.waitForImage('fake-provider2', 'fake-image')
self.waitForImage('fake-provider3', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
# ipv6 preferred set to true and ipv6 address available
nodes = session.getNodes(provider_name='fake-provider1',
label_name='fake-label1',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].ip, 'fake_v6')
# ipv6 preferred unspecified and ipv6 address available
nodes = session.getNodes(provider_name='fake-provider2',
label_name='fake-label2',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].ip, 'fake')
# ipv6 preferred set to true but ipv6 address unavailable
nodes = session.getNodes(provider_name='fake-provider3',
label_name='fake-label3',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].ip, 'fake')
def test_node_delete_success(self):
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
node_id = -1
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
node_id = nodes[0].id
pool.deleteNode(node_id)
self.wait_for_threads()
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
ready_nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
deleted_nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.DELETE)
# Make sure we have one node which is a new node
self.assertEqual(len(ready_nodes), 1)
self.assertNotEqual(node_id, ready_nodes[0].id)
# Make sure our old node was deleted
self.assertEqual(len(deleted_nodes), 0)
def test_node_delete_failure(self):
def fail_delete(self, name):
raise RuntimeError('Fake Error')
fake_delete = 'nodepool.fakeprovider.FakeJenkins.delete_node'
self.useFixture(fixtures.MonkeyPatch(fake_delete, fail_delete))
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
node_id = -1
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
node_id = nodes[0].id
pool.deleteNode(node_id)
self.wait_for_threads()
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
ready_nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
deleted_nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.DELETE)
# Make sure we have one node which is a new node
self.assertEqual(len(ready_nodes), 1)
self.assertNotEqual(node_id, ready_nodes[0].id)
# Make sure our old node is in delete state
self.assertEqual(len(deleted_nodes), 1)
self.assertEqual(node_id, deleted_nodes[0].id)
def test_leaked_node(self):
"""Test that a leaked node is deleted"""
configfile = self.setup_config('leaked_node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.log.debug("Waiting for initial pool...")
self.waitForNodes(pool)
self.log.debug("...done waiting for initial pool.")
# Make sure we have a node built and ready
provider = pool.config.providers['fake-provider']
manager = pool.getProviderManager(provider)
servers = manager.listServers()
self.assertEqual(len(servers), 1)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
# Delete the node from the db, but leave the instance
# so it is leaked.
self.log.debug("Delete node db record so instance is leaked...")
for node in nodes:
node.delete()
self.log.debug("...deleted node db so instance is leaked.")
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 0)
# Wait for nodepool to replace it, which should be enough
# time for it to also delete the leaked node
self.log.debug("Waiting for replacement pool...")
self.waitForNodes(pool)
self.log.debug("...done waiting for replacement pool.")
# Make sure we end up with only one server (the replacement)
servers = manager.listServers()
self.assertEqual(len(servers), 1)
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.READY)
self.assertEqual(len(nodes), 1)
@skip("Disabled for early v3 development")
def test_building_image_cleanup_on_start(self):
"""Test that a building image is deleted on start"""
configfile = self.setup_config('node.yaml')
pool = nodepool.nodepool.NodePool(self.secure_conf, configfile,
watermark_sleep=1)
try:
pool.start()
self.waitForImage(pool, 'fake-provider', 'fake-image')
self.waitForNodes(pool)
finally:
# Stop nodepool instance so that it can be restarted.
pool.stop()
with pool.getDB().getSession() as session:
images = session.getSnapshotImages()
self.assertEqual(len(images), 1)
self.assertEqual(images[0].state, nodedb.READY)
images[0].state = nodedb.BUILDING
# Start nodepool instance which should delete our old image.
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
# Ensure we have a config loaded for periodic cleanup.
while not pool.config:
time.sleep(0)
# Wait for startup to shift state to a state that periodic cleanup
# will act on.
while True:
with pool.getDB().getSession() as session:
if session.getSnapshotImages()[0].state != nodedb.BUILDING:
break
time.sleep(0)
# Necessary to force cleanup to happen within the test timeframe
pool.periodicCleanup()
self.waitForImage(pool, 'fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
images = session.getSnapshotImages()
self.assertEqual(len(images), 1)
self.assertEqual(images[0].state, nodedb.READY)
# should be second image built.
self.assertEqual(images[0].id, 2)
@skip("Disabled for early v3 development")
def test_building_dib_image_cleanup_on_start(self):
"""Test that a building dib image is deleted on start"""
configfile = self.setup_config('node.yaml')
pool = nodepool.nodepool.NodePool(self.secure_conf, configfile,
watermark_sleep=1)
self._useBuilder(configfile)
try:
pool.start()
self.waitForImage(pool, 'fake-provider', 'fake-image')
self.waitForNodes(pool)
finally:
# Stop nodepool instance so that it can be restarted.
pool.stop()
with pool.getDB().getSession() as session:
# We delete the snapshot image too to force a new dib image
# to be built so that a new image can be uploaded to replace
# the image that was in the snapshot table.
images = session.getSnapshotImages()
self.assertEqual(len(images), 1)
self.assertEqual(images[0].state, nodedb.READY)
images[0].state = nodedb.BUILDING
images = session.getDibImages()
self.assertEqual(len(images), 1)
self.assertEqual(images[0].state, nodedb.READY)
images[0].state = nodedb.BUILDING
# Start nodepool instance which should delete our old image.
pool = self.useNodepool(configfile, watermark_sleep=1)
pool.start()
# Ensure we have a config loaded for periodic cleanup.
while not pool.config:
time.sleep(0)
# Wait for startup to shift state to a state that periodic cleanup
# will act on.
while True:
with pool.getDB().getSession() as session:
if session.getDibImages()[0].state != nodedb.BUILDING:
break
time.sleep(0)
# Necessary to force cleanup to happen within the test timeframe
pool.periodicCleanup()
self.waitForImage(pool, 'fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
images = session.getDibImages()
self.assertEqual(len(images), 1)
self.assertEqual(images[0].state, nodedb.READY)
# should be second image built.
self.assertEqual(images[0].id, 2)
def test_job_start_event(self):
"""Test that job start marks node used"""
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
msg_obj = {'name': 'fake-job',
'build': {'node_name': 'fake-label-fake-provider-1'}}
json_string = json.dumps(msg_obj)
handler = nodepool.nodepool.NodeUpdateListener(pool,
'tcp://localhost:8881')
handler.handleEvent('onStarted', json_string)
self.wait_for_threads()
with pool.getDB().getSession() as session:
nodes = session.getNodes(provider_name='fake-provider',
label_name='fake-label',
target_name='fake-target',
state=nodedb.USED)
self.assertEqual(len(nodes), 1)
def test_job_end_event(self):
"""Test that job end marks node delete"""
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
msg_obj = {'name': 'fake-job',
'build': {'node_name': 'fake-label-fake-provider-1',
'status': 'SUCCESS'}}
json_string = json.dumps(msg_obj)
# Don't delay when deleting.
self.useFixture(fixtures.MonkeyPatch(
'nodepool.nodepool.DELETE_DELAY',
0))
handler = nodepool.nodepool.NodeUpdateListener(pool,
'tcp://localhost:8881')
handler.handleEvent('onFinalized', json_string)
self.wait_for_threads()
with pool.getDB().getSession() as session:
node = session.getNode(1)
self.assertEqual(node, None)
def _test_job_auto_hold(self, result):
configfile = self.setup_config('node.yaml')
pool = self.useNodepool(configfile, watermark_sleep=1)
self._useBuilder(configfile)
pool.start()
self.waitForImage('fake-provider', 'fake-image')
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
session.createJob('fake-job', hold_on_failure=1)
msg_obj = {'name': 'fake-job',
'build': {'node_name': 'fake-label-fake-provider-1',
'status': result}}
json_string = json.dumps(msg_obj)
# Don't delay when deleting.
self.useFixture(fixtures.MonkeyPatch(
'nodepool.nodepool.DELETE_DELAY',
0))
handler = nodepool.nodepool.NodeUpdateListener(pool,
'tcp://localhost:8881')
handler.handleEvent('onFinalized', json_string)
self.wait_for_threads()
return pool
def test_job_auto_hold_success(self):
"""Test that a successful job does not hold a node"""
pool = self._test_job_auto_hold('SUCCESS')
with pool.getDB().getSession() as session:
node = session.getNode(1)
self.assertIsNone(node)
def test_job_auto_hold_failure(self):
"""Test that a failed job automatically holds a node"""
pool = self._test_job_auto_hold('FAILURE')
with pool.getDB().getSession() as session:
node = session.getNode(1)
self.assertEqual(node.state, nodedb.HOLD)
def test_job_auto_hold_failure_max(self):
"""Test that a failed job automatically holds only one node"""
pool = self._test_job_auto_hold('FAILURE')
with pool.getDB().getSession() as session:
node = session.getNode(1)
self.assertEqual(node.state, nodedb.HOLD)
# Wait for a replacement node
self.waitForNodes(pool)
with pool.getDB().getSession() as session:
node = session.getNode(2)
self.assertEqual(node.state, nodedb.READY)
# Fail the job again
msg_obj = {'name': 'fake-job',
'build': {'node_name': 'fake-label-fake-provider-2',
'status': 'FAILURE'}}
json_string = json.dumps(msg_obj)
handler = nodepool.nodepool.NodeUpdateListener(pool,
'tcp://localhost:8881')
handler.handleEvent('onFinalized', json_string)
self.wait_for_threads()
# Ensure that the second node was deleted
with pool.getDB().getSession() as session:
node = session.getNode(2)
self.assertEqual(node, None)
class TestGearClient(tests.DBTestCase):
def test_wait_for_completion(self):
wj = jobs.WatchableJob('test', 'test', 'test')
def call_on_completed():
time.sleep(.2)
wj.onCompleted()
t = threading.Thread(target=call_on_completed)
t.start()
wj.waitForCompletion()
def test_handle_disconnect(self):
class MyJob(jobs.WatchableJob):
def __init__(self, *args, **kwargs):
super(MyJob, self).__init__(*args, **kwargs)
self.disconnect_called = False
def onDisconnect(self):
self.disconnect_called = True
super(MyJob, self).onDisconnect()
client = nodepool.nodepool.GearmanClient()
client.addServer('localhost', self.gearman_server.port)
client.waitForServer()
job = MyJob('test-job', '', '')
client.submitJob(job)
self.gearman_server.shutdown()
job.waitForCompletion()
self.assertEqual(job.disconnect_called, True)
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import traceback
import uuid
import datetime
from jsonschema import exceptions as json_schema_exceptions
from st2actions.runners import ActionRunner
from st2common import log as logging
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2common.constants.action import LIVEACTION_STATUS_CANCELED
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.keyvalue import SYSTEM_SCOPE
from st2common.content.loader import MetaLoader
from st2common.exceptions.action import (ParameterRenderingFailedException,
InvalidActionReferencedException)
from st2common.exceptions import actionrunner as runnerexceptions
from st2common.models.api.notification import NotificationsHelper
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.system import actionchain
from st2common.models.utils import action_param_utils
from st2common.persistence.execution import ActionExecution
from st2common.services import action as action_service
from st2common.services.keyvalues import KeyValueLookup
from st2common.util import action_db as action_db_util
from st2common.util import isotime
from st2common.util import date as date_utils
from st2common.util import jinja as jinja_utils
LOG = logging.getLogger(__name__)
RESULTS_KEY = '__results'
JINJA_START_MARKERS = [
'{{',
'{%'
]
PUBLISHED_VARS_KEY = 'published'
class ChainHolder(object):
def __init__(self, chainspec, chainname):
self.actionchain = actionchain.ActionChain(**chainspec)
self.chainname = chainname
if not self.actionchain.default:
default = self._get_default(self.actionchain)
self.actionchain.default = default
LOG.debug('Using %s as default for %s.', self.actionchain.default, self.chainname)
if not self.actionchain.default:
raise Exception('Failed to find default node in %s.' % (self.chainname))
self.vars = {}
def init_vars(self, action_parameters):
if self.actionchain.vars:
self.vars = self._get_rendered_vars(self.actionchain.vars,
action_parameters=action_parameters)
def validate(self):
"""
Function which performs a simple compile time validation.
Keep in mind that some variables are only resolved during run time which means we can
perform only simple validation during compile / create time.
"""
all_nodes = self._get_all_nodes(action_chain=self.actionchain)
for node in self.actionchain.chain:
on_success_node_name = node.on_success
on_failure_node_name = node.on_failure
# Check "on-success" path
valid_name = self._is_valid_node_name(all_node_names=all_nodes,
node_name=on_success_node_name)
if not valid_name:
msg = ('Unable to find node with name "%s" referenced in "on-success" in '
'task "%s".' % (on_success_node_name, node.name))
raise ValueError(msg)
# Check "on-failure" path
valid_name = self._is_valid_node_name(all_node_names=all_nodes,
node_name=on_failure_node_name)
if not valid_name:
msg = ('Unable to find node with name "%s" referenced in "on-failure" in '
'task "%s".' % (on_failure_node_name, node.name))
raise ValueError(msg)
# check if node specified in default is valid.
if self.actionchain.default:
valid_name = self._is_valid_node_name(all_node_names=all_nodes,
node_name=self.actionchain.default)
if not valid_name:
msg = ('Unable to find node with name "%s" referenced in "default".' %
self.actionchain.default)
raise ValueError(msg)
return True
@staticmethod
def _get_default(action_chain):
# default is defined
if action_chain.default:
return action_chain.default
# no nodes in chain
if not action_chain.chain:
return None
# The first node with no references is the default node. Assumptions
# that support this are :
# 1. There are no loops in the chain. Even if there are loops there is
# at least 1 node which does not end up in this loop.
# 2. There are no fragments in the chain.
all_nodes = ChainHolder._get_all_nodes(action_chain=action_chain)
node_names = set(all_nodes)
on_success_nodes = ChainHolder._get_all_on_success_nodes(action_chain=action_chain)
on_failure_nodes = ChainHolder._get_all_on_failure_nodes(action_chain=action_chain)
referenced_nodes = on_success_nodes | on_failure_nodes
possible_default_nodes = node_names - referenced_nodes
if possible_default_nodes:
# This is to preserve order. set([..]) does not preserve the order so iterate
# over original array.
for node in all_nodes:
if node in possible_default_nodes:
return node
# If no node is found assume the first node in the chain list to be default.
return action_chain.chain[0].name
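    # Illustrative example (hypothetical chain): for tasks c1 -> c2 -> c3 linked
    # only via "on-success"/"on-failure", c1 is the only node never referenced by
    # another node, so it is selected as the default (starting) task.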
@staticmethod
def _get_all_nodes(action_chain):
"""
Return names for all the nodes in the chain.
"""
all_nodes = [node.name for node in action_chain.chain]
return all_nodes
@staticmethod
def _get_all_on_success_nodes(action_chain):
"""
Return names for all the tasks referenced in "on-success".
"""
on_success_nodes = set([node.on_success for node in action_chain.chain])
return on_success_nodes
@staticmethod
def _get_all_on_failure_nodes(action_chain):
"""
Return names for all the tasks referenced in "on-failure".
"""
on_failure_nodes = set([node.on_failure for node in action_chain.chain])
return on_failure_nodes
def _is_valid_node_name(self, all_node_names, node_name):
"""
        Function which validates that the provided node name is defined in the workflow
        definition and is valid.
Keep in mind that we can only perform validation for task names which don't include jinja
expressions since those are rendered at run time.
"""
if not node_name:
            # This task name needs to be resolved during run time so we can't validate the name now
return True
is_jinja_expression = jinja_utils.is_jinja_expression(value=node_name)
if is_jinja_expression:
            # This task name needs to be resolved during run time so we can't validate the
            # name now
return True
return node_name in all_node_names
@staticmethod
def _get_rendered_vars(vars, action_parameters):
if not vars:
return {}
context = {SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)}
context.update(action_parameters)
return jinja_utils.render_values(mapping=vars, context=context)
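    # Illustrative example (hypothetical values): with action_parameters
    # {'env': 'staging'} and chain vars {'url': '{{env}}.example.com'},
    # _get_rendered_vars() returns {'url': 'staging.example.com'}. A
    # KeyValueLookup is also placed in the context under SYSTEM_SCOPE so vars
    # can reference datastore values.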
def get_node(self, node_name=None, raise_on_failure=False):
if not node_name:
return None
for node in self.actionchain.chain:
if node.name == node_name:
return node
if raise_on_failure:
raise runnerexceptions.ActionRunnerException('Unable to find node with name "%s".' %
(node_name))
return None
def get_next_node(self, curr_node_name=None, condition='on-success'):
if not curr_node_name:
return self.get_node(self.actionchain.default)
current_node = self.get_node(curr_node_name)
if condition == 'on-success':
return self.get_node(current_node.on_success, raise_on_failure=True)
elif condition == 'on-failure':
return self.get_node(current_node.on_failure, raise_on_failure=True)
raise runnerexceptions.ActionRunnerException('Unknown condition %s.' % condition)
class ActionChainRunner(ActionRunner):
def __init__(self, runner_id):
super(ActionChainRunner, self).__init__(runner_id=runner_id)
self.chain_holder = None
self._meta_loader = MetaLoader()
self._stopped = False
self._skip_notify_tasks = []
self._display_published = False
self._chain_notify = None
def pre_run(self):
super(ActionChainRunner, self).pre_run()
chainspec_file = self.entry_point
LOG.debug('Reading action chain from %s for action %s.', chainspec_file,
self.action)
try:
chainspec = self._meta_loader.load(file_path=chainspec_file,
expected_type=dict)
except Exception as e:
message = ('Failed to parse action chain definition from "%s": %s' %
(chainspec_file, str(e)))
LOG.exception('Failed to load action chain definition.')
raise runnerexceptions.ActionRunnerPreRunError(message)
try:
self.chain_holder = ChainHolder(chainspec, self.action_name)
except json_schema_exceptions.ValidationError as e:
# preserve the whole nasty jsonschema message as that is better to get to the
# root cause
message = str(e)
LOG.exception('Failed to instantiate ActionChain.')
raise runnerexceptions.ActionRunnerPreRunError(message)
except Exception as e:
message = e.message or str(e)
LOG.exception('Failed to instantiate ActionChain.')
raise runnerexceptions.ActionRunnerPreRunError(message)
# Runner attributes are set lazily. So these steps
# should happen outside the constructor.
if getattr(self, 'liveaction', None):
self._chain_notify = getattr(self.liveaction, 'notify', None)
if self.runner_parameters:
self._skip_notify_tasks = self.runner_parameters.get('skip_notify', [])
self._display_published = self.runner_parameters.get('display_published', False)
# Perform some pre-run chain validation
try:
self.chain_holder.validate()
except Exception as e:
raise runnerexceptions.ActionRunnerPreRunError(e.message)
def run(self, action_parameters):
# holds final result we store.
result = {'tasks': []}
# published variables are to be stored for display.
if self._display_published:
result[PUBLISHED_VARS_KEY] = {}
context_result = {} # holds result which is used for the template context purposes
top_level_error = None # stores a reference to a top level error
fail = True
action_node = None
try:
# initialize vars once we have the action_parameters. This allows
# vars to refer to action_parameters.
self.chain_holder.init_vars(action_parameters)
action_node = self.chain_holder.get_next_node()
except Exception as e:
            # action_node is still None if the starting node lookup itself failed,
            # so avoid dereferencing it in the error message.
            LOG.exception('Failed to get starting node.')
            error = ('Failed to get starting node. Lookup failed: %s' % str(e))
trace = traceback.format_exc(10)
top_level_error = {
'error': error,
'traceback': trace
}
parent_context = {
'execution_id': self.execution_id
}
if getattr(self.liveaction, 'context', None):
parent_context.update(self.liveaction.context)
while action_node:
fail = False
timeout = False
error = None
liveaction = None
created_at = date_utils.get_datetime_utc_now()
try:
liveaction = self._get_next_action(
action_node=action_node, parent_context=parent_context,
action_params=action_parameters, context_result=context_result)
except InvalidActionReferencedException as e:
error = ('Failed to run task "%s". Action with reference "%s" doesn\'t exist.' %
(action_node.name, action_node.ref))
LOG.exception(error)
fail = True
top_level_error = {
'error': error,
'traceback': traceback.format_exc(10)
}
break
except ParameterRenderingFailedException as e:
# Rendering parameters failed before we even got to running this action, abort and
# fail the whole action chain
LOG.exception('Failed to run action "%s".', action_node.name)
fail = True
error = ('Failed to run task "%s". Parameter rendering failed: %s' %
(action_node.name, str(e)))
trace = traceback.format_exc(10)
top_level_error = {
'error': error,
'traceback': trace
}
break
try:
liveaction = self._run_action(liveaction)
except Exception as e:
# Save the traceback and error message
LOG.exception('Failure in running action "%s".', action_node.name)
error = {
'error': 'Task "%s" failed: %s' % (action_node.name, str(e)),
'traceback': traceback.format_exc(10)
}
context_result[action_node.name] = error
else:
# Update context result
context_result[action_node.name] = liveaction.result
# Render and publish variables
rendered_publish_vars = ActionChainRunner._render_publish_vars(
action_node=action_node, action_parameters=action_parameters,
execution_result=liveaction.result, previous_execution_results=context_result,
chain_vars=self.chain_holder.vars)
if rendered_publish_vars:
self.chain_holder.vars.update(rendered_publish_vars)
if self._display_published:
result[PUBLISHED_VARS_KEY].update(rendered_publish_vars)
finally:
# Record result and resolve a next node based on the task success or failure
updated_at = date_utils.get_datetime_utc_now()
format_kwargs = {'action_node': action_node, 'liveaction_db': liveaction,
'created_at': created_at, 'updated_at': updated_at}
if error:
format_kwargs['error'] = error
task_result = self._format_action_exec_result(**format_kwargs)
result['tasks'].append(task_result)
if self.liveaction_id:
self._stopped = action_service.is_action_canceled_or_canceling(
self.liveaction_id)
if self._stopped:
LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
status = LIVEACTION_STATUS_CANCELED
return (status, result, None)
try:
if not liveaction:
fail = True
action_node = self.chain_holder.get_next_node(action_node.name,
condition='on-failure')
elif liveaction.status in LIVEACTION_FAILED_STATES:
if liveaction and liveaction.status == LIVEACTION_STATUS_TIMED_OUT:
timeout = True
else:
fail = True
action_node = self.chain_holder.get_next_node(action_node.name,
condition='on-failure')
elif liveaction.status == LIVEACTION_STATUS_CANCELED:
# User canceled an action (task) in the workflow - cancel the execution of
# rest of the workflow
self._stopped = True
LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
elif liveaction.status == LIVEACTION_STATUS_SUCCEEDED:
action_node = self.chain_holder.get_next_node(action_node.name,
condition='on-success')
except Exception as e:
LOG.exception('Failed to get next node "%s".', action_node.name)
fail = True
error = ('Failed to get next node "%s". Lookup failed: %s' %
(action_node.name, str(e)))
trace = traceback.format_exc(10)
top_level_error = {
'error': error,
'traceback': trace
}
# reset action_node here so that chain breaks on failure.
action_node = None
break
if self._stopped:
LOG.info('Chain execution (%s) canceled by user.', self.liveaction_id)
status = LIVEACTION_STATUS_CANCELED
return (status, result, None)
if fail:
status = LIVEACTION_STATUS_FAILED
elif timeout:
status = LIVEACTION_STATUS_TIMED_OUT
else:
status = LIVEACTION_STATUS_SUCCEEDED
if top_level_error:
# Include top level error information
result['error'] = top_level_error['error']
result['traceback'] = top_level_error['traceback']
return (status, result, None)
@staticmethod
def _render_publish_vars(action_node, action_parameters, execution_result,
previous_execution_results, chain_vars):
"""
If no output is specified on the action_node the output is the entire execution_result.
If any output is specified then only those variables are published as output of an
execution of this action_node.
The output variable can refer to a variable from the execution_result,
previous_execution_results or chain_vars.
"""
if not action_node.publish:
return {}
context = {}
context.update(action_parameters)
context.update({action_node.name: execution_result})
context.update(previous_execution_results)
context.update(chain_vars)
context.update({RESULTS_KEY: previous_execution_results})
context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
try:
rendered_result = jinja_utils.render_values(mapping=action_node.publish,
context=context)
except Exception as e:
key = getattr(e, 'key', None)
value = getattr(e, 'value', None)
msg = ('Failed rendering value for publish parameter "%s" in task "%s" '
'(template string=%s): %s' % (key, action_node.name, value, str(e)))
raise ParameterRenderingFailedException(msg)
return rendered_result
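    # Illustrative example (hypothetical task): a node with
    #   publish: {'stdout': '{{ mytask.stdout }}'}
    # is rendered against a context holding the task's own result under its name,
    # all previous results, the chain vars and __results, so the returned dict
    # (e.g. {'stdout': '...'}) is merged into chain_holder.vars by the caller.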
@staticmethod
def _resolve_params(action_node, original_parameters, results, chain_vars, chain_context):
# setup context with original parameters and the intermediate results.
context = {}
context.update(original_parameters)
context.update(results)
context.update(chain_vars)
context.update({RESULTS_KEY: results})
context.update({SYSTEM_SCOPE: KeyValueLookup(scope=SYSTEM_SCOPE)})
context.update({ACTION_CONTEXT_KV_PREFIX: chain_context})
try:
rendered_params = jinja_utils.render_values(mapping=action_node.get_parameters(),
context=context)
except Exception as e:
LOG.exception('Jinja rendering for parameter "%s" failed.' % (e.key))
key = getattr(e, 'key', None)
value = getattr(e, 'value', None)
msg = ('Failed rendering value for action parameter "%s" in task "%s" '
'(template string=%s): %s') % (key, action_node.name, value, str(e))
raise ParameterRenderingFailedException(msg)
LOG.debug('Rendered params: %s: Type: %s', rendered_params, type(rendered_params))
return rendered_params
def _get_next_action(self, action_node, parent_context, action_params, context_result):
# Verify that the referenced action exists
# TODO: We do another lookup in cast_param, refactor to reduce number of lookups
task_name = action_node.name
action_ref = action_node.ref
action_db = action_db_util.get_action_by_ref(ref=action_ref)
if not action_db:
error = 'Task :: %s - Action with ref %s not registered.' % (task_name, action_ref)
raise InvalidActionReferencedException(error)
resolved_params = ActionChainRunner._resolve_params(
action_node=action_node, original_parameters=action_params,
results=context_result, chain_vars=self.chain_holder.vars,
chain_context={'parent': parent_context})
liveaction = self._build_liveaction_object(
action_node=action_node,
resolved_params=resolved_params,
parent_context=parent_context)
return liveaction
def _run_action(self, liveaction, wait_for_completion=True, sleep_delay=1.0):
"""
:param sleep_delay: Number of seconds to wait during "is completed" polls.
:type sleep_delay: ``float``
"""
try:
            # schedule the liveaction; request() returns a tuple and only the
            # (possibly already canceled) liveaction is needed here
liveaction, _ = action_service.request(liveaction)
except Exception as e:
liveaction.status = LIVEACTION_STATUS_FAILED
LOG.exception('Failed to schedule liveaction.')
raise e
while (wait_for_completion and liveaction.status not in LIVEACTION_COMPLETED_STATES):
eventlet.sleep(sleep_delay)
liveaction = action_db_util.get_liveaction_by_id(liveaction.id)
return liveaction
def _build_liveaction_object(self, action_node, resolved_params, parent_context):
liveaction = LiveActionDB(action=action_node.ref)
# Setup notify for task in chain.
notify = self._get_notify(action_node)
if notify:
liveaction.notify = notify
LOG.debug('%s: Task notify set to: %s', action_node.name, liveaction.notify)
liveaction.context = {
'parent': parent_context,
'chain': vars(action_node)
}
liveaction.parameters = action_param_utils.cast_params(action_ref=action_node.ref,
params=resolved_params)
return liveaction
def _get_notify(self, action_node):
if action_node.name not in self._skip_notify_tasks:
if action_node.notify:
task_notify = NotificationsHelper.to_model(action_node.notify)
return task_notify
elif self._chain_notify:
return self._chain_notify
return None
def _format_action_exec_result(self, action_node, liveaction_db, created_at, updated_at,
error=None):
"""
Format ActionExecution result so it can be used in the final action result output.
:rtype: ``dict``
"""
assert isinstance(created_at, datetime.datetime)
assert isinstance(updated_at, datetime.datetime)
result = {}
execution_db = None
if liveaction_db:
execution_db = ActionExecution.get(liveaction__id=str(liveaction_db.id))
result['id'] = action_node.name
result['name'] = action_node.name
result['execution_id'] = str(execution_db.id) if execution_db else None
result['workflow'] = None
result['created_at'] = isotime.format(dt=created_at)
result['updated_at'] = isotime.format(dt=updated_at)
if error or not liveaction_db:
result['state'] = LIVEACTION_STATUS_FAILED
else:
result['state'] = liveaction_db.status
if error:
result['result'] = error
else:
result['result'] = liveaction_db.result
return result
def get_runner():
return ActionChainRunner(str(uuid.uuid4()))
|
|
from __future__ import absolute_import, division, print_function
import math
import itertools
import operator
import pytest
from datetime import datetime, date
import datashape
from collections import Iterator, Iterable
import blaze
from blaze.compute.python import (nunique, mean, rrowfunc, rowfunc,
reduce_by_funcs, optimize)
from blaze import dshape
from blaze.compute.core import compute, compute_up, pre_compute
from blaze.expr import (symbol, by, merge, join, count, distinct,
                        Apply, sum, min, max, any, summary,
                        std, head, transform)
import numpy as np
from blaze import cos, sin
from blaze.compatibility import builtins
from blaze.utils import raises
t = symbol('t', 'var * {name: string, amount: int, id: int}')
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
tbig = symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
def test_dispatched_rowfunc():
cw = optimize(t['amount'] + 100, [])
assert rowfunc(t)(t) == t
assert rowfunc(cw)(('Alice', 100, 1)) == 200
def test_reduce_by_funcs():
e = summary(number=t.id.max(), sum=t.amount.sum())
b = by(t, e)
assert reduce_by_funcs(b)[2]([1,2,3], [4,5,6]) == (1, 7)
def test_symbol():
assert compute(t, data) == data
def test_projection():
assert list(compute(t['name'], data)) == [x[0] for x in data]
def test_eq():
assert list(compute(t['amount'] == 100, data)) == [x[1] == 100 for x in data]
def test_selection():
assert list(compute(t[t['amount'] == 0], data)) == \
[x for x in data if x[1] == 0]
assert list(compute(t[t['amount'] > 150], data)) == \
[x for x in data if x[1] > 150]
def test_arithmetic():
assert list(compute(t['amount'] + t['id'], data)) == \
[b + c for a, b, c, in data]
assert list(compute(t['amount'] * t['id'], data)) == \
[b * c for a, b, c, in data]
assert list(compute(t['amount'] % t['id'], data)) == \
[b % c for a, b, c, in data]
def test_unary_ops():
for op in ('cos', 'sin', 'exp', 'ceil', 'floor', 'trunc', 'isnan'):
f = getattr(blaze, op)
pyf = getattr(math, op)
result = list(compute(f(t['amount']), data))
assert result == [pyf(x[1]) for x in data]
def test_neg():
expr = optimize(-t.amount, [])
assert list(compute(expr, data)) == [-x[1] for x in data]
def test_reductions():
assert compute(sum(t['amount']), data) == 100 + 200 + 50
assert compute(min(t['amount']), data) == 50
assert compute(max(t['amount']), data) == 200
assert compute(nunique(t['amount']), data) == 3
assert compute(nunique(t['name']), data) == 2
assert compute(count(t['amount']), data) == 3
assert compute(any(t['amount'] > 150), data) is True
assert compute(any(t['amount'] > 250), data) is False
assert compute(t.amount[0], data) == 100
assert compute(t.amount[-1], data) == 50
def test_1d_reductions_keepdims():
for r in [sum, min, max, nunique, count]:
assert compute(r(t.amount, keepdims=True), data) == \
(compute(r(t.amount), data),)
def test_count():
t = symbol('t', '3 * int')
assert compute(t.count(), [1, None, 2]) == 2
def reduction_runner(funcs):
from blaze.compatibility import builtins as bts
exprs = sum, min, max
for blaze_expr, py_func in itertools.product(exprs, funcs):
f = getattr(operator, py_func)
reduc_f = getattr(bts, blaze_expr.__name__)
ground_truth = f(reduc_f([100, 200, 50]), 5)
assert compute(f(blaze_expr(t['amount']), 5), data) == ground_truth
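# Concrete instance of the check above (hedged sketch using the module-level
# t/data): for the ('add',) case with the sum reduction, reduction_runner
# asserts compute(sum(t['amount']) + 5, data) == builtins.sum([100, 200, 50]) + 5.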
def test_reduction_arithmetic():
funcs = 'add', 'mul'
reduction_runner(funcs)
def test_reduction_compare():
funcs = 'eq', 'ne', 'lt', 'gt', 'le', 'ge'
reduction_runner(funcs)
def test_mean():
assert compute(mean(t['amount']), data) == float(100 + 200 + 50) / 3
assert 50 < compute(std(t['amount']), data) < 100
def test_std():
amt = [row[1] for row in data]
assert np.allclose(compute(t.amount.std(), data), np.std(amt))
assert np.allclose(compute(t.amount.std(unbiased=True), data),
np.std(amt, ddof=1))
assert np.allclose(compute(t.amount.var(), data), np.var(amt))
assert np.allclose(compute(t.amount.var(unbiased=True), data),
np.var(amt, ddof=1))
def test_by_no_grouper():
names = t['name']
assert set(compute(by(names, count=names.count()), data)) == \
set([('Alice', 2), ('Bob', 1)])
def test_by_one():
print(compute(by(t['name'], total=t['amount'].sum()), data))
assert set(compute(by(t['name'], total=t['amount'].sum()), data)) == \
set([('Alice', 150), ('Bob', 200)])
def test_by_compound_apply():
print(compute(by(t['name'], total=(t['amount'] + 1).sum()), data))
assert set(compute(by(t['name'], total=(t['amount'] + 1).sum()), data)) == \
set([('Alice', 152), ('Bob', 201)])
def test_by_two():
result = compute(by(tbig[['name', 'sex']], total=tbig['amount'].sum()),
databig)
expected = [('Alice', 'F', 200),
('Drew', 'F', 100),
('Drew', 'M', 300)]
print(set(result))
assert set(result) == set(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
databig)
expected = [('Alice', 'F', 204),
('Drew', 'F', 104),
('Drew', 'M', 310)]
print(result)
assert set(result) == set(expected)
def test_works_on_generators():
assert list(compute(t['amount'], iter(data))) == \
[x[1] for x in data]
assert list(compute(t['amount'], (i for i in data))) == \
[x[1] for x in data]
def test_join():
left = [['Alice', 100], ['Bob', 200]]
right = [['Alice', 1], ['Bob', 2]]
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
assert dshape(joined.schema) == \
dshape('{name: string, amount: int, id: int}')
result = list(compute(joined, {L: left, R: right}))
expected = [('Alice', 100, 1), ('Bob', 200, 2)]
assert result == expected
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='outer'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
print(list(compute(j, {L: left, R: right})))
assert list(compute(j, {L: left, R: right})) == [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
@pytest.mark.xfail(reason="This doesn't necessarily make sense")
def test_column_of_column():
assert list(compute(t['name']['name'], data)) == \
list(compute(t['name'], data))
def test_distinct():
assert set(compute(distinct(t['name']), data)) == set(['Alice', 'Bob'])
assert set(compute(distinct(t), data)) == set(map(tuple, data))
e = distinct(t)
assert list(compute(e, [])) == []
def test_distinct_count():
t2 = t['name'].distinct()
gby = by(t2, total=t2.count())
result = set(compute(gby, data))
assert result == set([('Alice', 1), ('Bob', 1)])
def test_sort():
assert list(compute(t.sort('amount'), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort('amount', ascending=True), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort(['amount', 'id']), data)) == \
sorted(data, key=lambda x: (x[1], x[2]), reverse=False)
def test_fancy_sort():
assert list(compute(t.sort(t['amount']), data)) ==\
list(compute(t.sort('amount'), data))
assert list(compute(t.sort(t[['amount', 'id']]), data)) ==\
list(compute(t.sort(['amount', 'id']), data))
assert list(compute(t.sort(0-t['amount']), data)) ==\
list(compute(t.sort('amount'), data))[::-1]
def test_sort_on_column():
assert list(compute(t.name.distinct().sort('name'), data)) == \
['Alice', 'Bob']
def test_head():
assert list(compute(t.head(1), data)) == [data[0]]
e = head(t, 101)
p = list(range(1000))
assert len(list(compute(e, p))) == 101
def test_graph_double_join():
idx = [['A', 1],
['B', 2],
['C', 3],
['D', 4],
['E', 5],
['F', 6]]
arc = [[1, 3],
[2, 3],
[4, 3],
[5, 3],
[3, 1],
[2, 1],
[5, 1],
[1, 6],
[2, 6],
[4, 6]]
wanted = [['A'],
['F']]
t_idx = symbol('t_idx', 'var * {name: string, b: int32}')
t_arc = symbol('t_arc', 'var * {a: int32, b: int32}')
t_wanted = symbol('t_wanted', 'var * {name: string}')
j = join(join(t_idx, t_arc, 'b'), t_wanted, 'name')[['name', 'b', 'a']]
result = compute(j, {t_idx: idx, t_arc: arc, t_wanted: wanted})
result = sorted(map(tuple, result))
expected = sorted([('A', 3, 1),
('A', 2, 1),
('A', 5, 1),
('F', 1, 6),
('F', 2, 6),
('F', 4, 6)])
assert result == expected
def test_label():
assert list(compute((t['amount'] * 1).label('foo'), data)) == \
list(compute((t['amount'] * 1), data))
def test_relabel_join():
names = symbol('names', 'var * {first: string, last: string}')
siblings = join(names.relabel({'first': 'left'}),
names.relabel({'first': 'right'}),
'last')[['left', 'right']]
data = [('Alice', 'Smith'),
('Bob', 'Jones'),
('Charlie', 'Smith')]
print(set(compute(siblings, {names: data})))
assert ('Alice', 'Charlie') in set(compute(siblings, {names: data}))
assert ('Alice', 'Bob') not in set(compute(siblings, {names: data}))
def test_map_column():
inc = lambda x: x + 1
assert list(compute(t['amount'].map(inc, 'int'), data)) == [x[1] + 1 for x in data]
def test_map():
assert (list(compute(t.map(lambda tup: tup[1] + tup[2], 'int'), data)) ==
[x[1] + x[2] for x in data])
def test_apply_column():
result = compute(t.amount.apply(builtins.sum, 'real'), data)
expected = compute(t.amount.sum(), data)
assert result == expected
def test_apply():
data2 = tuple(map(tuple, data))
assert compute(t.apply(hash, 'int'), data2) == hash(data2)
def test_map_datetime():
from datetime import datetime
data = [['A', 0], ['B', 1]]
t = symbol('t', 'var * {foo: string, datetime: int64}')
result = list(compute(t['datetime'].map(datetime.utcfromtimestamp,
'datetime'), data))
expected = [datetime(1970, 1, 1, 0, 0, 0), datetime(1970, 1, 1, 0, 0, 1)]
assert result == expected
def test_by_multi_column_grouper():
t = symbol('t', 'var * {x: int, y: int, z: int}')
expr = by(t[['x', 'y']], total=t['z'].count())
data = [(1, 2, 0), (1, 2, 0), (1, 1, 0)]
print(set(compute(expr, data)))
assert set(compute(expr, data)) == set([(1, 2, 2), (1, 1, 1)])
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert list(compute(expr, data)) == [(row[0], row[1] * 2) for row in data]
def test_transform():
expr = transform(t, x=t.amount / t.id)
assert list(compute(expr, data)) == [('Alice', 100, 1, 100),
('Bob', 200, 2, 100),
('Alice', 50, 3, 50 / 3)]
def test_map_columnwise():
colwise = t['amount'] * t['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data]
def test_map_columnwise_of_selection():
tsel = t[t['name'] == 'Alice']
colwise = tsel['amount'] * tsel['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data[::2]]
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
assert list(compute(expr, data)) == ['Alice']
def test_recursive_rowfunc():
f = rrowfunc(t['name'], t)
assert [f(row) for row in data] == [row[0] for row in data]
expr = optimize(t['amount'] + t['id'], [])
f = rrowfunc(expr, t)
assert [f(row) for row in data] == [row[1] + row[2] for row in data]
assert raises(Exception, lambda: rrowfunc(t[t['amount'] < 0]['name'], t))
def test_recursive_rowfunc_is_used():
expr = by(t['name'], total=(2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2*(101 + 53)),
('Bob', 2*(202))]
assert set(compute(expr, data)) == set(expected)
class TestFunctionExpressions(object):
def test_compound(self):
s = t.amount.mean()
r = compute(s, data)
assert isinstance(r, float)
expr = cos(s) ** 2 + sin(s) ** 2
result = compute(expr, data)
expected = math.cos(r) ** 2 + math.sin(r) ** 2
assert result == expected
def test_user_defined_function(self):
s = t.amount.count()
r = compute(s, data)
assert isinstance(r, int)
def myfunc(x):
return (cos(x) + sin(x)) ** 2 / math.pi
result = compute(myfunc(s), data)
expected = (math.cos(r) + math.sin(r)) ** 2 / math.pi
assert result == expected
def test_user_defined_calls(self):
s = t.amount.count()
r = compute(s, data)
def myother(y):
return 2 + y ** 10
def myfunc(x):
return myother((cos(x) + sin(x)) ** 2 / math.pi)
result = compute(myfunc(s), data)
expected = myother((math.cos(r) + math.sin(r)) ** 2 / math.pi)
assert result == expected
def test_by_groupby_deep():
data = [(1, 2, 'Alice'),
(1, 3, 'Bob'),
(2, 4, 'Alice'),
(2, 4, '')]
schema = '{x: int, y: int, name: string}'
t = symbol('t', datashape.var * schema)
t2 = t[t['name'] != '']
t3 = merge(t2.x, t2.name)
expr = by(t3.name, avg=t3.x.mean())
result = set(compute(expr, data))
assert result == set([('Alice', 1.5), ('Bob', 1.0)])
def test_by_then_sort_dict_items_sequence():
expr = by(tbig.name, total=tbig.amount.sum()).sort('name')
assert compute(expr, databig)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert compute(expr, data) == (3, 350)
assert compute(expr, iter(data)) == (3, 350)
def test_summary_keepdims():
assert compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=True), data) == \
(compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=False), data),)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 150),
('Bob', 1, 200)])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 152),
('Bob', 1, 201)])
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
assert set(compute(expr, data)) == set([('Alice', 2, 151),
('Bob', 1, 201)])
def test_summary_by_first():
expr = by(t.name, amt=t.amount[0])
assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 100)))
def test_summary_by_last():
expr = by(t.name, amt=t.amount[-1])
assert set(compute(expr, data)) == set((('Bob', 200), ('Alice', 50)))
def test_reduction_then_arithmetic():
expr = t.amount.sum() + 1
assert compute(expr, data) == 351
def test_scalar_arithmetic():
x = symbol('x', 'real')
y = symbol('y', 'real')
assert compute(x + y, {x: 2, y: 3}) == 5
assert compute_up(x + y, 2, 3) == 5
assert compute_up(x * y, 2, 3) == 6
assert compute_up(x / y, 6, 3) == 2
assert compute_up(x % y, 4, 3) == 1
assert compute_up(x ** y, 4, 3) == 64
assert compute(x + 1, {x: 2}) == 3
assert compute(x * 2, {x: 2}) == 4
assert compute(1 + x, {x: 2}) == 3
assert compute(2 * x, {x: 2}) == 4
assert compute_up(-x, 1) == -1
assert compute_up(blaze.sin(x), 1) == math.sin(1)
def test_like():
t = symbol('t', 'var * {name: string, city: string}')
data = [('Alice Smith', 'New York'),
('Bob Smith', 'Chicago'),
('Alice Walker', 'LA')]
assert list(compute(t.like(name='Alice*'), data)) == [data[0], data[2]]
assert list(compute(t.like(name='lice*'), data)) == []
assert list(compute(t.like(name='*Smith*'), data)) == [data[0], data[1]]
assert list(compute(t.like(name='*Smith*', city='New York'), data)) == [data[0]]
def test_datetime_comparison():
data = [['Alice', date(2000, 1, 1)],
['Bob', date(2000, 2, 2)],
['Alice', date(2000, 3, 3)]]
t = symbol('t', 'var * {name: string, when: date}')
assert list(compute(t[t.when > '2000-01-01'], data)) == data[1:]
def test_datetime_access():
data = [['Alice', 100, 1, datetime(2000, 1, 1, 1, 1, 1)],
['Bob', 200, 2, datetime(2000, 1, 1, 1, 1, 1)],
['Alice', 50, 3, datetime(2000, 1, 1, 1, 1, 1)]]
t = symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
assert list(compute(t.when.year, data)) == [2000, 2000, 2000]
assert list(compute(t.when.second, data)) == [1, 1, 1]
assert list(compute(t.when.date, data)) == [date(2000, 1, 1)] * 3
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
assert list(compute(t.utcfromtimestamp, [0])) == \
[datetime(1970, 1, 1, 0, 0)]
payments = [{'name': 'Alice', 'payments': [
{'amount': 100, 'when': datetime(2000, 1, 1, 1, 1 ,1)},
{'amount': 200, 'when': datetime(2000, 2, 2, 2, 2, 2)}
]},
{'name': 'Bob', 'payments': [
{'amount': 300, 'when': datetime(2000, 3, 3, 3, 3 ,3)},
{'amount': -400, 'when': datetime(2000, 4, 4, 4, 4, 4)},
{'amount': 500, 'when': datetime(2000, 5, 5, 5, 5, 5)}
]},
]
payments_ordered = [('Alice', [( 100, datetime(2000, 1, 1, 1, 1 ,1)),
( 200, datetime(2000, 2, 2, 2, 2, 2))]),
('Bob', [( 300, datetime(2000, 3, 3, 3, 3 ,3)),
(-400, datetime(2000, 4, 4, 4, 4, 4)),
( 500, datetime(2000, 5, 5, 5, 5, 5))])]
payment_dshape = 'var * {name: string, payments: var * {amount: int32, when: datetime}}'
@pytest.mark.xfail(reason="Can't reason about nested broadcasts yet")
def test_nested():
t = symbol('t', payment_dshape)
assert list(compute(t.name, payments_ordered)) == ['Alice', 'Bob']
assert list(compute(t.payments, payments_ordered)) == \
[p[1] for p in payments_ordered]
assert list(compute(t.payments.amount, payments_ordered)) == \
[(100, 200), (300, -400, 500)]
assert list(compute(t.payments.amount + 1, payments_ordered)) ==\
[(101, 201), (301, -399, 501)]
@pytest.mark.xfail(reason="Can't reason about nested broadcasts yet")
def test_scalar():
s = symbol('s', '{name: string, id: int32, payments: var * {amount: int32, when: datetime}}')
data = ('Alice', 1, ((100, datetime(2000, 1, 1, 1, 1 ,1)),
(200, datetime(2000, 2, 2, 2, 2, 2)),
(300, datetime(2000, 3, 3, 3, 3, 3))))
assert compute(s.name, data) == 'Alice'
assert compute(s.id + 1, data) == 2
assert tuple(compute(s.payments.amount, data)) == (100, 200, 300)
assert tuple(compute(s.payments.amount + 1, data)) == (101, 201, 301)
def test_slice():
assert compute(t[0], data) == data[0]
assert list(compute(t[:2], data)) == list(data[:2])
assert list(compute(t.name[:2], data)) == [data[0][0], data[1][0]]
def test_negative_slicing():
assert list(compute(t[-1:], data)) == data[-1:]
assert list(compute(t[-1:], iter(data))) == data[-1:]
assert list(compute(t[-1], data)) == data[-1]
assert list(compute(t[-1], iter(data))) == data[-1]
assert list(compute(t[-2], data)) == data[-2]
assert list(compute(t[-2], iter(data))) == data[-2]
@pytest.mark.xfail(raises=ValueError,
reason="No support for stop and step having negative values")
def test_negative_slicing_raises_on_stop_and_step_not_None():
assert list(compute(t[-2:-5:-1], data)) == data[-2:-5:-1]
def test_multi_dataset_broadcast():
x = symbol('x', '3 * int')
y = symbol('y', '3 * int')
a = [1, 2, 3]
b = [10, 20, 30]
assert list(compute(x + y, {x: a, y: b})) == [11, 22, 33]
assert list(compute(2*x + (y + 1), {x: a, y: b})) == [13, 25, 37]
@pytest.mark.xfail(reason="Optimize doesn't create multi-table-broadcasts")
def test_multi_dataset_broadcast_with_Record_types():
x = symbol('x', '3 * {p: int, q: int}')
y = symbol('y', '3 * int')
a = [(1, 1), (2, 2), (3, 3)]
b = [10, 20, 30]
assert list(compute(x.p + x.q + y, {x: iter(a), y: iter(b)})) == [12, 24, 36]
def eq(a, b):
if isinstance(a, (Iterable, Iterator)):
a = list(a)
if isinstance(b, (Iterable, Iterator)):
b = list(b)
return a == b
def test_pre_compute():
s = symbol('s', 'var * {a: int, b: int}')
assert pre_compute(s, [(1, 2)]) == [(1, 2)]
assert list(pre_compute(s, iter([(1, 2)]))) == [(1, 2)]
assert list(pre_compute(s, iter([(1, 2), (3, 4)]))) == [(1, 2), (3, 4)]
assert list(pre_compute(s, iter([{'a': 1, 'b': 2},
{'a': 3, 'b': 4}]))) == [(1, 2), (3, 4)]
def test_dicts():
t = symbol('t', 'var * {name: string, amount: int, id: int}')
L = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
d = [{'name': 'Alice', 'amount': 100, 'id': 1},
{'name': 'Bob', 'amount': 200, 'id': 2},
{'name': 'Alice', 'amount': 50, 'id': 3}]
assert list(pre_compute(t, d)) == list(map(tuple, L))
for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]:
assert eq(compute(expr, {t: L}),
compute(expr, {t: d}))
for expr in [t.amount, t.amount.sum(), by(t.name, sum=t.amount.sum())]:
assert eq(compute(expr, {t: iter(L)}),
compute(expr, {t: iter(d)}))
assert eq(compute(expr, {t: iter(L)}),
compute(expr, {t: L}))
def test_nelements_list_tuple():
assert compute(t.nelements(), data) == len(data)
def test_nelements_iterator():
x = (row for row in data)
assert compute(t.nelements(), x) == len(data)
def test_nrows():
assert compute(t.nrows, data) == len(data)
x = (row for row in data)
assert compute(t.nrows, x) == len(data)
@pytest.mark.xfail(raises=Exception, reason="Only 1D reductions allowed")
def test_nelements_2D():
assert compute(t.nelements(axis=1), data) == len(data[0])
def test_compute_field_on_dicts():
s = symbol('s', '{x: 3 * int, y: 3 * int}')
d = {'x': [1, 2, 3], 'y': [4, 5, 6]}
assert compute(s.x, {s: d}) == [1, 2, 3]
def test_truncate():
s = symbol('x', 'real')
assert compute(s.truncate(20), 154) == 140
assert compute(s.truncate(0.1), 3.1415) == 3.1
def test_truncate_datetime():
s = symbol('x', 'datetime')
assert compute(s.truncate(2, 'days'), datetime(2002, 1, 3, 12, 30)) ==\
date(2002, 1, 2)
s = symbol('x', 'var * datetime')
assert list(compute(s.truncate(2, 'days'),
[datetime(2002, 1, 3, 12, 30)])) ==\
[date(2002, 1, 2)]
def test_compute_up_on_base():
d = datetime.now()
s = symbol('s', 'datetime')
assert compute(s.minute, d) == d.minute
@pytest.mark.parametrize('keys', [['Alice'], ['Bob', 'Alice']])
def test_isin(keys):
expr = t[t.name.isin(keys)]
result = list(compute(expr, data))
expected = [el for el in data if el[0] in keys]
assert result == expected
|
|
# coding=UTF-8
'''
Copyright (c) 2010 openpyxl
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@license: http://www.opensource.org/licenses/mit-license.php
@author: Eric Gazoni
'''
from ..shared.xmltools import Element, SubElement, get_document_content
from ..chart import Chart, ErrorBar
class ChartWriter(object):
def __init__(self, chart):
self.chart = chart
def write(self):
""" write a chart """
root = Element('c:chartSpace',
{'xmlns:c':"http://schemas.openxmlformats.org/drawingml/2006/chart",
'xmlns:a':"http://schemas.openxmlformats.org/drawingml/2006/main",
'xmlns:r':"http://schemas.openxmlformats.org/officeDocument/2006/relationships"})
SubElement(root, 'c:lang', {'val':self.chart.lang})
self._write_chart(root)
self._write_print_settings(root)
self._write_shapes(root)
return get_document_content(root)
def _write_chart(self, root):
chart = self.chart
ch = SubElement(root, 'c:chart')
self._write_title(ch)
plot_area = SubElement(ch, 'c:plotArea')
layout = SubElement(plot_area, 'c:layout')
mlayout = SubElement(layout, 'c:manualLayout')
SubElement(mlayout, 'c:layoutTarget', {'val':'inner'})
SubElement(mlayout, 'c:xMode', {'val':'edge'})
SubElement(mlayout, 'c:yMode', {'val':'edge'})
SubElement(mlayout, 'c:x', {'val':str(chart._get_margin_left())})
SubElement(mlayout, 'c:y', {'val':str(chart._get_margin_top())})
SubElement(mlayout, 'c:w', {'val':str(chart.width)})
SubElement(mlayout, 'c:h', {'val':str(chart.height)})
if chart.type == Chart.SCATTER_CHART:
subchart = SubElement(plot_area, 'c:scatterChart')
            SubElement(subchart, 'c:scatterStyle', {'val':'lineMarker'})
else:
if chart.type == Chart.BAR_CHART:
subchart = SubElement(plot_area, 'c:barChart')
SubElement(subchart, 'c:barDir', {'val':'col'})
else:
subchart = SubElement(plot_area, 'c:lineChart')
SubElement(subchart, 'c:grouping', {'val':chart.grouping})
self._write_series(subchart)
SubElement(subchart, 'c:marker', {'val':'1'})
SubElement(subchart, 'c:axId', {'val':str(chart.x_axis.id)})
SubElement(subchart, 'c:axId', {'val':str(chart.y_axis.id)})
if chart.type == Chart.SCATTER_CHART:
self._write_axis(plot_area, chart.x_axis, 'c:valAx')
else:
self._write_axis(plot_area, chart.x_axis, 'c:catAx')
self._write_axis(plot_area, chart.y_axis, 'c:valAx')
self._write_legend(ch)
SubElement(ch, 'c:plotVisOnly', {'val':'1'})
def _write_title(self, chart):
if self.chart.title != '':
title = SubElement(chart, 'c:title')
tx = SubElement(title, 'c:tx')
rich = SubElement(tx, 'c:rich')
SubElement(rich, 'a:bodyPr')
SubElement(rich, 'a:lstStyle')
p = SubElement(rich, 'a:p')
pPr = SubElement(p, 'a:pPr')
SubElement(pPr, 'a:defRPr')
r = SubElement(p, 'a:r')
SubElement(r, 'a:rPr', {'lang':self.chart.lang})
            SubElement(r, 'a:t').text = self.chart.title
SubElement(title, 'c:layout')
def _write_axis(self, plot_area, axis, label):
ax = SubElement(plot_area, label)
SubElement(ax, 'c:axId', {'val':str(axis.id)})
scaling = SubElement(ax, 'c:scaling')
SubElement(scaling, 'c:orientation', {'val':axis.orientation})
if label == 'c:valAx':
SubElement(scaling, 'c:max', {'val':str(axis.max)})
SubElement(scaling, 'c:min', {'val':str(axis.min)})
SubElement(ax, 'c:axPos', {'val':axis.position})
if label == 'c:valAx':
SubElement(ax, 'c:majorGridlines')
SubElement(ax, 'c:numFmt', {'formatCode':"General", 'sourceLinked':'1'})
SubElement(ax, 'c:tickLblPos', {'val':axis.tick_label_position})
SubElement(ax, 'c:crossAx', {'val':str(axis.cross)})
SubElement(ax, 'c:crosses', {'val':axis.crosses})
if axis.auto:
SubElement(ax, 'c:auto', {'val':'1'})
if axis.label_align:
SubElement(ax, 'c:lblAlgn', {'val':axis.label_align})
if axis.label_offset:
SubElement(ax, 'c:lblOffset', {'val':str(axis.label_offset)})
if label == 'c:valAx':
if self.chart.type == Chart.SCATTER_CHART:
SubElement(ax, 'c:crossBetween', {'val':'midCat'})
else:
SubElement(ax, 'c:crossBetween', {'val':'between'})
SubElement(ax, 'c:majorUnit', {'val':str(axis.unit)})
def _write_series(self, subchart):
for i, serie in enumerate(self.chart._series):
ser = SubElement(subchart, 'c:ser')
SubElement(ser, 'c:idx', {'val':str(i)})
SubElement(ser, 'c:order', {'val':str(i)})
if serie.legend:
tx = SubElement(ser, 'c:tx')
self._write_serial(tx, serie.legend)
if serie.color:
sppr = SubElement(ser, 'c:spPr')
if self.chart.type == Chart.BAR_CHART:
# fill color
fillc = SubElement(sppr, 'a:solidFill')
SubElement(fillc, 'a:srgbClr', {'val':serie.color})
# edge color
ln = SubElement(sppr, 'a:ln')
fill = SubElement(ln, 'a:solidFill')
SubElement(fill, 'a:srgbClr', {'val':serie.color})
if serie.error_bar:
self._write_error_bar(ser, serie)
marker = SubElement(ser, 'c:marker')
SubElement(marker, 'c:symbol', {'val':serie.marker})
if serie.labels:
cat = SubElement(ser, 'c:cat')
self._write_serial(cat, serie.labels)
if self.chart.type == Chart.SCATTER_CHART:
if serie.xvalues:
xval = SubElement(ser, 'c:xVal')
self._write_serial(xval, serie.xvalues)
yval = SubElement(ser, 'c:yVal')
self._write_serial(yval, serie.values)
else:
val = SubElement(ser, 'c:val')
self._write_serial(val, serie.values)
def _write_serial(self, node, serie, literal=False):
cache = serie._get_cache()
if isinstance(cache[0], str):
typ = 'str'
else:
typ = 'num'
if not literal:
if typ == 'num':
ref = SubElement(node, 'c:numRef')
else:
ref = SubElement(node, 'c:strRef')
SubElement(ref, 'c:f').text = serie._get_ref()
if typ == 'num':
data = SubElement(ref, 'c:numCache')
else:
data = SubElement(ref, 'c:strCache')
else:
data = SubElement(node, 'c:numLit')
if typ == 'num':
SubElement(data, 'c:formatCode').text = 'General'
if literal:
values = (1,)
else:
values = cache
SubElement(data, 'c:ptCount', {'val':str(len(values))})
for j, val in enumerate(values):
point = SubElement(data, 'c:pt', {'idx':str(j)})
SubElement(point, 'c:v').text = str(val)
def _write_error_bar(self, node, serie):
flag = {ErrorBar.PLUS_MINUS:'both',
ErrorBar.PLUS:'plus',
ErrorBar.MINUS:'minus'}
eb = SubElement(node, 'c:errBars')
SubElement(eb, 'c:errBarType', {'val':flag[serie.error_bar.type]})
SubElement(eb, 'c:errValType', {'val':'cust'})
plus = SubElement(eb, 'c:plus')
self._write_serial(plus, serie.error_bar.values,
literal=(serie.error_bar.type==ErrorBar.MINUS))
minus = SubElement(eb, 'c:minus')
self._write_serial(minus, serie.error_bar.values,
literal=(serie.error_bar.type==ErrorBar.PLUS))
def _write_legend(self, chart):
legend = SubElement(chart, 'c:legend')
SubElement(legend, 'c:legendPos', {'val':self.chart.legend.position})
SubElement(legend, 'c:layout')
def _write_print_settings(self, root):
settings = SubElement(root, 'c:printSettings')
SubElement(settings, 'c:headerFooter')
margins = dict([(k, str(v)) for (k,v) in self.chart.print_margins.items()])
SubElement(settings, 'c:pageMargins', margins)
SubElement(settings, 'c:pageSetup')
def _write_shapes(self, root):
if self.chart._shapes:
SubElement(root, 'c:userShapes', {'r:id':'rId1'})
def write_rels(self, drawing_id):
root = Element('Relationships', {'xmlns' : 'http://schemas.openxmlformats.org/package/2006/relationships'})
attrs = {'Id' : 'rId1',
'Type' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/chartUserShapes',
'Target' : '../drawings/drawing%s.xml' % drawing_id }
SubElement(root, 'Relationship', attrs)
return get_document_content(root)
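# Minimal usage sketch (hedged; assumes a configured openpyxl Chart instance
# named ``chart`` built elsewhere):
#
#     writer = ChartWriter(chart)
#     chart_xml = writer.write()       # serialized c:chartSpace part
#     rels_xml = writer.write_rels(1)  # relationships for drawing1.xml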
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function
from __future__ import division
from builtins import range
import pickle
import os.path as op
import numpy as np
import nibabel as nb
import networkx as nx
import scipy.io as sio
from ..base import (BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, InputMultiPath, Directory,
OutputMultiPath, isdefined)
from ...utils.filemanip import split_filename
from ... import logging
iflogger = logging.getLogger('interface')
def length(xyz, along=False):
"""
Euclidean length of track line
Parameters
----------
xyz : array-like shape (N,3)
array representing x,y,z of N points in a track
along : bool, optional
If True, return array giving cumulative length along track,
otherwise (default) return scalar giving total length.
Returns
-------
L : scalar or array shape (N-1,)
scalar in case of `along` == False, giving total length, array if
`along` == True, giving cumulative lengths.
Examples
--------
>>> xyz = np.array([[1,1,1],[2,3,4],[0,0,0]])
>>> expected_lens = np.sqrt([1+2**2+3**2, 2**2+3**2+4**2])
>>> length(xyz) == expected_lens.sum()
True
>>> len_along = length(xyz, along=True)
>>> np.allclose(len_along, expected_lens.cumsum())
True
>>> length([])
0
>>> length([[1, 2, 3]])
0
>>> length([], along=True)
array([0])
"""
xyz = np.asarray(xyz)
if xyz.shape[0] < 2:
if along:
return np.array([0])
return 0
dists = np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1))
if along:
return np.cumsum(dists)
return np.sum(dists)
def get_rois_crossed(pointsmm, roiData, voxelSize):
n_points = len(pointsmm)
rois_crossed = []
for j in range(0, n_points):
# store point
x = int(pointsmm[j, 0] / float(voxelSize[0]))
y = int(pointsmm[j, 1] / float(voxelSize[1]))
z = int(pointsmm[j, 2] / float(voxelSize[2]))
if not roiData[x, y, z] == 0:
rois_crossed.append(roiData[x, y, z])
    rois_crossed = list(dict.fromkeys(rois_crossed).keys())  # Remove duplicate ROI labels
return rois_crossed
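# Hedged sketch of get_rois_crossed: with voxelSize=(2.0, 2.0, 2.0) a point at
# (5.0, 3.0, 1.0) mm is looked up at voxel index (2, 1, 0); the nonzero labels
# found along the fiber are returned with duplicates removed.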
def get_connectivity_matrix(n_rois, list_of_roi_crossed_lists):
connectivity_matrix = np.zeros((n_rois, n_rois), dtype=np.uint)
for rois_crossed in list_of_roi_crossed_lists:
for idx_i, roi_i in enumerate(rois_crossed):
for idx_j, roi_j in enumerate(rois_crossed):
if idx_i > idx_j:
if not roi_i == roi_j:
connectivity_matrix[roi_i - 1, roi_j - 1] += 1
connectivity_matrix = connectivity_matrix + connectivity_matrix.T
return connectivity_matrix
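# Worked example (hedged, hypothetical inputs): for three fibers crossing ROIs
# [1, 2], [1, 3] and [1, 2], the loop above increments cell [1, 0] twice and
# cell [2, 0] once (0-based indices), and adding the transpose makes the
# matrix symmetric with a zero diagonal:
#
#     get_connectivity_matrix(3, [[1, 2], [1, 3], [1, 2]])
#     # -> [[0, 2, 1],
#     #     [2, 0, 0],
#     #     [1, 0, 0]]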
def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois):
""" Create the intersection arrays for each fiber
"""
n_fib = len(streamlines)
pc = -1
# Computation for each fiber
final_fiber_ids = []
list_of_roi_crossed_lists = []
for i, fiber in enumerate(streamlines):
pcN = int(round(float(100 * i) / n_fib))
if pcN > pc and pcN % 1 == 0:
pc = pcN
print('%4.0f%%' % (pc))
rois_crossed = get_rois_crossed(fiber[0], roiData, voxelSize)
if len(rois_crossed) > 0:
list_of_roi_crossed_lists.append(list(rois_crossed))
final_fiber_ids.append(i)
connectivity_matrix = get_connectivity_matrix(n_rois, list_of_roi_crossed_lists)
dis = n_fib - len(final_fiber_ids)
iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)" % (dis, dis * 100.0 / n_fib, n_fib))
iflogger.info("Valid fibers: %i (%f percent)" % (n_fib - dis, 100 - dis * 100.0 / n_fib))
iflogger.info('Returning the intersecting point connectivity matrix')
return connectivity_matrix, final_fiber_ids
def create_endpoints_array(fib, voxelSize):
""" Create the endpoints arrays for each fiber
Parameters
----------
fib: the fibers data
voxelSize: 3-tuple containing the voxel size of the ROI image
Returns
-------
    (endpoints, endpointsmm) : tuple of matrices of size [#fibers, 2, 3]
        endpoints contains, for each fiber, the voxel index of its first and
        last point in the voxelSize volume; endpointsmm contains the same
        endpoints in millimeter coordinates
"""
# Init
n = len(fib)
endpoints = np.zeros((n, 2, 3))
endpointsmm = np.zeros((n, 2, 3))
pc = -1
# Computation for each fiber
for i, fi in enumerate(fib):
f = fi[0]
# store startpoint
endpoints[i, 0, :] = f[0, :]
# store endpoint
endpoints[i, 1, :] = f[-1, :]
# store startpoint
endpointsmm[i, 0, :] = f[0, :]
# store endpoint
endpointsmm[i, 1, :] = f[-1, :]
# Translate from mm to index
endpoints[i, 0, 0] = int(endpoints[i, 0, 0] / float(voxelSize[0]))
endpoints[i, 0, 1] = int(endpoints[i, 0, 1] / float(voxelSize[1]))
endpoints[i, 0, 2] = int(endpoints[i, 0, 2] / float(voxelSize[2]))
endpoints[i, 1, 0] = int(endpoints[i, 1, 0] / float(voxelSize[0]))
endpoints[i, 1, 1] = int(endpoints[i, 1, 1] / float(voxelSize[1]))
endpoints[i, 1, 2] = int(endpoints[i, 1, 2] / float(voxelSize[2]))
# Return the matrices
iflogger.info('Returning the endpoint matrix')
return (endpoints, endpointsmm)
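# Hedged note: for n fibers both returned arrays have shape (n, 2, 3);
# ``endpoints`` holds integer voxel indices (mm coordinates divided by the
# voxel size) while ``endpointsmm`` keeps the original first/last points in mm.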
def cmat(track_file, roi_file, resolution_network_file, matrix_name, matrix_mat_name, endpoint_name, intersections=False):
""" Create the connection matrix for each resolution using fibers and ROIs. """
stats = {}
iflogger.info('Running cmat function')
# Identify the endpoints of each fiber
en_fname = op.abspath(endpoint_name + '_endpoints.npy')
en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy')
iflogger.info('Reading Trackvis file {trk}'.format(trk=track_file))
fib, hdr = nb.trackvis.read(track_file, False)
stats['orig_n_fib'] = len(fib)
roi = nb.load(roi_file)
roiData = roi.get_data()
roiVoxelSize = roi.header.get_zooms()
(endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize)
# Output endpoint arrays
iflogger.info('Saving endpoint array: {array}'.format(array=en_fname))
np.save(en_fname, endpoints)
iflogger.info('Saving endpoint array in mm: {array}'.format(array=en_fnamemm))
np.save(en_fnamemm, endpointsmm)
n = len(fib)
iflogger.info('Number of fibers {num}'.format(num=n))
# Create empty fiber label array
fiberlabels = np.zeros((n, 2))
final_fiberlabels = []
final_fibers_idx = []
# Add node information from specified parcellation scheme
path, name, ext = split_filename(resolution_network_file)
if ext == '.pck':
gp = nx.read_gpickle(resolution_network_file)
elif ext == '.graphml':
gp = nx.read_graphml(resolution_network_file)
nROIs = len(gp.nodes())
# add node information from parcellation
if 'dn_position' in gp.node[gp.nodes()[0]]:
G = gp.copy()
else:
G = nx.Graph()
for u, d in gp.nodes_iter(data=True):
G.add_node(int(u), d)
# compute a position for the node based on the mean position of the
# ROI in voxel coordinates (segmentation volume )
xyz = tuple(np.mean(np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), axis=1))
G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]])
if intersections:
iflogger.info("Filtering tractography from intersections")
intersection_matrix, final_fiber_ids = create_allpoints_cmat(fib, roiData, roiVoxelSize, nROIs)
finalfibers_fname = op.abspath(endpoint_name + '_intersections_streamline_final.trk')
stats['intersections_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, final_fiber_ids)
intersection_matrix = np.matrix(intersection_matrix)
I = G.copy()
        H = nx.from_numpy_matrix(intersection_matrix)
H = nx.relabel_nodes(H, lambda x: x + 1) # relabel nodes so they start at 1
I.add_weighted_edges_from(((u, v, d['weight']) for u, v, d in H.edges(data=True)))
dis = 0
for i in range(endpoints.shape[0]):
# ROI start => ROI end
try:
startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], endpoints[i, 0, 2]])
endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], endpoints[i, 1, 2]])
except IndexError:
            iflogger.error(("AN INDEXERROR EXCEPTION OCCURRED FOR FIBER %s. PLEASE CHECK ENDPOINT GENERATION" % i))
break
# Filter
if startROI == 0 or endROI == 0:
dis += 1
fiberlabels[i, 0] = -1
continue
if startROI > nROIs or endROI > nROIs:
            iflogger.error("Start or end point of fiber terminates in a voxel which is labeled higher")
iflogger.error("than is expected by the parcellation node information.")
iflogger.error("Start ROI: %i, End ROI: %i" % (startROI, endROI))
iflogger.error("This needs bugfixing!")
continue
# Update fiber label
# switch the rois in order to enforce startROI < endROI
if endROI < startROI:
tmp = startROI
startROI = endROI
endROI = tmp
fiberlabels[i, 0] = startROI
fiberlabels[i, 1] = endROI
final_fiberlabels.append([startROI, endROI])
final_fibers_idx.append(i)
# Add edge to graph
if G.has_edge(startROI, endROI) and 'fiblist' in G.edge[startROI][endROI]:
G.edge[startROI][endROI]['fiblist'].append(i)
else:
G.add_edge(startROI, endROI, fiblist=[i])
# create a final fiber length array
finalfiberlength = []
if intersections:
final_fibers_indices = final_fiber_ids
else:
final_fibers_indices = final_fibers_idx
for idx in final_fibers_indices:
# compute length of fiber
finalfiberlength.append(length(fib[idx][0]))
# convert to array
final_fiberlength_array = np.array(finalfiberlength)
# make final fiber labels as array
final_fiberlabels_array = np.array(final_fiberlabels, dtype=int)
iflogger.info("Found %i (%f percent out of %i fibers) fibers that start or terminate in a voxel which is not labeled. (orphans)" % (dis, dis * 100.0 / n, n))
iflogger.info("Valid fibers: %i (%f percent)" % (n - dis, 100 - dis * 100.0 / n))
numfib = nx.Graph()
numfib.add_nodes_from(G)
fibmean = numfib.copy()
fibmedian = numfib.copy()
fibdev = numfib.copy()
for u, v, d in G.edges_iter(data=True):
G.remove_edge(u, v)
di = {}
if 'fiblist' in d:
di['number_of_fibers'] = len(d['fiblist'])
idx = np.where((final_fiberlabels_array[:, 0] == int(u)) & (final_fiberlabels_array[:, 1] == int(v)))[0]
di['fiber_length_mean'] = float(np.mean(final_fiberlength_array[idx]))
di['fiber_length_median'] = float(np.median(final_fiberlength_array[idx]))
di['fiber_length_std'] = float(np.std(final_fiberlength_array[idx]))
else:
di['number_of_fibers'] = 0
di['fiber_length_mean'] = 0
di['fiber_length_median'] = 0
di['fiber_length_std'] = 0
if not u == v: # Fix for self loop problem
G.add_edge(u, v, di)
if 'fiblist' in d:
numfib.add_edge(u, v, weight=di['number_of_fibers'])
fibmean.add_edge(u, v, weight=di['fiber_length_mean'])
fibmedian.add_edge(u, v, weight=di['fiber_length_median'])
fibdev.add_edge(u, v, weight=di['fiber_length_std'])
iflogger.info('Writing network as {ntwk}'.format(ntwk=matrix_name))
nx.write_gpickle(G, op.abspath(matrix_name))
numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int)
numfib_dict = {'number_of_fibers': numfib_mlab}
fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64)
fibmean_dict = {'mean_fiber_length': fibmean_mlab}
fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64)
fibmedian_dict = {'median_fiber_length': fibmedian_mlab}
fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64)
fibdev_dict = {'fiber_length_std': fibdev_mlab}
if intersections:
path, name, ext = split_filename(matrix_name)
intersection_matrix_name = op.abspath(name + '_intersections') + ext
iflogger.info('Writing intersection network as {ntwk}'.format(ntwk=intersection_matrix_name))
nx.write_gpickle(I, intersection_matrix_name)
path, name, ext = split_filename(matrix_mat_name)
if not ext == '.mat':
ext = '.mat'
matrix_mat_name = matrix_mat_name + ext
iflogger.info('Writing matlab matrix as {mat}'.format(mat=matrix_mat_name))
sio.savemat(matrix_mat_name, numfib_dict)
if intersections:
intersect_dict = {'intersections': intersection_matrix}
intersection_matrix_mat_name = op.abspath(name + '_intersections') + ext
iflogger.info('Writing intersection matrix as {mat}'.format(mat=intersection_matrix_mat_name))
sio.savemat(intersection_matrix_mat_name, intersect_dict)
mean_fiber_length_matrix_name = op.abspath(name + '_mean_fiber_length') + ext
iflogger.info('Writing matlab mean fiber length matrix as {mat}'.format(mat=mean_fiber_length_matrix_name))
sio.savemat(mean_fiber_length_matrix_name, fibmean_dict)
median_fiber_length_matrix_name = op.abspath(name + '_median_fiber_length') + ext
iflogger.info('Writing matlab median fiber length matrix as {mat}'.format(mat=median_fiber_length_matrix_name))
sio.savemat(median_fiber_length_matrix_name, fibmedian_dict)
fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext
iflogger.info('Writing matlab fiber length deviation matrix as {mat}'.format(mat=fiber_length_std_matrix_name))
sio.savemat(fiber_length_std_matrix_name, fibdev_dict)
fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy')
iflogger.info("Storing final fiber length array as %s" % fiberlengths_fname)
np.save(fiberlengths_fname, final_fiberlength_array)
fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy')
iflogger.info("Storing all fiber labels (with orphans) as %s" % fiberlabels_fname)
np.save(fiberlabels_fname, np.array(fiberlabels, dtype=np.int32),)
fiberlabels_noorphans_fname = op.abspath(endpoint_name + '_final_fiberslabels.npy')
iflogger.info("Storing final fiber labels (no orphans) as %s" % fiberlabels_noorphans_fname)
np.save(fiberlabels_noorphans_fname, final_fiberlabels_array)
iflogger.info("Filtering tractography - keeping only no orphan fibers")
finalfibers_fname = op.abspath(endpoint_name + '_streamline_final.trk')
stats['endpoint_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, final_fibers_idx)
stats['endpoints_percent'] = float(stats['endpoint_n_fib']) / float(stats['orig_n_fib']) * 100
    if intersections:
        stats['intersections_percent'] = float(stats['intersections_n_fib']) / float(stats['orig_n_fib']) * 100
out_stats_file = op.abspath(endpoint_name + '_statistics.mat')
iflogger.info("Saving matrix creation statistics as %s" % out_stats_file)
sio.savemat(out_stats_file, stats)
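# Minimal invocation sketch for cmat (hedged; the file names below are
# hypothetical placeholders, and in practice the call is driven by the
# CreateMatrix interface defined further down):
#
#     cmat(track_file='fibers.trk', roi_file='fsLUT_aparc+aseg.nii',
#          resolution_network_file='resolution83.graphml',
#          matrix_name='connectome.pck', matrix_mat_name='cmatrix.mat',
#          endpoint_name='fibers', intersections=False)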
def save_fibers(oldhdr, oldfib, fname, indices):
""" Stores a new trackvis file fname using only given indices """
hdrnew = oldhdr.copy()
outstreams = []
for i in indices:
outstreams.append(oldfib[i])
n_fib_out = len(outstreams)
hdrnew['n_count'] = n_fib_out
iflogger.info("Writing final non-orphan fibers as %s" % fname)
nb.trackvis.write(fname, outstreams, hdrnew)
return n_fib_out
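# Hedged note: save_fibers copies the Trackvis header, updates ``n_count`` to
# the number of kept streamlines, and writes only the fibers whose indices
# were passed in (e.g. the non-orphan indices collected by cmat above).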
class CreateMatrixInputSpec(TraitedSpec):
roi_file = File(exists=True, mandatory=True, desc='Freesurfer aparc+aseg file')
tract_file = File(exists=True, mandatory=True, desc='Trackvis tract file')
resolution_network_file = File(exists=True, mandatory=True, desc='Parcellation files from Connectome Mapping Toolkit')
count_region_intersections = traits.Bool(False, usedefault=True, desc='Counts all of the fiber-region traversals in the connectivity matrix (requires significantly more computational time)')
out_matrix_file = File(genfile=True, desc='NetworkX graph describing the connectivity')
out_matrix_mat_file = File('cmatrix.mat', usedefault=True, desc='Matlab matrix describing the connectivity')
out_mean_fiber_length_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the mean fiber lengths between each node.')
    out_median_fiber_length_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the median fiber lengths between each node.')
out_fiber_length_std_matrix_mat_file = File(genfile=True, desc='Matlab matrix describing the deviation in fiber lengths connecting each node.')
out_intersection_matrix_mat_file = File(genfile=True, desc='Matlab connectivity matrix if all region/fiber intersections are counted.')
out_endpoint_array_name = File(genfile=True, desc='Name for the generated endpoint arrays')
class CreateMatrixOutputSpec(TraitedSpec):
matrix_file = File(desc='NetworkX graph describing the connectivity', exists=True)
intersection_matrix_file = File(desc='NetworkX graph describing the connectivity', exists=True)
matrix_files = OutputMultiPath(File(desc='All of the gpickled network files output by this interface', exists=True))
matlab_matrix_files = OutputMultiPath(File(desc='All of the MATLAB .mat files output by this interface', exists=True))
matrix_mat_file = File(desc='Matlab matrix describing the connectivity', exists=True)
    intersection_matrix_mat_file = File(desc='Matlab connectivity matrix of region/fiber intersections.', exists=True)
mean_fiber_length_matrix_mat_file = File(desc='Matlab matrix describing the mean fiber lengths between each node.', exists=True)
median_fiber_length_matrix_mat_file = File(desc='Matlab matrix describing the median fiber lengths between each node.', exists=True)
fiber_length_std_matrix_mat_file = File(desc='Matlab matrix describing the deviation in fiber lengths connecting each node.', exists=True)
endpoint_file = File(desc='Saved Numpy array with the endpoints of each fiber', exists=True)
endpoint_file_mm = File(desc='Saved Numpy array with the endpoints of each fiber (in millimeters)', exists=True)
fiber_length_file = File(desc='Saved Numpy array with the lengths of each fiber', exists=True)
fiber_label_file = File(desc='Saved Numpy array with the labels for each fiber', exists=True)
fiber_labels_noorphans = File(desc='Saved Numpy array with the labels for each non-orphan fiber', exists=True)
    filtered_tractography = File(desc='TrackVis file containing only those fibers that originate in one region and terminate in another', exists=True)
filtered_tractography_by_intersections = File(desc='TrackVis file containing all fibers which connect two regions', exists=True)
    filtered_tractographies = OutputMultiPath(File(desc='TrackVis file containing only those fibers that originate in one region and terminate in another', exists=True))
stats_file = File(desc='Saved Matlab .mat file with the number of fibers saved at each stage', exists=True)
class CreateMatrix(BaseInterface):
"""
Performs connectivity mapping and outputs the result as a NetworkX graph and a Matlab matrix
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> conmap = cmtk.CreateMatrix()
    >>> conmap.inputs.roi_file = 'fsLUT_aparc+aseg.nii'
    >>> conmap.inputs.tract_file = 'fibers.trk'
>>> conmap.run() # doctest: +SKIP
"""
input_spec = CreateMatrixInputSpec
output_spec = CreateMatrixOutputSpec
def _run_interface(self, runtime):
if isdefined(self.inputs.out_matrix_file):
path, name, _ = split_filename(self.inputs.out_matrix_file)
matrix_file = op.abspath(name + '.pck')
else:
matrix_file = self._gen_outfilename('.pck')
matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file)
path, name, ext = split_filename(matrix_mat_file)
if not ext == '.mat':
ext = '.mat'
matrix_mat_file = matrix_mat_file + ext
if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file):
mean_fiber_length_matrix_mat_file = op.abspath(self.inputs.out_mean_fiber_length_matrix_mat_file)
else:
mean_fiber_length_matrix_name = op.abspath(self._gen_outfilename('_mean_fiber_length.mat'))
if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file):
median_fiber_length_matrix_mat_file = op.abspath(self.inputs.out_median_fiber_length_matrix_mat_file)
else:
median_fiber_length_matrix_name = op.abspath(self._gen_outfilename('_median_fiber_length.mat'))
if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file):
fiber_length_std_matrix_mat_file = op.abspath(self.inputs.out_fiber_length_std_matrix_mat_file)
else:
fiber_length_std_matrix_name = op.abspath(self._gen_outfilename('_fiber_length_std.mat'))
if not isdefined(self.inputs.out_endpoint_array_name):
_, endpoint_name, _ = split_filename(self.inputs.tract_file)
endpoint_name = op.abspath(endpoint_name)
else:
endpoint_name = op.abspath(self.inputs.out_endpoint_array_name)
cmat(self.inputs.tract_file, self.inputs.roi_file, self.inputs.resolution_network_file,
matrix_file, matrix_mat_file, endpoint_name, self.inputs.count_region_intersections)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_matrix_file):
path, name, _ = split_filename(self.inputs.out_matrix_file)
out_matrix_file = op.abspath(name + '.pck')
out_intersection_matrix_file = op.abspath(name + '_intersections.pck')
else:
out_matrix_file = op.abspath(self._gen_outfilename('.pck'))
out_intersection_matrix_file = op.abspath(self._gen_outfilename('_intersections.pck'))
outputs['matrix_file'] = out_matrix_file
outputs['intersection_matrix_file'] = out_intersection_matrix_file
matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file)
path, name, ext = split_filename(matrix_mat_file)
if not ext == '.mat':
ext = '.mat'
matrix_mat_file = matrix_mat_file + ext
outputs['matrix_mat_file'] = matrix_mat_file
if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file):
outputs['mean_fiber_length_matrix_mat_file'] = op.abspath(self.inputs.out_mean_fiber_length_matrix_mat_file)
else:
outputs['mean_fiber_length_matrix_mat_file'] = op.abspath(self._gen_outfilename('_mean_fiber_length.mat'))
if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file):
outputs['median_fiber_length_matrix_mat_file'] = op.abspath(self.inputs.out_median_fiber_length_matrix_mat_file)
else:
outputs['median_fiber_length_matrix_mat_file'] = op.abspath(self._gen_outfilename('_median_fiber_length.mat'))
if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file):
outputs['fiber_length_std_matrix_mat_file'] = op.abspath(self.inputs.out_fiber_length_std_matrix_mat_file)
else:
outputs['fiber_length_std_matrix_mat_file'] = op.abspath(self._gen_outfilename('_fiber_length_std.mat'))
if isdefined(self.inputs.out_intersection_matrix_mat_file):
outputs['intersection_matrix_mat_file'] = op.abspath(self.inputs.out_intersection_matrix_mat_file)
else:
outputs['intersection_matrix_mat_file'] = op.abspath(self._gen_outfilename('_intersections.mat'))
if isdefined(self.inputs.out_endpoint_array_name):
endpoint_name = self.inputs.out_endpoint_array_name
outputs['endpoint_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_endpoints.npy')
outputs['endpoint_file_mm'] = op.abspath(self.inputs.out_endpoint_array_name + '_endpointsmm.npy')
outputs['fiber_length_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_final_fiberslength.npy')
outputs['fiber_label_file'] = op.abspath(self.inputs.out_endpoint_array_name + '_filtered_fiberslabel.npy')
outputs['fiber_labels_noorphans'] = op.abspath(self.inputs.out_endpoint_array_name + '_final_fiberslabels.npy')
else:
_, endpoint_name, _ = split_filename(self.inputs.tract_file)
outputs['endpoint_file'] = op.abspath(endpoint_name + '_endpoints.npy')
outputs['endpoint_file_mm'] = op.abspath(endpoint_name + '_endpointsmm.npy')
outputs['fiber_length_file'] = op.abspath(endpoint_name + '_final_fiberslength.npy')
outputs['fiber_label_file'] = op.abspath(endpoint_name + '_filtered_fiberslabel.npy')
outputs['fiber_labels_noorphans'] = op.abspath(endpoint_name + '_final_fiberslabels.npy')
if self.inputs.count_region_intersections:
outputs['matrix_files'] = [out_matrix_file, out_intersection_matrix_file]
outputs['matlab_matrix_files'] = [outputs['matrix_mat_file'],
outputs['mean_fiber_length_matrix_mat_file'], outputs['median_fiber_length_matrix_mat_file'],
outputs['fiber_length_std_matrix_mat_file'], outputs['intersection_matrix_mat_file']]
else:
outputs['matrix_files'] = [out_matrix_file]
outputs['matlab_matrix_files'] = [outputs['matrix_mat_file'],
outputs['mean_fiber_length_matrix_mat_file'], outputs['median_fiber_length_matrix_mat_file'],
outputs['fiber_length_std_matrix_mat_file']]
outputs['filtered_tractography'] = op.abspath(endpoint_name + '_streamline_final.trk')
outputs['filtered_tractography_by_intersections'] = op.abspath(endpoint_name + '_intersections_streamline_final.trk')
outputs['filtered_tractographies'] = [outputs['filtered_tractography'], outputs['filtered_tractography_by_intersections']]
outputs['stats_file'] = op.abspath(endpoint_name + '_statistics.mat')
return outputs
def _gen_outfilename(self, ext):
if ext.endswith("mat") and isdefined(self.inputs.out_matrix_mat_file):
_, name, _ = split_filename(self.inputs.out_matrix_mat_file)
elif isdefined(self.inputs.out_matrix_file):
_, name, _ = split_filename(self.inputs.out_matrix_file)
else:
_, name, _ = split_filename(self.inputs.tract_file)
return name + ext
class ROIGenInputSpec(BaseInterfaceInputSpec):
aparc_aseg_file = File(exists=True, mandatory=True, desc='Freesurfer aparc+aseg file')
LUT_file = File(exists=True, xor=['use_freesurfer_LUT'], desc='Custom lookup table (cf. FreeSurferColorLUT.txt)')
use_freesurfer_LUT = traits.Bool(xor=['LUT_file'], desc='Boolean value; Set to True to use default Freesurfer LUT, False for custom LUT')
freesurfer_dir = Directory(requires=['use_freesurfer_LUT'], desc='Freesurfer main directory')
out_roi_file = File(genfile=True, desc='Region of Interest file for connectivity mapping')
out_dict_file = File(genfile=True, desc='Label dictionary saved in Pickle format')
class ROIGenOutputSpec(TraitedSpec):
roi_file = File(desc='Region of Interest file for connectivity mapping')
dict_file = File(desc='Label dictionary saved in Pickle format')
class ROIGen(BaseInterface):
"""
Generates a ROI file for connectivity mapping and a dictionary file containing relevant node information
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> rg = cmtk.ROIGen()
>>> rg.inputs.aparc_aseg_file = 'aparc+aseg.nii'
>>> rg.inputs.use_freesurfer_LUT = True
>>> rg.inputs.freesurfer_dir = '/usr/local/freesurfer'
>>> rg.run() # doctest: +SKIP
The label dictionary is written to disk using Pickle. Resulting data can be loaded using:
    >>> file = open("fsLUT_aparc+aseg.pck", "rb")
>>> labelDict = pickle.load(file) # doctest: +SKIP
>>> labelDict # doctest: +SKIP
"""
input_spec = ROIGenInputSpec
output_spec = ROIGenOutputSpec
def _run_interface(self, runtime):
aparc_aseg_file = self.inputs.aparc_aseg_file
aparcpath, aparcname, aparcext = split_filename(aparc_aseg_file)
iflogger.info('Using Aparc+Aseg file: {name}'.format(name=aparcname + aparcext))
niiAPARCimg = nb.load(aparc_aseg_file)
niiAPARCdata = niiAPARCimg.get_data()
niiDataLabels = np.unique(niiAPARCdata)
numDataLabels = np.size(niiDataLabels)
iflogger.info('Number of labels in image: {n}'.format(n=numDataLabels))
write_dict = True
if self.inputs.use_freesurfer_LUT:
            self.LUT_file = op.join(self.inputs.freesurfer_dir, 'FreeSurferColorLUT.txt')
iflogger.info('Using Freesurfer LUT: {name}'.format(name=self.LUT_file))
prefix = 'fsLUT'
elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file):
self.LUT_file = op.abspath(self.inputs.LUT_file)
lutpath, lutname, lutext = split_filename(self.LUT_file)
iflogger.info('Using Custom LUT file: {name}'.format(name=lutname + lutext))
prefix = lutname
else:
prefix = 'hardcoded'
write_dict = False
if isdefined(self.inputs.out_roi_file):
roi_file = op.abspath(self.inputs.out_roi_file)
else:
roi_file = op.abspath(prefix + '_' + aparcname + '.nii')
if isdefined(self.inputs.out_dict_file):
dict_file = op.abspath(self.inputs.out_dict_file)
else:
dict_file = op.abspath(prefix + '_' + aparcname + '.pck')
if write_dict:
iflogger.info('Lookup table: {name}'.format(name=op.abspath(self.LUT_file)))
LUTlabelsRGBA = np.loadtxt(self.LUT_file, skiprows=4, usecols=[0, 1, 2, 3, 4, 5], comments='#',
dtype={'names': ('index', 'label', 'R', 'G', 'B', 'A'), 'formats': ('int', '|S30', 'int', 'int', 'int', 'int')})
numLUTLabels = np.size(LUTlabelsRGBA)
if numLUTLabels < numDataLabels:
iflogger.error('LUT file provided does not contain all of the regions in the image')
iflogger.error('Removing unmapped regions')
iflogger.info('Number of labels in LUT: {n}'.format(n=numLUTLabels))
LUTlabelDict = {}
""" Create dictionary for input LUT table"""
for labels in range(0, numLUTLabels):
LUTlabelDict[LUTlabelsRGBA[labels][0]] = [LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], LUTlabelsRGBA[labels][5]]
iflogger.info('Printing LUT label dictionary')
iflogger.info(LUTlabelDict)
mapDict = {}
MAPPING = [[1, 2012], [2, 2019], [3, 2032], [4, 2014], [5, 2020], [6, 2018], [7, 2027], [8, 2028], [9, 2003], [10, 2024], [11, 2017], [12, 2026],
[13, 2002], [14, 2023], [15, 2010], [16, 2022], [17, 2031], [18, 2029], [19, 2008], [20, 2025], [21, 2005], [22, 2021], [23, 2011],
[24, 2013], [25, 2007], [26, 2016], [27, 2006], [28, 2033], [29, 2009], [30, 2015], [31, 2001], [32, 2030], [33, 2034], [34, 2035],
[35, 49], [36, 50], [37, 51], [38, 52], [39, 58], [40, 53], [41, 54], [42, 1012], [43, 1019], [44, 1032], [45, 1014], [46, 1020], [47, 1018],
[48, 1027], [49, 1028], [50, 1003], [51, 1024], [52, 1017], [53, 1026], [54, 1002], [55, 1023], [56, 1010], [57, 1022], [58, 1031],
[59, 1029], [60, 1008], [61, 1025], [62, 1005], [63, 1021], [64, 1011], [65, 1013], [66, 1007], [67, 1016], [68, 1006], [69, 1033],
[70, 1009], [71, 1015], [72, 1001], [73, 1030], [74, 1034], [75, 1035], [76, 10], [77, 11], [78, 12], [79, 13], [80, 26], [81, 17],
[82, 18], [83, 16]]
""" Create empty grey matter mask, Populate with only those regions defined in the mapping."""
niiGM = np.zeros(niiAPARCdata.shape, dtype=np.uint)
for ma in MAPPING:
niiGM[niiAPARCdata == ma[1]] = ma[0]
mapDict[ma[0]] = ma[1]
iflogger.info('Grey matter mask created')
greyMaskLabels = np.unique(niiGM)
numGMLabels = np.size(greyMaskLabels)
iflogger.info('Number of grey matter labels: {num}'.format(num=numGMLabels))
labelDict = {}
GMlabelDict = {}
for label in greyMaskLabels:
try:
mapDict[label]
if write_dict:
GMlabelDict['originalID'] = mapDict[label]
            except KeyError:
iflogger.info('Label {lbl} not in provided mapping'.format(lbl=label))
if write_dict:
del GMlabelDict
GMlabelDict = {}
GMlabelDict['labels'] = LUTlabelDict[label][0]
GMlabelDict['colors'] = [LUTlabelDict[label][1], LUTlabelDict[label][2], LUTlabelDict[label][3]]
GMlabelDict['a'] = LUTlabelDict[label][4]
labelDict[label] = GMlabelDict
roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.affine,
niiAPARCimg.header)
iflogger.info('Saving ROI File to {path}'.format(path=roi_file))
nb.save(roi_image, roi_file)
if write_dict:
iflogger.info('Saving Dictionary File to {path} in Pickle format'.format(path=dict_file))
            with open(dict_file, 'wb') as f:
                pickle.dump(labelDict, f)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
if isdefined(self.inputs.out_roi_file):
outputs['roi_file'] = op.abspath(self.inputs.out_roi_file)
else:
outputs['roi_file'] = op.abspath(self._gen_outfilename('nii'))
if isdefined(self.inputs.out_dict_file):
outputs['dict_file'] = op.abspath(self.inputs.out_dict_file)
else:
outputs['dict_file'] = op.abspath(self._gen_outfilename('pck'))
return outputs
def _gen_outfilename(self, ext):
_, name, _ = split_filename(self.inputs.aparc_aseg_file)
if self.inputs.use_freesurfer_LUT:
prefix = 'fsLUT'
elif not self.inputs.use_freesurfer_LUT and isdefined(self.inputs.LUT_file):
lutpath, lutname, lutext = split_filename(self.inputs.LUT_file)
prefix = lutname
else:
prefix = 'hardcoded'
return prefix + '_' + name + '.' + ext
def create_nodes(roi_file, resolution_network_file, out_filename):
G = nx.Graph()
gp = nx.read_graphml(resolution_network_file)
roi_image = nb.load(roi_file)
roiData = roi_image.get_data()
nROIs = len(gp.nodes())
for u, d in gp.nodes_iter(data=True):
G.add_node(int(u), d)
xyz = tuple(np.mean(np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), axis=1))
G.node[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]])
nx.write_gpickle(G, out_filename)
return out_filename
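# Illustrative sketch (the helper name and arguments are ours, not part of the interface):
# create_nodes() above places each node at the centroid of its label in the vertically
# flipped ROI volume; for a single label the computation reduces to this.
def _example_label_centroid(roiData, label_value):
    """Return the mean (i, j, k) voxel coordinate of one label, as used in create_nodes()."""
    voxels = np.where(np.flipud(roiData) == int(label_value))  # tuple of index arrays
    return tuple(np.mean(voxels, axis=1))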
class CreateNodesInputSpec(BaseInterfaceInputSpec):
roi_file = File(exists=True, mandatory=True, desc='Region of interest file')
resolution_network_file = File(exists=True, mandatory=True, desc='Parcellation file from Connectome Mapping Toolkit')
out_filename = File('nodenetwork.pck', usedefault=True, desc='Output gpickled network with the nodes defined.')
class CreateNodesOutputSpec(TraitedSpec):
node_network = File(desc='Output gpickled network with the nodes defined.')
class CreateNodes(BaseInterface):
"""
Generates a NetworkX graph containing nodes at the centroid of each region in the input ROI file.
Node data is added from the resolution network file.
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> mknode = cmtk.CreateNodes()
>>> mknode.inputs.roi_file = 'ROI_scale500.nii.gz'
>>> mknode.run() # doctest: +SKIP
"""
input_spec = CreateNodesInputSpec
output_spec = CreateNodesOutputSpec
def _run_interface(self, runtime):
iflogger.info('Creating nodes...')
create_nodes(self.inputs.roi_file, self.inputs.resolution_network_file, self.inputs.out_filename)
iflogger.info('Saving node network to {path}'.format(path=op.abspath(self.inputs.out_filename)))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['node_network'] = op.abspath(self.inputs.out_filename)
return outputs
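# Illustrative usage sketch (file names are placeholders): chain ROIGen and CreateNodes
# and reload the pickled label dictionary. The dictionary is written in binary mode, so
# it has to be read back with 'rb'.
#
#     rg = ROIGen(aparc_aseg_file='aparc+aseg.nii', use_freesurfer_LUT=True,
#                 freesurfer_dir='/usr/local/freesurfer')
#     res = rg.run()
#     with open(res.outputs.dict_file, 'rb') as f:
#         labelDict = pickle.load(f)
#     mknode = CreateNodes(roi_file=res.outputs.roi_file,
#                          resolution_network_file='resolution83.graphml')
#     mknode.run()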
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from pecan import abort
from pecan.decorators import expose
from pecan import response
from pecan import rest
from pecan.secure import secure
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from storyboard._i18n import _
from storyboard.api.auth import authorization_checks as checks
from storyboard.api.v1 import validations
from storyboard.api.v1 import wmodels
from storyboard.common import decorators
from storyboard.common import exception as exc
from storyboard.db.api import base as api_base
from storyboard.db.api import teams as teams_api
from storyboard.db.api import users as users_api
CONF = cfg.CONF
class UsersSubcontroller(rest.RestController):
"""This controller should be used to list, add or remove users from a Team.
"""
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.User], int)
def get(self, team_id):
"""Get users inside a team.
Example::
curl https://my.example.org/api/v1/teams/1/users
:param team_id: An ID of the team.
"""
team = teams_api.team_get(team_id)
if not team:
raise exc.NotFound(_("Team %s not found") % team_id)
users = [api_base._filter_non_public_fields(user, user._public_fields)
for user in team.users]
return [wmodels.User.from_db_model(user) for user in users]
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(wmodels.User, int, int)
def put(self, team_id, user_id):
"""Add a user to a team.
Example::
TODO
:param team_id: An ID of the team.
:param user_id: An ID of the user.
"""
teams_api.team_add_user(team_id, user_id)
user = users_api.user_get(user_id)
user = api_base._filter_non_public_fields(user, user._public_fields)
return wmodels.User.from_db_model(user)
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(None, int, int, status_code=204)
def delete(self, team_id, user_id):
"""Delete a user from a team.
Example::
TODO
:param team_id: An ID of the team.
:param user_id: An ID of the user.
"""
teams_api.team_delete_user(team_id, user_id)
class TeamsController(rest.RestController):
"""REST controller for Teams."""
validation_post_schema = validations.TEAMS_POST_SCHEMA
validation_put_schema = validations.TEAMS_PUT_SCHEMA
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose(wmodels.Team, int)
def get_one_by_id(self, team_id):
"""Retrieve information about the given team.
:param team_id: Team ID.
"""
team = teams_api.team_get(team_id)
if team:
return wmodels.Team.from_db_model(team)
else:
raise exc.NotFound(_("Team %s not found") % team_id)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose(wmodels.Team, wtypes.text)
def get_one_by_name(self, team_name):
"""Retrieve information about the given team.
:param team_name: Team name.
"""
team = teams_api.team_get_by_name(team_name)
if team:
return wmodels.Team.from_db_model(team)
else:
raise exc.NotFound(_("Team %s not found") % team_name)
@decorators.db_exceptions
@secure(checks.guest)
@wsme_pecan.wsexpose([wmodels.Team], int, int, int, wtypes.text,
wtypes.text, wtypes.text, wtypes.text)
def get(self, marker=None, offset=None, limit=None, name=None,
description=None, sort_field='id', sort_dir='asc'):
"""Retrieve a list of teams.
Example::
curl https://my.example.org/api/v1/teams
:param offset: The offset at which to start the page.
:param marker: The resource id where the page should begin.
:param limit: The number of teams to retrieve.
:param name: A string to filter the name by.
:param description: A string to filter the description by.
:param sort_field: The name of the field to sort on.
:param sort_dir: Sort direction for results (asc, desc).
"""
# Boundary check on limit.
if limit is not None:
limit = max(0, limit)
# Resolve the marker record.
marker_team = teams_api.team_get(marker)
teams = teams_api.team_get_all(marker=marker_team,
offset=offset,
limit=limit,
name=name,
description=description,
sort_field=sort_field,
sort_dir=sort_dir)
team_count = teams_api.team_get_count(name=name,
description=description)
# Apply the query response headers.
if limit:
response.headers['X-Limit'] = str(limit)
response.headers['X-Total'] = str(team_count)
if marker_team:
response.headers['X-Marker'] = str(marker_team.id)
if offset is not None:
response.headers['X-Offset'] = str(offset)
return [wmodels.Team.from_db_model(t) for t in teams]
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(wmodels.Team, body=wmodels.Team)
def post(self, team):
"""Create a new team.
Example::
TODO
:param team: a team within the request body.
"""
result = teams_api.team_create(team.as_dict())
return wmodels.Team.from_db_model(result)
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(wmodels.Team, int, body=wmodels.Team)
def put(self, team_id, team):
"""Modify this team.
Example::
TODO
:param team_id: An ID of the team.
:param team: A team within the request body.
"""
result = teams_api.team_update(team_id,
team.as_dict(omit_unset=True))
if result:
return wmodels.Team.from_db_model(result)
else:
raise exc.NotFound(_("Team %s not found") % team_id)
users = UsersSubcontroller()
def _is_int(self, s):
try:
int(s)
return True
except ValueError:
return False
@expose()
def _route(self, args, request):
if request.method == 'GET' and len(args) > 0:
# It's a request by a name or id
first_token = args[0]
if self._is_int(first_token):
if len(args) > 1 and args[1] == "users":
# Route to users subcontroller
return super(TeamsController, self)._route(args, request)
# Get by id
return self.get_one_by_id, args
else:
# Get by name
return self.get_one_by_name, ["/".join(args)]
# Use default routing for all other requests
return super(TeamsController, self)._route(args, request)
@decorators.db_exceptions
@secure(checks.superuser)
@wsme_pecan.wsexpose(None, int, status_code=204)
def delete(self, team_id):
"""Delete this team.
Example::
TODO
:param team_id: An ID of the team.
"""
try:
teams_api.team_delete(team_id)
except exc.NotFound as not_found_exc:
abort(404, not_found_exc.message)
except exc.NotEmpty as not_empty_exc:
abort(400, not_empty_exc.message)
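# Illustrative only (the team name "infra" is a placeholder): the custom _route() above
# lets a team be fetched by either its numeric ID or its name without a dedicated
# sub-path, while still dispatching the nested users resource:
#
#     curl https://my.example.org/api/v1/teams/1        # -> get_one_by_id
#     curl https://my.example.org/api/v1/teams/infra    # -> get_one_by_name
#     curl https://my.example.org/api/v1/teams/1/users  # -> UsersSubcontroller.get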
|
|
# Copyright (C) 2013-2015 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DXJob Handler
+++++++++++++
Jobs are DNAnexus entities that capture an instantiation of a running
app or applet. They can be created from either
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run` if running an applet or app, or
via :func:`new_dxjob` or :func:`DXJob.new` in the case of an existing
job creating a subjob.
"""
from __future__ import (print_function, unicode_literals)
import os, time
import dxpy
from . import DXObject, DXDataObject, DXJobFailureError, verify_string_dxid
from ..exceptions import DXError
from ..utils.local_exec_utils import queue_entry_point
#########
# DXJob #
#########
def new_dxjob(fn_input, fn_name, name=None, tags=None, properties=None, details=None,
instance_type=None, depends_on=None,
**kwargs):
'''
:param fn_input: Function input
:type fn_input: dict
:param fn_name: Name of the function to be called
:type fn_name: string
:param name: Name for the new job (default is "<parent job name>:<fn_name>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
    :param depends_on: List of data objects or jobs that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
:rtype: :class:`~dxpy.bindings.dxjob.DXJob`
Creates and enqueues a new job that will execute a particular
function (from the same app or applet as the one the current job is
running). Returns the :class:`~dxpy.bindings.dxjob.DXJob` handle for
the job.
Note that this function is shorthand for::
dxjob = DXJob()
dxjob.new(fn_input, fn_name, **kwargs)
.. note:: This method is intended for calls made from within
already-executing jobs or apps. If it is called from outside of
an Execution Environment, an exception will be thrown. To create
new jobs from outside the Execution Environment, use
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run`.
    .. note:: If the environment variable ``DX_JOB_ID`` is not set, this method assumes that it is running within the debug harness, executes the job in place, and provides a debug job handler object that does not have a corresponding remote API job object.
'''
dxjob = DXJob()
dxjob.new(fn_input, fn_name, name=name, tags=tags, properties=properties,
details=details, instance_type=instance_type, depends_on=depends_on, **kwargs)
return dxjob
class DXJob(DXObject):
'''
Remote job object handler.
'''
_class = "job"
def __init__(self, dxid=None):
self._test_harness_result = None
DXObject.__init__(self, dxid=dxid)
self.set_id(dxid)
def new(self, fn_input, fn_name, name=None, tags=None, properties=None, details=None,
instance_type=None, depends_on=None,
**kwargs):
'''
:param fn_input: Function input
:type fn_input: dict
:param fn_name: Name of the function to be called
:type fn_name: string
:param name: Name for the new job (default is "<parent job name>:<fn_name>")
:type name: string
:param tags: Tags to associate with the job
:type tags: list of strings
:param properties: Properties to associate with the job
:type properties: dict with string values
:param details: Details to set for the job
:type details: dict or list
:param instance_type: Instance type on which the job will be run, or a dict mapping function names to instance type requests
:type instance_type: string or dict
        :param depends_on: List of data objects or jobs that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID
:type depends_on: list
Creates and enqueues a new job that will execute a particular
function (from the same app or applet as the one the current job
is running).
.. note:: This method is intended for calls made from within
already-executing jobs or apps. If it is called from outside
of an Execution Environment, an exception will be thrown. To
create new jobs from outside the Execution Environment, use
:func:`dxpy.bindings.dxapplet.DXApplet.run` or
:func:`dxpy.bindings.dxapp.DXApp.run`.
'''
final_depends_on = []
if depends_on is not None:
if isinstance(depends_on, list):
for item in depends_on:
if isinstance(item, DXJob) or isinstance(item, DXDataObject):
if item.get_id() is None:
raise DXError('A dxpy handler given in depends_on does not have an ID set')
final_depends_on.append(item.get_id())
elif isinstance(item, basestring):
final_depends_on.append(item)
else:
raise DXError('Expected elements of depends_on to only be either instances of DXJob or DXDataObject, or strings')
else:
raise DXError('Expected depends_on field to be a list')
if 'DX_JOB_ID' in os.environ:
req_input = {}
req_input["input"] = fn_input
req_input["function"] = fn_name
if name is not None:
req_input["name"] = name
if tags is not None:
req_input["tags"] = tags
if properties is not None:
req_input["properties"] = properties
if instance_type is not None:
if isinstance(instance_type, basestring):
req_input["systemRequirements"] = {fn_name: {"instanceType": instance_type}}
elif isinstance(instance_type, dict):
req_input["systemRequirements"] = {stage: {"instanceType": stage_inst} for stage, stage_inst in instance_type.items()}
else:
raise DXError('Expected instance_type field to be either a string or a dict')
if depends_on is not None:
req_input["dependsOn"] = final_depends_on
if details is not None:
req_input["details"] = details
resp = dxpy.api.job_new(req_input, **kwargs)
self.set_id(resp["id"])
else:
self.set_id(queue_entry_point(function=fn_name, input_hash=fn_input,
depends_on=final_depends_on,
name=name))
def set_id(self, dxid):
'''
:param dxid: New job ID to be associated with the handler (localjob IDs also accepted for local runs)
:type dxid: string
Discards the currently stored ID and associates the handler with *dxid*
'''
if dxid is not None:
if not (isinstance(dxid, basestring) and dxid.startswith('localjob-')):
# localjob IDs (which do not follow the usual ID
# syntax) should be allowed; otherwise, follow the
# usual syntax checking
verify_string_dxid(dxid, self._class)
self._dxid = dxid
def describe(self, fields=None, io=None, **kwargs):
"""
:param fields: dict where the keys are field names that should
be returned, and values should be set to True (by default,
all fields are returned)
:type fields: dict
:param io: Include input and output fields in description;
cannot be provided with *fields*; default is True if
*fields* is not provided (deprecated)
:type io: bool
:returns: Description of the job
:rtype: dict
Returns a hash with key-value pairs containing information about
the job, including its state and (optionally) its inputs and
outputs, as described in the API documentation for the
`/job-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method:-/job-xxxx/describe>`_
method.
"""
if fields is not None and io is not None:
raise DXError('DXJob.describe: cannot provide non-None values for both fields and io')
describe_input = {}
if fields is not None:
describe_input['fields'] = fields
if io is not None:
describe_input['io'] = io
self._desc = dxpy.api.job_describe(self._dxid, describe_input, **kwargs)
return self._desc
def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the job
:type tags: list of strings
Adds each of the specified tags to the job. Takes no
action for tags that are already listed for the job.
"""
dxpy.api.job_add_tags(self._dxid, {"tags": tags}, **kwargs)
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the job
:type tags: list of strings
Removes each of the specified tags from the job. Takes
no action for tags that the job does not currently have.
"""
dxpy.api.job_remove_tags(self._dxid, {"tags": tags}, **kwargs)
def set_properties(self, properties, **kwargs):
"""
:param properties: Property names and values given as key-value pairs of strings
:type properties: dict
Given key-value pairs in *properties* for property names and
values, the properties are set on the job for the given
property names. Any property with a value of :const:`None`
indicates the property will be deleted.
.. note:: Any existing properties not mentioned in *properties*
are not modified by this method.
"""
dxpy.api.job_set_properties(self._dxid, {"properties": properties}, **kwargs)
def wait_on_done(self, interval=2, timeout=3600*24*7, **kwargs):
'''
:param interval: Number of seconds between queries to the job's state
:type interval: integer
:param timeout: Maximum amount of time to wait, in seconds, until the job is done running
:type timeout: integer
:raises: :exc:`~dxpy.exceptions.DXError` if the timeout is reached before the job has finished running, or :exc:`dxpy.exceptions.DXJobFailureError` if the job fails
Waits until the job has finished running.
'''
elapsed = 0
while True:
state = self._get_state(**kwargs)
if state == "done":
break
if state == "failed":
desc = self.describe(**kwargs)
err_msg = "Job has failed because of {failureReason}: {failureMessage}".format(**desc)
if desc.get("failureFrom") != None and desc["failureFrom"]["id"] != desc["id"]:
err_msg += " (failure from {id})".format(id=desc['failureFrom']['id'])
raise DXJobFailureError(err_msg)
if state == "terminated":
raise DXJobFailureError("Job was terminated.")
if elapsed >= timeout or elapsed < 0:
raise DXJobFailureError("Reached timeout while waiting for the job to finish")
time.sleep(interval)
elapsed += interval
def terminate(self, **kwargs):
'''
Terminates the associated job.
'''
dxpy.api.job_terminate(self._dxid, **kwargs)
def get_output_ref(self, field, index=None, metadata=None):
'''
:param field: Output field name of this job
:type field: string
:param index: If the referenced field is an array, optionally specify an index (starting from 0) to indicate a particular member of the array
:type index: int
:param metadata: If the referenced field is of a data object class, a string indicating the metadata that should be read, e.g. "name", "properties.propkey", "details.refgenome"
:type metadata: string
Returns a dict containing a valid job-based object reference
to refer to an output of this job. This can be used directly
in place of a DNAnexus link when used as a job output value.
For example, after creating a subjob, the following app
snippet uses a reference to the new job's output as part of
its own output::
mysubjob = dxpy.new_dxjob({}, "my_function")
return { "myfileoutput": mysubjob.get_output_ref("output_field_name"),
"myotherfileoutput": mysubjob.get_output_ref("output_array",
index=1),
"filename": mysubjob.get_output_ref("output_field_name",
metadata="name") }
'''
link = {"$dnanexus_link": {"job": self._dxid, "field": field}}
if index is not None:
link["$dnanexus_link"]["index"] = index
if metadata is not None:
link["$dnanexus_link"]["metadata"] = metadata
return link
def _get_state(self, **kwargs):
'''
:returns: State of the remote object
:rtype: string
Queries the API server for the job's state.
        Note that this function is shorthand for:
            dxjob.describe(fields={"state": True}, **kwargs)["state"]
'''
return self.describe(fields=dict(state=True), **kwargs)["state"]
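# Illustrative sketch (function and field names are placeholders; only valid inside a
# running job, i.e. when DX_JOB_ID is set): launch a subjob and either reference its
# output lazily or block until it finishes.
#
#     subjob = new_dxjob({"chunk": 1}, "process_chunk")
#     output = {"result": subjob.get_output_ref("chunk_result")}  # job-based object reference
#     subjob.wait_on_done()                                       # or wait explicitly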
|
|
from types import SimpleNamespace
from typing import Dict
from unittest.mock import MagicMock, patch
from django.http import HttpRequest
from django.http.response import HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.actions import do_rename_stream
from zerver.lib.exceptions import InvalidJSONError, JsonableError
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import WebhookTestCase, ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.users import get_api_key
from zerver.lib.webhooks.common import (
INVALID_JSON_MESSAGE,
MISSING_EVENT_HEADER_MESSAGE,
MissingHTTPEventHeader,
get_fixture_http_headers,
standardize_headers,
validate_extract_webhook_http_header,
)
from zerver.models import UserProfile, get_realm, get_user
class WebhooksCommonTestCase(ZulipTestCase):
def test_webhook_http_header_header_exists(self) -> None:
webhook_bot = get_user("[email protected]", get_realm("zulip"))
request = HostRequestMock()
request.META["HTTP_X_CUSTOM_HEADER"] = "custom_value"
request.user = webhook_bot
header_value = validate_extract_webhook_http_header(
request, "X_CUSTOM_HEADER", "test_webhook"
)
self.assertEqual(header_value, "custom_value")
def test_webhook_http_header_header_does_not_exist(self) -> None:
realm = get_realm("zulip")
webhook_bot = get_user("[email protected]", realm)
webhook_bot.last_reminder = None
notification_bot = self.notification_bot(realm)
request = HostRequestMock()
request.user = webhook_bot
request.path = "some/random/path"
exception_msg = "Missing the HTTP event header 'X_CUSTOM_HEADER'"
with self.assertRaisesRegex(MissingHTTPEventHeader, exception_msg):
validate_extract_webhook_http_header(request, "X_CUSTOM_HEADER", "test_webhook")
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path=request.path,
header_name="X_CUSTOM_HEADER",
integration_name="test_webhook",
support_email=FromAddress.SUPPORT,
).rstrip()
self.assertEqual(msg.sender.id, notification_bot.id)
self.assertEqual(msg.content, expected_message)
def test_notify_bot_owner_on_invalid_json(self) -> None:
@webhook_view("ClientName", notify_bot_owner_on_invalid_json=False)
def my_webhook_no_notify(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
raise InvalidJSONError("Malformed JSON")
@webhook_view("ClientName", notify_bot_owner_on_invalid_json=True)
def my_webhook_notify(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
raise InvalidJSONError("Malformed JSON")
webhook_bot_email = "[email protected]"
webhook_bot_realm = get_realm("zulip")
webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
webhook_bot_api_key = get_api_key(webhook_bot)
request = HostRequestMock()
request.POST["api_key"] = webhook_bot_api_key
request.host = "zulip.testserver"
expected_msg = INVALID_JSON_MESSAGE.format(webhook_name="ClientName")
last_message_id = self.get_last_message().id
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_no_notify(request)
# First verify that without the setting, it doesn't send a PM to bot owner.
msg = self.get_last_message()
self.assertEqual(msg.id, last_message_id)
self.assertNotEqual(msg.content, expected_msg.strip())
# Then verify that with the setting, it does send such a message.
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_notify(request)
msg = self.get_last_message()
self.assertNotEqual(msg.id, last_message_id)
self.assertEqual(msg.sender.id, self.notification_bot(webhook_bot_realm).id)
self.assertEqual(msg.content, expected_msg.strip())
@patch("zerver.lib.webhooks.common.importlib.import_module")
def test_get_fixture_http_headers_for_success(self, import_module_mock: MagicMock) -> None:
def fixture_to_headers(fixture_name: str) -> Dict[str, str]:
# A sample function which would normally perform some
# extra operations before returning a dictionary
# corresponding to the fixture name passed. For this test,
# we just return a fixed dictionary.
return {"key": "value"}
fake_module = SimpleNamespace(fixture_to_headers=fixture_to_headers)
import_module_mock.return_value = fake_module
headers = get_fixture_http_headers("some_integration", "complex_fixture")
self.assertEqual(headers, {"key": "value"})
def test_get_fixture_http_headers_for_non_existant_integration(self) -> None:
headers = get_fixture_http_headers("some_random_nonexistant_integration", "fixture_name")
self.assertEqual(headers, {})
@patch("zerver.lib.webhooks.common.importlib.import_module")
def test_get_fixture_http_headers_with_no_fixtures_to_headers_function(
self,
import_module_mock: MagicMock,
) -> None:
fake_module = SimpleNamespace()
import_module_mock.return_value = fake_module
self.assertEqual(
get_fixture_http_headers("some_integration", "simple_fixture"),
{},
)
def test_standardize_headers(self) -> None:
self.assertEqual(standardize_headers({}), {})
raw_headers = {"Content-Type": "text/plain", "X-Event-Type": "ping"}
djangoified_headers = standardize_headers(raw_headers)
expected_djangoified_headers = {"CONTENT_TYPE": "text/plain", "HTTP_X_EVENT_TYPE": "ping"}
self.assertEqual(djangoified_headers, expected_djangoified_headers)
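# Illustrative sketch (the header name is hypothetical): get_fixture_http_headers() works
# by importing the integration's view module and calling its fixture_to_headers() hook,
# so an integration that needs per-fixture headers ships something like:
#
#     def fixture_to_headers(fixture_name: str) -> Dict[str, str]:
#         if fixture_name.startswith("ticket"):
#             return {"HTTP_X_EVENT_TYPE": "ticket"}
#         return {}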
class WebhookURLConfigurationTestCase(WebhookTestCase):
STREAM_NAME = "helloworld"
WEBHOOK_DIR_NAME = "helloworld"
URL_TEMPLATE = "/api/v1/external/helloworld?stream={stream}&api_key={api_key}"
def setUp(self) -> None:
super().setUp()
stream = self.subscribe(self.test_user, self.STREAM_NAME)
# In actual webhook tests, we will not need to use stream id.
# We assign stream id to STREAM_NAME for testing URL configuration only.
self.STREAM_NAME = str(stream.id)
do_rename_stream(stream, "helloworld_renamed", self.test_user)
self.url = self.build_webhook_url()
def test_trigger_stream_message_by_id(self) -> None:
# check_webhook cannot be used here as it
# subscribes the test user to self.STREAM_NAME
payload = self.get_body("hello")
self.send_webhook_payload(
self.test_user, self.url, payload, content_type="application/json"
)
expected_topic = "Hello World"
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**"
msg = self.get_last_message()
self.assert_stream_message(
message=msg,
stream_name="helloworld_renamed",
topic_name=expected_topic,
content=expected_message,
)
class MissingEventHeaderTestCase(WebhookTestCase):
STREAM_NAME = "groove"
URL_TEMPLATE = "/api/v1/external/groove?stream={stream}&api_key={api_key}"
# This tests the validate_extract_webhook_http_header function with
# an actual webhook, instead of just making a mock
def test_missing_event_header(self) -> None:
self.subscribe(self.test_user, self.STREAM_NAME)
result = self.client_post(
self.url,
self.get_body("ticket_state_changed"),
content_type="application/x-www-form-urlencoded",
)
self.assert_json_error(result, "Missing the HTTP event header 'X_GROOVE_EVENT'")
realm = get_realm("zulip")
webhook_bot = get_user("[email protected]", realm)
webhook_bot.last_reminder = None
notification_bot = self.notification_bot(realm)
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path="/api/v1/external/groove",
header_name="X_GROOVE_EVENT",
integration_name="Groove",
support_email=FromAddress.SUPPORT,
).rstrip()
if msg.sender.id != notification_bot.id: # nocoverage
# This block seems to fire occasionally; debug output:
print(msg)
print(msg.content)
self.assertEqual(msg.sender.id, notification_bot.id)
self.assertEqual(msg.content, expected_message)
def get_body(self, fixture_name: str) -> str:
return self.webhook_fixture_data("groove", fixture_name, file_type="json")
|
|
#!/usr/bin/python
import datetime
import math
import os
import shutil
import sys
import time
import gpxpy
import numpy as np
try:
import pyexiv2
from pyexiv2.utils import make_fraction
except ImportError as e:
# pyexiv2 is not available in python 3. We catch the error
# so that py.test can load this module anyway.
# TODO(pau): find an alternative package. Probably py3exiv2.
print("ERROR: pyexiv2 module not available")
from opensfm import geo
'''
(source: https://github.com/mapillary/mapillary_tools)
Script for geotagging images using a gpx file from an external GPS.
Intended as a lightweight tool.
!!! This version needs testing, please report issues.!!!
Uses the capture time in EXIF and looks up an interpolated lat, lon, bearing
for each image, and writes the values to the EXIF of the image.
You can supply a time offset in seconds if the GPS clock and camera clocks are not in sync.
Requires gpxpy, e.g. 'pip install gpxpy'
Requires pyexiv2, see install instructions at http://tilloy.net/dev/pyexiv2/
(or use your favorite installer, e.g. 'brew install pyexiv2').
'''
def utc_to_localtime(utc_time):
utc_offset_timedelta = datetime.datetime.utcnow() - datetime.datetime.now()
return utc_time - utc_offset_timedelta
def get_lat_lon_time(gpx_file, gpx_time='utc'):
'''
Read location and time stamps from a track in a GPX file.
Returns a list of tuples (time, lat, lon, elevation).
GPX stores time in UTC, assume your camera used the local
timezone and convert accordingly.
'''
with open(gpx_file, 'r') as f:
gpx = gpxpy.parse(f)
points = []
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
ptime = utc_to_localtime(point.time) if gpx_time=='utc' else point.time
points.append( (ptime, point.latitude, point.longitude, point.elevation) )
# sort by time just in case
points.sort()
return points
def compute_bearing(start_lat, start_lon, end_lat, end_lon):
'''
Get the compass bearing from start to end.
Formula from
http://www.movable-type.co.uk/scripts/latlong.html
'''
# make sure everything is in radians
start_lat = math.radians(start_lat)
start_lon = math.radians(start_lon)
end_lat = math.radians(end_lat)
end_lon = math.radians(end_lon)
dLong = end_lon - start_lon
dPhi = math.log(math.tan(end_lat/2.0+math.pi/4.0)/math.tan(start_lat/2.0+math.pi/4.0))
if abs(dLong) > math.pi:
if dLong > 0.0:
dLong = -(2.0 * math.pi - dLong)
else:
dLong = (2.0 * math.pi + dLong)
y = math.sin(dLong)*math.cos(end_lat)
x = math.cos(start_lat)*math.sin(end_lat) - math.sin(start_lat)*math.cos(end_lat)*math.cos(dLong)
bearing = (math.degrees(math.atan2(y, x)) + 360.0) % 360.0
return bearing
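# For example, compute_bearing(0, 0, 0, 1) returns 90.0 (due east) and
# compute_bearing(0, 0, 1, 0) returns 0.0 (due north).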
def interpolate_lat_lon(points, t):
'''
Return interpolated lat, lon and compass bearing for time t.
Points is a list of tuples (time, lat, lon, elevation), t a datetime object.
'''
# find the enclosing points in sorted list
if (t<points[0][0]) or (t>=points[-1][0]):
raise ValueError("Time t not in scope of gpx file.")
for i,point in enumerate(points):
if t<point[0]:
if i>0:
before = points[i-1]
else:
before = points[i]
after = points[i]
break
# time diff
dt_before = (t-before[0]).total_seconds()
dt_after = (after[0]-t).total_seconds()
# simple linear interpolation
lat = (before[1]*dt_after + after[1]*dt_before) / (dt_before + dt_after)
lon = (before[2]*dt_after + after[2]*dt_before) / (dt_before + dt_after)
bearing = compute_bearing(before[1], before[2], after[1], after[2])
if before[3] is not None:
ele = (before[3]*dt_after + after[3]*dt_before) / (dt_before + dt_after)
else:
ele = None
return lat, lon, bearing, ele
def to_deg(value, loc):
'''
Convert decimal position to degrees.
'''
if value < 0:
loc_value = loc[0]
elif value > 0:
loc_value = loc[1]
else:
loc_value = ""
abs_value = abs(value)
deg = int(abs_value)
t1 = (abs_value-deg)*60
mint = int(t1)
sec = round((t1 - mint)* 60, 6)
return (deg, mint, sec, loc_value)
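# For example, to_deg(65.0584, ["S", "N"]) returns (65, 3, 30.24, 'N').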
def gpx_lerp(alpha, a, b):
'''Interpolate gpx point as (1 - alpha) * a + alpha * b
'''
dt = alpha * (b[0] - a[0]).total_seconds()
t = a[0] + datetime.timedelta(seconds=dt)
lat = (1 - alpha) * a[1] + alpha * b[1]
lon = (1 - alpha) * a[2] + alpha * b[2]
alt = (1 - alpha) * a[3] + alpha * b[3]
return t, lat, lon, alt
def segment_sphere_intersection(A, B, C, r):
'''Intersect the segment AB and the sphere (C,r).
Assumes A is inside the sphere and B is outside.
Return the ratio between the length of AI and the length
of AB, where I is the intersection.
'''
AB = np.array(B) - np.array(A)
CA = np.array(A) - np.array(C)
a = AB.dot(AB)
b = 2 * AB.dot(CA)
c = CA.dot(CA) - r**2
d = max(0, b**2 - 4 * a * c)
return (-b + np.sqrt(d)) / (2 * a)
def space_next_point(a, b, last, dx):
A = geo.ecef_from_lla(a[1], a[2], 0.)
B = geo.ecef_from_lla(b[1], b[2], 0.)
C = geo.ecef_from_lla(last[1], last[2], 0.)
alpha = segment_sphere_intersection(A, B, C, dx)
return gpx_lerp(alpha, a, b)
def time_next_point(a, b, last, dt):
da = (a[0] - last[0]).total_seconds()
db = (b[0] - last[0]).total_seconds()
alpha = (dt - da) / (db - da)
return gpx_lerp(alpha, a, b)
def time_distance(a, b):
return (b[0] - a[0]).total_seconds()
def space_distance(a, b):
return geo.gps_distance(a[1:3], b[1:3])
def sample_gpx(points, dx, dt=None):
if dt is not None:
dx = float(dt)
print("Sampling GPX file every {0} seconds".format(dx))
distance = time_distance
next_point = time_next_point
else:
print("Sampling GPX file every {0} meters".format(dx))
distance = space_distance
next_point = space_next_point
key_points = [points[0]]
a = points[0]
for i in range(1, len(points)):
a, b = points[i - 1], points[i]
dx_b = distance(key_points[-1], b)
while dx and dx_b >= dx:
a = next_point(a, b, key_points[-1], dx)
key_points.append(a)
assert np.fabs(dx - distance(key_points[-2], key_points[-1])) < 0.1
dx_b = distance(key_points[-1], b)
print("{} points sampled".format(len(key_points)))
return key_points
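# Illustrative usage ('track.gpx' is a placeholder): resample a parsed track either
# spatially or temporally.
#
#     points = get_lat_lon_time('track.gpx')
#     every_5m = sample_gpx(points, 5)           # roughly one key point every 5 meters
#     every_2s = sample_gpx(points, None, dt=2)  # one key point every 2 seconds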
def add_gps_to_exif(filename, lat, lon, bearing, elevation, updated_filename=None, remove_image_description=True):
'''
Given lat, lon, bearing, elevation, write to EXIF
'''
# TODO: use this within add_exif_using_timestamp
if updated_filename is not None:
shutil.copy2(filename, updated_filename)
filename = updated_filename
metadata = pyexiv2.ImageMetadata(filename)
metadata.read()
lat_deg = to_deg(lat, ["S", "N"])
lon_deg = to_deg(lon, ["W", "E"])
# convert decimal coordinates into degrees, minutes and seconds as fractions for EXIF
exiv_lat = (make_fraction(lat_deg[0],1), make_fraction(int(lat_deg[1]),1), make_fraction(int(lat_deg[2]*1000000),1000000))
exiv_lon = (make_fraction(lon_deg[0],1), make_fraction(int(lon_deg[1]),1), make_fraction(int(lon_deg[2]*1000000),1000000))
# convert direction into fraction
exiv_bearing = make_fraction(int(bearing*100),100)
# add to exif
metadata["Exif.GPSInfo.GPSLatitude"] = exiv_lat
metadata["Exif.GPSInfo.GPSLatitudeRef"] = lat_deg[3]
metadata["Exif.GPSInfo.GPSLongitude"] = exiv_lon
metadata["Exif.GPSInfo.GPSLongitudeRef"] = lon_deg[3]
metadata["Exif.Image.GPSTag"] = 654
metadata["Exif.GPSInfo.GPSMapDatum"] = "WGS-84"
metadata["Exif.GPSInfo.GPSVersionID"] = '2 0 0 0'
metadata["Exif.GPSInfo.GPSImgDirection"] = exiv_bearing
metadata["Exif.GPSInfo.GPSImgDirectionRef"] = "T"
if remove_image_description: metadata["Exif.Image.ImageDescription"] = []
if elevation is not None:
exiv_elevation = make_fraction(int(abs(elevation)*100),100)
metadata["Exif.GPSInfo.GPSAltitude"] = exiv_elevation
metadata["Exif.GPSInfo.GPSAltitudeRef"] = '0' if elevation >= 0 else '1'
metadata.write()
def add_exif_using_timestamp(filename, points, offset_time=0, timestamp=None, orientation=1, image_description=None):
'''
Find lat, lon and bearing of filename and write to EXIF.
'''
metadata = pyexiv2.ImageMetadata(filename)
metadata.read()
if timestamp:
metadata['Exif.Photo.DateTimeOriginal'] = timestamp
t = metadata['Exif.Photo.DateTimeOriginal'].value
    # subtract offset in s between gpx time and exif time
t = t - datetime.timedelta(seconds=offset_time)
try:
lat, lon, bearing, elevation = interpolate_lat_lon(points, t)
lat_deg = to_deg(lat, ["S", "N"])
lon_deg = to_deg(lon, ["W", "E"])
# convert decimal coordinates into degrees, minutes and seconds as fractions for EXIF
exiv_lat = (make_fraction(lat_deg[0],1), make_fraction(int(lat_deg[1]),1), make_fraction(int(lat_deg[2]*1000000),1000000))
exiv_lon = (make_fraction(lon_deg[0],1), make_fraction(int(lon_deg[1]),1), make_fraction(int(lon_deg[2]*1000000),1000000))
# convert direction into fraction
exiv_bearing = make_fraction(int(bearing*1000),1000)
# add to exif
metadata["Exif.GPSInfo.GPSLatitude"] = exiv_lat
metadata["Exif.GPSInfo.GPSLatitudeRef"] = lat_deg[3]
metadata["Exif.GPSInfo.GPSLongitude"] = exiv_lon
metadata["Exif.GPSInfo.GPSLongitudeRef"] = lon_deg[3]
metadata["Exif.Image.GPSTag"] = 654
metadata["Exif.GPSInfo.GPSMapDatum"] = "WGS-84"
metadata["Exif.GPSInfo.GPSVersionID"] = '2 0 0 0'
metadata["Exif.GPSInfo.GPSImgDirection"] = exiv_bearing
metadata["Exif.GPSInfo.GPSImgDirectionRef"] = "T"
metadata["Exif.Image.Orientation"] = orientation
if image_description is not None:
metadata["Exif.Image.ImageDescription"] = image_description
if elevation is not None:
exiv_elevation = make_fraction(int(abs(elevation)*100),100)
metadata["Exif.GPSInfo.GPSAltitude"] = exiv_elevation
metadata["Exif.GPSInfo.GPSAltitudeRef"] = '0' if elevation >= 0 else '1'
metadata.write()
print("Added geodata to: {0} ({1}, {2}, {3}), altitude {4}".format(filename, lat, lon, bearing, elevation))
except ValueError as e:
print("Skipping {0}: {1}".format(filename, e))
if __name__ == '__main__':
'''
Use from command line as: python geotag_from_gpx.py path gpx_file time_offset
The time_offset is optional and defaults to 0.
It is defined as 'exif time' - 'gpx time' in whole seconds,
so if your camera clock is ahead of the gpx clock by 2s,
then the offset is 2.
'''
    if len(sys.argv) < 3 or len(sys.argv) > 4:
print("Usage: python geotag_from_gpx.py path gpx_file time_offset")
raise IOError("Bad input parameters.")
path = sys.argv[1]
gpx_filename = sys.argv[2]
if len(sys.argv) == 4:
time_offset = int(sys.argv[3])
else:
time_offset = 0
if path.lower().endswith(".jpg"):
# single file
file_list = [path]
else:
# folder(s)
file_list = []
for root, sub_folders, files in os.walk(path):
file_list += [os.path.join(root, filename) for filename in files if filename.lower().endswith(".jpg")]
# start time
t = time.time()
# read gpx file to get track locations
gpx = get_lat_lon_time(gpx_filename)
print("===\nStarting geotagging of {0} images using {1}.\n===".format(len(file_list), gpx_filename))
for filepath in file_list:
add_exif_using_timestamp(filepath, gpx, time_offset)
print("Done geotagging {0} images in {1} seconds.".format(len(file_list), time.time()-t))
|
|
'''
Testing class for database API's archive related functions.
Authors: Ari Kairala, Petteri Ponsimaa
Originally adopted from Ivan's exercise 1 test class.
'''
import unittest, hashlib
import re, base64, copy, json, server
from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, ARCHIVE_PROFILE, API_VERSION
class RestArchiveTestCase(BaseTestCase):
'''
RestArchiveTestCase contains archive related unit tests of the database API.
'''
# List of user credentials in exam_archive_data_dump.sql for testing purposes
super_user = "bigboss"
super_pw = hashlib.sha256("ultimatepw").hexdigest()
admin_user = "antti.admin"
admin_pw = hashlib.sha256("qwerty1234").hexdigest()
basic_user = "testuser"
basic_pw = hashlib.sha256("testuser").hexdigest()
wrong_pw = "wrong-pw"
test_archive_template_1 = {"template": {
"data": [{"name": "archiveId", "value": 4},
{"name": "name", "value": "Computer Science"},
{"name": "organisationName", "value": "OTiT"},
{"name": "identificationNeeded", "value": 1}]
}
}
test_archive_template_2 = {"template": {
"data": [{"name": "archiveId", "value": 4},
{"name": "name", "value": "Wireless Communication Engineering"},
{"name": "organisationName", "value": "OTiT"},
{"name": "identificationNeeded", "value": 0}]
}
}
archivelist_resource_url = '/exam_archive/api/archives/'
# Set a ready header for authorized admin user
header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}
# Define a list of the sample contents of the database, so we can later compare it to the test results
@classmethod
def setUpClass(cls):
print "Testing ", cls.__name__
def test_user_not_authorized(self):
'''
        Check that a user is not able to get the archive list without authenticating.
'''
print '(' + self.test_user_not_authorized.__name__ + ')', \
self.test_user_not_authorized.__doc__
# Test ArchiveList/GET
rv = self.app.get(self.archivelist_resource_url)
assert rv.status_code == 401
assert PROBLEMJSON in rv.mimetype
# Try to get Archive list as super user with wrong password
rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.wrong_pw)})
assert rv.status_code == 401
assert PROBLEMJSON in rv.mimetype
def test_user_authorized(self):
'''
Check that authenticated user is able to get archive list.
'''
print '(' + self.test_user_authorized.__name__ + ')', \
self.test_user_authorized.__doc__
# Get Archive list as basic user
rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.basic_user + ":" + self.basic_pw)})
assert rv.status_code == 200
assert COLLECTIONJSON in rv.mimetype
# User authorized as super user
rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
base64.b64encode(self.super_user + ":" + self.super_pw)})
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+ARCHIVE_PROFILE,rv.content_type)
def test_archive_get(self):
'''
Check data consistency of Archive/GET and ArchiveList/GET.
'''
print '(' + self.test_archive_get.__name__ + ')', \
self.test_archive_get.__doc__
# Test ArchiveList/GET
self._archive_get(self.archivelist_resource_url)
def _archive_get(self, resource_url):
'''
Check data consistency of ArchiveList/GET.
'''
# Get all the archives from database
archives = db.browse_archives()
# Get all the archives from API
rv = self.app.get(resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,200)
self.assertEquals(COLLECTIONJSON+";"+ARCHIVE_PROFILE,rv.content_type)
input = json.loads(rv.data)
assert input
# Go through the data
data = input['collection']
items = data['items']
self.assertEquals(data['href'], resource_url)
self.assertEquals(data['version'], API_VERSION)
for item in items:
obj = self._create_dict(item['data'])
archive = db.get_archive(obj['archiveId'])
assert self._isIdentical(obj, archive)
def test_archive_post(self):
'''
Check that a new archive can be created.
'''
print '(' + self.test_archive_post.__name__ + ')', \
self.test_archive_post.__doc__
resource_url = self.archivelist_resource_url
new_archive = self.test_archive_template_1.copy()
# Test ArchiveList/POST
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
self.assertEquals(rv.status_code,201)
# Post returns the address of newly created resource URL in header, in 'location'. Get the identifier of
# the just created item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*archives/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
# Fetch the item from database and set it to archive_id_db, and convert the filled post template data above to
# similar format by replacing the keys with post data attributes.
archive_in_db = db.get_archive(new_id)
archive_posted = self._convert(new_archive)
# Compare the data in database and the post template above.
self.assertDictContainsSubset(archive_posted, archive_in_db)
# Next, try to add the same archive twice - there should be conflict
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
self.assertEquals(rv.status_code,409)
# Next check that by posting invalid JSON data we get status code 415
invalid_json = "INVALID " + json.dumps(new_archive)
rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,415)
# Check that template structure is validated
invalid_json = json.dumps(new_archive['template'])
rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,400)
        # Check for a missing required field by removing the second item in the data array (archive name)
invalid_template = copy.deepcopy(new_archive)
invalid_template['template']['data'].pop(1)
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(invalid_template))
self.assertEquals(rv.status_code,400)
# Lastly, delete the item
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
def test_archive_put(self):
'''
Check that an existing archive can be modified.
'''
print '(' + self.test_archive_put.__name__ + ')', \
self.test_archive_put.__doc__
resource_url = self.archivelist_resource_url
new_archive = self.test_archive_template_1
edited_archive = self.test_archive_template_2
# First create the archive
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
self.assertEquals(rv.status_code,201)
location = rv.location
self.assertIsNotNone(location)
# Then try to edit the archive
rv = self.app.put(location, headers=self.header_auth, data=json.dumps(edited_archive))
self.assertEquals(rv.status_code,200)
location = rv.location
self.assertIsNotNone(location)
# Put returns the address of newly created resource URL in header, in 'location'. Get the identifier of
# the just created item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*archives/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
# Fetch the item from database and set it to archive_id_db, and convert the filled post template data above to
# similar format by replacing the keys with post data attributes.
archive_in_db = db.get_archive(new_id)
archive_posted = self._convert(edited_archive)
# Compare the data in database and the post template above.
self.assertDictContainsSubset(archive_posted, archive_in_db)
# Next check that by posting invalid JSON data we get status code 415
invalid_json = "INVALID " + json.dumps(new_archive)
rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,415)
# Check that template structure is validated
invalid_json = json.dumps(new_archive['template'])
rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
self.assertEquals(rv.status_code,400)
# Lastly, we delete the archive
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
def test_archive_delete(self):
'''
        Check that an existing archive can be deleted.
'''
print '(' + self.test_archive_delete.__name__ + ')', \
self.test_archive_delete.__doc__
# First create the archive
resource_url = self.archivelist_resource_url
rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(self.test_archive_template_2))
self.assertEquals(rv.status_code,201)
location = rv.location
self.assertIsNotNone(location)
# Get the identifier of the just created item, fetch it from database and compare.
location = rv.location
location_match = re.match('.*archives/([^/]+)/', location)
self.assertIsNotNone(location_match)
new_id = location_match.group(1)
# Then, we delete the archive
rv = self.app.delete(location, headers=self.header_auth)
self.assertEquals(rv.status_code,204)
# Try to fetch the deleted archive from database - expect to fail
self.assertIsNone(db.get_archive(new_id))
def test_for_method_not_allowed(self):
'''
        Check that unsupported methods on the archive list resource return 405, method not allowed.
'''
        print '(' + self.test_for_method_not_allowed.__name__ + ')', \
            self.test_for_method_not_allowed.__doc__
# ArchiveList/PUT should not exist
rv = self.app.put(self.archivelist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
# ArchiveList/DELETE should not exist
rv = self.app.delete(self.archivelist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
def _isIdentical(self, api_item, db_item):
'''
Check whether template data corresponds to data stored in the database.
'''
return api_item['archiveId'] == db_item['archive_id'] and \
api_item['name'] == db_item['archive_name'] and \
api_item['organisationName'] == db_item['organisation_name'] and \
api_item['identificationNeeded'] == db_item['identification_needed']
def _convert(self, template_data):
'''
Convert template data to a dictionary representing the format the data is saved in the database.
'''
trans_table = {"name":"archive_name", "organisationName":"organisation_name", "archiveId":"archive_id", "dateModified": "date",
"modifierId":"modifier_id", "archiveId":"archive_id", "identificationNeeded":"identification_needed"}
data = self._create_dict(template_data['template']['data'])
db_item = {}
for key, val in data.items():
db_item[trans_table[key]] = val
return db_item
def _create_dict(self,item):
'''
Create a dictionary from template data for easier handling.
'''
dict = {}
for f in item:
dict[f['name']] = f['value']
return dict
if __name__ == '__main__':
print 'Start running tests'
unittest.main()
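# Illustrative example of the template-to-database translation exercised above: _convert()
# turns a Collection+JSON template such as
#     {"template": {"data": [{"name": "name", "value": "Computer Science"},
#                            {"name": "organisationName", "value": "OTiT"}]}}
# into the database-style dictionary
#     {"archive_name": "Computer Science", "organisation_name": "OTiT"}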
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from cinderclient import client as cc
from cinderclient import exceptions
from keystoneclient import exceptions as ks_exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.clients import client_plugin
from heat.engine import constraints
LOG = logging.getLogger(__name__)
CLIENT_NAME = 'cinder'
class CinderClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [VOLUME, VOLUME_V2] = ['volume', 'volumev2']
def get_volume_api_version(self):
'''Returns the most recent API version.'''
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
try:
self.url_for(service_type=self.VOLUME_V2,
endpoint_type=endpoint_type)
return 2
except ks_exceptions.EndpointNotFound:
try:
self.url_for(service_type=self.VOLUME,
endpoint_type=endpoint_type)
return 1
except ks_exceptions.EndpointNotFound:
return None
def _create(self):
con = self.context
volume_api_version = self.get_volume_api_version()
if volume_api_version == 1:
service_type = self.VOLUME
client_version = '1'
elif volume_api_version == 2:
service_type = self.VOLUME_V2
client_version = '2'
else:
raise exception.Error(_('No volume service available.'))
LOG.info(_LI('Creating Cinder client with volume API version %d.'),
volume_api_version)
endpoint_type = self._get_client_option(CLIENT_NAME, 'endpoint_type')
args = {
'service_type': service_type,
'auth_url': con.auth_url or '',
'project_id': con.tenant_id,
'username': None,
'api_key': None,
'endpoint_type': endpoint_type,
'http_log_debug': self._get_client_option(CLIENT_NAME,
'http_log_debug'),
'cacert': self._get_client_option(CLIENT_NAME, 'ca_file'),
'insecure': self._get_client_option(CLIENT_NAME, 'insecure')
}
client = cc.Client(client_version, **args)
management_url = self.url_for(service_type=service_type,
endpoint_type=endpoint_type)
client.client.auth_token = self.auth_token
client.client.management_url = management_url
client.volume_api_version = volume_api_version
return client
def get_volume(self, volume):
try:
return self.client().volumes.get(volume)
except exceptions.NotFound:
raise exception.EntityNotFound(entity='Volume', name=volume)
def get_volume_snapshot(self, snapshot):
try:
return self.client().volume_snapshots.get(snapshot)
except exceptions.NotFound:
raise exception.EntityNotFound(entity='VolumeSnapshot',
name=snapshot)
def get_volume_backup(self, backup):
try:
return self.client().backups.get(backup)
except exceptions.NotFound:
raise exception.EntityNotFound(entity='Volume backup',
name=backup)
def get_volume_type(self, volume_type):
vt_id = None
volume_type_list = self.client().volume_types.list()
for vt in volume_type_list:
if vt.name == volume_type:
vt_id = vt.id
break
if vt.id == volume_type:
vt_id = vt.id
break
if vt_id is None:
raise exception.EntityNotFound(entity='VolumeType',
name=volume_type)
return vt_id
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.OverLimit)
def is_conflict(self, ex):
return (isinstance(ex, exceptions.ClientException) and
ex.code == 409)
def check_detach_volume_complete(self, vol_id):
try:
vol = self.client().volumes.get(vol_id)
except Exception as ex:
self.ignore_not_found(ex)
return True
if vol.status in ('in-use', 'detaching'):
LOG.debug('%s - volume still in use' % vol_id)
return False
LOG.debug('Volume %(id)s - status: %(status)s' % {
'id': vol.id, 'status': vol.status})
if vol.status not in ('available', 'deleting'):
LOG.debug("Detachment failed - volume %(vol)s "
"is in %(status)s status" % {"vol": vol.id,
"status": vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume detachment failed'))
else:
return True
def check_attach_volume_complete(self, vol_id):
vol = self.client().volumes.get(vol_id)
if vol.status in ('available', 'attaching'):
LOG.debug("Volume %(id)s is being attached - "
"volume status: %(status)s" % {'id': vol_id,
'status': vol.status})
return False
if vol.status != 'in-use':
LOG.debug("Attachment failed - volume %(vol)s is "
"in %(status)s status" % {"vol": vol_id,
"status": vol.status})
raise exception.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume attachment failed'))
LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
return True
class BaseCinderConstraint(constraints.BaseCustomConstraint):
resource_client_name = CLIENT_NAME
class VolumeConstraint(BaseCinderConstraint):
resource_getter_name = 'get_volume'
class VolumeSnapshotConstraint(BaseCinderConstraint):
resource_getter_name = 'get_volume_snapshot'
class VolumeTypeConstraint(BaseCinderConstraint):
resource_getter_name = 'get_volume_type'
class VolumeBackupConstraint(BaseCinderConstraint):
resource_getter_name = 'get_volume_backup'
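# Illustrative sketch (not part of the plugin): the name-or-ID matching rule
# used by get_volume_type() above, exercised against stand-in objects so it
# can run without a Cinder endpoint. VolumeType and the catalogue below are
# hypothetical.
def _example_volume_type_lookup():  # illustrative only; never called by Heat
    class VolumeType(object):
        def __init__(self, id, name):
            self.id = id
            self.name = name

    catalogue = [VolumeType('0001', 'ssd'), VolumeType('0002', 'hdd')]

    def lookup(volume_type):
        for vt in catalogue:
            if volume_type in (vt.name, vt.id):
                return vt.id
        return None

    assert lookup('ssd') == '0001'       # match by name
    assert lookup('0002') == '0002'      # match by ID
    assert lookup('missing') is None     # plugin raises EntityNotFound here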
|
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cursor class to iterate over Mongo query results."""
import copy
import threading
import warnings
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
overload,
)
from bson import RE_TYPE, _convert_raw_document_lists_to_streams
from bson.code import Code
from bson.son import SON
from pymongo import helpers
from pymongo.collation import validate_collation_or_none
from pymongo.common import (
validate_boolean,
validate_is_document_type,
validate_is_mapping,
)
from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure
from pymongo.message import (
_CursorAddress,
_GetMore,
_Query,
_RawBatchGetMore,
_RawBatchQuery,
)
from pymongo.response import PinnedResponse
from pymongo.typings import _CollationIn, _DocumentType
# These errors mean that the server has already killed the cursor so there is
# no need to send killCursors.
_CURSOR_CLOSED_ERRORS = frozenset(
[
43, # CursorNotFound
50, # MaxTimeMSExpired
175, # QueryPlanKilled
237, # CursorKilled
# On a tailable cursor, the following errors mean the capped collection
# rolled over.
# MongoDB 2.6:
# {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0}
28617,
# MongoDB 3.0:
# {'$err': 'getMore executor error: UnknownError no details available',
# 'code': 17406, 'ok': 0}
17406,
# MongoDB 3.2 + 3.4:
# {'ok': 0.0, 'errmsg': 'GetMore command executor error:
# CappedPositionLost: CollectionScan died due to failure to restore
# tailable cursor position. Last seen record id: RecordId(3)',
# 'code': 96}
96,
# MongoDB 3.6+:
# {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to
# restore tailable cursor position. Last seen record id: RecordId(3)"',
# 'code': 136, 'codeName': 'CappedPositionLost'}
136,
]
)
_QUERY_OPTIONS = {
"tailable_cursor": 2,
"secondary_okay": 4,
"oplog_replay": 8,
"no_timeout": 16,
"await_data": 32,
"exhaust": 64,
"partial": 128,
}
class CursorType(object):
NON_TAILABLE = 0
"""The standard cursor type."""
TAILABLE = _QUERY_OPTIONS["tailable_cursor"]
"""The tailable cursor type.
Tailable cursors are only for use with capped collections. They are not
closed when the last data is retrieved but are kept open and the cursor
    location marks the final document position. If more data is received,
    iteration of the cursor will continue from the last document received.
"""
TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"]
"""A tailable cursor with the await option set.
Creates a tailable cursor that will wait for a few seconds after returning
the full result set so that it can capture and return additional data added
during the query.
"""
EXHAUST = _QUERY_OPTIONS["exhaust"]
"""An exhaust cursor.
MongoDB will stream batched results to the client without waiting for the
client to request each batch, reducing latency.
"""
class _SocketManager(object):
"""Used with exhaust cursors to ensure the socket is returned."""
def __init__(self, sock, more_to_come):
self.sock = sock
self.more_to_come = more_to_come
self.closed = False
self.lock = threading.Lock()
def update_exhaust(self, more_to_come):
self.more_to_come = more_to_come
def close(self):
"""Return this instance's socket to the connection pool."""
if not self.closed:
self.closed = True
self.sock.unpin()
self.sock = None
_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]]
_Hint = Union[str, _Sort]
if TYPE_CHECKING:
from pymongo.client_session import ClientSession
from pymongo.collection import Collection
class Cursor(Generic[_DocumentType]):
"""A cursor / iterator over Mongo query results."""
_query_class = _Query
_getmore_class = _GetMore
def __init__(
self,
collection: "Collection[_DocumentType]",
filter: Optional[Mapping[str, Any]] = None,
projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
skip: int = 0,
limit: int = 0,
no_cursor_timeout: bool = False,
cursor_type: int = CursorType.NON_TAILABLE,
sort: Optional[_Sort] = None,
allow_partial_results: bool = False,
oplog_replay: bool = False,
batch_size: int = 0,
collation: Optional[_CollationIn] = None,
hint: Optional[_Hint] = None,
max_scan: Optional[int] = None,
max_time_ms: Optional[int] = None,
max: Optional[_Sort] = None,
min: Optional[_Sort] = None,
return_key: Optional[bool] = None,
show_record_id: Optional[bool] = None,
snapshot: Optional[bool] = None,
comment: Optional[Any] = None,
session: Optional["ClientSession"] = None,
allow_disk_use: Optional[bool] = None,
        let: Optional[Mapping[str, Any]] = None,
) -> None:
"""Create a new cursor.
Should not be called directly by application developers - see
:meth:`~pymongo.collection.Collection.find` instead.
.. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
"""
# Initialize all attributes used in __del__ before possibly raising
# an error to avoid attribute errors during garbage collection.
self.__collection: Collection[_DocumentType] = collection
self.__id: Any = None
self.__exhaust = False
self.__sock_mgr: Any = None
self.__killed = False
self.__session: Optional["ClientSession"]
if session:
self.__session = session
self.__explicit_session = True
else:
self.__session = None
self.__explicit_session = False
spec: Mapping[str, Any] = filter or {}
validate_is_mapping("filter", spec)
if not isinstance(skip, int):
raise TypeError("skip must be an instance of int")
if not isinstance(limit, int):
raise TypeError("limit must be an instance of int")
validate_boolean("no_cursor_timeout", no_cursor_timeout)
if no_cursor_timeout and not self.__explicit_session:
warnings.warn(
"use an explicit session with no_cursor_timeout=True "
"otherwise the cursor may still timeout after "
"30 minutes, for more info see "
"https://docs.mongodb.com/v4.4/reference/method/"
"cursor.noCursorTimeout/"
"#session-idle-timeout-overrides-nocursortimeout",
UserWarning,
stacklevel=2,
)
if cursor_type not in (
CursorType.NON_TAILABLE,
CursorType.TAILABLE,
CursorType.TAILABLE_AWAIT,
CursorType.EXHAUST,
):
raise ValueError("not a valid value for cursor_type")
validate_boolean("allow_partial_results", allow_partial_results)
validate_boolean("oplog_replay", oplog_replay)
if not isinstance(batch_size, int):
raise TypeError("batch_size must be an integer")
if batch_size < 0:
raise ValueError("batch_size must be >= 0")
# Only set if allow_disk_use is provided by the user, else None.
if allow_disk_use is not None:
allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)
if projection is not None:
projection = helpers._fields_list_to_dict(projection, "projection")
if let is not None:
validate_is_document_type("let", let)
self.__let = let
self.__spec = spec
self.__has_filter = filter is not None
self.__projection = projection
self.__skip = skip
self.__limit = limit
self.__batch_size = batch_size
self.__ordering = sort and helpers._index_document(sort) or None
self.__max_scan = max_scan
self.__explain = False
self.__comment = comment
self.__max_time_ms = max_time_ms
self.__max_await_time_ms: Optional[int] = None
self.__max: Optional[Union[SON[Any, Any], _Sort]] = max
self.__min: Optional[Union[SON[Any, Any], _Sort]] = min
self.__collation = validate_collation_or_none(collation)
self.__return_key = return_key
self.__show_record_id = show_record_id
self.__allow_disk_use = allow_disk_use
self.__snapshot = snapshot
self.__set_hint(hint)
# Exhaust cursor support
if cursor_type == CursorType.EXHAUST:
if self.__collection.database.client.is_mongos:
raise InvalidOperation("Exhaust cursors are not supported by mongos")
if limit:
raise InvalidOperation("Can't use limit and exhaust together.")
self.__exhaust = True
# This is ugly. People want to be able to do cursor[5:5] and
# get an empty result set (old behavior was an
# exception). It's hard to do that right, though, because the
# server uses limit(0) to mean 'no limit'. So we set __empty
# in that case and check for it when iterating. We also unset
# it anytime we change __limit.
self.__empty = False
self.__data: deque = deque()
self.__address = None
self.__retrieved = 0
self.__codec_options = collection.codec_options
# Read preference is set when the initial find is sent.
self.__read_preference = None
self.__read_concern = collection.read_concern
self.__query_flags = cursor_type
if no_cursor_timeout:
self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
if allow_partial_results:
self.__query_flags |= _QUERY_OPTIONS["partial"]
if oplog_replay:
self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
# The namespace to use for find/getMore commands.
self.__dbname = collection.database.name
self.__collname = collection.name
@property
def collection(self) -> "Collection[_DocumentType]":
"""The :class:`~pymongo.collection.Collection` that this
:class:`Cursor` is iterating.
"""
return self.__collection
@property
def retrieved(self) -> int:
"""The number of documents retrieved so far."""
return self.__retrieved
def __del__(self) -> None:
self.__die()
def rewind(self) -> "Cursor[_DocumentType]":
"""Rewind this cursor to its unevaluated state.
Reset this cursor if it has been partially or completely evaluated.
Any options that are present on the cursor will remain in effect.
Future iterating performed on this cursor will cause new queries to
be sent to the server, even if the resultant data has already been
retrieved by this cursor.
"""
self.close()
self.__data = deque()
self.__id = None
self.__address = None
self.__retrieved = 0
self.__killed = False
return self
def clone(self) -> "Cursor[_DocumentType]":
"""Get a clone of this cursor.
Returns a new Cursor instance with options matching those that have
been set on the current instance. The clone will be completely
unevaluated, even if the current instance has been partially or
completely evaluated.
"""
return self._clone(True)
def _clone(self, deepcopy=True, base=None):
"""Internal clone helper."""
if not base:
if self.__explicit_session:
base = self._clone_base(self.__session)
else:
base = self._clone_base(None)
values_to_clone = (
"spec",
"projection",
"skip",
"limit",
"max_time_ms",
"max_await_time_ms",
"comment",
"max",
"min",
"ordering",
"explain",
"hint",
"batch_size",
"max_scan",
"query_flags",
"collation",
"empty",
"show_record_id",
"return_key",
"allow_disk_use",
"snapshot",
"exhaust",
"has_filter",
)
data = dict(
(k, v)
for k, v in self.__dict__.items()
if k.startswith("_Cursor__") and k[9:] in values_to_clone
)
if deepcopy:
data = self._deepcopy(data)
base.__dict__.update(data)
return base
def _clone_base(self, session):
"""Creates an empty Cursor object for information to be copied into."""
return self.__class__(self.__collection, session=session)
def __die(self, synchronous=False):
"""Closes this cursor."""
try:
already_killed = self.__killed
except AttributeError:
# __init__ did not run to completion (or at all).
return
self.__killed = True
if self.__id and not already_killed:
cursor_id = self.__id
address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname))
else:
# Skip killCursors.
cursor_id = 0
address = None
self.__collection.database.client._cleanup_cursor(
synchronous,
cursor_id,
address,
self.__sock_mgr,
self.__session,
self.__explicit_session,
)
if not self.__explicit_session:
self.__session = None
self.__sock_mgr = None
def close(self) -> None:
"""Explicitly close / kill this cursor."""
self.__die(True)
def __query_spec(self):
"""Get the spec to use for a query."""
operators = {}
if self.__ordering:
operators["$orderby"] = self.__ordering
if self.__explain:
operators["$explain"] = True
if self.__hint:
operators["$hint"] = self.__hint
if self.__let:
operators["let"] = self.__let
if self.__comment:
operators["$comment"] = self.__comment
if self.__max_scan:
operators["$maxScan"] = self.__max_scan
if self.__max_time_ms is not None:
operators["$maxTimeMS"] = self.__max_time_ms
if self.__max:
operators["$max"] = self.__max
if self.__min:
operators["$min"] = self.__min
if self.__return_key is not None:
operators["$returnKey"] = self.__return_key
if self.__show_record_id is not None:
# This is upgraded to showRecordId for MongoDB 3.2+ "find" command.
operators["$showDiskLoc"] = self.__show_record_id
if self.__snapshot is not None:
operators["$snapshot"] = self.__snapshot
if operators:
# Make a shallow copy so we can cleanly rewind or clone.
spec = copy.copy(self.__spec)
# Allow-listed commands must be wrapped in $query.
if "$query" not in spec:
# $query has to come first
spec = SON([("$query", spec)])
if not isinstance(spec, SON):
# Ensure the spec is SON. As order is important this will
# ensure its set before merging in any extra operators.
spec = SON(spec)
spec.update(operators)
return spec
# Have to wrap with $query if "query" is the first key.
# We can't just use $query anytime "query" is a key as
# that breaks commands like count and find_and_modify.
# Checking spec.keys()[0] covers the case that the spec
# was passed as an instance of SON or OrderedDict.
elif "query" in self.__spec and (
len(self.__spec) == 1 or next(iter(self.__spec)) == "query"
):
return SON({"$query": self.__spec})
return self.__spec
def __check_okay_to_chain(self):
"""Check if it is okay to chain more options onto this cursor."""
if self.__retrieved or self.__id is not None:
raise InvalidOperation("cannot set options after executing query")
def add_option(self, mask: int) -> "Cursor[_DocumentType]":
"""Set arbitrary query flags using a bitmask.
To set the tailable flag:
cursor.add_option(2)
"""
if not isinstance(mask, int):
raise TypeError("mask must be an int")
self.__check_okay_to_chain()
if mask & _QUERY_OPTIONS["exhaust"]:
if self.__limit:
raise InvalidOperation("Can't use limit and exhaust together.")
if self.__collection.database.client.is_mongos:
raise InvalidOperation("Exhaust cursors are not supported by mongos")
self.__exhaust = True
self.__query_flags |= mask
return self
def remove_option(self, mask: int) -> "Cursor[_DocumentType]":
"""Unset arbitrary query flags using a bitmask.
To unset the tailable flag:
cursor.remove_option(2)
"""
if not isinstance(mask, int):
raise TypeError("mask must be an int")
self.__check_okay_to_chain()
if mask & _QUERY_OPTIONS["exhaust"]:
self.__exhaust = False
self.__query_flags &= ~mask
return self
def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]":
"""Specifies whether MongoDB can use temporary disk files while
processing a blocking sort operation.
Raises :exc:`TypeError` if `allow_disk_use` is not a boolean.
.. note:: `allow_disk_use` requires server version **>= 4.4**
:Parameters:
- `allow_disk_use`: if True, MongoDB may use temporary
disk files to store data exceeding the system memory limit while
processing a blocking sort operation.
.. versionadded:: 3.11
"""
if not isinstance(allow_disk_use, bool):
raise TypeError("allow_disk_use must be a bool")
self.__check_okay_to_chain()
self.__allow_disk_use = allow_disk_use
return self
def limit(self, limit: int) -> "Cursor[_DocumentType]":
"""Limits the number of results to be returned by this cursor.
Raises :exc:`TypeError` if `limit` is not an integer. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
has already been used. The last `limit` applied to this cursor
takes precedence. A limit of ``0`` is equivalent to no limit.
:Parameters:
- `limit`: the number of results to return
.. seealso:: The MongoDB documentation on `limit <https://dochub.mongodb.org/core/limit>`_.
"""
if not isinstance(limit, int):
raise TypeError("limit must be an integer")
if self.__exhaust:
raise InvalidOperation("Can't use limit and exhaust together.")
self.__check_okay_to_chain()
self.__empty = False
self.__limit = limit
return self
def batch_size(self, batch_size: int) -> "Cursor[_DocumentType]":
"""Limits the number of documents returned in one batch. Each batch
requires a round trip to the server. It can be adjusted to optimize
performance and limit data transfer.
        .. note:: batch_size cannot override MongoDB's internal limits on the
          amount of data it will return to the client in a single batch (i.e.,
          if you set batch size to 1,000,000,000, MongoDB will currently only
          return 4-16MB of results per batch).
Raises :exc:`TypeError` if `batch_size` is not an integer.
Raises :exc:`ValueError` if `batch_size` is less than ``0``.
Raises :exc:`~pymongo.errors.InvalidOperation` if this
:class:`Cursor` has already been used. The last `batch_size`
applied to this cursor takes precedence.
:Parameters:
- `batch_size`: The size of each batch of results requested.
"""
if not isinstance(batch_size, int):
raise TypeError("batch_size must be an integer")
if batch_size < 0:
raise ValueError("batch_size must be >= 0")
self.__check_okay_to_chain()
self.__batch_size = batch_size
return self
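    # Illustrative usage (comments only, not part of the original module):
    # fetching 100 documents per getMore round trip; the collection name is
    # a placeholder.
    #
    #   for doc in db.things.find().batch_size(100):
    #       process(doc)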
def skip(self, skip: int) -> "Cursor[_DocumentType]":
"""Skips the first `skip` results of this cursor.
Raises :exc:`TypeError` if `skip` is not an integer. Raises
:exc:`ValueError` if `skip` is less than ``0``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. The last `skip` applied to this cursor takes
precedence.
:Parameters:
- `skip`: the number of results to skip
"""
if not isinstance(skip, int):
raise TypeError("skip must be an integer")
if skip < 0:
raise ValueError("skip must be >= 0")
self.__check_okay_to_chain()
self.__skip = skip
return self
def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]":
"""Specifies a time limit for a query operation. If the specified
time is exceeded, the operation will be aborted and
:exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms`
is ``None`` no limit is applied.
Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``.
Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor`
has already been used.
:Parameters:
- `max_time_ms`: the time limit after which the operation is aborted
"""
if not isinstance(max_time_ms, int) and max_time_ms is not None:
raise TypeError("max_time_ms must be an integer or None")
self.__check_okay_to_chain()
self.__max_time_ms = max_time_ms
return self
def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_DocumentType]":
"""Specifies a time limit for a getMore operation on a
:attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other
types of cursor max_await_time_ms is ignored.
Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or
``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this
:class:`Cursor` has already been used.
.. note:: `max_await_time_ms` requires server version **>= 3.2**
:Parameters:
- `max_await_time_ms`: the time limit after which the operation is
aborted
.. versionadded:: 3.2
"""
if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None:
raise TypeError("max_await_time_ms must be an integer or None")
self.__check_okay_to_chain()
# Ignore max_await_time_ms if not tailable or await_data is False.
if self.__query_flags & CursorType.TAILABLE_AWAIT:
self.__max_await_time_ms = max_await_time_ms
return self
@overload
def __getitem__(self, index: int) -> _DocumentType:
...
@overload
def __getitem__(self, index: slice) -> "Cursor[_DocumentType]":
...
def __getitem__(self, index):
"""Get a single document or a slice of documents from this cursor.
.. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each
index access or slice requires that a new query be run using skip
and limit. Do not iterate the cursor using index accesses.
The following example is **extremely inefficient** and may return
surprising results::
cursor = db.collection.find()
# Warning: This runs a new query for each document.
# Don't do this!
for idx in range(10):
print(cursor[idx])
Raises :class:`~pymongo.errors.InvalidOperation` if this
cursor has already been used.
To get a single document use an integral index, e.g.::
>>> db.test.find()[50]
An :class:`IndexError` will be raised if the index is negative
        or greater than the number of documents in this cursor. Any
limit previously applied to this cursor will be ignored.
To get a slice of documents use a slice index, e.g.::
>>> db.test.find()[20:25]
This will return this cursor with a limit of ``5`` and skip of
``20`` applied. Using a slice index will override any prior
limits or skips applied to this cursor (including those
applied through previous calls to this method). Raises
:class:`IndexError` when the slice has a step, a negative
start value, or a stop value less than or equal to the start
value.
:Parameters:
- `index`: An integer or slice index to be applied to this cursor
"""
self.__check_okay_to_chain()
self.__empty = False
if isinstance(index, slice):
if index.step is not None:
raise IndexError("Cursor instances do not support slice steps")
skip = 0
if index.start is not None:
if index.start < 0:
raise IndexError("Cursor instances do not support negative indices")
skip = index.start
if index.stop is not None:
limit = index.stop - skip
if limit < 0:
raise IndexError(
"stop index must be greater than start index for slice %r" % index
)
if limit == 0:
self.__empty = True
else:
limit = 0
self.__skip = skip
self.__limit = limit
return self
if isinstance(index, int):
if index < 0:
raise IndexError("Cursor instances do not support negative indices")
clone = self.clone()
clone.skip(index + self.__skip)
clone.limit(-1) # use a hard limit
clone.__query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371
for doc in clone:
return doc
raise IndexError("no such item for Cursor instance")
raise TypeError("index %r cannot be applied to Cursor instances" % index)
def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]":
"""**DEPRECATED** - Limit the number of documents to scan when
performing the query.
Raises :class:`~pymongo.errors.InvalidOperation` if this
cursor has already been used. Only the last :meth:`max_scan`
applied to this cursor has any effect.
:Parameters:
- `max_scan`: the maximum number of documents to scan
.. versionchanged:: 3.7
Deprecated :meth:`max_scan`. Support for this option is deprecated in
MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side
execution time.
"""
self.__check_okay_to_chain()
self.__max_scan = max_scan
return self
def max(self, spec: _Sort) -> "Cursor[_DocumentType]":
"""Adds ``max`` operator that specifies upper bound for specific index.
When using ``max``, :meth:`~hint` should also be configured to ensure
the query uses the expected index and starting in MongoDB 4.2
:meth:`~hint` will be required.
:Parameters:
- `spec`: a list of field, limit pairs specifying the exclusive
upper bound for all keys of a specific index in order.
.. versionchanged:: 3.8
Deprecated cursors that use ``max`` without a :meth:`~hint`.
.. versionadded:: 2.7
"""
if not isinstance(spec, (list, tuple)):
raise TypeError("spec must be an instance of list or tuple")
self.__check_okay_to_chain()
self.__max = SON(spec)
return self
def min(self, spec: _Sort) -> "Cursor[_DocumentType]":
"""Adds ``min`` operator that specifies lower bound for specific index.
When using ``min``, :meth:`~hint` should also be configured to ensure
the query uses the expected index and starting in MongoDB 4.2
:meth:`~hint` will be required.
:Parameters:
- `spec`: a list of field, limit pairs specifying the inclusive
lower bound for all keys of a specific index in order.
.. versionchanged:: 3.8
Deprecated cursors that use ``min`` without a :meth:`~hint`.
.. versionadded:: 2.7
"""
if not isinstance(spec, (list, tuple)):
raise TypeError("spec must be an instance of list or tuple")
self.__check_okay_to_chain()
self.__min = SON(spec)
return self
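    # Illustrative usage (comments only, not part of the original module):
    # bounding an index scan on a hypothetical index [('a', 1)]; hint() must
    # name the same index for min()/max() to be accepted on MongoDB 4.2+.
    #
    #   cursor = (db.test.find()
    #             .min([('a', 10)])
    #             .max([('a', 20)])
    #             .hint([('a', 1)]))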
def sort(
self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None
) -> "Cursor[_DocumentType]":
"""Sorts this cursor's results.
Pass a field name and a direction, either
:data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::
for doc in collection.find().sort('field', pymongo.ASCENDING):
print(doc)
To sort by multiple fields, pass a list of (key, direction) pairs::
for doc in collection.find().sort([
('field1', pymongo.ASCENDING),
('field2', pymongo.DESCENDING)]):
print(doc)
Text search results can be sorted by relevance::
cursor = db.test.find(
{'$text': {'$search': 'some words'}},
{'score': {'$meta': 'textScore'}})
# Sort by 'score' field.
cursor.sort([('score', {'$meta': 'textScore'})])
for doc in cursor:
print(doc)
For more advanced text search functionality, see MongoDB's
`Atlas Search <https://docs.atlas.mongodb.com/atlas-search/>`_.
Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used. Only the last :meth:`sort` applied to this
cursor has any effect.
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the keys to sort on
- `direction` (optional): only used if `key_or_list` is a single
key, if not given :data:`~pymongo.ASCENDING` is assumed
"""
self.__check_okay_to_chain()
keys = helpers._index_list(key_or_list, direction)
self.__ordering = helpers._index_document(keys)
return self
def distinct(self, key: str) -> List:
"""Get a list of distinct values for `key` among all documents
in the result set of this query.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
The :meth:`distinct` method obeys the
:attr:`~pymongo.collection.Collection.read_preference` of the
:class:`~pymongo.collection.Collection` instance on which
:meth:`~pymongo.collection.Collection.find` was called.
:Parameters:
- `key`: name of key for which we want to get the distinct values
.. seealso:: :meth:`pymongo.collection.Collection.distinct`
"""
options: Dict[str, Any] = {}
if self.__spec:
options["query"] = self.__spec
if self.__max_time_ms is not None:
options["maxTimeMS"] = self.__max_time_ms
if self.__comment:
options["comment"] = self.__comment
if self.__collation is not None:
options["collation"] = self.__collation
return self.__collection.distinct(key, session=self.__session, **options)
def explain(self) -> _DocumentType:
"""Returns an explain plan record for this cursor.
.. note:: This method uses the default verbosity mode of the
`explain command
<https://docs.mongodb.com/manual/reference/command/explain/>`_,
``allPlansExecution``. To use a different verbosity use
:meth:`~pymongo.database.Database.command` to run the explain
command directly.
.. seealso:: The MongoDB documentation on `explain <https://dochub.mongodb.org/core/explain>`_.
"""
c = self.clone()
c.__explain = True
# always use a hard limit for explains
if c.__limit:
c.__limit = -abs(c.__limit)
return next(c)
def __set_hint(self, index):
if index is None:
self.__hint = None
return
if isinstance(index, str):
self.__hint = index
else:
self.__hint = helpers._index_document(index)
def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]":
"""Adds a 'hint', telling Mongo the proper index to use for the query.
Judicious use of hints can greatly improve query
performance. When doing a query on multiple fields (at least
one of which is indexed) pass the indexed field as a hint to
the query. Raises :class:`~pymongo.errors.OperationFailure` if the
provided hint requires an index that does not exist on this collection,
and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
already been used.
`index` should be an index as passed to
:meth:`~pymongo.collection.Collection.create_index`
(e.g. ``[('field', ASCENDING)]``) or the name of the index.
If `index` is ``None`` any existing hint for this query is
cleared. The last hint applied to this cursor takes precedence
over all others.
:Parameters:
- `index`: index to hint on (as an index specifier)
"""
self.__check_okay_to_chain()
self.__set_hint(index)
return self
def comment(self, comment: Any) -> "Cursor[_DocumentType]":
"""Adds a 'comment' to the cursor.
http://docs.mongodb.org/manual/reference/operator/comment/
:Parameters:
- `comment`: A string to attach to the query to help interpret and
trace the operation in the server logs and in profile data.
.. versionadded:: 2.7
"""
self.__check_okay_to_chain()
self.__comment = comment
return self
def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]":
"""Adds a `$where`_ clause to this query.
The `code` argument must be an instance of :class:`basestring`
(:class:`str` in python 3) or :class:`~bson.code.Code`
containing a JavaScript expression. This expression will be
evaluated for each document scanned. Only those documents
for which the expression evaluates to *true* will be returned
as results. The keyword *this* refers to the object currently
being scanned. For example::
# Find all documents where field "a" is less than "b" plus "c".
for doc in db.test.find().where('this.a < (this.b + this.c)'):
print(doc)
Raises :class:`TypeError` if `code` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidOperation` if this
:class:`Cursor` has already been used. Only the last call to
:meth:`where` applied to a :class:`Cursor` has any effect.
.. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code`
with scope variables. Consider using `$expr`_ instead.
:Parameters:
- `code`: JavaScript expression to use as a filter
.. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
.. _$where: https://docs.mongodb.com/manual/reference/operator/query/where/
"""
self.__check_okay_to_chain()
if not isinstance(code, Code):
code = Code(code)
# Avoid overwriting a filter argument that was given by the user
# when updating the spec.
spec: Dict[str, Any]
if self.__has_filter:
spec = dict(self.__spec)
else:
spec = cast(Dict, self.__spec)
spec["$where"] = code
self.__spec = spec
return self
def collation(self, collation: Optional[_CollationIn]) -> "Cursor[_DocumentType]":
"""Adds a :class:`~pymongo.collation.Collation` to this query.
Raises :exc:`TypeError` if `collation` is not an instance of
:class:`~pymongo.collation.Collation` or a ``dict``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. Only the last collation applied to this cursor has
any effect.
:Parameters:
- `collation`: An instance of :class:`~pymongo.collation.Collation`.
"""
self.__check_okay_to_chain()
self.__collation = validate_collation_or_none(collation)
return self
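    # Illustrative usage (comments only, not part of the original module):
    # applying a case-insensitive collation to a query; the locale and
    # strength values are just an example.
    #
    #   from pymongo.collation import Collation
    #   cursor = db.test.find({'name': 'adam'}).collation(
    #       Collation(locale='en_US', strength=2))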
def __send_message(self, operation):
"""Send a query or getmore operation and handles the response.
If operation is ``None`` this is an exhaust cursor, which reads
the next result batch off the exhaust socket instead of
sending getMore messages to the server.
Can raise ConnectionFailure.
"""
client = self.__collection.database.client
# OP_MSG is required to support exhaust cursors with encryption.
if client._encrypter and self.__exhaust:
raise InvalidOperation("exhaust cursors do not support auto encryption")
try:
response = client._run_operation(
operation, self._unpack_response, address=self.__address
)
except OperationFailure as exc:
if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust:
# Don't send killCursors because the cursor is already closed.
self.__killed = True
self.close()
# If this is a tailable cursor the error is likely
# due to capped collection roll over. Setting
# self.__killed to True ensures Cursor.alive will be
# False. No need to re-raise.
if (
exc.code in _CURSOR_CLOSED_ERRORS
and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"]
):
return
raise
except ConnectionFailure:
# Don't send killCursors because the cursor is already closed.
self.__killed = True
self.close()
raise
except Exception:
self.close()
raise
self.__address = response.address
if isinstance(response, PinnedResponse):
if not self.__sock_mgr:
self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come)
cmd_name = operation.name
docs = response.docs
if response.from_command:
if cmd_name != "explain":
cursor = docs[0]["cursor"]
self.__id = cursor["id"]
if cmd_name == "find":
documents = cursor["firstBatch"]
# Update the namespace used for future getMore commands.
ns = cursor.get("ns")
if ns:
self.__dbname, self.__collname = ns.split(".", 1)
else:
documents = cursor["nextBatch"]
self.__data = deque(documents)
self.__retrieved += len(documents)
else:
self.__id = 0
self.__data = deque(docs)
self.__retrieved += len(docs)
else:
self.__id = response.data.cursor_id
self.__data = deque(docs)
self.__retrieved += response.data.number_returned
if self.__id == 0:
# Don't wait for garbage collection to call __del__, return the
# socket and the session to the pool now.
self.close()
if self.__limit and self.__id and self.__limit <= self.__retrieved:
self.close()
def _unpack_response(
self, response, cursor_id, codec_options, user_fields=None, legacy_response=False
):
return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response)
def _read_preference(self):
if self.__read_preference is None:
# Save the read preference for getMore commands.
self.__read_preference = self.__collection._read_preference_for(self.session)
return self.__read_preference
def _refresh(self):
"""Refreshes the cursor with more data from Mongo.
Returns the length of self.__data after refresh. Will exit early if
self.__data is already non-empty. Raises OperationFailure when the
cursor cannot be refreshed due to an error on the query.
"""
if len(self.__data) or self.__killed:
return len(self.__data)
if not self.__session:
self.__session = self.__collection.database.client._ensure_session()
if self.__id is None: # Query
if (self.__min or self.__max) and not self.__hint:
raise InvalidOperation(
"Passing a 'hint' is required when using the min/max query"
" option to ensure the query utilizes the correct index"
)
q = self._query_class(
self.__query_flags,
self.__collection.database.name,
self.__collection.name,
self.__skip,
self.__query_spec(),
self.__projection,
self.__codec_options,
self._read_preference(),
self.__limit,
self.__batch_size,
self.__read_concern,
self.__collation,
self.__session,
self.__collection.database.client,
self.__allow_disk_use,
self.__exhaust,
)
self.__send_message(q)
elif self.__id: # Get More
if self.__limit:
limit = self.__limit - self.__retrieved
if self.__batch_size:
limit = min(limit, self.__batch_size)
else:
limit = self.__batch_size
# Exhaust cursors don't send getMore messages.
g = self._getmore_class(
self.__dbname,
self.__collname,
limit,
self.__id,
self.__codec_options,
self._read_preference(),
self.__session,
self.__collection.database.client,
self.__max_await_time_ms,
self.__sock_mgr,
self.__exhaust,
)
self.__send_message(g)
return len(self.__data)
@property
def alive(self) -> bool:
"""Does this cursor have the potential to return more data?
This is mostly useful with `tailable cursors
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_
since they will stop iterating even though they *may* return more
results in the future.
With regular cursors, simply use a for loop instead of :attr:`alive`::
for doc in collection.find():
print(doc)
.. note:: Even if :attr:`alive` is True, :meth:`next` can raise
:exc:`StopIteration`. :attr:`alive` can also be True while iterating
a cursor from a failed server. In this case :attr:`alive` will
return False after :meth:`next` fails to retrieve the next batch
of results from the server.
"""
return bool(len(self.__data) or (not self.__killed))
@property
def cursor_id(self) -> Optional[int]:
"""Returns the id of the cursor
.. versionadded:: 2.2
"""
return self.__id
@property
def address(self) -> Optional[Tuple[str, Any]]:
"""The (host, port) of the server used, or None.
.. versionchanged:: 3.0
Renamed from "conn_id".
"""
return self.__address
@property
def session(self) -> Optional["ClientSession"]:
"""The cursor's :class:`~pymongo.client_session.ClientSession`, or None.
.. versionadded:: 3.6
"""
if self.__explicit_session:
return self.__session
return None
def __iter__(self) -> "Cursor[_DocumentType]":
return self
def next(self) -> _DocumentType:
"""Advance the cursor."""
if self.__empty:
raise StopIteration
if len(self.__data) or self._refresh():
return self.__data.popleft()
else:
raise StopIteration
__next__ = next
def __enter__(self) -> "Cursor[_DocumentType]":
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.close()
def __copy__(self) -> "Cursor[_DocumentType]":
"""Support function for `copy.copy()`.
.. versionadded:: 2.4
"""
return self._clone(deepcopy=False)
def __deepcopy__(self, memo: Any) -> Any:
"""Support function for `copy.deepcopy()`.
.. versionadded:: 2.4
"""
return self._clone(deepcopy=True)
def _deepcopy(self, x, memo=None):
"""Deepcopy helper for the data dictionary or list.
Regular expressions cannot be deep copied but as they are immutable we
don't have to copy them when cloning.
"""
y: Any
if not hasattr(x, "items"):
y, is_list, iterator = [], True, enumerate(x)
else:
y, is_list, iterator = {}, False, x.items()
if memo is None:
memo = {}
val_id = id(x)
if val_id in memo:
return memo.get(val_id)
memo[val_id] = y
for key, value in iterator:
if isinstance(value, (dict, list)) and not isinstance(value, SON):
value = self._deepcopy(value, memo)
elif not isinstance(value, RE_TYPE):
value = copy.deepcopy(value, memo)
if is_list:
y.append(value)
else:
if not isinstance(key, RE_TYPE):
key = copy.deepcopy(key, memo)
y[key] = value
return y
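# Illustrative sketch (not part of the original module): Cursor slicing maps
# onto skip/limit exactly as implemented in __getitem__ above. No server is
# contacted because connect=False and neither cursor is iterated; the
# database and collection names are placeholders.
def _example_cursor_slicing():  # illustrative only; never called by pymongo
    from pymongo import MongoClient

    coll = MongoClient("mongodb://localhost:27017", connect=False).test.docs
    sliced = coll.find()[20:25]              # same as .skip(20).limit(5)
    explicit = coll.find().skip(20).limit(5)
    # Both will return at most five documents starting at offset 20
    # once they are iterated.
    return sliced, explicit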
class RawBatchCursor(Cursor, Generic[_DocumentType]):
"""A cursor / iterator over raw batches of BSON data from a query result."""
_query_class = _RawBatchQuery
_getmore_class = _RawBatchGetMore
def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None:
"""Create a new cursor / iterator over raw batches of BSON data.
Should not be called directly by application developers -
see :meth:`~pymongo.collection.Collection.find_raw_batches`
instead.
.. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
"""
super(RawBatchCursor, self).__init__(collection, *args, **kwargs)
def _unpack_response(
self, response, cursor_id, codec_options, user_fields=None, legacy_response=False
):
raw_response = response.raw_response(cursor_id, user_fields=user_fields)
if not legacy_response:
# OP_MSG returns firstBatch/nextBatch documents as a BSON array
# Re-assemble the array of documents into a document stream
_convert_raw_document_lists_to_streams(raw_response[0])
return raw_response
def explain(self) -> _DocumentType:
"""Returns an explain plan record for this cursor.
.. seealso:: The MongoDB documentation on `explain <https://dochub.mongodb.org/core/explain>`_.
"""
clone = self._clone(deepcopy=True, base=Cursor(self.collection))
return clone.explain()
def __getitem__(self, index: Any) -> "Cursor[_DocumentType]":
raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor")
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
import socket
import random
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib import addrconv
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, \
HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
import ryu.ofproto.ofproto_v1_3 as ofp
import ryu.ofproto.ofproto_v1_3_parser as ofparser
import ryu.ofproto.openstate_v1_0 as osp
import ryu.ofproto.openstate_v1_0_parser as osparser
from ryu.lib.packet import packet
from ryu.topology import event
LOG = logging.getLogger('app.openstate.forwarding_consistency_many_to_many_ctrl')
SWITCH_PORTS = 6
IPV4 = ipv4.ipv4.__name__
TCP = tcp.tcp.__name__
class OSLoadBalancing(app_manager.RyuApp):
OFP_VERSIONS = [ofp.OFP_VERSION]
def __init__(self, *args, **kwargs):
LOG.info("OpenState Forwarding Consistency sample app initialized")
LOG.info("Supporting MAX %d ports per switch" % SWITCH_PORTS)
super(OSLoadBalancing, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.counter = 0
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
in_port = msg.match['in_port']
ip_dst = None
ip_src = None
tcp_dst = None
tcp_src = None
data = None
pkt = packet.Packet(msg.data)
header_list = dict((p.protocol_name, p)
for p in pkt.protocols if type(p) != str)
out_port_m_to_m = random.randint(4,6)
if IPV4 in header_list:
ip_dst = self.ip_addr_ntoa(header_list[IPV4].dst)
ip_src = self.ip_addr_ntoa(header_list[IPV4].src)
if TCP in header_list:
tcp_src = header_list[TCP].src_port
tcp_dst = header_list[TCP].dst_port
if datapath.id == 4:
self.add_flow_many_to_many(datapath, in_port, out_port_m_to_m, ip_src, ip_dst, tcp_src, tcp_dst)
actions = [ofparser.OFPActionOutput(out_port_m_to_m, 0)]
if msg.buffer_id == ofp.OFP_NO_BUFFER:
data = msg.data
out = ofparser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
        options = {1: self.one_to_many_switch,
2: self.middleswitch,
3: self.middleswitch,
4: self.many_to_many_switch,
5: self.middleswitch,
6: self.middleswitch,
7: self.many_to_one_switch
}
options[datapath.id](datapath)
def one_to_many_switch(self, datapath):
self.send_features_request(datapath)
self.send_group_mod(datapath)
self.send_table_mod(datapath)
self.send_key_lookup_1(datapath)
self.send_key_update_1(datapath)
self.add_flow_1_to_many(datapath)
def middleswitch(self, datapath):
self.send_features_request(datapath)
self.add_middleswitch_default(datapath)
    # STATELESS (well, not quite...)
def many_to_many_switch(self, datapath):
self.send_features_request(datapath)
self.send_table_mod(datapath)
self.send_key_lookup_1(datapath)
self.send_key_update_1(datapath)
self.add_flow_default_many_to_many(datapath)
def many_to_one_switch(self, datapath):
self.send_features_request(datapath)
self.send_table_mod(datapath)
self.send_key_lookup_2(datapath)
self.send_key_update_2(datapath)
self.add_flow_many_to_1(datapath)
def add_flow_1_to_many(self, datapath, table_miss=False):
LOG.info("Configuring flow table for switch %d" % datapath.id)
if table_miss:
LOG.debug("Installing table miss...")
actions = [ofparser.OFPActionOutput(
ofp.OFPP_CONTROLLER, ofp.OFPCML_NO_BUFFER)]
match = ofparser.OFPMatch()
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
else:
# ARP packets flooding
match = ofparser.OFPMatch(eth_type=0x0806)
actions = [
ofparser.OFPActionOutput(ofp.OFPP_FLOOD)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32768, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# Reverse path flow
for in_port in range(2, 5):
match = ofparser.OFPMatch(in_port=in_port)
actions = [
ofparser.OFPActionOutput(1,0)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32767, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# the state of a flow is the selected output port for that flow
for state in range(4):
if state == 0:
# if state=DEFAULT => send it to the first group entry in the group table
actions = [
ofparser.OFPActionGroup(1)]
match = ofparser.OFPMatch(
in_port=1, state=state, eth_type=0x800)
else:
# state x means output port x+1
actions = [
ofparser.OFPActionOutput(state+1, 0)]
match = ofparser.OFPMatch(
in_port=1, state=state, eth_type=0x800)
inst = [
ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=32767,
buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY, out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# MANY TO MANY RULES
def add_flow_default_many_to_many(self, datapath):
LOG.info("Configuring default flow entries for switch %d" % datapath.id)
#table miss
actions = [ofparser.OFPActionOutput(
ofp.OFPP_CONTROLLER, ofp.OFPCML_NO_BUFFER)]
match = ofparser.OFPMatch()
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# ARP packets flooding
match = ofparser.OFPMatch(eth_type=0x0806)
actions = [
ofparser.OFPActionOutput(ofp.OFPP_FLOOD)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32768, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# MANY TO MANY
def add_flow_many_to_many(self, datapath, in_port, out_port, ip_src, ip_dst, tcp_src, tcp_dst):
self.counter+=1
#LOG.info('Installing new forward rule for switch %d (rule # %d)' % (datapath.id, self.counter))
# reverse path rule
actions = [ofparser.OFPActionOutput(in_port, 0)]
match = ofparser.OFPMatch(
in_port=out_port, eth_type=0x800, ip_proto=6, ipv4_src=ip_dst, ipv4_dst=ip_src, tcp_src=tcp_dst, tcp_dst=tcp_src)
inst = [
ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=32767,
buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY, out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# forward path rule
actions = [ofparser.OFPActionOutput(out_port, 0)]
match = ofparser.OFPMatch(
in_port=in_port, eth_type=0x800, ip_proto=6, ipv4_src=ip_src, ipv4_dst=ip_dst, tcp_src=tcp_src, tcp_dst=tcp_dst)
inst = [
ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=32767,
buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY, out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
def add_flow_many_to_1(self, datapath, table_miss=False):
LOG.info("Configuring flow table for switch %d" % datapath.id)
if table_miss:
LOG.debug("Installing table miss...")
actions = [ofparser.OFPActionOutput(
ofp.OFPP_CONTROLLER, ofp.OFPCML_NO_BUFFER)]
match = ofparser.OFPMatch()
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=0, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
else:
# ARP packets flooding
match = ofparser.OFPMatch(eth_type=0x0806)
actions = [
ofparser.OFPActionOutput(ofp.OFPP_FLOOD)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32768, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
# Reverse path flow
for state in range(1,4):
match = ofparser.OFPMatch(in_port=4, state=state, eth_type=0x800)
actions = [
ofparser.OFPActionOutput(state,0)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32767, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
            # the state of a flow records its ingress port (1-3); the
            # reverse-path rules above use that state as the output port
            # back towards the client
            for in_port in range(1, 4):
                # tag the flow with its ingress port, then forward to port 4
actions = [
osparser.OFPExpActionSetState(state=in_port, table_id=0),
ofparser.OFPActionOutput(4, 0)]
match = ofparser.OFPMatch(
in_port=in_port, eth_type=0x800)
inst = [
ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0,
hard_timeout=0, priority=32767,
buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY, out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
def add_middleswitch_default(self, datapath):
LOG.info("Configuring default flow entries for switch %d" % datapath.id)
match = ofparser.OFPMatch(in_port=1)
actions = [
ofparser.OFPActionOutput(2,0)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32768, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
match = ofparser.OFPMatch(in_port=2)
actions = [
ofparser.OFPActionOutput(1,0)]
inst = [ofparser.OFPInstructionActions(
ofp.OFPIT_APPLY_ACTIONS, actions)]
mod = ofparser.OFPFlowMod(
datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
command=ofp.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
priority=32768, buffer_id=ofp.OFP_NO_BUFFER,
out_port=ofp.OFPP_ANY,
out_group=ofp.OFPG_ANY,
flags=0, match=match, instructions=inst)
datapath.send_msg(mod)
def send_features_request(self, datapath):
req = ofparser.OFPFeaturesRequest(datapath)
datapath.send_msg(req)
def ip_addr_ntoa(self,ip):
return socket.inet_ntoa(addrconv.ipv4.text_to_bin(ip))
def send_key_lookup_1(self, datapath):
key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osp.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofp.OXM_OF_IPV4_SRC,ofp.OXM_OF_IPV4_DST,ofp.OXM_OF_TCP_SRC,ofp.OXM_OF_TCP_DST], table_id=0)
datapath.send_msg(key_lookup_extractor)
def send_key_update_1(self, datapath):
key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osp.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofp.OXM_OF_IPV4_SRC,ofp.OXM_OF_IPV4_DST,ofp.OXM_OF_TCP_SRC,ofp.OXM_OF_TCP_DST], table_id=0)
datapath.send_msg(key_update_extractor)
def send_key_lookup_2(self, datapath):
key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osp.OFPSC_EXP_SET_L_EXTRACTOR, fields=[ofp.OXM_OF_IPV4_SRC,ofp.OXM_OF_IPV4_DST,ofp.OXM_OF_TCP_SRC,ofp.OXM_OF_TCP_DST], table_id=0)
datapath.send_msg(key_lookup_extractor)
def send_key_update_2(self, datapath):
key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osp.OFPSC_EXP_SET_U_EXTRACTOR, fields=[ofp.OXM_OF_IPV4_DST,ofp.OXM_OF_IPV4_SRC,ofp.OXM_OF_TCP_DST,ofp.OXM_OF_TCP_SRC], table_id=0)
datapath.send_msg(key_update_extractor)
def send_table_mod(self, datapath):
req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=0, stateful=1)
datapath.send_msg(req)
def send_group_mod(self, datapath):
buckets = []
        # Action bucket i: <FWD(port_i), SetState(port_i - 1)>
for port in range(2,5):
max_len = 2000
actions = [
osparser.OFPExpActionSetState(state=port-1, table_id=0),
ofparser.OFPActionOutput(port, max_len)]
weight = 100
watch_port = ofp.OFPP_ANY
watch_group = ofp.OFPG_ANY
buckets.append(ofparser.OFPBucket(weight, watch_port, watch_group,actions))
group_id = 1
req = ofparser.OFPGroupMod(datapath, ofp.OFPGC_ADD,
ofp.OFPGT_SELECT, group_id, buckets)
datapath.send_msg(req)
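# Illustrative sketch (not part of the app): the state<->port convention the
# flow entries above rely on. At the 1-to-many switch, state 0 sends a flow
# to group 1 (which picks a random bucket and sets the state), while state x
# pins the flow to output port x + 1, matching SetState(port - 1) in the
# group buckets.
def _example_state_to_port():  # illustrative only; never called by the app
    def one_to_many_output(state):
        return 'group:1' if state == 0 else 'port:%d' % (state + 1)

    assert one_to_many_output(0) == 'group:1'
    assert one_to_many_output(1) == 'port:2'
    assert one_to_many_output(3) == 'port:4'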
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
from BaseHTTPServer import BaseHTTPRequestHandler
from Cookie import CookieError, BaseCookie, SimpleCookie
import cgi
from datetime import datetime
import errno
from hashlib import md5
import new
import mimetypes
import os
import socket
from StringIO import StringIO
import sys
import urlparse
from trac.core import Interface, TracError
from trac.util import get_last_traceback, unquote
from trac.util.datefmt import http_date, localtz
from trac.util.text import empty, to_unicode
from trac.util.translation import _
from trac.web.href import Href
from trac.web.wsgi import _FileWrapper
class IAuthenticator(Interface):
"""Extension point interface for components that can provide the name
of the remote user."""
def authenticate(req):
"""Return the name of the remote user, or `None` if the identity of the
user is unknown."""
class IRequestHandler(Interface):
"""Decide which `trac.core.Component` handles which `Request`, and how."""
def match_request(req):
"""Return whether the handler wants to process the given request."""
def process_request(req):
"""Process the request.
Return a `(template_name, data, content_type)` tuple,
where `data` is a dictionary of substitutions for the Genshi template.
"text/html" is assumed if `content_type` is `None`.
Note that if template processing should not occur, this method can
simply send the response itself and not return anything.
:Since 1.0: Clearsilver templates are no longer supported.
"""
class IRequestFilter(Interface):
"""Enable components to interfere with the processing done by the
    main handler, either before and/or after it handles the request.
"""
def pre_process_request(req, handler):
"""Called after initial handler selection, and can be used to change
the selected handler or redirect request.
Always returns the request handler, even if unchanged.
"""
def post_process_request(req, template, data, content_type):
"""Do any post-processing the request might need; typically adding
values to the template `data` dictionary, or changing the Genshi
template or mime type.
`data` may be updated in place.
Always returns a tuple of (template, data, content_type), even if
unchanged.
Note that `template`, `data`, `content_type` will be `None` if:
- called when processing an error page
- the default request handler did not return any result
:Since 0.11: there's a `data` argument for supporting Genshi templates;
this introduced a difference in arity which made it possible to
distinguish between the IRequestFilter components still targeted
at ClearSilver templates and the newer ones targeted at Genshi
templates.
:Since 1.0: Clearsilver templates are no longer supported.
"""
class ITemplateStreamFilter(Interface):
"""Transform the generated content by filtering the Genshi event stream
generated by the template, prior to its serialization.
"""
def filter_stream(req, method, filename, stream, data):
"""Return a filtered Genshi event stream, or the original unfiltered
stream if no match.
`req` is the current request object, `method` is the Genshi render
method (xml, xhtml or text), `filename` is the filename of the template
to be rendered, `stream` is the event stream and `data` is the data for
the current template.
See the Genshi_ documentation for more information.
.. _Genshi: http://genshi.edgewall.org/wiki/Documentation/filters.html
"""
HTTP_STATUS = dict([(code, reason.title()) for code, (reason, description)
in BaseHTTPRequestHandler.responses.items()])
class HTTPException(Exception):
def __init__(self, detail, *args):
if isinstance(detail, TracError):
self.detail = detail.message
self.reason = detail.title
else:
self.detail = detail
if args:
self.detail = self.detail % args
Exception.__init__(self, '%s %s (%s)' % (self.code, self.reason,
self.detail))
@classmethod
def subclass(cls, name, code):
"""Create a new Exception class representing a HTTP status code."""
reason = HTTP_STATUS.get(code, 'Unknown')
new_class = new.classobj(name, (HTTPException,), {
'__doc__': 'Exception for HTTP %d %s' % (code, reason)
})
new_class.code = code
new_class.reason = reason
return new_class
_HTTPException_subclass_names = []
for code in [code for code in HTTP_STATUS if code >= 400]:
exc_name = HTTP_STATUS[code].replace(' ', '').replace('-', '')
# 2.5 compatibility hack:
if exc_name == 'InternalServerError':
exc_name = 'InternalError'
if exc_name.lower().startswith('http'):
exc_name = exc_name[4:]
exc_name = 'HTTP' + exc_name
setattr(sys.modules[__name__], exc_name,
HTTPException.subclass(exc_name, code))
_HTTPException_subclass_names.append(exc_name)
del code, exc_name
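# Illustrative note (not part of the original module): the loop above creates
# one HTTPException subclass per 4xx/5xx status code, e.g. HTTPNotFound for
# 404 and HTTPBadRequest for 400, each carrying `code` and `reason` class
# attributes. Handlers raise them directly, as `path_info` does further down;
# a hypothetical example (the `path` variable is illustrative only):
#
#     raise HTTPNotFound("No such page: %s", path)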
class _FieldStorage(cgi.FieldStorage):
"""Our own version of cgi.FieldStorage, with tweaks."""
def read_multi(self, *args, **kwargs):
try:
cgi.FieldStorage.read_multi(self, *args, **kwargs)
except ValueError:
# Most likely "Invalid boundary in multipart form",
# possibly an upload of a .mht file? See #9880.
self.read_single()
class _RequestArgs(dict):
"""Dictionary subclass that provides convenient access to request
parameters that may contain multiple values."""
def getfirst(self, name, default=None):
"""Return the first value for the specified parameter, or `default` if
the parameter was not provided.
"""
if name not in self:
return default
val = self[name]
if isinstance(val, list):
val = val[0]
return val
def getlist(self, name):
"""Return a list of values for the specified parameter, even if only
one value was provided.
"""
if name not in self:
return []
val = self[name]
if not isinstance(val, list):
val = [val]
return val
def parse_arg_list(query_string):
"""Parse a query string into a list of `(name, value)` tuples."""
args = []
if not query_string:
return args
for arg in query_string.split('&'):
nv = arg.split('=', 1)
if len(nv) == 2:
(name, value) = nv
else:
(name, value) = (nv[0], empty)
name = unquote(name.replace('+', ' '))
if isinstance(name, str):
name = unicode(name, 'utf-8')
value = unquote(value.replace('+', ' '))
if isinstance(value, str):
value = unicode(value, 'utf-8')
args.append((name, value))
return args
def arg_list_to_args(arg_list):
"""Convert a list of `(name, value)` tuples into into a `_RequestArgs`."""
args = _RequestArgs()
for name, value in arg_list:
if name in args:
if isinstance(args[name], list):
args[name].append(value)
else:
args[name] = [args[name], value]
else:
args[name] = value
return args
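# Illustrative example (not part of the original module): parsing a query
# string and reading multi-valued parameters through _RequestArgs:
#
#     args = arg_list_to_args(parse_arg_list('action=edit&tag=a&tag=b'))
#     args.getfirst('action')   # -> u'edit'
#     args.getlist('tag')       # -> [u'a', u'b']
#     args.getlist('missing')   # -> []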
class RequestDone(Exception):
"""Marker exception that indicates whether request processing has completed
and a response was sent.
"""
iterable = None
def __init__(self, iterable=None):
self.iterable = iterable
class Cookie(SimpleCookie):
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = []
self._BaseCookie__set = self._loose_set
SimpleCookie.load(self, rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
# If a key appears multiple times, the first occurrence has the
# narrowest scope, keep that
if key in self:
return
try:
self._strict_set(key, real_value, coded_value)
except CookieError:
self.bad_cookies.append(key)
dict.__setitem__(self, key, None)
class Request(object):
"""Represents a HTTP request/response pair.
This class provides a convenience API over WSGI.
"""
def __init__(self, environ, start_response):
"""Create the request wrapper.
:param environ: The WSGI environment dict
:param start_response: The WSGI callback for starting the response
The `callbacks` attribute maps attribute names to functions that are
used to lazily evaluate attribute lookups (see `__getattr__`).
"""
self.environ = environ
self._start_response = start_response
self._write = None
self._status = '200 OK'
self._response = None
self._outheaders = []
self._outcharset = None
self.outcookie = Cookie()
self.callbacks = {
'arg_list': Request._parse_arg_list,
'args': lambda req: arg_list_to_args(req.arg_list),
'languages': Request._parse_languages,
'incookie': Request._parse_cookies,
'_inheaders': Request._parse_headers
}
self.redirect_listeners = []
self.base_url = self.environ.get('trac.base_url')
if not self.base_url:
self.base_url = self._reconstruct_url()
self.href = Href(self.base_path)
self.abs_href = Href(self.base_url)
def __getattr__(self, name):
"""Performs lazy attribute lookup by delegating to the functions in the
callbacks dictionary."""
if name in self.callbacks:
value = self.callbacks[name](self)
setattr(self, name, value)
return value
raise AttributeError(name)
def __repr__(self):
path_info = self.environ.get('PATH_INFO', '')
return '<%s "%s %r">' % (self.__class__.__name__, self.method,
path_info)
# Public API
@property
def method(self):
"""The HTTP method of the request"""
return self.environ['REQUEST_METHOD']
@property
def path_info(self):
"""Path inside the application"""
path_info = self.environ.get('PATH_INFO', '')
try:
return unicode(path_info, 'utf-8')
except UnicodeDecodeError:
raise HTTPNotFound(_("Invalid URL encoding (was %(path_info)r)",
path_info=path_info))
@property
def query_string(self):
"""Query part of the request"""
return self.environ.get('QUERY_STRING', '')
@property
def remote_addr(self):
"""IP address of the remote user"""
return self.environ.get('REMOTE_ADDR')
@property
def remote_user(self):
""" Name of the remote user.
Will be `None` if the user has not logged in using HTTP authentication.
"""
user = self.environ.get('REMOTE_USER')
if user is not None:
return to_unicode(user)
@property
def scheme(self):
"""The scheme of the request URL"""
return self.environ['wsgi.url_scheme']
@property
def base_path(self):
"""The root path of the application"""
return self.environ.get('SCRIPT_NAME', '')
@property
def server_name(self):
"""Name of the server"""
return self.environ['SERVER_NAME']
@property
def server_port(self):
"""Port number the server is bound to"""
return int(self.environ['SERVER_PORT'])
def add_redirect_listener(self, listener):
"""Add a callable to be called prior to executing a redirect.
The callable is passed the arguments to the `redirect()` call.
"""
self.redirect_listeners.append(listener)
def get_header(self, name):
"""Return the value of the specified HTTP header, or `None` if there's
no such header in the request.
"""
name = name.lower()
for key, value in self._inheaders:
if key == name:
return value
return None
def send_response(self, code=200):
"""Set the status code of the response."""
self._status = '%s %s' % (code, HTTP_STATUS.get(code, 'Unknown'))
def send_header(self, name, value):
"""Send the response header with the specified name and value.
`value` must either be a `unicode` string or be convertible to one
(e.g. numbers).
"""
lower_name = name.lower()
if lower_name == 'content-type':
ctpos = value.find('charset=')
if ctpos >= 0:
self._outcharset = value[ctpos + 8:].strip()
elif lower_name == 'content-length':
self._content_length = int(value)
self._outheaders.append((name, unicode(value).encode('utf-8')))
def end_headers(self):
"""Must be called after all headers have been sent and before the
actual content is written.
"""
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders)
def check_modified(self, datetime, extra=''):
"""Check the request "If-None-Match" header against an entity tag.
The entity tag is generated from the specified last modified time
(`datetime`), optionally appending an `extra` string to
indicate variants of the requested resource.
That `extra` parameter can also be a list, in which case the MD5 sum
of the list content will be used.
If the generated tag matches the "If-None-Match" header of the request,
this method sends a "304 Not Modified" response to the client.
Otherwise, it adds the entity tag as an "ETag" header to the response
so that consecutive requests can be cached.
"""
if isinstance(extra, list):
m = md5()
for elt in extra:
m.update(repr(elt))
extra = m.hexdigest()
etag = 'W/"%s/%s/%s"' % (self.authname, http_date(datetime), extra)
inm = self.get_header('If-None-Match')
if (not inm or inm != etag):
self.send_header('ETag', etag)
else:
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
def redirect(self, url, permanent=False):
"""Send a redirect to the client, forwarding to the specified URL.
The `url` may be relative or absolute, relative URLs will be translated
appropriately.
"""
for listener in self.redirect_listeners:
listener(self, url, permanent)
if permanent:
status = 301 # 'Moved Permanently'
elif self.method == 'POST':
status = 303 # 'See Other' -- safe to use in response to a POST
else:
status = 302 # 'Found' -- normal temporary redirect
self.send_response(status)
if not url.startswith(('http://', 'https://')):
# Make sure the URL is absolute
scheme, host = urlparse.urlparse(self.base_url)[:2]
url = urlparse.urlunparse((scheme, host, url, None, None, None))
# Workaround #10382, IE6+ bug when post and redirect with hash
if status == 303 and '#' in url and \
' MSIE ' in self.environ.get('HTTP_USER_AGENT', ''):
url = url.replace('#', '#__msie303:')
self.send_header('Location', url)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', 0)
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.end_headers()
raise RequestDone
def send(self, content, content_type='text/html', status=200):
self.send_response(status)
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(content))
self.end_headers()
if self.method != 'HEAD':
self.write(content)
raise RequestDone
def send_error(self, exc_info, template='error.html',
content_type='text/html', status=500, env=None, data={}):
try:
if template.endswith('.html'):
if env:
from trac.web.chrome import Chrome
try:
data = Chrome(env).render_template(self, template,
data, 'text/html')
except Exception:
# second chance rendering, in "safe" mode
data['trac_error_rendering'] = True
data = Chrome(env).render_template(self, template,
data, 'text/html')
else:
content_type = 'text/plain'
data = '%s\n\n%s: %s' % (data.get('title'),
data.get('type'),
data.get('message'))
except Exception: # failed to render
data = get_last_traceback()
content_type = 'text/plain'
if isinstance(data, unicode):
data = data.encode('utf-8')
self.send_response(status)
self._outheaders = []
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(data))
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders,
exc_info)
if self.method != 'HEAD':
self.write(data)
raise RequestDone
def send_no_content(self):
self.send_response(204)
self.send_header('Content-Length', 0)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
raise RequestDone
def send_file(self, path, mimetype=None):
"""Send a local file to the browser.
This method includes the "Last-Modified", "Content-Type" and
"Content-Length" headers in the response, corresponding to the file
attributes. It also checks the last modification time of the local file
against the "If-Modified-Since" provided by the user agent, and sends a
"304 Not Modified" response if it matches.
"""
if not os.path.isfile(path):
raise HTTPNotFound(_("File %(path)s not found", path=path))
stat = os.stat(path)
mtime = datetime.fromtimestamp(stat.st_mtime, localtz)
last_modified = http_date(mtime)
if last_modified == self.get_header('If-Modified-Since'):
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
if not mimetype:
mimetype = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
self.send_response(200)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', stat.st_size)
self.send_header('Last-Modified', last_modified)
use_xsendfile = getattr(self, 'use_xsendfile', False)
if use_xsendfile:
self.send_header('X-Sendfile', os.path.abspath(path))
self.end_headers()
if not use_xsendfile and self.method != 'HEAD':
fileobj = file(path, 'rb')
file_wrapper = self.environ.get('wsgi.file_wrapper', _FileWrapper)
self._response = file_wrapper(fileobj, 4096)
raise RequestDone
def read(self, size=None):
"""Read the specified number of bytes from the request body."""
fileobj = self.environ['wsgi.input']
if size is None:
size = self.get_header('Content-Length')
if size is None:
size = -1
else:
size = int(size)
data = fileobj.read(size)
return data
def write(self, data):
"""Write the given data to the response body.
*data* **must** be a `str` string, encoded with the charset
which has been specified in the ``'Content-Type'`` header
or UTF-8 otherwise.
Note that when the ``'Content-Length'`` header is specified,
its value either corresponds to the length of *data*, or, if
there are multiple calls to `write`, to the cumulated length
of the *data* arguments.
"""
if not self._write:
self.end_headers()
if isinstance(data, unicode):
raise ValueError("Can't send unicode content")
try:
self._write(data)
except (IOError, socket.error), e:
if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
raise RequestDone
# Note that mod_wsgi raises an IOError with only a message
# if the client disconnects
if 'mod_wsgi.version' in self.environ and \
e.args[0] in ('failed to write data',
'client connection closed'):
raise RequestDone
raise
# Internal methods
def _parse_arg_list(self):
"""Parse the supplied request parameters into a list of
`(name, value)` tuples.
"""
fp = self.environ['wsgi.input']
# Avoid letting cgi.FieldStorage consume the input stream when the
# request does not contain form data
ctype = self.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype not in ('application/x-www-form-urlencoded',
'multipart/form-data'):
fp = StringIO('')
# Python 2.6 introduced a backwards incompatible change for
# FieldStorage where QUERY_STRING is no longer ignored for POST
# requests. We'll keep the pre 2.6 behaviour for now...
if self.method == 'POST':
qs_on_post = self.environ.pop('QUERY_STRING', '')
fs = _FieldStorage(fp, environ=self.environ, keep_blank_values=True)
if self.method == 'POST':
self.environ['QUERY_STRING'] = qs_on_post
args = []
for value in fs.list or ():
name = value.name
if not value.filename:
value = unicode(value.value, 'utf-8')
args.append((name, value))
return args
def _parse_cookies(self):
cookies = Cookie()
header = self.get_header('Cookie')
if header:
cookies.load(header, ignore_parse_errors=True)
return cookies
def _parse_headers(self):
headers = [(name[5:].replace('_', '-').lower(), value)
for name, value in self.environ.items()
if name.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
headers.append(('content-length', self.environ['CONTENT_LENGTH']))
if 'CONTENT_TYPE' in self.environ:
headers.append(('content-type', self.environ['CONTENT_TYPE']))
return headers
def _parse_languages(self):
"""The list of languages preferred by the remote user, taken from the
``Accept-Language`` header.
"""
header = self.get_header('Accept-Language') or 'en-us'
langs = []
for i, lang in enumerate(header.split(',')):
code, params = cgi.parse_header(lang)
q = 1
if 'q' in params:
try:
q = float(params['q'])
except ValueError:
q = 0
langs.append((-q, i, code))
langs.sort()
return [code for q, i, code in langs]
def _reconstruct_url(self):
"""Reconstruct the absolute base URL of the application."""
host = self.get_header('Host')
if not host:
# Missing host header, so reconstruct the host from the
# server name and port
default_port = {'http': 80, 'https': 443}
if self.server_port and self.server_port != \
default_port[self.scheme]:
host = '%s:%d' % (self.server_name, self.server_port)
else:
host = self.server_name
return urlparse.urlunparse((self.scheme, host, self.base_path, None,
None, None))
def _send_cookie_headers(self):
for name in self.outcookie.keys():
path = self.outcookie[name].get('path')
if path:
path = path.replace(' ', '%20') \
.replace(';', '%3B') \
.replace(',', '%2C')
self.outcookie[name]['path'] = path
cookies = to_unicode(self.outcookie.output(header='')).encode('utf-8')
for cookie in cookies.splitlines():
self._outheaders.append(('Set-Cookie', cookie.strip()))
__no_apidoc__ = _HTTPException_subclass_names
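# Illustrative sketch (not part of the original module): a minimal WSGI
# application that wraps the environ in a Request and answers with send().
# The response body text is an assumption chosen for illustration only.
def _demo_wsgi_app(environ, start_response):
    req = Request(environ, start_response)
    try:
        req.send('Hello from a Trac-style Request', content_type='text/plain')
    except RequestDone:
        pass
    # send() writes through the callable returned by start_response, so there
    # is usually nothing left to return here.
    return req._response or []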
|
|
# -*- coding: utf-8 -*-
"""
Global menus
"""
# Language Menu (available in all screens)
s3.menu_lang = [ T("Language"), True, "#"]
_menu_lang = []
for language in s3.l10n_languages.keys():
_menu_lang.append([s3.l10n_languages[language], False,
URL(r=request, args=request.args,
vars={"_language":language})])
s3.menu_lang.append(_menu_lang)
# Help Menu (available in all screens)
s3.menu_help = [ T("Help"), True, "#",
[
[T("Contact us"), False,
URL(request.application, "default", "contact")],
[T("About"), False, URL(request.application, "default", "about")],
]
]
# Auth Menu (available in all screens)
if not auth.is_logged_in():
login_next = URL(r=request, args=request.args, vars=request.vars)
if request.controller == "default" and \
request.function == "user" and \
"_next" in request.get_vars:
login_next = request.get_vars["_next"]
self_registration = deployment_settings.get_security_self_registration()
if self_registration:
s3.menu_auth = [T("Login"), True,
URL(request.application, "default", "user/login", vars=dict(_next=login_next)),
[
[T("Login"), False,
URL(request.application, "default", "user/login", vars=dict(_next=login_next))],
[T("Register"), False,
URL(request.application, "default", "user/register", vars=dict(_next=login_next))],
[T("Lost Password"), False,
URL(request.application, "default", "user/retrieve_password")]
]
]
else:
s3.menu_auth = [T("Login"), True,
URL(request.application, "default", "user/login", vars=dict(_next=login_next)),
[
[T("Lost Password"), False,
URL(request.application, "default", "user/retrieve_password")]
]
]
else:
s3.menu_auth = [auth.user.email, True, None,
[
[T("Logout"), False,
URL(request.application, "default", "user/logout")],
[T("User Profile"), False,
URL(request.application, "default", "user/profile")],
[T("Personal Data"), False,
URL(request.application, c="pr", f="person",
vars={"person.uid" : auth.user.person_uuid})],
[T("Contact details"), False,
URL(request.application, c="pr", f="person",
args="contact",
vars={"person.uid" : auth.user.person_uuid})],
[T("Subscriptions"), False,
URL(request.application, c="pr", f="person",
args="pe_subscription",
vars={"person.uid" : auth.user.person_uuid})],
[T("Change Password"), False,
URL(request.application, "default", "user/change_password")],
["----", False, None],
[(T("Rapid Data Entry"), "rapid_toggle",
session.s3.rapid_data_entry is True),
False, URL(request.application, "default", "rapid")]
]
]
# Menu for Admin module
# (defined here as used in several different Controller files)
admin_menu_messaging = [
[T("Global Messaging Settings"), False,
URL(r=request, c="msg", f="setting", args=[1, "update"])],
[T("Email Settings"), False,
URL(r=request, c="msg", f="email_settings", args=[1, "update"])],
[T("Twitter Settings"), False,
URL(r=request, c="msg", f="twitter_settings", args=[1, "update"])],
[T("Modem Settings"), False,
URL(r=request, c="msg", f="modem_settings", args=[1, "update"])],
[T("Gateway Settings"), False,
URL(r=request, c="msg", f="gateway_settings", args=[1, "update"])],
[T("Tropo Settings"), False,
URL(r=request, c="msg", f="tropo_settings", args=[1, "update"])],
]
admin_menu_options = [
[T("Settings"), False, URL(r=request, c="admin", f="setting", args=[1, "update"]), [
[T("Edit Themes"), False, URL(r=request, c="admin", f="theme")]
]],
[T("User Management"), False, URL(r=request, c="admin", f="user"), [
[T("Users"), False, URL(r=request, c="admin", f="user")],
[T("Roles"), False, URL(r=request, c="admin", f="role")],
[T("Organizations"), False, URL(r=request, c="admin", f="organisation")],
#[T("Roles"), False, URL(r=request, c="admin", f="group")],
#[T("Membership"), False, URL(r=request, c="admin", f="membership")],
]],
[T("Database"), False, URL(r=request, c="appadmin", f="index"), [
[T("Import"), False, URL(r=request, c="admin", f="import_data")],
[T("Export"), False, URL(r=request, c="admin", f="export_data")],
#[T("Import Jobs"), False, URL(r=request, c="admin", f="import_job")],
[T("Raw Database access"), False, URL(r=request, c="appadmin", f="index")]
]],
[T("Synchronization"), False, URL(r=request, c="sync", f="index"), [
[T("Manual Synchronization"), False, URL(r=request, c="sync", f="now")],
#[T("Offline Sync"), False, URL(r=request, c="sync", f="offline")],
[T("Settings"), False,
URL(r=request, c="sync", f="setting", args=[1, "update"])],
[T("Peers"), False, URL(r=request, c="sync", f="peer")],
[T("Schedule"), False, URL(r=request, c="sync", f="job")],
#[T("Sync Pools"), False, URL(r=request, c="sync", f="pool")],
[T("Peer Registration"), False,
URL(r=request, c="sync", f="registration")],
#[T("Conflict Resolution"), False,
# URL(r=request, c="sync", f="conflict")],
[T("History"), False, URL(r=request, c="sync", f="log")]
]],
[T("Messaging"), False, "#", admin_menu_messaging],
#[T("Edit Application"), False,
# URL(r=request, a="admin", c="default", f="design", args=[request.application])],
[T("Tickets"), False, URL(r=request, c="admin", f="errors")],
]
# Modules Menu (available in all Controllers)
# NB This is just a default menu - most deployments will customise this
s3.menu_modules = []
# Home always 1st
_module = deployment_settings.modules["default"]
s3.menu_modules.append([_module.name_nice, False,
URL(r=request, c="default", f="index")])
# Modules to hide due to insufficient permissions
hidden_modules = auth.permission.hidden_modules()
# The Modules to display at the top level
for module_type in [1, 2, 3, 4, 5]:
for module in deployment_settings.modules:
if module in hidden_modules:
continue
_module = deployment_settings.modules[module]
if (_module.module_type == module_type):
if not _module.access:
s3.menu_modules.append([_module.name_nice, False,
URL(r=request, c=module, f="index")])
else:
authorised = False
groups = re.split("\|", _module.access)[1:-1]
for group in groups:
if s3_has_role(group):
authorised = True
if authorised == True:
s3.menu_modules.append([_module.name_nice, False,
URL(r=request, c=module, f="index")])
# Modules to display off the 'more' menu
modules_submenu = []
for module in deployment_settings.modules:
if module in hidden_modules:
continue
_module = deployment_settings.modules[module]
if (_module.module_type == 10):
if not _module.access:
modules_submenu.append([_module.name_nice, False,
URL(r=request, c=module, f="index")])
else:
authorised = False
groups = re.split("\|", _module.access)[1:-1]
for group in groups:
if s3_has_role(group):
authorised = True
if authorised == True:
modules_submenu.append([_module.name_nice, False,
URL(r=request, c=module, f="index")])
if modules_submenu:
# Only show the 'more' menu if there are entries in the list
module_more_menu = ([T("more"), False, "#"])
module_more_menu.append(modules_submenu)
s3.menu_modules.append(module_more_menu)
# Admin always last
_module = deployment_settings.modules["admin"]
authorised = False
groups = re.split("\|", _module.access)[1:-1]
for group in groups:
if int(group) in session.s3.roles:
authorised = True
if authorised == True:
s3.menu_admin = [_module.name_nice, True,
URL(r=request, c="admin", f="index")]
else:
s3.menu_admin = []
# Build overall menu out of components
response.menu = s3.menu_modules
response.menu.append(s3.menu_help)
response.menu.append(s3.menu_auth)
if deployment_settings.get_L10n_display_toolbar():
response.menu.append(s3.menu_lang)
if s3.menu_admin:
response.menu.append(s3.menu_admin)
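# Illustrative sketch (not part of the original file): entries throughout this
# file follow the nested-list convention [label, flag, url, [sub-items]].
# The entry below only defines a local variable and reuses existing default
# controller functions, so it has no effect on the real menus.
_example_menu_entry = [T("Example"), False,
                       URL(r=request, c="default", f="index"), [
                           [T("About"), False,
                            URL(r=request, c="default", f="about")],
                       ]]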
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import subprocess
import logging
VALID_TYPES = ['image/jpeg', 'image/gif', 'image/tiff', 'image/png', 'image/jp2', 'image/jpx', 'image/jpm']
INVALID_TYPES = ['application/pdf']
# Settings recommended as a starting point by Jon Stroop.
# See https://groups.google.com/forum/?hl=en#!searchin/iiif-discuss/kdu_compress/iiif-discuss/OFzWFLaWVsE/wF2HaykHcd0J
KDU_COMPRESS_BASE_OPTS = [
"-quiet", "-rate",
"2.4,1.48331273,.91673033,.56657224,.35016049,.21641118,.13374944,"
".08266171",
"Creversible=yes", "Clevels=7", "Cblk={64,64}", "Cuse_sop=yes",
"Cuse_eph=yes", "Corder=RLCP", "ORGgen_plt=yes", "ORGtparts=R",
"Stiles={1024,1024}", "-double_buffering", "10", "-num_threads", "4",
"-no_weights"
]
KDU_COMPRESS_DEFAULT_OPTS = KDU_COMPRESS_BASE_OPTS[:]
KDU_COMPRESS_DEFAULT_OPTS.extend(["-jp2_space", "sRGB"])
class Convert(object):
'''
utilities for use in converting an image file to jp2 format
'''
def __init__(self):
self.logger = logging.getLogger(__name__)
self.tiffcp_location = os.environ.get('PATH_TIFFCP',
'/usr/local/bin/tiffcp')
self.magick_convert_location = os.environ.get('PATH_MAGICK_CONVERT',
'/usr/local/bin/convert')
self.kdu_compress_location = os.environ.get(
'PATH_KDU_COMPRESS', '/usr/local/bin/kdu_compress')
self.tiff2rgba_location = os.environ.get('PATH_TIFF2RGBA',
'/usr/local/bin/tiff2rgba')
self.tifficc_location = os.environ.get('PATH_TIFFICC',
'/usr/local/bin/tifficc')
self.kdu_expand_location = os.environ.get('PATH_KDU_EXPAND',
'/usr/local/bin/kdu_expand')
def _pre_check(self, mimetype):
''' do a basic pre-check on the object to see if we think it's
something we know how to deal with '''
# see if we recognize this mime type
if mimetype in VALID_TYPES:
passed = True
msg = "Mime-type '{}' was pre-checked and recognized as " \
"something we can try to convert.".format(mimetype)
self.logger.info(msg)
elif mimetype in INVALID_TYPES:
passed = False
msg = "Mime-type '{}' was pre-checked and recognized as " \
"something we don't want to convert.".format(mimetype)
self.logger.info(msg)
else:
passed = False
msg = "Mime-type '{}' was unrecognized. We don't know how to " \
"deal with this".format(mimetype)
self.logger.warning(msg)
return passed, msg
def _uncompress_tiff(self, compressed_path, uncompressed_path):
''' uncompress a tiff using tiffcp.
See http://www.libtiff.org/tools.html '''
try:
subprocess.check_output(
[
self.tiffcp_location, "-c", "none", compressed_path,
uncompressed_path
],
stderr=subprocess.STDOUT)
uncompressed = True
msg = 'File uncompressed. Input: {}, output: {}'.format(
compressed_path, uncompressed_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
uncompressed = False
msg = '`tiffcp` command failed: {}\nreturncode was: {}\n' \
'output was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return uncompressed, msg
def _uncompress_jp2000(self, compressed_path, uncompressed_path):
''' uncompress a jp2000 file using kdu_expand '''
try:
subprocess.check_output(
[
self.kdu_expand_location, "-i", compressed_path, "-o", uncompressed_path
],
stderr=subprocess.STDOUT)
uncompressed = True
msg = 'File uncompressed using kdu_expand. Input: {}, output: {}'.format(
compressed_path, uncompressed_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
uncompressed = False
msg = '`kdu_expand` command failed: {}\nreturncode was: {}\n' \
'output was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return uncompressed, msg
def _tiff_to_jp2(self, tiff_path, jp2_path):
''' convert a tiff to jp2 using kdu_compress.
tiff must be uncompressed.'''
basic_args = [
self.kdu_compress_location, "-i", tiff_path, "-o", jp2_path
]
default_args = basic_args[:]
default_args.extend(KDU_COMPRESS_DEFAULT_OPTS)
alt_args = basic_args[:]
alt_args.extend(KDU_COMPRESS_BASE_OPTS)
try:
subprocess.check_output(
default_args, stderr=subprocess.STDOUT)
converted = True
msg = '{} converted to {}'.format(tiff_path, jp2_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
self.logger.info(
'A kdu_compress command failed. Trying alternate.')
try:
subprocess.check_output(
alt_args, stderr=subprocess.STDOUT)
converted = True
msg = '{} converted to {}'.format(tiff_path, jp2_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
converted = False
msg = 'kdu_compress command failed: {}\nreturncode was: {}\n' \
'output was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return converted, msg
def _pre_convert(self, input_path, output_path):
'''
convert file using ImageMagick `convert`:
http://www.imagemagick.org/script/convert.php
'''
try:
subprocess.check_output(
[
self.magick_convert_location, "-compress", "None",
"-quality", "100", "-auto-orient", input_path, output_path
],
stderr=subprocess.STDOUT)
preconverted = True
msg = 'Used ImageMagick convert to convert {} to {}'.format(
input_path, output_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
preconverted = False
msg = 'ImageMagick `convert` command failed: {}\nreturncode was: ' \
'{}\noutput was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return preconverted, msg
def _tiff_to_srgb_libtiff(self, input_path, output_path):
'''
convert color profile to sRGB using libtiff's `tiff2rgba` tool
'''
try:
subprocess.check_output([
self.tiff2rgba_location, "-c", "none", input_path, output_path
],
stderr=subprocess.STDOUT)
to_srgb = True
msg = "Used tiff2rgba to convert {} to {}, with color profile" \
"sRGB (if not already sRGB)".format(input_path, output_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
to_srgb = False
msg = 'libtiff `tiff2rgba` command failed: {}\nreturncode was: ' \
'{}\noutput was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return to_srgb, msg
def _tiff_to_srgb_little_cms(self, input_path, output_path):
'''
convert color profile to sRGB using Little CMS's `tifficc`
ICC profile applier tool.
'''
try:
subprocess.check_output(
[self.tifficc_location, input_path, output_path],
stderr=subprocess.STDOUT)
to_srgb = True
msg = "Used tifficc to convert {} to {}, with color profile " \
"sRGB (if not already sRGB)".format(input_path, output_path)
self.logger.info(msg)
except subprocess.CalledProcessError, e:
to_srgb = False
msg = 'Little CMS `tifficc` command failed: {}\nreturncode was: ' \
'{}\noutput was: {}'.format(e.cmd, e.returncode, e.output)
self.logger.error(msg)
return to_srgb, msg
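# Illustrative sketch (not part of the original module): a minimal conversion
# pipeline chaining the helper methods above. The file paths are hypothetical
# and intermediate-file cleanup is omitted for brevity.
def _demo_convert(mimetype, source_path, work_tiff, out_jp2):
    converter = Convert()
    ok, msg = converter._pre_check(mimetype)
    if not ok:
        return False, msg
    # Uncompressed TIFF is required by kdu_compress; other supported types
    # are first normalized with ImageMagick convert.
    if mimetype == 'image/tiff':
        ok, msg = converter._uncompress_tiff(source_path, work_tiff)
    else:
        ok, msg = converter._pre_convert(source_path, work_tiff)
    if not ok:
        return False, msg
    return converter._tiff_to_jp2(work_tiff, out_jp2)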
def main(argv=None):
pass
if __name__ == "__main__":
sys.exit(main())
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base model class."""
from core.platform import models
import utils
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
transaction_services = models.Registry.import_transaction_services()
# The delimiter used to separate the version number from the model instance
# id. To get the instance id from a snapshot id, use Python's rfind()
# method to find the location of this delimiter.
_VERSION_DELIMITER = '-'
# Constants used for generating ids.
MAX_RETRIES = 10
RAND_RANGE = 127 * 127
ID_LENGTH = 12
class BaseModel(ndb.Model):
"""Base model for all persistent object storage classes."""
# When this entity was first created. This can be overwritten and
# set explicitly.
created_on = ndb.DateTimeProperty(auto_now_add=True, indexed=True)
# When this entity was last updated. This cannot be set directly.
last_updated = ndb.DateTimeProperty(auto_now=True, indexed=True)
# Whether the current version of the model instance is deleted.
deleted = ndb.BooleanProperty(indexed=True, default=False)
@property
def id(self):
"""A unique id for this model instance."""
return self.key.id()
def _pre_put_hook(self):
"""This is run before model instances are saved to the datastore.
Subclasses of BaseModel should override this method.
"""
pass
class EntityNotFoundError(Exception):
"""Raised when no entity for a given id exists in the datastore."""
pass
@classmethod
def get(cls, entity_id, strict=True):
"""Gets an entity by id. Fails noisily if strict == True.
Args:
entity_id: str. The id of the entity.
strict: bool. Whether to fail noisily if no entity with the given id
exists in the datastore.
Returns:
None, if strict == False and no undeleted entity with the given id
exists in the datastore. Otherwise, the entity instance that
corresponds to the given id.
Raises:
- base_models.BaseModel.EntityNotFoundError: if strict == True and
no undeleted entity with the given id exists in the datastore.
"""
entity = cls.get_by_id(entity_id)
if entity and entity.deleted:
entity = None
if strict and entity is None:
raise cls.EntityNotFoundError(
'Entity for class %s with id %s not found' %
(cls.__name__, entity_id))
return entity
def put(self):
super(BaseModel, self).put()
@classmethod
def get_multi(cls, entity_ids, include_deleted=False):
"""Returns a list, each entry of which is the instance model
corresponding to the entity_id, except for the following two cases (in
which the corresponding entry is None instead):
- the instance is not found
- the instance has been deleted, and `include_deleted` is False.
"""
entity_keys = [ndb.Key(cls, entity_id) for entity_id in entity_ids]
entities = ndb.get_multi(entity_keys)
if not include_deleted:
for i in xrange(len(entities)):
if entities[i] and entities[i].deleted:
entities[i] = None
return entities
@classmethod
def put_multi(cls, entities):
return ndb.put_multi(entities)
def delete(self):
super(BaseModel, self).key.delete()
@classmethod
def get_all(cls, include_deleted_entities=False):
"""Returns a filterable iterable of all entities of this class.
If include_deleted_entities is True then entities that have been marked
deleted are returned as well.
"""
query = cls.query()
if not include_deleted_entities:
query = query.filter(cls.deleted == False) # pylint: disable=singleton-comparison
return query
@classmethod
def get_new_id(cls, entity_name):
"""Gets a new id for an entity, based on its name.
The returned id is guaranteed to be unique among all instances of this
entity.
Args:
entity_name: the name of the entity. Coerced to a utf-8 encoded
string. Defaults to ''.
Returns:
str: a new unique id for this entity class.
Raises:
- Exception: if an id cannot be generated within a reasonable number
of attempts.
"""
try:
entity_name = unicode(entity_name).encode('utf-8')
except Exception:
entity_name = ''
for _ in range(MAX_RETRIES):
new_id = utils.convert_to_hash(
'%s%s' % (entity_name, utils.get_random_int(RAND_RANGE)),
ID_LENGTH)
if not cls.get_by_id(new_id):
return new_id
raise Exception('New id generator is producing too many collisions.')
@classmethod
def _fetch_page_sorted_by_last_updated(
cls, query, page_size, urlsafe_start_cursor):
if urlsafe_start_cursor:
start_cursor = datastore_query.Cursor(urlsafe=urlsafe_start_cursor)
else:
start_cursor = None
result = query.order(-cls.last_updated).fetch_page(
page_size, start_cursor=start_cursor)
return (
result[0],
(result[1].urlsafe() if result[1] else None),
result[2])
class VersionedModel(BaseModel):
"""Model that handles storage of the version history of model instances.
To use this class, you must declare a SNAPSHOT_METADATA_CLASS and a
SNAPSHOT_CONTENT_CLASS. The former must contain the String fields
'committer_id', 'commit_type' and 'commit_message', and a JSON field for
the Python list of dicts, 'commit_cmds'. The latter must contain the JSON
field 'content'. The item that is being versioned must be serializable to a
JSON blob.
Note that commit() should be used for VersionedModels, as opposed to put()
for direct subclasses of BaseModel.
"""
# The class designated as the snapshot model. This should be a subclass of
# BaseSnapshotMetadataModel.
SNAPSHOT_METADATA_CLASS = None
# The class designated as the snapshot content model. This should be a
# subclass of BaseSnapshotContentModel.
SNAPSHOT_CONTENT_CLASS = None
# Whether reverting is allowed. Default is False.
ALLOW_REVERT = False
### IMPORTANT: Subclasses should only overwrite things above this line. ###
# The possible commit types.
_COMMIT_TYPE_CREATE = 'create'
_COMMIT_TYPE_REVERT = 'revert'
_COMMIT_TYPE_EDIT = 'edit'
_COMMIT_TYPE_DELETE = 'delete'
# A list containing the possible commit types.
COMMIT_TYPE_CHOICES = [
_COMMIT_TYPE_CREATE, _COMMIT_TYPE_REVERT, _COMMIT_TYPE_EDIT,
_COMMIT_TYPE_DELETE
]
# The reserved prefix for keys that are automatically inserted into a
# commit_cmd dict by this model.
_AUTOGENERATED_PREFIX = 'AUTO'
# The current version number of this instance. In each PUT operation,
# this number is incremented and a snapshot of the modified instance is
# stored in the snapshot metadata and content models. The snapshot
# version number starts at 1 when the model instance is first created.
# All data in this instance represents the version at HEAD; data about the
# previous versions is stored in the snapshot models.
version = ndb.IntegerProperty(default=0)
def _require_not_marked_deleted(self):
if self.deleted:
raise Exception('This model instance has been deleted.')
def _compute_snapshot(self):
"""Generates a snapshot (a Python dict) from the model fields."""
return self.to_dict(exclude=['created_on', 'last_updated'])
def _reconstitute(self, snapshot_dict):
"""Makes this instance into a reconstitution of the given snapshot."""
self.populate(**snapshot_dict)
return self
def _reconstitute_from_snapshot_id(self, snapshot_id):
"""Makes this instance into a reconstitution of the given snapshot."""
snapshot_model = self.SNAPSHOT_CONTENT_CLASS.get(snapshot_id)
snapshot_dict = snapshot_model.content
reconstituted_model = self._reconstitute(snapshot_dict)
# TODO(sll): The 'created_on' and 'last_updated' values here will be
# slightly different from the values the entity model would have had,
# since they correspond to the corresponding fields for the snapshot
# content model instead. Figure out whether this is a problem or not,
# and whether we need to record the contents of those fields in the
# actual entity model (in which case we also need a way to deal with
# old snapshots that don't have this information).
reconstituted_model.created_on = snapshot_model.created_on
reconstituted_model.last_updated = snapshot_model.last_updated
return reconstituted_model
@classmethod
def _get_snapshot_id(cls, instance_id, version_number):
return '%s%s%s' % (
instance_id, _VERSION_DELIMITER, version_number)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
if self.SNAPSHOT_METADATA_CLASS is None:
raise Exception('No snapshot metadata class defined.')
if self.SNAPSHOT_CONTENT_CLASS is None:
raise Exception('No snapshot content class defined.')
if not isinstance(commit_cmds, list):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
for item in commit_cmds:
if not isinstance(item, dict):
raise Exception(
'Expected commit_cmds to be a list of dicts, received %s'
% commit_cmds)
self.version += 1
snapshot = self._compute_snapshot()
snapshot_id = self._get_snapshot_id(self.id, self.version)
snapshot_metadata_instance = self.SNAPSHOT_METADATA_CLASS( # pylint: disable=not-callable
id=snapshot_id, committer_id=committer_id, commit_type=commit_type,
commit_message=commit_message, commit_cmds=commit_cmds)
snapshot_content_instance = self.SNAPSHOT_CONTENT_CLASS( # pylint: disable=not-callable
id=snapshot_id, content=snapshot)
transaction_services.run_in_transaction(
ndb.put_multi,
[snapshot_metadata_instance, snapshot_content_instance, self])
def delete(self, committer_id, commit_message, force_deletion=False):
if force_deletion:
current_version = self.version
version_numbers = [str(num + 1) for num in range(current_version)]
snapshot_ids = [
self._get_snapshot_id(self.id, version_number)
for version_number in version_numbers]
metadata_keys = [
ndb.Key(self.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(metadata_keys)
content_keys = [
ndb.Key(self.SNAPSHOT_CONTENT_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
ndb.delete_multi(content_keys)
super(VersionedModel, self).delete()
else:
self._require_not_marked_deleted() # pylint: disable=protected-access
self.deleted = True
commit_cmds = [{
'cmd': '%s_mark_deleted' % self._AUTOGENERATED_PREFIX
}]
self._trusted_commit(
committer_id, self._COMMIT_TYPE_DELETE, commit_message,
commit_cmds)
def put(self, *args, **kwargs):
"""For VersionedModels, this method is replaced with commit()."""
raise NotImplementedError
def commit(self, committer_id, commit_message, commit_cmds):
"""Saves a version snapshot and updates the model.
commit_cmds should give sufficient information to reconstruct the
commit.
"""
self._require_not_marked_deleted()
for commit_cmd in commit_cmds:
if 'cmd' not in commit_cmd:
raise Exception(
'Invalid commit_cmd: %s. Expected a \'cmd\' key.'
% commit_cmd)
if commit_cmd['cmd'].startswith(self._AUTOGENERATED_PREFIX):
raise Exception(
'Invalid change list command: %s' % commit_cmd['cmd'])
commit_type = (
self._COMMIT_TYPE_CREATE if self.version == 0 else
self._COMMIT_TYPE_EDIT)
self._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
@classmethod
def revert(cls, model, committer_id, commit_message, version_number):
model._require_not_marked_deleted() # pylint: disable=protected-access
if not model.ALLOW_REVERT:
raise Exception(
'Reverting of objects of type %s is not allowed.'
% model.__class__.__name__)
commit_cmds = [{
'cmd': (
'%s_revert_version_number' %
model._AUTOGENERATED_PREFIX), # pylint: disable=protected-access
'version_number': version_number
}]
# Do not overwrite the version number.
current_version = model.version
# If a new property is introduced after a certain version of a model,
# the property should be its default value when an old snapshot of the
# model is applied during reversion. E.g. states_schema_version in
# ExplorationModel may be added after some version of a saved
# exploration. If that exploration is reverted to a version that does
# not have a states_schema_version property, it should revert to the
# default states_schema_version value rather than taking the
# states_schema_version value from the latest exploration version.
# pylint: disable=protected-access
snapshot_id = model._get_snapshot_id(model.id, version_number)
new_model = cls(id=model.id)
new_model._reconstitute_from_snapshot_id(snapshot_id)
new_model.version = current_version
new_model._trusted_commit(
committer_id, cls._COMMIT_TYPE_REVERT, commit_message,
commit_cmds)
# pylint: enable=protected-access
@classmethod
def get_version(cls, model_instance_id, version_number):
"""Returns a model instance representing the given version.
The snapshot content is used to populate this model instance. The
snapshot metadata is not used.
"""
# pylint: disable=protected-access
cls.get(model_instance_id)._require_not_marked_deleted()
snapshot_id = cls._get_snapshot_id(model_instance_id, version_number)
return cls(id=model_instance_id)._reconstitute_from_snapshot_id(
snapshot_id)
# pylint: enable=protected-access
@classmethod
def get(cls, entity_id, strict=True, version=None):
"""Gets an entity by id. Fails noisily if strict == True."""
if version is None:
return super(VersionedModel, cls).get(entity_id, strict=strict)
else:
return cls.get_version(entity_id, version)
@classmethod
def get_snapshots_metadata(
cls, model_instance_id, version_numbers, allow_deleted=False):
"""Returns a list of dicts, each representing a model snapshot.
One dict is returned for each version number in the list of version
numbers requested. If any of the version numbers does not exist, an
error is raised.
If `allow_deleted` is False, an error is raised if the current model
has been deleted.
"""
# pylint: disable=protected-access
if not allow_deleted:
cls.get(model_instance_id)._require_not_marked_deleted()
snapshot_ids = [
cls._get_snapshot_id(model_instance_id, version_number)
for version_number in version_numbers]
# pylint: enable=protected-access
metadata_keys = [
ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
for snapshot_id in snapshot_ids]
returned_models = ndb.get_multi(metadata_keys)
for ind, model in enumerate(returned_models):
if model is None:
raise Exception(
'Invalid version number %s for model %s with id %s'
% (version_numbers[ind], cls.__name__, model_instance_id))
return [{
'committer_id': model.committer_id,
'commit_message': model.commit_message,
'commit_cmds': model.commit_cmds,
'commit_type': model.commit_type,
'version_number': version_numbers[ind],
'created_on_ms': utils.get_time_in_millisecs(model.created_on),
} for (ind, model) in enumerate(returned_models)]
class BaseSnapshotMetadataModel(BaseModel):
"""Base class for snapshot metadata classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The id of the user who committed this revision.
committer_id = ndb.StringProperty(required=True)
# The type of the commit associated with this snapshot.
commit_type = ndb.StringProperty(
required=True, choices=VersionedModel.COMMIT_TYPE_CHOICES)
# The commit message associated with this snapshot.
commit_message = ndb.TextProperty(indexed=False)
# A sequence of commands that can be used to describe this commit.
# Represented as a list of dicts.
commit_cmds = ndb.JsonProperty(indexed=False)
# Get the instance id from the versioned id (see
# _get_snapshot_id in VersionedModel)
def get_unversioned_instance_id(self):
return self.id[:self.id.rfind(_VERSION_DELIMITER)]
def get_version_string(self):
return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:]
class BaseSnapshotContentModel(BaseModel):
"""Base class for snapshot content classes.
The id of this model is computed using VersionedModel.get_snapshot_id().
"""
# The snapshot content, as a JSON blob.
content = ndb.JsonProperty(indexed=False)
# Get the instance id from the versioned id (see
# _get_snapshot_id in VersionedModel)
def get_unversioned_instance_id(self):
return self.id[:self.id.rfind(_VERSION_DELIMITER)]
def get_version_string(self):
return self.id[self.id.rfind(_VERSION_DELIMITER) + 1:]
class BaseMapReduceBatchResultsModel(BaseModel):
"""Base model for batch storage for MR jobs.
This model turns off caching, because caching would otherwise result in
stale data being shown after each MapReduce job run. Classes that an MR job
uses to store its batch results should subclass this class.
"""
_use_cache = False
_use_memcache = False
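# Illustrative sketch (not part of the original module): a minimal versioned
# model wired to its snapshot classes, following the contract described in
# the VersionedModel docstring. The class names, the `title` property and the
# 'create_new' command name below are hypothetical.
class ExampleSnapshotMetadataModel(BaseSnapshotMetadataModel):
    """Snapshot metadata for ExampleModel."""
    pass

class ExampleSnapshotContentModel(BaseSnapshotContentModel):
    """Snapshot content for ExampleModel."""
    pass

class ExampleModel(VersionedModel):
    """A tiny versioned model; its snapshot is the model's to_dict()."""
    SNAPSHOT_METADATA_CLASS = ExampleSnapshotMetadataModel
    SNAPSHOT_CONTENT_CLASS = ExampleSnapshotContentModel
    ALLOW_REVERT = True
    title = ndb.StringProperty(default='')

# Usage (illustrative): instances are saved with commit(), not put():
#
#     model = ExampleModel(id=ExampleModel.get_new_id('example'))
#     model.title = 'first draft'
#     model.commit('committer_id', 'Initial commit', [{'cmd': 'create_new'}])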
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
import pynamics.integration
import sympy
import numpy
import matplotlib.pyplot as plt
from pynamics.constraint import KinematicConstraint,AccelerationConstraint
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
tol=1e-5
lO = Constant(name='lO',system=system)
lA = Constant(name='lA',system=system)
lB = Constant(name='lB',system=system)
lC = Constant(name='lC',system=system)
lD = Constant(name='lD',system=system)
mO = Constant(name='mO',system=system)
mA = Constant(name='mA',system=system)
mB = Constant(name='mB',system=system)
mC = Constant(name='mC',system=system)
mD = Constant(name='mD',system=system)
I_main = Constant(name='I_main',system=system)
g = Constant(name='g',system=system)
b = Constant(name='b',system=system)
k = Constant(name='k',system=system)
stall_torque = Constant(name='stall_torque',system=system)
k_constraint = Constant(name='k_constraint',system=system)
b_constraint = Constant(name='b_constraint',system=system)
tinitial = 0
tfinal = 10
tstep = 1/30
t = numpy.r_[tinitial:tfinal:tstep]
preload1 = Constant(name='preload1',system=system)
preload2 = Constant(name='preload2',system=system)
preload3 = Constant(name='preload3',system=system)
preload4 = Constant(name='preload4',system=system)
preload5 = Constant(name='preload5',system=system)
constants = {}
constants[lO]=.5
constants[lA] = .75
constants[lB] = 1
constants[lC] = .75
constants[lD] = 1
constants[mO] = 3
constants[mA] = .1
constants[mB] = .1
constants[mC] = .1
constants[mD] = .1
constants[I_main] = 1
constants[g] = 9.81
constants[b] = 1e0
constants[k] = 1e2
constants[stall_torque] = 1e2
constants[k_constraint] = 1e5
constants[b_constraint] = 1e3
constants[preload1] = 0*pi/180
constants[preload2] = 0*pi/180
constants[preload3] = -180*pi/180
constants[preload4] = 0*pi/180
constants[preload5] = 180*pi/180
x,x_d,x_dd = Differentiable(name='x',system=system)
y,y_d,y_dd = Differentiable(name='y',system=system)
qO,qO_d,qO_dd = Differentiable(name='qO',system=system)
qA,qA_d,qA_dd = Differentiable(name='qA',system=system)
qB,qB_d,qB_dd = Differentiable(name='qB',system=system)
qC,qC_d,qC_dd = Differentiable(name='qC',system=system)
qD,qD_d,qD_dd = Differentiable(name='qD',system=system)
initialvalues={
x: 0,
x_d: 0,
y: 1.25,
y_d: 0,
qO: 0,
qO_d: 0,
qA: -0.89,
qA_d: 0,
qB: -2.64,
qB_d: 0,
qC: -pi+0.89,
qC_d: 0,
qD: -pi+2.64,
qD_d: 0}
statevariables = system.get_state_variables()
ini0 = [initialvalues[item] for item in statevariables]
N = Frame('N',system)
O = Frame('O',system)
A = Frame('A',system)
B = Frame('B',system)
C = Frame('C',system)
D = Frame('D',system)
system.set_newtonian(N)
O.rotate_fixed_axis(N,[0,0,1],qO,system)
A.rotate_fixed_axis(N,[0,0,1],qA,system)
B.rotate_fixed_axis(N,[0,0,1],qB,system)
C.rotate_fixed_axis(N,[0,0,1],qC,system)
D.rotate_fixed_axis(N,[0,0,1],qD,system)
pOrigin = 0*N.x+0*N.y
pOcm=x*N.x+y*N.y
pOA = pOcm+lO/2*O.x
pOC = pOcm-lO/2*O.x
pAB = pOA+lA*A.x
pBtip = pAB + lB*B.x
vBtip = pBtip.time_derivative(N,system)
pCD = pOC + lC*C.x
pDtip = pCD + lD*D.x
vDtip = pDtip.time_derivative(N,system)
points = [pDtip,pCD,pOC,pOA,pAB,pBtip]
eqs = []
eqs.append((pBtip-pDtip).dot(N.x))
eqs.append((pBtip-pDtip).dot(N.y))
constraint_system=KinematicConstraint(eqs)
variables = [qO, qA, qB, qC, qD]
guess = [initialvalues[item] for item in variables]
result = constraint_system.solve_numeric(variables,guess,constants)
ini = []
for item in system.get_state_variables():
if item in variables:
ini.append(result[item])
else:
ini.append(initialvalues[item])
points = PointsOutput(points, constant_values=constants)
points.calc(numpy.array([ini0,ini]),[0,1])
points.plot_time()
pAcm=pOA+lA/2*A.x
pBcm=pAB+lB/2*B.x
pCcm=pOC+lC/2*C.x
pDcm=pCD+lD/2*D.x
wOA = O.get_w_to(A)
wAB = A.get_w_to(B)
wOC = O.get_w_to(C)
wCD = C.get_w_to(D)
wBD = B.get_w_to(D)
BodyO = Body('BodyO',O,pOcm,mO,Dyadic.build(O,I_main,I_main,I_main),system)
#BodyA = Body('BodyA',A,pAcm,mA,Dyadic.build(A,I_leg,I_leg,I_leg),system)
#BodyB = Body('BodyB',B,pBcm,mB,Dyadic.build(B,I_leg,I_leg,I_leg),system)
#BodyC = Body('BodyC',C,pCcm,mC,Dyadic.build(C,I_leg,I_leg,I_leg),system)
#BodyD = Body('BodyD',D,pDcm,mD,Dyadic.build(D,I_leg,I_leg,I_leg),system)
ParticleA = Particle(pAcm,mA,'ParticleA')
ParticleB = Particle(pBcm,mB,'ParticleB')
ParticleC = Particle(pCcm,mC,'ParticleC')
ParticleD = Particle(pDcm,mD,'ParticleD')
system.addforce(-b*wOA,wOA)
system.addforce(-b*wAB,wAB)
system.addforce(-b*wOC,wOC)
system.addforce(-b*wCD,wCD)
system.addforce(-b*wBD,wBD)
#
stretch = -pBtip.dot(N.y)
stretch_s = (stretch+abs(stretch))
on = stretch_s/(2*stretch+1e-10)
system.add_spring_force1(k_constraint,-stretch_s*N.y,vBtip)
system.addforce(-b_constraint*vBtip*on,vBtip)
system.add_spring_force1(k,(qA-qO-preload1)*N.z,wOA)
system.add_spring_force1(k,(qB-qA-preload2)*N.z,wAB)
system.add_spring_force1(k,(qC-qO-preload3)*N.z,wOC)
system.add_spring_force1(k,(qD-qC-preload4)*N.z,wCD)
system.add_spring_force1(k,(qD-qB-preload5)*N.z,wBD)
system.addforcegravity(-g*N.y)
import pynamics.time_series
x = [0,2,2,5,5,6,6,10]
y = [0,0,1,1,-1,-1,0,0]
my_signal, ft2 = pynamics.time_series.build_smoothed_time_signal(x,y,t,'my_signal',window_time_width = .1)
torque = my_signal*stall_torque
system.addforce(torque*O.z,wOA)
system.addforce(-torque*O.z,wOC)
#
eq = []
eq.append(pBtip-pDtip)
eq.append(O.y)
eq_d= [item.time_derivative() for item in eq]
eq_dd= [item.time_derivative() for item in eq_d]
eq_dd_scalar = []
eq_dd_scalar.append(eq_dd[0].dot(N.x))
eq_dd_scalar.append(eq_dd[0].dot(N.y))
eq_dd_scalar.append(eq_dd[1].dot(N.y))
c = AccelerationConstraint(eq_dd_scalar)
# c.linearize(0)
system.add_constraint(c)
#
f,ma = system.getdynamics()
func1 = system.state_space_post_invert(f,ma,constants = constants,variable_functions = {my_signal:ft2})
states=pynamics.integration.integrate(func1,ini,t,rtol=tol,atol=tol)
KE = system.get_KE()
PE = system.getPEGravity(0*N.x) - system.getPESprings()
energy = Output([KE-PE], constant_values=constants)
energy.calc(states,t)
energy.plot_time()
#torque_plot = Output([torque])
#torque_plot.calc(states,t)
#torque_plot.plot_time()
points = [pDtip,pCD,pOC,pOA,pAB,pBtip]
points = PointsOutput(points, constant_values=constants)
y = points.calc(states,t)
y = y.reshape((-1,6,2))
plt.figure()
for item in y[::30]:
plt.plot(*(item.T))
#points.animate(fps = 30, movie_name='parallel_five_bar_jumper.mp4',lw=2)
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility methods used by the deploy command."""
import json
import os
import re
from gae_ext_runtime import ext_runtime
from googlecloudsdk.api_lib.app import appengine_api_client
from googlecloudsdk.api_lib.app import cloud_build
from googlecloudsdk.api_lib.app import docker_image
from googlecloudsdk.api_lib.app import metric_names
from googlecloudsdk.api_lib.app import runtime_builders
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.app.images import config
from googlecloudsdk.api_lib.app.runtimes import fingerprinter
from googlecloudsdk.api_lib.cloudbuild import build as cloudbuild_build
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.command_lib.app import exceptions as app_exc
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.console import progress_tracker
from googlecloudsdk.core.util import platforms
from googlecloudsdk.third_party.appengine.api import appinfo
from googlecloudsdk.third_party.appengine.tools import context_util
DEFAULT_DOMAIN = 'appspot.com'
DEFAULT_SERVICE = 'default'
ALT_SEPARATOR = '-dot-'
MAX_DNS_LABEL_LENGTH = 63 # http://tools.ietf.org/html/rfc2181#section-11
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
# Technically, this should be 260 because of the drive, ':\', and a null
# terminator, but any time we're getting close we're in dangerous territory.
_WINDOWS_MAX_PATH = 256
class WindowMaxPathError(exceptions.Error):
"""Raised if a file cannot be read because of the MAX_PATH limitation."""
_WINDOWS_MAX_PATH_ERROR_TEMPLATE = """\
The following file couldn't be read because its path is too long:
[{0}]
For more information on this issue and possible workarounds, please read the
following (links are specific to Node.js, but the information is generally
applicable):
* https://github.com/Microsoft/nodejstools/issues/69
* https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#max_path-explanation-and-workarounds\
"""
def __init__(self, filename):
super(WindowMaxPathError, self).__init__(
self._WINDOWS_MAX_PATH_ERROR_TEMPLATE.format(filename))
class DockerfileError(exceptions.Error):
"""Raised if a Dockerfile was found along with a non-custom runtime."""
class NoDockerfileError(exceptions.Error):
"""No Dockerfile found."""
class UnsatisfiedRequirementsError(exceptions.Error):
"""Raised if we are unable to detect the runtime."""
def _GetDockerfiles(info, dockerfile_dir):
"""Returns file objects to create dockerfiles if the user doesn't have them.
Args:
info: (googlecloudsdk.api_lib.app.yaml_parsing.ServiceYamlInfo)
The service config.
dockerfile_dir: str, path to the directory with the Dockerfile
Raises:
DockerfileError: Raised if a user supplied a Dockerfile and a non-custom
runtime.
NoDockerfileError: Raised if a user didn't supply a Dockerfile and chose a
custom runtime.
UnsatisfiedRequirementsError: Raised if the code in the directory doesn't
satisfy the requirements of the specified runtime type.
Returns:
A dictionary of filename to (str) Dockerfile contents.
"""
dockerfile = os.path.join(dockerfile_dir, 'Dockerfile')
if info.runtime != 'custom' and os.path.exists(dockerfile):
raise DockerfileError(
'There is a Dockerfile in the current directory, and the runtime field '
'in {0} is currently set to [runtime: {1}]. To use your Dockerfile to '
'build a custom runtime, set the runtime field in {0} to '
'[runtime: custom]. To continue using the [{1}] runtime, please omit '
'the Dockerfile from this directory.'.format(info.file, info.runtime))
# If we're "custom" there needs to be a Dockerfile.
if info.runtime == 'custom':
if os.path.exists(dockerfile):
log.info('Using %s found in %s', config.DOCKERFILE, dockerfile_dir)
return {}
else:
raise NoDockerfileError(
'You must provide your own Dockerfile when using a custom runtime. '
'Otherwise provide a "runtime" field with one of the supported '
'runtimes.')
# Check the fingerprinting based code.
gen_files = {}
params = ext_runtime.Params(appinfo=info.parsed, deploy=True)
configurator = fingerprinter.IdentifyDirectory(dockerfile_dir, params)
if configurator:
dockerfiles = configurator.GenerateConfigData()
gen_files.update((d.filename, d.contents) for d in dockerfiles)
return gen_files
# Then throw an error.
else:
raise UnsatisfiedRequirementsError(
'Your application does not satisfy all of the requirements for a '
'runtime of type [{0}]. Please correct the errors and try '
'again.'.format(info.runtime))
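# Illustrative only: when the fingerprinter matches (e.g. a Python app), the
# dict returned by _GetDockerfiles maps generated file names to contents, for
# instance something like {'Dockerfile': 'FROM ...', '.dockerignore': '...'};
# the exact files come from the matched configurator's GenerateConfigData().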
def _GetSourceContextsForUpload(source_dir):
"""Gets source context file information.
Args:
source_dir: str, path to the service's source directory
Returns:
A dict of filename to (str) source context file contents.
"""
source_contexts = {}
try:
contexts = context_util.CalculateExtendedSourceContexts(source_dir)
source_contexts[context_util.EXT_CONTEXT_FILENAME] = json.dumps(contexts)
context = context_util.BestSourceContext(contexts)
source_contexts[context_util.CONTEXT_FILENAME] = json.dumps(context)
# This error could either be raised by context_util.BestSourceContext or by
# context_util.CalculateExtendedSourceContexts (in which case stop looking)
except context_util.GenerateSourceContextError as e:
log.warn('Could not generate [{name}]: {error}'.format(
name=context_util.CONTEXT_FILENAME, error=e))
return source_contexts
def _GetDomainAndDisplayId(project_id):
"""Returns tuple (displayed app id, domain)."""
l = project_id.split(':')
if len(l) == 1:
return l[0], None
return l[1], l[0]
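# For example: 'example.com:my-app' -> ('my-app', 'example.com'), while a plain
# project ID such as 'my-app' -> ('my-app', None). (IDs here are illustrative.)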
def _GetImageName(project, service, version):
"""Returns image tag according to App Engine convention."""
display, domain = _GetDomainAndDisplayId(project)
return (config.DOCKER_IMAGE_NAME_DOMAIN_FORMAT if domain
else config.DOCKER_IMAGE_NAME_FORMAT).format(
display=display, domain=domain, service=service, version=version)
def BuildAndPushDockerImage(project, service, source_dir, version_id,
code_bucket_ref, use_runtime_builders=False):
"""Builds and pushes a set of docker images.
Args:
project: str, The project being deployed to.
service: ServiceYamlInfo, The parsed service config.
source_dir: str, path to the service's source directory
version_id: The version id to deploy these services under.
code_bucket_ref: The reference to the GCS bucket where the source will be
uploaded.
use_runtime_builders: bool, whether to use the new CloudBuild-based runtime
builders (alternative is old externalized runtimes).
Returns:
str, The name of the pushed container image.
"""
# Nothing to do if this is not an image-based deployment.
if not service.RequiresImage():
return None
log.status.Print(
'Building and pushing image for service [{service}]'
.format(service=service.module))
gen_files = dict(_GetSourceContextsForUpload(source_dir))
if not use_runtime_builders:
gen_files.update(_GetDockerfiles(service, source_dir))
image = docker_image.Image(
dockerfile_dir=source_dir,
repo=_GetImageName(project, service.module, version_id),
nocache=False,
tag=config.DOCKER_IMAGE_TAG)
object_ref = storage_util.ObjectReference(code_bucket_ref, image.tagged_repo)
try:
cloud_build.UploadSource(image.dockerfile_dir, object_ref,
gen_files=gen_files,
skip_files=service.parsed.skip_files.regex)
except (OSError, IOError) as err:
if platforms.OperatingSystem.IsWindows():
if err.filename and len(err.filename) > _WINDOWS_MAX_PATH:
raise WindowMaxPathError(err.filename)
raise
metrics.CustomTimedEvent(metric_names.CLOUDBUILD_UPLOAD)
if use_runtime_builders:
builder_version = runtime_builders.RuntimeBuilderVersion.FromServiceInfo(
service)
build = builder_version.LoadCloudBuild({'_OUTPUT_IMAGE': image.tagged_repo})
else:
build = cloud_build.GetDefaultBuild(image.tagged_repo)
cloudbuild_build.CloudBuildClient().ExecuteCloudBuild(
cloud_build.FixUpBuild(build, object_ref), project=project)
metrics.CustomTimedEvent(metric_names.CLOUDBUILD_EXECUTE)
return image.tagged_repo
def DoPrepareManagedVms(gae_client):
"""Call an API to prepare the for App Engine Flexible."""
try:
message = 'If this is your first deployment, this may take a while'
with progress_tracker.ProgressTracker(message):
# Note: this doesn't actually boot the VM, it just prepares some stuff
# for the project via an undocumented Admin API.
gae_client.PrepareVmRuntime()
log.status.Print()
except util.RPCError as err:
# Any failures later due to an unprepared project will be noisy, so it's
# okay not to fail here.
log.warn(
("We couldn't validate that your project is ready to deploy to App "
'Engine Flexible Environment. If deployment fails, please check the '
'following message and try again:\n') + str(err))
def UseSsl(handlers):
"""Returns whether the root URL for an application is served over HTTPS.
More specifically, returns the 'secure' setting of the handler that will serve
  the application. This can be 'always', 'optional', or 'never', indicating
  whether the root URL is always, optionally, or never served over HTTPS.
Will miss a small number of cases, but HTTP is always okay (an HTTP URL to an
HTTPS-only service will result in a redirect).
Args:
handlers: List of googlecloudsdk.third_party.appengine.api.appinfo.URLMap,
the configured URL handlers for the application
Returns:
str, the 'secure' setting of the handler for the root URL.
"""
for handler in handlers:
try:
if re.match(handler.url + '$', '/'):
return handler.secure
except re.error:
# AppEngine uses POSIX Extended regular expressions, which are not 100%
# compatible with Python's re module.
pass
return appinfo.SECURE_HTTP
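# Illustrative: the handler whose url pattern matches '/' (e.g. url='/.*')
# decides the result, and its 'secure' setting ('always', 'optional', or
# 'never') is returned; if no handler matches '/', the appinfo.SECURE_HTTP
# default above is used.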
def GetAppHostname(app=None, app_id=None, service=None, version=None,
use_ssl=appinfo.SECURE_HTTP, deploy=True):
"""Returns the hostname of the given version of the deployed app.
Args:
app: Application resource. One of {app, app_id} must be given.
app_id: str, project ID. One of {app, app_id} must be given. If both are
provided, the hostname from app is preferred.
service: str, the (optional) service being deployed
version: str, the deployed version ID (omit to get the default version URL).
    use_ssl: one of the appinfo.SECURE_* constants, indicating whether the URL
      should be constructed with an HTTP or HTTPS scheme.
deploy: bool, if this is called during a deployment.
Returns:
str. Constructed URL.
Raises:
TypeError: if neither an app nor an app_id is provided
"""
if not app and not app_id:
raise TypeError('Must provide an application resource or application ID.')
version = version or ''
service_name = service or ''
if service == DEFAULT_SERVICE:
service_name = ''
domain = DEFAULT_DOMAIN
if not app and ':' in app_id:
api_client = appengine_api_client.GetApiClient()
app = api_client.GetApplication()
if app:
app_id, domain = app.defaultHostname.split('.', 1)
# Normally, AppEngine URLs are of the form
# 'http[s]://version.service.app.appspot.com'. However, the SSL certificate
# for appspot.com is not valid for subdomains of subdomains of appspot.com
# (e.g. 'https://app.appspot.com/' is okay; 'https://service.app.appspot.com/'
# is not). To deal with this, AppEngine recognizes URLs like
# 'http[s]://version-dot-service-dot-app.appspot.com/'.
#
# This works well as long as the domain name part constructed in this fashion
# is less than 63 characters long, as per the DNS spec. If the domain name
# part is longer than that, we are forced to use the URL with an invalid
# certificate.
#
# We've tried to do the best possible thing in every case here.
  # list() so the parts can be joined in more than one branch below.
  subdomain_parts = list(filter(bool, [version, service_name, app_id]))
scheme = 'http'
if use_ssl == appinfo.SECURE_HTTP:
subdomain = '.'.join(subdomain_parts)
scheme = 'http'
else:
subdomain = ALT_SEPARATOR.join(subdomain_parts)
if len(subdomain) <= MAX_DNS_LABEL_LENGTH:
scheme = 'https'
else:
if deploy:
format_parts = ['$VERSION_ID', '$SERVICE_ID', '$APP_ID']
subdomain_format = ALT_SEPARATOR.join(
[j for (i, j) in zip([version, service_name, app_id], format_parts)
if i])
msg = ('This deployment will result in an invalid SSL certificate for '
'service [{0}]. The total length of your subdomain in the '
'format {1} should not exceed {2} characters. Please verify '
'that the certificate corresponds to the parent domain of your '
'application when you connect.').format(service,
subdomain_format,
MAX_DNS_LABEL_LENGTH)
log.warn(msg)
subdomain = '.'.join(subdomain_parts)
if use_ssl == appinfo.SECURE_HTTP_OR_HTTPS:
scheme = 'http'
elif use_ssl == appinfo.SECURE_HTTPS:
if not deploy:
msg = ('Most browsers will reject the SSL certificate for '
'service [{0}].').format(service)
log.warn(msg)
scheme = 'https'
return '{0}://{1}.{2}'.format(scheme, subdomain, domain)
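# Illustrative results of GetAppHostname (hypothetical IDs), with
# app_id='my-app', service='api', version='v1':
#   use_ssl=appinfo.SECURE_HTTP  -> 'http://v1.api.my-app.appspot.com'
#   use_ssl=appinfo.SECURE_HTTPS -> 'https://v1-dot-api-dot-my-app.appspot.com'
# (the dotted form is used instead, with a warning, if the '-dot-' subdomain
# would exceed MAX_DNS_LABEL_LENGTH).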
DEFAULT_DEPLOYABLE = 'app.yaml'
def CreateAppYamlForAppDirectory(directory):
"""Ensures that an app.yaml exists or creates it if necessary.
Attempt to fingerprint the directory and create one. This is an interactive
process. If this does not raise an error, the app.yaml is guaranteed to exist
once this is done.
Args:
directory: str, The path to the directory to create the app.yaml in.
Raises:
NoAppIdentifiedError, If the application type could not be identified, or
if a yaml file could not be generated based on the state of the source.
Returns:
str, The path to the created app.yaml file.
"""
console_io.PromptContinue(
'Deployment to Google App Engine requires an app.yaml file. '
'This command will run `gcloud beta app gen-config` to generate an '
'app.yaml file for you in the current directory (if the current '
'directory does not contain an App Engine service, please answer '
'"no").', cancel_on_no=True)
# This indicates we don't have an app.yaml, we do not want to generate
# docker files (we will do that in a single place later), and that we don't
# want to persist the dockerfiles.
params = ext_runtime.Params(appinfo=None, deploy=False, custom=False)
configurator = fingerprinter.IdentifyDirectory(directory, params=params)
if configurator is None:
raise app_exc.NoAppIdentifiedError(
'Could not identify an app in the current directory.\n\n'
'Please prepare an app.yaml file for your application manually '
'and deploy again.')
configurator.MaybeWriteAppYaml()
yaml_path = os.path.join(directory, DEFAULT_DEPLOYABLE)
if not os.path.exists(yaml_path):
raise app_exc.NoAppIdentifiedError(
'Failed to create an app.yaml for your app.\n\n'
'Please prepare an app.yaml file for your application manually '
'and deploy again.')
return yaml_path
|
|
#!/usr/bin/env python
# Authors:
# Trevor Perrin
# Marcelo Fernandez - bugfix and NPN support
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
from __future__ import print_function
import sys
import os
import os.path
import socket
import time
import getopt
try:
import httplib
from SocketServer import *
from BaseHTTPServer import *
from SimpleHTTPServer import *
except ImportError:
# Python 3.x
from http import client as httplib
from socketserver import *
from http.server import *
if __name__ != "__main__":
raise "This must be run as a command, not used as a module!"
from tlslite.api import *
from tlslite import __version__
try:
from tack.structures.Tack import Tack
except ImportError:
pass
def printUsage(s=None):
if s:
print("ERROR: %s" % s)
print("")
print("Version: %s" % __version__)
print("")
print("RNG: %s" % prngName)
print("")
print("Modules:")
if tackpyLoaded:
print(" tackpy : Loaded")
else:
print(" tackpy : Not Loaded")
if m2cryptoLoaded:
print(" M2Crypto : Loaded")
else:
print(" M2Crypto : Not Loaded")
if pycryptoLoaded:
print(" pycrypto : Loaded")
else:
print(" pycrypto : Not Loaded")
if gmpyLoaded:
print(" GMPY : Loaded")
else:
print(" GMPY : Not Loaded")
print("")
print("""Commands:
server
[-k KEY] [-c CERT] [-t TACK] [-v VERIFIERDB] [-d DIR]
[--reqcert] HOST:PORT
client
[-k KEY] [-c CERT] [-u USER] [-p PASS]
HOST:PORT
""")
sys.exit(-1)
def printError(s):
"""Print error message and exit"""
sys.stderr.write("ERROR: %s\n" % s)
sys.exit(-1)
def handleArgs(argv, argString, flagsList=[]):
# Convert to getopt argstring format:
# Add ":" after each arg, ie "abc" -> "a:b:c:"
getOptArgString = ":".join(argString) + ":"
try:
opts, argv = getopt.getopt(argv, getOptArgString, flagsList)
except getopt.GetoptError as e:
printError(e)
# Default values if arg not present
privateKey = None
certChain = None
username = None
password = None
tacks = None
verifierDB = None
reqCert = False
directory = None
for opt, arg in opts:
if opt == "-k":
s = open(arg, "rb").read()
privateKey = parsePEMKey(s, private=True)
elif opt == "-c":
s = open(arg, "rb").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
elif opt == "-u":
username = arg
elif opt == "-p":
password = arg
elif opt == "-t":
if tackpyLoaded:
s = open(arg, "rU").read()
tacks = Tack.createFromPemList(s)
elif opt == "-v":
verifierDB = VerifierDB(arg)
verifierDB.open()
elif opt == "-d":
directory = arg
elif opt == "--reqcert":
reqCert = True
else:
assert(False)
if not argv:
printError("Missing address")
if len(argv)>1:
printError("Too many arguments")
#Split address into hostname/port tuple
address = argv[0]
address = address.split(":")
if len(address) != 2:
raise SyntaxError("Must specify <host>:<port>")
address = ( address[0], int(address[1]) )
# Populate the return list
retList = [address]
if "k" in argString:
retList.append(privateKey)
if "c" in argString:
retList.append(certChain)
if "u" in argString:
retList.append(username)
if "p" in argString:
retList.append(password)
if "t" in argString:
retList.append(tacks)
if "v" in argString:
retList.append(verifierDB)
if "d" in argString:
retList.append(directory)
if "reqcert" in flagsList:
retList.append(reqCert)
return retList
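# Illustrative: handleArgs(['-k', 'key.pem', '-c', 'cert.pem', 'host:443'], "kcup")
# returns [('host', 443), privateKey, certChain, None, None] -- the address tuple
# first, then one slot per letter of argString (and per entry of flagsList).
# File names above are placeholders.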
def printGoodConnection(connection, seconds):
print(" Handshake time: %.3f seconds" % seconds)
print(" Version: %s" % connection.getVersionName())
print(" Cipher: %s %s" % (connection.getCipherName(),
connection.getCipherImplementation()))
if connection.session.srpUsername:
print(" Client SRP username: %s" % connection.session.srpUsername)
if connection.session.clientCertChain:
print(" Client X.509 SHA1 fingerprint: %s" %
connection.session.clientCertChain.getFingerprint())
if connection.session.serverCertChain:
print(" Server X.509 SHA1 fingerprint: %s" %
connection.session.serverCertChain.getFingerprint())
if connection.session.serverName:
print(" SNI: %s" % connection.session.serverName)
if connection.session.tackExt:
if connection.session.tackInHelloExt:
emptyStr = "\n (via TLS Extension)"
else:
emptyStr = "\n (via TACK Certificate)"
print(" TACK: %s" % emptyStr)
print(str(connection.session.tackExt))
print(" Next-Protocol Negotiated: %s" % connection.next_proto)
def clientCmd(argv):
(address, privateKey, certChain, username, password) = \
handleArgs(argv, "kcup")
if (certChain and not privateKey) or (not certChain and privateKey):
raise SyntaxError("Must specify CERT and KEY together")
if (username and not password) or (not username and password):
raise SyntaxError("Must specify USER with PASS")
if certChain and username:
raise SyntaxError("Can use SRP or client cert for auth, not both")
#Connect to server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect(address)
connection = TLSConnection(sock)
settings = HandshakeSettings()
settings.useExperimentalTackExtension = True
try:
start = time.clock()
if username and password:
connection.handshakeClientSRP(username, password,
settings=settings, serverName=address[0])
else:
connection.handshakeClientCert(certChain, privateKey,
settings=settings, serverName=address[0])
stop = time.clock()
print("Handshake success")
except TLSLocalAlert as a:
if a.description == AlertDescription.user_canceled:
print(str(a))
else:
raise
sys.exit(-1)
except TLSRemoteAlert as a:
if a.description == AlertDescription.unknown_psk_identity:
if username:
print("Unknown username")
else:
raise
elif a.description == AlertDescription.bad_record_mac:
if username:
print("Bad username or password")
else:
raise
elif a.description == AlertDescription.handshake_failure:
print("Unable to negotiate mutually acceptable parameters")
else:
raise
sys.exit(-1)
printGoodConnection(connection, stop-start)
connection.close()
def serverCmd(argv):
(address, privateKey, certChain, tacks,
verifierDB, directory, reqCert) = handleArgs(argv, "kctbvd", ["reqcert"])
if (certChain and not privateKey) or (not certChain and privateKey):
raise SyntaxError("Must specify CERT and KEY together")
if tacks and not certChain:
raise SyntaxError("Must specify CERT with Tacks")
print("I am an HTTPS test server, I will listen on %s:%d" %
(address[0], address[1]))
if directory:
os.chdir(directory)
print("Serving files from %s" % os.getcwd())
if certChain and privateKey:
print("Using certificate and private key...")
if verifierDB:
print("Using verifier DB...")
if tacks:
print("Using Tacks...")
#############
sessionCache = SessionCache()
class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, HTTPServer):
def handshake(self, connection):
print("About to handshake...")
activationFlags = 0
if tacks:
if len(tacks) == 1:
activationFlags = 1
elif len(tacks) == 2:
activationFlags = 3
try:
start = time.clock()
settings = HandshakeSettings()
settings.useExperimentalTackExtension=True
connection.handshakeServer(certChain=certChain,
privateKey=privateKey,
verifierDB=verifierDB,
tacks=tacks,
activationFlags=activationFlags,
sessionCache=sessionCache,
settings=settings,
nextProtos=[b"http/1.1"])
# As an example (does not work here):
#nextProtos=[b"spdy/3", b"spdy/2", b"http/1.1"])
stop = time.clock()
except TLSRemoteAlert as a:
if a.description == AlertDescription.user_canceled:
print(str(a))
return False
else:
raise
except TLSLocalAlert as a:
if a.description == AlertDescription.unknown_psk_identity:
                    # 'username' is not defined on the server side; use the
                    # presence of an SRP verifier database to decide whether
                    # this alert is an expected authentication failure.
                    if verifierDB:
                        print("Unknown SRP username")
                        return False
                    else:
                        raise
                elif a.description == AlertDescription.bad_record_mac:
                    if verifierDB:
                        print("Bad SRP username or password")
                        return False
                    else:
                        raise
elif a.description == AlertDescription.handshake_failure:
print("Unable to negotiate mutually acceptable parameters")
return False
else:
raise
connection.ignoreAbruptClose = True
printGoodConnection(connection, stop-start)
return True
httpd = MyHTTPServer(address, SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
if len(sys.argv) < 2:
printUsage("Missing command")
elif sys.argv[1] == "client"[:len(sys.argv[1])]:
clientCmd(sys.argv[2:])
elif sys.argv[1] == "server"[:len(sys.argv[1])]:
serverCmd(sys.argv[2:])
else:
printUsage("Unknown command: %s" % sys.argv[1])
|
|
import json
import os
import sys
import unittest
if sys.version_info.major >= 3:
from io import StringIO
else:
from StringIO import StringIO
try:
import pathlib
except ImportError:
pass
import picogeojson as pico
from picogeojson import Serializer, Deserializer, DEFAULTCRS
from picogeojson.transformations import merge, burst
import picogeojson.bbox as bbox
from picogeojson.serializer import fixed_precision
from picogeojson.map import Map
from type_tests import ClosedRingTests, InvalidCoordTests, FuncTests, AfterTests, ValidatorTests
from map_tests import MapTests
TESTDATA = "tests/"
class DeserializerTests(unittest.TestCase):
def setUp(self):
self.deserializer = Deserializer()
return
def test_shorthand(self):
res = pico.fromfile(os.path.join(TESTDATA, 'point.json'))
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
return
def test_fromdict(self):
d = {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": [[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]]
},
"properties": {
"cover": "water",
"color": "blue"
}
}
geom = pico.fromdict(d)
self.assertEqual(d["geometry"]["coordinates"], geom.raw.geometry.coordinates)
self.assertEqual(d["properties"], geom.raw.properties)
return
def test_shorthand_result(self):
res = pico.fromfile(os.path.join(TESTDATA, 'point.json'))
self.assertEqual(type(res), Map)
for pt in res.points:
self.assertEqual(pt.coordinates, [100.0, 0.0])
return
def test_shorthand_string(self):
with open(os.path.join(TESTDATA, 'point.json'), 'r') as f:
string = f.read()
res = pico.fromstring(string)
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
return
def test_shorthand_string_result(self):
with open(os.path.join(TESTDATA, 'point.json'), 'r') as f:
string = f.read()
res = pico.fromstring(string)
self.assertEqual(type(res), Map)
for pt in res.points:
self.assertEqual(pt.coordinates, [100.0, 0.0])
return
def test_point_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'point.json'))
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
# check __call__ version
res = self.deserializer(os.path.join(TESTDATA, 'point.json'))
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
return
def test_point_read_fileobject(self):
with open(os.path.join(TESTDATA, 'point.json'), 'r') as f:
res = self.deserializer.fromfile(f)
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
# check __call__ version
with open(os.path.join(TESTDATA, 'point.json'), 'r') as f:
res = self.deserializer(f)
self.assertEqual(res.raw.coordinates, [100.0, 0.0])
return
def test_linestring_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'linestring.json'))
self.assertEqual(res.raw.coordinates, [[100.0, 0.0], [101.0, 1.0]])
return
def test_polygon_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'polygon.json'))
self.assertEqual(res.raw.coordinates,
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]])
return
@unittest.skipIf(sys.version_info < (3, 4), "pathlib support missing")
def test_polygon_read_pathlib(self):
res = self.deserializer.fromfile(pathlib.Path(TESTDATA) / 'polygon.json')
self.assertEqual(res.raw.coordinates,
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]])
return
def test_multipoint_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'multipoint.json'))
self.assertEqual(res.raw.coordinates, [[100.0, 0.0], [101.0, 1.0]])
return
def test_multilinestring_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'multilinestring.json'))
self.assertEqual(res.raw.coordinates, [[[100.0, 0.0], [101.0, 1.0]],
[[102.0, 2.0], [103.0, 3.0]]])
return
def test_multipolygon_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'multipolygon.json'))
self.assertEqual(res.raw.coordinates,
[[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
[[100.2, 0.2], [100.2, 0.8], [100.8, 0.8], [100.8, 0.2], [100.2, 0.2]]]])
return
def test_geometrycollection_read(self):
res = self.deserializer.fromfile(os.path.join(TESTDATA, 'geometrycollection.json'))
self.assertEqual(len(res.raw.geometries), 2)
self.assertTrue(isinstance(res.raw.geometries[0], pico.Point))
self.assertTrue(isinstance(res.raw.geometries[1], pico.LineString))
return
def test_feature_read(self):
fc = self.deserializer.fromfile(os.path.join(TESTDATA, 'feature.json')).raw
self.assertEqual(fc.id, 0)
self.assertEqual(fc.geometry.coordinates,
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]])
self.assertEqual(type(fc.geometry).__name__, "Polygon")
self.assertEqual(fc.properties["name"], "Strathcona")
def test_featurecollection_read(self):
fc = self.deserializer.fromfile(os.path.join(TESTDATA, 'featurecollection.json')).raw
self.assertTrue(isinstance(fc.features[0].geometry, pico.Point))
self.assertEqual(fc.features[0].geometry.coordinates, [102.0, 0.5])
self.assertEqual(fc.features[0].properties, {"prop0": "value0"})
self.assertTrue(isinstance(fc.features[1].geometry, pico.LineString))
self.assertEqual(fc.features[1].geometry.coordinates,
[[102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]])
self.assertEqual(fc.features[1].properties,
{"prop0": "value0", "prop1": 0.0})
self.assertTrue(isinstance(fc.features[2].geometry, pico.Polygon))
self.assertEqual(fc.features[2].geometry.coordinates,
[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
[100.0, 1.0], [100.0, 0.0]]])
self.assertEqual(fc.features[2].properties,
{"prop0": "value0", "prop1": {"this": "that"}})
return
class SerializerTests(unittest.TestCase):
def setUp(self):
self.serializer = Serializer(write_crs=True)
return
def test_shorthand(self):
pt = pico.Point((44.0, 17.0), DEFAULTCRS)
d = json.loads(pico.tostring(pt))
self.assertEqual(tuple(pt.coordinates), tuple(d["coordinates"]))
self.assertTrue("crs" not in d)
d = json.loads(pico.tostring(pt, write_crs=True))
self.assertEqual(pt.crs, d["crs"])
def test_shorthand_file(self):
pt = pico.Point((44.0, 17.0), DEFAULTCRS)
f = StringIO()
pico.tofile(pt, f)
f.seek(0)
pt2 = pico.fromfile(f).raw
f.close()
self.assertEqual(tuple(pt.coordinates), tuple(pt2.coordinates))
self.assertEqual(pt.crs, pt2.crs)
def test_todict(self):
geom = pico.Feature(pico.Polygon([[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]]),
{"cover": "water", "color": "blue"})
d = pico.todict(geom)
self.assertEqual(d["geometry"]["coordinates"], geom.geometry.coordinates)
self.assertEqual(d["properties"], geom.properties)
return
def test_serialize_point(self):
pt = pico.Point((44.0, 17.0), DEFAULTCRS)
s = self.serializer(pt)
d = json.loads(s)
self.assertEqual(tuple(pt.coordinates), tuple(d["coordinates"]))
return
def test_serialize_linestring(self):
linestring = pico.LineString([[44.0, 17.0], [43.0, 17.5], [-2.1, 4.0]],
DEFAULTCRS)
s = self.serializer(linestring)
d = json.loads(s)
self.assertEqual(list(linestring.coordinates), list(d["coordinates"]))
return
def test_serialize_polygon(self):
polygon = pico.Polygon([[[44.0, 17.0], [43.0, 17.5], [-2.1, 4.0], [44.0, 17.0]],
[[1.0, 1.0], [0.8, -0.7], [0.5, -0.5], [1.0, 1.0]]],
DEFAULTCRS)
s = self.serializer(polygon)
d = json.loads(s)
self.assertEqual(list(polygon.coordinates), list(d["coordinates"]))
return
def test_serialize_polygon_antimeridian(self):
polygon = pico.Polygon([[(172, -20), (-179, -20), (-177, -25),
(172, -25), (172, -20)]])
s = self.serializer(polygon)
d = json.loads(s)
self.assertEqual(d["type"], "MultiPolygon")
return
def test_serialize_multipoint(self):
multipoint = pico.MultiPoint([[44.0, 17.0], [43.0, 17.5], [-2.1, 4.0]],
DEFAULTCRS)
s = self.serializer(multipoint)
d = json.loads(s)
self.assertEqual(list(multipoint.coordinates), list(d["coordinates"]))
return
def test_serialize_multilinestring(self):
multilinestring = pico.MultiLineString(
[[[44.0, 17.0], [43.0, 17.5], [-2.1, 4.0]],
[[49.0, -3.0], [48.0, -2.5], [2.9, -16.0]]],
DEFAULTCRS)
s = self.serializer(multilinestring)
d = json.loads(s)
self.assertEqual(list(multilinestring.coordinates), list(d["coordinates"]))
return
def test_serialize_multipolygon(self):
multipolygon = pico.MultiPolygon(
[[[[44.0, 17.0], [43.0, 17.5], [-2.1, 4.0], [44.0, 17.0]],
[[1.0, 1.0], [0.8, -0.7], [0.5, -0.5], [1.0, 1.0]]],
[[[49.0, -3.0], [48.0, -2.5], [2.9, -16.0], [49.0, -3.0]]]],
DEFAULTCRS)
s = self.serializer(multipolygon)
d = json.loads(s)
self.assertEqual(list(multipolygon.coordinates), list(d["coordinates"]))
return
def test_serialize_geometrycollection(self):
collection = pico.GeometryCollection(
[pico.Point((3, 4), None),
pico.Point((5, 6), None),
pico.LineString([(1, 2), (3, 4), (3, 2)], None)],
DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertEqual(len(d.get("geometries", [])), 3)
self.assertEqual(d.get("crs", ""), DEFAULTCRS)
return
def test_serialize_geometrycollection_empty(self):
collection = pico.GeometryCollection([], DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertEqual(len(d.get("geometries", [0])), 0)
self.assertEqual(d.get("crs", ""), DEFAULTCRS)
return
def test_top_bbox_only_geometry_collection(self):
collection = pico.GeometryCollection(
[pico.Point((3, 4), None),
pico.Polygon([[(5, 6), (7, 8), (9, 10), (5, 6)]], None),
pico.LineString([(1, 2), (3, 4), (3, 2)], None)],
DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertFalse(d["geometries"][1].get("bbox", False))
self.assertFalse(d["geometries"][2].get("bbox", False))
self.assertTrue(d.get("bbox", False) is not False)
def test_top_bbox_only_feature_collection(self):
collection = pico.FeatureCollection(
[pico.Feature(pico.Point((7,3), None), {"type": "city"}, None, None),
pico.Feature(pico.LineString([(1,2), (1,3), (2, 2)], None),
{"type": "river"}, None, None),
pico.Feature(pico.Polygon([[(1,2), (1,3), (2, 2), (1, 2)]], None),
{"type": "boundary"}, None, None)],
DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertFalse(d["features"][1]["geometry"].get("bbox", False))
self.assertFalse(d["features"][2]["geometry"].get("bbox", False))
self.assertTrue(d.get("bbox", False) is not False)
def test_serialize_feature(self):
feature = pico.Feature(pico.Point((1,2), None), {"type": "city"}, 1, DEFAULTCRS)
s = self.serializer(feature)
d = json.loads(s)
self.assertEqual(d.get("geometry", {}).get("type", ""), "Point")
self.assertEqual(d.get("id", 0), 1)
self.assertEqual(d.get("properties", {}).get("type", ""), "city")
return
def test_serialize_featurecollection(self):
collection = pico.FeatureCollection(
[pico.Feature(pico.Point((7,3), None), {"type": "city"}, None, None),
pico.Feature(pico.LineString([(1,2), (1,3), (2, 2)], None),
{"type": "river"}, None, None),
pico.Feature(pico.Polygon([[(1,2), (1,3), (2, 2), (2, 1), (1,2)]], None),
{"type": "boundary"}, None, None)],
DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertEqual(len(d.get("features", [])), 3)
self.assertEqual(d.get("crs", ""), DEFAULTCRS)
return
def test_serialize_featurecollection_empty(self):
collection = pico.FeatureCollection([], DEFAULTCRS)
s = self.serializer(collection)
d = json.loads(s)
self.assertEqual(len(d.get("features", [0])), 0)
self.assertEqual(d.get("crs", ""), DEFAULTCRS)
return
def test_dedup_crs_geometrycollection(self):
crs = {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}
collection = pico.GeometryCollection(
[pico.Point((1, 2), crs=crs)],
crs=crs)
s = self.serializer(collection)
self.assertEqual(s.count('"crs"'), 1)
def test_dedup_crs_feature(self):
crs = {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}
feature = pico.Feature(pico.Point((1, 2), crs=crs),
{"type": "tree"}, id=1, crs=crs)
s = self.serializer(feature)
self.assertEqual(s.count('"crs"'), 1)
def test_dedup_crs_feature_collection(self):
crs = {"type": "name", "properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}}
coll = pico.FeatureCollection(
[pico.Feature(pico.Point((1, 2), crs=crs),
{"type": "tree"}, id=1, crs=crs),
pico.Feature(pico.LineString([(1, 2), (2, 3)], crs=crs),
{"type": "fence"}, id=2, crs=crs),
pico.Feature(pico.Point((5, 4), crs=crs),
{"type": "pothole"}, id=3, crs=crs)],
crs=crs)
s = self.serializer(coll)
self.assertEqual(s.count('"crs"'), 1)
def test_serialize_precision_point(self):
pt = pico.Point((44.1234567, 17.0987654))
ser = Serializer(precision=3)
s = ser(pt)
d = json.loads(s)
self.assertEqual((44.123, 17.099), tuple(d["coordinates"]))
return
def test_serialize_precision_bbox(self):
ls = pico.LineString([(-1.1111111, 2.2222222), (3.3333333, -7.7777777)])
ser = Serializer(precision=3, write_bbox=True)
s = ser(ls)
d = json.loads(s)
self.assertEqual([-1.111, -7.778, 3.333, 2.222], d["bbox"])
return
class AntimeridianTests(unittest.TestCase):
def test_contains(self):
self.assertFalse(pico.antimeridian.contains(
[(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)],
[(2, 0), (2, 1), (3, 1), (3, 0), (2, 0)]))
self.assertTrue(pico.antimeridian.contains(
[(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)],
[(1, 1), (1, 3), (3, 3), (3, 1), (1, 1)]))
return
def test_linestring_split(self):
res = pico.antimeridian.antimeridian_cut(
pico.LineString([(172, 34), (178, 36), (-179, 37), (-177, 39)])
)
self.assertTrue(isinstance(res, pico.MultiLineString))
self.assertEqual(len(res.coordinates), 2)
self.assertEqual(res.coordinates[0][-1], (180, 36.33333333))
self.assertEqual(res.coordinates[1][0], (-179.99999999, 36.33333333))
def test_polygon_split(self):
res = pico.antimeridian.antimeridian_cut(
pico.Polygon([[(172, -20), (-179, -20), (-177, -25), (172, -25), (172, -20)]])
)
self.assertTrue(isinstance(res, pico.MultiPolygon))
self.assertEqual(len(res.coordinates), 2)
def test_polygon_split_holes(self):
res = pico.antimeridian.antimeridian_cut(
pico.Polygon([[(172, -20), (-179, -20), (-177, -25), (172, -25), (172, -20)],
[(174, -22), (-179, -22), (-179, -23), (174, -22)]])
)
self.assertTrue(isinstance(res, pico.MultiPolygon))
self.assertEqual(len(res.coordinates), 2)
self.assertEqual(len(res.coordinates[0]), 2)
self.assertEqual(len(res.coordinates[1]), 2)
def test_multilinestring_split(self):
res = pico.antimeridian.antimeridian_cut(
pico.MultiLineString(
[[(172, 34), (178, 36), (-179, 37), (-177, 39)],
[(172, -34), (178, -36), (-179, -37), (-177, -39)]])
)
self.assertEqual(len(res.coordinates), 4)
def test_featurecollection_split(self):
res = pico.antimeridian.antimeridian_cut(
pico.FeatureCollection([
pico.Feature(
pico.LineString([(172, 34), (178, 36), (-179, 37), (-177, 39)]),
{"desc": "a linestring spanning the dateline"}),
pico.Feature(
pico.Point((1,2)),
{"desc": "a single point"}),
pico.Feature(
pico.GeometryCollection([
pico.Polygon([[(178, 3), (-178, 5), (-178, 7), (178, 5), (178, 3)]]),
pico.LineString([(172, -34), (178, -36), (-179, -37), (-177, -39)])]),
{"desc": "a geometry collection containing a polygon and a linestring"})
]))
self.assertEqual(type(res).__name__, "FeatureCollection")
self.assertEqual(len(res.features), 3)
self.assertEqual(type(res.features[0].geometry).__name__,
"MultiLineString")
self.assertEqual(type(res.features[2].geometry).__name__,
"GeometryCollection")
self.assertEqual(type(res.features[2].geometry.geometries[0]).__name__,
"MultiPolygon")
class OrientationTests(unittest.TestCase):
def test_isccw(self):
self.assertTrue(pico.orientation.is_counterclockwise(
[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]))
self.assertFalse(pico.orientation.is_counterclockwise(
[(0, 0), (0, 1), (1, 1), (1, 0), (0, 0)]))
class BboxTests(unittest.TestCase):
def test_coordinate_bbox_2d(self):
cs = [[i, j] for i in range(0, 30, 3) for j in range(10, -10, -2)]
bbx = bbox.coordstring_bbox(cs)
self.assertEqual(bbx, [0, -8, 27, 10])
def test_coordinate_bbox_3d(self):
cs = [[i, j, k] for i in range(0, 30, 3)
for j in range(10, -10, -2)
for k in range(1, 5)]
bbx = bbox.coordstring_bbox(cs)
self.assertEqual(bbx, [0, -8, 1, 27, 10, 4])
def test_coordinate_bbox_empty(self):
cs = []
bbx = bbox.coordstring_bbox(cs)
self.assertTrue(bbx is None)
def test_point_bbox_2(self):
p = pico.Point((2, 3))
bbx = bbox.geom_bbox(p)
self.assertEqual(bbx, [2, 3, 2, 3])
def test_point_bbox_3(self):
p = pico.Point((2, 3, 1))
bbx = bbox.geom_bbox(p)
self.assertEqual(bbx, [2, 3, 1, 2, 3, 1])
def test_geometrycollection_bbox_2(self):
collection = pico.GeometryCollection(
[pico.Point((3, 4), None),
pico.Point((5, 6), None),
pico.LineString([(1, 2), (3, 4), (3, 2)], None)],
DEFAULTCRS)
bbx = bbox.geom_bbox(collection)
self.assertEqual(bbx, [1, 2, 5, 6])
def test_geometrycollection_bbox_some_empty(self):
collection = pico.GeometryCollection(
[pico.Point((3, 4), None),
pico.Point((5, 6), None),
pico.GeometryCollection([], None)],
DEFAULTCRS)
bbx = bbox.geom_bbox(collection)
self.assertEqual(bbx, [3, 4, 5, 6])
def test_geometrycollection_bbox_3(self):
collection = pico.GeometryCollection(
[pico.Point((3, 4, 1), None),
pico.Point((5, 6, 2), None),
pico.LineString([(1, 2, 2), (3, 4, 5), (3, 2, 3)], None)],
DEFAULTCRS)
bbx = bbox.geom_bbox(collection)
self.assertEqual(bbx, [1, 2, 1, 5, 6, 5])
def test_feature_bbox_2(self):
feature = pico.Feature(
pico.LineString([(1,2), (1,3), (2, 2)], None),
{"type": "river"}, None, None)
bbx = bbox.feature_bbox(feature)
self.assertEqual(bbx, [1, 2, 2, 3])
def test_feature_bbox_3(self):
feature = pico.Feature(
pico.LineString([(1, 2, 1), (1, 3, 0.5), (2, 2, 0)], None),
{"type": "river"}, None, None)
bbx = bbox.feature_bbox(feature)
self.assertEqual(bbx, [1, 2, 0, 2, 3, 1])
def test_feature_collection_bbox_empty(self):
collection = pico.FeatureCollection([], None)
self.assertTrue(bbox.feature_collection_bbox(collection) is None)
def test_feature_collection_bbox(self):
feature1 = pico.Feature(pico.LineString([(1,2), (1,3), (2, 2)], None),
{"type": "river"}, None, None)
feature2 = pico.Feature(pico.Point((0,2), None),
{"type": "spring"}, None, None)
collection = pico.FeatureCollection([feature1, feature2], None)
bbx = bbox.feature_collection_bbox(collection)
self.assertEqual(bbx, [0, 2, 2, 3])
class FixedPrecisionTests(unittest.TestCase):
def test_scalar(self):
self.assertEqual(fixed_precision(3.141592654, 3), 3.142)
def test_list(self):
self.assertEqual(fixed_precision([1.234567, 2.345678, 3.456789], 3),
[1.235, 2.346, 3.457])
def test_nested_list(self):
self.assertEqual(fixed_precision([[1.234567, 2.345678], 3.456789], 3),
[[1.235, 2.346], 3.457])
class MergeBurstTests(unittest.TestCase):
def test_merge_empty(self):
with self.assertRaises(ValueError):
merge([])
def test_merge_one(self):
pt = pico.Point((1, 2))
merged = merge([pt])
self.assertEqual(pt, merged)
def test_merge_points(self):
pts = [pico.Point((1, 2)),
pico.Point((3, 4)),
pico.Point((5, 6)),
pico.Point((7, 8))]
merged = merge(pts)
self.assertEqual(type(merged).__name__, "MultiPoint")
self.assertEqual(len(merged.coordinates), 4)
def test_merge_linestrings(self):
lns = [pico.LineString([(1, 2), (2, 2), (1, 1), (-1, 2)]),
pico.LineString([(3, 4), (3, 4), (3, 2), (-3, 4)]),
pico.LineString([(5, 6), (4, 6), (5, 3), (-5, 6)]),
pico.LineString([(7, 8), (5, 8), (7, 4), (-7, 8)])]
merged = merge(lns)
self.assertEqual(type(merged).__name__, "MultiLineString")
self.assertEqual(len(merged.coordinates), 4)
def test_merge_polygons(self):
plg = [pico.Polygon([[(1, 2), (2, 2), (1, 1), (-1, 2), (1, 2)]]),
pico.Polygon([[(3, 4), (3, 4), (3, 2), (-3, 4), (3, 4)]]),
pico.Polygon([[(5, 6), (4, 6), (5, 3), (-5, 6), (5, 6)]]),
pico.Polygon([[(7, 8), (5, 8), (7, 4), (-7, 8), (7, 8)]])]
merged = merge(plg)
self.assertEqual(type(merged).__name__, "MultiPolygon")
self.assertEqual(len(merged.coordinates), 4)
def test_merge_geometrycollections(self):
gcs = [pico.GeometryCollection([
pico.Point((1, 2)),
pico.LineString([(2, 2), (1, 1), (-1, 2)])]),
pico.GeometryCollection([
pico.MultiPoint([(7, 8), (5, 8), (7, 4), (-7, 8)]),
pico.Point((9, 8))])
]
merged = merge(gcs)
self.assertEqual(type(merged).__name__, "GeometryCollection")
self.assertEqual(len(merged.geometries), 2)
def test_merge_geometries(self):
gms = [pico.LineString([(1, 2), (2, 2), (1, 1), (-1, 2)]),
pico.Point((3, 4)),
pico.Polygon([[(5, 6), (4, 6), (2, 5), (5, 5), (5, 6)]])]
merged = merge(gms)
self.assertEqual(type(merged).__name__, "GeometryCollection")
self.assertEqual(len(merged.geometries), 3)
def test_merge_features(self):
gms = [pico.Feature(
pico.LineString([(1, 2), (2, 2), (1, 1), (-1, 2)]),
{"desc": "single linestring"}
),
pico.Feature(pico.Point((3, 4)),
{"desc": "single point"}),
pico.Feature(pico.GeometryCollection(
[pico.LineString([(1, 2), (2, 3), (1, 4)]),
pico.Point((-2, -3))]),
{"desc": "collection of geometries"}),
pico.Feature(pico.Polygon([[(5, 6), (4, 6), (2, 5), (5, 5), (5, 6)]]),
{"desc": "single polygon"})]
merged = merge(gms)
self.assertEqual(type(merged).__name__, "FeatureCollection")
self.assertEqual(len(merged.features), 4)
def test_merge_features_featurecollections(self):
gms = [pico.Feature(pico.LineString([(1, 2), (2, 2), (1, 1), (-1, 2)]),
{"desc": "single linestring"}),
pico.FeatureCollection(
[pico.Feature(pico.Point((3, 4)),
{"desc": "single point"}),
pico.Feature(pico.GeometryCollection(
[pico.LineString([(1, 2), (2, 3), (1, 4)]),
pico.Point((-2, -3))]),
{"desc": "collection of geometries"})]
),
pico.Feature(pico.Polygon([[(5, 6), (4, 6), (2, 5), (5, 5), (5, 6)]]),
{"desc": "single polygon"})]
merged = merge(gms)
self.assertEqual(type(merged).__name__, "FeatureCollection")
self.assertEqual(len(merged.features), 4)
def test_merge_featurecollections(self):
fcs = [pico.FeatureCollection([
pico.Feature(
pico.LineString([(1, 2), (2, 2), (1, 1), (-1, 2)]),
{"desc": "single linestring"}),
pico.Feature(
pico.LineString([(0, 2), (1, -1), (1, 0), (-1, 3)]),
{"desc": "another linestring"})]),
pico.FeatureCollection(
[pico.Feature(pico.Point((3, 4)),
{"desc": "single point"}),
pico.Feature(pico.GeometryCollection(
[pico.LineString([(1, 2), (2, 3), (1, 4)]),
pico.Point((-2, -3))]),
{"desc": "collection of geometries"})])]
merged = merge(fcs)
self.assertEqual(type(merged).__name__, "FeatureCollection")
self.assertEqual(len(merged.features), 4)
def test_burst_multipoint(self):
result = list(burst(pico.MultiPoint([(1, 2), (3, 4), (5, 6)])))
self.assertEqual(len(result), 3)
self.assertEqual(type(result[0]).__name__, "Point")
self.assertEqual(type(result[1]).__name__, "Point")
self.assertEqual(type(result[2]).__name__, "Point")
def test_burst_point(self):
result = list(burst(pico.Point((1, 2))))
self.assertEqual(len(result), 1)
self.assertEqual(type(result[0]).__name__, "Point")
def test_burst_geometrycollection(self):
result = list(burst(pico.GeometryCollection([
pico.Point((1, 2)),
pico.LineString([(3, 4), (5, 6), (7, 6)]),
pico.Polygon([[(1, 1), (2, 2), (2, 3), (1, 2), (1, 1)]]),
pico.MultiLineString([[(0, 0), (0, 1), (1, 1)],
[(0, 0), (1, 0), (1, 1)]])
], crs=DEFAULTCRS)))
self.assertEqual(len(result), 5)
self.assertEqual(result[0].crs, DEFAULTCRS)
def test_burst_multipolygon(self):
result = list(burst(pico.MultiPolygon([
[[(1, 2), (2, 3), (1, 3), (1, 2)]],
[[(1, 2), (-2, -3), (-1, -3), (1, 2)]]],
crs=DEFAULTCRS)))
self.assertEqual(len(result), 2)
self.assertEqual(result[0].crs, DEFAULTCRS)
def test_burst_feature_collection(self):
result = list(burst(pico.FeatureCollection([
pico.Feature(pico.Point((1, 2)),
properties={"desc": "a point"}),
pico.Feature(pico.MultiPolygon([
[[(1, 2), (2, 3), (1, 3), (1, 2)]],
[[(1, 2), (-2, -3), (-1, -3), (1, 2)]]]),
properties={"desc": "some triangles"})
], crs=DEFAULTCRS)))
self.assertEqual(len(result), 2)
self.assertEqual(result[0].crs, DEFAULTCRS)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_Basic/__init__.py
# Date : Nov 05, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
import json
from collections import OrderedDict
from IOST_Prepare import *
import gtk
import gtk.glade
import gobject
gtk.gdk.threads_init()
#======================================================================
IOST_Basic_Enable_Write_Config_File = 1
#======================================================================
STATUS_RUN = "Running ..."
STATUS_INIT = "Initializing ..."
STATUS_PAUSE = "Pausing ..."
STATUS_DISABLE = "Disable"
STATUS_ENABLE = "Enable"
STATUS_CORRUPT = "Corrupted"
STATUS_SUPPORT = "Supported"
STATUS_EMPTY = ""
STATUS_NONE = "None"
STATUS_N_A = "N/A"
STATUS_AVAIL = "Available"
STATUS_NOT_AVAIL = "Not Available"
WRUN_IP_COLOR_DEFAULT = "#990033"
WRIN_SATION_INFO_COLOR = "#0099ff"
TEMPERATURE_STR = "Temperature (" + unichr(176) + "C):"
def IOST_ExtracPort(ip_name):
    """
    Extract the port number from the string ip_name
    Example:
        S = "ETH0"
        S1 = IOST_ExtracPort(S)
        # --> S1 = '0'  (returned as a string)
    """
    return re.findall(r'\d+', ip_name)[0]
#======================================================================
IOST_CurrenPathWhenRun = ""
#======================================================================
class IOST_Define():
WORD_SEPARATORS = "-A-Za-z0-9,./?%&#:_=+@~"
BUFFER_LINES = 2000000
STARTUP_LOCAL = True
CONFIRM_ON_EXIT = True
FONT_COLOR = ""
BACKGROUND_COLOR = ""
TRANSPARENCY = 0
PASTE_ON_RIGHT_CLICK = 1
CONFIRM_ON_CLOSE_TAB = 0
AUTO_CLOSE_TAB = 0
COLLAPSED_FOLDERS = ""
LEFT_PANEL_WIDTH = 100
CHECK_UPDATES = True
WINDOW_WIDTH = -1
WINDOW_HEIGHT = -1
FONT = ""
AUTO_COPY_SELECTION = 0
LOG_PATH = os.path.expanduser("~")
SHOW_TOOLBAR = True
SHOW_PANEL = True
VERSION = 0
#======================================================================
class IOST_Basic(IOST_Prepare):
def __init__(self):
"""
"""
#-----------------------------------------------------------------------
    def Str2Boolean(self, s):
        if s in ["True", "Enable", "Yes"]:
            return True
        elif s in ["False", "Disable", "No"]:
            return False
        else:
            raise ValueError("Cannot convert %r to a boolean" % (s,))
def Boolean2Str(self, boolean):
if boolean:
return 'Enable'
else:
return 'Disable'
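    # Illustrative round trip: Str2Boolean("Enable") -> True and
    # Boolean2Str(True) -> 'Enable'; unrecognized strings raise ValueError.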
#-----------------------------------------------------------------------
def MsgBox(self, text, icon_file=None, parent=None, msg_type=gtk.MESSAGE_ERROR):
""
        MsgBox = gtk.MessageDialog(parent, gtk.DIALOG_MODAL, msg_type, gtk.BUTTONS_OK, text)
if icon_file != None:
MsgBox.set_icon_from_file(icon_file)
image = gtk.Image ()
image.set_from_file (icon_file)
MsgBox.set_image(image)
MsgBox.show_all()
Res = MsgBox.run()
MsgBox.destroy()
#-----------------------------------------------------------------------
def MsgConfirm(self, text=""):
""
# global IOST_Config
MsgBox=gtk.MessageDialog(None, gtk.DIALOG_MODAL, gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL, text)
# MsgBox.set_icon(IOST_Config["IconPath"])
Response = MsgBox.run()
MsgBox.destroy()
return Response
#-----------------------------------------------------------------------
def InputBox(self, title, text, default='', password=False):
""
global IOST_Config
MsgBox = EntryDialog(title, text, default, mask=password)
# MsgBox.set_icon(IOST_Config["IconPath"])
if MsgBox.run() == gtk.RESPONSE_OK:
Response = MsgBox.value
else:
Response = None
MsgBox.destroy()
return Response
#-----------------------------------------------------------------------
    def ShowFontDialog(self, parent, title, button, action=gtk.FILE_CHOOSER_ACTION_OPEN):
        # 'action' was referenced below but never defined; it is now a parameter
        # (assumed default: open dialog; pass gtk.FILE_CHOOSER_ACTION_SAVE to save).
""
Dlg = gtk.FileChooserDialog(title=title, parent=parent, action=action)
Dlg.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
Dlg.add_button(gtk.STOCK_SAVE if action==gtk.FILE_CHOOSER_ACTION_SAVE else gtk.STOCK_OPEN, gtk.RESPONSE_OK)
Dlg.set_do_overwrite_confirmation(True)
if not hasattr(parent,'lastPath'):
parent.lastPath = os.path.expanduser("~")
Dlg.set_current_folder( parent.lastPath )
if Dlg.run() == gtk.RESPONSE_OK:
            filename = Dlg.get_filename()
parent.lastPath = os.path.dirname(filename)
else:
filename = None
Dlg.destroy()
return filename
#-----------------------------------------------------------------------
def GetKeyName(self, event):
""
name = ""
if event.state & 4:
name = name + "CTRL+"
if event.state & 1:
name = name + "SHIFT+"
if event.state & 8:
name = name + "ALT+"
if event.state & 67108864:
name = name + "SUPER+"
return name + gtk.gdk.keyval_name(event.keyval).upper()
#-----------------------------------------------------------------------
    def GetUserName(self):
return os.getenv('USER') or os.getenv('LOGNAME') or os.getenv('USERNAME')
# def IOST_Basic_GetPassword():
# return get_username() + enc_passwd
#-----------------------------------------------------------------------
    def ReadFileJSON(self, file_name=""):
with open(file_name) as ReadFileName:
ReadData = json.load(ReadFileName, object_pairs_hook=OrderedDict)
# if IOST_Config_DebugEnable:
# pprint (IOST_Config_ReadData)
return ReadData
#-----------------------------------------------------------------------
    def WriteFileJSON(self, file_name="", data=None):
with open(file_name, 'w') as WriteFileName:
json.dump(data, WriteFileName,indent=4)
#-----------------------------------------------------------------------
def FormatText(self, object_name, color=None, bold=False, italic=False, text=None):
"""
Format the text with color, bold, italic and modify text default
1. color:
format-1: color=<"color">
color="blue"
color="green"
color="red"
format-2: color="#<R><G><B>"
color="#AABBCC"
color="#FF00BB"
2. bold :
bold = True
or bold = False
3. italic :
italic = True
or italic = False
"""
if text == None:
text=object_name.get_text()
if bold:
text = "<b>"+text+"</b>"
        if italic:
            text = "<i>"+text+"</i>"
if color != None:
if '#' in color:
color = gtk.gdk.Color(color).to_string()
text = "<span foreground='"+color+"'>"+text+"</span>"
else:
text = "<span foreground='"+str(color)+"'>"+text+"</span>"
object_name.set_text(text)
object_name.set_use_markup(True)
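    # Illustrative usage (hypothetical widget): for any gtk widget with
    # get_text()/set_text(), e.g. a gtk.Label named label,
    #   self.FormatText(label, color="#0099FF", bold=True)
    # wraps the current text in Pango markup and enables markup rendering.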
#-----------------------------------------------------------------------
def Msg_NotSupported(self, image_file):
msg_Text = " The feature have NOT supported (^.^) "
self.MsgBox(msg_Text, icon_file=image_file, msg_type=gtk.MESSAGE_INFO)
|
|
"""An interface for extending pandas with custom arrays.
.. warning::
This is an experimental API and subject to breaking changes
without warning.
"""
import operator
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import numpy as np
from pandas.compat import set_function_name
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import (
ABCExtensionArray, ABCIndexClass, ABCSeries)
from pandas.core.dtypes.missing import isna
from pandas._typing import ArrayLike
from pandas.core import ops
_not_implemented_message = "{} does not implement {}."
_extension_array_shared_docs = dict() # type: Dict[str, str]
class ExtensionArray:
"""
Abstract base class for custom 1-D array types.
pandas will recognize instances of this class as proper arrays
with a custom type and will not attempt to coerce them to objects. They
may be stored directly inside a :class:`DataFrame` or :class:`Series`.
.. versionadded:: 0.23.0
Notes
-----
The interface includes the following abstract methods that must be
implemented by subclasses:
* _from_sequence
* _from_factorized
* __getitem__
* __len__
* dtype
* nbytes
* isna
* take
* copy
* _concat_same_type
A default repr displaying the type, (truncated) data, length,
and dtype is provided. It can be customized or replaced by
    overriding:
* __repr__ : A default repr for the ExtensionArray.
* _formatter : Print scalars inside a Series or DataFrame.
Some methods require casting the ExtensionArray to an ndarray of Python
objects with ``self.astype(object)``, which may be expensive. When
performance is a concern, we highly recommend overriding the following
methods:
* fillna
* dropna
* unique
* factorize / _values_for_factorize
* argsort / _values_for_argsort
* searchsorted
The remaining methods implemented on this class should be performant,
as they only compose abstract methods. Still, a more efficient
implementation may be available, and these methods can be overridden.
One can implement methods to handle array reductions.
* _reduce
One can implement methods to handle parsing from strings that will be used
in methods such as ``pandas.io.parsers.read_csv``.
* _from_sequence_of_strings
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
ExtensionArrays are limited to 1 dimension.
They may be backed by none, one, or many NumPy arrays. For example,
``pandas.Categorical`` is an extension array backed by two arrays,
    one for codes and one for categories. An array of IPv6 addresses may
be backed by a NumPy structured array with two fields, one for the
lower 64 bits and one for the upper 64 bits. Or they may be backed
by some other storage type, like Python lists. Pandas makes no
assumptions on how the data are stored, just that it can be converted
to a NumPy array.
The ExtensionArray interface does not impose any rules on how this data
is stored. However, currently, the backing data cannot be stored in
attributes called ``.values`` or ``._values`` to ensure full compatibility
    with pandas internals. But other names such as ``.data``, ``._data``,
``._items``, ... can be freely used.
"""
# '_typ' is for pandas.core.dtypes.generic.ABCExtensionArray.
# Don't override this.
_typ = 'extension'
# ------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""
Construct a new ExtensionArray from a sequence of scalars.
Parameters
----------
scalars : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : boolean, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
"""Construct a new ExtensionArray from a sequence of strings.
.. versionadded:: 0.24.0
Parameters
----------
strings : Sequence
Each element will be an instance of the scalar type for this
array, ``cls.dtype.type``.
dtype : dtype, optional
Construct for this particular dtype. This should be a Dtype
compatible with the ExtensionArray.
copy : boolean, default False
If True, copy the underlying data.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
@classmethod
def _from_factorized(cls, values, original):
"""
Reconstruct an ExtensionArray after factorization.
Parameters
----------
values : ndarray
An integer ndarray with the factorized values.
original : ExtensionArray
The original ExtensionArray that factorize was called on.
See Also
--------
pandas.factorize
ExtensionArray.factorize
"""
raise AbstractMethodError(cls)
# ------------------------------------------------------------------------
# Must be a Sequence
# ------------------------------------------------------------------------
def __getitem__(self, item):
        # type: (Any) -> Any
"""
Select a subset of self.
Parameters
----------
item : int, slice, or ndarray
* int: The position in 'self' to get.
* slice: A slice object, where 'start', 'stop', and 'step' are
integers or None
* ndarray: A 1-d boolean NumPy ndarray the same length as 'self'
Returns
-------
item : scalar or ExtensionArray
Notes
-----
For scalar ``item``, return a scalar value suitable for the array's
type. This should be an instance of ``self.dtype.type``.
For slice ``key``, return an instance of ``ExtensionArray``, even
if the slice is length 0 or 1.
For a boolean mask, return an instance of ``ExtensionArray``, filtered
to the values where ``item`` is True.
"""
raise AbstractMethodError(self)
def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
"""
Set one or more values inplace.
This method is not required to satisfy the pandas extension array
interface.
Parameters
----------
key : int, ndarray, or slice
When called from, e.g. ``Series.__setitem__``, ``key`` will be
one of
* scalar int
* ndarray of integers.
* boolean ndarray
* slice object
value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set at ``key``.
Returns
-------
None
"""
# Some notes to the ExtensionArray implementor who may have ended up
# here. While this method is not required for the interface, if you
# *do* choose to implement __setitem__, then some semantics should be
# observed:
#
# * Setting multiple values : ExtensionArrays should support setting
# multiple values at once, 'key' will be a sequence of integers and
# 'value' will be a same-length sequence.
#
# * Broadcasting : For a sequence 'key' and a scalar 'value',
# each position in 'key' should be set to 'value'.
#
# * Coercion : Most users will expect basic coercion to work. For
# example, a string like '2018-01-01' is coerced to a datetime
# when setting on a datetime64ns array. In general, if the
# __init__ method coerces that value, then so should __setitem__
# Note, also, that Series/DataFrame.where internally use __setitem__
# on a copy of the data.
raise NotImplementedError(_not_implemented_message.format(
type(self), '__setitem__')
)
def __len__(self) -> int:
"""
Length of this array
Returns
-------
length : int
"""
raise AbstractMethodError(self)
def __iter__(self):
"""
Iterate over elements of the array.
"""
# This needs to be implemented so that pandas recognizes extension
# arrays as list-like. The default implementation makes successive
# calls to ``__getitem__``, which may be slower than necessary.
for i in range(len(self)):
yield self[i]
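    # A possible faster override for an ndarray-backed subclass (the
    # ``self._data`` attribute below is hypothetical); this is only valid
    # when the raw stored values are already the scalar type that
    # ``__getitem__`` would return:
    #
    #     def __iter__(self):
    #         return iter(self._data)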
# ------------------------------------------------------------------------
# Required attributes
# ------------------------------------------------------------------------
@property
def dtype(self) -> ExtensionDtype:
"""
An instance of 'ExtensionDtype'.
"""
raise AbstractMethodError(self)
@property
def shape(self) -> Tuple[int, ...]:
"""
Return a tuple of the array dimensions.
"""
return (len(self),)
@property
def ndim(self) -> int:
"""
Extension Arrays are only allowed to be 1-dimensional.
"""
return 1
@property
def nbytes(self) -> int:
"""
The number of bytes needed to store this object in memory.
"""
# If this is expensive to compute, return an approximate lower bound
# on the number of bytes needed.
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Additional Methods
# ------------------------------------------------------------------------
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
return np.array(self, dtype=dtype, copy=copy)
def isna(self) -> ArrayLike:
"""
A 1-D array indicating if each value is missing.
Returns
-------
na_values : Union[np.ndarray, ExtensionArray]
In most cases, this should return a NumPy ndarray. For
exceptional cases like ``SparseArray``, where returning
an ndarray would be expensive, an ExtensionArray may be
returned.
Notes
-----
If returning an ExtensionArray, then
* ``na_values._is_boolean`` should be True
* `na_values` should implement :func:`ExtensionArray._reduce`
* ``na_values.any`` and ``na_values.all`` should be implemented
"""
raise AbstractMethodError(self)
def _values_for_argsort(self) -> np.ndarray:
"""
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
# Note: this is used in `ExtensionArray.argsort`.
return np.array(self)
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
values = self._values_for_argsort()
result = np.argsort(values, kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
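    # Implementor sketch of the first override point mentioned above: an
    # array that stores integer codes (hypothetical ``self._codes``) whose
    # ordering matches the ordering of the values can hand the codes
    # straight to numpy:
    #
    #     def _values_for_argsort(self):
    #         return self._codes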
def fillna(self, value=None, method=None, limit=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, array-like
If a scalar value is passed it is used to fill all missing values.
Alternatively, an array-like 'value' can be given. It's expected
that the array-like have the same length as 'self'.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : ExtensionArray with NA/NaN filled
"""
from pandas.api.types import is_array_like
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.missing import pad_1d, backfill_1d
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError("Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self)))
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == 'pad' else backfill_1d
new_values = func(self.astype(object), limit=limit,
mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
def dropna(self):
"""
Return ExtensionArray without NA values
Returns
-------
valid : ExtensionArray
"""
return self[~self.isna()]
def shift(
self,
periods: int = 1,
fill_value: object = None) -> ABCExtensionArray:
"""
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default is ``self.dtype.na_value``
.. versionadded:: 0.24.0
Returns
-------
shifted : ExtensionArray
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
"""
# Note: this implementation assumes that `self.dtype.na_value` can be
# stored in an instance of your ExtensionArray with `self.dtype`.
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=self.dtype
)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods):]
b = empty
return self._concat_same_type([a, b])
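    # Worked example of the logic above (values shown conceptually): for an
    # array [1, 2, 3], ``periods=1`` gives [NA, 1, 2], ``periods=-1`` gives
    # [2, 3, NA], and ``periods=5`` fills all three slots with
    # ``self.dtype.na_value``.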
def unique(self):
"""
Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
"""
from pandas import unique
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
def searchsorted(self, value, side="left", sorter=None):
"""
Find indices where elements should be inserted to maintain order.
.. versionadded:: 0.24.0
Find the indices into a sorted array `self` (a) such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Assuming that `self` is sorted:
====== ================================
`side` returned index `i` satisfies
====== ================================
left ``self[i-1] < value <= self[i]``
right ``self[i-1] <= value < self[i]``
====== ================================
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
Returns
-------
array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted : Similar method from NumPy.
"""
# Note: the base tests provided by pandas only test the basics.
# We do not test
# 1. Values outside the range of the `data_for_sorting` fixture
# 2. Values between the values in the `data_for_sorting` fixture
# 3. Missing values.
arr = self.astype(object)
return arr.searchsorted(value, side=side, sorter=sorter)
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
            `na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
def factorize(
self,
na_sentinel: int = -1,
) -> Tuple[np.ndarray, ABCExtensionArray]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
"""
# Implementer note: There are two ways to override the behavior of
# pandas.factorize
# 1. _values_for_factorize and _from_factorize.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
from pandas.core.algorithms import _factorize_array
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
na_value=na_value)
uniques = self._from_factorized(uniques, self)
return labels, uniques
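    # Implementor sketch of override path 1 above, assuming a hypothetical
    # ``self._data`` object ndarray that uses ``None`` for missing values:
    #
    #     def _values_for_factorize(self):
    #         return self._data, None
    #
    #     @classmethod
    #     def _from_factorized(cls, values, original):
    #         return cls(values)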
_extension_array_shared_docs['repeat'] = """
Repeat elements of a %(klass)s.
Returns a new %(klass)s where each element of the current %(klass)s
is repeated consecutively a given number of times.
Parameters
----------
repeats : int or array of ints
The number of repetitions for each element. This should be a
non-negative integer. Repeating 0 times will return an empty
%(klass)s.
axis : None
Must be ``None``. Has no effect but is accepted for compatibility
with numpy.
Returns
-------
repeated_array : %(klass)s
Newly created %(klass)s with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series.
Index.repeat : Equivalent function for Index.
numpy.repeat : Similar method for :class:`numpy.ndarray`.
ExtensionArray.take : Take arbitrary positions.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.repeat(2)
[a, a, b, b, c, c]
Categories (3, object): [a, b, c]
>>> cat.repeat([1, 2, 3])
[a, b, b, c, c, c]
Categories (3, object): [a, b, c]
"""
@Substitution(klass='ExtensionArray')
@Appender(_extension_array_shared_docs['repeat'])
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
ind = np.arange(len(self)).repeat(repeats)
return self.take(ind)
# ------------------------------------------------------------------------
# Indexing methods
# ------------------------------------------------------------------------
def take(
self,
indices: Sequence[int],
allow_fill: bool = False,
fill_value: Any = None
) -> ABCExtensionArray:
"""
Take elements from an array.
Parameters
----------
indices : sequence of integers
Indices to be taken.
allow_fill : bool, default False
How to handle negative values in `indices`.
* False: negative values in `indices` indicate positional indices
from the right (the default). This is similar to
:func:`numpy.take`.
* True: negative values in `indices` indicate
missing values. These values are set to `fill_value`. Any other
              negative values raise a ``ValueError``.
fill_value : any, optional
Fill value to use for NA-indices when `allow_fill` is True.
This may be ``None``, in which case the default NA value for
the type, ``self.dtype.na_value``, is used.
For many ExtensionArrays, there will be two representations of
`fill_value`: a user-facing "boxed" scalar, and a low-level
physical NA value. `fill_value` should be the user-facing version,
and the implementation should handle translating that to the
physical version for processing the take if necessary.
Returns
-------
ExtensionArray
Raises
------
IndexError
When the indices are out of bounds for the array.
ValueError
When `indices` contains negative values other than ``-1``
and `allow_fill` is True.
Notes
-----
ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
``iloc``, when `indices` is a sequence of values. Additionally,
it's called by :meth:`Series.reindex`, or any other method
that causes realignment, with a `fill_value`.
See Also
--------
numpy.take
pandas.api.extensions.take
Examples
--------
Here's an example implementation, which relies on casting the
extension array to object dtype. This uses the helper method
:func:`pandas.api.extensions.take`.
.. code-block:: python
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
# If the ExtensionArray is backed by an ndarray, then
# just pass that here instead of coercing to object.
data = self.astype(object)
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
# fill value should always be translated from the scalar
# type for the array, to the physical storage type for
# the data, before passing to take.
result = take(data, indices, fill_value=fill_value,
allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
"""
# Implementer note: The `fill_value` parameter should be a user-facing
# value, an instance of self.dtype.type. When passed `fill_value=None`,
# the default of `self.dtype.na_value` should be used.
# This may differ from the physical storage type your ExtensionArray
# uses. In this case, your implementation is responsible for casting
# the user-facing type to the storage type, before using
# pandas.api.extensions.take
raise AbstractMethodError(self)
def copy(self, deep: bool = False) -> ABCExtensionArray:
"""
Return a copy of the array.
Parameters
----------
deep : bool, default False
Also copy the underlying data backing this array.
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(self)
# ------------------------------------------------------------------------
# Printing
# ------------------------------------------------------------------------
def __repr__(self):
from pandas.io.formats.printing import format_object_summary
template = (
'{class_name}'
'{data}\n'
'Length: {length}, dtype: {dtype}'
)
# the short repr has no trailing newline, while the truncated
# repr does. So we include a newline in our template, and strip
# any trailing newlines from format_object_summary
data = format_object_summary(self, self._formatter(),
indent_for_name=False).rstrip(', \n')
class_name = '<{}>\n'.format(self.__class__.__name__)
return template.format(class_name=class_name, data=data,
length=len(self),
dtype=self.dtype)
def _formatter(
self,
boxed: bool = False,
) -> Callable[[Any], Optional[str]]:
"""Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
            An indicator for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
"""
if boxed:
return str
return repr
def _formatting_values(self) -> np.ndarray:
# At the moment, this has to be an array since we use result.dtype
"""
An array of values to be printed in, e.g. the Series repr
.. deprecated:: 0.24.0
Use :meth:`ExtensionArray._formatter` instead.
"""
return np.array(self)
# ------------------------------------------------------------------------
# Reshaping
# ------------------------------------------------------------------------
@classmethod
def _concat_same_type(
cls,
to_concat: Sequence[ABCExtensionArray]
) -> ABCExtensionArray:
"""
        Concatenate multiple arrays
Parameters
----------
to_concat : sequence of this type
Returns
-------
ExtensionArray
"""
raise AbstractMethodError(cls)
# The _can_hold_na attribute is set to True so that pandas internals
# will use the ExtensionDtype.na_value as the NA value in operations
# such as take(), reindex(), shift(), etc. In addition, those results
# will then be of the ExtensionArray subclass rather than an array
# of objects
_can_hold_na = True
@property
def _ndarray_values(self) -> np.ndarray:
"""
Internal pandas method for lossy conversion to a NumPy ndarray.
This method is not part of the pandas interface.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
"""
return np.array(self)
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
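    # A hedged sketch of a ``_reduce`` override for a float-ndarray-backed
    # subclass (hypothetical ``self._data``); it delegates to the matching
    # NumPy function and keeps the TypeError contract for unsupported names:
    #
    #     def _reduce(self, name, skipna=True, **kwargs):
    #         data = self._data[~np.isnan(self._data)] if skipna else self._data
    #         try:
    #             return getattr(np, name)(data, **kwargs)
    #         except AttributeError:
    #             raise TypeError("cannot perform {} with type {}".format(
    #                 name, self.dtype))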
class ExtensionOpsMixin:
"""
A base class for linking the operators to their dunder names.
.. note::
You may want to set ``__array_priority__`` if you want your
implementation to be called when involved in binary operations
with NumPy arrays.
"""
@classmethod
def _add_arithmetic_ops(cls):
cls.__add__ = cls._create_arithmetic_method(operator.add)
cls.__radd__ = cls._create_arithmetic_method(ops.radd)
cls.__sub__ = cls._create_arithmetic_method(operator.sub)
cls.__rsub__ = cls._create_arithmetic_method(ops.rsub)
cls.__mul__ = cls._create_arithmetic_method(operator.mul)
cls.__rmul__ = cls._create_arithmetic_method(ops.rmul)
cls.__pow__ = cls._create_arithmetic_method(operator.pow)
cls.__rpow__ = cls._create_arithmetic_method(ops.rpow)
cls.__mod__ = cls._create_arithmetic_method(operator.mod)
cls.__rmod__ = cls._create_arithmetic_method(ops.rmod)
cls.__floordiv__ = cls._create_arithmetic_method(operator.floordiv)
cls.__rfloordiv__ = cls._create_arithmetic_method(ops.rfloordiv)
cls.__truediv__ = cls._create_arithmetic_method(operator.truediv)
cls.__rtruediv__ = cls._create_arithmetic_method(ops.rtruediv)
cls.__divmod__ = cls._create_arithmetic_method(divmod)
cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod)
@classmethod
def _add_comparison_ops(cls):
cls.__eq__ = cls._create_comparison_method(operator.eq)
cls.__ne__ = cls._create_comparison_method(operator.ne)
cls.__lt__ = cls._create_comparison_method(operator.lt)
cls.__gt__ = cls._create_comparison_method(operator.gt)
cls.__le__ = cls._create_comparison_method(operator.le)
cls.__ge__ = cls._create_comparison_method(operator.ge)
class ExtensionScalarOpsMixin(ExtensionOpsMixin):
"""
A mixin for defining ops on an ExtensionArray.
It is assumed that the underlying scalar objects have the operators
already defined.
Notes
-----
If you have defined a subclass MyExtensionArray(ExtensionArray), then
use MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin) to
get the arithmetic operators. After the definition of MyExtensionArray,
insert the lines
MyExtensionArray._add_arithmetic_ops()
MyExtensionArray._add_comparison_ops()
to link the operators to your class.
.. note::
You may want to set ``__array_priority__`` if you want your
implementation to be called when involved in binary operations
with NumPy arrays.
"""
@classmethod
def _create_method(cls, op, coerce_to_dtype=True):
"""
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
Examples
--------
Given an ExtensionArray subclass called MyExtensionArray, use
>>> __add__ = cls._create_method(operator.add)
in the class definition of MyExtensionArray to create the operator
for addition, that will be based on the operator implementation
of the underlying elements of the ExtensionArray
"""
def _binop(self, other):
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
                else:  # Assume it's an object
ovalues = [param] * len(self)
return ovalues
if isinstance(other, (ABCSeries, ABCIndexClass)):
# rely on pandas to unbox and dispatch to us
return NotImplemented
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
def _maybe_convert(arr):
if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
try:
res = self._from_sequence(arr)
except Exception:
res = np.asarray(arr)
else:
res = np.asarray(arr)
return res
if op.__name__ in {'divmod', 'rdivmod'}:
a, b = zip(*res)
res = _maybe_convert(a), _maybe_convert(b)
else:
res = _maybe_convert(res)
return res
op_name = ops._get_op_name(op, True)
return set_function_name(_binop, op_name, cls)
@classmethod
def _create_arithmetic_method(cls, op):
return cls._create_method(op)
@classmethod
def _create_comparison_method(cls, op):
return cls._create_method(op, coerce_to_dtype=False)
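# A minimal sketch (not from pandas) of wiring the mixin as described in the
# ExtensionScalarOpsMixin docstring; ``MyExtensionArray`` is hypothetical and
# elides the abstract methods it would still need to implement:
#
#     class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
#         ...
#
#     MyExtensionArray._add_arithmetic_ops()
#     MyExtensionArray._add_comparison_ops()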
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, os, tempfile
# from SimpleCV import Image, Color, Camera, ImageSet
from SimpleCV import *
from operator import add
import cv2
nbComponents = 0
dir_tmp = "static/test/treated/cam/"
def generateId():
global nbComponents
nbComponents += 1
return nbComponents
def createImage(path):
date = time.ctime(os.path.getctime(path))
name = os.path.splitext(os.path.basename(path))[0]
extension = os.path.splitext(path)[1]
if extension in ImageData.extensions:
return ImageData(path, date, name, extension)
else :
return None
class Component(object):
ioComponents = dict(CamReader="Camera Stream Reader", Reader='Picture Reader', Writer='Picture Writer')
processors = dict(Cropper='Cropper', GrayScale='Gray Scale', ChromaKey='Chromakey',ImageStack='Image Blurrer')
dir_tmp = tempfile.gettempdir()
selectors = dict(Splitter="File Splitter", FileFilter='File Filter', Joiner='Joiner')
statistics = []
    # the parent class's __subclasses__() returns the list of component classes
    attr_description = "parent:Component:previous component which this object is related to,\
id:int:component identifier,images:list:list of images,executed:bool:whether the component has already been executed"
def __init__(self) :
self.parent = None
self.id = generateId()
self.images = []
self.executed = False
def setParent(self,parent):
self.parent = parent
if parent is None :
self.executed = False
def isSafelyExecuted(self):
        if self.parent is None:
            return False
        if isinstance(self.parent, Reader):
            return self.parent.executed and self.executed
        return self.parent.isSafelyExecuted() and self.executed
def executeParent(self):
if not self.parent.executed:
self.parent.process()
    def process(self):
        print 'Abstract class'
    def showOutput(self):
        print 'print treated images'
class Splitter(Component):
"""Splits one stream into two based on random"""
description = "Splits one data stream into two data streams depending whether they match some specific random"
attr_description = Component.attr_description + "images2:list(imageData):second output"
def __init__(self):
Component.__init__(self)
self.images2 = None
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
self.images = []
self.images2 = []
i=2
for image in self.parent.images:
print "Considering ", image.path
if i == 2:
self.images.append(image)
else:
self.images2.append(image)
i = 2 - i
self.executed = True
class FileFilter(Component):
"""Filters some rows based on certains criteria
such as date, time, extension"""
description = "Excludes from data stream files matching any of user's criteria"
attr_description = Component.attr_description + "time_relative:int:-1 before is default value \
0 is equal and 1 is after,time_reference:time:reference time,extensions:list(string):\
list of file extensions,extension_keep:Boolean:Whether we ought to keep specified extensions"
def __init__(self):
Component.__init__(self)
self.criteria = None
self.time_relative = -1 # (-1) before is default value, 0 is equal and 1 is after
self.time_reference = None
self.extensions = None
self.extension_keep = True
def set_time_reference(self, time_reference):
self.time_reference = time_reference
self.extensions = None
def set_extensions(self, extensions):
self.time_reference = None
self.extensions = extensions
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
tempI = set(self.parent.images)
tempO = set()
if self.time_reference is not None:
for image in tempI:
if self.time_relative == 1:
if self.time_reference < image.date:
tempO.add(image)
elif self.time_relative == -1:
if self.time_reference > image.date:
tempO.add(image)
elif self.time_relative == 0:
if self.time_reference == image.date:
tempO.add(image)
elif self.extensions is not None:
                if self.extension_keep:
tempO = tempO.union(set([im for im in self.parent.images if im.extension in self.extensions]))
else:
tempO = tempO.union(set([im for im in self.parent.images if im.extension not in self.extensions]))
            self.images = list(tempO)
            self.executed = True
class Joiner(Component):
"""Joins two streams into one"""
description = "Joins two data streams into one avoiding duplicates."
attr_description = Component.attr_description + "parent2:component:second parent"
def __init__(self):
Component.__init__(self)
self.parent2 = None
def setParent2(self, parent):
self.parent2 = parent
if parent is None :
self.executed = False
def delParent2(self):
self.parent2 = None
self.executed = False
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
self.images = list(set(self.parent.images + self.parent2.images))
self.executed = True
class O():
"""Output mechanism"""
def __init__(self):
pass
@classmethod
def write(cls, images, path, ComponentId):
for image in images:
image.path = path + image.name + str(ComponentId) + image.extension
image.image.save(image.path)
image.date = time.ctime(os.path.getctime(image.path))
class ImageData():
"""Image object"""
extensions = [".jpeg",".png",".jpg"]
def __init__(self, path, date, name, extension):
self.path = path
self.date = time.ctime(os.path.getctime(path))
self.name = os.path.splitext(os.path.basename(path))[0]
self.extension = os.path.splitext(path)[1]
self.image = None
def load(self):
self.image = Image(self.path)
def unload(self):
del self.image
class ImageStack(Component):
"""Stacks images"""
description = "Stacks several images for bluhring."
attr_description = Component.attr_description + "directory:string:path for the made up pics\
,intensity:int:number of images you want to merge"
def __init__(self):
Component.__init__(self)
self.directory = "./static/test/treated/cam/"
self.intensity = 5
def process(self):
frames = ImageSet()
images = self.parent.images
l=0
for im in images:
print "Considering " + im.path
im.load()
frames.append(im.image)
if len(frames) > self.intensity:
frames.pop(0)
pic = reduce(add, [i / float(len(frames)) for i in frames])
pic.show()
pth = dir_tmp + "bluhr" + str(l) +".jpeg"
l += 1
if (l< len(images)-self.intensity/4):
pic.save(pth)
imageD = ImageData(pth)
self.images.append(imageD)
class Cropper(Component):
    attr_description = Component.attr_description + "output:O:output writer,x:int:x coordinate (abscissa) of the cropping rectangle's top-left point,\
y:int:y coordinate (ordinate) of the cropping rectangle's top-left point,width:int:width of cropping rectangle,\
height:int:height of cropping rectangle"
description = "Component for cropping images. It takes as input a list of images and returns the list of cropped images"
def __init__(self):
Component.__init__(self)
self.output = O()
self.x = 0
self.y = 0
self.width = 100
self.height = 100
def process(self):
if not self.executed and (self.parent is not None):
self.executeParent()
self.images = self.parent.images
for image in self.images:
image.load()
for im in self.images:
im.image = im.image.crop(self.x,self.y,self.width,self.height)
            self.output.write(self.images, dir_tmp, self.id)
for image in self.images:
image.unload()
self.executed = True
class GrayScale(Component):
attr_description = Component.attr_description + "output:O:output writer,degree:int:output images darkness"
description = "Component for turning the images into gray scale. It takes as input a list of images and returns the list of gray images"
def __init__(self):
Component.__init__(self)
self.degree = 1
def process(self):
if not self.executed and (self.parent is not None):
self.executeParent()
self.images = self.parent.images
for image in self.images:
image.load()
for im in self.images:
(red, green, blue) = im.image.splitChannels(False)
im.image = (red.toGray() + green.toGray() + blue.toGray()) / self.degree
self.executed = True
O.write(self.images,dir_tmp,self.id)
for image in self.images:
image.unload()
class ChromaKey(Component):
description = "Composes all the images in input1 with the background image defined in input2"
attr_description = Component.attr_description + "output:O:output writer,background:ImageData:the image applied as background,parent2:component:second parent"
def __init__(self):
Component.__init__(self)
self.background = None
self.parent2 = None
self.output = O()
def setParent2(self, parent):
self.parent2 = parent
if parent is None :
self.executed = False
def delParent2(self):
self.parent2 = None
self.executed = False
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
self.images = self.parent.images
self.background = self.parent2.images[0]
for image in self.images:
image.load()
for i in self.images:
background = self.background.image.scale(i.image.width, i.image.height)
                mask = i.image.hueDistance(color=Color.GREEN).binarize()
                i.image = (i.image - mask) + (background - mask.invert())
self.executed = True
O.write(self.images,dir_tmp,self.id)
for image in self.images:
image.unload()
class FacesDetector(Component):
cascade = cv2.CascadeClassifier('../../XML/haarcascade_frontalface_default.xml')
def __init__(self):
Component.__init__(self)
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
for i in self.parent.images:
img = cv2.imread(i.path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
faces = FacesDetector.cascade.detectMultiScale(gray, 1.3, 5)
#contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for (x,y,w,h),k in zip(faces,range(len(faces))):
#cv2.ellipse(img, (x + w / 2,y + h / 2),(w / 2,h / 2), 0, 0, 360,(255,0,0),2)
o = img[y: y + h, x: x + w]
path = dir_tmp + i.name + str(self.id) + str(k) + '.jpg'
                    cv2.imwrite(path, o)
image = createImage(path)
image.date = time.ctime(os.path.getctime(image.path))
self.images.append(image)
#for c,k in zip(contours,range(len(contours))):
# if cv2.pointPolygonTest(c,(x,y),False) > -1:
# cv2.drawContours(img, contours, k, (0,255,0), 3)
#cv2.imshow('img',img)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
class Recognizer(Component):
def __init__(self):
Component.__init__(self)
self.parent2 = None
self.parent3 = None
def process(self):
if not self.executed and self.parent is not None:
self.executeParent()
#positives
f = open('../static/test/positives.dat', 'w')
for i in self.parent.images:
i.load()
f.write(i.path + " 1 0 0 " + str(i.image.width) + " " + str(i.image.height) + "\n")
i.unload()
nb_positives = len(self.parent.images)
f.close()
#negatives
f = open('../static/test/negatives.dat', 'w')
for i in self.parent2.images:
i.load()
f.write(i.path + "\n")
i.unload()
nb_negatives = len(self.parent2.images)
f.close()
os.system("opencv_createsamples -info ../static/test/positives.dat -vec ../static/test/positives.vec -num "+ str(nb_positives) +" -w 48 -h 48")
os.system("opencv_traincascade -data ../../XML/ -vec ../static/test/positives.vec -w 48 -h 48 -bg ../static/test/negatives.dat -numPos "+ str(nb_positives) + " -numNeg "+ str(nb_negatives))
cascade = cv2.CascadeClassifier('../../XML/haarcascade_frontalface_default.xml')
for i in self.parent3.images:
img = cv2.imread(i.path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
faces = cascade.detectMultiScale(gray, 1.3, 5)
#contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for (x,y,w,h),k in zip(faces,range(len(faces))):
#cv2.ellipse(img, (x + w / 2,y + h / 2),(w / 2,h / 2), 0, 0, 360,(255,0,0),2)
o = img[y: y + h, x: x + w]
path = dir_tmp + i.name + str(self.id) + str(k) + '.jpg'
                    cv2.imwrite(path, o)
image = createImage(path)
image.date = time.ctime(os.path.getctime(image.path))
self.images.append(image)
class CamReader(Component):
"""Converts a stream from camera into some static filestream"""
attr_description = Component.attr_description + "duration:int:Capture duration"
description = "Converts a stream from camera into some static filestream"
def __init__(self):
Component.__init__(self)
        self.duration = 6  # in two-second units: the loop below grabs roughly two frames per unit, one per second
def process(self):
self.images = []
cam = Camera()
for i in xrange (0,2*self.duration-1):
pth = dir_tmp + "cam/cam" + str(i) + ".jpeg"
image = cam.getImage()
image.save(pth)
            imageD = createImage(pth)
self.images.append(imageD)
time.sleep(1)
class Reader(Component):
    attr_description = Component.attr_description + "pathes:list(string):lists of file or folder pathes,\
length:int:images count,key_points:list:key points of the images (currently unused),mean_colors:list:mean colors of the key points (currently unused)"
description = "Creates a data stream from a file or a folder and its subfolders."
def __init__(self):
Component.__init__(self)
self.pathes = ["./static/test"]
self.length = None
self.key_points = []
self.mean_colors = []
def setPathes(self, pathes):
self.pathes = pathes
def process(self):
self.read(self.pathes)
self.length = len(self.images)
for image in self.images:
image.load()
# image.image.show()
# time.sleep(1)
# self.key_points = [i.image.findKeypoints() for i in self.images]
self.mean_colors = [k.meanColor() for k in self.key_points]
O.write(self.images,dir_tmp,self.id)
for image in self.images:
image.unload()
self.executed = True
    def read(self, pathes):
        images = []
        for path in pathes:
            if os.path.isfile(path):
                # print "Considering file1 ", path
                i = createImage(path)
                if i is not None:
                    images.append(i)
            elif os.path.isdir(path):
                # print "Considering directory ", path
                for dirname, dirnames, filenames in os.walk(path):
                    # print "Directory ", dirname," has subfolders ", dirnames
                    # print "Directory ", dirname," has subfiles ", filenames
                    for filename in filenames:
                        # print "Considering file2 ", filename, " of ", dirname
                        img = createImage(os.path.join(dirname, filename))
                        if img is not None:
                            images.append(img)
        self.images = list(set(images))
class Writer(Component):
"""Writes pics on disc"""
attr_description = Component.attr_description + "path:string:File path"
description = "Writes the content of the data stream's content in a specified path."
def __init__(self):
Component.__init__(self)
self.path = "./test/"
    def process(self):
        if not self.executed and self.parent is not None:
            self.executeParent()
            self.images = self.parent.images
        O.write(self.images, self.path, "Lol")
        self.executed = True
if __name__ == "__main__" :
import string
suffixes = string.letters + string.digits
"""binarizer = GrayScale(suffixes[0])
suffixes = suffixes[1:]
binarizer.process()
chroma = ChromaKey(suffixes[0])
suffixes = suffixes[1:]
chroma.process()"""
detector = FacesDetector()
suffixes = suffixes[1:]
detector.process()
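    # A hedged sketch of chaining components with the parent protocol above
    # (paths and wiring are illustrative only):
    #
    #     reader = Reader()
    #     gray = GrayScale()
    #     gray.setParent(reader)
    #     gray.process()              # executeParent() runs the Reader first
    #     writer = Writer()
    #     writer.images = gray.images
    #     writer.process()            # writes the gray images to writer.path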
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import campaign_extension_setting
from google.ads.googleads.v8.services.types import (
campaign_extension_setting_service,
)
from .base import CampaignExtensionSettingServiceTransport, DEFAULT_CLIENT_INFO
class CampaignExtensionSettingServiceGrpcTransport(
CampaignExtensionSettingServiceTransport
):
"""gRPC backend transport for CampaignExtensionSettingService.
Service to manage campaign extension settings.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_campaign_extension_setting(
self,
) -> Callable[
[campaign_extension_setting_service.GetCampaignExtensionSettingRequest],
campaign_extension_setting.CampaignExtensionSetting,
]:
r"""Return a callable for the get campaign extension setting method over gRPC.
Returns the requested campaign extension setting in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetCampaignExtensionSettingRequest],
~.CampaignExtensionSetting]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_campaign_extension_setting" not in self._stubs:
self._stubs[
"get_campaign_extension_setting"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.CampaignExtensionSettingService/GetCampaignExtensionSetting",
request_serializer=campaign_extension_setting_service.GetCampaignExtensionSettingRequest.serialize,
response_deserializer=campaign_extension_setting.CampaignExtensionSetting.deserialize,
)
return self._stubs["get_campaign_extension_setting"]
@property
def mutate_campaign_extension_settings(
self,
) -> Callable[
[
campaign_extension_setting_service.MutateCampaignExtensionSettingsRequest
],
campaign_extension_setting_service.MutateCampaignExtensionSettingsResponse,
]:
r"""Return a callable for the mutate campaign extension
settings method over gRPC.
Creates, updates, or removes campaign extension settings.
Operation statuses are returned.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `CollectionSizeError <>`__
`CriterionError <>`__ `DatabaseError <>`__ `DateError <>`__
`DistinctError <>`__ `ExtensionSettingError <>`__
`FieldError <>`__ `FieldMaskError <>`__ `HeaderError <>`__
`IdError <>`__ `InternalError <>`__ `ListOperationError <>`__
`MutateError <>`__ `NewResourceCreationError <>`__
`NotEmptyError <>`__ `NullError <>`__
`OperationAccessDeniedError <>`__ `OperatorError <>`__
`QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
`SizeLimitError <>`__ `StringFormatError <>`__
`StringLengthError <>`__ `UrlFieldError <>`__
Returns:
Callable[[~.MutateCampaignExtensionSettingsRequest],
~.MutateCampaignExtensionSettingsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_campaign_extension_settings" not in self._stubs:
self._stubs[
"mutate_campaign_extension_settings"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v8.services.CampaignExtensionSettingService/MutateCampaignExtensionSettings",
request_serializer=campaign_extension_setting_service.MutateCampaignExtensionSettingsRequest.serialize,
response_deserializer=campaign_extension_setting_service.MutateCampaignExtensionSettingsResponse.deserialize,
)
return self._stubs["mutate_campaign_extension_settings"]
__all__ = ("CampaignExtensionSettingServiceGrpcTransport",)
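# A hedged usage sketch (not generated code): the transport is normally built
# by the service client, but it can be constructed directly and its callables
# invoked; the resource name below is illustrative only.
#
#     transport = CampaignExtensionSettingServiceGrpcTransport()
#     rpc = transport.get_campaign_extension_setting
#     request = campaign_extension_setting_service.GetCampaignExtensionSettingRequest(
#         resource_name="customers/1234567890/campaignExtensionSettings/111~SITELINK",
#     )
#     response = rpc(request)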
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import copy
from optparse import OptionParser
import os
import pickle
import re
import sys
try:
from xml.etree.ElementTree import parse
except ImportError:
from elementtree.ElementTree import parse
# Make sure we're using Babel source, and not some previously installed version
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..'))
from babel import dates, numbers
from babel.localedata import Alias
from babel.util import set
weekdays = {'mon': 0, 'tue': 1, 'wed': 2, 'thu': 3, 'fri': 4, 'sat': 5,
'sun': 6}
try:
any
except NameError:
def any(iterable):
return filter(None, list(iterable))
def _text(elem):
buf = [elem.text or '']
for child in elem:
buf.append(_text(child))
buf.append(elem.tail or '')
return u''.join(filter(None, buf)).strip()
NAME_RE = re.compile(r"^\w+$")
TYPE_ATTR_RE = re.compile(r"^\w+\[@type='(.*?)'\]$")
NAME_MAP = {
'dateFormats': 'date_formats',
'dateTimeFormats': 'datetime_formats',
'eraAbbr': 'abbreviated',
'eraNames': 'wide',
'eraNarrow': 'narrow',
'timeFormats': 'time_formats'
}
def _translate_alias(ctxt, path):
parts = path.split('/')
keys = ctxt[:]
for part in parts:
if part == '..':
keys.pop()
else:
match = TYPE_ATTR_RE.match(part)
if match:
keys.append(match.group(1))
else:
assert NAME_RE.match(part)
keys.append(NAME_MAP.get(part, part))
return keys
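# Worked example (illustrative): with ctxt = ['months', 'stand-alone', 'wide']
# and path = "../../monthContext[@type='format']/monthWidth[@type='wide']",
# the two '..' parts pop 'wide' and 'stand-alone', the two [@type=...] parts
# append their type values, and the function returns
# ['months', 'format', 'wide'].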
def main():
parser = OptionParser(usage='%prog path/to/cldr')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('incorrect number of arguments')
srcdir = args[0]
destdir = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
'..', 'babel')
sup = parse(os.path.join(srcdir, 'supplemental', 'supplementalData.xml'))
# Import global data from the supplemental files
global_data = {}
territory_zones = global_data.setdefault('territory_zones', {})
zone_aliases = global_data.setdefault('zone_aliases', {})
zone_territories = global_data.setdefault('zone_territories', {})
for elem in sup.findall('.//timezoneData/zoneFormatting/zoneItem'):
tzid = elem.attrib['type']
territory_zones.setdefault(elem.attrib['territory'], []).append(tzid)
zone_territories[tzid] = elem.attrib['territory']
if 'aliases' in elem.attrib:
for alias in elem.attrib['aliases'].split():
zone_aliases[alias] = tzid
# Import Metazone mapping
meta_zones = global_data.setdefault('meta_zones', {})
tzsup = parse(os.path.join(srcdir, 'supplemental', 'metazoneInfo.xml'))
for elem in tzsup.findall('.//timezone'):
for child in elem.findall('usesMetazone'):
if 'to' not in child.attrib: # FIXME: support old mappings
meta_zones[elem.attrib['type']] = child.attrib['mzone']
outfile = open(os.path.join(destdir, 'global.dat'), 'wb')
try:
pickle.dump(global_data, outfile, 2)
finally:
outfile.close()
# build a territory containment mapping for inheritance
regions = {}
for elem in sup.findall('.//territoryContainment/group'):
regions[elem.attrib['type']] = elem.attrib['contains'].split()
# Resolve territory containment
territory_containment = {}
region_items = regions.items()
region_items.sort()
for group, territory_list in region_items:
for territory in territory_list:
containers = territory_containment.setdefault(territory, set([]))
if group in territory_containment:
containers |= territory_containment[group]
containers.add(group)
filenames = os.listdir(os.path.join(srcdir, 'main'))
filenames.remove('root.xml')
filenames.sort(lambda a,b: len(a)-len(b))
filenames.insert(0, 'root.xml')
for filename in filenames:
stem, ext = os.path.splitext(filename)
if ext != '.xml':
continue
print>>sys.stderr, 'Processing input file %r' % filename
tree = parse(os.path.join(srcdir, 'main', filename))
data = {}
language = None
elem = tree.find('.//identity/language')
if elem is not None:
language = elem.attrib['type']
print>>sys.stderr, ' Language: %r' % language
territory = None
elem = tree.find('.//identity/territory')
if elem is not None:
territory = elem.attrib['type']
else:
territory = '001' # world
print>>sys.stderr, ' Territory: %r' % territory
regions = territory_containment.get(territory, [])
print>>sys.stderr, ' Regions: %r' % regions
# <localeDisplayNames>
territories = data.setdefault('territories', {})
for elem in tree.findall('.//territories/territory'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in territories:
continue
territories[elem.attrib['type']] = _text(elem)
languages = data.setdefault('languages', {})
for elem in tree.findall('.//languages/language'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in languages:
continue
languages[elem.attrib['type']] = _text(elem)
variants = data.setdefault('variants', {})
for elem in tree.findall('.//variants/variant'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in variants:
continue
variants[elem.attrib['type']] = _text(elem)
scripts = data.setdefault('scripts', {})
for elem in tree.findall('.//scripts/script'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib['type'] in scripts:
continue
scripts[elem.attrib['type']] = _text(elem)
# <dates>
week_data = data.setdefault('week_data', {})
supelem = sup.find('.//weekData')
for elem in supelem.findall('minDays'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['min_days'] = int(elem.attrib['count'])
for elem in supelem.findall('firstDay'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['first_day'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendStart'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['weekend_start'] = weekdays[elem.attrib['day']]
for elem in supelem.findall('weekendEnd'):
territories = elem.attrib['territories'].split()
if territory in territories or any([r in territories for r in regions]):
week_data['weekend_end'] = weekdays[elem.attrib['day']]
zone_formats = data.setdefault('zone_formats', {})
for elem in tree.findall('.//timeZoneNames/gmtFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['gmt'] = unicode(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/regionFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['region'] = unicode(elem.text).replace('{0}', '%s')
break
for elem in tree.findall('.//timeZoneNames/fallbackFormat'):
if 'draft' not in elem.attrib and 'alt' not in elem.attrib:
zone_formats['fallback'] = unicode(elem.text) \
.replace('{0}', '%(0)s').replace('{1}', '%(1)s')
break
time_zones = data.setdefault('time_zones', {})
for elem in tree.findall('.//timeZoneNames/zone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = unicode(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = unicode(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = unicode(child.text)
time_zones[elem.attrib['type']] = info
meta_zones = data.setdefault('meta_zones', {})
for elem in tree.findall('.//timeZoneNames/metazone'):
info = {}
city = elem.findtext('exemplarCity')
if city:
info['city'] = unicode(city)
for child in elem.findall('long/*'):
info.setdefault('long', {})[child.tag] = unicode(child.text)
for child in elem.findall('short/*'):
info.setdefault('short', {})[child.tag] = unicode(child.text)
info['common'] = elem.findtext('commonlyUsed') == 'true'
meta_zones[elem.attrib['type']] = info
for calendar in tree.findall('.//calendars/calendar'):
if calendar.attrib['type'] != 'gregorian':
# TODO: support other calendar types
continue
months = data.setdefault('months', {})
for ctxt in calendar.findall('months/monthContext'):
ctxt_type = ctxt.attrib['type']
ctxts = months.setdefault(ctxt_type, {})
for width in ctxt.findall('monthWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'month':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib.get('type'))] = unicode(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['months', ctxt_type, width_type],
elem.attrib['path'])
)
days = data.setdefault('days', {})
for ctxt in calendar.findall('days/dayContext'):
ctxt_type = ctxt.attrib['type']
ctxts = days.setdefault(ctxt_type, {})
for width in ctxt.findall('dayWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'day':
dtype = weekdays[elem.attrib['type']]
                            if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and dtype in widths:
continue
widths[dtype] = unicode(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['days', ctxt_type, width_type],
elem.attrib['path'])
)
quarters = data.setdefault('quarters', {})
for ctxt in calendar.findall('quarters/quarterContext'):
ctxt_type = ctxt.attrib['type']
ctxts = quarters.setdefault(ctxt.attrib['type'], {})
for width in ctxt.findall('quarterWidth'):
width_type = width.attrib['type']
widths = ctxts.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'quarter':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib['type'])] = unicode(elem.text)
elif elem.tag == 'alias':
ctxts[width_type] = Alias(
_translate_alias(['quarters', ctxt_type, width_type],
elem.attrib['path'])
)
eras = data.setdefault('eras', {})
for width in calendar.findall('eras/*'):
width_type = NAME_MAP[width.tag]
widths = eras.setdefault(width_type, {})
for elem in width.getiterator():
if elem.tag == 'era':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and int(elem.attrib['type']) in widths:
continue
widths[int(elem.attrib.get('type'))] = unicode(elem.text)
elif elem.tag == 'alias':
eras[width_type] = Alias(
_translate_alias(['eras', width_type],
elem.attrib['path'])
)
# AM/PM
periods = data.setdefault('periods', {})
for elem in calendar.findall('am'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.tag in periods:
continue
periods[elem.tag] = unicode(elem.text)
for elem in calendar.findall('pm'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.tag in periods:
continue
periods[elem.tag] = unicode(elem.text)
date_formats = data.setdefault('date_formats', {})
for format in calendar.findall('dateFormats'):
for elem in format.getiterator():
if elem.tag == 'dateFormatLength':
if 'draft' in elem.attrib and \
elem.attrib.get('type') in date_formats:
continue
try:
date_formats[elem.attrib.get('type')] = \
dates.parse_pattern(unicode(elem.findtext('dateFormat/pattern')))
except ValueError, e:
print>>sys.stderr, 'ERROR: %s' % e
elif elem.tag == 'alias':
date_formats = Alias(_translate_alias(
['date_formats'], elem.attrib['path'])
)
time_formats = data.setdefault('time_formats', {})
for format in calendar.findall('timeFormats'):
for elem in format.getiterator():
if elem.tag == 'timeFormatLength':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in time_formats:
continue
try:
time_formats[elem.attrib.get('type')] = \
dates.parse_pattern(unicode(elem.findtext('timeFormat/pattern')))
except ValueError, e:
print>>sys.stderr, 'ERROR: %s' % e
elif elem.tag == 'alias':
time_formats = Alias(_translate_alias(
['time_formats'], elem.attrib['path'])
)
datetime_formats = data.setdefault('datetime_formats', {})
for format in calendar.findall('dateTimeFormats'):
for elem in format.getiterator():
if elem.tag == 'dateTimeFormatLength':
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in datetime_formats:
continue
try:
datetime_formats[elem.attrib.get('type')] = \
unicode(elem.findtext('dateTimeFormat/pattern'))
except ValueError, e:
print>>sys.stderr, 'ERROR: %s' % e
elif elem.tag == 'alias':
datetime_formats = Alias(_translate_alias(
['datetime_formats'], elem.attrib['path'])
)
# <numbers>
number_symbols = data.setdefault('number_symbols', {})
for elem in tree.findall('.//numbers/symbols/*'):
if ('draft' in elem.attrib or 'alt' in elem.attrib):
continue
number_symbols[elem.tag] = unicode(elem.text)
decimal_formats = data.setdefault('decimal_formats', {})
for elem in tree.findall('.//decimalFormats/decimalFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in decimal_formats:
continue
pattern = unicode(elem.findtext('decimalFormat/pattern'))
decimal_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
scientific_formats = data.setdefault('scientific_formats', {})
for elem in tree.findall('.//scientificFormats/scientificFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in scientific_formats:
continue
pattern = unicode(elem.findtext('scientificFormat/pattern'))
scientific_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
currency_formats = data.setdefault('currency_formats', {})
for elem in tree.findall('.//currencyFormats/currencyFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in currency_formats:
continue
pattern = unicode(elem.findtext('currencyFormat/pattern'))
currency_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
percent_formats = data.setdefault('percent_formats', {})
for elem in tree.findall('.//percentFormats/percentFormatLength'):
if ('draft' in elem.attrib or 'alt' in elem.attrib) \
and elem.attrib.get('type') in percent_formats:
continue
pattern = unicode(elem.findtext('percentFormat/pattern'))
percent_formats[elem.attrib.get('type')] = numbers.parse_pattern(pattern)
currency_names = data.setdefault('currency_names', {})
currency_symbols = data.setdefault('currency_symbols', {})
for elem in tree.findall('.//currencies/currency'):
code = elem.attrib['type']
# TODO: support plural rules for currency name selection
for name in elem.findall('displayName'):
if ('draft' in name.attrib or 'count' in name.attrib) \
and code in currency_names:
continue
currency_names[code] = unicode(name.text)
# TODO: support choice patterns for currency symbol selection
symbol = elem.find('symbol')
if symbol is not None and 'draft' not in symbol.attrib \
and 'choice' not in symbol.attrib:
currency_symbols[code] = unicode(symbol.text)
outfile = open(os.path.join(destdir, 'localedata', stem + '.dat'), 'wb')
try:
pickle.dump(data, outfile, 2)
finally:
outfile.close()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
def __init__(self, test_addr_contents=False):
super().__init__()
self.test_addr_contents = test_addr_contents
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
if(self.test_addr_contents):
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def addr_received(self):
return self.num_ipv4_received != 0
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
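        # All addresses in one message share the same last octet (self.counter
        # is bumped once per message) and use consecutive ports starting at 8333,
        # matching the checks in AddrReceiver.on_addr when test_addr_contents is set.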
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 10 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
        # from a new outbound peer from being relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
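        # i in [0, 1000) yields addresses 0.0.1.1 through 3.231.1.1, i.e. spread
        # over four /8 prefixes rather than a single network.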
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the base ProcessingLayer and a subclass that uses Combiners."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import numpy as np
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import training_generator
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.experimental.preprocessing.PreprocessingLayer')
class PreprocessingLayer(Layer):
"""Base class for PreprocessingLayers."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def adapt(self, data, reset_state=True):
# TODO(momernick): Add examples.
"""Fits the state of the preprocessing layer to the data being passed.
Arguments:
data: The data to train on. It can be passed either as a tf.data
Dataset, or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`, or whether to start
from the existing state. This argument may not be relevant to all
preprocessing layers: a subclass of PreprocessingLayer may choose to
throw if 'reset_state' is set to False.
"""
pass
class CombinerPreprocessingLayer(PreprocessingLayer):
"""Base class for PreprocessingLayers that do computation using a Combiner.
This class provides several helper methods to make creating a
PreprocessingLayer easier. It assumes that the core of your computation will
be done via a Combiner object. Subclassing this class to create a
PreprocessingLayer allows your layer to be compatible with distributed
computation.
This class is compatible with Tensorflow 2.0+.
"""
def __init__(self, combiner, **kwargs):
super(CombinerPreprocessingLayer, self).__init__(**kwargs)
self._combiner = combiner
self._previously_updated = False
self.state_variables = collections.OrderedDict()
def _add_state_variable(self,
name,
shape,
dtype,
initializer=None,
partitioner=None,
use_resource=None,
**kwargs):
"""Add a variable that can hold state which is updated during adapt().
Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: initializer instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`
**kwargs: Additional keyword arguments. Accepted values are `getter` and
`collections`.
Returns:
The created variable.
"""
weight = self.add_weight(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=None,
trainable=False,
constraint=None,
partitioner=partitioner,
use_resource=use_resource,
**kwargs)
# TODO(momernick): Do not allow collisions here.
self.state_variables[name] = weight
return weight
def _restore_updates(self):
"""Recreates a dict of updates from the layer's weights."""
data_dict = {}
for name, var in self.state_variables.items():
data_dict[name] = var.numpy()
return data_dict
def _dataset_is_infinite(self, dataset):
"""True if the passed dataset is infinite."""
return math_ops.equal(
cardinality.cardinality(dataset), cardinality.INFINITE)
def _get_dataset_iterator(self, dataset):
"""Gets an iterator from a tf.data.Dataset."""
return dataset_ops.make_one_shot_iterator(dataset).get_next
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the data being passed.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
or as a numpy array.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`, or whether to start from
the existing state. Subclasses may choose to throw if reset_state is set
to 'False'.
"""
if reset_state:
accumulator = None
else:
accumulator = self._combiner.restore(self._restore_updates())
if not isinstance(data, (dataset_ops.DatasetV2, np.ndarray)):
raise ValueError(
'adapt() requires a Dataset or a Numpy array as input, got {}'.format(
type(data)))
if isinstance(data, dataset_ops.DatasetV2):
# Validate the datasets to try and ensure we haven't been passed one with
# infinite size. That would cause an infinite loop here.
if self._dataset_is_infinite(data):
raise ValueError(
'The dataset passed to "adapt()" has an infinite number of '
'elements. Please use dataset.take(...) to make the number '
'of elements finite.')
next_data = self._get_dataset_iterator(data)
else:
generator, _ = training_generator.convert_to_generator_like(
data, batch_size=len(data))
# If the data is not a dataset, we can iterate over it using next(foo);
# here, we wrap that into a callable.
next_data = lambda: next(generator)
# TODO(momernick): Some sort of status bar?
# TODO(momernick): Implement parallel processing here?
try:
data_element = next_data()
# First, see if the layer is built or not. If it is not, then we must
# build it.
if not self.built:
try:
# If this is a Numpy array or tensor, we can get shape from .shape.
# If not, an attribute error will be thrown (and we can assume the
          # input data is a scalar with shape None).
shape = data_element.shape
except AttributeError:
shape = None
self.build(shape)
# Once we have built the Layer, we can process the input data. We do so
# until we've gotten an exception indicating that we have no more data.
while True:
accumulator = self._combiner.compute(data_element, accumulator)
data_element = next_data()
# Note that this belongs to the outer indentation of 'try' - we need to
# catch exceptions resulting from the first 'next_data()' invocation as
# well.
except (StopIteration, errors.OutOfRangeError):
pass
updates = self._combiner.extract(accumulator)
self._set_state_variables(updates)
def _set_state_variables(self, updates):
"""Directly update the internal state of this Layer.
This method expects a string-keyed dict of {state_variable_name: state}. The
    precise nature of the state, and the names associated, are described by
the subclasses of CombinerPreprocessingLayer.
Args:
updates: A string keyed dict of weights to update.
Raises:
RuntimeError: if 'build()' was not called before 'set_processing_state'.
"""
# TODO(momernick): Do we need to do any more input sanitization?
if not self.built:
raise RuntimeError('_set_state_variables() must be called after build().')
with ops.init_scope():
for var_name, value in updates.items():
self.state_variables[var_name].assign(value)
class Combiner(object):
"""Functional object that defines a shardable computation.
This object defines functions required to create and manipulate data objects.
These data objects, referred to below as 'accumulators', are computation-
specific and may be implemented alongside concrete subclasses of Combiner
(if necessary - some computations may be simple enough that standard Python
types can be used as accumulators).
The intent for this class is that by describing computations in this way, we
can arbitrarily shard a dataset, perform computations on a subset, and then
merge the computation into a final result. This enables distributed
computation.
The combiner itself does not own any state - all computational state is owned
by the accumulator objects. This is so that we can have an arbitrary number of
Combiners (thus sharding the computation N ways) without risking any change
to the underlying computation. These accumulator objects are uniquely
associated with each Combiner; a Combiner defines what the accumulator object
should be and will only work with accumulators of that type.
"""
__metaclass__ = abc.ABCMeta
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
@abc.abstractmethod
def compute(self, batch_values, accumulator=None):
"""Compute a step in this computation, returning a new accumulator.
This method computes a step of the computation described by this Combiner.
If an accumulator is passed, the data in that accumulator is also used; so
compute(batch_values) results in f(batch_values), while
compute(batch_values, accumulator) results in
merge(f(batch_values), accumulator).
Args:
batch_values: A list of ndarrays representing the values of the inputs for
this step of the computation.
accumulator: the current accumulator. Can be None.
Returns:
An accumulator that includes the passed batch of inputs.
"""
pass
@abc.abstractmethod
def merge(self, accumulators):
"""Merge several accumulators to a single accumulator.
This method takes the partial values in several accumulators and combines
them into a single accumulator. This computation must not be order-specific
    (that is, merge([a, b]) must return the same result as merge([b, a])).
Args:
accumulators: the accumulators to merge, as a list.
Returns:
A merged accumulator.
"""
pass
@abc.abstractmethod
def extract(self, accumulator):
"""Convert an accumulator into a dict of output values.
Args:
accumulator: The accumulator to convert.
Returns:
A dict of ndarrays representing the data in this accumulator.
"""
pass
@abc.abstractmethod
def restore(self, output):
"""Create an accumulator based on 'output'.
This method creates a new accumulator with identical internal state to the
one used to create the data in 'output'. This means that if you do
output_data = combiner.extract(accumulator_1)
accumulator_2 = combiner.restore(output_data)
then accumulator_1 and accumulator_2 will have identical internal state, and
computations using either of them will be equivalent.
Args:
output: The data output from a previous computation. Should be in the same
        form as provided by 'extract()'.
Returns:
A new accumulator.
"""
pass
@abc.abstractmethod
def serialize(self, accumulator):
"""Serialize an accumulator for a remote call.
This function serializes an accumulator to be sent to a remote process.
Args:
accumulator: The accumulator to serialize.
Returns:
A byte string representing the passed accumulator.
"""
pass
@abc.abstractmethod
def deserialize(self, encoded_accumulator):
"""Deserialize an accumulator received from 'serialize()'.
This function deserializes an accumulator serialized by 'serialize()'.
Args:
encoded_accumulator: A byte string representing an accumulator.
Returns:
The accumulator represented by the passed byte_string.
"""
pass
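# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal Combiner
# that keeps a running sum and element count so that extract() can report a
# mean. The accumulator is a plain dict, which the Combiner docstring above
# explicitly allows; only the module-level numpy import (np) is relied upon.
class _ExampleMeanCombiner(Combiner):
  """Example Combiner that computes the mean of all values seen so far."""
  def compute(self, batch_values, accumulator=None):
    if accumulator is None:
      accumulator = {'sum': 0.0, 'count': 0}
    values = np.asarray(batch_values)
    accumulator['sum'] += float(np.sum(values))
    accumulator['count'] += int(values.size)
    return accumulator
  def merge(self, accumulators):
    # Order-independent: summing partial sums and counts commutes.
    return {
        'sum': sum(acc['sum'] for acc in accumulators),
        'count': sum(acc['count'] for acc in accumulators),
    }
  def extract(self, accumulator):
    count = max(accumulator['count'], 1)
    return {'mean': np.array(accumulator['sum'] / count)}
  def restore(self, output):
    # Only the mean survives extraction in this toy example; a production
    # Combiner would round-trip the full accumulator state.
    return {'sum': float(output['mean']), 'count': 1}
  def serialize(self, accumulator):
    import json  # local import keeps the sketch self-contained
    return json.dumps(accumulator).encode('utf-8')
  def deserialize(self, encoded_accumulator):
    import json
    return json.loads(encoded_accumulator.decode('utf-8'))
# Usage sketch: a CombinerPreprocessingLayer subclass would pass an instance
# of such a combiner to __init__ and expose the extracted 'mean' through
# _add_state_variable() / _set_state_variables() during adapt().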
|
|
"""
Application view.
"""
from datetime import datetime
from django.http import Http404
from django.shortcuts import render, redirect
from django.views.generic import View
from application.models import Application
from .forms import Step1RateForm, Step2RateForm, TerminateForm, ApproveForm
from .models import User
class RateView(View):
"""
    Find a user by uuid and allow them to rate other applications.
    Do not allow voting multiple times for the same application.
    Constraints for normal applications:
    - show only applications with fewer than 2 votes
    - sort applications by vote count and creation date
    Constraints for terminator applications:
    - show all applications
    - sort applications by vote count and creation date
"""
def get_context_data(self, created_by, application, rate_form=None):
if not rate_form:
rate_form = Step1RateForm(
initial={
"created_by": created_by.id,
"application": application.id,
}
)
context = {
"user": created_by,
"rate_count": created_by.rated.count(),
"application": application.info,
"forms": {
"rate": rate_form,
"terminate": TerminateForm(
initial={
"terminate": True,
"application": application.uuid
}
),
"approve": ApproveForm(
initial={
"approve": True,
"application": application.uuid
}
),
}
}
return context
def get(self, request, uuid=None):
try:
created_by = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
unrated = Application.objects.get_unrated(created_by)
if unrated:
if created_by.terminator:
request.session["last_application"] = str(unrated[0].uuid)
template_name = "rating/rate.html"
context = self.get_context_data(created_by, unrated[0])
else:
template_name = "rating/all_rated.html"
context = {"user": created_by}
return render(request, template_name, context=context)
def rate(self, request, uuid):
"""
Save rating and redirect to rate view.
"""
rate_form = Step1RateForm(request.POST)
if rate_form.is_valid():
rating = rate_form.save(commit=False)
if str(rating.created_by.uuid) == uuid:
rating.ipaddress = (
request.META.get("HTTP_X_REAL_IP") or request.META["REMOTE_ADDR"]
)
rating.save()
else:
try:
created_by = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
application = rate_form.cleaned_data["application"]
context = self.get_context_data(
created_by, application, rate_form=rate_form
)
template_name = "rating/rate.html"
return render(request, template_name, context=context)
return redirect("rating:rate", uuid)
def terminate(self, request, uuid):
"""
Terminate application.
"""
try:
user = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
try:
application = Application.objects.get(
uuid=request.session.get("last_application")
)
if str(application.uuid) != request.POST["application"]:
return redirect("rating:rate", uuid)
except Application.DoesNotExist:
raise Http404
if user.terminator:
application.terminated = True
application.terminated_by = user
application.terminated_ip = (
request.META.get("HTTP_X_REAL_IP") or request.META["REMOTE_ADDR"]
)
application.terminated_at = datetime.now()
application.save()
return redirect("rating:rate", uuid)
def approve(self, request, uuid):
"""
Approve application.
"""
try:
user = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
try:
application = Application.objects.get(
uuid=request.session.get("last_application")
)
if str(application.uuid) != request.POST["application"]:
return redirect("rating:rate", uuid)
except Application.DoesNotExist:
raise Http404
if user.terminator:
application.approved = True
application.approved_by = user
application.approved_ip = (
request.META.get("HTTP_X_REAL_IP") or request.META["REMOTE_ADDR"]
)
application.approved_at = datetime.now()
application.save()
return redirect("rating:rate", uuid)
def post(self, request, uuid=None):
"""
        Handle a terminate, approve, or rate request.
"""
if request.POST.get("terminate"):
return self.terminate(request, uuid)
elif request.POST.get("approve"):
return self.approve(request, uuid)
else:
return self.rate(request, uuid)
class Rate2View(View):
"""
    Find a user by uuid and allow them to rate applications.
"""
def get_context_data(self, created_by, application, rate_form=None):
if not rate_form:
rate_form = Step2RateForm(
initial={
"created_by": created_by.id,
"application": application.id,
}
)
context = {
"user": created_by,
"rate_count": created_by.rated2.count(),
"application": application.info2(),
"average_rating": application.average_rating,
"approval_level": application.approval_level,
"forms": {
"rate": rate_form,
}
}
return context
def get(self, request, uuid=None):
try:
created_by = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
unrated = Application.objects.get_unrated2(created_by)
if unrated:
template_name = "rating/rate2.html"
context = self.get_context_data(created_by, unrated[0])
else:
template_name = "rating/all_rated.html"
context = {"user": created_by}
return render(request, template_name, context=context)
def rate(self, request, uuid):
"""
Save rating and redirect to rate view.
"""
rate_form = Step2RateForm(request.POST)
if rate_form.is_valid():
rating = rate_form.save(commit=False)
if str(rating.created_by.uuid) == uuid:
rating.ipaddress = (
request.META.get("HTTP_X_REAL_IP") or request.META["REMOTE_ADDR"]
)
rating.save()
else:
try:
created_by = User.objects.get(uuid=uuid)
except User.DoesNotExist:
raise Http404
application = rate_form.cleaned_data["application"]
context = self.get_context_data(
created_by, application, rate_form=rate_form
)
template_name = "rating/rate2.html"
return render(request, template_name, context=context)
return redirect("rating:rate2", uuid)
def post(self, request, uuid=None):
"""
        Handle the rate request.
"""
return self.rate(request, uuid)
|
|
"""Git interface module for 'git-cvs'."""
import os
import time
import types
import shutil
import sys
from signal import signal, SIGHUP, SIGINT, SIGTERM, SIG_IGN
import subprocess
from subprocess import Popen, PIPE
from cvsgit.changeset import FILE_DELETED
from cvsgit.i18n import _
from cvsgit.error import Error
from cvsgit.utils import stripnl
from cvsgit.term import NoProgress
# I don't know how GIT_DIR and GIT_WORK_TREE and GIT_OBJECT_DIRECTORY
# and all the rest could affect us here, so I'll just discard them all
# for now.
def safe_environ():
env = os.environ.copy()
for k in env.keys():
if k.startswith('GIT_'):
del env[k]
return env
class GitError(Error):
"""Base exception for errors in the cvsgit.git module"""
pass
class GitCommandError(GitError):
"""Failure to run an external "git" command
The 'command' attribute will be an array representing the
command that failed and 'returncode' will contain the exit
code of the command. The 'stderr' member may contain the
error output from the command, but may also be None.
"""
def __init__(self, command, returncode, stderr=None):
self.command = command
self.returncode = returncode
self.stderr = stderr
msg = "'%s' exited with code %d" % \
(' '.join(command), returncode)
if stderr:
stderr = '\n '.join(stripnl(stderr).split('\n'))
msg += '\n\nError output of %s command:\n %s' % \
(command[0], stderr)
super(GitCommandError, self).__init__(msg)
class MissingAuthorFullname(GitError):
"""Raised when there is no known fullname for an author's login
name and --stop-on-missing-author was given on the command-line.
"""
def __init__(self, author):
msg = 'missing fullname for author: %s' % author
        super(MissingAuthorFullname, self).__init__(msg)
class Git(object):
"""Git repository and optional work tree.
    The Git repository may or may not already exist before the init()
    method is called; once that call returns successfully, the
    repository definitely exists.
"""
def __init__(self, directory=None):
"""Construct a Git repository object.
"""
if directory == None:
self._directory = os.getcwd()
else:
self._directory = directory
def get_directory(self):
"""Return the repository top-level path.
This may be the metadata directory or the top-level of the
work tree depending on whether the repository is bare or not.
This method may be called before the repository has been
initialized and will always return the same result.
"""
return self._directory
directory = property(get_directory)
def is_bare(self):
"""Return True iff the repository exists and is bare.
This method may be called before the repository has been
initialized, but may return a different result.
"""
return self.config_get('core.bare') == 'true'
def get_git_dir(self):
"""Return the path to the metadata directory.
This method may be called before the repository has been
initialized, but may return a different result.
"""
if self.is_bare():
return self.directory
else:
return os.path.join(self.directory, '.git')
git_dir = property(get_git_dir)
def get_git_work_tree(self):
"""Return the path to the working tree.
If the repository is bare, None is returned since it has no
associated work tree.
This method may be called before the repository has been
initialized, but may return a different result.
"""
if self.is_bare():
return None
else:
return self.directory
git_work_tree = property(get_git_work_tree)
def _popen(self, command, **kwargs):
if not kwargs.has_key('env'):
kwargs['env'] = safe_environ()
if not kwargs.has_key('cwd'):
kwargs['cwd'] = self.directory
return Popen(command, **kwargs)
def init(self, bare=False, quiet=False):
"""Initialize or reinitialize the repository.
"""
args = []
if bare:
args.append('--bare')
if quiet:
args.append('--quiet')
directory_created = False
try:
if not os.path.isdir(self.directory):
os.mkdir(self.directory)
directory_created = True
command = ['git', 'init'] + args
pipe = self._popen(command, stderr=PIPE)
dummy, stderr = pipe.communicate()
if pipe.returncode != 0:
raise GitCommandError(command, pipe.returncode, stderr)
except:
if directory_created:
shutil.rmtree(self.directory)
raise
def destroy(self):
"""Recursively remove the repository directory.
"""
if os.path.exists(self.directory):
shutil.rmtree(self.directory)
def checkout(self, *args):
"""Call 'git checkout' with given arguments.
"""
self.check_command('checkout', *args)
def rebase(self, *args):
"""Call 'git rebase' with given arguments.
"""
self.check_command('rebase', *args)
def pull(self, *args):
"""Call 'git pull' with given arguments.
"""
self.check_command('pull', *args)
def rev_parse(self, *args):
"""Return the output of 'git rev-parse <*args>'
"""
return self.check_command('rev-parse', *args, stdout=PIPE)
def rev_list(self, *args):
"""Return the output of 'git rev-list <*args>'
"""
return self.check_command('rev-list', *args, stdout=PIPE)
def symbolic_ref(self, *args):
"""Return the output of 'git symbolic-ref <*args>'
"""
return self.check_command('symbolic-ref', *args, stdout=PIPE)
def call(self, command, *args, **kwargs):
"""Run a "git" subcommand with given arguments.
See subprocess.call for the list of available keyword
arguments.
Returns the command's exit code.
"""
command = ['git', command] + list(args)
return subprocess.call(command, **kwargs)
def check_command(self, command, *args, **kwargs):
"""Run a "git" subcommand with given arguments.
Raises a GitCommandError if the subcommand does not return a
zero exit code.
The stdout keyword argument can take any value that
subprocess.Popen would accept. If it is subprocess.PIPE, then
the output of the command is returned as a string.
"""
stdout = None
for kw in kwargs.keys():
if kw == 'stdout':
stdout = kwargs[kw]
else:
                raise TypeError, 'Invalid keyword: %s' % kw
command = ['git', command] + list(args)
pipe = self._popen(command, stdout=stdout, stderr=PIPE)
out, err = pipe.communicate()
if pipe.returncode != 0:
raise GitCommandError(command, pipe.returncode, err)
if stdout == PIPE:
return stripnl(out)
def config_get(self, varname, default=None):
"""Retrieve the value of a config variable.
This method may be called before the repository exists. In
that case it will always return the default value.
"""
if not os.path.isdir(self.directory):
return default
command = ['git', 'config', '--get', varname]
pipe = self._popen(command, stdout=PIPE, stderr=PIPE)
stdout, stderr = pipe.communicate()
if pipe.returncode == 0:
return stripnl(stdout)
elif pipe.returncode == 1:
return default
else:
raise GitCommandError(command, pipe.returncode, stderr)
def config_set(self, varname, value):
"""Set the value of a config variable.
"""
self.check_command('config', varname, value)
def import_changesets(self, changesets, branch, domain=None,
limit=None, verbose=False,
progress=None, total=None,
authors=None, stop_on_unknown_author=False):
"""Loop over changesets and import them.
"""
if progress == None:
progress = NoProgress()
with progress:
self._import_changesets(changesets, branch, domain,
limit, verbose, progress,
total, authors, stop_on_unknown_author)
def _import_changesets(self, changesets, branch, domain, limit,
verbose, progress, total, authors,
stop_on_unknown_author):
message = _('Importing changesets')
def do_progress(count, total):
progress(message, count, total)
class SignalIndicator():
def __init__(self):
self.signal = {}
def __call__(self, signalnum, frame):
self.signal[signalnum] = True
def isset(self, signalnum=None):
if signalnum:
return self.signal.has_key(signalnum)
else:
return len(self.signal) > 0
sigaction = SignalIndicator()
signalset = (SIGHUP,SIGINT,SIGTERM,)
old_sigaction = {}
for signalnum in signalset:
old_sigaction[signalnum] = signal(signalnum, sigaction)
def ignore_signals():
for signalnum in signalset:
signal(signalnum, SIG_IGN)
        # absolute file name since cwd is changed in _popen
marksfile = os.path.join(self.git_dir, 'cvsgit.marks')
marksfile = os.path.abspath(marksfile)
command = ['git', 'fast-import', '--quiet']
command.append('--export-marks=' + marksfile)
pipe = self._popen(command, stdin=PIPE, preexec_fn=ignore_signals)
if limit != None and total != None and total > limit:
total = limit
fi = GitFastImport(pipe, branch, domain=domain, verbose=verbose,
authors=authors, stop_on_unknown_author=\
stop_on_unknown_author)
changeset_ids = []
db = None
try:
for changeset in changesets:
if limit != None and len(changeset_ids) >= limit:
break
fi.add_changeset(changeset)
if db == None: db = changeset.provider.metadb # FIXME
changeset_ids.append(changeset.id)
do_progress(len(changeset_ids), total)
if sigaction.isset(SIGINT):
raise KeyboardInterrupt()
elif sigaction.isset():
break
finally:
try:
fi.close()
finally:
self.mark_changesets(db, changeset_ids)
for signalnum in signalset:
signal(signalnum, old_sigaction[signalnum])
if fi.returncode != 0:
raise RuntimeError, _('git fast-import failed')
def mark_changesets(self, db, changeset_ids):
filename = os.path.join(self.git_dir, 'cvsgit.marks')
if not os.path.isfile(filename):
return
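        # Each line written by 'git fast-import --export-marks' has the form
        # ':<mark> <sha1>', e.g. ':1 0123abcd...'; the loop below parses that.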
marks = {}
f = file(filename, 'r')
try:
for line in f.readlines():
mark, sha1 = line.rstrip().split()
marks[int(mark[1:])] = sha1
finally:
f.close()
for id in changeset_ids:
if marks.has_key(id):
sha1 = marks[id]
db.mark_changeset(id, sha1)
class GitFastImport(object):
def __init__(self, pipe, branch, domain=None, verbose=False,
authors=None, stop_on_unknown_author=False):
self.pipe = pipe
self.branch = branch
self.domain = domain
self.verbose = verbose
self.authors = authors
self.stop_on_unknown_author = stop_on_unknown_author
self.last_changeset = None
self.write('feature notes\n')
def add_changeset(self, changeset):
name = self.author_name(changeset.author)
email = self.author_email(changeset.author)
when = self.raw_date(changeset.timestamp)
if self.verbose:
tstamp = time.strftime('%Y-%m-%d %H:%M:%S',
time.gmtime(changeset.timestamp))
print '[%d] %s %s' % (changeset.id, tstamp, name)
teaser = changeset.log.splitlines()[0]
if len(teaser) > 68:
teaser = teaser[:68] + '...'
print '\t%s' % teaser.encode('ascii', 'replace')
self.write('commit %s\n' % self.branch)
self.write('mark :%s\n' % changeset.id)
self.write('committer %s <%s> %s\n' % (name, email, when))
self.data(changeset.log.encode('utf-8'))
# FIXME: this is a hack; find out if the branch exists
if changeset.id != 1:
if self.last_changeset is None:
self.write('from %s^0\n' % self.branch)
else:
self.write('from :%s\n' % self.last_changeset.id)
note = ''
for c in changeset.changes:
if self.verbose:
print '\t%s %s %s' % (c.filestatus, c.filename, c.revision)
if c.filestatus == FILE_DELETED:
self.write('D %s\n' % c.filename)
else:
perm = changeset.perm(c)
blob = changeset.blob(c)
                # According to git-fast-import(1), Git supports only
                # these two file modes for plain files.
if (perm & 0111) != 0:
perm = 0755
else:
perm = 0644
self.write('M %o inline %s\n' % (perm, c.filename))
self.data(blob)
note += str(changeset.note(c)) + '\n'
notes_ref = 'refs/notes/cvs'
self.write('commit refs/notes/cvs\n')
self.write('committer %s <%s> %s\n' % ('git-cvs', '', when))
self.data('')
# FIXME: this is a hack; find out if the branch exists
if changeset.id != 1:
if self.last_changeset is None:
self.write('from %s^0\n' % notes_ref)
self.write('N inline :%s\n' % changeset.id)
self.data(note)
self.last_changeset = changeset
def close(self):
try:
self.pipe.stdin.close()
self.pipe.wait()
except:
pass
self.returncode = self.pipe.returncode
def raw_date(self, seconds):
"""Convert 'seconds' from seconds since the epoch to Git's
native date format."""
return '%s +0000' % (seconds)
def author_name(self, author):
# TODO: allow author name mappings to be defined for a
# certain time span, since CVS committer name could be
# reused.
if self.authors:
if self.authors.has_key(author):
return self.authors[author][0]
elif self.stop_on_unknown_author:
raise MissingAuthorFullname(author)
return author
def author_email(self, author):
if (self.authors and
self.authors.has_key(author) and
self.authors[author][1] is not None):
return self.authors[author][1]
elif self.domain:
return '%s@%s' % (author, self.domain)
else:
return author
def data(self, data):
"'data' must be a raw binary string of the str() type."
assert type(data) == types.StringType, \
"data type is %s" % type(data)
self.write('data %d\n' % len(data))
self.write(data)
self.write('\n')
def write(self, data):
self.pipe.stdin.write(data)
|
|
# -*- coding: utf-8 -*-
"""
Enrich BEL graphs
=================
In the current build it is possible to enrich BEL graphs containing metabolites with associated
disease or protein information and to enrich BEL graphs containing disease or protein information with associated metabolites.
This can be done with the functions further explained in `BEL Serialization`_
.. _BEL Serialization: bel_serialization.html
2. Enriching BEL graphs
-----------------------
A BEL graph containing metabolites (represented using the `HMDB namespace`_) can be enriched with disease and protein information from HMDB.
.. _HMDB namespace: construct_namspaces.html
2.1 Metabolites-Proteins
~~~~~~~~~~~~~~~~~~~~~~~~
For a graph containing metabolites:
>>> enrich_metabolites_proteins(bel_graph, manager)
The result of this will be a BEL graph which now includes relations between the metabolites and proteins.
For a graph containing proteins (named using uniprot identifiers):
>>> enrich_proteins_metabolites(bel_graph, manager)
This will result in a BEL graph where the proteins are linked to associated metabolites.
2.2 Metabolites-Diseases
~~~~~~~~~~~~~~~~~~~~~~~~
For a graph containing metabolites:
>>> enrich_metabolites_diseases(bel_graph, manager)
The result of this will be a BEL graph which now includes relations between the metabolites and diseases.
For a graph containing diseases (named using HMDB identifiers):
>>> enrich_diseases_metabolites(bel_graph, manager)
This will result in a BEL graph where the diseases are linked to associated metabolites.
"""
import logging
from typing import Optional
from pybel import BELGraph
from pybel.constants import (
ABUNDANCE, ANNOTATIONS, ASSOCIATION, CITATION, CITATION_REFERENCE, CITATION_TYPE, CITATION_TYPE_PUBMED, EVIDENCE,
FUNCTION, NAME, NAMESPACE, PATHOLOGY, PROTEIN, RELATION,
)
from pybel.struct.pipeline.decorators import in_place_transformation
from .manager import Manager
log = logging.getLogger(__name__)
def _check_namespaces(data, bel_function, bel_namespace):
"""Makes code more structured and reusable."""
if data[FUNCTION] != bel_function:
return False
if NAMESPACE not in data:
return False
if data[NAMESPACE] == bel_namespace:
return True
elif data[NAMESPACE] != bel_namespace:
log.warning("Unable to map namespace: %s", data[NAMESPACE])
return False
# enrich proteins and metabolites
@in_place_transformation
def enrich_metabolites_proteins(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes metabolites with proteins, that are associated to the metabolites."""
if manager is None:
manager = Manager()
for node in list(graph):
if _check_namespaces(node, ABUNDANCE, 'HMDB'):
metabolite_protein_interactions = manager.query_metabolite_associated_proteins(node[NAME])
else:
continue
if not metabolite_protein_interactions:
log.warning("Unable to find node: %s", node)
continue
for association in metabolite_protein_interactions:
protein_data = association.protein.serialize_to_bel()
protein_tuple = graph.add_node_from_data(protein_data)
graph.add_edge(protein_tuple, node, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: None,
CITATION_REFERENCE: None,
},
ANNOTATIONS: {
'name': association.protein.name,
'protein_type': association.protein.protein_type
}
})
@in_place_transformation
def enrich_proteins_metabolites(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes uniprot proteins with HMDB metabolites,
that are associated to the proteins.
"""
if manager is None:
manager = Manager()
for node in list(graph):
if _check_namespaces(node, PROTEIN, 'UP'):
protein_metabolite_interactions = manager.query_protein_associated_metabolites(node[NAME])
else:
continue
if protein_metabolite_interactions is None:
log.warning("Unable to find node: %s", node)
continue
for association in protein_metabolite_interactions:
metabolite_data = association.metabolite.serialize_to_bel()
metabolite_tuple = graph.add_node_from_data(metabolite_data)
graph.add_edge(metabolite_tuple, node, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: None,
CITATION_REFERENCE: None,
},
ANNOTATIONS: {
'name': association.protein.name,
'protein_type': association.protein.protein_type
}
})
# enrich diseases and metabolites
@in_place_transformation
def enrich_metabolites_diseases(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes metabolites with diseases, to which the metabolites are associated."""
if manager is None:
manager = Manager()
for data in list(graph):
if _check_namespaces(data, ABUNDANCE, 'HMDB'):
metabolite_disease_interactions = manager.query_metabolite_associated_diseases(data[NAME])
else:
continue
if metabolite_disease_interactions is None:
log.warning("Unable to find node: %s", data)
continue
# add edges and collect all the references for this edge
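        # Note: this grouping assumes the interactions come back ordered by
        # disease, so that all references for one disease are contiguous.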
i = 0
while i < len(metabolite_disease_interactions):
association = metabolite_disease_interactions[i]
references = [] # list for storing the reference articles
old_disease = association.disease
while True: # collect the references for the metabolite disease interaction
try:
if old_disease != metabolite_disease_interactions[i].disease:
break # break if disease has changed
references.append(metabolite_disease_interactions[i].reference.pubmed_id)
i += 1
except IndexError:
break
# add disease node and construct edge
disease_data = association.disease.serialize_to_bel()
disease_tuple = graph.add_node_from_data(disease_data)
graph.add_edge(disease_tuple, data, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: CITATION_TYPE_PUBMED,
CITATION_REFERENCE: references[0],
},
ANNOTATIONS: {
'omim_id': association.disease.omim_id,
'additional_references': references[1::]
}
})
@in_place_transformation
def enrich_diseases_metabolites(graph: BELGraph, manager: Optional[Manager] = None):
"""Enrich a given BEL graph, which includes HMDB diseases with HMDB metabolites, which are associated to the
diseases."""
if manager is None:
manager = Manager()
for data in list(graph):
if _check_namespaces(data, PATHOLOGY, 'HMDB_D'):
disease_metabolite_interactions = manager.query_disease_associated_metabolites(data[NAME])
else:
continue
if not disease_metabolite_interactions:
log.warning("Unable to find node: %s", data)
continue
# add edges and collect all the references for this edge
i = 0
while i < len(disease_metabolite_interactions):
association = disease_metabolite_interactions[i]
references = [] # list for storing the reference articles
old_metabolite = association.metabolite
while True: # collect the references for the metabolite disease interaction
try:
if old_metabolite != disease_metabolite_interactions[i].metabolite:
                        break # break if metabolite has changed
references.append(disease_metabolite_interactions[i].reference.pubmed_id)
i += 1
except IndexError:
break
# add disease node and construct edge
metabolite_data = association.metabolite.serialize_to_bel()
metabolite_tuple = graph.add_node_from_data(metabolite_data)
graph.add_edge(metabolite_tuple, data, attr_dict={
RELATION: ASSOCIATION,
EVIDENCE: None,
CITATION: {
CITATION_TYPE: CITATION_TYPE_PUBMED,
CITATION_REFERENCE: references[0],
},
ANNOTATIONS: {
'omim_id': association.disease.omim_id,
'additional_references': references[1::]
}
})
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.hooks import RemovableHandle
import pytorch_lightning as pl
from pytorch_lightning.utilities import AMPType, DeviceType, rank_zero_deprecation
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _TORCH_GREATER_EQUAL_1_8
from pytorch_lightning.utilities.warnings import WarningCache
log = logging.getLogger(__name__)
warning_cache = WarningCache()
PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"]
UNKNOWN_SIZE = "?"
class LayerSummary:
"""
Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`.
It collects the following information:
- Type of the layer (e.g. Linear, BatchNorm1d, ...)
- Input shape
- Output shape
- Number of parameters
The input and output shapes are only known after the example input array was
passed through the model.
Example::
>>> model = torch.nn.Conv2d(3, 8, 3)
>>> summary = LayerSummary(model)
>>> summary.num_parameters
224
>>> summary.layer_type
'Conv2d'
>>> output = model(torch.rand(1, 3, 5, 5))
>>> summary.in_size
[1, 3, 5, 5]
>>> summary.out_size
[1, 8, 3, 3]
Args:
module: A module to summarize
"""
def __init__(self, module: nn.Module):
super().__init__()
self._module = module
self._hook_handle = self._register_hook()
self._in_size = None
self._out_size = None
def __del__(self):
self.detach_hook()
def _register_hook(self) -> Optional[RemovableHandle]:
"""
Registers a hook on the module that computes the input- and output size(s) on the first forward pass.
        If the hook is called, it will remove itself from the module, meaning that
recursive models will only record their input- and output shapes once.
Registering hooks on :class:`~torch.jit.ScriptModule` is not supported.
Return:
A handle for the installed hook, or ``None`` if registering the hook is not possible.
"""
def hook(module, inp, out):
if len(inp) == 1:
inp = inp[0]
self._in_size = parse_batch_shape(inp)
self._out_size = parse_batch_shape(out)
self._hook_handle.remove()
handle = None
if not isinstance(self._module, torch.jit.ScriptModule):
handle = self._module.register_forward_hook(hook)
return handle
def detach_hook(self):
"""
Removes the forward hook if it was not already removed in the forward pass.
Will be called after the summary is created.
"""
if self._hook_handle is not None:
self._hook_handle.remove()
@property
def in_size(self) -> Union[str, List]:
return self._in_size or UNKNOWN_SIZE
@property
def out_size(self) -> Union[str, List]:
return self._out_size or UNKNOWN_SIZE
@property
def layer_type(self) -> str:
"""Returns the class name of the module."""
return str(self._module.__class__.__name__)
@property
def num_parameters(self) -> int:
"""Returns the number of parameters in this module."""
return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters())
class ModelSummary:
"""
Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`.
Args:
model: The model to summarize (also referred to as the root module).
mode: Can be one of
- `top` (default): only the top-level modules will be recorded (the children of the root module)
- `full`: summarizes all layers and their submodules in the root module
.. deprecated:: v1.4
This parameter was deprecated in v1.4 in favor of `max_depth` and will be removed in v1.6.
max_depth: Maximum depth of modules to show. Use -1 to show all modules or 0 to show no
summary. Defaults to 1.
The string representation of this summary prints a table with columns containing
the name, type and number of parameters for each layer.
The root module may also have an attribute ``example_input_array`` as shown in the example below.
If present, the root module will be called with it as input to determine the
intermediate input- and output shapes of all layers. Supported are tensors and
nested lists and tuples of tensors. All other types of inputs will be skipped and show as `?`
in the summary table. The summary will also display `?` for layers not used in the forward pass.
Example::
>>> import pytorch_lightning as pl
>>> class LitModel(pl.LightningModule):
...
... def __init__(self):
... super().__init__()
... self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))
... self.example_input_array = torch.zeros(10, 256) # optional
...
... def forward(self, x):
... return self.net(x)
...
>>> model = LitModel()
>>> ModelSummary(model, max_depth=1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | In sizes | Out sizes
------------------------------------------------------------
0 | net | Sequential | 132 K | [10, 256] | [10, 512]
------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
>>> ModelSummary(model, max_depth=-1) # doctest: +NORMALIZE_WHITESPACE
| Name | Type | Params | In sizes | Out sizes
--------------------------------------------------------------
0 | net | Sequential | 132 K | [10, 256] | [10, 512]
1 | net.0 | Linear | 131 K | [10, 256] | [10, 512]
2 | net.1 | BatchNorm1d | 1.0 K | [10, 512] | [10, 512]
--------------------------------------------------------------
132 K Trainable params
0 Non-trainable params
132 K Total params
0.530 Total estimated model params size (MB)
"""
MODES = dict(top=1, full=-1) # TODO: remove in v1.6
def __init__(self, model, mode: Optional[str] = None, max_depth: Optional[int] = 1):
self._model = model
# temporary mapping from mode to max_depth
if max_depth is None or mode is not None:
if mode in ModelSummary.MODES:
max_depth = ModelSummary.MODES[mode]
rank_zero_deprecation(
"Argument `mode` in `ModelSummary` is deprecated in v1.4"
f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behaviour."
)
else:
raise MisconfigurationException(f"`mode` can be {', '.join(ModelSummary.MODES)}, got {mode}.")
if not isinstance(max_depth, int) or max_depth < -1:
raise ValueError(f"`max_depth` can be -1, 0 or > 0, got {max_depth}.")
self._max_depth = max_depth
self._layer_summary = self.summarize()
# 1 byte -> 8 bits
        # TODO: how do we compute precision_megabytes in case of mixed precision?
precision = self._model.precision if isinstance(self._model.precision, int) else 32
self._precision_megabytes = (precision / 8.0) * 1e-6
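    # Worked example (matching the class docstring above): with ``precision=32`` each
    # parameter takes 32 / 8 = 4 bytes, i.e. 4e-6 MB, so the 132,608 parameters of the
    # LitModel example amount to 132,608 * 4e-6 ≈ 0.530 MB, the reported
    # "Total estimated model params size (MB)".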
@property
def named_modules(self) -> List[Tuple[str, nn.Module]]:
if self._max_depth == 0:
mods = []
elif self._max_depth == 1:
# the children are the top-level modules
mods = self._model.named_children()
else:
mods = self._model.named_modules()
mods = list(mods)[1:] # do not include root module (LightningModule)
return list(mods)
@property
def layer_names(self) -> List[str]:
return list(self._layer_summary.keys())
@property
def layer_types(self) -> List[str]:
return [layer.layer_type for layer in self._layer_summary.values()]
@property
def in_sizes(self) -> List:
return [layer.in_size for layer in self._layer_summary.values()]
@property
def out_sizes(self) -> List:
return [layer.out_size for layer in self._layer_summary.values()]
@property
def param_nums(self) -> List[int]:
return [layer.num_parameters for layer in self._layer_summary.values()]
@property
def total_parameters(self) -> int:
return sum(p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters())
@property
def trainable_parameters(self) -> int:
return sum(
p.numel() if not _is_lazy_weight_tensor(p) else 0 for p in self._model.parameters() if p.requires_grad
)
@property
def model_size(self) -> float:
        # TODO: this does not seem to work with quantized models - it returns 0.0
return self.total_parameters * self._precision_megabytes
def summarize(self) -> Dict[str, LayerSummary]:
summary = OrderedDict((name, LayerSummary(module)) for name, module in self.named_modules)
if self._model.example_input_array is not None:
self._forward_example_input()
for layer in summary.values():
layer.detach_hook()
if self._max_depth >= 1:
# remove summary entries with depth > max_depth
for k in [k for k in summary if k.count(".") >= self._max_depth]:
del summary[k]
return summary
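    # Depth filtering, illustrated with hypothetical module names: with ``max_depth=2`` a
    # submodule named "net.0" (one dot) is kept, while a nested "net.0.block" (two dots)
    # satisfies ``k.count(".") >= max_depth`` and is removed from the summary.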
def _forward_example_input(self) -> None:
"""Run the example input through each layer to get input- and output sizes."""
model = self._model
trainer = self._model.trainer
input_ = model.example_input_array
input_ = model._apply_batch_transfer_handler(input_)
if trainer is not None and trainer.amp_backend == AMPType.NATIVE and trainer._device_type != DeviceType.TPU:
model.forward = torch.cuda.amp.autocast()(model.forward)
mode = model.training
model.eval()
with torch.no_grad():
# let the model hooks collect the input- and output shapes
if isinstance(input_, (list, tuple)):
model(*input_)
elif isinstance(input_, dict):
model(**input_)
else:
model(input_)
model.train(mode) # restore mode of module
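    # Illustrative (hypothetical) ``example_input_array`` values and how they are
    # dispatched above, assuming the shapes match the model's ``forward`` signature:
    #   torch.zeros(10, 256)                        -> model(input_)
    #   (torch.zeros(10, 256), torch.zeros(10, 1))  -> model(*input_)
    #   {"x": torch.zeros(10, 256)}                 -> model(**input_)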
def __str__(self):
"""
Makes a summary listing with:
Layer Name, Layer Type, Number of Parameters, Input Sizes, Output Sizes, Model Size
"""
arrays = [
[" ", list(map(str, range(len(self._layer_summary))))],
["Name", self.layer_names],
["Type", self.layer_types],
["Params", list(map(get_human_readable_count, self.param_nums))],
]
if self._model.example_input_array is not None:
arrays.append(["In sizes", self.in_sizes])
arrays.append(["Out sizes", self.out_sizes])
total_parameters = self.total_parameters
trainable_parameters = self.trainable_parameters
model_size = self.model_size
return _format_summary_table(total_parameters, trainable_parameters, model_size, *arrays)
def __repr__(self):
return str(self)
def parse_batch_shape(batch: Any) -> Union[str, List]:
    """Returns the shape of a tensor as a list, a nested list of shapes for a list or
    tuple of tensors, and ``UNKNOWN_SIZE`` for any other input."""
    if hasattr(batch, "shape"):
        return list(batch.shape)
if isinstance(batch, (list, tuple)):
shape = [parse_batch_shape(el) for el in batch]
return shape
return UNKNOWN_SIZE
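# Illustrative behaviour of ``parse_batch_shape`` (``UNKNOWN_SIZE`` is the placeholder
# shown as `?` in the summary table):
#   parse_batch_shape(torch.zeros(10, 256))                 -> [10, 256]
#   parse_batch_shape((torch.zeros(2, 3), torch.zeros(4)))  -> [[2, 3], [4]]
#   parse_batch_shape("not a tensor")                       -> UNKNOWN_SIZE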
def _format_summary_table(total_parameters: int, trainable_parameters: int, model_size: float, *cols) -> str:
"""
    Takes in a number of arrays, each specifying a column in
    the summary table, and combines them into one big, nicely
    formatted string representing the summary table.
"""
n_rows = len(cols[0][1])
n_cols = 1 + len(cols)
# Get formatting width of each column
col_widths = []
for c in cols:
col_width = max(len(str(a)) for a in c[1]) if n_rows else 0
col_width = max(col_width, len(c[0])) # minimum length is header length
col_widths.append(col_width)
# Formatting
s = "{:<{}}"
total_width = sum(col_widths) + 3 * n_cols
header = [s.format(c[0], l) for c, l in zip(cols, col_widths)]
# Summary = header + divider + Rest of table
summary = " | ".join(header) + "\n" + "-" * total_width
for i in range(n_rows):
line = []
for c, l in zip(cols, col_widths):
line.append(s.format(str(c[1][i]), l))
summary += "\n" + " | ".join(line)
summary += "\n" + "-" * total_width
summary += "\n" + s.format(get_human_readable_count(trainable_parameters), 10)
summary += "Trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters - trainable_parameters), 10)
summary += "Non-trainable params"
summary += "\n" + s.format(get_human_readable_count(total_parameters), 10)
summary += "Total params"
summary += "\n" + s.format(get_formatted_model_size(model_size), 10)
summary += "Total estimated model params size (MB)"
return summary
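# Worked example (based on the ModelSummary doctest above): for the "Type" column the
# entries are ["Sequential", "Linear", "BatchNorm1d"]; the widest entry, "BatchNorm1d",
# has 11 characters and is longer than the header "Type", so that column is padded to
# a width of 11.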
def get_formatted_model_size(total_model_size: float) -> str:
return f"{total_model_size:,.3f}"
def get_human_readable_count(number: int) -> str:
"""
Abbreviates an integer number with K, M, B, T for thousands, millions,
billions and trillions, respectively.
Examples:
>>> get_human_readable_count(123)
'123 '
>>> get_human_readable_count(1234) # (one thousand)
'1.2 K'
>>> get_human_readable_count(2e6) # (two million)
'2.0 M'
>>> get_human_readable_count(3e9) # (three billion)
'3.0 B'
>>> get_human_readable_count(4e14) # (four hundred trillion)
'400 T'
    >>> get_human_readable_count(5e15) # (more than a trillion)
'5,000 T'
Args:
number: a positive integer number
Return:
A string formatted according to the pattern described above.
"""
assert number >= 0
labels = PARAMETER_NUM_UNITS
num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
num_groups = int(np.ceil(num_digits / 3))
num_groups = min(num_groups, len(labels)) # don't abbreviate beyond trillions
shift = -3 * (num_groups - 1)
number = number * (10 ** shift)
index = num_groups - 1
if index < 1 or number >= 100:
return f"{int(number):,d} {labels[index]}"
return f"{number:,.1f} {labels[index]}"
def _is_lazy_weight_tensor(p: Tensor) -> bool:
if _TORCH_GREATER_EQUAL_1_8:
from torch.nn.parameter import UninitializedParameter
if isinstance(p, UninitializedParameter):
warning_cache.warn(
"A layer with UninitializedParameter was found. "
"Thus, the total number of parameters detected may be inaccurate."
)
return True
return False
def summarize(
lightning_module: "pl.LightningModule", mode: Optional[str] = "top", max_depth: Optional[int] = None
) -> Optional[ModelSummary]:
"""
Summarize the LightningModule specified by `lightning_module`.
Args:
lightning_module: `LightningModule` to summarize.
mode: Can be either ``'top'`` (summarize only direct submodules) or ``'full'`` (summarize all layers).
.. deprecated:: v1.4
This parameter was deprecated in v1.4 in favor of `max_depth` and will be removed in v1.6.
max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
layer summary off. Default: 1.
Return:
The model summary object
"""
# temporary mapping from mode to max_depth
if max_depth is None:
if mode in ModelSummary.MODES:
max_depth = ModelSummary.MODES[mode]
rank_zero_deprecation(
"Argument `mode` in `LightningModule.summarize` is deprecated in v1.4"
f" and will be removed in v1.6. Use `max_depth={max_depth}` to replicate `mode={mode}` behavior."
)
model_summary = ModelSummary(lightning_module, max_depth=max_depth)
elif mode is not None:
raise MisconfigurationException(f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}")
else:
model_summary = ModelSummary(lightning_module, max_depth=max_depth)
log.info("\n" + str(model_summary))
return model_summary
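# A minimal, hypothetical usage sketch (not part of the module above). It reuses the
# LitModel pattern from the ModelSummary docstring; the `_DemoModel` name and the
# printed values are illustrative only.
if __name__ == "__main__":
    import pytorch_lightning as pl
    import torch
    from torch import nn

    class _DemoModel(pl.LightningModule):
        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(nn.Linear(256, 512), nn.BatchNorm1d(512))
            self.example_input_array = torch.zeros(10, 256)  # enables the In/Out sizes columns

        def forward(self, x):
            return self.net(x)

    # Passing ``mode=None`` avoids the deprecated mode/max_depth mapping handled above.
    demo_summary = summarize(_DemoModel(), mode=None, max_depth=-1)
    print(demo_summary.layer_names)  # expected: ['net', 'net.0', 'net.1']
    print(demo_summary.param_nums)   # expected: [132608, 131584, 1024]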