gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
---|---|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
vispy headless backend for egl.
"""
from __future__ import division
import atexit
from time import sleep
from ..base import (BaseApplicationBackend, BaseCanvasBackend,
BaseTimerBackend)
from ...util.ptime import time
# -------------------------------------------------------------------- init ---
# Module-level EGL bootstrap: runs once at import time.  Any failure is
# captured so the backend can report itself as unavailable instead of
# breaking the import of the application package.
try:
    # Inspired by http://www.mesa3d.org/egl.html
    # This is likely necessary on Linux since proprietary drivers
    # (e.g., NVIDIA) are unlikely to provide EGL support for now.
    # XXX TODO: Add use_gl('es2') and somehow incorporate here.
    # Also would be good to have use_gl('es3'), since libGLESv2.so on linux
    # seems to support both.
    from os import environ
    # Ask for the software EGL implementation (headless rendering).
    environ['EGL_SOFTWARE'] = 'true'
    from ...ext import egl
    # Acquire and initialize the default display once per process.
    _EGL_DISPLAY = egl.eglGetDisplay()
    egl.eglInitialize(_EGL_DISPLAY)
    # Build a human-readable id string: "<version> <vendor>: <client APIs>".
    version = [egl.eglQueryString(_EGL_DISPLAY, x) for x in
               [egl.EGL_VERSION, egl.EGL_VENDOR, egl.EGL_CLIENT_APIS]]
    version = [v.decode('utf-8') for v in version]
    version = version[0] + ' ' + version[1] + ': ' + version[2].strip()
    # Ensure the display is torn down at interpreter exit.
    atexit.register(egl.eglTerminate, _EGL_DISPLAY)
except Exception as exp:
    # EGL could not be set up; remember why for error reporting.
    available, testable, why_not, which = False, False, str(exp), None
else:
    # XXX restore "testable" and "available" once it works properly, and
    # remove from ignore list in .coveragerc
    available, testable, why_not = False, False, 'Not ready for testing'
    which = 'EGL ' + str(version)
_VP_EGL_ALL_WINDOWS = []
def _get_egl_windows():
wins = list()
for win in _VP_EGL_ALL_WINDOWS:
if isinstance(win, CanvasBackend):
wins.append(win)
return wins
# -------------------------------------------------------------- capability ---
# Feature matrix for this backend: True means the canvas property can be
# controlled; EGL pbuffers are off-screen, so window-manager features
# (decorations, fullscreen, vsync, parenting, scrolling) are unsupported.
capability = dict(  # things that can be set by the backend
    title=True,
    size=True,
    position=True,
    show=True,
    vsync=False,
    resizable=True,
    decorate=False,
    fullscreen=False,
    context=False,
    multi_window=True,
    scroll=False,
    parent=False,
    always_on_top=False,
)
# ------------------------------------------------------------- application ---
class ApplicationBackend(BaseApplicationBackend):
    """Headless EGL application backend: polls timers and redraws windows."""

    def __init__(self):
        BaseApplicationBackend.__init__(self)
        self._timers = []

    def _add_timer(self, timer):
        # Register a timer exactly once; duplicates are ignored.
        if timer not in self._timers:
            self._timers.append(timer)

    def _vispy_get_backend_name(self):
        return 'egl'

    def _vispy_process_events(self):
        # Give every timer a chance to fire ...
        for timer in self._timers:
            timer._tick()
        # ... then draw any window that asked for a redraw.
        for win in _get_egl_windows():
            if win._needs_draw:
                win._needs_draw = False
                win._on_draw()

    def _vispy_run(self):
        # Spin until any window loses its surface (i.e. was closed).
        wins = _get_egl_windows()
        while all(w._surface is not None for w in wins):
            self._vispy_process_events()
        self._vispy_quit()  # to clean up

    def _vispy_quit(self):
        # Close windows
        for win in _get_egl_windows():
            win._vispy_close()
        # tear down timers
        for timer in self._timers:
            timer._vispy_stop()
        self._timers = []

    def _vispy_get_native_app(self):
        return egl
# ------------------------------------------------------------------ canvas ---
class CanvasBackend(BaseCanvasBackend):
    """ EGL backend for Canvas abstract class."""
    # args are for BaseCanvasBackend, kwargs are for us.

    def __init__(self, *args, **kwargs):
        BaseCanvasBackend.__init__(self, *args)
        p = self._process_backend_kwargs(kwargs)
        self._initialized = False
        # Deal with context: the first canvas on a shared context owns the
        # native config/context; later canvases reuse the owner's handles.
        p.context.shared.add_ref('egl', self)
        if p.context.shared.ref is self:
            # Store context information
            self._native_config = egl.eglChooseConfig(_EGL_DISPLAY)[0]
            self._native_context = egl.eglCreateContext(_EGL_DISPLAY,
                                                        self._native_config,
                                                        None)
        else:
            # Reuse information from other context
            self._native_config = p.context.shared.ref._native_config
            self._native_context = p.context.shared.ref._native_context
        self._surface = None
        # Creates the initial pbuffer surface via _vispy_set_size.
        self._vispy_set_size(*p.size)
        _VP_EGL_ALL_WINDOWS.append(self)
        # Init
        self._initialized = True
        self._vispy_canvas.set_current()
        self._vispy_canvas.events.initialize()

    def _destroy_surface(self):
        # Release the current pbuffer surface, if any; idempotent.
        if self._surface is not None:
            egl.eglDestroySurface(_EGL_DISPLAY, self._surface)
            self._surface = None

    def _vispy_set_size(self, w, h):
        # Pbuffers cannot be resized in place: drop the old surface and
        # create a fresh one with the requested dimensions.
        if self._surface is not None:
            self._destroy_surface()
        attrib_list = (egl.EGL_WIDTH, w, egl.EGL_HEIGHT, h)
        self._surface = egl.eglCreatePbufferSurface(_EGL_DISPLAY,
                                                    self._native_config,
                                                    attrib_list)
        if self._surface == egl.EGL_NO_SURFACE:
            raise RuntimeError('Could not create rendering surface')
        self._size = (w, h)
        self._vispy_update()

    def _vispy_warmup(self):
        # Pump events for ~0.25 s so the canvas settles after creation.
        etime = time() + 0.25
        while time() < etime:
            sleep(0.01)
            self._vispy_canvas.set_current()
            self._vispy_canvas.app.process_events()

    def _vispy_set_current(self):
        if self._surface is None:
            return
        # Make this the current context
        self._vispy_canvas.set_current()  # Mark canvas as current
        egl.eglMakeCurrent(_EGL_DISPLAY, self._surface, self._surface,
                           self._native_context)

    def _vispy_swap_buffers(self):
        if self._surface is None:
            return
        # Swap front and back buffer
        egl.eglSwapBuffers(_EGL_DISPLAY, self._surface)

    def _vispy_set_title(self, title):
        # Off-screen surface: no title bar to update.
        pass

    def _vispy_set_position(self, x, y):
        # Off-screen surface: position is meaningless.
        pass

    def _vispy_set_visible(self, visible):
        # Off-screen surface: never visible on screen.
        pass

    def _vispy_update(self):
        # Mark that this window wants to be drawn on the next loop iter
        self._needs_draw = True

    def _vispy_close(self):
        # Closing a canvas amounts to dropping its surface; the app loop
        # treats a None surface as "closed".
        self._destroy_surface()

    def _vispy_get_size(self):
        if self._surface is None:
            return
        return self._size

    def _vispy_get_position(self):
        # Off-screen surface: report a fixed origin.
        return 0, 0

    def _on_draw(self, _id=None):
        # This is called by the processing app
        if self._vispy_canvas is None or self._surface is None:
            return
        self._vispy_canvas.set_current()
        self._vispy_canvas.events.draw(region=None)  # (0, 0, w, h))
# ------------------------------------------------------------------- timer ---
class TimerBackend(BaseTimerBackend):
    """Polling timer: fired from the app's event loop, not from a thread."""

    def __init__(self, vispy_timer):
        BaseTimerBackend.__init__(self, vispy_timer)
        # Register with the application backend so _tick() gets polled.
        vispy_timer._app._backend._add_timer(self)
        self._vispy_stop()

    def _vispy_start(self, interval):
        self._interval = interval
        self._next_time = time() + self._interval

    def _vispy_stop(self):
        # An infinite deadline means "never fire".
        self._next_time = float('inf')

    def _tick(self):
        # Called on every event-loop pass; fire when the deadline passed.
        if time() >= self._next_time:
            self._vispy_timer._timeout()
            self._next_time = time() + self._interval
|
|
# Some useful stuff to have in your views.py
import json
import CourseBuilder.settings #replace project with the main project folder with settings
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotFound, HttpResponseBadRequest
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404, render
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
# Models Imported
from models import Teacher, Course, Lesson, Slide
from forms import CourseForm, LessonForm, SlideForm
from django.forms.formsets import formset_factory
def course_admin(request):
print "course admin called"
# instance = get_object_or_404(Coworkers, id=id)
max_num = Course.objects.count()
course_set = formset_factory(CourseForm, extra=max_num, max_num=max_num)
courses = Course.objects.order_by('position').all()
keys = [(c.pk, c.teacher_id) for c in courses]
forms = course_set(initial=courses.values())
forms = zip(forms, keys)
return render(request, 'admin/course.html', {
'forms': forms,
'emptyForm': CourseForm(),
'courses' : Course.objects.order_by('position').all() })
@staff_member_required
@require_POST
def course_admin_update(request, course_id, prefix):
if request.is_ajax():
form = CourseForm(request.POST) if prefix == 'None' else CourseForm(request.POST, prefix=prefix)
if form.is_valid():
try:
course = Course.objects.get(pk=course_id)
course.teacher = form.cleaned_data['teacher']
course.name = form.cleaned_data['name']
course.save()
return HttpResponse('OK')
except ObjectDoesNotExist:
# create new object
position = None
if Course.objects.count() > 0:
course = Course.objects.order_by('-position').all()[0]
position = course.position
else:
position = 1
newcourse = Course()
newcourse.teacher = form.cleaned_data['teacher']
newcourse.name = form.cleaned_data['name']
newcourse.position = position
newcourse.save()
response = {'created_object_id': newcourse.pk}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
errors_dict = {}
if form.errors:
for error in form.errors:
e = form.errors[error]
field = prefix+"-"+error;
errors_dict[field] = unicode(e)
print errors_dict
return HttpResponseBadRequest(json.dumps(errors_dict))
else:
return HttpResponseNotFound('You do not have permission to access this page!')
@staff_member_required
@require_POST
def course_admin_delete(request, course_id):
    """AJAX endpoint: delete the course with the given pk."""
    if not request.is_ajax():
        return HttpResponseNotFound('You do not have permission to access this page!')
    Course.objects.get(pk=course_id).delete()
    return HttpResponse('OK')
@staff_member_required
@require_POST
def course_admin_reorder(request):
if request.is_ajax():
courselist = request.POST.getlist('subjectlist[]');
print courselist
for order, course_id in enumerate(courselist):
course = Course.objects.get(pk=course_id);
course.position = order + 1;
course.save()
return HttpResponse('OK')
else:
return HttpResponseNotFound('You do not have permission to access this page!')
def lesson_admin(request, course_id):
print "Lesson admin called"
try:
course = Course.objects.get(pk=course_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No Course Exists!')
lessons = Lesson.objects.filter(course_id=course_id).order_by('position')
max_num = Lesson.objects.count()
lesson_set = formset_factory(LessonForm, extra=max_num, max_num=max_num)
keys = [c.pk for c in lessons]
forms = lesson_set(initial=lessons.values())
forms = zip(forms, keys)
return render(request, 'admin/lesson.html', {
'page_course_id': course.pk,
'page_course_name': course.name,
'forms': forms,
'emptyForm': LessonForm(),
'lessons' : lessons})
@staff_member_required
@require_POST
def lesson_admin_update(request, lesson_id, prefix):
if request.is_ajax():
form = LessonForm(request.POST) if prefix == 'None' else LessonForm(request.POST, prefix=prefix)
if form.is_valid():
try:
lesson = Lesson.objects.get(pk=lesson_id)
lesson.course = form.cleaned_data['course']
lesson.description = form.cleaned_data['description']
lesson.name = form.cleaned_data['name']
lesson.save()
return HttpResponse('OK')
except ObjectDoesNotExist:
# create new object
position = None
if Lesson.objects.count() > 0:
lesson = Lesson.objects.order_by('-position').all()[0]
position = lesson.position
else:
position = 1
newlesson = Lesson()
newlesson.course = form.cleaned_data['course']
newlesson.name = form.cleaned_data['name']
newlesson.description = form.cleaned_data['description']
newlesson.position = position
newlesson.save()
response = {'created_object_id': newlesson.pk}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
errors_dict = {}
if form.errors:
for error in form.errors:
e = form.errors[error]
field = prefix+"-"+error;
errors_dict[field] = unicode(e)
print errors_dict
return HttpResponseBadRequest(json.dumps(errors_dict))
else:
return HttpResponseNotFound('You do not have permission to access this page!')
@staff_member_required
@require_POST
def lesson_admin_delete(request, lesson_id):
    """AJAX endpoint: delete the lesson with the given pk."""
    if not request.is_ajax():
        return HttpResponseNotFound('You do not have permission to access this page!')
    Lesson.objects.get(pk=lesson_id).delete()
    return HttpResponse('OK')
@staff_member_required
@require_POST
def lesson_admin_reorder(request):
if request.is_ajax():
lessonlist = request.POST.getlist('subjectlist[]');
print lessonlist
for order, lesson_id in enumerate(lessonlist):
lesson = Lesson.objects.get(pk=lesson_id);
lesson.position = order + 1;
lesson.save()
return HttpResponse('OK')
else:
return HttpResponseNotFound('You do not have permission to access this page!')
def slide_admin(request, course_id, lesson_id):
#Allow users to edit slides and ensure template has a slide preview - that would be cool.
print "slide admin called"
try:
course = Course.objects.get(pk=course_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No Course Exists!')
try:
lesson = Lesson.objects.get(pk=lesson_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No Slides Exist Yet For This Course')
slides = Slide.objects.filter(lesson_id=lesson_id).order_by('position')
max_num = Slide.objects.count()
slides_set = formset_factory(SlideForm, extra=max_num, max_num=max_num)
keys = [c.pk for c in slides]
forms = slides_set(initial=slides.values())
forms = zip(forms, keys)
return render(request, 'admin/slide.html', {
'page_course_id': course.pk,
'page_course_name': course.name,
'page_lesson_id': lesson.pk,
'page_lesson_name': lesson.name,
'forms': forms,
'slides' : slides,
'emptyForm': SlideForm()
})
@staff_member_required
@require_POST
def slide_admin_update(request, slide_id, prefix):
if request.is_ajax():
form = SlideForm(request.POST) if prefix == 'None' else SlideForm(request.POST, prefix=prefix)
if form.is_valid():
try:
slide = Slide.objects.get(pk=slide_id)
slide.lesson = form.cleaned_data['lesson']
slide.content = form.cleaned_data['content']
slide.googleStyles = form.cleaned_data['googleStyles']
slide.name = form.cleaned_data['name']
slide.save()
return HttpResponse('OK')
except ObjectDoesNotExist:
# create new object
position = None
if Slide.objects.count() > 0:
slide = Slide.objects.order_by('-position').all()[0]
position = slide.position
else:
position = 1
newslide = Slide()
newslide.lesson = form.cleaned_data['lesson']
newslide.name = form.cleaned_data['name']
newslide.content = form.cleaned_data['content']
newslide.googleStyles = form.cleaned_data['googleStyles']
newslide.position = position
newslide.save()
response = {'created_object_id': newslide.pk}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
errors_dict = {}
if form.errors:
for error in form.errors:
e = form.errors[error]
field = prefix+"-"+error;
errors_dict[field] = unicode(e)
print errors_dict
return HttpResponseBadRequest(json.dumps(errors_dict))
else:
return HttpResponseNotFound('You do not have permission to access this page!')
@staff_member_required
@require_POST
def slide_admin_delete(request, slide_id):
    """AJAX endpoint: delete the slide with the given pk."""
    if not request.is_ajax():
        return HttpResponseNotFound('You do not have permission to access this page!')
    Slide.objects.get(pk=slide_id).delete()
    return HttpResponse('OK')
@staff_member_required
@require_POST
def slide_admin_reorder(request):
if request.is_ajax():
slidelist = request.POST.getlist('subjectlist[]');
print slidelist
for order, slide_id in enumerate(slidelist):
slide = Slide.objects.get(pk=slide_id);
slide.position = order + 1;
slide.save()
return HttpResponse('OK')
else:
return HttpResponseNotFound('You do not have permission to access this page!')
#APP USER views
def course_view(request):
print "course view called"
try:
courses = Course.objects.all()
except ObjectDoesNotExist:
return HttpResponseNotFound('No courses exist!')
return render(request, 'view/course.html', { 'courses': courses })
def about_view(request):
print "about view called"
return render(request, 'view/about.html')
def tutorial_view(request):
print "tutorial view called"
return render(request, 'view/tutorial.html')
def lesson_view(request, course_id):
print "lesson view called"
try:
course = Course.objects.get(pk=course_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No Course Exists!')
try:
lessons = Lesson.objects.filter(course_id=course_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No lessons exist yet for this course')
return render(request, 'view/lesson.html', {'course': course, 'lessons' : lessons.all()})
def slideshow_view(request, course_id, lesson_id):
#Generate Google SlideShow view for user
print "slideshow view called "
try:
course = Course.objects.get(pk=course_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('This course does not exist')
try:
lesson = Lesson.objects.get(pk=lesson_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('This lesson does not exist')
try:
slides = Slide.objects.filter(lesson_id=lesson_id)
except ObjectDoesNotExist:
return HttpResponseNotFound('No slideshow for this lesson!')
print slides
return render(request, 'view/slideshow.html', {'course': course, 'lesson': lesson, 'slides' : slides})
|
|
# Control the rotator and download data from a VNA
# Andrew Temme
# [email protected]
# updated 2015-11-4
# currently written for HP 8753
from __future__ import division
import visa
import numpy as np
from datetime import datetime
from os import path, makedirs
import serial
from time import sleep
from math import trunc
# import skrf
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
def openRotatorPort(portNum=0, timeout=5):
    """
    Open a serial port for the rotator.

    Open commport ``portNum`` with a timeout of ``timeout``.

    Parameters
    ----------
    portNum : integer, default: 0
        commport for the serial connection to the rotator
    timeout : number, default: 5 sec
        timeout for the commport

    Returns
    -------
    port : serial.Serial
        object that you use to communicate with the serial port.
    """
    # BUGFIX: the original ignored both arguments and always opened
    # port 0 with a hard-coded 5 s timeout.
    ser = serial.Serial(portNum, timeout=timeout)
    return ser
def advanceRotator(deg):
    """
    Advance the rotator ``deg`` degrees.

    Parameters
    ----------
    deg : number
        how many degrees to advance the rotator

    Raises
    ------
    NotImplementedError
        the serial command sequence for moving has not been written yet;
        only the step arithmetic is in place.
    """
    stepsPerRotation = 508000
    gearRatio = 6
    # BUGFIX: steps per *degree* must divide the full-rotation step count
    # by 360; the original multiplied only, overstating it 360-fold.
    stepsPerDeg = stepsPerRotation * gearRatio / 360
    steps = stepsPerDeg * deg
    raise NotImplementedError("Write this function!")
def startRotatorTurning(rpm):
    """
    Start the rotator turning at a set RPM.

    Parameters
    ----------
    rpm : number
        RPM for the rotator

    Notes
    -----
    Writes the controller command sequence to the module-level serial
    port ``ser``.
    NOTE(review): ``ser`` is never assigned at module scope -- the script
    below discards the return value of openRotatorPort() -- so this will
    raise NameError unless the caller binds ``ser`` first; confirm.
    """
    # gear ratio for rotator in the antenna chamber and arch range
    gearRatio = 6
    # gear ratio for APS Design Competition turntable
    # gearRatio=2.446
    delay = 0.01
    print "start"
    # Convert RPM to the controller's step-delay value: 400 steps/rev and
    # a 2.04e-6 s time base, halved, scaled by the gear ratio.
    speed=1/(rpm/60*400)/2.04e-6/gearRatio
    print speed
    # Controller wants an integer value.
    speed = trunc(round(speed))
    print speed
    print str(speed)
    # Command sequence for the motion controller; each command is CR
    # terminated with a short settle delay between writes.
    ser.write('BD0\r')
    sleep(delay)
    ser.write('SH\r')
    sleep(delay)
    ser.write('SO\r')
    sleep(delay)
    ser.write('SA10\r')
    sleep(delay)
    ser.write('SM2000\r')
    sleep(delay)
    ser.write('SD'+str(speed)+'\r')
    sleep(delay)
    # Begin continuous motion.
    ser.write('H+\r')
def stopRotator():
"""
Stop the rotator
"""
ser.write('H0\r')
ser.close()
print "Port closed"
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
print "start"
# open the rotator
openRotatorPort(0, 5)
# start the rotator
# startRotatorTurning(16)
#------------------------------------------------------------------------------
# File-name prefix for the saved touchstone data.
dataPrefix = 'carpet-foam'
#------------------------------------------------------------------------------
# number of averages
numGrps = 4
# GPIB address for the VNA set below
ena = visa.instrument('GPIB::16', timeout = 120)
idn = ena.ask('*IDN?')
print idn
# Touchstone option line: frequency in Hz, S-params as real/imag, 50 ohm ref.
optLine = "# Hz S RI R 50"
# Command mnemonics for the HP 8753D vector network analyzer.
cmd8753D = {\
'basicInit':'HOLD;DUACOFF;CHAN1;S11;LOGM;CONT;AUTO',\
'corrQ':'CORR?',\
'freqSpanQ':'SPAN?',\
'freqStartQ':'STAR?',\
'freqStopQ':'STOP?',\
'getImag':'IMAG;OUTPFORM',\
'getLinMag':'LINM;OUTPFORM',\
'getLogMag':'LOGM;OUTPFORM',\
'getPhase':'PHAS;OUTPFORM',\
'getReal':'REAL;OUTPFORM',\
'hold':'HOLD',\
'IDStr':'HEWLETT PACKARD,8753D,0,6.14',\
'ifbwQ':'IFBW?',\
'numPtsQ':'POIN?',\
'powerQ':'POWE?',\
'preset':'PRES',\
'numGroups':'NUMG',\
'polar':'POLA',\
's11':'S11',\
's21':'S21',\
's12':'S12',\
's22':'S22'\
}
cmdDict = cmd8753D
#------------------------------------------------------------------------------
# 'form4' selects the instrument's ASCII data-transfer format -- confirm
# against the 8753 programming manual.
ena.write('form4')
# number of points
ena.write('POIN1601')
# Read the sweep parameters back so the frequency axis can be rebuilt.
numPts = ena.ask_for_values(cmdDict['numPtsQ'])[0]
freqStart = ena.ask_for_values(cmdDict['freqStartQ'])[0]
freqStop = ena.ask_for_values(cmdDict['freqStopQ'])[0]
freq = np.linspace(freqStart,freqStop,num=numPts,endpoint=True)
ifbw = ena.ask_for_values(cmdDict['ifbwQ'])[0]
pwr = ena.ask_for_values(cmdDict['powerQ'])[0]
# Correction (calibration) state: '0' off, '1' on (used in the file header).
corr = ena.ask(cmdDict['corrQ'])
dateString = datetime.now().strftime("%Y-%m-%d")
timeString = datetime.now().strftime("%H:%M:%S")
# One output directory per day.
dataDir = 'Data/' + dateString
if not path.exists(dataDir):
    makedirs(dataDir)
# Measurement counter, incremented once per loop pass.
i = 0
#saveMeas = True
#for i in range(numMeasurements):
# Main acquisition loop: advance the rotator, sweep all four S-parameters,
# and write one touchstone (.s2p) file per position until Ctrl-C.
try:
    # Prime the VNA: polar format, trigger a group of numGrps sweeps.
    ena.write('pola;numg' + str(numGrps))
    # while saveMeas:
    while True:
        #----------------------------------------------------------------------
        # move the rotator
        # will need to decide about how to handle the first case. Do you advance
        # the rotator and then measure or do you measure and then advance? The
        # call to ``advanceRotator`` can be made at the end of the loop.
        advanceRotator(15)
        #----------------------------------------------------------------------
        print('Starting Measurement Number: %d' % i)
        # Each query selects an S-parameter, switches to polar display,
        # triggers numGrps averaged sweeps, and reads back the trace as
        # interleaved real/imaginary pairs.
        s11polar = np.array(ena.ask_for_values(cmdDict['s11']+cmdDict['polar']+';'+cmdDict['numGroups'] + str(numGrps)+';outpform'))
        s11polReal = s11polar[::2]  # real values from the polar data
        s11polImag = s11polar[1::2]  # imaginary values from the polar data
        print "s21"
        s21polar = np.array(ena.ask_for_values(cmdDict['s21']+cmdDict['polar']+';'+cmdDict['numGroups'] + str(numGrps)+';outpform'))
        s21polReal = s21polar[::2]  # real values from the polar data
        s21polImag = s21polar[1::2]  # imaginary values from the polar data
        print "s12"
        s12polar = np.array(ena.ask_for_values(cmdDict['s12']+cmdDict['polar']+';'+cmdDict['numGroups'] + str(numGrps)+';outpform'))
        s12polReal = s12polar[::2]  # real values from the polar data
        s12polImag = s12polar[1::2]  # imaginary values from the polar data
        print "s22"
        s22polar = np.array(ena.ask_for_values(cmdDict['s22']+cmdDict['polar']+';'+cmdDict['numGroups'] + str(numGrps)+';outpform'))
        s22polReal = s22polar[::2]  # real values from the polar data
        s22polImag = s22polar[1::2]  # imaginary values from the polar data
        # Assemble one column of frequency plus eight columns of S-data
        # (touchstone column order: S11, S21, S12, S22 as re/im pairs).
        saveData = np.concatenate(([freq],
                                   [s11polReal],[s11polImag],
                                   [s21polReal],[s21polImag],
                                   [s12polReal],[s12polImag],
                                   [s22polReal],[s22polImag])).T
        # Timestamp each file so positions sort chronologically.
        timeAppend = datetime.now().strftime("-%Y-%m-%d-%H%M%S")
        dataName = dataDir + '/' + dataPrefix + timeAppend
        touchFileName = dataName + ".s2p"
        print touchFileName
        saveFile = open(touchFileName, "w")
        # Touchstone comment header: instrument id, date, and cal state.
        saveFile.write("!"+idn+"\n")
        saveFile.write("!Date: " + dateString + " " + timeString + "\n")
        saveFile.write("!Data & Calibration Information:\n")
        if corr == '0':
            saveFile.write("!Freq S11 S21 S12 S22\n")
        elif corr== '1':
            saveFile.write("!Freq S11:Cal(ON) S21:Cal(ON) S12:Cal(ON) S22:Cal(ON)\n")
        saveFile.write("!PortZ Port1:50+j0 Port2:50+j0\n")
        saveFile.write(("!Above PortZ is port z conversion or system Z0 "
                        "setting when saving the data.\n"))
        saveFile.write(("!When reading, reference impedance value at option "
                        "line is always used.\n"))
        saveFile.write("!\n")
        saveFile.write("!--Config file parameters\n")
        saveFile.write("!start = " + str(freqStart) + "\n")
        saveFile.write("!stop = " + str(freqStop) + "\n")
        saveFile.write("!numPts = " + str(numPts) + "\n")
        saveFile.write("!avgFact = " + str(numGrps) + "\n")
        saveFile.write("!power = " + str(pwr) + "\n")
        saveFile.write("!ifbw = " + str(ifbw) + "\n")
        saveFile.write("!\n")
        saveFile.write(optLine + "\n")
        np.savetxt(saveFile,saveData,delimiter=" ")
        saveFile.close()
        i += 1
        # balun = skrf.Network(touchFileName)
        # balun.plot_s_db()
        # legend(loc=0)
except KeyboardInterrupt:
    # Ctrl-C is the normal way to end an acquisition run.
    print('Ctrl-C Interrupt')
finally:
    print('Finally')
    # Release the VNA back to local control -- mode 2 of gpib_control_ren;
    # confirm the exact REN mode against the vpp43 documentation.
    visa.vpp43.gpib_control_ren(ena.vi,2)
    # Halt the rotator and close its serial port.
    stopRotator()
    print("Done")
|
|
"""
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import fnmatch
import json
import re
from ansible.plugins.action import ActionBase
from ansible import errors
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib.parse import urlparse
# Valid values for openshift_deployment_type
VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
# Tuple of variable names and default values if undefined.
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
('openshift_use_flannel', False),
('openshift_use_nuage', False),
('openshift_use_contiv', False),
('openshift_use_calico', False),
('openshift_use_kuryr', False),
('openshift_use_nsx', False))
ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={}"""
ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+.*)',
'error_msg': ORIGIN_TAG_REGEX_ERROR}
ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
'openshift-enterprise': ENTERPRISE_TAG_REGEX}
PKG_VERSION_REGEX_ERROR = """openshift_pkg_version must be in the format
-[optional.release]. Examples: -3.6.0, -3.7.0-0.126.0.git.0.9351aae.el7 -3.11*
You specified openshift_pkg_version={}"""
PKG_VERSION_REGEX = {'re': '(^-.*)',
'error_msg': PKG_VERSION_REGEX_ERROR}
RELEASE_REGEX_ERROR = """openshift_release must be in the format
v#[.#[.#]]. Examples: v3.9, v3.10.0
You specified openshift_release={}"""
RELEASE_REGEX = {'re': '(^v?\\d+(\\.\\d+(\\.\\d+)?)?$)',
'error_msg': RELEASE_REGEX_ERROR}
STORAGE_KIND_TUPLE = (
'openshift_loggingops_storage_kind',
'openshift_logging_storage_kind',
'openshift_metrics_storage_kind',
'openshift_prometheus_alertbuffer_storage_kind',
'openshift_prometheus_alertmanager_storage_kind',
'openshift_prometheus_storage_kind')
IMAGE_POLICY_CONFIG_VAR = "openshift_master_image_policy_config"
ALLOWED_REGISTRIES_VAR = "openshift_master_image_policy_allowed_registries_for_import"
REMOVED_VARIABLES = (
('openshift_hostname', 'Removed: See documentation'),
# TODO(michaelgugino): Remove in 3.12
('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
('oreg_url_master', 'oreg_url'),
('oreg_url_node', 'oreg_url'),
('openshift_cockpit_deployer_prefix', 'openshift_cockpit_deployer_image'),
('openshift_cockpit_deployer_basename', 'openshift_cockpit_deployer_image'),
('openshift_cockpit_deployer_version', 'openshift_cockpit_deployer_image'),
('openshift_hosted_logging_elasticsearch_pvc_prefix', 'openshift_logging_es_pvc_prefix'),
('logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
('openshift_hosted_logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
('openshift_hosted_logging_elasticsearch_cluster_size', 'logging_elasticsearch_cluster_size'),
('openshift_hosted_logging_elasticsearch_ops_cluster_size', 'logging_elasticsearch_ops_cluster_size'),
('openshift_hosted_logging_storage_kind', 'openshift_logging_storage_kind'),
('openshift_hosted_logging_storage_host', 'openshift_logging_storage_host'),
('openshift_hosted_logging_storage_labels', 'openshift_logging_storage_labels'),
('openshift_hosted_logging_storage_volume_size', 'openshift_logging_storage_volume_size'),
('openshift_hosted_loggingops_storage_kind', 'openshift_loggingops_storage_kind'),
('openshift_hosted_loggingops_storage_host', 'openshift_loggingops_storage_host'),
('openshift_hosted_loggingops_storage_labels', 'openshift_loggingops_storage_labels'),
('openshift_hosted_loggingops_storage_volume_size', 'openshift_loggingops_storage_volume_size'),
('openshift_hosted_logging_enable_ops_cluster', 'openshift_logging_use_ops'),
('openshift_hosted_logging_image_pull_secret', 'openshift_logging_image_pull_secret'),
('openshift_hosted_logging_hostname', 'openshift_logging_kibana_hostname'),
('openshift_hosted_logging_kibana_nodeselector', 'openshift_logging_kibana_nodeselector'),
('openshift_hosted_logging_kibana_ops_nodeselector', 'openshift_logging_kibana_ops_nodeselector'),
('openshift_hosted_logging_journal_source', 'openshift_logging_fluentd_journal_source'),
('openshift_hosted_logging_journal_read_from_head', 'openshift_logging_fluentd_journal_read_from_head'),
('openshift_hosted_logging_fluentd_nodeselector_label', 'openshift_logging_fluentd_nodeselector'),
('openshift_hosted_logging_elasticsearch_instance_ram', 'openshift_logging_es_memory_limit'),
('openshift_hosted_logging_elasticsearch_nodeselector', 'openshift_logging_es_nodeselector'),
('openshift_hosted_logging_elasticsearch_ops_nodeselector', 'openshift_logging_es_ops_nodeselector'),
('openshift_hosted_logging_elasticsearch_ops_instance_ram', 'openshift_logging_es_ops_memory_limit'),
('openshift_hosted_logging_storage_access_modes', 'openshift_logging_storage_access_modes'),
('openshift_hosted_logging_master_public_url', 'openshift_logging_master_public_url'),
('openshift_hosted_logging_deployer_prefix', 'openshift_logging_image_prefix'),
('openshift_hosted_logging_deployer_version', 'openshift_logging_image_version'),
('openshift_hosted_logging_deploy', 'openshift_logging_install_logging'),
('openshift_hosted_logging_curator_nodeselector', 'openshift_logging_curator_nodeselector'),
('openshift_hosted_logging_curator_ops_nodeselector', 'openshift_logging_curator_ops_nodeselector'),
('openshift_hosted_metrics_storage_access_modes', 'openshift_metrics_storage_access_modes'),
('openshift_hosted_metrics_storage_host', 'openshift_metrics_storage_host'),
('openshift_hosted_metrics_storage_nfs_directory', 'openshift_metrics_storage_nfs_directory'),
('openshift_hosted_metrics_storage_volume_name', 'openshift_metrics_storage_volume_name'),
('openshift_hosted_metrics_storage_volume_size', 'openshift_metrics_storage_volume_size'),
('openshift_hosted_metrics_storage_labels', 'openshift_metrics_storage_labels'),
('openshift_hosted_metrics_deployer_prefix', 'openshift_metrics_image_prefix'),
('openshift_hosted_metrics_deployer_version', 'openshift_metrics_image_version'),
('openshift_hosted_metrics_deploy', 'openshift_metrics_install_metrics'),
('openshift_hosted_metrics_storage_kind', 'openshift_metrics_storage_kind'),
('openshift_hosted_metrics_public_url', 'openshift_metrics_hawkular_hostname'),
('openshift_node_labels', 'openshift_node_groups[<item>].labels'),
('openshift_node_kubelet_args', 'openshift_node_groups[<item>].edits'),
)
# JSON_FORMAT_VARIABLES is not intended to cover all json variables; only
# the complicated json variables in hosts.example are covered.
JSON_FORMAT_VARIABLES = (
'openshift_builddefaults_json',
'openshift_buildoverrides_json',
'openshift_master_admission_plugin_config',
'openshift_master_audit_config',
'openshift_crio_docker_gc_node_selector',
'openshift_master_image_policy_allowed_registries_for_import',
'openshift_master_image_policy_config',
'openshift_master_oauth_templates',
'container_runtime_extra_storage',
'openshift_additional_repos',
'openshift_master_identity_providers',
'openshift_master_htpasswd_users',
'openshift_additional_projects',
'openshift_hosted_routers',
'openshift_node_open_ports',
'openshift_master_open_ports',
)
def to_bool(var_to_check):
    """Determine a boolean value given the multiple
    ways bools can be specified in ansible."""
    # http://yaml.org/type/bool.html
    # Membership uses ==, so 1 matches True and vice versa.
    affirmative = (True, 1, "True", "1", "true", "TRUE",
                   "Yes", "yes", "Y", "y", "YES",
                   "on", "ON", "On")
    return var_to_check in affirmative
def check_for_removed_vars(hostvars, host):
    """Fails if removed variables are found.

    Raises AnsibleModuleError listing each removed variable and its
    replacement; returns None when the host uses none of them.
    """
    found_removed = []
    for item in REMOVED_VARIABLES:
        # Each item is a (removed_name, replacement) tuple.
        # BUGFIX: match on the variable name, item[0] -- the original
        # tested the whole tuple against hostvars, which can never match,
        # so removed variables were silently accepted.
        if item[0] in hostvars[host]:
            found_removed.append(item)
    if found_removed:
        msg = "Found removed variables: "
        for item in found_removed:
            msg += "{} is replaced by {}; ".format(item[0], item[1])
        raise errors.AnsibleModuleError(msg)
    return None
class ActionModule(ActionBase):
    """Action plugin to execute sanity checks."""

    def template_var(self, hostvars, host, varname):
        """Retrieve a variable from hostvars and template it.
        If undefined, return None type."""
        # Record the current host and variable checked for easy debugging
        # if there are any unhandled exceptions (reported from run()).
        # pylint: disable=W0201
        self.last_checked_var = varname
        # pylint: disable=W0201
        self.last_checked_host = host
        res = hostvars[host].get(varname)
        if res is None:
            return None
        return self._templar.template(res)

    def check_openshift_deployment_type(self, hostvars, host):
        """Ensure a valid openshift_deployment_type is set"""
        openshift_deployment_type = self.template_var(hostvars, host,
                                                      'openshift_deployment_type')
        if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
            type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
            msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
            raise errors.AnsibleModuleError(msg)
        return openshift_deployment_type

    def get_allowed_registries(self, hostvars, host):
        """Returns a list of configured allowedRegistriesForImport as a list of patterns"""
        allowed_registries_for_import = self.template_var(hostvars, host, ALLOWED_REGISTRIES_VAR)
        if allowed_registries_for_import is None:
            # Fall back to the full image policy config blob.
            image_policy_config = self.template_var(hostvars, host, IMAGE_POLICY_CONFIG_VAR)
            if not image_policy_config:
                return image_policy_config

            if isinstance(image_policy_config, str):
                try:
                    image_policy_config = json.loads(image_policy_config)
                except Exception:
                    raise errors.AnsibleModuleError(
                        "{} is not a valid json string".format(IMAGE_POLICY_CONFIG_VAR))

            if not isinstance(image_policy_config, dict):
                raise errors.AnsibleModuleError(
                    "expected dictionary for {}, not {}".format(
                        IMAGE_POLICY_CONFIG_VAR, type(image_policy_config)))

            detailed = image_policy_config.get("allowedRegistriesForImport", None)
            if not detailed:
                return detailed

            if not isinstance(detailed, list):
                # BUGFIX: report the type of the value actually being
                # validated (detailed), not allowed_registries_for_import,
                # which is always None on this code path.
                raise errors.AnsibleModuleError("expected list for {}['{}'], not {}".format(
                    IMAGE_POLICY_CONFIG_VAR, "allowedRegistriesForImport",
                    type(detailed)))

            try:
                return [i["domainName"] for i in detailed]
            except Exception:
                raise errors.AnsibleModuleError(
                    "each item of allowedRegistriesForImport must be a dictionary with 'domainName' key")

        if not isinstance(allowed_registries_for_import, list):
            # BUGFIX: name the variable that actually failed validation
            # (ALLOWED_REGISTRIES_VAR, not IMAGE_POLICY_CONFIG_VAR).
            raise errors.AnsibleModuleError("expected list for {}, not {}".format(
                ALLOWED_REGISTRIES_VAR, type(allowed_registries_for_import)))

        return allowed_registries_for_import

    def check_whitelisted_registries(self, hostvars, host):
        """Ensure defined registries are whitelisted"""
        allowed = self.get_allowed_registries(hostvars, host)
        if allowed is None:
            return

        unmatched_registries = []
        # BUGFIX: "oreg_url" was missing a trailing comma, so implicit
        # string concatenation fused it with the next entry and oreg_url
        # was never actually checked against the whitelist.
        for regvar in (
                "oreg_url",
                "openshift_cockpit_deployer_prefix",
                "openshift_metrics_image_prefix",
                "openshift_logging_image_prefix",
                "openshift_service_catalog_image_prefix",
                "openshift_docker_insecure_registries"):
            value = self.template_var(hostvars, host, regvar)
            if not value:
                continue
            if isinstance(value, list):
                registries = value
            else:
                registries = [value]

            for reg in registries:
                if not any(is_registry_match(reg, pat) for pat in allowed):
                    unmatched_registries.append((regvar, reg))

        if unmatched_registries:
            registry_list = ", ".join(["{}:{}".format(n, v) for n, v in unmatched_registries])
            raise errors.AnsibleModuleError(
                "registry hostnames of the following image prefixes are not whitelisted by image"
                " policy configuration: {}".format(registry_list))

    def check_python_version(self, hostvars, host, distro):
        """Ensure python version is 3 for Fedora and python 2 for others"""
        ansible_python = self.template_var(hostvars, host, 'ansible_python')
        if distro == "Fedora":
            if ansible_python['version']['major'] != 3:
                msg = "openshift-ansible requires Python 3 for {};".format(distro)
                msg += " For information on enabling Python 3 with Ansible,"
                msg += " see https://docs.ansible.com/ansible/python_3_support.html"
                raise errors.AnsibleModuleError(msg)
        else:
            if ansible_python['version']['major'] != 2:
                msg = "openshift-ansible requires Python 2 for {};".format(distro)
                # BUGFIX: the message was built but never raised, so this
                # check silently passed on non-Fedora hosts running Python 3.
                raise errors.AnsibleModuleError(msg)

    def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
        """Ensure openshift_image_tag is formatted correctly"""
        openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
        if not openshift_image_tag or openshift_image_tag == 'latest':
            return None
        regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
        res = re.match(regex_to_match, str(openshift_image_tag))
        if res is None:
            msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
            msg = msg.format(str(openshift_image_tag))
            raise errors.AnsibleModuleError(msg)

    def check_pkg_version_format(self, hostvars, host):
        """Ensure openshift_pkg_version is formatted correctly"""
        openshift_pkg_version = self.template_var(hostvars, host, 'openshift_pkg_version')
        if not openshift_pkg_version:
            return None
        regex_to_match = PKG_VERSION_REGEX['re']
        res = re.match(regex_to_match, str(openshift_pkg_version))
        if res is None:
            msg = PKG_VERSION_REGEX['error_msg']
            msg = msg.format(str(openshift_pkg_version))
            raise errors.AnsibleModuleError(msg)

    def check_release_format(self, hostvars, host):
        """Ensure openshift_release is formatted correctly"""
        openshift_release = self.template_var(hostvars, host, 'openshift_release')
        if not openshift_release:
            return None
        regex_to_match = RELEASE_REGEX['re']
        res = re.match(regex_to_match, str(openshift_release))
        if res is None:
            msg = RELEASE_REGEX['error_msg']
            msg = msg.format(str(openshift_release))
            raise errors.AnsibleModuleError(msg)

    def network_plugin_check(self, hostvars, host):
        """Ensure only one type of network plugin is enabled"""
        res = []
        # Loop through each possible network plugin boolean, determine the
        # actual boolean value, and append results into a list.
        for plugin, default_val in NET_PLUGIN_LIST:
            res_temp = self.template_var(hostvars, host, plugin)
            if res_temp is None:
                res_temp = default_val
            res.append(to_bool(res_temp))

        # Zero enabled plugins (all defaults) or exactly one is valid.
        if sum(res) not in (0, 1):
            plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
            # BUGFIX: error message previously read "Only one of must be
            # true", dropping the subject of the sentence.
            msg = "Host Checked: {} Only one network plugin may be true. Found: {}".format(host, plugin_str)
            raise errors.AnsibleModuleError(msg)

    def check_hostname_vars(self, hostvars, host):
        """Checks to ensure openshift_kubelet_name_override
           and openshift_public_hostname
           conform to the proper length of 63 characters or less"""
        for varname in ('openshift_public_hostname', 'openshift_kubelet_name_override'):
            var_value = self.template_var(hostvars, host, varname)
            if var_value and len(var_value) > 63:
                msg = '{} must be 63 characters or less'.format(varname)
                raise errors.AnsibleModuleError(msg)

    def check_session_auth_secrets(self, hostvars, host):
        """Checks session_auth_secrets is correctly formatted"""
        sas = self.template_var(hostvars, host,
                                'openshift_master_session_auth_secrets')
        ses = self.template_var(hostvars, host,
                                'openshift_master_session_encryption_secrets')
        # This variable isn't mandatory, only check if set.
        if sas is None and ses is None:
            return None

        if not (
                issubclass(type(sas), list) and issubclass(type(ses), list)
        ) or len(sas) != len(ses):
            raise errors.AnsibleModuleError(
                'Expects openshift_master_session_auth_secrets and '
                'openshift_master_session_encryption_secrets are equal length lists')

        for secret in sas:
            if len(secret) < 32:
                raise errors.AnsibleModuleError(
                    'Invalid secret in openshift_master_session_auth_secrets. '
                    'Secrets must be at least 32 characters in length.')

        # Encryption secrets double as AES keys, hence the fixed lengths.
        for secret in ses:
            if len(secret) not in [16, 24, 32]:
                raise errors.AnsibleModuleError(
                    'Invalid secret in openshift_master_session_encryption_secrets. '
                    'Secrets must be 16, 24, or 32 characters in length.')
        return None

    def check_unsupported_nfs_configs(self, hostvars, host):
        """Fails if nfs storage is in use for any components. This check is
           ignored if openshift_enable_unsupported_configurations=True"""
        enable_unsupported = self.template_var(
            hostvars, host, 'openshift_enable_unsupported_configurations')

        if to_bool(enable_unsupported):
            # User has elected to run unsupported configurations; skip check.
            return None

        for storage in STORAGE_KIND_TUPLE:
            kind = self.template_var(hostvars, host, storage)
            if kind == 'nfs':
                # BUGFIX: added the missing space between "must" and "be"
                # in the implicitly concatenated message literal.
                raise errors.AnsibleModuleError(
                    'nfs is an unsupported type for {}. '
                    'openshift_enable_unsupported_configurations=True must '
                    'be specified to continue with this configuration.'
                    ''.format(storage))
        return None

    def check_htpasswd_provider(self, hostvars, host):
        """Fails if openshift_master_identity_providers contains an entry of
           kind HTPasswdPasswordIdentityProvider and
           openshift_master_manage_htpasswd is False"""
        manage_pass = self.template_var(
            hostvars, host, 'openshift_master_manage_htpasswd')
        if to_bool(manage_pass):
            # If we manage the file, we can just generate in the new path.
            return None

        idps = self.template_var(
            hostvars, host, 'openshift_master_identity_providers')
        if not idps:
            # If we don't find any identity_providers, nothing for us to do.
            return None

        old_keys = ('file', 'fileName', 'file_name', 'filename')
        if not isinstance(idps, list):
            raise errors.AnsibleModuleError("| not a list")

        for idp in idps:
            if idp['kind'] == 'HTPasswdPasswordIdentityProvider':
                for old_key in old_keys:
                    # BUGFIX: 'old_key in idp is not None' was a comparison
                    # chain equivalent to '(old_key in idp) and (idp is not
                    # None)'; test membership directly. Also added the
                    # missing space between "key" and "before".
                    if old_key in idp:
                        raise errors.AnsibleModuleError(
                            'openshift_master_identity_providers contains a '
                            'provider of kind==HTPasswdPasswordIdentityProvider '
                            'and {} is set. Please migrate your htpasswd '
                            'files to /etc/origin/master/htpasswd and update your '
                            'existing master configs, and remove the {} key '
                            'before proceeding.'.format(old_key, old_key))

    def validate_json_format_vars(self, hostvars, host):
        """Fails if invalid json format are found"""
        found_invalid_json = []
        for var in JSON_FORMAT_VARIABLES:
            if var in hostvars[host]:
                json_var = self.template_var(hostvars, host, var)
                try:
                    json.loads(json_var)
                except ValueError as json_err:
                    found_invalid_json.append([var, json_var, json_err])
                except BaseException:
                    # Non-string values (e.g. a dict already parsed by the
                    # inventory) raise TypeError here; treat them as valid.
                    pass

        if found_invalid_json:
            msg = "Found invalid json format variables:\n"
            for item in found_invalid_json:
                msg += "    {} specified in {} is invalid json format\n    {}".format(item[1], item[0], item[2])
            raise errors.AnsibleModuleError(msg)
        return None

    def check_for_oreg_password(self, hostvars, host, odt):
        """Ensure oreg_password is defined when using registry.redhat.io"""
        reg_to_check = 'registry.redhat.io'
        # BUGFIX: added the missing spaces in both implicitly concatenated
        # message literals ("whendeploying", "usingregistry...").
        err_msg = ("oreg_auth_user and oreg_auth_password must be provided when"
                   " deploying openshift-enterprise")
        err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when using"
                    " {}".format(reg_to_check))

        oreg_password = self.template_var(hostvars, host, 'oreg_auth_password')
        if oreg_password is not None:
            # A password is defined, so we're good to go.
            return None

        oreg_url = self.template_var(hostvars, host, 'oreg_url')
        if oreg_url is not None:
            if reg_to_check in oreg_url:
                raise errors.AnsibleModuleError(err_msg2)
        elif odt == 'openshift-enterprise':
            # We're not using an oreg_url, we're using default enterprise
            # registry. We require oreg_auth_user and oreg_auth_password
            raise errors.AnsibleModuleError(err_msg)

    def run_checks(self, hostvars, host):
        """Execute the hostvars validations against host"""
        distro = self.template_var(hostvars, host, 'ansible_distribution')
        odt = self.check_openshift_deployment_type(hostvars, host)
        self.check_whitelisted_registries(hostvars, host)
        self.check_python_version(hostvars, host, distro)
        self.check_image_tag_format(hostvars, host, odt)
        self.check_pkg_version_format(hostvars, host)
        self.check_release_format(hostvars, host)
        self.network_plugin_check(hostvars, host)
        self.check_hostname_vars(hostvars, host)
        self.check_session_auth_secrets(hostvars, host)
        self.check_unsupported_nfs_configs(hostvars, host)
        self.check_htpasswd_provider(hostvars, host)
        check_for_removed_vars(hostvars, host)
        self.validate_json_format_vars(hostvars, host)
        self.check_for_oreg_password(hostvars, host, odt)

    def run(self, tmp=None, task_vars=None):
        result = super(ActionModule, self).run(tmp, task_vars)

        # self.task_vars holds all in-scope variables.
        # Ignore settting self.task_vars outside of init.
        # pylint: disable=W0201
        self.task_vars = task_vars or {}

        # pylint: disable=W0201
        self.last_checked_host = "none"
        # pylint: disable=W0201
        self.last_checked_var = "none"

        # self._task.args holds task parameters.
        # check_hosts is a parameter to this plugin, and should provide
        # a list of hosts.
        check_hosts = self._task.args.get('check_hosts')
        if not check_hosts:
            msg = "check_hosts is required"
            raise errors.AnsibleModuleError(msg)

        # We need to access each host's variables
        hostvars = self.task_vars.get('hostvars')
        if not hostvars:
            # BUGFIX: previously the (empty) hostvars object itself was used
            # as the error message, producing an unhelpful error.
            msg = "hostvars is required and was not found in task_vars"
            raise errors.AnsibleModuleError(msg)

        # We loop through each host in the provided list check_hosts
        for host in check_hosts:
            try:
                self.run_checks(hostvars, host)
            except Exception as uncaught_e:
                msg = "last_checked_host: {}, last_checked_var: {};"
                msg = msg.format(self.last_checked_host, self.last_checked_var)
                msg += str(uncaught_e)
                raise errors.AnsibleModuleError(msg)

        result["changed"] = False
        result["failed"] = False
        result["msg"] = "Sanity Checks passed"

        return result
def is_registry_match(item, pattern):
    """returns True if the registry matches the given whitelist pattern

    Unlike in OpenShift, the comparison is done solely on hostname part
    (excluding the port part) since the latter is much more difficult due to
    vague definition of port defaulting based on insecure flag. Moreover, most
    of the registries will be listed without the port and insecure flag.
    """
    # Normalize to a parseable URL so urlparse can pull out the hostname,
    # replacing any scheme the item may already carry.
    normalized = "schema://" + item.split('://', 1)[-1]
    host = urlparse(normalized).hostname
    # Strip an optional trailing ":port" from the whitelist pattern.
    host_pattern = pattern.rsplit(':', 1)[0]
    return fnmatch.fnmatch(host, host_pattern)
|
|
import bitcoin
from ecdsa import NIST256p, VerifyingKey
from .Helper import *
from neo.Core.UInt160 import UInt160
from .ECCurve import EllipticCurve
class Crypto(object):
    """Static helpers for NEO hashing, signing and verification."""

    # Lazily-created singleton returned by Default().
    _Instance = None

    @staticmethod
    def SetupSignatureCurve():
        """
        Setup the Elliptic curve parameters.

        Configures the `bitcoin` module with the NIST P-256 (secp256r1)
        domain parameters (P, N, A, B, Gx, Gy).
        """
        bitcoin.change_curve(
            int("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF", 16),
            int("FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551", 16),
            int("FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFC", 16),
            int("5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B", 16),
            int("6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C296", 16),
            int("4FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5", 16)
        )

    @staticmethod
    def Default():
        """
        Get the default Crypto instance.

        Returns:
            CryptoInstance:
        """
        if not Crypto._Instance:
            Crypto._Instance = CryptoInstance()
        return Crypto._Instance

    @staticmethod
    def Hash160(message):
        """
        Get a hash of the provided message using the ripemd160 algorithm.

        Args:
            message (str): message to hash.

        Returns:
            str: hash as a double digit hex string.
        """
        return bin_hash160(message)

    @staticmethod
    def Hash160Bytes(message):
        """
        Get a hash of the provided message using the ripemd160 algorithm.

        Args:
            message (str): message to hash.

        Returns:
            bytes: hash.
        """
        return bin_hash160Bytes(message)

    @staticmethod
    def Hash256(message):
        """
        Get the hash of a double SHA256 operation on the message. i.e. SHA256(SHA256(message))

        Args:
            message (str): message to hash.

        Returns:
            bytes: hash.
        """
        return bin_dbl_sha256(message)

    @staticmethod
    def ToScriptHash(data, unhex=True):
        """
        Get a script hash of the data.

        Args:
            data (bytes): data to hash.
            unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'

        Returns:
            UInt160: script hash.
        """
        # NOTE(review): single-byte inputs deliberately skip unhexlify here;
        # confirm callers rely on that before changing the guard.
        if len(data) > 1 and unhex:
            data = binascii.unhexlify(data)
        return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))

    @staticmethod
    def ToAddress(script_hash):
        """
        Get the public address of the script hash.

        Args:
            script_hash (UInt160):

        Returns:
            str: base58 encoded string representing the wallet address.
        """
        return scripthash_to_address(script_hash.Data)

    @staticmethod
    def Sign(message, private_key):
        """
        Sign the message with the given private key.

        Args:
            message (hexstr): message to be signed
            private_key (str): 32 byte key as a double digit hex string (e.g. having a length of 64)

        Returns:
            bytearray: the signature of the message.
        """
        Crypto.SetupSignatureCurve()

        # IDIOM FIX: renamed local 'hash' -> 'digest' to stop shadowing the
        # builtin hash(). The message is hashed once with SHA256 first.
        digest = hashlib.sha256(binascii.unhexlify(message)).hexdigest()
        # v (the recovery id) is not part of the NEO signature format.
        v, r, s = bitcoin.ecdsa_raw_sign(digest, private_key)
        # Fixed-width big-endian r || s concatenation (64 bytes total).
        rb = bytearray(r.to_bytes(32, 'big'))
        sb = bytearray(s.to_bytes(32, 'big'))
        sig = rb + sb
        return sig

    @staticmethod
    def VerifySignature(message, signature, public_key, unhex=True):
        r"""
        Verify the integrity of the message.

        Args:
            message (hexstr or str): the message to verify.
            signature (bytearray): the signature belonging to the message.
            public_key (ECPoint|bytes): the public key to use for verifying the signature. If `public_key` is of type bytes then it should be raw bytes (i.e. b'\xAA\xBB').
            unhex (bool): whether the message should be unhexlified before verifying

        Returns:
            bool: True if verification passes. False otherwise.
        """
        # IDIOM FIX: use isinstance() rather than an exact type() check so
        # ECPoint subclasses are converted as well.
        if isinstance(public_key, EllipticCurve.ECPoint):
            pubkey_x = public_key.x.value.to_bytes(32, 'big')
            pubkey_y = public_key.y.value.to_bytes(32, 'big')

            public_key = pubkey_x + pubkey_y

        if unhex:
            try:
                message = binascii.unhexlify(message)
            except binascii.Error:
                # Not valid hex; verify the message as-is.
                pass
        elif isinstance(message, str):
            message = message.encode('utf-8')

        # A 33-byte key is SEC1-compressed; expand and drop the 0x04 prefix.
        if len(public_key) == 33:
            public_key = bitcoin.decompress(public_key)
            public_key = public_key[1:]

        try:
            vk = VerifyingKey.from_string(public_key, curve=NIST256p, hashfunc=hashlib.sha256)
            res = vk.verify(signature, message, hashfunc=hashlib.sha256)
            return res
        except Exception:
            # Any parsing/verification failure is reported as "not valid".
            pass

        return False
class CryptoInstance():
    """Instance facade over the static Crypto helpers.

    Obtained via Crypto.Default(); configures the signature curve
    parameters once at construction time.
    """

    def __init__(self):
        Crypto.SetupSignatureCurve()

    def Hash160(self, message):
        """
        Get a hash of the provided message using the ripemd160 algorithm.

        Note: despite the name, this delegates to Crypto.Hash160Bytes and
        therefore returns raw bytes, not a hex string.

        Args:
            message (str): message to hash.

        Returns:
            bytes: hash.
        """
        return Crypto.Hash160Bytes(message)

    def Hash256(self, message):
        """
        Get the hash of a double SHA256 operation on the message. i.e. SHA256(SHA256(message))

        Args:
            message (str): message to hash.

        Returns:
            bytes: hash.
        """
        return Crypto.Hash256(message)

    def Sign(self, message, private_key):
        """
        Sign the message with the given private key.

        Args:
            message (str): message to be signed
            private_key (str): 32 byte key as a double digit hex string (e.g. having a length of 64)

        Returns:
            bytearray: the signature of the message.
        """
        return Crypto.Sign(message, private_key)

    def VerifySignature(self, message, signature, public_key, unhex=True):
        """
        Verify the integrity of the message.

        Args:
            message (str): the message to verify.
            signature (bytearray): the signature belonging to the message.
            public_key (ECPoint): the public key to use for verifying the signature.
            unhex (bool): whether the message should be unhexlified before verifying

        Returns:
            bool: True if verification passes. False otherwise.
        """
        return Crypto.VerifySignature(message, signature, public_key, unhex=unhex)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import os.path
import socket
import ssl
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from oslo.utils import excutils
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from nova import exception
from nova.i18n import _, _LE, _LI
from nova.openstack.common import log as logging
# Configuration options for the nova WSGI servers: paste config location,
# request-log format, SSL material, socket tuning and greenpool sizing.
wsgi_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for nova-api'),
    cfg.StrOpt('wsgi_log_format',
            default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
                    ' len: %(body_length)s time: %(wall_seconds).7f',
            help='A python format string that is used as the template to '
                 'generate log lines. The following values can be formatted '
                 'into it: client_ip, date_time, request_line, status_code, '
                 'body_length, wall_seconds.'),
    cfg.StrOpt('ssl_ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               help="SSL certificate of API server"),
    cfg.StrOpt('ssl_key_file',
               help="SSL private key of API server"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.IntOpt('wsgi_default_pool_size',
               default=1000,
               help="Size of the pool of greenthreads used by wsgi"),
    cfg.IntOpt('max_header_line',
               default=16384,
               help="Maximum line size of message headers to be accepted. "
                    "max_header_line may need to be increased when using "
                    "large tokens (typically those generated by the "
                    "Keystone v3 API with big service catalogs)."),
    cfg.BoolOpt('wsgi_keep_alive',
                default=True,
                help="If False, closes the client socket connection "
                     "explicitly."),
    cfg.IntOpt('client_socket_timeout', default=900,
               help="Timeout for client connections' socket operations. "
                    "If an incoming connection is idle for this number of "
                    "seconds it will be closed. A value of '0' means "
                    "wait forever."),
    ]

# Register the options so they are readable via CONF at runtime.
CONF = cfg.CONF
CONF.register_opts(wsgi_opts)

# Module-level logger for this WSGI helper module.
LOG = logging.getLogger(__name__)
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    # Default greenthread pool size, read from configuration at import time.
    default_pool_size = CONF.wsgi_default_pool_size

    def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128,
                 use_ssl=False, max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: nova.exception.InvalidInput
        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self.pool_size = pool_size or self.default_pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
        self._wsgi_logger = logging.WritableLogger(self._logger)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len
        # A configured timeout of 0 means "wait forever"; eventlet expects
        # None for that, hence the `or None`.
        self.client_socket_timeout = CONF.client_socket_timeout or None

        # NOTE(review): the condition accepts backlog == 1 even though the
        # message says "more than 1"; 1 is effectively the minimum.
        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Resolution failure: fall back to plain IPv4 with the original
            # (host, port) pair.
            family = socket.AF_INET

        try:
            self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        except EnvironmentError:
            LOG.error(_LE("Could not bind to %(host)s:%(port)s"),
                      {'host': host, 'port': port})
            raise

        # Record the actually-bound address; port may have been 0 (ephemeral).
        (self.host, self.port) = self._socket.getsockname()[0:2]

        LOG.info(_LI("%(name)s listening on %(host)s:%(port)s"),
                 {'name': self.name, 'host': self.host, 'port': self.port})

    def start(self):
        """Start serving a WSGI application.

        :returns: None
        """
        # The server socket object will be closed after server exits,
        # but the underlying file descriptor will remain open, and will
        # give bad file descriptor error. So duplicating the socket object,
        # to keep file descriptor usable.

        dup_socket = self._socket.dup()
        if self._use_ssl:
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file

                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                          _("Unable to find cert_file : %s") % cert_file)

                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                          _("Unable to find ca_file : %s") % ca_file)

                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                          _("Unable to find key_file : %s") % key_file)

                if self._use_ssl and (not cert_file or not key_file):
                    raise RuntimeError(
                          _("When running server in SSL mode, you must "
                            "specify both a cert_file and key_file "
                            "option value in your configuration file"))

                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }

                # Only require/verify client certificates when a CA bundle
                # has been configured.
                if CONF.ssl_ca_file:
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

                dup_socket = eventlet.wrap_ssl(dup_socket,
                                               **ssl_kwargs)

                dup_socket.setsockopt(socket.SOL_SOCKET,
                                      socket.SO_REUSEADDR, 1)
                # sockets can hang around forever without keepalive
                dup_socket.setsockopt(socket.SOL_SOCKET,
                                      socket.SO_KEEPALIVE, 1)

                # This option isn't available in the OS X version of eventlet
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    dup_socket.setsockopt(socket.IPPROTO_TCP,
                                          socket.TCP_KEEPIDLE,
                                          CONF.tcp_keepidle)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed to start %(name)s on %(host)s"
                                  ":%(port)s with SSL support"),
                              {'name': self.name, 'host': self.host,
                               'port': self.port})

        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': dup_socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format,
            'debug': False,
            'keepalive': CONF.wsgi_keep_alive,
            'socket_timeout': self.client_socket_timeout
            }

        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len

        # The server runs in its own greenthread until stop()/kill().
        self._server = eventlet.spawn(**wsgi_kwargs)

    def reset(self):
        """Reset server greenpool size to default.

        :returns: None
        """
        self._pool.resize(self.pool_size)

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_LI("Stopping WSGI server."))

        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            if self._server is not None:
                # Drain in-flight request greenthreads before joining the
                # server greenthread itself.
                self._pool.waitall()
                self._server.wait()
        except greenlet.GreenletExit:
            LOG.info(_LI("WSGI server has stopped."))
class Request(webob.Request):
    """Project-local request type; currently adds no behavior over
    webob.Request but gives nova a single place to extend it."""
    pass
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy application factory.

        Instantiates the class with the keyword options found under the
        paste config's ``[app:APPNAME]`` section. For example::

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        results in ``fancy_api.Wadl(latest_version='1.3')``. Subclasses may
        override this, but the kwarg pass-through usually makes that
        unnecessary.

        :param global_config: global paste configuration (unused here).
        :param local_config: section-local options forwarded to __init__.
        :returns: a new instance of ``cls``.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""WSGI entry point; subclasses must override.

        A typical implementation decorates itself with
        ``@webob.dec.wsgify(RequestClass=Request)`` and then returns one of:
        a plain string body, a ``webob.exc`` HTTP exception, a
        ``webob.Response`` (handy for header games or iterable behavior),
        another WSGI app to run next, or ``req.get_response(app)``.
        Alternatively it may assign ``req.response`` and return ``None``.

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    A middleware wraps another WSGI application that is called next in the
    pipeline. By default it simply delegates to the wrapped app; customize
    behavior by overriding ``process_request``/``process_response`` or
    ``__call__`` itself.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """paste.deploy filter factory.

        Options found under the paste config's ``[filter:NAME]`` section are
        forwarded to ``__init__`` as kwargs. For example::

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        results in ``analytics.Analytics(app_from_paste,
        redis_host='127.0.0.1')``. Subclasses may override this, but the
        kwarg pass-through usually makes that unnecessary.

        :returns: a callable taking the next app and returning a ``cls``.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        # The next WSGI application down the pipeline.
        self.application = application

    def process_request(self, req):
        """Per-request hook.

        Return ``None`` to let the next application down the stack run, or
        return a response object to short-circuit and use it directly.
        """
        return None

    def process_response(self, response):
        """Hook to inspect or replace the wrapped app's response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Dump the request environ, run the wrapped app, then dump the
        response headers; the body is printed lazily as it is iterated."""
        print(('*' * 40) + ' REQUEST ENVIRON')
        for key, value in req.environ.items():
            print(key, '=', value)
        print()
        resp = req.get_response(self.application)

        print(('*' * 40) + ' RESPONSE HEADERS')
        # BUGFIX: .iteritems() is Python-2-only; .items() works on both
        # Python 2 and Python 3.
        for (key, value) in resp.headers.items():
            print(key, '=', value)
        print()

        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print(('*' * 40) + ' BODY')
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in ``mapper`` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be an object that can route
        the request to the action-specific method.

        Examples::

            mapper = routes.Mapper()
            sc = ServerController()

            # Explicit mapping of one route to a controller+action
            mapper.connect(None, '/svrlist', controller=sc, action='list')

            # Actions are all implicitly defined
            mapper.resource('server', 'servers', controller=sc)

            # Pointing to an arbitrary WSGI app. You can specify the
            # {path_info:.*} parameter so the target app can be handed just
            # that section of the URL.
            mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware performs the matching and then invokes _dispatch
        # with the match stored in the WSGI environ.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a
        route and putting the match information into req.environ. Returns
        either a 404 response or the routed WSGI app.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        return match['controller']
class Loader(object):
    """Load WSGI applications from paste configuration files."""

    def __init__(self, config_path=None):
        """Locate the paste config, raising ConfigNotFound when missing.

        :param config_path: Full or relative path to the paste config;
                            defaults to CONF.api_paste_config.
        :returns: None
        """
        candidate = config_path or CONF.api_paste_config
        resolved = None
        if os.path.isabs(candidate):
            # Absolute paths are accepted only when the file exists.
            if os.path.exists(candidate):
                resolved = candidate
        else:
            # Relative paths go through the configured file search.
            resolved = CONF.find_file(candidate)
        if not resolved:
            raise exception.ConfigNotFound(path=candidate)
        self.config_path = resolved

    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `nova.exception.PasteAppNotFound`
        """
        try:
            LOG.debug("Loading app %(name)s from %(path)s",
                      {'name': name, 'path': self.config_path})
            return deploy.loadapp("config:%s" % self.config_path, name=name)
        except LookupError as err:
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
|
|
""" Plugin for playing music.
Some of the logic is very similar to the example at:
https://github.com/Rapptz/discord.py/blob/master/examples/playlist.py
TUTORIAL:
This module would require the bot to have ffmpeg installed and set in
PATH, so that one could run `ffmpeg` in the shell.
See: https://www.ffmpeg.org/
The bot owner can link a music channel to any voice channel in a server
using the !music link <voice channel ...> command. After doing this, the
bot should automatically join the linked channel whenever a member joins
it. The members in the channel can then manage music playing.
ISSUES:
The music player seems to randomly start skipping songs, or rather
stopping them way too early. I have no solution to this issue and do not
know why it happens, but apparently I'm not the only bot creator who has
experienced said issue.
Commands:
music
"""
import random
import re
from collections import namedtuple, deque
from traceback import print_exc
from typing import Dict
import discord
import plugins
from pcbot import utils, Annotate, Config
client = plugins.client  # type: discord.Client

# Voice channel ids registered as music channels, persisted via Config.
music_channels = Config("music_channels", data=[])
# Per-server playback state, populated in on_voice_state_update.
voice_states = {}  # type: Dict[discord.Server, VoiceState]

# Options handed to youtube-dl when creating a player (see `play`).
youtube_dl_options = dict(
    format="bestaudio/best",
    extractaudio=True,
    audioformat="mp3",
    noplaylist=True,
    default_search="auto",
    quiet=True,
    nocheckcertificate=True
)

# Ask ffmpeg to transparently reconnect dropped network streams.
ffmpeg_before_options = "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"

max_songs_queued = 6  # How many songs each member are allowed in the queue at once
max_song_length = 10 * 60 * 60  # The maximum song length in seconds
default_volume = .6

# Load the opus codec when discord.py hasn't found it by itself.
# NOTE(review): the hard-coded name is a Windows x64 DLL — presumably this
# plugin is normally deployed on Windows; confirm before running elsewhere.
if not discord.opus.is_loaded():
    discord.opus.load_opus('libopus-0.x64.dll')
# A queued request: the text channel it was requested from, the audio player
# created for it, and the member who requested it.
Song = namedtuple("Song", "channel player requester")
disposition_pattern = re.compile(r"filename=\"(?P<name>.+)\"")


def format_song(song: Song, url=True):
    """ Format a song request. """
    # The player reports its duration in seconds; render it as m:ss.
    if song.player.duration:
        minutes, seconds = divmod(int(song.player.duration), 60)
        duration = " / **{0}:{1:02}**".format(minutes, seconds)
    else:
        duration = ""

    formatted = "**{0.title}** requested by **{1.display_name}**{2}".format(
        song.player, song.requester, duration)
    if url:
        formatted += "\n**URL**: <{0.url}>".format(song.player)
    return formatted
class VoiceState:
    """Tracks the bot's playback state for a single server."""

    def __init__(self, voice):
        self.voice = voice
        self.current = None           # The Song currently playing, if any
        self._volume = default_volume
        self.queue = deque()          # Pending items of type Song
        self.skip_votes = set()       # Members who voted to skip the current song

    def is_playing(self):
        """ Check if the bot is playing music. """
        if not self.current:
            return False
        return not self.current.player.is_done()

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value: float):
        # Clamp to at most 100%; values below 1% reset to the default.
        if value > 1:
            value = 1
        elif value < .01:
            value = default_volume
        self._volume = value
        if self.is_playing():
            self.current.player.volume = value

    def format_playing(self):
        return format_song(self.current) if self.is_playing() else "*Nothing.*"

    def play_next(self):
        """ Play the next song if there are any. """
        self.skip_votes.clear()
        if self.queue:
            self.current = self.queue.popleft()
            self.current.player.start()

    def skip(self):
        """ Skip the song currently playing. """
        if self.is_playing():
            self.current.player.stop()
# Parent command group: the subcommands below register via @music.command.
@plugins.command(aliases="m", disabled_pm=True)
async def music(message, _: utils.placeholder):
    """ Manage music. If a music channel is assigned, the bot will join
    whenever someone joins it. """
    pass
def get_server_channel(server: discord.Server):
    """ Return the server's music channel or None. """
    # A channel counts as the music channel when its id is registered.
    return next((c for c in server.channels if c.id in music_channels.data), None)
def client_connected(server: discord.Server):
    """ Returns True or False whether the bot is client_connected to the
    Music channel in this server. """
    linked = get_server_channel(server)
    # Both a tracked VoiceState and the bot's presence in the linked
    # channel are required.
    return server in voice_states and server.me.voice_channel == linked
def assert_connected(member: discord.Member):
    """ Throws an AssertionError exception when neither the bot nor
    the member is connected to the music channel."""
    linked = get_server_channel(member.server)
    # The member must be in the linked channel, and so must the bot.
    assert member.voice.voice_channel == linked, "**You are not connected to the voice channel.**"
    assert client_connected(member.server), "**The bot is not connected to the voice channel.**"
@music.command(aliases="p pl")
async def play(message: discord.Message, song: Annotate.CleanContent):
    """ Play a song. The given song could either be a URL or keywords
    to lookup videos in youtube. """
    assert_connected(message.author)
    state = voice_states[message.server]

    # Check that the member hasn't already requested enough songs
    songs_queued = sum(1 for s in state.queue if s.requester == message.author)
    assert songs_queued < max_songs_queued, "**You have queued enough songs for now.**"

    # Strip any embed characters, spaces or code symbols.
    song = song.strip("< >`")

    try:
        player = await state.voice.create_ytdl_player(song, ytdl_options=youtube_dl_options, after=state.play_next,
                                                      before_options=ffmpeg_before_options)
    # NOTE(review): bare except also swallows CancelledError/KeyboardInterrupt;
    # presumably intended as a catch-all for youtube-dl failures — confirm.
    except:
        await client.say(message, "**Could not add this song to the queue.**")
        print_exc()
        return

    # Make sure the song isn't too long
    if player.duration:
        assert player.duration < max_song_length, "**The requested song is too long.**"

    # When the title is just the URL itself, try to derive a nicer title
    # from the Content-Disposition filename (direct file links).
    url_match = utils.http_url_pattern.match(song)
    if url_match and player.title == url_match.group("sub"):
        # Try retrieving the filename as this is probably a file
        headers = await utils.retrieve_headers(song)
        if "Content-Disposition" in headers:
            name_match = disposition_pattern.search(headers["Content-Disposition"])
            if name_match:
                # Drop the file extension from the reported filename.
                player.title = "".join(name_match.group("name").split(".")[:-1])

    player.volume = state.volume
    song = Song(player=player, requester=message.author, channel=message.channel)
    await client.send_message(song.channel, "Queued: " + format_song(song))
    state.queue.append(song)

    # Start the song when there are none
    if not state.is_playing():
        state.play_next()
@music.command(aliases="s next")
async def skip(message: discord.Message):
    """ Skip the song currently playing. """
    assert_connected(message.author)
    state = voice_states[message.server]
    assert state.is_playing(), "**There is no song currently playing.**"
    assert message.author not in state.skip_votes, "**You have already voted to skip this song.**"

    # We want to skip immediately when the requester skips their own song.
    if message.author == state.current.requester:
        await client.say(message, "Skipped song on behalf of the requester.")
        state.skip()
        return

    state.skip_votes.add(message.author)

    # In order to skip, everyone but the requester and the bot must vote
    # NOTE(review): needed_to_skip can be 0 (or negative) in a two-member
    # channel, so the first vote skips immediately — confirm intended.
    needed_to_skip = len(get_server_channel(message.server).voice_members) - 2

    votes = len(state.skip_votes)
    if votes >= needed_to_skip:
        await client.say(message, "**Skipped song.**")
        state.skip()
    else:
        await client.say(message, "Voted to skip the current song. `{}/{}`".format(votes, needed_to_skip))
@music.command(aliases="u nvm fuck no")
async def undo(message: discord.Message):
    """ Undo your previously queued song. This will not *skip* the song if it's playing. """
    assert_connected(message.author)
    state = voice_states[message.server]

    # Walk the queue from the back so the member's newest request is removed.
    for queued in reversed(state.queue):
        if queued.requester != message.author:
            continue
        await client.say(message, "Removed previous request **{0.title}** from the queue.".format(queued.player))
        state.queue.remove(queued)
        return

    await client.say(message, "**You have nothing to undo.**")
@music.command()
async def clear(message: discord.Message):
    """ Remove all songs you have queued. """
    assert_connected(message.author)
    state = voice_states[message.server]

    # Collect the member's songs first, then drop each from the queue.
    mine = [s for s in state.queue if s.requester == message.author]
    for queued in mine:
        state.queue.remove(queued)

    if mine:
        await client.say(message, "Removed all queued songs by **{0.display_name}**.".format(message.author))
    else:
        await client.say(message, "**You have no queued songs.**")
@music.command(roles="Shuffler")
async def shuffle(message: discord.Message):
    """ Shuffles the current queue. """
    assert_connected(message.author)
    state = voice_states[message.server]
    # random.shuffle works on the deque since it supports indexed access,
    # though each access is O(n); fine for the small queues allowed here.
    random.shuffle(state.queue)
    # Show the reshuffled queue (the `queue` command defined below).
    await queue(message)
@music.command(aliases="v volume")
async def vol(message: discord.Message, volume: int):
    """ Set the volume of the player. Volume should be a number in percent. """
    assert_connected(message.author)
    state = voice_states[message.server]
    # The VoiceState.volume setter clamps the value; read it back for the reply.
    state.volume = volume / 100
    await client.say(message, "Set the volume to **{:.00%}**.".format(state.volume))
@music.command(aliases="np")
async def playing(message: discord.Message):
    """ Return the name and URL of the song currently playing. """
    assert_connected(message.author)
    state = voice_states[message.server]
    # format_playing renders either the current song or "*Nothing.*".
    await client.say(message, "Playing: " + state.format_playing())
@music.command(aliases="q l list")
async def queue(message: discord.Message):
    """ Return a list of the queued songs. """
    assert_connected(message.author)
    state = voice_states[message.server]
    assert state.queue, "**There are no songs queued.**"

    # Strip the markdown bold markers; the list is shown inside a code block.
    lines = [format_song(entry, url=False).replace("**", "") for entry in state.queue]
    await client.say(message, "```elm\n{}```".format("\n".join(lines)))
@music.command(owner=True)
async def link(message: discord.Message, voice_channel: Annotate.VoiceChannel):
    """ Link the Music bot to a voice channel in a server. """
    assert voice_channel.id not in music_channels.data, "**This voice channel is already linked.**"
    assert get_server_channel(message.server) is None, "**A voice channel is already linked to this server.**"

    # Link the channel and persist the id so the link survives restarts.
    music_channels.data.append(voice_channel.id)
    music_channels.save()
    await client.say(message, "Voice channel **{0.name}** is now the music channel.".format(voice_channel))
@music.command(owner=True)
async def unlink(message: discord.Message):
    """ Unlink this server's music channel. """
    channel = get_server_channel(message.server)
    assert channel, "**This server has no voice channel linked.**"

    # Unlink the channel and persist the removal.
    music_channels.data.remove(channel.id)
    music_channels.save()
    await client.say(message, "This server no longer has a music channel.")
@plugins.event()
async def on_voice_state_update(before: discord.Member, after: discord.Member):
    """ Handle joining and leaving channels. The bot will automatically
    join the server's voice channel when a member joins. """
    server = before.server
    channel = get_server_channel(server)
    if channel is None:
        return

    # Number of human (non-bot) members currently in the linked channel.
    count_members = sum(1 for m in channel.voice_members if not m.bot)

    # Leave the voice channel we're client_connected to when the only one here is the bot
    if server in voice_states and server.me.voice_channel == channel:
        if count_members == 0:
            state = voice_states[server]
            await state.voice.disconnect()
            # Stop playback and drop any pending requests before forgetting
            # this server's state.
            if state.is_playing():
                state.queue.clear()
                state.skip()
            del voice_states[server]
    # Connect to the voice channel when there are people in it
    else:
        if count_members >= 1:
            try:
                voice = await client.join_voice_channel(channel)
            except discord.errors.ClientException:
                # The bot is in another channel, so we'll get the voice client and move the bot
                voice = client.voice_client_in(server)
                await voice.move_to(channel)
            voice_states[server] = VoiceState(voice)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#--------------------------------------------
# Authors:
# Frank Boers <[email protected]>
# Christian Kiefer <[email protected]>
#--------------------------------------------
# Date: 12.11.2019
#--------------------------------------------
# License: BSD (3-clause)
#--------------------------------------------
# Updates
#--------------------------------------------
import os,os.path as op
import numpy as np
import time,datetime
from distutils.dir_util import mkpath
import mne
from jumeg.decompose.ica_replace_mean_std import ICA,apply_ica_replace_mean_std
from jumeg.jumeg_preprocessing import get_ics_cardiac, get_ics_ocular
#---
from jumeg.base import jumeg_logger
from jumeg.base.jumeg_base import jumeg_base as jb
from jumeg.base.jumeg_base_config import JuMEG_CONFIG as jCFG
#---
from jumeg.base.pipelines.jumeg_pipelines_ica_perfromance import JuMEG_ICA_PERFORMANCE
from jumeg.base.pipelines.jumeg_pipelines_ica_svm import JuMEG_ICA_SVM
from jumeg.base.pipelines.jumeg_pipelines_chopper import JuMEG_PIPELINES_CHOPPER
#---
from jumeg.filter.jumeg_mne_filter import JuMEG_MNE_FILTER
logger = jumeg_logger.get_logger()
__version__= "2020.04.21.001"
def fit_ica(raw, picks, reject, ecg_ch, eog_hor, eog_ver,
            flow_ecg, fhigh_ecg, flow_eog, fhigh_eog, ecg_thresh,
            eog_thresh, use_jumeg=True, random_state=42):
    """
    author: C.Kiefer; [email protected]

    Fit an ICA object to the raw file. Identify cardiac and ocular components
    and mark them for removal.

    Parameters:
    -----------
    inst : instance of Raw, Epochs or Evoked
        Raw measurements to be decomposed.
    picks : array-like of int
        Channels to be included. This selection remains throughout the
        initialized ICA solution. If None only good data channels are used.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'ecog', 'eog', 'ecg',
        'hbo', 'hbr'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # V (EEG channels)
                          eog=250e-6 # V (EOG channels)
                          )

        It only applies if `inst` is of type Raw.
    ecg_ch : array-like | ch_name | None
        ECG channel to which the sources shall be compared. It has to be
        of the same shape as the sources. If some string is supplied, a
        routine will try to find a matching channel. If None, a score
        function expecting only one input-array argument must be used,
        for instance, scipy.stats.skew (default).
    eog_hor : array-like | ch_name | None
        Horizontal EOG channel to which the sources shall be compared.
        It has to be of the same shape as the sources. If some string
        is supplied, a routine will try to find a matching channel. If
        None, a score function expecting only one input-array argument
        must be used, for instance, scipy.stats.skew (default).
    eog_ver : array-like | ch_name | None
        Vertical EOG channel to which the sources shall be compared.
        It has to be of the same shape as the sources. If some string
        is supplied, a routine will try to find a matching channel. If
        None, a score function expecting only one input-array argument
        must be used, for instance, scipy.stats.skew (default).
    flow_ecg : float
        Low pass frequency for ECG component identification.
    fhigh_ecg : float
        High pass frequency for ECG component identification.
    flow_eog : float
        Low pass frequency for EOG component identification.
    fhigh_eog : float
        High pass frequency for EOG component identification.
    ecg_thresh : float
        Threshold for ECG component idenfication.
    eog_thresh : float
        Threshold for EOG component idenfication.
    use_jumeg : bool
        Use the JuMEG scoring method for the identification of
        artifact components.
    random_state : None | int | instance of np.random.RandomState
        np.random.RandomState to initialize the FastICA estimation.
        As the estimation is non-deterministic it can be useful to
        fix the seed to have reproducible results. Defaults to 42 here.

    Returns:
    --------
    ica : mne.preprocessing.ICA
        ICA object for raw file with ECG and EOG components marked for removal.
    """
    # increased iteration to make it converge
    # fix the number of components to 40, depending on your application you
    # might want to raise the number
    # 'extended-infomax', 'fastica', 'picard'
    logger.info('Start ICA FIT: init ICA object')
    ica = ICA(method='fastica', n_components=40, random_state=random_state,
              max_pca_components=None, max_iter=5000, verbose=False)
    logger.debug('ICA FIT: apply ICA.fit\n reject: {} \n picks: {}'.format(reject,picks))
    ica.fit(raw, picks=picks, decim=None, reject=reject, verbose=True)
    logger.info('Done ICA FIT')
    #######################################################################
    # identify bad components
    #######################################################################
    if use_jumeg:
        logger.info("JuMEG Computing scores and identifying components ...")
        #--- get ECG related components using JuMEG
        ic_ecg,sc_ecg = get_ics_cardiac(raw, ica, flow=flow_ecg, fhigh=fhigh_ecg,
                                        thresh=ecg_thresh, tmin=-0.5, tmax=0.5, name_ecg=ecg_ch,
                                        use_CTPS=True) #[0]
        # de-duplicate and sort the component indices
        ic_ecg = list(set(ic_ecg))
        ic_ecg.sort()
        #--- get EOG related components using JuMEG
        ic_eog = get_ics_ocular(raw, ica, flow=flow_eog, fhigh=fhigh_eog,
                                thresh=eog_thresh, name_eog_hor=eog_hor, name_eog_ver=eog_ver,
                                score_func='pearsonr')
        ic_eog = list(set(ic_eog))
        ic_eog.sort()
        #--- if necessary include components identified by correlation as well
        bads_list = []
        bads_list.extend( ic_ecg )
        bads_list.extend( ic_eog )
        bads_list.sort()
        ica.exclude = bads_list
        msg = ["JuMEG identified ICA components",
               "  -> ECG components: {}".format(ic_ecg),
               "  ->         scores: {}".format(sc_ecg[ic_ecg]),
               "  -> EOG components: {}".format(ic_eog)
              ]
        logger.debug("\n".join(msg))
    else:
        logger.info("MNE Computing scores and identifying components ...")
        # correlate each source with the ECG / EOG reference channels
        ecg_scores = ica.score_sources(raw, target=ecg_ch, score_func='pearsonr',
                                       l_freq=flow_ecg, h_freq=fhigh_ecg, verbose=False)
        # horizontal channel
        eog1_scores = ica.score_sources(raw, target=eog_hor, score_func='pearsonr',
                                        l_freq=flow_eog, h_freq=fhigh_eog, verbose=False)
        # vertical channel
        eog2_scores = ica.score_sources(raw, target=eog_ver, score_func='pearsonr',
                                        l_freq=flow_eog, h_freq=fhigh_eog, verbose=False)
        # print the top ecg, eog correlation scores
        ecg_inds = np.where(np.abs(ecg_scores) > ecg_thresh)[0]
        eog1_inds = np.where(np.abs(eog1_scores) > eog_thresh)[0]
        eog2_inds = np.where(np.abs(eog2_scores) > eog_thresh)[0]
        # union of all components exceeding their threshold
        highly_corr = list(set(np.concatenate((ecg_inds, eog1_inds, eog2_inds))))
        highly_corr.sort()
        highly_corr_ecg = list(set(ecg_inds))
        highly_corr_eog1 = list(set(eog1_inds))
        highly_corr_eog2 = list(set(eog2_inds))
        highly_corr_ecg.sort()
        highly_corr_eog1.sort()
        highly_corr_eog2.sort()
        # if necessary include components identified by correlation as well
        ica.exclude = highly_corr
        msg = ["MNE Highly correlated artifact components",
               "  -> ECG  : {} ".format(highly_corr_ecg),
               "  -> EOG 1: {} ".format(highly_corr_eog1),
               "  -> EOG 2: {} ".format(highly_corr_eog2)
              ]
        logger.debug("\n".join(msg))
    logger.info("done ICA FIT\n  -> excluded ICs: {}\n".format(ica.exclude))
    return ica
class JuMEG_PIPELINES_ICA(object):
    def __init__(self,**kwargs):
        """Create the config loader and helper components, then reset state.

        Keyword args are forwarded to the JuMEG config loader (jCFG).
        """
        super().__init__()
        #--- configuration and processing helpers
        self._CFG = jCFG(**kwargs)
        self.PreFilter = JuMEG_MNE_FILTER()
        self.Chopper = JuMEG_PIPELINES_CHOPPER()
        self.ICAPerformance = JuMEG_ICA_PERFORMANCE()
        self.SVM = JuMEG_ICA_SVM()
        #--- flags and reporting defaults
        self.useSVM = False
        self.report_key = "ica"
        self._plot_dir = None
        self._ics_found_svm = None
        self.verbose = False
        self.debug = False
        # reset all per-run attributes (raw, paths, picks, ...)
        self._clear()
    #--- ICs marked for exclusion on the fitted ICA object
    @property
    def ICs(self): return self._ica_obj.exclude

    @property
    def stage(self): return self._stage
    @stage.setter
    def stage(self,v):
        self._stage=v

    #--- base path of the raw file; the setter validates the directory
    @property
    def path(self): return self._raw_path
    @path.setter
    def path(self,v):
        if not v: return
        if jb.isPath(v):
            self._raw_path = v
        else:
            logger.exception("!!! No such path: {}".format(v))

    #--- derived directories: <path>/ica and <path>/ica/chops
    @property
    def path_ica(self): return os.path.join(self.path,"ica")
    @property
    def path_ica_chops(self): return os.path.join(self.path_ica,"chops")

    @property
    def plot_dir(self): return os.path.join(self.path,self.cfg.plot_dir)

    @property
    def raw(self): return self._raw

    @property
    def raw_fname(self): return self._raw_fname
    @raw_fname.setter
    def raw_fname(self,v):
        # resolve v relative to self.path; presumably returns None when the
        # file does not exist — confirm against jb.isFile
        self._raw_fname = jb.isFile(v,path=self.path)

    @property
    def picks(self): return self._picks

    #--- configuration: CFG is the loader object, cfg its parsed data
    @property
    def CFG(self): return self._CFG
    @property
    def cfg(self): return self._CFG._data
def clear(self,objects=None):
if isinstance(objects,(list)):
while objects:
try:
o = objects.pop()
o.close()
del o
except:
pass
self.PreFilter.clear()
self.Chopper.clear()
self.ICAPerformance.clear()
self._clear()
    def _clear(self):
        """Reset all per-run state; called on init and between runs."""
        # timestamp used to measure run duration
        self._start_time = time.time()

        self._stage = None
        self._path = None
        self._path_ica = None

        #--- raw object and bookkeeping
        self._raw = None
        self._raw_path = None
        self._raw_fname = None
        self._raw_isfiltered = False

        self._ica_obj = None
        self._picks = None
        # chop bookkeeping moved to self.Chopper
        #self._chop_times = None
        #self._chop_indices = None

        self._filter_prefix = ""
        self._filter_fname = ""
    def _update_from_kwargs(self,**kwargs):
        """Refresh raw object, path, stage and raw filename from keyword args."""
        self._raw = kwargs.get("raw",self._raw)
        # NOTE(review): the fallback is self._path, which _clear() only ever
        # sets to None, while the `path` property reads self._raw_path —
        # presumably self._raw_path was intended here; confirm.
        self.path = kwargs.get("path",self._path)
        self._stage = kwargs.get("stage",self.stage)
        self.raw_fname = kwargs.get("raw_fname",self._raw_fname)
    def _set_ecg_eog_annotations(self):
        """
        finding ECG, EOG events in raw, setting events as anotations
        """
        #--- find ECG in raw
        self.ICAPerformance.ECG.find_events(raw=self.raw,**self.CFG.GetDataDict("ecg"))
        #--- find EOG in raw; the returned annotations object is attached
        #    to the raw instance (it presumably includes the ECG events found
        #    above — confirm against JuMEG_ICA_PERFORMANCE)
        annotations = self.ICAPerformance.EOG.find_events(raw=self.raw,**self.CFG.GetDataDict("eog"))
        self.raw.set_annotations(annotations)
def trunc_nd(self,n,d):
"""
https://stackoverflow.com/questions/8595973/truncate-to-three-decimals-in-python/8595991
"""
n = str(n)
return (n if not n.find('.') + 1 else n[:n.find('.') + d + 1])
def _initRawObj(self):
"""
load or get RAW obj
init & mkdir path tree <stage>/../ica/chops
init picks from RAW
init report HDF file name
"""
self._raw,self._raw_fname = jb.get_raw_obj(self.raw_fname,raw=self.raw)
self._raw_path = os.path.dirname(self._raw_fname)
if self.stage:
self._raw_path = os.join(self.stage,self._raw_path)
#---
mkpath(self.path_ica_chops,mode=0o770)
#--- get picks from raw
self._picks = jb.picks.meg_nobads(self._raw)
    def _get_chop_name(self,raw,chop=None,extention="-ica.fif",postfix=None,fullpath=False):
        """
        Build the chop filename for a raw object.

        raw       : raw object whose stored filename is used as the base
        chop      = None       [tmin, tmax] in seconds, appended as ',tttt-tttt'
        extention = "-ica.fif" [-raw.fif]
        postfix   = None       [ar]  appended as ',<postfix>'
        fullpath  = True
                    if True: includes path in filename

        Return:
        -------
        fname chop,fname orig
        """
        fname = jb.get_raw_filename(raw)
        # split off the trailing '-<kind>.fif' part, e.g. '...-raw.fif' -> '...'
        fname,fextention = op.basename(fname).rsplit('-',1)
        if fullpath:
            # NOTE(review): fname is already a basename here, so this
            # startswith() check can never match — presumably harmless.
            if fname.startswith(self.path_ica_chops):
                fchop = fname
            else:
                fchop = op.join(self.path_ica_chops,fname)
        else:
            fchop = os.path.basename(fname)
        if postfix:
            fchop +=","+postfix
        # Append the chop window; NaN as tmax means 'until end of recording'.
        # The try/except silently skips chop=None (len() raises TypeError).
        try:
            if len(chop):
                if np.isnan(chop[1]):
                    fchop += ',{:04d}-{:04d}'.format(int(chop[0]),int(self.raw.times[-1]))
                else:
                    fchop += ',{:04d}-{:04d}'.format(int(chop[0]),int(chop[1]))
        except:
            pass
        if extention:
            fchop+=extention
        return fchop,fname
def _apply_fit(self,raw_chop=None,chop=None,idx=None):
"""
call to jumeg fit_ica
raw_chop = None
chop = None
ToDo
if not overwrite
if ICA file exist: load ICA
else calc ICA
:return:
ICA obj, ica-filename
"""
self._ica_obj = None
self._ics_found_svm = None
fname_ica,fname = self._get_chop_name(raw_chop,chop=None)
logger.info("start ICA FIT chop: {} / {}\n".format(idx + 1,self._chop_times.shape[0]) +
" --> chop id : {}\n".format(chop) +
" -> ica fname : {}\n".format(fname_ica) +
" -> ica chop path: {}\n".format(self.path_ica_chops) +
" -> raw filename : {}\n".format(fname))
#--- ck for ovewrite & ICA exist
load_from_disk = False
if not self.cfg.fit.overwrite:
load_from_disk = jb.isFile(fname_ica,path=self.path_ica_chops)
if load_from_disk:
self._ica_obj,fname_ica = jb.get_raw_obj(fname_ica,path=self.path_ica_chops)
logger.info("DONE LOADING ICA chop form disk: {}\n -> ica filename: {}".
format(chop,fname_ica))
else:
if self.useArtifactRejection:
with jumeg_logger.StreamLoggerSTD(label="ica fit"):
self._ica_obj = fit_ica(raw=raw_chop,picks=self.picks,reject=self.CFG.GetDataDict(key="reject"),
ecg_ch=self.cfg.ecg.ch_name,ecg_thresh=self.cfg.ecg.thresh,
flow_ecg=self.cfg.ecg.flow,fhigh_ecg=self.cfg.ecg.fhigh,
#---
eog_hor = self.cfg.eog.hor_ch,
eog_ver = self.cfg.eog.ver_ch,
flow_eog=self.cfg.eog.flow,fhigh_eog=self.cfg.eog.fhigh,
eog_thresh=self.cfg.eog.thresh,
#---
use_jumeg=self.cfg.ecg.use_jumeg,
random_state=self.cfg.random_state)
self._ica_obj.exclude = list( set( self._ica_obj.exclude ) )
if self.useSVM:
if not self._ica_obj:
logger.info('SVM start ICA FIT: init ICA object')
#--- !!! ToDo put parameter in CFG file
self._ica_obj = ICA(method='fastica',n_components=40,random_state=42,max_pca_components=None,max_iter=5000,verbose=False)
self._ica_obj.fit(raw_chop,picks=self.picks,decim=None,reject=self.CFG.GetDataDict(key="reject"),
verbose=True)
else:
logger.info('SVM ICA Obj start')
#--- !!! do_copy = True => resample
self._ica_obj,_ = self.SVM.run(raw=self.raw,ICA=self._ica_obj,picks=self.picks,do_crop=False,do_copy=True)
logger.info('DONE SVM ICA FIT: apply ICA.fit')
#--- save ica object
if self.cfg.fit.save and not load_from_disk:
logger.info("saving ICA chop: {}\n".format(idx + 1,self._chop_times.shape[0]) +
" -> ica filename : {}".format(fname_ica))
self._ica_obj.save(os.path.join(self.path_ica_chops,fname_ica))
logger.info("done ICA FIT for chop: {}\n".format(chop)+
" -> raw chop filename : {}\n".format(fname_ica)+
"-"*30+"\n"+
" -> ICs found JuMEG/MNE : {}\n".format(self.SVM.ICsMNE)+
" -> ICs found SVM : {}\n".format(self.SVM.ICsSVM) +
" -> ICs excluded : {}\n".format(self.ICs)+
"-"*30+"\n"+
" -> save ica fit : {}".format(self.cfg.fit.save)
)
return self._ica_obj,fname_ica
    def apply_ica_artefact_rejection(self,raw,ICA,fname_raw= None,fname_clean=None,replace_pre_whitener=True,copy_raw=True,
                                     reject=None):
        """
        Applies ICA to the raw object. (ica transform)

        Parameters
        ----------
        raw : mne.io.Raw() (raw chop)
            Raw object ICA is applied to
        ICA : ICA object
            ICA object being applied to the raw object
            (NOTE(review): the parameter name shadows the ICA class imported
            at module level; within this method `ICA` is the instance)
        fname_raw : str | None
            Path for saving the raw object
            (NOTE(review): currently unused in the body — confirm)
        fname_clean : str | None
            Path for saving the ICA cleaned raw object
            (NOTE(review): currently unused in the body — confirm)
        reject: MNE reject dict
        replace_pre_whitener : bool
            If True, pre_whitener is replaced when applying ICA to
            unfiltered data otherwise the original pre_whitener is used.
            (NOTE(review): currently unused in the body — confirm)
        copy_raw: make a copy of raw

        Returns
        -------
        raw_clean : mne.io.Raw()
            Raw object after ICA cleaning
        """
        logger.info("Start ICA Transform => call <apply_ica_replace_mean_std>")
        if copy_raw:
            _raw = raw.copy()
        else:
            _raw = raw

        raw_clean = None
        # work on a copy so the caller's ICA object is left untouched
        ica = ICA.copy() # ToDo exclude copy

        with jumeg_logger.StreamLoggerSTD(label="ica fit"):
            raw_clean = apply_ica_replace_mean_std(_raw,ica,picks=self.picks,
                                                   reject=reject,exclude=ica.exclude,n_pca_components=None)
        return raw_clean
    def concat_and_save(self,raws,fname=None,save=False,annotations=None):
        """
        concat a list of raw obj
        call to mne.concatenate_raws

        :param raws: list of mne Raw objects; each is closed afterwards and
                     the input list is emptied
        :param fname: optional filename stored in the concatenated raw
        :param save: save concat raw
        :param annotations: optional annotations set on the result
        :return:
         concat raw obj
        """
        if raws:
            raw_concat = mne.concatenate_raws(raws)
            # close the source objects; this empties the caller's list
            while raws:
                raws.pop().close()
            if fname:
                jb.set_raw_filename(raw_concat,fname)
            if save:
                if not fname.startswith(self.path):
                    fname = os.path.join(self.path,fname)
                jb.apply_save_mne_data(raw_concat,fname=fname)
            # NOTE(review): annotations are attached after saving, so the
            # file on disk does not contain them — confirm this is intended.
            if annotations:
                raw_concat.set_annotations(annotations)
            return raw_concat
    def _update_report(self,data):
        """
        Merge `data` into the per-recording report YAML under the "ica" key.

        :param data: report payload (e.g. list of performance image files)
        :return:
        """
        #--- update report config
        CFG = jCFG()
        report_config = os.path.join(self.plot_dir,os.path.basename(self.raw_fname).rsplit("_",1)[0] + "-report.yaml")
        d = None
        if not CFG.load_cfg(fname=report_config):
            # no existing config: write a fresh document with only our section
            d = { "ica":data }
        else:
            # config loaded: update it in place; save_cfg is then called with
            # data=None, presumably persisting CFG.config — confirm with jCFG
            CFG.config["ica"] = data
        CFG.save_cfg(fname=report_config,data=d)
def _apply(self,raw=None,ICAs=None,run_transform=False,save_ica=False,save_chops=False,save_chops_clean=False,save_clean=True):
"""
:param raw : raw filtered or unfilterd
:param run_transform : self.cfg.transform.run or self.cfg.transform.filtered.run
:param ICAs : list of ICA objs if None calc ICA fit
:param save_ica : save ICA obj
:param save_chops : self.cfg.transform.unfiltered.save_chop or self.cfg.transform.filtered.save_chop
:param save_chops_clean: self.cfg.transform.unfiltered.save_chop_clean or self.cfg.transform.filtered.save_chop_clean
:param save_clean : self.cfg.transform.filtered.save or self.cfg.transform.unfiltered.save
:return:
raw_clean,ICA_objs
ICAs obj list to transform with unfilterd data if self.PreFilter.isFiltered
titles
images as np.arry
"""
raw_clean = None
ICA_objs = []
raw_chops_clean_list = []
fimages = []
for idx in range(self.Chopper.n_chops):
chop = self.Chopper.chops[idx]
logger.info("Start ICA FIT & Transform chop: {} / {}\n".format(idx + 1,self.Chopper.n_chops))
#--- chop raw
raw_chop = self.Chopper.copy_crop_and_chop(raw,chop)
fname_chop,fname_raw = self._get_chop_name(raw_chop,chop=chop,extention="-raw.fif")
jb.set_raw_filename(raw_chop,fname_chop)
#--- ICA fit chop
if ICAs:
ICA = ICAs[idx]
else:
ICA,fname_ica = self._apply_fit(raw_chop=raw_chop,chop=chop,idx=idx)
ICA_objs.append(ICA)
fname_chop,_ = self._get_chop_name(raw_chop,extention="-raw.fif")
fname_chop = os.path.join(self.path_ica_chops,fname_chop)
if save_chops:
raw_chop.save(fname_chop,overwrite=True)
#--- ICA Transform chop
if run_transform:
fout = jb.get_raw_filename(raw_chop)
raw_chops_clean_list.append(self.apply_ica_artefact_rejection(raw_chop,ICA,reject=self.CFG.GetDataDict(key="reject")))
#--- plot performance
txt = "ICs JuMEG/MNE: "
if self.useSVM:
if self.SVM.ICsMNE:
txt+= ",".join( [str(i) for i in self.SVM.ICsMNE ] )
txt+= " SVM: {}".format(self.SVM.ICsSVM)
else:
txt+= ",".join( [str(i) for i in self._ica_obj.exclude ] )
# logger.info("raw chop:\n {}".format(raw_chop.annotations))
self.ICAPerformance.plot(raw=raw_chop,raw_clean=raw_chops_clean_list[-1],verbose=True,text=txt,
plot_path=self.plot_dir,
fout=fout.rsplit("-",1)[0] + "-ar")
fimages.append( self.ICAPerformance.Plot.fout )
if save_chops_clean:
fname_clean,_ = self._get_chop_name(raw_chop,extention="-raw.fif",postfix="ar")
fname_clean = os.path.join(self.path_ica_chops,fname_clean)
raw_chops_clean_list[-1].save(fname_clean,overwrite=True)
logger.info("done ICA FIT & transform chop: {} / {}\n".format(idx + 1,self._chop_times.shape[0]))
#--- concat & save raw chops to raw_clean
if raw_chops_clean_list:
fname_clean = fname_raw.replace("-raw.fif",",ar-raw.fif")
if not fname_clean.endswith(",ar-raw.fif"):
fname_clean += ",ar-raw.fif"
raw_clean = self.concat_and_save(raw_chops_clean_list,
fname = fname_clean,
annotations = raw.annotations,
save = save_clean)
del( raw_chops_clean_list )
return raw_clean,ICA_objs,fimages
#==== MAIN function
def run(self,**kwargs):
    """
    Run the full ICA pipeline: fit ICA on data chops, transform (clean) the
    filtered and/or unfiltered data, plot performance and update the report.

    Workflow:
      1) update config from kwargs and init the raw object
      2) find & store ECG/EOG events in raw.annotations
      3) calculate chop times via the Chopper
      4) optionally pre-filter the raw data
      5) ICA fit & transform on filtered and unfiltered data
      6) sanity-check data sizes, update the report

    :param kwargs: forwarded to config / raw initialisation
                   (e.g. path, raw_fname, config, key)
    :return:
     raw_unfiltered_clean,raw_filtered_clean
    """
    self._clear()
    self._update_from_kwargs(**kwargs)
    #--- load config
    kwargs["useStruct"] = True
    self._CFG.update(**kwargs )
    self.useSVM = self.cfg.fit.use_svm
    self.useArtifactRejection = self.cfg.fit.use_artifact_rejection
    #--- init or load raw
    self._initRawObj()
    #--- find & store ECG/EOG events in raw.annotations
    self._set_ecg_eog_annotations()
    #--- chop times
    self.Chopper.update(raw=self.raw,length=self.cfg.chops.length,
                        description=self.cfg.chops.description,time_window_sec=self.cfg.chops.time_window,
                        show=self.cfg.chops.show,verbose=self.verbose,debug=self.debug)
    msg = [
        "Apply ICA => FIT & Transform",
        " -> filename : {}".format(self._raw_fname),
        " -> ica chop path : {}".format(self.path_ica_chops),
        "-" * 40,
        " -> chops [sec] : {}".format(self.Chopper.chops_as_string() ),
        " -> chops [indices]: {}".format(self.Chopper.indices_as_string() ),
        "-" * 40
    ]
    #--- apply pre-filter
    if self.cfg.pre_filter.run:
        self.PreFilter.apply(
            flow = self.cfg.pre_filter.flow,
            fhigh = self.cfg.pre_filter.fhigh,
            save = self.cfg.pre_filter.save,
            overwrite = self.cfg.pre_filter.overwrite,
            raw = self.raw.copy(),
            picks = self.picks,
            annotations = self.raw.annotations.copy()
        )
        msg = self.PreFilter.GetInfo(msg=msg)
    else:
        self.PreFilter.clear()
    logger.info("\n".join(msg) )
    ICA_objs = None
    raw_filtered_clean = None
    raw_unfiltered_clean = None
    fimages_filtered = []
    fimages_unfiltered = None
    #--- apply raw-filter ica-fit,transform, save
    if self.PreFilter.isFiltered:
        raw_filtered_clean,ICA_objs,fimages_filtered = self._apply(raw = self.PreFilter.raw,
                                                                   run_transform = self.cfg.transform.run and self.cfg.transform.filtered.run,
                                                                   save_chops = self.cfg.transform.filtered.save_chop,
                                                                   save_chops_clean = self.cfg.transform.filtered.save_chop_clean,
                                                                   save_clean = self.cfg.transform.filtered.save)
        self.PreFilter.raw.close()
    #---apply transform for unfilterd data update data-mean
    # reuse the ICA objects fitted on the filtered data (if any)
    raw_unfiltered_clean, _ ,fimages_unfiltered = self._apply(raw = self.raw,
                                                              ICAs = ICA_objs,
                                                              run_transform = self.cfg.transform.run and self.cfg.transform.unfiltered.run,
                                                              save_chops = self.cfg.transform.unfiltered.save_chop,
                                                              save_chops_clean = self.cfg.transform.unfiltered.save_chop_clean,
                                                              save_clean = self.cfg.transform.unfiltered.save)
    logger.info("DONE ICA FIT & Transpose\n"+
                " -> filename : {}\n".format( jb.get_raw_filename(raw_unfiltered_clean) )+
                " -> time to process :{}".format( datetime.timedelta(seconds= time.time() - self._start_time ) ))
    #--- plot
    data = { "ICA-FI-AR":None,"ICA-AR":None }
    if self.PreFilter.isFiltered:
        self.ICAPerformance.plot(raw=self.PreFilter.raw,raw_clean=raw_filtered_clean,plot_path = self.plot_dir,
                                 text=None,fout = self.PreFilter.fname.rsplit("-",1)[0] + "-ar")
        data["ICA-FI-AR"] = [self.ICAPerformance.Plot.fout,*fimages_filtered]
    if raw_unfiltered_clean:
        self.ICAPerformance.plot(raw=self.raw,raw_clean=raw_unfiltered_clean,verbose=True,text=None,
                                 plot_path=self.plot_dir,fout=self.raw_fname.rsplit("-",1)[0] + "-ar")
        data["ICA-AR"] = [self.ICAPerformance.Plot.fout,*fimages_unfiltered]
    #-- check data size orig and transformed
    ds_in = self._raw._data.shape
    msg = ["Check data size"," --> raw orig: {}".format(ds_in)]
    # BUG FIX: the flags default to True so that a variant that was not
    # computed does not raise; previously both comparison branches set the
    # flag to True (check never fired) while a missing filtered result left
    # it False (always raised).
    ck_size = True
    ck_size_u = True
    if raw_unfiltered_clean:
        ds_uout = raw_unfiltered_clean._data.shape
        msg.append(" --> raw-ar unfiltered: {}".format(ds_uout))
        ck_size_u = ( ds_in[1] == ds_uout[1] )
    if raw_filtered_clean:
        ds_out = raw_filtered_clean._data.shape
        msg.append(" --> raw-ar filtered: {}".format(ds_out))
        ck_size = ( ds_in[1] == ds_out[1] )
    if (ck_size and ck_size_u):
        logger.info( "\n".join(msg) )
    else:
        # BUG FIX: the message was built as "…".join(msg), which interleaved
        # the prefix between the items instead of prepending it.
        raise ValueError("ERROR chop crop data\n" + "\n".join(msg))
    self._update_report(data)
    self.clear(objects=ICA_objs)
    return raw_unfiltered_clean,raw_filtered_clean
def test1():
    """Example driver: run the JuMEG ICA pipeline on a local test dataset."""
    #--- init/update logger
    jumeg_logger.setup_script_logging(logger=logger)

    #--- MEG94T example (immediately overridden by the QUATERS block below)
    stage = "$JUMEG_PATH_LOCAL_DATA/exp/MEG94T/mne"
    fcfg = os.path.join(stage,"meg94t_config01.yaml")
    fpath = "206720/MEG94T0T2/130820_1335/2/"
    path = os.path.join(stage,fpath)
    raw_fname = "206720_MEG94T0T2_130820_1335_2_c,rfDC,meeg,nr,bcc,int-raw.fif"

    #--- QUATERS example (the configuration actually used)
    stage = "$JUMEG_PATH_LOCAL_DATA/exp/QUATERS/mne"
    fcfg = os.path.join(stage,"jumeg_config.yaml") #""quaters_config01.yaml")
    fpath = "210857/QUATERS01/191210_1325/1"
    path = os.path.join(stage,fpath)
    raw_fname = "210857_QUATERS01_191210_1325_1_c,rfDC,meeg,nr,bcc,int-raw.fif"

    #--- INTEXT example, kept for reference
    #stage = "${JUMEG_TEST_DATA}/mne"
    #fcfg = "intext_config01.yaml"
    raw = None
    #fpath = "211855/INTEXT01/190329_1004/6"
    #path = os.path.join(stage,fpath)
    # raw_fname = "211855_INTEXT01_190329_1004_6_c,rfDC,meeg,nr,bcc-raw.fif"
    #raw_fname = "211855_INTEXT01_190329_1004_6_c,rfDC,meeg,nr,bcc,int-raw.fif"

    logger.info("JuMEG Apply ICA mne-version: {}".format(mne.__version__))
    #--
    pipeline = JuMEG_PIPELINES_ICA()
    raw_unfiltered_clean,raw_filtered_clean = pipeline.run(
        path=path,raw_fname=raw_fname,config=fcfg,key="ica")
    #raw_filtered_clean.plot(block=True)
# run the example pipeline only when executed as a script
if __name__ == "__main__":
    test1()
# NOTE: dead code below -- legacy implementations kept for reference only,
# superseded by the Chopper class; the triple-quoted string is never used
# at runtime and can be removed once the Chopper migration is confirmed.
'''
def _calc_chop_times(self):
    """
    calc chop times & indices
    Returns
    self._chop_times,self._chop_indices
    -------
    TYPE
    DESCRIPTION.
    """
    logger.debug("Start calc Chop Times: length: {} raw time: {}".format(self.cfg.chops.length,self.raw.times[-1]))
    self._chop_times = None
    self._chop_indices = None
    #--- warn if times less than chop length
    if self.raw.times[-1] <= self.cfg.chops.length:
        logger.warning("<Raw Times> : {} smaler than <Chop Times> : {}\n\n".format(self.raw.times[-1],self._chop_times))
    self._chop_times,self._chop_indices = get_chop_times_indices(self.raw.times,chop_length=self.cfg.chops.length)
    if self.debug:
        logger.debug("Chop Times:\n -> {}\n --> Indices: -> {}".format(self._chop_times,self._chop_indices))
    return self._chop_times,self._chop_indices

def _copy_crop_and_chop(self,raw,chop):
    """
    copy raw
    crop
    :param raw:
    :param chop:
    :return:
    """
    if self._chop_times.shape[0] > 1:
        raw_crop = raw.copy().crop(tmin=chop[0],tmax=chop[1])
        if self.debug:
            logger.debug("RAW Crop Annotation : {}\n -> tmin: {} tmax: {}\n {}\n".format(jb.get_raw_filename(raw),chop[0],chop[1],raw_crop.annotations))
        return raw_crop
    return raw
'''
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import itertools
import logging
from taskflow.patterns import graph_flow
from pumphouse import task
from pumphouse import events
from pumphouse import exceptions
from pumphouse.tasks import utils as task_utils
LOG = logging.getLogger(__name__)
class LogReporter(task_utils.UploadReporter):
    """Reports glance image upload progress to the log and the event bus."""

    def report(self, absolute):
        """Log the current progress and emit an ``update`` event.

        :param absolute: upload progress as a fraction in [0, 1].
        """
        cloud_name, src_image, dst_image = self.context
        percent = absolute * 100
        LOG.info("Image %r uploaded on %3.2f%%",
                 dst_image["id"], percent)
        payload = {
            "id": dst_image["id"],
            "type": "image",
            "cloud": cloud_name,
            "action": None,
            "progress": round(percent),
            "data": dict(dst_image),
        }
        events.emit("update", payload, namespace="/events")
class EnsureImage(task.BaseCloudsTask):
    """Ensures an image from the source cloud exists in the destination cloud.

    Looks the image up by checksum+name on the destination; when absent,
    recreates it there and streams the image data across.
    """

    def execute(self, image_id, tenant_info, kernel_info, ramdisk_info):
        # When tenant info is given, operate on the destination cloud scoped
        # to that tenant so ownership is preserved.
        if tenant_info:
            tenant = self.dst_cloud.keystone.tenants.get(tenant_info["id"])
            dst_cloud = self.dst_cloud.restrict(tenant_name=tenant.name)
        else:
            dst_cloud = self.dst_cloud
        image_info = self.src_cloud.glance.images.get(image_id)
        # NOTE(review): the existence check lists via self.dst_cloud while
        # creation/upload below use the tenant-restricted dst_cloud --
        # confirm this asymmetry is intended.
        images = self.dst_cloud.glance.images.list(filters={
            # FIXME(akscram): Not all images have the checksum property.
            "checksum": image_info["checksum"],
            "name": image_info["name"],
        })
        try:
            # XXX(akscram): More then one images can be here. Now we
            #               just ignore this fact.
            image = next(iter(images))
        except StopIteration:
            # Image not present on the destination: recreate it there.
            parameters = {
                "disk_format": image_info["disk_format"],
                "container_format": image_info["container_format"],
                "visibility": image_info["visibility"],
                "min_ram": image_info["min_ram"],
                "min_disk": image_info["min_disk"],
                "name": image_info["name"],
                "protected": image_info["protected"],
            }
            # AMI-style images reference kernel/ramdisk images by id.
            if kernel_info:
                parameters["kernel_id"] = kernel_info["id"]
            if ramdisk_info:
                parameters["ramdisk_id"] = ramdisk_info["id"]
            # TODO(akscram): Some image can contain additional
            #                parameters which are skipped now.
            image = dst_cloud.glance.images.create(**parameters)
            self.created_event(image)
            # Stream the raw image bytes from source to destination,
            # reporting progress through LogReporter.
            data = self.src_cloud.glance.images.data(image_info["id"])
            img_data = task_utils.FileProxy(data, image_info["size"],
                                            LogReporter((dst_cloud.name,
                                                         image_info,
                                                         image)))
            dst_cloud.glance.images.upload(image["id"], img_data)
            image = dst_cloud.glance.images.get(image["id"])
            self.uploaded_event(image)
        return dict(image)

    def created_event(self, image):
        # Announce that the destination image record was created.
        LOG.info("Image created: %s", image["id"])
        events.emit("create", {
            "id": image["id"],
            "type": "image",
            "cloud": self.dst_cloud.name,
            "action": "uploading",
            "data": dict(image),
        }, namespace="/events")

    def uploaded_event(self, image):
        # Announce that the image data upload finished.
        LOG.info("Image uploaded: %s", image["id"])
        events.emit("update", {
            "id": image["id"],
            "type": "image",
            "cloud": self.dst_cloud.name,
            "progress": None,
            "action": None,
            "data": dict(image),
        }, namespace="/events")
class EnsureImageWithKernel(EnsureImage):
    """Ensure an AMI image that references a kernel image (no ramdisk)."""

    def execute(self, image_id, user_info, kernel_info):
        # BUG FIX: super() was called with the unrelated EnsureSingleImage
        # class, which raises TypeError because self is not an instance of it.
        return super(EnsureImageWithKernel, self).execute(image_id, user_info,
                                                          kernel_info, None)
class EnsureImageWithRamdisk(EnsureImage):
    """Ensure an AMI image that references a ramdisk image (no kernel)."""

    def execute(self, image_id, user_info, ramdisk_info):
        # BUG FIX: super() was called with the unrelated EnsureSingleImage
        # class, which raises TypeError because self is not an instance of it.
        return super(EnsureImageWithRamdisk, self).execute(image_id, user_info,
                                                           None, ramdisk_info)
class EnsureSingleImage(EnsureImage):
    """Ensure a plain image with neither kernel nor ramdisk references."""

    def execute(self, image_id, user_info):
        # Delegate with empty kernel/ramdisk bindings.
        return super(EnsureSingleImage, self).execute(
            image_id, user_info, None, None)
class DeleteImage(task.BaseCloudTask):
    """Deletes an image from the cloud and emits a ``delete`` event."""

    def execute(self, image_info, **requires):
        """Delete the image described by ``image_info``.

        :param image_info: dict-like glance image record with an ``id`` key.
        :raises exceptions.glance_excs.BadRequest: when glance rejects the
            deletion (logged, then re-raised).
        """
        image_id = image_info["id"]
        try:
            self.cloud.glance.images.delete(image_id)
        except exceptions.glance_excs.BadRequest:
            LOG.exception("Error deleting: %s", str(image_info))
            # IMPROVED: bare re-raise preserves the original traceback
            # (``raise exc`` truncated it).
            raise
        else:
            LOG.info("Deleted: %s", str(image_info))
            self.delete_event(image_info)

    def delete_event(self, image_info):
        # Notify listeners that the image is gone.
        events.emit("delete", {
            "cloud": self.cloud.name,
            "type": "image",
            "id": image_info["id"]
        }, namespace="/events")
class DeleteImageByID(DeleteImage):
    """Delete an image looked up by id rather than by a full info dict."""

    def execute(self, image_id, **requires):
        # Resolve the full image record first, then reuse the parent logic.
        image_info = dict(self.cloud.glance.images.get(image_id))
        super(DeleteImageByID, self).execute(image_info)
def migrate_image_task(context, task_class, image_id, tenant_id, *rebind):
    """Build a ``task_class`` task that ensures one image on the destination.

    :param context: migration context with ``src_cloud``, ``dst_cloud`` and
        a ``store`` mapping for flow inputs.
    :param task_class: one of the Ensure*Image task classes.
    :param image_id: id of the source image.
    :param tenant_id: id of the owning tenant (may be None for public images).
    :param rebind: extra requirement groups; each element is expected to be
        an iterable of requirement names (e.g. a taskflow ``provides`` set),
        hence the star-unpacking into chain() -- TODO confirm against callers.
    :returns: the constructed task, with ``image-<id>`` stored in the context.
    """
    image_binding = "image-{}".format(image_id)
    image_ensure = "image-{}-ensure".format(image_id)
    tenant_ensure = "tenant-{}-ensure".format(tenant_id)
    rebind = itertools.chain((image_binding, tenant_ensure), *rebind)
    # IMPROVED: local renamed from ``task`` to avoid shadowing the imported
    # ``pumphouse.task`` module.
    image_task = task_class(context.src_cloud, context.dst_cloud,
                            name=image_ensure,
                            provides=image_ensure,
                            rebind=list(rebind))
    context.store[image_binding] = image_id
    return image_task
# XXX(akscram): We should to simplify this function. The cascade of
# if-statements looks ugly.
def migrate_image(context, image_id):
    """Build a flow migrating one image, including AMI kernel/ramdisk parts.

    Returns either a single task (plain image) or a graph flow containing
    the kernel/ramdisk tasks plus the dependent image task.
    """
    image = context.src_cloud.glance.images.get(image_id)
    tenant_id = None
    if image["visibility"] == "private":
        tenant_id = image.get("owner")
    else:
        # NOTE(review): tenant_id is still None here, so this stores
        # "tenant-None-ensure": None to satisfy the rebind of the image
        # task for public images -- confirm this sentinel is intended.
        tenant_ensure = "tenant-{}-ensure".format(tenant_id)
        context.store[tenant_ensure] = None
    # AMI images may reference separate kernel/ramdisk images that must be
    # migrated first; wire them into a graph flow in dependency order.
    if image["container_format"] == "ami" and (hasattr(image, "kernel_id") or
                                               hasattr(image, "ramdisk_id")):
        flow = graph_flow.Flow("migrate-image-{}".format(image_id))
        if hasattr(image, "kernel_id") and hasattr(image, "ramdisk_id"):
            kernel = migrate_image_task(context, EnsureSingleImage,
                                        image["kernel_id"], tenant_id)
            ramdisk = migrate_image_task(context, EnsureSingleImage,
                                         image["ramdisk_id"], tenant_id)
            # NOTE: rebinds ``image`` from the glance record to the task.
            image = migrate_image_task(context, EnsureImage, image_id,
                                       tenant_id, kernel.provides,
                                       ramdisk.provides)
            flow.add(kernel, ramdisk, image)
        elif hasattr(image, "kernel_id"):
            kernel = migrate_image_task(context, EnsureSingleImage,
                                        image["kernel_id"], tenant_id)
            image = migrate_image_task(context, EnsureImageWithKernel,
                                       image_id, tenant_id, kernel.provides)
            flow.add(kernel, image)
        else:
            ramdisk = migrate_image_task(context, EnsureSingleImage,
                                         image["ramdisk_id"], tenant_id)
            image = migrate_image_task(context, EnsureImageWithRamdisk,
                                       image_id, tenant_id, ramdisk.provides)
            flow.add(ramdisk, image)
    else:
        flow = migrate_image_task(context, EnsureSingleImage,
                                  image_id, tenant_id)
    return flow
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""training pipeline for GIFT."""
import functools
import time
from absl import logging
from flax import jax_utils
from flax.deprecated import nn
import jax
import jax.numpy as jnp
from gift.pipelines import multi_env_manifold_mixup
from gift.pipelines import pipeline_utils
from gift.tasks import style_transfer_ops
from gift.train_lib import optimizers
from gift.utils import tensor_util
class GradualDomainAdaptationWithMixup(
    multi_env_manifold_mixup.MultiEnvManifoldMixup):
  """Training pipeline for gradual adaptation with manifold mixup."""

  # Supported ways of interpolating between representations.
  _INTERPOLATION_METHODS = {
      'plain_convex_combination': tensor_util.convex_interpolate,
      'wasserstein': style_transfer_ops.wasserstein,
      'wct': style_transfer_ops.wct
  }

  def __init__(self, model_cls, task, hparams, experiment_dir,
               tb_summary_writer, rng):
    super().__init__(model_cls, task, hparams, experiment_dir,
                     tb_summary_writer, rng)
    self.self_training_iterations = hparams.get('self_training_iterations', 1)
    self.iter_total_steps = self.total_steps // self.self_training_iterations
    self.hparams.keep_env_ckpts = self.hparams.get('keep_env_ckpts', False)
    self.includes_self_supervision = True
    logging.info('self_training_iterations %d', self.self_training_iterations)
    # Set train env (On this environment we use ground truth labels to train
    # the model).
    self.labeled_envs = hparams.get('labeled_environments',
                                    None) or task.dataset.train_environments
    self.unlabeled_envs = hparams.get('unlabeled_environments', [])
    interpolation_method = self.hparams.get('interpolation_method',
                                            'plain_convex_combination')
    intra_interpolation_method = self.hparams.get('intra_interpolation_method',
                                                  'plain_convex_combination')
    self.setup_interpolation_method(interpolation_method,
                                    intra_interpolation_method)

  def setup_interpolation_method(self, interpolation_method,
                                 intra_interpolation_method):
    """Define vmapped interpolation functions."""
    self.interpolate_fn = jax.vmap(
        functools.partial(
            pipeline_utils.interpolate,
            interpolation_method=self
            ._INTERPOLATION_METHODS[interpolation_method]),
        in_axes=(0, 0, 0, 0, None, None, None, None))
    self.intra_interpolate_fn = jax.vmap(
        functools.partial(
            pipeline_utils.interpolate,
            interpolation_method=self
            ._INTERPOLATION_METHODS[intra_interpolation_method]),
        in_axes=(0, 0, 0, 0, None, None, None, None))

  def setup_pmapped_tain_and_eval_steps(self):
    """Builds pmapped eval and forward-pass functions."""
    eval_env_ids = list(
        map(int, self.task.dataset.data_iters.validation.keys()))
    self.p_eval_step = functools.partial(
        self.eval_step, all_env_ids=eval_env_ids)
    self.pmapped_eval_step = jax.pmap(
        self.p_eval_step,
        axis_name='batch',
        in_axes=(0, 0),
        static_broadcasted_argnums=(2,),
        donate_argnums=(1))
    self.pmapped_forward_pass = jax.pmap(
        self.forward_pass, axis_name='batch', in_axes=(0, 0, 0, 0))

  def set_pseudo_label_generator(self):
    """Sets the pseudo label generator function."""
    logit_transformer = functools.partial(
        pipeline_utils.logit_transformer,
        temp=self.hparams.get('label_temp') or 1.0,
        confidence_quantile_threshold=self.hparams.get(
            'confidence_quantile_threshold', 0.1),
        self_supervised_label_transformation=self.hparams.get(
            'self_supervised_label_transformation', 'sharp'))
    pseudo_label_generator = functools.partial(
        pipeline_utils.pseudo_label_generator,
        pseudo_labels_transformer_fn=logit_transformer,
        train=self.hparams.get('pseudo_labels_train_mode') or False)
    self.pseudo_label_generator = jax.pmap(pseudo_label_generator)

  def maybe_reset_train_state(self):
    """Optionally re-initializes model params and/or optimizer state."""
    optimizer = jax_utils.unreplicate(self.train_state.optimizer)

    if self.hparams.get('reinitilize_params_at_each_step', False):
      del optimizer.target
      (flax_model, _, _) = pipeline_utils.create_flax_module(
          optimizer.target.module, self.task.dataset.meta_data['input_shape'],
          self.hparams, nn.make_rng(),
          self.task.dataset.meta_data.get('input_dtype', jnp.float32))
    else:
      flax_model = optimizer.target

    # Reset optimizer
    if self.hparams.get('reinitialize_optimizer_at_each_step', False):
      optimizer = optimizers.get_optimizer(self.hparams).create(flax_model)
    else:
      optimizer = optimizer.replace(target=flax_model)

    optimizer = jax_utils.replicate(optimizer)
    self.train_state = self.train_state.replace(optimizer=optimizer)

  def training_loss_fn(self, flax_model, train_state, teacher_train_state,
                       batch, unlabeled_batch, dropout_rng, env_ids,
                       unlabeled_env_ids, sampled_layer):
    """Runs forward pass and computes loss.

    Args:
      flax_model: A flax module.
      train_state: TrainState; The state of training including the current
        global_step, model_state, rng, and optimizer.
      teacher_train_state: TrainState; The state of training for the teacher
        (including the current global_step, model_state, rng, and optimizer).
      batch: list(dict); A batch of data for each environment in the labeld set.
      unlabeled_batch: list(dict); A batch of data for each environment in the
        unlabeld set.
      dropout_rng: FLAX PRNG key.
      env_ids: list(int); List of labeled training environments ids.
      unlabeled_env_ids: list(int); List of unlabeled environments ids.
      sampled_layer: str; Name of the layer on which mixup is applied.

    Returns:
      loss, new_module_state and computed logits for each batch.
    """
    dropout_rng, new_rng = jax.random.split(dropout_rng)
    with nn.stochastic(dropout_rng):
      # Run student forward pass on the labeled envs.
      (all_std_env_reps, std_env_logits, _,
       train_state) = self.stateful_forward_pass(flax_model, train_state, batch)

      # Run teacher forward pass on the labeled envs.
      (labeled_tchr_env_logits, _,
       _) = self.stateless_forward_pass(teacher_train_state.optimizer.target,
                                        teacher_train_state, batch)

      # Run teacher forward pass on the unlabeled envs.
      (unlabeled_tchr_env_logits, all_tchr_unlabeled_env_reps,
       _) = self.stateless_forward_pass(teacher_train_state.optimizer.target,
                                        teacher_train_state, unlabeled_batch)

      # Replace labels with predicted labels from the teacher model.
      for ub_id in range(len(unlabeled_env_ids)):
        unlabeled_batch[ub_id]['label'] = jnp.argmax(
            unlabeled_tchr_env_logits[ub_id], axis=-1)

      # Get sampled layer for interpolations:
      std_sampled_reps = all_std_env_reps[sampled_layer]
      sampled_unlabeled_reps = all_tchr_unlabeled_env_reps[sampled_layer]

    interpolation_rng, new_rng = jax.random.split(new_rng)
    with nn.stochastic(interpolation_rng):
      # NOTE(review): inter-env interpolation uses intra_interpolate_fn here;
      # confirm whether interpolate_fn was intended.
      (interpolated_batches, interpolated_logits, _,
       train_state) = self.maybe_inter_env_interpolation(
           batch, env_ids, flax_model, self.intra_interpolate_fn, sampled_layer,
           std_sampled_reps, std_sampled_reps, train_state)

      (same_env_interpolated_batches, same_env_interpolated_logits, _,
       train_state) = self.maybe_intra_env_interpolation(
           batch, env_ids, flax_model, self.intra_interpolate_fn, sampled_layer,
           std_sampled_reps, train_state)

      (unlabeled_interpolated_batches, unlabeled_interpolated_logits,
       unlabeled_mixup_lambdas, unlabeled_mixup_alpha, unlabeled_mixup_beta,
       train_state) = self.maybe_gradual_interpolation(
           batch, unlabeled_batch, env_ids, unlabeled_env_ids, flax_model,
           self.interpolate_fn, sampled_layer, std_sampled_reps,
           sampled_unlabeled_reps, std_sampled_reps, sampled_unlabeled_reps,
           labeled_tchr_env_logits, unlabeled_tchr_env_logits, train_state,
           teacher_train_state)

      # Compute the total loss (inside nn.stochastic):
      # env_reps and env_ids are set to None to avoid computing a loss for
      # domain mapping (the mapping model is not trained and not used in
      # computing the loss).
      ground_truth_factor_params = pipeline_utils.get_weight_param(
          self.hparams, 'ground_truth_factor', 1.0)
      ground_truth_factor = pipeline_utils.scheduler(
          train_state.global_step, ground_truth_factor_params)
      ground_truth_loss = self.task.loss_function(std_env_logits, None, batch,
                                                  None, flax_model.params,
                                                  train_state.global_step)
      loss = ground_truth_loss * ground_truth_factor

      # Add the loss for cross environment interpolated states:
      if len(env_ids) > 1 and self.hparams.get('inter_env_interpolation', True):
        inter_mixup_factor_params = pipeline_utils.get_weight_param(
            self.hparams, 'inter_mixup_factor', 1.0)
        inter_mixup_factor = pipeline_utils.scheduler(
            train_state.global_step, inter_mixup_factor_params)
        loss += self.task.loss_function(
            interpolated_logits, None, interpolated_batches, None, None,
            train_state.global_step) * inter_mixup_factor

      # Add the loss for same environment interpolated states:
      if self.hparams.get('intra_env_interpolation', True):
        intra_mixup_factor_params = pipeline_utils.get_weight_param(
            self.hparams, 'intra_mixup_factor', 1.0)
        intra_mixup_factor = pipeline_utils.scheduler(
            train_state.global_step, intra_mixup_factor_params)
        loss += self.task.loss_function(
            same_env_interpolated_logits, None, same_env_interpolated_batches,
            None, None, train_state.global_step) * intra_mixup_factor

      # Add the loss for gradual environment interpolations toward unlabeled
      # target environment(s):
      unlabeled_mixup_factor = 0
      unlabeled_loss = 0
      if self.hparams.get('unlabeled_interpolation', True):
        unlabeled_mixup_factor_params = pipeline_utils.get_weight_param(
            self.hparams, 'unlabeled_mixup_factor', 1.0)
        unlabeled_mixup_factor = pipeline_utils.scheduler(
            train_state.global_step, unlabeled_mixup_factor_params)
        unlabeled_loss = self.task.loss_function(unlabeled_interpolated_logits,
                                                 None,
                                                 unlabeled_interpolated_batches,
                                                 None, None,
                                                 train_state.global_step)
        loss += unlabeled_loss * unlabeled_mixup_factor

      logs = {}
      logs['unlabeled_mixup_lambda'] = unlabeled_mixup_lambdas
      logs['unlabeled_mixup_alpha'] = unlabeled_mixup_alpha
      logs['unlabeled_mixup_beta'] = unlabeled_mixup_beta
      logs['unlabeled_mixup_factor'] = unlabeled_mixup_factor
      logs['train_loss'] = ground_truth_loss
      logs['unlabeled_loss'] = unlabeled_loss

    return loss, (train_state.model_state, std_env_logits, logs)

  def stateless_forward_pass(self,
                             flax_model,
                             train_state,
                             batch,
                             input_key='input'):
    """Forward pass that does not update the module state (teacher mode)."""
    (all_env_logits, all_env_reps,
     selected_env_reps, _) = self.forward_pass(flax_model, train_state, batch,
                                               nn.make_rng(), input_key)
    return all_env_logits, all_env_reps, selected_env_reps

  def train(self):
    """Training loop."""
    master = jax.host_id() == 0
    global_start_step = self.start_step

    # current_step keeps track of global (cumulative) number of steps the model
    # is trained on all of the environments so that we know the starting
    # step for the next environments.
    current_step = 0
    eval_env_ids = list(
        map(int, self.task.dataset.data_iters.validation.keys()))

    labeled_envs_ids = [
        self.task.dataset.env2id(env) for env in self.labeled_envs
    ]
    unlabeled_envs_ids = [
        self.task.dataset.env2id(env) for env in self.unlabeled_envs
    ]
    labeled_env_dict = {
        str(env_id): self.task.dataset.data_iters.train[str(env_id)]
        for env_id in labeled_envs_ids
    }
    unlabeled_env_dict = {
        str(env_id): self.task.dataset.data_iters.train[str(env_id)]
        for env_id in unlabeled_envs_ids
    }

    labeled_env_ids, labeled_iters = list(zip(*labeled_env_dict.items()))
    labeled_env_ids = list(map(int, labeled_env_ids))
    unlabeled_env_ids, unlabeled_iters = list(zip(*unlabeled_env_dict.items()))
    unlabeled_env_ids = list(map(int, unlabeled_env_ids))

    self.p_train_step = functools.partial(
        self.train_step,
        env_ids=labeled_env_ids,
        unlabeled_env_ids=unlabeled_env_ids)
    self.pmapped_train_step = jax.pmap(
        self.p_train_step,
        axis_name='batch',
        in_axes=(0, 0, 0, 0),
        static_broadcasted_argnums=(4),
        donate_argnums=(2, 3))

    # Prepare arguments for layer sampling:
    sample_batch = self.get_next_batch(labeled_iters)
    _, all_env_reps, _, _ = self.pmapped_forward_pass(
        self.train_state.optimizer.target, self.train_state, sample_batch,
        self.train_state.rng)
    layer_keys, mixup_layers = pipeline_utils.get_sample_layer_params(
        self.hparams, all_env_reps)

    # The teacher starts as a copy of the current model.
    self.teacher_train_state = self.train_state
    train_summary, eval_summary = None, None
    for _ in range(self.self_training_iterations):
      # Set start and end step for the current environment.
      iter_start_step = current_step
      iter_end_step = iter_start_step + self.iter_total_steps
      if global_start_step < iter_end_step:
        # Resume or start training on this environment if we haven't already
        # trained on it or stopped in the middle of it.
        # Update env_start_step if the preemption has occured in the middle of
        # training on this environments.
        iter_start_step += jnp.maximum(0, global_start_step - iter_start_step)
        train_summary, eval_summary = self._train_loop(
            eval_env_ids, iter_end_step, iter_start_step, labeled_env_ids,
            labeled_iters, layer_keys, master, mixup_layers, unlabeled_env_ids,
            unlabeled_iters)

      current_step += self.iter_total_steps
      # Sync and save
      if self.hparams.keep_env_ckpts:
        self.train_state = self.checkpoint(self.train_state)

      # Reset teacher to use the newly trained model.
      self.teacher_train_state = self.train_state
      self.maybe_reset_train_state()

    # wait until computations are done before exiting (for timing!)
    jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()

    # return the train and eval summary after last step for regresesion testing
    return train_summary, eval_summary

  def _train_loop(self, eval_env_ids, iter_end_step, iter_start_step,
                  labeled_env_ids, labeled_iters, layer_keys, master,
                  mixup_layers, unlabeled_env_ids, unlabeled_iters):
    """Runs training steps in [iter_start_step, iter_end_step].

    Returns:
      (train_summary, eval_summary) after the last step.
    """
    train_metrics = []
    train_summary, eval_summary = None, None
    tick = time.time()
    for step in range(iter_start_step + 1, iter_end_step + 1):
      labeled_batches = self.get_next_batch(labeled_iters)
      unlabeled_batches = self.get_next_batch(unlabeled_iters)
      sampled_layer = layer_keys[mixup_layers[step % len(mixup_layers)]]
      self.train_state, t_metrics = self.pmapped_train_step(
          self.train_state, self.teacher_train_state, labeled_batches,
          unlabeled_batches, sampled_layer)
      t_metrics = jax.tree_map(lambda x: x[0], t_metrics)
      train_metrics.append(t_metrics)
      (eval_summary, train_metrics, train_summary,
       tick) = self.maybe_eval_and_log(eval_env_ids, eval_summary, master, step,
                                       tick, train_metrics, train_summary)
      # Sync and save
      self.checkpoint(self.train_state, step)
    # BUG FIX: the caller in ``train`` unpacks ``train_summary, eval_summary``;
    # previously the two values were returned in swapped order.
    return train_summary, eval_summary

  def get_env_total_steps(self, labeled_env):
    """Number of training steps for one environment (epochs * steps/epoch)."""
    env_n_exmpls = self.task.dataset.splits['train'][labeled_env].num_examples
    steps_per_epoch = env_n_exmpls // self.hparams.batch_size
    env_total_steps = (steps_per_epoch * self.hparams.num_training_epochs)
    return env_total_steps

  def maybe_gradual_interpolation(
      self, batch, unlabeled_batch, env_ids, unlabeled_env_ids, flax_model,
      interpolate_fn, sampled_layer, selected_env_reps,
      selected_unlabeled_env_reps, sampled_reps, sampled_unlabeled_reps, logits,
      unlabled_logits, train_state, teacher_train_state):
    """Interpolates labeled batches toward unlabeled ones and computes logits."""
    # Compute alignment based on the selected reps.
    aligned_pairs = self.task.get_bipartite_env_aligned_pairs_idx(
        selected_env_reps, batch, env_ids, selected_unlabeled_env_reps,
        unlabeled_batch, unlabeled_env_ids)
    pair_keys, matching_matrix = zip(*aligned_pairs.items())
    matching_matrix = jnp.array(matching_matrix)

    # Convert pair keys to pair ids (indices in the env_ids list).
    pair_ids = [(env_ids.index(int(x[0])), unlabeled_env_ids.index(int(x[1])))
                for x in pair_keys]

    # Get sampled layer activations and group them similar to env pairs.
    paired_reps = jnp.array([(sampled_reps[envs[0]],
                              sampled_unlabeled_reps[envs[1]])
                             for envs in pair_ids])

    # Set alpha and beta for sampling lambda:
    beta_params = pipeline_utils.get_weight_param(self.hparams,
                                                  'unlabeled_beta', 1.0)
    alpha_params = pipeline_utils.get_weight_param(self.hparams,
                                                   'unlabeled_alpha', 1.0)
    step = train_state.global_step
    beta = pipeline_utils.scheduler(step, beta_params)
    alpha = pipeline_utils.scheduler(step, alpha_params)
    if self.hparams.get('unlabeled_lambda_params', None):
      lambda_params = pipeline_utils.get_weight_param(self.hparams,
                                                      'unlabeled_lambda', .0)
      lmbda = pipeline_utils.scheduler(step, lambda_params)
    else:
      lmbda = -1

    # Get interpolated reps for each en pair:
    inter_reps, sample_lambdas = interpolate_fn(
        jax.random.split(nn.make_rng(), len(paired_reps[:, 0])),
        matching_matrix, paired_reps[:, 0], paired_reps[:, 1],
        self.hparams.get('num_of_lambda_samples_for_inter_mixup',
                         1), alpha, beta, lmbda)

    # Get interpolated batches for each env pair:
    interpolated_batches = self.get_interpolated_batches(
        batch, inter_reps, pair_ids, sample_lambdas,
        self.hparams.get('interpolation_method', 'plain_convex_combination'))
    if self.hparams.get('stop_gradient_for_interpolations', False):
      interpolated_batches = jax.lax.stop_gradient(interpolated_batches)

    if self.hparams.get('interpolated_labels'):
      # Get logits for the interpolated states by interpoting pseudo labels on
      # source and target.
      if self.hparams.get('interpolation_method') == 'plain_convex_combination':
        teacher_interpolated_logits = jax.vmap(tensor_util.convex_interpolate)(
            logits, unlabled_logits, sample_lambdas)
      else:
        teacher_interpolated_logits = logits
    else:
      # Get logits for the interpolated states from the teacher.
      teacher_interpolated_logits, _, _, _ = self.forward_pass(
          teacher_train_state.optimizer.target, teacher_train_state,
          interpolated_batches, nn.make_rng(), sampled_layer)

    # Do we want to propagate the gradients to the teacher?
    if self.hparams.get('stop_gradient_for_teacher', True):
      teacher_interpolated_logits = jax.lax.stop_gradient(
          teacher_interpolated_logits)

    for i in range(len(interpolated_batches)):
      (interpolated_batches[i]['label'],
       interpolated_batches[i]['weights']) = pipeline_utils.logit_transformer(
           logits=teacher_interpolated_logits[i],
           temp=self.hparams.get('label_temp') or 1.0,
           confidence_quantile_threshold=self.hparams.get(
               'confidence_quantile_threshold', 0.1),
           self_supervised_label_transformation=self.hparams.get(
               'self_supervised_label_transformation', 'sharp'),
           logit_indices=None)

    # Compute logits for the interpolated states:
    (_, interpolated_logits, _,
     train_state) = self.stateful_forward_pass(flax_model, train_state,
                                               interpolated_batches,
                                               sampled_layer)

    return (interpolated_batches, interpolated_logits, sample_lambdas, alpha,
            beta, train_state)

  def train_step(self, train_state, teacher_train_state, batch,
                 unlabeled_batches, sampled_layer, env_ids, unlabeled_env_ids):
    """Runs a single step of training.

    Given the state of the training and a batch of data, computes
    the loss and updates the parameters of the model.

    Args:
      train_state: TrainState; The state of training including the current
        global_step, model_state, rng, and optimizer.
      teacher_train_state: TrainState; The state of training for the teacher
        (including the current global_step, model_state, rng, and optimizer).
      batch: list(dict); A batch of data for each environment in the labeld set.
      unlabeled_batches: list(dict); A batch of data for each environment in the
        unlabeld set.
      sampled_layer: str; Name of the layer on which mixup is applied.
      env_ids: list(int); List of labeled training environments ids.
      unlabeled_env_ids: list(int); List of unlabeled environments ids.

    Returns:
      Updated state of training and calculated metrics.
    """
    max_grad_norm = self.hparams.get('max_grad_norm', None)
    new_rng, rng = jax.random.split(train_state.rng)

    # bind the rng to the host/device we are on.
    model_rng = pipeline_utils.bind_rng_to_host_device(
        rng, axis_name='batch', bind_to=['host', 'device'])

    train_loss_fn = functools.partial(
        self.training_loss_fn,
        train_state=train_state,
        teacher_train_state=teacher_train_state,
        batch=batch,
        unlabeled_batch=unlabeled_batches,
        dropout_rng=model_rng,
        env_ids=env_ids,
        unlabeled_env_ids=unlabeled_env_ids,
        sampled_layer=sampled_layer)

    new_train_state, metrics_dict = self.compute_grads_and_update(
        batch, env_ids, max_grad_norm, new_rng, train_loss_fn, train_state)

    return new_train_state, metrics_dict

  def get_learning_rate(self, step):
    """Learning rate for ``step``, optionally restarted per self-training iter."""
    if self.hparams.get('restart_learning_rate'):
      step = step % self.iter_total_steps
    lr = self.learning_rate_fn(step)
    return lr
|
|
#
# Class to create and store a street map.
# Author: James P. Biagioni ([email protected])
# Company: University of Illinois at Chicago
# Created: 6/6/11
#
import sqlite3
import pyximport; pyximport.install()
from pylibs import spatialfunclib
from pylibs import spatialfunclib_accel
from rtree import Rtree
# global parameters
intersection_size = 50.0 # meters
class Node:
    """A point in the street graph, linked to its in/out neighbor nodes."""

    # class-wide counter used to allocate fresh ids
    id_counter = 1

    def __init__(self, latitude, longitude, id=None, weight=0.0):
        """Create a node; allocates an id from the class counter when none is given."""
        if id is None:
            id = Node.id_counter
            Node.id_counter += 1
        else:
            # keep the counter ahead of any explicitly supplied id
            Node.id_counter = max(Node.id_counter, id + 1)
        self.id = id
        self.latitude = latitude
        self.longitude = longitude
        self.weight = weight
        self.in_nodes = []       # predecessor nodes
        self.out_nodes = []      # successor nodes
        self.intersection = None # set by Intersection when clustered
        self.visited = False     # scratch flag for graph traversals

    def coords(self):
        """Return this node's location as a (latitude, longitude) tuple."""
        return (self.latitude, self.longitude)

    def distance_to(self, lat, lon):
        """Distance in meters from this node to (lat, lon)."""
        # accelerated (Cython) variant of spatialfunclib.distance
        return spatialfunclib_accel.fast_distance(self.latitude, self.longitude, lat, lon)
class Edge:
    """A directed connection between two Nodes, optionally part of a Segment."""

    # class-wide counter used to allocate fresh ids
    id_counter = 1

    def __init__(self, in_node, out_node, id=None, weight=0.0, segment=None):
        """Create an edge; allocates an id from the class counter when none is given."""
        if id is None:
            id = Edge.id_counter
            Edge.id_counter += 1
        else:
            # keep the counter ahead of any explicitly supplied id
            Edge.id_counter = max(Edge.id_counter, id + 1)
        self.id = id
        self.in_node = in_node
        self.out_node = out_node
        self.weight = weight
        self.segment = segment
        self.in_edges = []   # edges feeding into in_node
        self.out_edges = []  # edges leaving out_node
        self.visited = False # scratch flag for graph traversals

    @property
    def length(self):
        """Length of this edge in meters."""
        return spatialfunclib.distance(
            self.in_node.latitude, self.in_node.longitude,
            self.out_node.latitude, self.out_node.longitude)

    @property
    def bearing(self):
        """Bearing of this edge, from in_node toward out_node."""
        return spatialfunclib.path_bearing(
            self.in_node.latitude, self.in_node.longitude,
            self.out_node.latitude, self.out_node.longitude)

    def point_at_meters_along(self, meters):
        """Return the point located `meters` along the edge from in_node."""
        return spatialfunclib.point_along_line(
            self.in_node.latitude, self.in_node.longitude,
            self.out_node.latitude, self.out_node.longitude,
            meters / self.length)
class Segment:
    """An ordered chain of Edges treated as a single unit of road."""

    # class-wide counter used to allocate fresh ids
    id_counter = 1

    def __init__(self, id=None, edges=None):
        """Create a segment.

        Args:
            id: optional explicit id; when given, the class counter is
                advanced past it, otherwise a fresh id is allocated.
            edges: ordered list of Edge objects composing this segment.

        Fix: the original signature used a mutable default (`edges=[]`),
        so every default-constructed Segment shared one list object; `None`
        is now the sentinel and each instance gets its own fresh list.
        """
        if id is not None:
            Segment.id_counter = max(Segment.id_counter, id + 1)
        else:
            id = Segment.id_counter
            Segment.id_counter += 1
        self.id = id
        self.edges = [] if edges is None else edges

    @property
    def head_edge(self):
        """First edge in the chain."""
        return self.edges[0]

    @property
    def length(self):
        """Total length of the segment in meters (sum of edge lengths)."""
        total = 0.0  # renamed from `sum`, which shadowed the builtin
        for edge in self.edges:
            total += edge.length
        return total

    @property
    def tail_edge(self):
        """Last edge in the chain."""
        return self.edges[-1]

    # if you get Nones in this list, that's because you didn't set the segment in the Edge
    def out_segments(self):
        """Segments reachable through the tail edge's out_edges."""
        return [x.segment for x in self.edges[-1].out_edges]

    # if you get Nones in this list, that's because you didn't set the segment in the Edge
    def in_segments(self):
        """Segments feeding into the head edge via its in_edges."""
        return [x.segment for x in self.edges[0].in_edges]
class Intersection:
    """A cluster of nearby intersection nodes collapsed to one mean location."""

    def __init__(self, id, nodes):
        """Create an intersection located at the centroid of `nodes`.

        Side effect: each member node's `intersection` attribute is pointed
        at this Intersection.
        """
        self.id = id
        self.nodes = nodes
        (self.latitude, self.longitude) = self._find_mean_location(nodes)

    def _find_mean_location(self, nodes):
        """Return the mean (latitude, longitude) of `nodes`.

        Also tags every node with this intersection. Fix: the original
        iterated `self.nodes`, leaving the `nodes` parameter dead (the values
        were the same, but the signature was misleading); it now uses the
        argument it is given.
        """
        # accumulate coordinates over the member nodes
        latitude = 0.0
        longitude = 0.0
        for node in nodes:
            latitude += node.latitude
            longitude += node.longitude
            # record membership on the node itself
            node.intersection = self
        # average the accumulated values
        return (latitude / len(nodes), longitude / len(nodes))
class StreetMap:
    """In-memory directed street graph with R-tree spatial indexes.

    Nodes, edges, segments and intersections can be loaded from three
    database flavors (OSM sqlite dump, a pre-built graph db, or a GTFS
    shapes db); lookup tables map (node, node) and (coords, coords) pairs
    back to edges.

    NOTE(review): this module is Python 2 (print statements, list-returning
    map()). `sys` and `time` are imported at the *bottom* of the file, which
    only works because these methods run after module import completes.
    """
    def __init__(self):
        self.nodes = {} # indexed by node id
        self.edges = {} # indexed by edge id
        self.intersections = {} # indexed by node id
        self.node_spatial_index = Rtree()
        self.edge_spatial_index = Rtree()
        self.intersection_spatial_index = Rtree()
        self.edge_lookup_table = {} # indexed by (in_node,out_node)
        self.edge_coords_lookup_table = {} # indexed by (in_node.coords, out_node.coords)
        self.segments = {} # indexed by segment id
        self.segment_lookup_table = {} # indexed by (head_edge.in_node, tail_edge.out_node)
    def load_osmdb(self, osmdb_filename):
        """Load nodes/edges from an OSM sqlite db, keep valid highways, build indexes."""
        # connect to OSMDB
        conn = sqlite3.connect(osmdb_filename)
        # grab cursor
        cur = conn.cursor()
        # output that we are loading nodes
        sys.stdout.write("\nLoading nodes... ")
        sys.stdout.flush()
        # execute query on nodes table
        cur.execute("select id, lat, lon from nodes")
        query_result = cur.fetchall()
        # iterate through all query results
        for id, lat, lon in query_result:
            # create and store node in nodes dictionary
            self.nodes[int(id)] = Node(float(lat), float(lon), int(id))
        print "done."
        # output that we are loading edges
        sys.stdout.write("Loading edges... ")
        sys.stdout.flush()
        # execute query on ways table
        cur.execute("select id, tags, nds from ways")
        query_result = cur.fetchall()
        # storage for nodes used in valid edges
        valid_edge_nodes = {} # indexed by node id
        # iterate through all query results
        for id, tags, nodes in query_result:
            # grab tags associated with current way
            # NOTE(review): eval() on database contents executes arbitrary
            # code if the DB is untrusted -- ast.literal_eval would be safer.
            way_tags_dict = eval(tags)
            # if current way is a valid highway
            if ('highway' in way_tags_dict.keys() and self._valid_highway_edge(way_tags_dict['highway'])):
                # grab all nodes that compose this way
                way_nodes_list = eval(nodes)
                # iterate through list of way nodes
                for i in range(1, len(way_nodes_list)):
                    # grab in_node from nodes dictionary
                    in_node = self.nodes[int(way_nodes_list[i - 1])]
                    # grab out_node from nodes dictionary
                    out_node = self.nodes[int(way_nodes_list[i])]
                    # create edge_id based on way id
                    edge_id = int(str(id) + str(i - 1) + "000000")
                    # if either node on the edge is valid
                    if (True): #self._valid_node(in_node) or self._valid_node(out_node)):
                        # create and store edge in edges dictionary
                        self.edges[int(edge_id)] = Edge(in_node, out_node,int(edge_id))
                        # store in_node in out_node's in_nodes list
                        if (in_node not in out_node.in_nodes):
                            out_node.in_nodes.append(in_node)
                        # store out_node in in_node's out_nodes list
                        if (out_node not in in_node.out_nodes):
                            in_node.out_nodes.append(out_node)
                        # if edge is bidirectional
                        if ('oneway' not in way_tags_dict.keys()):
                            # create new symmetric edge id
                            # NOTE(review): relies on Python 2 integer
                            # division; under Python 3, edge_id / 10 yields a
                            # float and this id scheme breaks.
                            symmetric_edge_id = int(str(edge_id / 10) + "1")
                            # create and store symmetric edge in edges dictionary
                            self.edges[int(symmetric_edge_id)] = Edge(out_node, in_node, int(symmetric_edge_id))
                            # store in_node in out_node's out_nodes list
                            if (in_node not in out_node.out_nodes):
                                out_node.out_nodes.append(in_node)
                            # store out_node in in_node's in_nodes list
                            if (out_node not in in_node.in_nodes):
                                in_node.in_nodes.append(out_node)
                        # store in_node in valid_edge_nodes dictionary
                        if (in_node.id not in valid_edge_nodes.keys()):
                            valid_edge_nodes[in_node.id] = in_node
                        # store out_node in valid_edge_nodes dictionary
                        if (out_node.id not in valid_edge_nodes.keys()):
                            valid_edge_nodes[out_node.id] = out_node
        print "done."
        # close connection to OSMDB
        conn.close()
        # replace all nodes with valid edge nodes
        self.nodes = valid_edge_nodes
        # index nodes
        self._index_nodes()
        # index edges
        self._index_edges()
        # find and index intersections
        self._find_and_index_intersections()
        # output map statistics
        print "Map has " + str(len(self.nodes)) + " nodes, " + str(len(self.edges)) + " edges and " + str(len(self.intersections)) + " intersections."
    def load_graphdb(self, grapdb_filename):
        """Load nodes/edges/segments/intersections from a pre-built graph sqlite db."""
        # connect to graph database
        conn = sqlite3.connect(grapdb_filename)
        # grab cursor
        cur = conn.cursor()
        # output that we are loading nodes
        sys.stdout.write("\nLoading nodes... ")
        sys.stdout.flush()
        # execute query on nodes table
        cur.execute("select id, latitude, longitude, weight from nodes")
        query_result = cur.fetchall()
        # iterate through all query results
        for id, latitude, longitude, weight in query_result:
            # create and store node in nodes dictionary
            self.nodes[id] = Node(latitude, longitude, id, weight)
        print "done."
        # output that we are loading edges
        sys.stdout.write("Loading edges... ")
        sys.stdout.flush()
        # execute query on ways table
        cur.execute("select id, in_node, out_node, weight from edges")
        query_result = cur.fetchall()
        # storage for nodes used in valid edges
        valid_edge_nodes = {} # indexed by node id
        # iterate through all query results
        for id, in_node_id, out_node_id, weight in query_result:
            # grab in_node from nodes dictionary
            in_node = self.nodes[in_node_id]
            # grab out_node from nodes dictionary
            out_node = self.nodes[out_node_id]
            # if either node on the edge is valid
            if (True): #self._valid_node(in_node) or self._valid_node(out_node)):
                # create and store edge in edges dictionary
                self.edges[id] = Edge(in_node, out_node, id, weight)
                # store in_node in out_node's in_nodes list
                if (in_node not in out_node.in_nodes):
                    out_node.in_nodes.append(in_node)
                # store out_node in in_node's out_nodes list
                if (out_node not in in_node.out_nodes):
                    in_node.out_nodes.append(out_node)
                # store in_node in valid_edge_nodes dictionary
                if (in_node.id not in valid_edge_nodes.keys()):
                    valid_edge_nodes[in_node.id] = in_node
                # store out_node in valid_edge_nodes dictionary
                if (out_node.id not in valid_edge_nodes.keys()):
                    valid_edge_nodes[out_node.id] = out_node
        # execute query on segments table
        cur.execute("select id, edge_ids from segments")
        query_result = cur.fetchall()
        # NOTE(review): eval() on DB contents again; also Python 2 map()
        # returns a list here -- under Python 3 this would be an iterator.
        for id, edge_ids in query_result:
            segment_edges = map(lambda edge_id: self.edges[edge_id], eval(edge_ids))
            self.segments[id] = Segment(id, segment_edges)
            self.segment_lookup_table[(self.segments[id].head_edge.in_node, self.segments[id].tail_edge.out_node)] = self.segments[id]
            for segment_edge in segment_edges:
                segment_edge.segment = self.segments[id]
            # self.segment_lookup_table[segment_edge.id] = self.segments[id]
        # execute query on intersections table
        cur.execute("select node_id from intersections")
        query_result = cur.fetchall()
        for node_id in query_result:
            self.intersections[node_id[0]] = self.nodes[node_id[0]]
        # transitions table is optional; self.transitions is only defined
        # when the query succeeds.
        try:
            cur.execute("select transition_segment, from_segment, to_segment from transitions");
            query_result = cur.fetchall()
            self.transitions={}
            for transition_segment, from_segment, to_segment in query_result:
                self.transitions[transition_segment]=(from_segment,to_segment)
        # NOTE(review): bare except hides the real error (missing table vs.
        # anything else) -- sqlite3.OperationalError would be the precise catch.
        except:
            print "Got an error reading "
        print "done."
        # close connection to graph db
        conn.close()
        # replace all nodes with valid edge nodes
        self.nodes = valid_edge_nodes
        # index nodes
        self._index_nodes()
        # index edges
        self._index_edges()
        # find and index intersections
        #self._find_and_index_intersections()
        # output map statistics
        print "Map has " + str(len(self.nodes)) + " nodes, " + str(len(self.edges)) + " edges, " + str(len(self.segments)) + " segments and " + str(len(self.intersections)) + " intersections."
    def load_shapedb(self, shapedb_filename):
        """Load nodes/edges from a GTFS shapes sqlite db for a hard-coded set of routes.

        Each selected shape becomes a closed loop: consecutive shape points
        are connected, and a final edge links the last point back to the first.
        """
        # connect to graph database
        conn = sqlite3.connect(shapedb_filename)
        # grab cursor
        cur = conn.cursor()
        # execute query to find all shape ids
        cur.execute("select distinct shape_id from shapes")
        # output that we are loading nodes and edges
        sys.stdout.write("\nLoading nodes and edges... ")
        sys.stdout.flush()
        # storage for shape specific edges
        self.shape_edges = {} # indexed by shape_id
        # storage for node id
        node_id = 0
        # iterate through all shape ids
        for shape_id in cur.fetchall():
            # grab shape id
            shape_id = shape_id[0]
            # if route is a bus route
            if (shape_id == "0" or shape_id == "11" or shape_id == "15" or shape_id == "41" or shape_id == "65" or shape_id == "22"):
                # execute query to find all shape points
                # NOTE(review): SQL built by string concatenation; shape_id
                # comes from the same DB here, but a parameterized query
                # (cur.execute(..., (shape_id,))) would be the safe form.
                cur.execute("select shape_pt_lat, shape_pt_lon from shapes where shape_id='" + str(shape_id) + "' order by shape_pt_sequence asc")
                # amend shape id
                if (shape_id == "0"):
                    shape_id = "10000000"
                elif (shape_id == "11"):
                    shape_id = "10000011"
                elif (shape_id == "41"):
                    shape_id = "10000041"
                elif (shape_id == "15"):
                    shape_id = "10000015"
                elif (shape_id == "65"):
                    shape_id = "10000065"
                elif (shape_id == "22"):
                    shape_id = "10000022"
                # storage for first node
                first_node = None
                # storage for previous node
                prev_node = None
                # create list for this shape's edges
                self.shape_edges[shape_id] = []
                # iterate through all shape points
                for shape_pt_lat, shape_pt_lon in cur.fetchall():
                    # create new node
                    curr_node = Node(shape_pt_lat, shape_pt_lon, node_id)
                    # store first node
                    if (first_node is None):
                        first_node = curr_node
                    # increment node id
                    node_id += 1
                    # add shape id to node
                    curr_node.shape_id = shape_id
                    # store new node in nodes dictionary
                    self.nodes[node_id] = curr_node
                    # if there exists a previous node
                    if (prev_node is not None):
                        # create edge id
                        edge_id = int(str(shape_id) + str(prev_node.id) + str(curr_node.id))
                        # create new edge
                        curr_edge = Edge(prev_node, curr_node, edge_id)
                        # add shape id to edge
                        curr_edge.shape_id = shape_id
                        # store new edge in edges dictionary
                        self.edges[edge_id] = curr_edge
                        # store new edge in shape edges dictionary
                        self.shape_edges[shape_id].append(curr_edge)
                        # store previous node in current node's in_nodes list
                        curr_node.in_nodes.append(prev_node)
                        # store current node in previous node's out_nodes list
                        prev_node.out_nodes.append(curr_node)
                    # update previous node
                    prev_node = curr_node
                # create edge id for last edge (closes the loop back to the start)
                edge_id = int(str(shape_id) + str(prev_node.id) + str(first_node.id))
                # create new edge
                curr_edge = Edge(prev_node, first_node, edge_id)
                # add shape id to edge
                curr_edge.shape_id = shape_id
                # store new edge in edges dictionary
                self.edges[edge_id] = curr_edge
                # store new edge in shape edges dictionary
                self.shape_edges[shape_id].append(curr_edge)
                # store previous node in first node's in_nodes list
                first_node.in_nodes.append(prev_node)
                # store first node in previous node's out_nodes list
                prev_node.out_nodes.append(first_node)
        print "done."
        # close connection to gtfs db
        conn.close()
        # index nodes
        self._index_nodes()
        # index edges
        self._index_edges()
        # find and index intersections
        self._find_and_index_intersections()
        # output map statistics
        print "Map has " + str(len(self.nodes)) + " nodes, " + str(len(self.edges)) + " edges and " + str(len(self.intersections)) + " intersections."
    def _index_nodes(self):
        """Insert every node into the node R-tree, keyed by node id."""
        # output that we are indexing nodes
        sys.stdout.write("Indexing nodes... ")
        sys.stdout.flush()
        # iterate through all nodes
        for curr_node in self.nodes.values():
            # insert node into spatial index (x=longitude, y=latitude)
            self.node_spatial_index.insert(curr_node.id, (curr_node.longitude, curr_node.latitude))
        print "done."
    def _index_edges(self):
        """Insert edges into the edge R-tree/lookup tables and wire in/out edge lists."""
        # output that we are indexing edges
        sys.stdout.write("Indexing edges... ")
        sys.stdout.flush()
        # iterate through all edges
        for curr_edge in self.edges.values():
            # determine current edge minx, miny, maxx, maxy values
            curr_edge_minx = min(curr_edge.in_node.longitude, curr_edge.out_node.longitude)
            curr_edge_miny = min(curr_edge.in_node.latitude, curr_edge.out_node.latitude)
            curr_edge_maxx = max(curr_edge.in_node.longitude, curr_edge.out_node.longitude)
            curr_edge_maxy = max(curr_edge.in_node.latitude, curr_edge.out_node.latitude)
            # insert current edge into spatial index
            self.edge_spatial_index.insert(curr_edge.id, (curr_edge_minx, curr_edge_miny, curr_edge_maxx, curr_edge_maxy))
            # insert current edge into lookup table
            self.edge_lookup_table[(curr_edge.in_node, curr_edge.out_node)] = curr_edge
            self.edge_coords_lookup_table[(curr_edge.in_node.coords(), curr_edge.out_node.coords())] = curr_edge
        # iterate through all edges (second pass: connect edge adjacency
        # lists; requires the lookup table to be fully populated first)
        for edge in self.edges.values():
            # iterate through all out edges
            for out_node_neighbor in edge.out_node.out_nodes:
                # add out edge to out edges list
                edge.out_edges.append(self.edge_lookup_table[(edge.out_node, out_node_neighbor)])
            # iterate through all in edges
            for in_node_neighbor in edge.in_node.in_nodes:
                # add in edge to in edges list
                edge.in_edges.append(self.edge_lookup_table[(in_node_neighbor, edge.in_node)])
        print "done."
    def _find_and_index_intersections(self):
        """Cluster nearby intersection nodes (within `intersection_size` meters) into Intersections."""
        # output that we are finding and indexing intersections
        sys.stdout.write("Finding and indexing intersections... ")
        sys.stdout.flush()
        # find intersection nodes and index
        (intersection_nodes, intersection_nodes_index) = self._find_intersection_nodes()
        # storage for intersection nodes already placed in intersections
        placed_intersection_nodes = set()
        # define longitude/latitude offset for bounding box
        lon_offset = ((intersection_size / 2.0) / spatialfunclib.METERS_PER_DEGREE_LONGITUDE)
        lat_offset = ((intersection_size / 2.0) / spatialfunclib.METERS_PER_DEGREE_LATITUDE)
        # storage for intersection id
        intersection_id = 0
        # iterate through intersection nodes
        for intersection_node in intersection_nodes:
            # if the intersection node has not yet been placed
            if (intersection_node not in placed_intersection_nodes):
                # create bounding box
                bounding_box = (intersection_node.longitude - lon_offset, intersection_node.latitude - lat_offset, intersection_node.longitude + lon_offset, intersection_node.latitude + lat_offset)
                # find intersection node ids within bounding box
                intersection_node_ids = intersection_nodes_index.intersection(bounding_box)
                # get intersection nodes
                # NOTE(review): this rebinds the very list being iterated by
                # the outer for-loop. CPython keeps iterating the original
                # list object (the loop holds its own iterator), but the name
                # reuse is fragile and worth renaming.
                intersection_nodes = map(self._get_node, intersection_node_ids)
                # add intersection nodes to placed set
                placed_intersection_nodes.update(intersection_nodes)
                # create new intersection
                new_intersection = Intersection(intersection_id, intersection_nodes)
                # increment intersection id
                intersection_id += 1
                # add new intersection to intersections list
                self.intersections[new_intersection.id] = new_intersection
                # insert new intersection into spatial index
                self.intersection_spatial_index.insert(new_intersection.id, (new_intersection.longitude, new_intersection.latitude))
        print "done."
    def _get_node(self, node_id):
        """Return the node with the given id."""
        # return node from dictionary
        return self.nodes[node_id]
    def _find_intersection_nodes(self):
        """Return (list, Rtree) of nodes with more than two distinct neighbors."""
        # storage for intersection nodes
        intersection_nodes = []
        # spatial index for intersection nodes
        intersection_nodes_index = Rtree()
        # iterate through all nodes in map
        for curr_node in self.nodes.values():
            # set storage for current node's unique neighbors
            neighbors = set()
            # iterate through all in_nodes
            for in_node in curr_node.in_nodes:
                # add in_node to neighbors set
                neighbors.add(in_node)
            # iterate through all out_nodes
            for out_node in curr_node.out_nodes:
                # add out_node to neighbors set
                neighbors.add(out_node)
            # if current node has more than 2 neighbors
            if (len(neighbors) > 2):
                # add current node to intersection nodes list
                intersection_nodes.append(curr_node)
                # add current node to intersection nodes index
                intersection_nodes_index.insert(curr_node.id, (curr_node.longitude, curr_node.latitude))
        # return intersection nodes and index
        return (intersection_nodes, intersection_nodes_index)
    def _valid_node(self, node):
        """Return True when the node lies inside a hard-coded Chicago bounding box."""
        # if node falls inside the designated bounding box
        if ((node.latitude >= 41.8619 and node.latitude <= 41.8842) and
            (node.longitude >= -87.6874 and node.longitude <= -87.6398)):
            return True
        else:
            return False
    def _valid_highway_edge(self, highway_tag_value):
        """Return True for OSM highway classes this map keeps (primary..residential)."""
        if ((highway_tag_value == 'primary') or
            (highway_tag_value == 'secondary') or
            (highway_tag_value == 'tertiary') or
            (highway_tag_value == 'residential')):
            return True
        else:
            return False
    def reset_node_visited_flags(self):
        """Clear the traversal `visited` flag on every node."""
        # iterate through all nodes
        for node in self.nodes.values():
            # set node visited flag to False
            node.visited = False
    def reset_edge_visited_flags(self):
        """Clear the traversal `visited` flag on every edge."""
        # iterate through all edges
        for edge in self.edges.values():
            # set edge visited flag to False
            edge.visited = False
    def write_map_to_file(self, map_filename="map.txt"):
        """Dump all edges as 'lat,lon' endpoint pairs separated by blank lines."""
        # output that we are starting the writing process
        sys.stdout.write("\nWriting map to file... ")
        sys.stdout.flush()
        # open map file
        map_file = open(map_filename, 'w')
        # iterate through all map edges
        for curr_edge in self.edges.values():
            # output current edge to file
            map_file.write(str(curr_edge.in_node.latitude) + "," + str(curr_edge.in_node.longitude) + "\n")
            map_file.write(str(curr_edge.out_node.latitude) + "," + str(curr_edge.out_node.longitude) + "\n\n")
        # close map file
        map_file.close()
        print "done."
    def _distance(self, location1, location2):
        """Distance in meters between two objects exposing .latitude/.longitude."""
        return spatialfunclib.distance(location1.latitude, location1.longitude, location2.latitude, location2.longitude)
import sys
import time

if __name__ == '__main__':
    usage = "usage: python streetmap.py (osmdb|graphdb|shapedb) db_filename output_filename"

    if len(sys.argv) != 4:
        # wrong argument count: show usage and exit with a failure status
        # (was the bare builtin exit(), which is meant for interactive use)
        print(usage)
        sys.exit(1)

    start_time = time.time()
    db_type = sys.argv[1]
    db_filename = sys.argv[2]
    output_filename = sys.argv[3]

    m = StreetMap()
    # dispatch table maps each database type to its loader method
    loaders = {"osmdb": m.load_osmdb,
               "graphdb": m.load_graphdb,
               "shapedb": m.load_shapedb}
    if db_type not in loaders:
        # fix: previously the "operations complete" banner was printed even
        # after this error; now we exit with a failure status instead
        print("Error! '" + str(db_type) + "' is an unknown database type")
        sys.exit(1)
    loaders[db_type](db_filename)
    m.write_map_to_file(str(output_filename))

    print("\nMap operations complete (in " + str(time.time() - start_time) + " seconds).\n")
|
|
import csv
import getpass
import hashlib
import functools
import settings
import modules
class system():
    """Represents a huge megasystem.

    A small interactive shell over a CSV-backed user database: commands are
    dispatched from self._cmds, access is gated per-command by user group.

    NOTE(review): Python 2 code (raw_input, csv files opened in 'rb'/'wb').
    """
    def __init__(self, args):
        """Constructor.

        Args:
            args: parsed CLI options; only .user and .password are read here
                (for auto-login in start_session).
        """
        self._args = args
        self._database_changed = False  # set whenever _user_list is modified
        self._running = False           # main-loop flag, cleared by stop_session
        self._welcome = """
        Welcome to %(package)s v%(version)s.
        Type "help" for more information.
        """ % {
            'package': settings.__name__,
            'version': settings.__version__
        }
        self._user = None    # currently logged-in user (None = logged out)
        self._user_list = []
        self._cmd = ''       # last raw command line read by _poll_command
        # command table: entries without 'groups' are available to everyone,
        # otherwise _check_access enforces membership
        self._cmds = [
            {
                'names': ['help', 'h'],
                'help': 'Print this help.',
                'callback': functools.partial(system._help_print, self)
            },
            {
                'names': ['exit', 'quit', 'q'],
                'help': 'Exit the program.',
                'callback': functools.partial(system.stop_session, self)
            },
            {
                'names': ['user-login', 'in'],
                'help': 'Login into the system.',
                'callback': functools.partial(system._user_login, self)
            },
            {
                'names': ['user-logout', 'out'],
                'help': 'Logout from the system.',
                'callback': functools.partial(system._user_logout, self)
            },
            {
                'names': ['user-register', 'mku'],
                'help': 'Add a new user to database.',
                'groups': ['admin'],
                'callback': functools.partial(system._user_register, self)
            },
            {
                'names': ['user-remove', 'rmu'],
                'help': 'Remove a new user from database.',
                'groups': ['admin'],
                'callback': functools.partial(system._user_remove, self)
            },
            {
                # NOTE(review): help text appears copy-pasted from
                # user-remove; it should describe changing a password.
                'names': ['user-change-password', 'chu'],
                'help': 'Remove a new user from database.',
                'groups': ['admin', 'user'],
                'callback': functools.partial(system._user_change_password, self)
            },
            {
                'names': ['user-show-list', 'lsu'],
                'help': 'Show all the registered users.',
                'groups': ['admin', 'user'],
                'callback': functools.partial(system._user_show_list, self)
            }
        ]
        self._load_database(settings.files['database'])
    def start_session(self):
        """Start system loop."""
        # optional auto-login from command-line credentials
        if self._args.user and self._args.password:
            self._user_login(self._args.user, self._args.password)
        print(self._welcome)
        self._check_no_users_condition()
        self._running = True
        while self._running:
            self._poll_command()
            self._process_command()
    def stop_session(self):
        """Flush database and cleanup."""
        self._running = False
        # persist only if something actually changed
        if self._database_changed:
            self._store_database(settings.files['database'])
    def _load_database(self, path):
        """Load a database.

        Reads CSV rows of (login, password-hash, group) into _user_list.
        """
        self._user_list = []
        with open(path, 'rb') as databasefile:
            reader = csv.reader(databasefile)
            for row in reader:
                self._user_list.append(modules.user.user(**{
                    'user': row[0],
                    'password': row[1],
                    'group': row[2]
                }))
        self._database_changed = False
    def _store_database(self, path):
        """Save database into a file."""
        with open(path, 'wb') as databasefile:
            writer = csv.writer(databasefile, quoting=csv.QUOTE_MINIMAL)
            for user in self._user_list:
                writer.writerow([user.login(), user.password(), user.group()])
        self._database_changed = False
    def _check_no_users_condition(self):
        """Seed the database with the default account when it is empty."""
        if not self._user_list:
            self._user_list.append(modules.user.user(**{
                'user': settings.default_user['user'],
                'password': self._user_get_hash(settings.default_user['user'], settings.default_user['password'], settings.default_user['group']),
                'group': settings.default_user['group']
            }))
            self._database_changed = True
            print("""
            It seems to be a first starting of the system.
            You can use login `%(login)s` and password `%(password)s`
            to login into the system.
            Change your default credentials for security reasons!
            """ % {
                'login': settings.default_user['user'],
                'password': settings.default_user['password']
            })
    def _poll_command(self):
        """Get next command."""
        # prompt shows the current login, or "None" when logged out
        prompt = '[%(user)s]> ' % {
            'user': self._user.login() if self._user else self._user
        }
        self._cmd = raw_input(prompt).strip()
    def _process_command(self):
        """Process current command.

        Looks the command up in the dispatch table, enforces access, and
        reports any raised error instead of crashing the loop.
        """
        try:
            if self._cmd:
                for cmd in self._cmds:
                    if self._cmd in cmd['names']:
                        self._check_access(cmd)
                        cmd['callback']()
                        return
                raise Exception('Unknown command \'%(command)s\'' % {
                    'command': self._cmd
                })
        except Exception as e:
            print(e)
    def _check_access(self, cmd):
        """Check if user's group allowed for command.

        Raises:
            Exception: when the command declares 'groups' and the current
                user is logged out or not in one of them.
        """
        reason = ''
        if 'groups' in cmd:
            if not self._user or self._user.group() not in cmd['groups']:
                reason = """
                You don\'t have permission to execute a command.
                Please, try to log in with another account or
                contact your system administrator.
                """
        if reason:
            raise Exception('Access denied. {reason}'.format(reason=reason))
    def _help_print(self):
        """Print out available commands list."""
        for cmd in self._cmds:
            print('\t%(name)-30s%(help)s' % {
                'name': ', '.join(cmd['names']),
                'help': cmd['help']
            })
    def _user_get_hash(self, login, password, group):
        """Return the stored credential hash for (login, password, group).

        NOTE(review): unsalted SHA-256 over concatenated fields is weak
        (rainbow-table-able; changing a user's group also invalidates the
        password). A per-user salt + PBKDF2/scrypt would be appropriate.
        In Python 3, hashlib requires bytes, so this needs .encode().
        """
        return hashlib.sha256(login + password + group).hexdigest()
    def _user_change_password(self):
        """Change the current user's password, then force re-login."""
        oldpass = self._user_get_password('User current password: ')
        userhash = self._user_get_hash(self._user.login(), oldpass, self._user.group())
        # wrong old password: drop the session before failing
        if self._user.password() != userhash:
            self._user_logout()
            raise Exception('Invalid password')
        newpass = self._user_get_password('Enter a new password: ')
        if not newpass:
            raise Exception('Invalid password')
        self._user.set_password(self._user_get_hash(self._user.login(), newpass, self._user.group()))
        # replace the stored record with the updated session copy
        for user in self._user_list:
            if user.login() == self._user.login() and user.password() == userhash and user.group() == self._user.group():
                self._user_list.remove(user)
                self._user_list.append(self._user)
                break
        self._user_logout()
        self._database_changed = True
        print('Password successfully changed. Log in into the system.')
    def _user_show_list(self):
        """Print out all the registered users."""
        for user in self._user_list:
            print('\t%(user)s' % {
                'user': user.login()
            })
    def _user_get_login(self):
        """Prompt for and return a login name."""
        return raw_input('User login: ')
    def _user_get_password(self, prompt = 'User password: '):
        """Prompt for a password without echoing it."""
        return getpass.getpass(prompt)
    def _user_register(self):
        """Create a new account."""
        login = self._user_get_login()
        if not login:
            raise Exception('Empty login')
        password = self._user_get_password()
        if password != self._user_get_password('Confirm user password: '):
            raise Exception('Password is not confirmed')
        group = raw_input('User group: ')
        if not group:
            raise Exception('Invalid group')
        # duplicate check: same login AND same credential hash AND same group
        for user in self._user_list:
            userhash = self._user_get_hash(user.login(), password, user.group())
            if login == user.login() and userhash == user.password() and group == user.group():
                raise Exception('User already exists')
        self._user_list.append(modules.user.user(**{
            'user': login,
            'password': self._user_get_hash(login, password, group),
            'group': group
        }))
        self._database_changed = True
    def _user_remove_confirm(self, user, login):
        """Ask for confirmation; return True when `user` should be removed."""
        if user.login() != login:
            return False
        prompt = 'Remove user %(login)s from group \'%(group)s\'? y/[N] ' % {
            'login': user.login(),
            'group': user.group()
        }
        if 'y' == raw_input(prompt).strip().lower():
            self._database_changed = True
            return True
        return False
    def _user_remove(self):
        """Remove a user from database."""
        login = self._user_get_login()
        # keep only the users the operator did not confirm for removal
        self._user_list = [user for user in self._user_list if not self._user_remove_confirm(user, login)]
    def _user_login(self, login = None, password = None):
        """Signin into the system.

        Args:
            login, password: optional credentials; prompted for when omitted.
        Raises:
            Exception: when already logged in or the credentials don't match.
        """
        if self._user:
            raise Exception('Please logout first')
        if not login:
            login = self._user_get_login()
        if not password:
            password = self._user_get_password()
        for user in self._user_list:
            if user.login() == login:
                userhash = self._user_get_hash(user.login(), password, user.group())
                if user.password() == userhash:
                    self._user = modules.user.user(user=login, password=userhash, group=user.group())
                    break
        if not self._user:
            raise Exception('Invalid login/password')
    def _user_logout(self):
        """Logout from the system."""
        self._user = None
|
|
"""The tests for the Tasmota switch platform."""
import copy
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_tele_state,
get_topic_tele_will,
)
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON, Platform
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message
from tests.components.switch import common
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
    """Test state update via MQTT."""
    device_config = copy.deepcopy(DEFAULT_CONFIG)
    device_config["rl"][0] = 1
    mac = device_config["mac"]

    async_fire_mqtt_message(
        hass,
        f"{DEFAULT_PREFIX}/{mac}/config",
        json.dumps(device_config),
    )
    await hass.async_block_till_done()

    entity_id = "switch.test"

    # Before the device reports online, the entity is unavailable.
    state = hass.states.get(entity_id)
    assert state.state == "unavailable"
    assert not state.attributes.get(ATTR_ASSUMED_STATE)

    async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
    await hass.async_block_till_done()
    state = hass.states.get(entity_id)
    assert state.state == STATE_OFF
    assert not state.attributes.get(ATTR_ASSUMED_STATE)

    # Both telemetry (tele/STATE) and command results (stat/RESULT) drive state.
    for topic, payload, expected in (
        ("tasmota_49A3BC/tele/STATE", '{"POWER":"ON"}', STATE_ON),
        ("tasmota_49A3BC/tele/STATE", '{"POWER":"OFF"}', STATE_OFF),
        ("tasmota_49A3BC/stat/RESULT", '{"POWER":"ON"}', STATE_ON),
        ("tasmota_49A3BC/stat/RESULT", '{"POWER":"OFF"}', STATE_OFF),
    ):
        async_fire_mqtt_message(hass, topic, payload)
        state = hass.states.get(entity_id)
        assert state.state == expected
async def test_sending_mqtt_commands(hass, mqtt_mock, setup_tasmota):
    """Test the sending MQTT commands."""
    device_config = copy.deepcopy(DEFAULT_CONFIG)
    device_config["rl"][0] = 1
    mac = device_config["mac"]

    async_fire_mqtt_message(
        hass,
        f"{DEFAULT_PREFIX}/{mac}/config",
        json.dumps(device_config),
    )
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
    await hass.async_block_till_done()
    state = hass.states.get("switch.test")
    assert state.state == STATE_OFF
    await hass.async_block_till_done()
    await hass.async_block_till_done()
    mqtt_mock.async_publish.reset_mock()

    # Drive the switch on then off; each service call must publish exactly one
    # MQTT command, and Tasmota is not optimistic so the state stays off until
    # the device echoes a new state.
    for service_call, expected_payload in (
        (common.async_turn_on, "ON"),
        (common.async_turn_off, "OFF"),
    ):
        await service_call(hass, "switch.test")
        mqtt_mock.async_publish.assert_called_once_with(
            "tasmota_49A3BC/cmnd/Power1", expected_payload, 0, False
        )
        mqtt_mock.async_publish.reset_mock()
        state = hass.states.get("switch.test")
        assert state.state == STATE_OFF
async def test_relay_as_light(hass, mqtt_mock, setup_tasmota):
    """Test relay does not show up as switch in light mode."""
    device_config = copy.deepcopy(DEFAULT_CONFIG)
    device_config["rl"][0] = 1
    device_config["so"]["30"] = 1  # Enforce Home Assistant auto-discovery as light
    mac = device_config["mac"]

    async_fire_mqtt_message(
        hass,
        f"{DEFAULT_PREFIX}/{mac}/config",
        json.dumps(device_config),
    )
    await hass.async_block_till_done()

    # The relay must be discovered as a light entity, not a switch.
    assert hass.states.get("switch.test") is None
    assert hass.states.get("light.test") is not None
async def test_availability_when_connection_lost(
    hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
    """Test availability after MQTT disconnection."""
    # Single enabled relay -> one switch entity for the shared helper to check.
    device_config = copy.deepcopy(DEFAULT_CONFIG)
    device_config["rl"][0] = 1
    await help_test_availability_when_connection_lost(
        hass, mqtt_client_mock, mqtt_mock, Platform.SWITCH, device_config
    )
async def test_availability(hass, mqtt_mock, setup_tasmota):
    """Test availability."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    await help_test_availability(hass, mqtt_mock, Platform.SWITCH, relay_config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
    """Test availability discovery update."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    await help_test_availability_discovery_update(
        hass, mqtt_mock, Platform.SWITCH, relay_config
    )
async def test_availability_poll_state(
    hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
    """Test polling after MQTT connection (re)established."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    await help_test_availability_poll_state(
        hass,
        mqtt_client_mock,
        mqtt_mock,
        Platform.SWITCH,
        relay_config,
        "tasmota_49A3BC/cmnd/STATE",
        "",
    )
async def test_discovery_removal_switch(hass, mqtt_mock, caplog, setup_tasmota):
    """Test removal of discovered switch."""
    # First discovery payload has the relay enabled, second has it disabled.
    config_with_relay = copy.deepcopy(DEFAULT_CONFIG)
    config_without_relay = copy.deepcopy(DEFAULT_CONFIG)
    config_with_relay["rl"][0] = 1
    config_without_relay["rl"][0] = 0
    await help_test_discovery_removal(
        hass, mqtt_mock, caplog, Platform.SWITCH, config_with_relay, config_without_relay
    )
async def test_discovery_removal_relay_as_light(hass, mqtt_mock, caplog, setup_tasmota):
    """Test removal of discovered relay as light."""
    as_switch = copy.deepcopy(DEFAULT_CONFIG)
    as_switch["rl"][0] = 1
    as_switch["so"]["30"] = 0  # Disable Home Assistant auto-discovery as light
    as_light = copy.deepcopy(DEFAULT_CONFIG)
    as_light["rl"][0] = 1
    as_light["so"]["30"] = 1  # Enforce Home Assistant auto-discovery as light
    await help_test_discovery_removal(
        hass, mqtt_mock, caplog, Platform.SWITCH, as_switch, as_light
    )
async def test_discovery_update_unchanged_switch(
    hass, mqtt_mock, caplog, setup_tasmota
):
    """Test update of discovered switch."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    patcher = patch(
        "homeassistant.components.tasmota.switch.TasmotaSwitch.discovery_update"
    )
    with patcher as discovery_update:
        await help_test_discovery_update_unchanged(
            hass, mqtt_mock, caplog, Platform.SWITCH, relay_config, discovery_update
        )
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
    """Test device registry remove."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    # Unique id format: "<mac>_switch_relay_<index>".
    unique_id = "{}_switch_relay_0".format(DEFAULT_CONFIG["mac"])
    await help_test_discovery_device_remove(
        hass, mqtt_mock, Platform.SWITCH, unique_id, relay_config
    )
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
    """Test MQTT subscriptions are managed when entity_id is updated."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    # All topics the entity is expected to (re)subscribe to.
    subscribed_topics = [
        get_topic_stat_result(relay_config),
        get_topic_tele_state(relay_config),
        get_topic_tele_will(relay_config),
    ]
    await help_test_entity_id_update_subscriptions(
        hass, mqtt_mock, Platform.SWITCH, relay_config, subscribed_topics
    )
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
    """Test MQTT discovery update when entity_id is updated."""
    relay_config = copy.deepcopy(DEFAULT_CONFIG)
    relay_config["rl"][0] = 1
    await help_test_entity_id_update_discovery_update(
        hass, mqtt_mock, Platform.SWITCH, relay_config
    )
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
extract_books.py
A script to extract the list of books valid for participating in the
Wikisource contest.
This script is part of wscontest-votecounter.
(<https://github.com/CristianCantoro/wscontest-votecounter>)
---
usage: extract_books.py [-h] [--config CONFIG_FILE] [-d] [-o BOOKS_FILE] [-v]
Extract the list of books valid for the Wikisource contest.
optional arguments:
-h, --help show this help message and exit
--config CONFIG_FILE INI file to read configs (default: contest.conf.ini)
-d, --debug Enable debug output (implies -v)
-o BOOKS_FILE TSV file with the books to be processed (default:
books.tsv)
-v, --verbose Enable verbose output
---
The MIT License (MIT)
wscontest-votecounter:
Copyright (c) 2017 CristianCantoro <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import regex
import csv
import time
import logging
import argparse
import configparser
from datetime import datetime, timedelta
import urllib.parse
import urllib.request
import mwparserfromhell
# Try to use yajl, a faster module for JSON
# import json
try:
import yajl as json
except ImportError:
import json
### GLOBALS AND DEFAULTS ###
# Files
OUTPUT_BOOKS_FILE = "books.tsv"
CONFIG_FILE = "contest.conf.ini"

# URLs
WIKISOURCE_API = 'https://{lang}.wikisource.org/w/api.php'
OLDWIKISOURCE_API = 'https://wikisource.org/w/api.php'
COMMONS_API = 'https://commons.wikimedia.org/w/api.php'
# Language prefixes that map to the multilingual ("old") Wikisource site.
OLDWIKISOURCE_PREFIXES = set(['old', 'oldwikisource', 'www', ''])

# params
# number of times to retry failing requests
MAX_RETRIES = 10
# time (in seconds) to wait between requests
WAIT_TIME = 0.5
# number of revisions
RVLIMIT = 50
### ###

### logging ###
# Map log level -> format string; DEBUG also shows function name and line.
LOGFORMAT_STDOUT = {logging.DEBUG: '%(funcName)s:%(lineno)s - '
                                   '%(levelname)-8s: %(message)s',
                    logging.INFO: '%(levelname)-8s: %(message)s',
                    logging.WARNING: '%(levelname)-8s: %(message)s',
                    logging.ERROR: '%(levelname)-8s: %(message)s',
                    logging.CRITICAL: '%(levelname)-8s: %(message)s'
                    }

# root logger
rootlogger = logging.getLogger()
lvl_logger = logging.DEBUG
rootlogger.setLevel(lvl_logger)
console = logging.StreamHandler()
console.setLevel(lvl_logger)
formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_logger])
console.setFormatter(formatter)
rootlogger.addHandler(console)
# script logger; level is tightened later from the CLI flags in __main__
logger = logging.getLogger('score')
logger.setLevel(lvl_logger)
def get_page_revisions(page, lang):
    """Fetch the most recent revisions of a wiki page from the Wikisource API.

    Parameters:
        page: title of the page to query (coerced to str).
        lang: language subdomain; prefixes in OLDWIKISOURCE_PREFIXES select
              the multilingual wikisource.org endpoint instead.

    Returns:
        The list of revision dicts (user, timestamp, content) for the page,
        at most RVLIMIT entries, newest first.

    Raises:
        KeyError if all MAX_RETRIES requests fail (``data`` stays empty).
    """
    page = str(page)
    params = {
        'action': 'query',
        'format': 'json',
        'prop': 'revisions',
        'titles': page,
        'rvlimit': RVLIMIT,
        'rvprop': 'user|timestamp|content'
    }
    params = urllib.parse.urlencode(params).encode('ascii')
    logger.info("\tRequesting '{page}'".format(page=page))

    if lang in OLDWIKISOURCE_PREFIXES:
        wikisource_api = OLDWIKISOURCE_API
    else:
        wikisource_api = WIKISOURCE_API.format(lang=lang)

    data = {}
    retries_counter = 0
    while retries_counter < MAX_RETRIES:
        try:
            # Bug fix: this previously referenced an undefined name
            # `source_api`, raising NameError on every call.
            # `with` also guarantees the HTTP response is closed.
            with urllib.request.urlopen(wikisource_api, params) as response:
                data = json.loads(response.read().decode('utf-8'))
            break
        except Exception:  # narrow from bare `except:`; network/JSON errors
            time.sleep(WAIT_TIME)
            retries_counter += 1

    page_id = int(list(data['query']['pages'].keys())[0])
    revisions = data['query']['pages'][str(page_id)]['revisions']
    return revisions
def read_config(config_file):
    """Parse *config_file* (INI) and return {'contest': <section as dict>}."""
    parser = configparser.ConfigParser()
    parser.read(config_file)
    return {'contest': dict(parser['contest'])}
def main(config):
    """Extract book titles from the contest rules page and write them to TSV.

    Reads the rules page revisions, keeps only those made during the contest
    window (when the revision limit was hit), extracts "title.ext" pairs with
    the configured regex and writes the sorted unique titles to the output
    file, one per row, tab-separated and fully quoted.
    """
    output = config['books_file']
    contest_start = datetime.strptime(config['contest']['start_date'],
                                      "%Y-%m-%d %H:%M:%S")
    contest_end = datetime.strptime(config['contest']['end_date'],
                                    "%Y-%m-%d %H:%M:%S")
    lang = config['contest']['language']
    rules_page = config['contest']['rules_page']
    book_regex = config['contest']['book_regex']
    # (removed unused local `debug = config['debug']`)

    revisions = get_page_revisions(page=rules_page, lang=lang)

    # If we got a full page of revisions we may have older ones mixed in:
    # keep only revisions made within the contest window. Otherwise every
    # revision is recent enough to use as-is.
    if len(revisions) >= RVLIMIT:
        recent_revisions = [
            rev for rev in revisions
            if contest_start
            <= datetime.strptime(rev['timestamp'], "%Y-%m-%dT%H:%M:%SZ")
            <= contest_end
        ]
    else:
        recent_revisions = revisions
    del revisions

    book_re = regex.compile(book_regex)
    titles = set()
    for rev in recent_revisions:
        # rev['*'] holds the revision's wikitext.
        wikicode = mwparserfromhell.parse(rev['*'])
        for match in book_re.findall(str(wikicode)):
            titles.add("{title}.{ext}".format(title=match[0], ext=match[1]))

    with open(output, 'w+') as outfile:
        writer = csv.writer(outfile,
                            delimiter='\t',
                            quotechar='"',
                            quoting=csv.QUOTE_ALL)
        for title in sorted(titles):
            writer.writerow([title])
    return
if __name__ == '__main__':
    DESCRIPTION = 'Extract the list of books valid for the Wikisource contest.'
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    parser.add_argument('--config', default=CONFIG_FILE, metavar='CONFIG_FILE',
                        help='INI file to read configs (default: {})'.format(CONFIG_FILE))
    parser.add_argument('-d', '--debug', action='store_true',
                        help='Enable debug output (implies -v)')
    parser.add_argument('-o', default=OUTPUT_BOOKS_FILE, metavar='BOOKS_FILE',
                        help='TSV file with the books to be processed (default: {})'
                             .format(OUTPUT_BOOKS_FILE))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Enable verbose output')
    args = parser.parse_args()

    config_file = args.config
    config = read_config(config_file)
    config['books_file'] = args.o

    # Verbosity/Debug: -d implies -v.
    config['verbose'] = args.verbose or args.debug
    config['debug'] = args.debug

    # Tighten the logging level chosen at import time to match the CLI flags.
    lvl_config_logger = logging.WARNING
    if config['verbose']:
        lvl_config_logger = logging.INFO
    if config['debug']:
        lvl_config_logger = logging.DEBUG

    formatter = logging.Formatter(LOGFORMAT_STDOUT[lvl_config_logger])
    console.setFormatter(formatter)
    rootlogger.setLevel(lvl_config_logger)
    logger.setLevel(lvl_config_logger)

    logger.info("Enable verbose output")
    logger.debug("Enable debug")
    logger.debug(args)
    logger.debug(config)

    # (removed unused `from pprint import pprint` and commented-out ipdb hook)
    main(config)
    logger.info("All done!")
|
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import skbio
from skbio.util._decorator import classproperty, overrides
from skbio.util._decorator import stable
from ._nucleotide_mixin import NucleotideMixin, _motifs as _parent_motifs
from ._grammared_sequence import GrammaredSequence, DisableSubclassingMeta
class DNA(GrammaredSequence, NucleotideMixin,
          metaclass=DisableSubclassingMeta):
    r"""Store DNA sequence data and optional associated metadata.

    Only characters in the IUPAC DNA character set [1]_ are supported.

    Parameters
    ----------
    sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
        Characters representing the DNA sequence itself.
    metadata : dict, optional
        Arbitrary metadata which applies to the entire sequence.
    positional_metadata : Pandas DataFrame consumable, optional
        Arbitrary per-character metadata. For example, quality data from
        sequencing reads. Must be able to be passed directly to the Pandas
        DataFrame constructor.
    interval_metadata : IntervalMetadata
        Arbitrary interval metadata which applies to intervals within
        a sequence to store interval features (such as genes on the
        DNA sequence).
    lowercase : bool or str, optional
        If ``True``, lowercase sequence characters will be converted to
        uppercase characters in order to be valid IUPAC DNA characters. If
        ``False``, no characters will be converted. If a str, it will be
        treated as a key into the positional metadata of the object. All
        lowercase characters will be converted to uppercase, and a ``True``
        value will be stored in a boolean array in the positional metadata
        under the key.
    validate : bool, optional
        If ``True``, validation will be performed to ensure that all sequence
        characters are in the IUPAC DNA character set. If ``False``, validation
        will not be performed. Turning off validation will improve runtime
        performance. If invalid characters are present, however, there is
        **no guarantee that operations performed on the resulting object will
        work or behave as expected.** Only turn off validation if you are
        certain that the sequence characters are valid. To store sequence data
        that is not IUPAC-compliant, use ``Sequence``.

    See Also
    --------
    RNA
    GrammaredSequence

    Notes
    -----
    Subclassing is disabled for DNA, because subclassing makes
    it possible to change the alphabet, and certain methods rely on the
    IUPAC alphabet. If a custom sequence alphabet is needed, inherit directly
    from ``GrammaredSequence``.

    References
    ----------
    .. [1] Nomenclature for incompletely specified bases in nucleic acid
       sequences: recommendations 1984.
       Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
       A Cornish-Bowden

    Examples
    --------
    >>> from skbio import DNA
    >>> DNA('ACCGAAT')
    DNA
    --------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has definites: True
        GC-content: 42.86%
    --------------------------
    0 ACCGAAT

    Convert lowercase characters to uppercase:

    >>> DNA('AcCGaaT', lowercase=True)
    DNA
    --------------------------
    Stats:
        length: 7
        has gaps: False
        has degenerates: False
        has definites: True
        GC-content: 42.86%
    --------------------------
    0 ACCGAAT

    """

    @classproperty
    @overrides(NucleotideMixin)
    def complement_map(cls):
        comp_map = {
            'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
            'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
            'H': 'D', 'V': 'B', 'N': 'N'
        }
        # Gap characters complement to themselves so complementing a gapped
        # sequence leaves its gaps in place.
        comp_map.update({c: c for c in cls.gap_chars})
        return comp_map

    @classproperty
    @overrides(GrammaredSequence)
    def definite_chars(cls):
        # The four unambiguous nucleotides.
        return set("ACGT")

    @classproperty
    @overrides(GrammaredSequence)
    def degenerate_map(cls):
        # IUPAC degenerate code -> set of nucleotides it may stand for.
        return {
            "R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
            "W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
            "H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
        }

    @classproperty
    @overrides(GrammaredSequence)
    def default_gap_char(cls):
        return '-'

    @classproperty
    @overrides(GrammaredSequence)
    def gap_chars(cls):
        return set('-.')

    @property
    def _motifs(self):
        # Module-level motif registry shared by all DNA instances (set up at
        # the bottom of this module).
        return _motifs

    @stable(as_of="0.4.0")
    def transcribe(self):
        """Transcribe DNA into RNA.

        DNA sequence is assumed to be the coding strand. Thymine (T) is
        replaced with uracil (U) in the transcribed sequence.

        Returns
        -------
        RNA
            Transcribed sequence.

        See Also
        --------
        translate
        translate_six_frames

        Notes
        -----
        DNA sequence's metadata, positional, and interval
        metadata are included in the transcribed RNA sequence.

        Examples
        --------
        Transcribe DNA into RNA:

        >>> from skbio import DNA
        >>> dna = DNA('TAACGTTA')
        >>> dna
        DNA
        --------------------------
        Stats:
            length: 8
            has gaps: False
            has degenerates: False
            has definites: True
            GC-content: 25.00%
        --------------------------
        0 TAACGTTA
        >>> dna.transcribe()
        RNA
        --------------------------
        Stats:
            length: 8
            has gaps: False
            has degenerates: False
            has definites: True
            GC-content: 25.00%
        --------------------------
        0 UAACGUUA

        """
        # Transcription of the coding strand is a byte-level T -> U swap.
        seq = self._string.replace(b'T', b'U')

        metadata = None
        if self.has_metadata():
            metadata = self.metadata

        positional_metadata = None
        if self.has_positional_metadata():
            positional_metadata = self.positional_metadata

        interval_metadata = None
        if self.has_interval_metadata():
            interval_metadata = self.interval_metadata

        # turn off validation because `seq` is guaranteed to be valid
        return skbio.RNA(seq, metadata=metadata,
                         positional_metadata=positional_metadata,
                         interval_metadata=interval_metadata,
                         validate=False)

    @stable(as_of="0.4.0")
    def translate(self, *args, **kwargs):
        """Translate DNA sequence into protein sequence.

        DNA sequence is assumed to be the coding strand. DNA sequence is first
        transcribed into RNA and then translated into protein.

        Parameters
        ----------
        args : tuple
            Positional arguments accepted by ``RNA.translate``.
        kwargs : dict
            Keyword arguments accepted by ``RNA.translate``.

        Returns
        -------
        Protein
            Translated sequence.

        See Also
        --------
        RNA.reverse_transcribe
        RNA.translate
        translate_six_frames
        transcribe

        Notes
        -----
        DNA sequence's metadata are included in the translated protein
        sequence. Positional metadata are not included.

        Examples
        --------
        Translate DNA into protein using NCBI's standard genetic code (table ID
        1, the default genetic code in scikit-bio):

        >>> from skbio import DNA
        >>> dna = DNA('ATGCCACTTTAA')
        >>> dna.translate()
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 MPL*

        Translate the same DNA sequence using a different NCBI genetic code
        (table ID 3, the yeast mitochondrial code) and specify that translation
        must terminate at the first stop codon:

        >>> dna.translate(3, stop='require')
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 MPT

        """
        # Delegate to RNA.translate after transcription.
        return self.transcribe().translate(*args, **kwargs)

    @stable(as_of="0.4.0")
    def translate_six_frames(self, *args, **kwargs):
        """Translate DNA into protein using six possible reading frames.

        DNA sequence is assumed to be the coding strand. DNA sequence is first
        transcribed into RNA and then translated into protein. The six possible
        reading frames are:

        * 1 (forward)
        * 2 (forward)
        * 3 (forward)
        * -1 (reverse)
        * -2 (reverse)
        * -3 (reverse)

        Translated sequences are yielded in this order.

        Parameters
        ----------
        args : tuple
            Positional arguments accepted by ``RNA.translate_six_frames``.
        kwargs : dict
            Keyword arguments accepted by ``RNA.translate_six_frames``.

        Yields
        ------
        Protein
            Translated sequence in the current reading frame.

        See Also
        --------
        RNA.translate_six_frames
        translate
        transcribe

        Notes
        -----
        This method is faster than (and equivalent to) performing six
        independent translations using, for example:

        ``(seq.translate(reading_frame=rf)
        for rf in GeneticCode.reading_frames)``

        DNA sequence's metadata are included in each translated protein
        sequence. Positional metadata are not included.

        Examples
        --------
        Translate DNA into protein using the six possible reading frames and
        NCBI's standard genetic code (table ID 1, the default genetic code in
        scikit-bio):

        >>> from skbio import DNA
        >>> dna = DNA('ATGCCACTTTAA')
        >>> for protein in dna.translate_six_frames():
        ...     protein
        ...     print('')
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 MPL*
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 CHF
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 ATL
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 4
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 LKWH
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: True
        --------------------------
        0 *SG
        <BLANKLINE>
        Protein
        --------------------------
        Stats:
            length: 3
            has gaps: False
            has degenerates: False
            has definites: True
            has stops: False
        --------------------------
        0 KVA
        <BLANKLINE>

        """
        # Delegate to RNA.translate_six_frames after transcription.
        return self.transcribe().translate_six_frames(*args, **kwargs)

    @overrides(GrammaredSequence)
    def _repr_stats(self):
        """Define custom statistics to display in the sequence's repr."""
        stats = super(DNA, self)._repr_stats()
        # DNA additionally reports GC-content alongside the base stats.
        stats.append(('GC-content', '{:.2%}'.format(self.gc_content())))
        return stats
# DNA's motif registry starts as a copy of the shared nucleotide motifs.
_motifs = _parent_motifs.copy()

# Leave this at the bottom: interpolation references the DNA class, so the
# class definition above must be complete first.
_motifs.interpolate(DNA, "find_motifs")
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import range
import logging
import sys
import urllib.request, urllib.error
from django import forms
from django.forms import FileField, CharField, BooleanField, Textarea
from django.forms.formsets import formset_factory, BaseFormSet
from aws.s3 import S3A_ROOT, normpath as s3_normpath
from azure.abfs.__init__ import ABFS_ROOT, normpath as abfs_normpath
from desktop.lib import i18n
from hadoop.fs import normpath
from useradmin.models import User, Group
from filebrowser.lib import rwx
if sys.version_info[0] > 2:
from urllib.parse import unquote as urllib_unquote
from django.utils.translation import gettext_lazy as _
else:
from urllib import unquote as urllib_unquote
from django.utils.translation import ugettext_lazy as _
logger = logging.getLogger(__name__)
class FormSet(BaseFormSet):
    """Formset that rebuilds Django's management-form data from a plain list.

    Callers pass ``data`` as a list of per-form dicts; this synthesizes the
    TOTAL/INITIAL/MAX_NUM management keys and the per-form prefixed keys that
    ``BaseFormSet`` expects from a normal POST.
    """
    def __init__(self, data=None, prefix=None, *args, **kwargs):
        self.prefix = prefix or self.get_default_prefix()

        if data:
            total = len(data)
            # Management form info is hard coded given that none of these keys
            # or info is exportable. This could be a problem point if the
            # management form changes in later releases.
            bound = {
                '%s-TOTAL_FORMS' % self.prefix: total,
                '%s-INITIAL_FORMS' % self.prefix: total,
                '%s-MAX_NUM_FORMS' % self.prefix: 0,
            }
            # Prefix each form's fields the way Django would have.
            for index, row in enumerate(data):
                form_prefix = self.add_prefix(index)
                for field in row:
                    bound['%s-%s' % (form_prefix, field)] = row[field]
            self.data = bound

        BaseFormSet.__init__(self, self.data, self.prefix, *args, **kwargs)
class PathField(CharField):
    """Required CharField whose cleaned value is a normalized filesystem path.

    Paths are normalized with the helper matching their scheme: S3A, ABFS or
    (by default) HDFS.
    """
    def __init__(self, label, help_text=None, **kwargs):
        kwargs.setdefault('required', True)
        kwargs.setdefault('min_length', 1)
        forms.CharField.__init__(self, label=label, help_text=help_text, **kwargs)

    def clean(self, value):
        cleaned_path = CharField.clean(self, value)
        lowered = value.lower()
        if lowered.startswith(S3A_ROOT):
            return s3_normpath(cleaned_path)
        if lowered.startswith(ABFS_ROOT):
            return abfs_normpath(cleaned_path)
        return normpath(cleaned_path)
class EditorForm(forms.Form):
    """Form backing the file editor: a path, its contents and an encoding."""
    path = PathField(label=_("File to edit"))
    contents = CharField(widget=Textarea, label=_("Contents"), required=False)
    encoding = CharField(label=_('Encoding'), required=False)

    def clean_path(self):
        # The path arrives URL-encoded; decode it before use.
        raw = self.cleaned_data.get('path', '')
        return urllib_unquote(raw)

    def clean_contents(self):
        # Normalize Windows line endings to Unix.
        text = self.cleaned_data.get('contents', '')
        return text.replace('\r\n', '\n')

    def clean_encoding(self):
        # Fall back to the site-wide encoding when none is given.
        encoding = self.cleaned_data.get('encoding', '').strip()
        return encoding or i18n.get_site_encoding()
class RenameForm(forms.Form):
    # `op` is the operation identifier dispatched on by the filebrowser views.
    op = "rename"
    src_path = CharField(label=_("File to rename"), help_text=_("The file to rename."))
    dest_path = CharField(label=_("New name"), help_text=_("Rename the file to:"))

class BaseRenameFormSet(FormSet):
    op = "rename"

# Bulk-rename formset; extra=0 so no blank forms are added.
RenameFormSet = formset_factory(RenameForm, formset=BaseRenameFormSet, extra=0)
class CopyForm(forms.Form):
    # Copy a single file to a destination location.
    op = "copy"
    src_path = CharField(label=_("File to copy"), help_text=_("The file to copy."))
    dest_path = CharField(label=_("Destination location"), help_text=_("Copy the file to:"))

class BaseCopyFormSet(FormSet):
    op = "copy"

# Bulk-copy formset; extra=0 so no blank forms are added.
CopyFormSet = formset_factory(CopyForm, formset=BaseCopyFormSet, extra=0)
class SetReplicationFactorForm(forms.Form):
    # Change the HDFS replication factor of a file.
    op = "setreplication"
    src_path = CharField(label=_("File to set replication factor"), help_text=_("The file to set replication factor."))
    replication_factor = CharField(label=_("Value of replication factor"), help_text=_("The value of replication factor."))
class UploadFileForm(forms.Form):
    op = "upload"
    # The "hdfs" prefix in "hdfs_file" triggers the HDFSfileUploadHandler
    hdfs_file = FileField(label=_("File to Upload"))
    dest = PathField(label=_("Destination Path"), help_text=_("Filename or directory to upload to."))
    # When set, the uploaded archive is extracted after upload.
    extract_archive = BooleanField(required=False)

class UploadArchiveForm(forms.Form):
    op = "upload"
    archive = FileField(label=_("Archive to Upload"))
    dest = PathField(label=_("Destination Path"), help_text=_("Archive to upload to."))
class RemoveForm(forms.Form):
    # Remove a single file.
    op = "remove"
    path = PathField(label=_("File to remove"))

class RmDirForm(forms.Form):
    # Remove an (empty) directory.
    op = "rmdir"
    path = PathField(label=_("Directory to remove"))

class RmTreeForm(forms.Form):
    # Remove a directory tree recursively.
    op = "rmtree"
    path = PathField(label=_("Directory to remove (recursively)"))

class BaseRmTreeFormset(FormSet):
    op = "rmtree"

# Bulk recursive-remove formset; extra=0 so no blank forms are added.
RmTreeFormSet = formset_factory(RmTreeForm, formset=BaseRmTreeFormset, extra=0)
class RestoreForm(forms.Form):
    # NOTE(review): `op` is "rmtree" here while BaseRestoreFormset below uses
    # "restore" — presumably a copy-paste leftover; verify against the views
    # before changing, since `op` may be matched by the request dispatcher.
    op = "rmtree"
    path = PathField(label=_("Path to restore"))

class BaseRestoreFormset(FormSet):
    op = "restore"

# Trash-restore formset; extra=0 so no blank forms are added.
RestoreFormSet = formset_factory(RestoreForm, formset=BaseRestoreFormset, extra=0)

class TrashPurgeForm(forms.Form):
    # Empty the trash; no fields beyond the operation identifier.
    op = "purge_trash"

class MkDirForm(forms.Form):
    op = "mkdir"
    path = PathField(label=_("Path in which to create the directory"))
    name = PathField(label=_("Directory Name"))

class TouchForm(forms.Form):
    op = "touch"
    path = PathField(label=_("Path in which to create the file"))
    name = PathField(label=_("File Name"))
class ChownForm(forms.Form):
    """Change user/group ownership of a path, optionally recursively."""
    op = "chown"
    path = PathField(label=_("Path to change user/group ownership"))
    # These could be "ChoiceFields", listing only users and groups
    # that the current user has permissions for.
    user = CharField(label=_("User"), min_length=1)
    user_other = CharField(label=_("OtherUser"), min_length=1, required=False)
    group = CharField(label=_("Group"), min_length=1)
    group_other = CharField(label=_("OtherGroup"), min_length=1, required=False)
    recursive = BooleanField(label=_("Recursive"), required=False)

    def __init__(self, *args, **kwargs):
        super(ChownForm, self).__init__(*args, **kwargs)
        # NOTE(review): fetches every user and group from the DB on each form
        # instantiation; the template presumably uses these for suggestions.
        self.all_groups = [group.name for group in Group.objects.all()]
        self.all_users = [user.username for user in User.objects.all()]

class BaseChownFormSet(FormSet):
    op = "chown"

# Bulk-chown formset; extra=0 so no blank forms are added.
ChownFormSet = formset_factory(ChownForm, formset=BaseChownFormSet, extra=0)
class ChmodForm(forms.Form):
    """Change POSIX-style permission bits of a path.

    One BooleanField per permission bit; ``__init__`` expands an incoming
    octal ``mode`` string into those flags, and ``full_clean`` folds them
    back into ``cleaned_data["mode"]`` via ``rwx.compress_mode``.
    """
    op = "chmod"
    path = PathField(label=_("Path to change permissions"))

    # By default, BooleanField only validates when
    # it's checked.
    user_read = BooleanField(required=False)
    user_write = BooleanField(required=False)
    user_execute = BooleanField(required=False)
    group_read = BooleanField(required=False)
    group_write = BooleanField(required=False)
    group_execute = BooleanField(required=False)
    other_read = BooleanField(required=False)
    other_write = BooleanField(required=False)
    other_execute = BooleanField(required=False)
    sticky = BooleanField(required=False)
    recursive = BooleanField(required=False)

    # Field names in the same bit order that rwx.expand_mode/compress_mode use.
    names = ("user_read", "user_write", "user_execute",
             "group_read", "group_write", "group_execute",
             "other_read", "other_write", "other_execute",
             "sticky")

    def __init__(self, initial, *args, **kwargs):
        # Removed leftover debug calls `logging.info(dir(self))` /
        # `logging.info(dir(type(self)))` that logged on every instantiation.
        # Convert from string representation.
        mode = initial.get("mode")
        if mode is not None:
            mode = int(mode, 8)
            bools = rwx.expand_mode(mode)
            for name, b in zip(self.names, bools):
                initial[name] = b
        # Use the module logger rather than the root logger.
        logger.debug(initial)
        kwargs['initial'] = initial
        forms.Form.__init__(self, *args, **kwargs)

    def full_clean(self):
        forms.Form.full_clean(self)
        if hasattr(self, "cleaned_data"):
            # Recombine the individual bit flags into a single octal mode.
            self.cleaned_data["mode"] = rwx.compress_mode([self.cleaned_data[name] for name in self.names])
class BaseChmodFormSet(FormSet):
    op = "chmod"

# Bulk-chmod formset; extra=0 so no blank forms are added.
ChmodFormSet = formset_factory(ChmodForm, formset=BaseChmodFormSet, extra=0)
|
|
# -*- coding: utf-8 -*-
## flashcard_views
import json
import os
import shutil
import glob
from pyramid.response import Response
from pyramid.request import Request
from pyramid.view import view_config
import uuid
import Image
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm import aliased
from sqlalchemy import exists
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.security import (authenticated_userid,
unauthenticated_userid,
effective_principals,
forget,
remember
)
import time
import datetime
from forms import *
from random import randint
from pyramid.settings import asbool
from pyramid.url import (current_route_url,
route_url
)
from models import *
import beaker
from beaker.cache import cache_region
from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
from cache_functions import *
from user_views import BaseView
# Directory scanned for auto-suggested flashcard pictures.
# NOTE(review): hard-coded absolute path — should come from configuration.
AUTO_PICTURE_DIR = '/home/user/venv/English/english/static/to_upload/images'
class FlashcardView(BaseView):
    def __init__(self, request):
        """Store the Pyramid request and run BaseView's common setup."""
        # NOTE(review): self.request is set before BaseView.__init__ runs;
        # keep this order in case the base initializer reads it.
        self.request = request
        super(FlashcardView, self).__init__(request)
    @cache_region('threehours', 'get_flashcard')
    def get_flashcard(self, (cardid, languageid)):
        """Return the card's top translations plus foreign-word distractors.

        Python 2 tuple-parameter syntax: the (cardid, languageid) tuple is
        also the cache key for the three-hour cache region.
        """
        # Three most frequent translations of the card, most used first.
        translations = [translation.ForeignLemma.form for translation in DBSession.query(Translation, ForeignLemma).filter(Translation.card_id == cardid, ForeignLemma.id == Translation.foreign_lemma_id).order_by((Translation.count).desc()).limit(3)]
        # Distractors: up to 7 other words in the same language, excluding
        # every correct translation.
        fdistractors_query = DBSession.query(ForeignLemma).filter(ForeignLemma.language_id == languageid)
        for translation in translations:
            fdistractors_query = fdistractors_query.filter(ForeignLemma.form != translation)
        foreigndistractors = [result.form for result in fdistractors_query.limit(7)]
        return {'translations': translations, 'sourceDistractors': foreigndistractors}
    @cache_region('threehours', 'get_card')
    def get_card(self, cardid):
        """Return display data for one card: cloze sentence, answer, picture
        and up to 7 English distractor forms. Cached for three hours."""
        card = DBSession.query(Card, EnglishLemma, EnglishForm, Picture).filter(Card.id == cardid, Card.lemma_id == EnglishLemma.id, EnglishLemma.form_id == EnglishForm.id, EnglishLemma.picture_id == Picture.id).first()
        sentence = card.EnglishLemma.example_sentence
        pos = card.EnglishLemma.pos
        answer = card.EnglishForm.form
        picture = card.Picture.name
        # Distractors are any other English forms; limit keeps the query cheap.
        edistractors_query = DBSession.query(EnglishForm).filter(EnglishForm.id != card.EnglishForm.id)
        englishdistractors = [result.form for result in edistractors_query.limit(7)]
        return {'cardid': card.Card.id, 'sentence': sentence, 'pos': pos, 'answer': answer, 'picturename': picture, 'targetDistractors': englishdistractors}
    @cache_region('threehours', 'get_lemma')
    def get_lemma(self, lemmaid):
        """Return display data for a lemma with the answer filled into the
        example sentence (no distractors). Cached for three hours."""
        lemma = DBSession.query(EnglishLemma, EnglishForm, Picture).filter(EnglishLemma.id == lemmaid, EnglishLemma.form_id == EnglishForm.id, EnglishLemma.picture_id == Picture.id).first()
        pos = lemma.EnglishLemma.pos
        answer = lemma.EnglishForm.form
        # Unlike get_card, the cloze placeholder is replaced with the answer.
        sentence = lemma.EnglishLemma.example_sentence.replace('____', answer)
        picture = lemma.Picture.name
        return {'sentence': sentence, 'pos': pos, 'answer': answer, 'picturename': picture}
    @cache_region('twentymins', 'search')
    def search(self, (search_criteria, preferred_language)):
        """Search lemmas (or cards, when a language is preferred) and return a
        list of display dicts.

        search_criteria keys: 'lemma_type', 'letter', 'results_limit',
        'order_by'. preferred_language == 0 means "no language": search plain
        lemmas instead of cards. Cached for twenty minutes.
        """
        # Base query: cards in the preferred language, or bare lemmas.
        if preferred_language != 0:
            baselist = DBSession.query(EnglishLemma, Card, EnglishForm).filter(Card.lemma_id == EnglishLemma.id, EnglishLemma.form_id == EnglishForm.id, Card.language_id == preferred_language)
        else:
            baselist = DBSession.query(EnglishLemma, EnglishForm).filter(EnglishLemma.form_id == EnglishForm.id)
        ### Search criteria
        lemma_type = search_criteria['lemma_type']
        letter = search_criteria['letter']
        limit = search_criteria['results_limit']
        ### Selection
        if lemma_type != 'any':
            baselist = baselist.filter(EnglishLemma.pos == lemma_type)
        if letter != 'any':
            # NOTE(review): '%{0}' matches forms *ending* with the letter; a
            # starts-with filter would be '{0}%'. Verify intended semantics.
            baselist = baselist.filter(EnglishForm.form.like('%{0}'.format(letter)))
        ### Sorting
        if search_criteria['order_by'] == 'frequency':
            baselist = baselist.join(FormInfo, EnglishLemma.form_id == FormInfo.form_id).order_by(FormInfo.freq.desc())
        if search_criteria['order_by'] == 'popularity':
            # Popularity = number of flashcards made from the lemma's cards.
            baselist = baselist.outerjoin(Card, Card.lemma_id == EnglishLemma.id)
            baselist = baselist.outerjoin(Flashcard, Flashcard.card_id == Card.id).order_by(func.count(Flashcard.id))
        if search_criteria['order_by'] == 'recent':
            baselist = baselist.order_by(EnglishLemma.id.desc())
        ###
        baselist = baselist.limit(limit)
        words_dict = []
        for lemma in baselist:
            # Reuse the cached per-card / per-lemma renderers.
            if preferred_language != 0:
                word_dict = self.get_card(lemma.Card.id)
            else:
                word_dict = self.get_lemma(lemma.EnglishLemma.id)
            word_dict['len'] = 1
            words_dict.append(word_dict)
        return words_dict
def process_drill_results(self, drill_results):
# Persist drill outcomes: update each flashcard's level and answer counters
# and, for graded ('FlashcardN') levels, adjust the ease factor and
# reschedule the next due date (spaced-repetition style; ease stored *1000).
session = DBSession()
for flashcard_id in drill_results.keys():
flashcard = session.query(Flashcard).filter_by(id=flashcard_id).first()
flashcard.level = drill_results[flashcard_id]['ending_level']
flashcard.correct += drill_results[flashcard_id]['correct']
flashcard.incorrect += drill_results[flashcard_id]['incorrect']
if drill_results[flashcard_id]['ending_level'].find('Flashcard') == 0:
# NOTE(review): under Python 2 integer division correct/(incorrect+correct)
# truncates to 0 (or 1 only when every answer was correct), so these
# adjustments are almost always 0 or exactly 100 -- likely meant to
# multiply by 100 *before* dividing.  Confirm before changing stored ease.
if drill_results[flashcard_id]['correct'] > drill_results[flashcard_id]['incorrect'] and flashcard.ease < 3500:
flashcard.ease += flashcard.correct/(flashcard.incorrect+flashcard.correct)*100
elif drill_results[flashcard_id]['correct'] < drill_results[flashcard_id]['incorrect'] and flashcard.ease > 1500:
flashcard.ease -= flashcard.incorrect/(flashcard.incorrect+flashcard.correct)*100
# Next interval grows with ease; due date advances by interval/10 days.
flashcard.interval= int(flashcard.interval*flashcard.ease/1000.0)
flashcard.due = datetime.date.fromordinal(datetime.date.today().toordinal()+int(flashcard.interval/10))
# NOTE(review): the invalidation key is flashcard.id; if the user_info cache
# region is keyed by user id this should probably be flashcard.owner -- confirm.
beaker.cache.region_invalidate(BaseView.user_info, None, 'user_info', flashcard.id)
session.flush()
def moveUP(self, s):
    """Return the drill level one step above *s*.

    Progression: Show -> 4Source -> 8Source -> 4Target -> 8Target ->
    Flashcard1 .. Flashcard8 (Flashcard8 is the cap and maps to itself).
    Unrecognised levels fall through and return None, matching the original
    behaviour.
    """
    if s == 'Show':
        return '4Source'
    elif s.find('Flashcard') == 0:
        # Rebuild via slicing: str.replace() would bump *every* occurrence of
        # the digit in the string, not just the trailing level number.
        level = int(s[-1])
        if level + 1 <= 8:
            s = s[:-1] + str(level + 1)
        return s
    elif s.find('4') != -1:
        # '4Source' -> '8Source', '4Target' -> '8Target'
        return s.replace('4', '8')
    elif s.find('8Source') == 0:
        return '4Target'
    elif s.find('8Target') == 0:
        return 'Flashcard1'
def create_form(self, v):
    """Create an EnglishForm/EnglishLemma (plus one Card per language) from
    the add-form POST fields suffixed with *v* ('' for the first item).
    """
    form = self.request.params['form{0}'.format(v)]
    english_form = DBSession.query(EnglishForm).filter(EnglishForm.form == form).first()
    if english_form is None:
        english_form = EnglishForm(form=form)
        DBSession.add(english_form)
        DBSession.flush()  # need english_form.id below
    # BUG FIX: the word is data, not a regex -- escape it so forms containing
    # pattern metacharacters (e.g. 'Mr.' or 'set-up?') blank out correctly.
    example_sentence = re.sub(re.escape(form), '____',
                              self.request.params['example_sentence{0}'.format(v)])
    pos = self.request.params['pos{0}'.format(v)]
    if self.request.params['picloc{0}'.format(v)] != 'other':
        picture = os.path.join(AUTO_PICTURE_DIR,
                               '{0}.jpg'.format(self.request.params['picloc{0}'.format(v)]))
    else:
        # NOTE(review): when picloc is 'other' but no picture<v> field was
        # posted, `picture` was previously unbound (NameError at add_image);
        # an explicit KeyError here fails fast with a clearer cause.
        picture = self.request.params['picture{0}'.format(v)]
    pic_id = add_image(self.request, picture, self.userid, situation='flashcard')
    english_lemma = EnglishLemma(owner=self.userid, form_id=english_form.id,
                                 example_sentence=example_sentence, pos=pos,
                                 picture_id=pic_id)
    DBSession.add(english_lemma)
    DBSession.flush()  # need english_lemma.id for the cards
    # One translation card per supported language.
    for lang in DBSession.query(Language).all():
        DBSession.add(Card(lemma_id=english_lemma.id, language_id=lang.id))
        DBSession.flush()
## Views
def add_forms(self):
    """List the word forms that do not yet have an EnglishLemma attached."""
    orphans = DBSession.query(EnglishForm).filter(
        ~exists().where(EnglishLemma.form_id == EnglishForm.id)).all()
    self.response['forms'] = [orphan.form for orphan in orphans]
    return self.response
def add_form(self):
    """Add-lemma page for one word form; a POST creates the submitted lemmas."""
    wordform = str(self.request.matchdict['word_form'])
    info = DBSession.query(EnglishForm, FormInfo).filter(
        EnglishForm.id == FormInfo.form_id,
        EnglishForm.form == wordform).first()
    self.response['pictures'] = ['{0}{1}'.format(wordform, i) for i in (1, 2, 3, 4)]
    self.response['wordform'] = wordform
    self.response.update({'freq': info.FormInfo.freq,
                          'senses': info.FormInfo.definitions})
    if self.request.method == 'POST':
        vocab_length = int(self.request.params['vocab_items'])
        # The first item's fields carry no numeric suffix; the rest use 1..n-1.
        self.create_form(v='')
        for suffix in xrange(1, vocab_length):
            self.create_form(str(suffix))
        return HTTPFound(location=self.request.route_url('add_forms'))
    return self.response
def flashcard_tree(self):
    """Render a flashcard-category node (:pid) with its direct children.

    Categories form a nested set: children are the nodes whose left/right
    interval lies inside the parent's, exactly one level down.
    """
    # The :parent_name URL segment is decorative; lookup is by id (the unused
    # `parentname` local was removed).
    parentid = int(self.request.matchdict['pid'])
    parent = DBSession.query(EnglishLemmaCategory).filter(
        EnglishLemmaCategory.id == parentid).first()
    children = DBSession.query(EnglishLemmaCategory).filter(
        EnglishLemmaCategory.left > parent.left,
        EnglishLemmaCategory.right < parent.right,
        EnglishLemmaCategory.level == parent.level + 1).all()
    self.response['parent'] = {
        'title': parent.name,
        'children': [child.name for child in children],
    }
    return self.response
def abc_flashcards(self):
# Static page: the template renders everything from the shared response dict.
return self.response
def add_flashcards(self):
# Placeholder view -- not implemented yet (no route registered in includeme).
pass
def search_flashcards(self):
    """Search view: assembles criteria from the query string and renders matches.

    Optional GET params: letter, lemma_type, order_by (frequency|popularity|recent).
    """
    ### Default settings
    limit = 30
    search_criteria = {
        # BUG FIX: the default was 'all', but search() only treats 'any' as
        # "no filter", so a request without lemma_type filtered on
        # pos == 'all' and silently returned nothing.
        'lemma_type': 'any',
        'letter': 'any',
        'order_by': 'frequency',
        'limit': limit,
    }
    ### Change settings if they exist (dict.has_key is Python-2-only; `in`
    ### works on both interpreters)
    for key in ('letter', 'lemma_type', 'order_by'):
        if key in self.request.params:
            search_criteria[key] = self.request.params[key]
    # The result count is always capped at `limit`; the original read a
    # results_limit request parameter and then unconditionally overwrote it,
    # so the parameter is (still) deliberately ignored here.
    search_criteria['results_limit'] = limit
    preferred_language = 0
    if self.userid:
        preferred_language = DBSession.query(AuthID).filter(
            AuthID.id == self.userid).first().preferred_language
    self.response['words'] = self.search((search_criteria, preferred_language))
    self.response['search_criteria'] = search_criteria
    return self.response
def my_flashcards(self):
    """Summarise the current user's flashcard schedule for the overview page."""
    user = DBSession.query(AuthID).filter(AuthID.id == self.userid).first()
    schedule = user.sorted_flashcards()
    for bucket in ('overdue', 'today', 'tomorrow', 'this_week', 'next_week'):
        self.response['flashcards_{0}'.format(bucket)] = len(schedule[bucket])
    self.response['total_flashcards'] = len(user.flashcards)
    return self.response
def practice_flashcards(self):
# Build a JSON deck of up to 30 due flashcards (past the 'Show' level) for
# the current user, most recently due first.
flashcards = DBSession.query(Flashcard,Card).filter(Flashcard.owner==self.userid, Flashcard.level != 'Show', Card.id ==Flashcard.card_id).filter(Flashcard.due <= datetime.date.today()).order_by((Flashcard.due).desc()).limit(30)
flashcard_deck = []
# NOTE(review): drill_results is never used in this view -- dead local?
drill_results = {}
position = 1
# Session identifier for this drill, shown to the client.
self.response['drill'] = uuid.uuid4().node
for flashcard in flashcards:
card = self.get_card(flashcard.Card.id)
card.update({'level': flashcard.Flashcard.level, 'points': 40, 'cid' : flashcard.Flashcard.id, 'position' : position})
card['picture'] = self.request.static_url('english:/static/uploads/pictures/{0}.jpeg'.format(card['picturename']))
flashcard_data = self.get_flashcard((flashcard.Card.id, flashcard.Card.language_id))
card.update(flashcard_data)
flashcard_deck.append(card)
position +=1
self.response['flashcard_json'] = json.dumps(flashcard_deck)
return self.response
def introduce_flashcards(self):
# NOTE(review): this body is byte-identical to practice_flashcards, including
# the `Flashcard.level != 'Show'` filter -- for an "introduce" view one would
# expect level == 'Show' (cards not yet seen).  Confirm and deduplicate.
flashcards = DBSession.query(Flashcard,Card).filter(Flashcard.owner==self.userid, Flashcard.level != 'Show', Card.id ==Flashcard.card_id).filter(Flashcard.due <= datetime.date.today()).order_by((Flashcard.due).desc()).limit(30)
flashcard_deck = []
drill_results = {}
position = 1
self.response['drill'] = uuid.uuid4().node
for flashcard in flashcards:
card = self.get_card(flashcard.Card.id)
card.update({'level': flashcard.Flashcard.level, 'points': 40, 'cid' : flashcard.Flashcard.id, 'position' : position})
card['picture'] = self.request.static_url('english:/static/uploads/pictures/{0}.jpeg'.format(card['picturename']))
flashcard_data = self.get_flashcard((flashcard.Card.id, flashcard.Card.language_id))
card.update(flashcard_data)
flashcard_deck.append(card)
position +=1
self.response['flashcard_json'] = json.dumps(flashcard_deck)
return self.response
def report_drill_results(self):
    """Record the outcome of a finished drill and reschedule its flashcards.

    Reads the per-card POST fields (score<i>, card<i>, level<i>, time<i>,
    response<i>, correct<i>), writes one FlashcardHistory row per graded
    answer and hands the aggregated tallies to process_drill_results().
    """
    drill_results = {}
    if 'activity_type' in self.request.params:
        points_scored = 0
        if self.request.params['current_card'] != '0':
            for indx in xrange(0, int(self.request.params['current_card']) + 1):
                points_scored += int(self.request.params['score' + str(indx)])
                card = int(self.request.params['card' + str(indx)])
                lvl = self.request.params['level' + str(indx)]
                # Response time arrives as float seconds; stored in centiseconds.
                resp_time = int(float(self.request.params['time' + str(indx)]) * 100)
                resp = self.request.params['response' + str(indx)]
                corr = self.request.params['correct' + str(indx)]
                drill_results.setdefault(card, {'correct': 0, 'incorrect': 0,
                                                'ending_level': lvl})
                if lvl == 'Show':
                    continue  # un-graded 'Show' cards produce no history rows
                if corr == 'True':
                    fhistory = FlashcardHistory(flashcard_id=card, response_time=resp_time,
                                                response=resp, correct=True, level=lvl)
                    drill_results[card]['correct'] += 1
                    # BUG FIX: moveUP is a method on this view; the bare name
                    # raised NameError at runtime.
                    drill_results[card]['ending_level'] = self.moveUP(lvl)
                elif corr == 'False':
                    fhistory = FlashcardHistory(flashcard_id=card, response_time=resp_time,
                                                response=resp, correct=False, level=lvl)
                    drill_results[card]['incorrect'] += 1
                else:
                    # Guard: a malformed 'correct' flag previously left
                    # fhistory unbound and raised NameError below.
                    continue
                DBSession.add(fhistory)
        self.process_drill_results(drill_results)
        return HTTPFound(location=self.request.route_url('my_flashcards'))
    return 'messed up'
def flashcards_demo(self):
# Demo drill: pick a language, translate 5 fixed seed lemmas (ids 1-5), then
# build a flashcard deck from those translations.  Three phases below:
# initial render, 'configure' (language chosen), and POST 'add' (translations
# submitted).
self.response['flashcard_json'] = ''
self.response['language'] = ''
self.response['vocabulary'] = ''
self.response['vb_error'] = ''
self.response['drill'] = 1
self.response['language_options'] = [{'id': language.id, 'e_name': language.english_name, 'n_name' : language.native_name} for language in DBSession.query(Language).all()]
if 'configure' in self.request.params:
languageid = self.request.params["preferred_language"]
language = DBSession.query(Language).filter_by(id=languageid).first()
language_name = language.english_name
vocabulary = []
# The demo vocabulary is hard-coded to EnglishLemma ids 1..5.
for indx in range(1,6):
english_lemma = DBSession.query(EnglishLemma).filter_by(id=indx).first()
form = DBSession.query(EnglishForm).filter(EnglishForm.id==english_lemma.form_id).first().form
example_sentence = re.sub('____', form, english_lemma.example_sentence)
# Most popular existing translation for this lemma in the chosen language.
all_translation = DBSession.query(Card,Translation,ForeignLemma).filter(Card.language_id==languageid, Card.lemma_id==english_lemma.id, Card.id==Translation.card_id, Translation.foreign_lemma_id==ForeignLemma.id).order_by(func.count(Translation.count).desc()).first()
# Ensure a Card exists for (lemma, language); create it lazily.
cid = DBSession.query(Card).filter(Card.lemma_id==english_lemma.id,Card.language_id==languageid).first()
if cid == None:
card = Card(lemma_id=english_lemma.id,language_id=languageid)
DBSession.add(card)
DBSession.flush()
cid = card.id
else: cid = cid.id
vocab_item = {'form' : form, 'translation': u"{0}".format(all_translation.ForeignLemma.form), 'cid' : cid , 'fid' : all_translation.ForeignLemma.language_id, 'id': english_lemma.id, 'example_sentence': example_sentence, 'pos' :english_lemma.pos }
vocabulary.append(vocab_item)
self.response['vocabulary'] = vocabulary
self.response['language'] = language_name
# Phase 3: the user submitted their own translations ('add' button).
if self.request.method=='POST' and self.request.POST.keys().count('add') > 0:
results = []
error = None
for item in vocabulary:
card = DBSession.query(Card).filter(Card.id==item['cid']).first()
foreign_form = unicode(self.request.params[str(item['id'])]).strip()
results.append(foreign_form)
# Get-or-create the foreign lemma, then bump its translation count.
existing_f_lemma = DBSession.query(ForeignLemma).filter(ForeignLemma.form==foreign_form, ForeignLemma.language_id==int(languageid)).first()
if existing_f_lemma == None:
existing_f_lemma = ForeignLemma(form=foreign_form, language_id=int(languageid))
DBSession.add(existing_f_lemma)
existing_translation = DBSession.query(Translation).filter(Translation.card_id ==card.id, Translation.foreign_lemma_id==existing_f_lemma.id).first()
if existing_translation == None:
existing_translation = Translation(card_id=card.id, foreign_lemma_id=existing_f_lemma.id, count = 0)
DBSession.add(existing_translation)
existing_translation.count += 1
card.translations.append(existing_translation)
# Only build the deck when every item received a translation.
if results.count('') == 0:
position = 1
flashcard_deck = []
for vocab_item in vocabulary:
pos = vocab_item['pos']
form = vocab_item['form']
englishform = DBSession.query(EnglishForm).filter(EnglishForm.form==form).first()
flashcard = DBSession.query(Card,Picture).filter(Card.id==vocab_item['cid'],Card.lemma_id == EnglishLemma.id, Picture.id==EnglishLemma.picture_id).first()
sentence = re.sub(form, '____', vocab_item['example_sentence'])
level = 'Show'
points = 40
picture = self.request.static_url('english:/static/uploads/pictures/{0}.jpeg'.format(flashcard.Picture.name))
id = flashcard.Card.id
# Top three translations plus 7 distractors per side; pad the
# foreign side with a literal 'Distractor' when the pool is short.
translations = [translation.ForeignLemma.form for translation in DBSession.query(Translation,ForeignLemma).filter(Translation.card_id==flashcard.Card.id, ForeignLemma.id==Translation.foreign_lemma_id).order_by((Translation.count).desc()).limit(3)]
fdistractors_query = DBSession.query(ForeignLemma).filter(ForeignLemma.language_id ==flashcard.Card.language_id)
edistractors_query = DBSession.query(EnglishForm).filter(EnglishForm.id !=englishform.id)
for translation in translations:
fdistractors_query = fdistractors_query.filter(ForeignLemma.form != translation)
foreigndistractors = [result.form for result in fdistractors_query.limit(7)]
englishdistractors = [result.form for result in edistractors_query.limit(7)]
while len(foreigndistractors) < 7:
foreigndistractors.append('Distractor')
fc = {'position' : position, 'points' : points, 'level' : level, 'cid': id, 'answer':form, 'picture': picture, 'sentence': sentence, 'pos':pos, 'translations':translations, 'targetDistractors': englishdistractors, 'sourceDistractors' : foreigndistractors}
flashcard_deck.append(fc)
position +=1
self.response['flashcard_json'] = json.dumps(flashcard_deck)
return self.response
else:
error = 'Missing translation(s)'
self.response['language'] = language_name
self.response['vocabulary'] = vocabulary
self.response['vb_error'] = error
return self.response
return self.response
return self.response
def includeme(config):
    """Wire the flashcard routes and their views onto the Pyramid configurator."""
    # (route name, URL pattern, view attr, template, permission)
    view_specs = [
        # Use
        ('my_flashcards', 'my_flashcards', 'my_flashcards',
         'my_flashcards.mako', NO_PERMISSION_REQUIRED),
        ('practice_flashcards', 'practice_flashcards', 'practice_flashcards',
         'practice_flashcards.mako', NO_PERMISSION_REQUIRED),
        ('introduce_flashcards', 'introduce_flashcards', 'introduce_flashcards',
         'practice_flashcards.mako', NO_PERMISSION_REQUIRED),
        ('report_drill_results', 'report_drill_results', 'report_drill_results',
         'practice_flashcards.mako', NO_PERMISSION_REQUIRED),
        ('flashcards_demo', 'flashcards_demo', 'flashcards_demo',
         'flashcards_demo.mako', NO_PERMISSION_REQUIRED),
        # Search
        ('flashcard_tree', 'flashcard-tree/:pid/:parent_name', 'flashcard_tree',
         'flashcard_tree.mako', NO_PERMISSION_REQUIRED),
        ('search_flashcards', 'search_flashcards', 'search_flashcards',
         'search_flashcards.mako', NO_PERMISSION_REQUIRED),
        # Add
        ('add_forms', 'add-forms', 'add_forms', 'add_forms.mako', 'add'),
        ('add_form', 'add-lemmas/:word_form', 'add_form', 'add_form.mako', 'add'),
    ]
    for route_name, pattern, attr, renderer, permission in view_specs:
        config.add_route(route_name, pattern)
        config.add_view(FlashcardView, attr=attr, route_name=route_name,
                        renderer=renderer, permission=permission)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod, abstractproperty
from contextlib import contextmanager
from six import string_types
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import environment_as
from pants.util.dirutil import relativize_paths
from pants.util.meta import AbstractClass
from pants.util.process_handler import subprocess
logger = logging.getLogger(__name__)
class Executor(AbstractClass):
"""Executes java programs.
:API: public
"""
@staticmethod
def _scrub_args(classpath, main, jvm_options, args, cwd):
# Normalize every sequence argument to a list and validate `main`.
classpath = maybe_list(classpath)
if not isinstance(main, string_types) or not main:
raise ValueError('A non-empty main classname is required, given: {}'.format(main))
jvm_options = maybe_list(jvm_options or ())
args = maybe_list(args or ())
return classpath, main, jvm_options, args, cwd
class Error(Exception):
"""Indicates an error launching a java program.
:API: public
"""
class InvalidDistribution(ValueError):
"""Indicates an invalid Distribution was used to construct this runner."""
class Runner(object):
"""A re-usable executor that can run a configured java command line."""
@abstractproperty
def executor(self):
"""Returns the executor this runner uses to run itself."""
raise NotImplementedError
@property
def cmd(self):
"""Returns a string representation of the command that will be run."""
return ' '.join(self.command)
@abstractproperty
def command(self):
"""Returns a copy of the command line that will be run as a list of command line tokens."""
raise NotImplementedError
@abstractmethod
def run(self, stdout=None, stderr=None, cwd=None):
"""Runs the configured java command.
If there is a problem executing the java program subclasses should raise Executor.Error.
It's guaranteed that all arguments are valid as documented in `execute`.
:param stdout: An optional stream to pump stdout to; defaults to `sys.stdout`.
:param stderr: An optional stream to pump stderr to; defaults to `sys.stderr`.
:param string cwd: optionally set the working directory
"""
raise NotImplementedError
@abstractmethod
def spawn(self, stdout=None, stderr=None, cwd=None):
"""Spawns the configured java command.
:param stdout: An optional stream to pump stdout to; defaults to `sys.stdout`.
:param stderr: An optional stream to pump stderr to; defaults to `sys.stderr`.
:param string cwd: optionally set the working directory
"""
raise NotImplementedError
def __init__(self, distribution):
"""Constructs an Executor that can be used to launch java programs.
:param distribution: a validated java distribution to use when launching java programs.
"""
# Duck-typed check: anything exposing `java` and `validate()` is accepted.
if not hasattr(distribution, 'java') or not hasattr(distribution, 'validate'):
raise self.InvalidDistribution('A valid distribution is required, given: {}'
.format(distribution))
distribution.validate()
self._distribution = distribution
@property
def distribution(self):
"""Returns the `Distribution` this executor runs via."""
return self._distribution
def runner(self, classpath, main, jvm_options=None, args=None, cwd=None):
"""Returns an `Executor.Runner` for the given java command."""
return self._runner(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
def execute(self, classpath, main, jvm_options=None, args=None, stdout=None, stderr=None,
cwd=None):
"""Launches the java program defined by the classpath and main.
:param list classpath: the classpath for the java program
:param string main: the fully qualified class name of the java program's entry point
:param list jvm_options: an optional sequence of options for the underlying jvm
:param list args: an optional sequence of args to pass to the java program
:param string cwd: optionally set the working directory
Returns the exit code of the java program.
Raises Executor.Error if there was a problem launching java itself.
"""
runner = self.runner(classpath=classpath, main=main, jvm_options=jvm_options, args=args,
cwd=cwd)
return runner.run(stdout=stdout, stderr=stderr)
@abstractmethod
def _runner(self, classpath, main, jvm_options, args, cwd=None):
"""Subclasses should return a `Runner` that can execute the given java main."""
def _create_command(self, classpath, main, jvm_options, args, cwd=None):
# Assemble: java <jvm_options> -cp <classpath> <main> <args>
cmd = [self._distribution.java]
cmd.extend(jvm_options)
if cwd:
# Relativize classpath entries to cwd so the command is stable across machines.
classpath = relativize_paths(classpath, cwd)
cmd.extend(['-cp', os.pathsep.join(classpath), main])
cmd.extend(args)
return cmd
class CommandLineGrabber(Executor):
    """Doesn't actually execute anything, just captures the cmd line."""

    def __init__(self, distribution):
        super(CommandLineGrabber, self).__init__(distribution=distribution)
        self._command = None  # Initialized when we run something.

    def _runner(self, classpath, main, jvm_options, args, cwd=None):
        self._command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)

        class Runner(self.Runner):
            @property
            def executor(_):
                return self

            @property
            def command(_):
                return list(self._command)

            # BUG FIX: Executor.Runner declares run/spawn with an optional
            # `cwd` keyword; these overrides previously rejected it with a
            # TypeError.  It is accepted and ignored here since the command
            # was fully formed in _runner above.
            def run(_, stdout=None, stderr=None, cwd=None):
                return 0

            def spawn(_, stdout=None, stderr=None, cwd=None):
                return None

        return Runner()

    @property
    def cmd(self):
        """The captured command line as a list of tokens (None until _runner runs)."""
        return self._command
class SubprocessExecutor(Executor):
    """Executes java programs by launching a jvm in a subprocess.

    :API: public
    """

    # Environment variables scrubbed before launching the jvm: any of these
    # would let ambient user configuration leak into what should be a
    # hermetic, reproducible invocation.
    _SCRUBBED_ENV = {
        # We attempt to control the classpath for correctness, caching and invalidation reasons and
        # allowing CLASSPATH to influence would be a hermeticity leak
        'CLASSPATH': None,
        # We attempt to control jvm options and give user's explicit control in some cases as well.
        # In all cases we want predictable behavior - pants defaults, repo defaults, or user tweaks
        # specified on the command line. In addition cli options can affect outputs; ie: class debug
        # info, target classfile version, etc - all breaking hermeticity.
        '_JAVA_OPTIONS': None,
        'JAVA_TOOL_OPTIONS': None
    }

    @classmethod
    @contextmanager
    def _maybe_scrubbed_env(cls):
        """Unset the scrubbed variables for the duration, warning about any that were set."""
        for env_var in cls._SCRUBBED_ENV:
            value = os.getenv(env_var)
            if value:
                # Logger.warn is a deprecated alias for warning.
                logger.warning('Scrubbing {env_var}={value}'.format(env_var=env_var, value=value))
        with environment_as(**cls._SCRUBBED_ENV):
            yield

    def __init__(self, distribution):
        super(SubprocessExecutor, self).__init__(distribution=distribution)
        self._buildroot = get_buildroot()
        self._process = None

    def _create_command(self, classpath, main, jvm_options, args, cwd=None):
        # Default the working directory to the buildroot so classpath
        # relativization in the base class is stable.
        cwd = cwd or self._buildroot
        return super(SubprocessExecutor, self)._create_command(classpath, main, jvm_options,
                                                               args, cwd=cwd)

    def _runner(self, classpath, main, jvm_options, args, cwd=None):
        command = self._create_command(classpath, main, jvm_options, args, cwd=cwd)

        class Runner(self.Runner):
            @property
            def executor(_):
                return self

            @property
            def command(_):
                return list(command)

            # BUG FIX: Executor.Runner declares run/spawn with an optional
            # `cwd`; these overrides previously raised TypeError when it was
            # passed.  The cwd captured at _runner time is the default.
            def spawn(_, stdout=None, stderr=None, cwd=cwd):
                return self._spawn(command, stdout=stdout, stderr=stderr, cwd=cwd)

            def run(_, stdout=None, stderr=None, cwd=cwd):
                return self._spawn(command, stdout=stdout, stderr=stderr, cwd=cwd).wait()

        return Runner()

    def spawn(self, classpath, main, jvm_options=None, args=None, cwd=None, **subprocess_args):
        """Spawns the java program passing any extra subprocess kwargs on to subprocess.Popen.

        Returns the Popen process object handle to the spawned java program subprocess.

        :API: public

        :raises: :class:`Executor.Error` if there is a problem spawning the subprocess.
        """
        cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args, cwd=cwd))
        return self._spawn(cmd, cwd, **subprocess_args)

    def _spawn(self, cmd, cwd=None, **subprocess_args):
        """Launch `cmd` via subprocess.Popen under the scrubbed env, wrapping OSError."""
        with self._maybe_scrubbed_env():
            cwd = cwd or self._buildroot
            logger.debug('Executing: {cmd} args={args} at cwd={cwd}'
                         .format(cmd=' '.join(cmd), args=subprocess_args, cwd=cwd))
            try:
                return subprocess.Popen(cmd, cwd=cwd, **subprocess_args)
            except OSError as e:
                raise self.Error('Problem executing {0}: {1}'.format(self._distribution.java, e))
|
|
# Test packages (dotted-name import)
import sys
import os
import tempfile
import textwrap
import unittest
from test import support
# Helpers to create and destroy hierarchies.
def cleanout(root):
names = os.listdir(root)
for name in names:
fullname = os.path.join(root, name)
if os.path.isdir(fullname) and not os.path.islink(fullname):
cleanout(fullname)
else:
os.remove(fullname)
os.rmdir(root)
def fixdir(lst):
    """Strip importer-implementation names from a dir() listing.

    Mutates *lst* in place (each name is removed at most once) and returns it
    for call-chaining convenience.
    """
    for artifact in ("__builtins__", "__initializing__"):
        if artifact in lst:
            lst.remove(artifact)
    return lst
# XXX Things to test
#
# import package without __init__
# import package with __init__
# __init__ importing submodule
# __init__ importing global module
# __init__ defining variables
# submodule importing other submodule
# submodule importing global module
# submodule import submodule via global name
# from package import submodule
# from package import subpackage
# from package import variable (defined in __init__)
# from package import * (defined in __init__)
class TestPkg(unittest.TestCase):
    """Exercise package-import machinery against on-disk hierarchies.

    Each test materialises a throwaway package tree with mkhier() (which also
    prepends the tree's root to sys.path); setUp/tearDown restore sys.path and
    sys.modules so the tests stay order-independent.
    """

    def setUp(self):
        self.root = None
        self.pkgname = None
        self.syspath = list(sys.path)
        self.modules_before = support.modules_setup()

    def tearDown(self):
        sys.path[:] = self.syspath
        support.modules_cleanup(*self.modules_before)
        if self.root:  # Only clean if the test was actually run
            cleanout(self.root)
        # delete all modules concerning the tested hierarchy
        if self.pkgname:
            modules = [name for name in sys.modules
                       if self.pkgname in name.split('.')]
            for name in modules:
                del sys.modules[name]

    def run_code(self, code):
        """Exec *code* at module scope with this test case bound as `self`."""
        exec(textwrap.dedent(code), globals(), {"self": self})

    def mkhier(self, descr):
        """Materialise *descr* -- (space-separated path, contents-or-None)
        pairs; None means directory -- under a fresh temp dir that is put
        first on sys.path."""
        root = tempfile.mkdtemp()
        sys.path.insert(0, root)
        if not os.path.isdir(root):
            os.mkdir(root)
        for name, contents in descr:
            comps = name.split()
            fullname = root
            for c in comps:
                fullname = os.path.join(fullname, c)
            if contents is None:
                os.mkdir(fullname)
            else:
                # BUG FIX: use a context manager so the handle is closed even
                # if a write fails (the original leaked it on error).
                with open(fullname, "w") as f:
                    f.write(contents)
                    if contents and contents[-1] != '\n':
                        f.write('\n')
        self.root = root
        # package name is the name of the first item
        self.pkgname = descr[0][0]

    def test_1(self):
        hier = [("t1", None), ("t1 __init__.py", "")]
        self.mkhier(hier)
        import t1  # noqa -- importability is the assertion

    def test_2(self):
        hier = [
            ("t2", None),
            ("t2 __init__.py", "'doc for t2'"),
            ("t2 sub", None),
            ("t2 sub __init__.py", ""),
            ("t2 sub subsub", None),
            ("t2 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
        # This exec crap is needed because Py3k forbids 'import *' outside
        # of module-scope and __import__() is insufficient for what we need.
        s = """
            import t2
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub', 't2'])
            """
        self.run_code(s)
        from t2 import sub
        from t2.sub import subsub
        from t2.sub.subsub import spam
        self.assertEqual(sub.__name__, "t2.sub")
        self.assertEqual(subsub.__name__, "t2.sub.subsub")
        self.assertEqual(sub.subsub.__name__, "t2.sub.subsub")
        for name in ['spam', 'sub', 'subsub', 't2']:
            # BUG FIX: the original looked up the literal string "name",
            # which tested the loop variable itself and could never fail.
            self.assertTrue(locals()[name], "Failed to import %s" % name)
        import t2.sub
        import t2.sub.subsub
        self.assertEqual(t2.__name__, "t2")
        self.assertEqual(t2.sub.__name__, "t2.sub")
        self.assertEqual(t2.sub.subsub.__name__, "t2.sub.subsub")
        s = """
            from t2 import *
            self.assertEqual(dir(), ['self', 'sub'])
            """
        # BUG FIX: the original used assertTrue(dir(), [...]) which passed
        # the expected list as the *msg* argument and always succeeded.
        self.run_code(s)

    def test_3(self):
        hier = [
            ("t3", None),
            ("t3 __init__.py", ""),
            ("t3 sub", None),
            ("t3 sub __init__.py", ""),
            ("t3 sub subsub", None),
            ("t3 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        import t3.sub.subsub
        self.assertEqual(t3.__name__, "t3")
        self.assertEqual(t3.sub.__name__, "t3.sub")
        self.assertEqual(t3.sub.subsub.__name__, "t3.sub.subsub")

    def test_4(self):
        # The *.py files shadowed by same-named packages must never be loaded.
        hier = [
            ("t4.py", "raise RuntimeError('Shouldnt load t4.py')"),
            ("t4", None),
            ("t4 __init__.py", ""),
            ("t4 sub.py", "raise RuntimeError('Shouldnt load sub.py')"),
            ("t4 sub", None),
            ("t4 sub __init__.py", ""),
            ("t4 sub subsub.py",
             "raise RuntimeError('Shouldnt load subsub.py')"),
            ("t4 sub subsub", None),
            ("t4 sub subsub __init__.py", "spam = 1"),
        ]
        self.mkhier(hier)
        s = """
            from t4.sub.subsub import *
            self.assertEqual(spam, 1)
            """
        self.run_code(s)

    def test_5(self):
        # A submodule named 'string' must shadow the stdlib module inside t5.
        hier = [
            ("t5", None),
            ("t5 __init__.py", "import t5.foo"),
            ("t5 string.py", "spam = 1"),
            ("t5 foo.py",
             "from . import string; assert string.spam == 1"),
        ]
        self.mkhier(hier)
        import t5
        s = """
            from t5 import *
            self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
            """
        self.run_code(s)
        import t5
        self.assertEqual(fixdir(dir(t5)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', '__path__', 'foo',
                          'string', 't5'])
        self.assertEqual(fixdir(dir(t5.foo)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', 'string'])
        self.assertEqual(fixdir(dir(t5.string)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', 'spam'])

    def test_6(self):
        # __all__ controls what 'from t6 import *' binds.
        hier = [
            ("t6", None),
            ("t6 __init__.py",
             "__all__ = ['spam', 'ham', 'eggs']"),
            ("t6 spam.py", ""),
            ("t6 ham.py", ""),
            ("t6 eggs.py", ""),
        ]
        self.mkhier(hier)
        import t6
        self.assertEqual(fixdir(dir(t6)),
                         ['__all__', '__cached__', '__doc__', '__file__',
                          '__loader__', '__name__', '__package__', '__path__'])
        s = """
            import t6
            from t6 import *
            self.assertEqual(fixdir(dir(t6)),
                             ['__all__', '__cached__', '__doc__', '__file__',
                              '__loader__', '__name__', '__package__',
                              '__path__', 'eggs', 'ham', 'spam'])
            self.assertEqual(dir(), ['eggs', 'ham', 'self', 'spam', 't6'])
            """
        self.run_code(s)

    def test_7(self):
        # NOTE(review): the "t7 sub .py" entry creates a file literally named
        # ".py" -- it looks garbled (probably meant "t7 sub subsub.py", as in
        # test_4); preserved as-is to avoid changing what the test exercises.
        hier = [
            ("t7.py", ""),
            ("t7", None),
            ("t7 __init__.py", ""),
            ("t7 sub.py",
             "raise RuntimeError('Shouldnt load sub.py')"),
            ("t7 sub", None),
            ("t7 sub __init__.py", ""),
            ("t7 sub .py",
             "raise RuntimeError('Shouldnt load subsub.py')"),
            ("t7 sub subsub", None),
            ("t7 sub subsub __init__.py",
             "spam = 1"),
        ]
        self.mkhier(hier)
        # `from X import Y as Z` must not bind X (or intermediates) locally.
        t7, sub, subsub = None, None, None
        import t7 as tas
        self.assertEqual(fixdir(dir(tas)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', '__path__'])
        self.assertFalse(t7)
        from t7 import sub as subpar
        self.assertEqual(fixdir(dir(subpar)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', '__path__'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        from t7.sub import subsub as subsubsub
        self.assertEqual(fixdir(dir(subsubsub)),
                         ['__cached__', '__doc__', '__file__', '__loader__',
                          '__name__', '__package__', '__path__', 'spam'])
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)
        from t7.sub.subsub import spam as ham
        self.assertEqual(ham, 1)
        self.assertFalse(t7)
        self.assertFalse(sub)
        self.assertFalse(subsub)

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_8(self):
        hier = [
            ("t8", None),
            ("t8 __init__" + os.extsep + "py", "'doc for t8'"),
        ]
        self.mkhier(hier)
        import t8
        self.assertEqual(t8.__doc__, "doc for t8")
def test_main():
# Run every test class in this module via the CPython regrtest helper.
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
# -*- coding: utf-8 -*-
"""
celery.loaders.base
~~~~~~~~~~~~~~~~~~~
Loader base class.
"""
from __future__ import absolute_import
import anyjson
import imp as _imp
import importlib
import os
import re
import sys
from datetime import datetime
from kombu.utils import cached_property
from kombu.utils.encoding import safe_str
from celery import signals
from celery.datastructures import DictAttribute, force_mapping
from celery.five import reraise, string_t
from celery.utils.functional import maybe_list
from celery.utils.imports import (
import_from_cwd, symbol_by_name, NotAPackage, find_module,
)
__all__ = ['BaseLoader']
# NOTE(review): not referenced in this excerpt; presumably guards against
# recursive default-module imports elsewhere in the loader machinery -- confirm.
_RACE_PROTECTION = False
# Error template shown when a configuration module name cannot be imported.
CONFIG_INVALID_NAME = """\
Error: Module '{module}' doesn't exist, or it's not a valid \
Python module name.
"""
# Variant adding a suggestion (presumably when the name carries a file suffix).
CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\
Did you mean '{suggest}'?
"""
class BaseLoader(object):
"""The base class for loaders.
Loaders handles,
* Reading celery client/worker configurations.
* What happens when a task starts?
See :meth:`on_task_init`.
* What happens when the worker starts?
See :meth:`on_worker_init`.
* What happens when the worker shuts down?
See :meth:`on_worker_shutdown`.
* What modules are imported to find tasks?
"""
builtin_modules = frozenset()  # task modules always imported (empty by default)
configured = False  # NOTE(review): not set in this excerpt -- presumably flipped once config loads
override_backends = {}  # backend-name overrides (empty by default)
worker_initialized = False  # guards init_worker() so it runs at most once
_conf = None  # cached configuration mapping, set by config_from_object()
def __init__(self, app, **kwargs):
# Bind to the owning app; extra keyword arguments are accepted and ignored.
self.app = app
self.task_modules = set()  # module names registered via import_task_module()
def now(self, utc=True):
# Current time: UTC by default, naive local time when utc=False.
if utc:
return datetime.utcnow()
return datetime.now()
def on_task_init(self, task_id, task):
"""This method is called before a task is executed."""
pass
def on_process_cleanup(self):
"""This method is called after a task is executed."""
pass
def on_worker_init(self):
"""This method is called when the worker (:program:`celery worker`)
starts."""
pass
def on_worker_shutdown(self):
"""This method is called when the worker (:program:`celery worker`)
shuts down."""
pass
def on_worker_process_init(self):
"""This method is called when a child process starts."""
pass
def import_task_module(self, module):
self.task_modules.add(module)
return self.import_from_cwd(module)
def import_module(self, module, package=None):
return importlib.import_module(module, package=package)
def import_from_cwd(self, module, imp=None, package=None):
return import_from_cwd(
module,
self.import_module if imp is None else imp,
package=package,
)
def import_default_modules(self):
signals.import_modules.send(sender=self.app)
return [
self.import_task_module(m) for m in (
tuple(self.builtin_modules) +
tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) +
tuple(maybe_list(self.app.conf.CELERY_INCLUDE))
)
]
def init_worker(self):
if not self.worker_initialized:
self.worker_initialized = True
self.import_default_modules()
self.on_worker_init()
def shutdown_worker(self):
self.on_worker_shutdown()
def init_worker_process(self):
self.on_worker_process_init()
def config_from_object(self, obj, silent=False):
if isinstance(obj, string_t):
try:
obj = self._smart_import(obj, imp=self.import_from_cwd)
except (ImportError, AttributeError):
if silent:
return False
raise
self._conf = force_mapping(obj)
return True
def _smart_import(self, path, imp=None):
imp = self.import_module if imp is None else imp
if ':' in path:
# Path includes attribute so can just jump here.
# e.g. ``os.path:abspath``.
return symbol_by_name(path, imp=imp)
# Not sure if path is just a module name or if it includes an
# attribute name (e.g. ``os.path``, vs, ``os.path.abspath``).
try:
return imp(path)
except ImportError:
# Not a module name, so try module + attribute.
return symbol_by_name(path, imp=imp)
def _import_config_module(self, name):
try:
self.find_module(name)
except NotAPackage:
if name.endswith('.py'):
reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format(
module=name, suggest=name[:-3])), sys.exc_info()[2])
reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format(
module=name)), sys.exc_info()[2])
else:
return self.import_from_cwd(name)
def find_module(self, module):
return find_module(module)
def cmdline_config_parser(
self, args, namespace='celery',
re_type=re.compile(r'\((\w+)\)'),
extra_types={'json': anyjson.loads},
override_types={'tuple': 'json',
'list': 'json',
'dict': 'json'}):
from celery.app.defaults import Option, NAMESPACES
namespace = namespace.upper()
typemap = dict(Option.typemap, **extra_types)
def getarg(arg):
"""Parse a single configuration definition from
the command-line."""
# ## find key/value
# ns.key=value|ns_key=value (case insensitive)
key, value = arg.split('=', 1)
key = key.upper().replace('.', '_')
# ## find namespace.
# .key=value|_key=value expands to default namespace.
if key[0] == '_':
ns, key = namespace, key[1:]
else:
# find namespace part of key
ns, key = key.split('_', 1)
ns_key = (ns and ns + '_' or '') + key
# (type)value makes cast to custom type.
cast = re_type.match(value)
if cast:
type_ = cast.groups()[0]
type_ = override_types.get(type_, type_)
value = value[len(cast.group()):]
value = typemap[type_](value)
else:
try:
value = NAMESPACES[ns][key].to_python(value)
except ValueError as exc:
# display key name in error message.
raise ValueError('{0!r}: {1}'.format(ns_key, exc))
return ns_key, value
return dict(getarg(arg) for arg in args)
def mail_admins(self, subject, body, fail_silently=False,
sender=None, to=None, host=None, port=None,
user=None, password=None, timeout=None,
use_ssl=False, use_tls=False):
message = self.mail.Message(sender=sender, to=to,
subject=safe_str(subject),
body=safe_str(body))
mailer = self.mail.Mailer(host=host, port=port,
user=user, password=password,
timeout=timeout, use_ssl=use_ssl,
use_tls=use_tls)
mailer.send(message, fail_silently=fail_silently)
def read_configuration(self, env='CELERY_CONFIG_MODULE'):
try:
custom_config = os.environ[env]
except KeyError:
pass
else:
if custom_config:
usercfg = self._import_config_module(custom_config)
return DictAttribute(usercfg)
return {}
def autodiscover_tasks(self, packages, related_name='tasks'):
self.task_modules.update(
mod.__name__ for mod in autodiscover_tasks(packages or (),
related_name) if mod)
@property
def conf(self):
"""Loader configuration."""
if self._conf is None:
self._conf = self.read_configuration()
return self._conf
@cached_property
def mail(self):
return self.import_module('celery.utils.mail')
def autodiscover_tasks(packages, related_name='tasks'):
    """Find the ``related_name`` module of every package in ``packages``.

    Guarded by the module-level ``_RACE_PROTECTION`` flag: a re-entrant
    call (triggered by the imports themselves) returns an empty tuple
    instead of recursing.
    """
    global _RACE_PROTECTION
    if _RACE_PROTECTION:
        return ()
    _RACE_PROTECTION = True
    try:
        discovered = []
        for package in packages:
            discovered.append(find_related_module(package, related_name))
        return discovered
    finally:
        _RACE_PROTECTION = False
def find_related_module(package, related_name):
    """Given a package name and a module name, try to find that module.

    Django 1.7 allows specifying a class name in INSTALLED_APPS
    (Issue #2248), so when ``package`` itself does not import, the last
    dotted segment is dropped and the parent package is used instead.
    """
    try:
        importlib.import_module(package)
    except ImportError:
        # ``package`` may really be ``pkg.SomeClass``: retry with the
        # parent ``pkg``.
        package = package.rpartition('.')[0]

    try:
        search_path = importlib.import_module(package).__path__
    except AttributeError:
        # Plain module (no __path__): nothing to search inside.
        return

    try:
        _imp.find_module(related_name, search_path)
    except ImportError:
        return

    return importlib.import_module('{0}.{1}'.format(package, related_name))
|
|
#!/usr/bin/env python2
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gen contains all functionalities to generate transperf's graphs."""
import getopt
import json
import logging
import os
import shutil
import socket
import sys
import threading
import urllib
from scapy.all import IP
from scapy.all import IPv6
from scapy.all import TCP
from scapy.all import UDP
from transperf import cfgutil
from transperf import js
from transperf import log
from transperf import metric
from transperf import outparser
from transperf import shell
from transperf import tcp
from transperf import templates
from transperf import TestCase
from transperf.metric import AppLatencyMetricPublisher
from transperf.metric import ConvergenceMetricPublisher
from transperf.metric import KlogMetricsPublisher
from transperf.metric import parse_float
from transperf.metric import RetxRateMetricPublisher
from transperf.metric import RTTMetricPublisher
from transperf.metric import SerialDelayMetricPublisher
from transperf.metric import TputMetricPublisher
from transperf.path import all_files
LOG = logging.getLogger('transperf/gen')
def _merge_pcaps(exp_dir):
    """Merges all the pcaps in the experiment directory."""
    # Group the per-sender captures by file name (e.g. eth1.pcap).
    by_name = {}
    for directory, fname in all_files(exp_dir, regex=r'.*\.pcap$'):
        if directory == exp_dir:
            continue
        by_name.setdefault(fname, []).append(os.path.join(directory, fname))
    # Merge each group in the background, then wait for all of them.
    jobs = []
    for fname, paths in by_name.items():
        merged_path = os.path.join(exp_dir, 'all.' + fname)
        jobs.append(shell.bg('mergecap -F pcap -w %s %s' % (
            merged_path, ' '.join(paths))))
    for job in jobs:
        shell.wait(job)
def _merge_sysouts(exp_dir):
    """Merges mod.out and sys.out (sysctl) files into a single file.

    The format of the new file is:

        Sender 0:
            /sys/...=...

        Sender 1:
            /sys/...=...

    Args:
        exp_dir: The experiment's output directory.
    """
    # ``with`` guarantees the merged file is closed even on error; the
    # two sections previously duplicated the same copy loop inline.
    with open(os.path.join(exp_dir, 'sys.out'), 'w') as merged_file:
        _merge_section(merged_file, exp_dir, 'Module Params', 'mod.out', '\n')
        _merge_section(merged_file, exp_dir, 'Sysctl Params', 'sys.out',
                       '\n\n')


def _merge_section(merged_file, exp_dir, title, name, trailer):
    """Appends one titled section to ``merged_file``.

    Copies every per-sender ``name`` file (sorted for a deterministic
    order), each prefixed with a "Sender <id>" sub-header.

    Args:
        merged_file: Open writable file receiving the section.
        exp_dir: The experiment's output directory.
        title: Section title (underlined with '=').
        name: Base name of the per-sender files to merge.
        trailer: Text written after each sender's content.
    """
    merged_file.write('%s\n' % title)
    merged_file.write('%s\n' % ('=' * len(title)))
    for d, f in sorted(all_files(exp_dir, name=name)):
        # Skip the merged output itself, which lives directly in exp_dir.
        if d == exp_dir:
            continue
        sender_id = d[len(exp_dir) + 1:]
        with open(os.path.join(d, f)) as src:
            lines = src.readlines()
        merged_file.write('Sender %s\n' % sender_id)
        merged_file.write('---------\n')
        merged_file.writelines(lines)
        merged_file.write(trailer)
def gen_xplots(data_dir):
    """Generates xplots for all the experiments in the data directory."""
    for _, _, _, _, exp_dir in cfgutil.exps(data_dir):
        xpl_paths = []
        conn_info = outparser.ConnInfo(
            [os.path.join(d, f) for d, f in
             all_files(exp_dir, name='conn.info')])
        rcv_ip = outparser.RecvInfo(os.path.join(exp_dir, 'R',
                                                 'recv.info')).ip
        ports = conn_info.ports()
        all_lines = []
        # Run tcptrace over every per-sender pcap in parallel, then wait.
        procs = []
        for d, f in all_files(exp_dir, regex=r'.*\.pcap$'):
            if d == exp_dir:
                continue
            procs.append(shell.bg(
                'tcptrace -CRSzxy --output_dir="%s" "%s"' % (
                    d, os.path.join(d, f))))
        for p in procs:
            shell.wait(p)
        for d, f in all_files(exp_dir, regex=r'.*\.pcap$'):
            for xd, xf in all_files(d, regex=r'.*\.xpl$'):
                # Only process time sequence graphs.
                if xf.find('_tsg') == -1:
                    continue
                xplf = open(os.path.join(xd, xf))
                lines = xplf.readlines()
                # The first 3 lines in the xplot are for the title.
                # The last line is the draw command. The rest (3:-1)
                # is data. We save the rest in all_lines in order to
                # create one xplot that contains the time sequence
                # graphs for all flows.
                all_lines += lines[3:-1]
                # Parse the ip and port from the xplot's title. Note that
                # the addresses may be either IPv4 or IPv6.
                parts = lines[2].split('_==>_')[0].split(':')
                ip_base = ':'.join(parts[:-1])
                port = int(parts[-1])
                try:
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]
                except socket.gaierror:
                    # Not IPv4; resolve as IPv6 instead.
                    ip = socket.getaddrinfo(ip_base, 0, socket.AF_INET6,
                                            socket.SOCK_STREAM,
                                            socket.IPPROTO_TCP)[0][4][0]
                # If the ip and port are not from this experiment ignore
                # this file.
                if ip == rcv_ip or port not in ports:
                    continue
                # Rewrite the title of the xplot as:
                #     ==> CC -- IP:PORT
                addr, _, cc, _, _, _, _ = conn_info.conn_info(port)
                lines[2] = '==>%s -- %s:%s\n' % (cc, addr, port)
                # Save the file.
                xpath = os.path.join(xd, 'out-%s.xpl' % port)
                xpl_paths.append(xpath)
                oxplf = open(xpath, 'w')
                oxplf.writelines(lines)
                oxplf.close()
        # Prepend the title to all_lines and append the draw command
        # (ie, go).
        all_lines = (['dtime signed\n', 'title\n', '===> All flows\n'] +
                     all_lines + ['go'])
        axpath = os.path.join(exp_dir, 'out-all.xpl')
        xpl_paths.append(axpath)
        axplf = open(axpath, 'w')
        axplf.writelines(all_lines)
        axplf.close()
        # Bundle every generated xplot into one downloadable tarball.
        shell.run('tar -C %s -cvjf %s %s' % (
            exp_dir,
            os.path.join(exp_dir, 'xplots.tbz2'),
            ' '.join([os.path.relpath(p, exp_dir) for p in xpl_paths])))
def gen_transperf_pages(data_dir, has_xplot=False,
                        open_page=True, skip_pcap_scan=False):
    """Generate transperf pages for all the experiments in the data
    directory.

    Args:
        data_dir: The path to the data directory.
        has_xplot: Whether we have generated xplots.
        open_page: Whether to launch the browser at the end.
        skip_pcap_scan: Whether to skip pcap scan.

    Returns:
        1 if test case errors are present, or 0.
    """
    # Header of the experiment index table; one row is appended per
    # experiment below.
    html = '''
<table>
<thead>
<tr>
<th>#</th>
<th>Connections</th>
<th>RTT (ms)</th>
<th>BW (Mbps)</th>
<th>Buf (pkts)</th>
<th>Slot</th>
<th>Policer (Mbps)</th>
<th>ILoss (%)</th>
<th>OLoss (%)</th>
<th>Dur (sec)</th>
<th>Tputs (Mpbs)</th>
<th>Retx</th>
<th>p95 RTT (ms)</th>
<th>Med RTT (ms)</th>
<th>Lock on BW (Mbps)</th>
<th>Status</th>
<th>Links</th>
</tr>
</thead>
<tbody>'''
    # (experiment field, unit label) pairs for the parameter columns.
    param_cols = [
        ('conn', ''),
        ('rtt', ''),
        ('bw', ''),
        ('buf', 'pkts'),
        ('slot', ''),
        ('policer', ''),
        ('loss', '%'),
        ('out_loss', '%'),
        ('dur', 'sec')
    ]
    # (metric name, unit label, printf format, multiplier) per column.
    metric_cols = [
        ('tool_tputs', 'Mbps', '%s', 1.0),  # throughput from netperf or similar
        ('retx', '', '%.2f%%', 100.0),  # convert retx fraction to percent
        ('p95_rtt', 'ms', '%s', 1.0),
        ('med_rtt', 'ms', '%s', 1.0),
        ('lock_on_bw', 'Mbps', '%s', 1.0),
    ]
    exps = cfgutil.exps(data_dir)
    has_error = 0
    for i, (exp, cfg_dir, cfg_file, exp_name, exp_dir) in enumerate(exps):
        # Generate the per-experiment pages and collect its metrics.
        metrics, errs = gen_exp(exp, exp_dir, has_xplot,
                                skip_pcap_scan)
        if errs:
            has_error = 1
        if open_page:
            shell.bg('x-www-browser %s/index.html' % exp_dir)
        exp_info = _exp_info(exp_dir)
        fields = exp_info.fields()
        # URL-escape the experiment's relative path for use in links.
        esc_dir = urllib.quote(os.path.join('__out', cfg_dir, exp_name))
        html += '<tr>'
        html += '<td>%s</td>' % (i + 1)
        for name, unit in param_cols:
            v = fields[name]
            html += '<td>%s %s</td>' % (v, unit)
        for name, unit, fmt, mul in metric_cols:
            v = ', '.join([(fmt % (m * mul))
                           for m in metrics[name].as_array()])
            html += '<td>%s %s</td>' % (v, unit)
        html += '<td>'
        if not errs:
            html += '<span class="info">PASSED</span>'
        else:
            html += '<span class="error">FAILED</span><br>'
            html += '<br>'.join(errs)
        html += '</td>'
        html += '<td>'
        html += (''
                 '<a href="%(dir)s/index.html">dashboard</a><br>'
                 '<a href="%(dir)s/timeseq.html">time seq</a><br>'
                 '<a href="%(dir)s/util.html">utilization</a><br>'
                 '<a href="%(dir)s/klog.html">klog graphs</a><br>'
                 '<a href="%(dir)s/all.eth1.pcap">pcap</a><br>'
                 '<a href="%(dir)s/metrics">metrics</a><br>'
                 '<a href="%(dir)s/sys.out">sys params</a><br>'
                 '<a href="%(cfg)s">config file</a><br>'
                 ) % {
                     'dir': esc_dir,
                     'cfg': cfg_file,
                 }
        if has_xplot:
            html += '<a href="%s/xplots.tbz2">xplots</a><br>' % esc_dir
        html += '</td></tr>'
    html += '</tbody></table>'
    inf = open(os.path.join(data_dir, 'index.html'), 'w')
    inf.write(templates.INDEX % {
        'title': 'experiments',
        'exps': html,
    })
    inf.close()
    return has_error
def _dump_js_files(exp_dir):
    """Dumps the common javascript files in the experiments directory."""
    assets = (
        ('jquery.js', js.JQUERY),
        ('dygraphs.js', js.DYGRAPHS),
        ('dygraphs.css', js.DYGRAPHS_CSS),
        ('transperf.js', js.TRANSPERF),
    )
    for name, content in assets:
        out = open(os.path.join(exp_dir, name), 'w')
        out.write(content)
        out.close()
# Number of buckets used for generating utilization graphs: each
# experiment's duration is divided into this many equal-length epochs.
BUCKETS = 100
class UtilMetricAndPageGenerator(metric.MetricPublisher):
    """Generates the utilization graphs and publishes utilization metrics.

    Attributes:
        _exp_dir: The experiment directory.
        _rcv_ip: The receiver IP address.
        _html_file: The utilization graph html file.
        _columns: The list of column headers.
        _ports: The map of ports to column index.
        _row: Last row of the data that represents the bytes acked in each
              epoch.
        _epoch_dur: The duration of an epoch in sec.
        _end_time: The end of the current epoch.
        _max_bw: The maximum bandwidth.
        _sum_bw: The sum of bandwidths.
    """

    def __init__(self):
        super(UtilMetricAndPageGenerator, self).__init__()
        self._exp_dir = None
        self._rcv_ip = None
        self._html_file = None
        self._columns = []
        self._ports = {}
        # Last cumulative ack value seen per port.
        self._last_ack = {}
        # Per-port list of outstanding SACK blocks not yet cumulatively
        # acknowledged.
        self._sacked = {}
        self._row = []
        self._epoch_dur = 0.0
        self._end_time = 0.0
        # Metrics.
        self._max_bw = {}
        self._sum_bw = {}
        # Number of epochs (rows) emitted so far.
        self._buckets = 0

    def _add_column(self, column_title):
        """Adds a column.

        Args:
            column_title: The title of the column.

        Returns:
            The index of this column.
        """
        self._columns.append(column_title)
        self._row.append(0)
        return len(self._columns) - 1

    def _reset_row(self):
        """Resets the row."""
        self._row = [0] * len(self._columns)

    def _dump_row(self):
        """Write the row in the html file."""
        # Replace bytes with rate (bits per second over this epoch).
        for i in xrange(1, len(self._row)):
            self._row[i] *= 8
            self._row[i] /= self._epoch_dur
        self._html_file.write(json.dumps(self._row))
        self._html_file.write(',\n')
        self._buckets += 1
        # Fold this epoch's per-port rates into the max/sum accumulators.
        for port, index in self._ports.items():
            bw = self._row[index]
            if port not in self._max_bw:
                self._max_bw[port] = bw
            else:
                self._max_bw[port] = max(self._max_bw[port], bw)
            if port not in self._sum_bw:
                self._sum_bw[port] = bw
            else:
                self._sum_bw[port] += bw

    def begin(self, exp, exp_dir, rcv_ip):
        """Opens the html file and initializes the epoch bookkeeping.

        See the outparser.Visitor interface.
        """
        # The first column is the timestamp.
        self._add_column('time')
        self._exp_dir = exp_dir
        self._rcv_ip = rcv_ip
        self._epoch_dur = exp.dur / float(BUCKETS)
        self._end_time = self._epoch_dur
        self._html_file = open(os.path.join(exp_dir, 'util.html'), 'w')
        self._html_file.write(templates.UTIL_HEAD % {})
        self._html_file.write('buckets = [')

    def visit_conn(self, ip, port, tool, cc, params, start, dur, tput):
        """Registers one column per connection.

        See the outparser.Visitor interface.
        """
        port_index = self._add_column('%s %s:%s' % (cc, ip, port))
        self._ports[port] = port_index
        self._max_bw[port] = 0
        self._sum_bw[port] = 0
        self._sacked[port] = []

    def visit_ss_log(self, time, data):
        """Accumulates acked-byte deltas from an ss(8) log sample.

        See the outparser.Visitor interface.
        """
        if 'port' not in data:
            return
        port = data['port']
        port_index = self._ports[port]
        acked = 0
        if 'bytes_acked' in data:
            acked = data['bytes_acked']
        if acked < 0:
            return
        # Advance to the epoch containing this sample; the 1ms epsilon
        # tolerates floating-point rounding of timestamps.
        while self._end_time <= time+0.001:
            self._dump_row()
            self._reset_row()
            self._end_time += self._epoch_dur
            self._row[0] = self._end_time
        # Only the delta since the last cumulative counter is new data.
        self._row[port_index] += acked - self._last_ack.get(port, 0)
        self._last_ack[port] = acked

    def visit_packet(self, time, packet):
        """Accounts newly acked (and SACKed) bytes from a receiver packet.

        See the outparser.Visitor interface.
        """
        if (IP not in packet and IPv6 not in packet) or TCP not in packet:
            return
        iph = packet[IP] if IP in packet else packet[IPv6]
        tcph = packet[TCP]
        port = tcph.dport
        # Process only valid ack from receiver
        if (iph.src != self._rcv_ip or
                port not in self._ports or
                not tcph.flags & 0x10):
            return
        # Ignore RST.
        if tcph.flags & 0x4:
            return
        # Set the last acknowledged sequence upon receiving the first
        # packet.
        if port not in self._last_ack:
            self._last_ack[port] = tcph.ack
            return
        # Move the time ahead until we pass this packet's timestamp.
        # Note that it is very important to generate rows of 0 values, and
        # because of that we need to have a loop here and generate 0 value
        # buckets.
        while self._end_time <= time:
            self._dump_row()
            self._reset_row()
            self._end_time += self._epoch_dur
            self._row[0] = self._end_time
        port_index = self._ports[port]
        ack = tcph.ack
        sacks = tcp.sacks(tcph)
        # Fetch the state.
        last_ack = self._last_ack[port]
        sacked = self._sacked[port]
        bucket_bytes = self._row[port_index]
        if tcp.after(ack, last_ack):
            acked = tcp.diff_seq(ack, last_ack)
            # Bytes already counted via SACK blocks below the new
            # cumulative ack must not be counted twice.
            for sack in sacked:
                if tcp.after(sack[1], ack):
                    break
                acked -= tcp.sack_block_size(sack)
                sacked = sacked[1:]
            bucket_bytes += acked
            last_ack = ack
        for sack in sacks:
            if tcp.after(sack[1], last_ack):
                sacked, sbytes = tcp.merge_sack_block_into_list(sacked, sack)
                bucket_bytes += sbytes
        # Store the state.
        self._last_ack[port] = last_ack
        self._sacked[port] = sacked
        self._row[port_index] = bucket_bytes

    def end(self):
        """Write the html file.

        See outparser.Visitor interface.
        """
        # Dump the last row.
        self._dump_row()
        self._html_file.write('];\n')
        self._html_file.write('var cols = %s;' % self._columns)
        self._html_file.write(templates.UTIL_FOOT % {})
        self._html_file.close()

    def publish_metrics(self):
        """Publish max_bw and avg_bw in metrics.

        See the metric.MetricPublisher interface.

        Returns:
            A tuple of [max_bw, avg_bw]
        """
        max_bw = metric.Metric('max_bw')
        for port, bw in self._max_bw.items():
            max_bw.set(port, bw / 1000000.)  # Mbps
        avg_bw = metric.Metric('avg_bw')
        for port, bw in self._sum_bw.items():
            if self._buckets:
                avg_bw.set(port, bw / 1000000. / self._buckets)  # Mbps
            else:
                avg_bw.set(port, 0)
        return [max_bw, avg_bw]
def _dump_metrics(exp_dir, metrics):
"""Dump metrics in the metrics file in the experiment's output directory.
Args:
exp_dir: The experiment directory.
metrics: The dictionary of metrics.
"""
metric_file = open(os.path.join(exp_dir, 'metrics'), 'w')
for name in sorted(metrics.keys()):
metric_file.write('%s=%s\n' % (name, metrics[name]))
metric_file.close()
def _log_metrics(exp, metrics):
    """Log metrics in the metrics file in the experiment's std out.

    Args:
        exp: The experiment object.
        metrics: The dictionary of metrics.
    """
    LOG.debug('metrics of exp=%s', exp)
    # Log each metric on its own line, sorted by name.
    for name in sorted(metrics):
        LOG.debug('%s=%s', name, metrics[name])
def _exp_info(exp_dir):
    """Returns the experiment information stored in exp_dir."""
    info_path = os.path.join(exp_dir, 'exp.info')
    return outparser.ExpInfo(info_path)
class KlogCompressor(outparser.Visitor):
    """Separates klogs of each port into its own file and compresses them
    together.

    Attributes:
        _klog_files: Maps each port to its open klog file.
        _exp_dir: The experiment directory.
    """

    def __init__(self):
        super(KlogCompressor, self).__init__()
        self._klog_files = {}
        self._exp_dir = None

    def begin(self, exp, exp_dir, rcv_ip):
        """Stores the experiment directory.

        See the outparser.Visitor interface.
        """
        self._exp_dir = exp_dir

    def visit_klog(self, time, line, match):
        """Visits a klog entry and appends it to the appropriate file.

        See the outparser.Visitor interface.
        """
        port = match['port']
        klogf = self._klog_files.get(port)
        if klogf is None:
            # First entry for this port: open its dedicated log file.
            path = os.path.join(self._exp_dir, 'kern-debug-%s.log' % port)
            klogf = open(path, 'w')
            self._klog_files[port] = klogf
        klogf.write(line)

    def end(self):
        """Writes the klog compressed file.

        See the outparser.Visitor interface.
        """
        names = []
        for _, klogf in self._klog_files.iteritems():
            # Should drop the directory prefix to have a flat tarball.
            names.append(os.path.basename(klogf.name))
            klogf.close()
        shell.run('tar -C %s -cvjf %s %s' % (
            self._exp_dir,
            os.path.join(self._exp_dir, 'kern-debug.tbz2'),
            ' '.join(names)))
class TimeSeqPageGenerator(outparser.Visitor):
    """Generates the time sequence graph.

    The time sequence data consists of rows in the following format:

        [time, seq1, ack1, win1, sack1, seq2, ack2, win2, sack2, ...]

    That is, the length of each row is equal to 1 + (4 * number of flows).

    Attributes:
        _html_file: The html file.
        _rcv_ip: The receiver IP address.
        _ports: The dictionary of ports to legend titles.
        _port_index: The starting index of port in each row.
        _min_seq: The minimum sequence of each port.
        _win_scale: The window scale of each port.
        _row: The current row of data.
        _max_seq: The maximum sequence seen in the data.
    """

    def __init__(self):
        super(TimeSeqPageGenerator, self).__init__()
        self._html_file = None
        self._rcv_ip = None
        self._ports = {}
        self._port_index = {}
        self._min_seq = {}
        self._win_scale = {}
        # Slot 0 is the timestamp; each flow owns the next 4 slots
        # (seq, ack, win, sack).
        self._row = [0]
        self._max_seq = -1

    def begin(self, exp, exp_dir, rcv_ip):
        """Opens the html file and starts the data array.

        See the outparser.Visitor interface.
        """
        self._rcv_ip = rcv_ip
        self._html_file = open(os.path.join(exp_dir, 'timeseq.html'), 'w')
        self._html_file.write(templates.TIMESEQ_HEAD % {})
        # Open the data array. This array will be filled in visit_packet().
        self._html_file.write('var data = [')

    def end(self):
        """Write the HTML file.

        See the outparser.Visitor interface.
        """
        # Close the data array.
        self._html_file.write('];')
        self._html_file.write('var ports = %s;' %
                              json.dumps(self._ports.values()))
        self._html_file.write('var max_seq = %s;' % self._max_seq)
        self._html_file.write(templates.TIMESEQ_TAIL % {})
        self._html_file.close()

    def visit_conn(self, ip, port, tool, cc, params, start, dur, tput):
        """Stores the ports.

        See the outparser.Visitor interface.
        """
        port = int(port)
        self._ports[port] = '%s_%s' % (cc, port)
        # Reserve 4 consecutive row slots for this flow.
        self._port_index[port] = 4 * (len(self._ports) - 1) + 1
        self._row += [None] * 4
        self._win_scale[port] = 0

    def visit_ss_log(self, time, data):
        """Updates the flow's ack column from an ss(8) log sample.

        See the outparser.Visitor interface.
        """
        if 'port' not in data:
            return
        port = data['port']
        port_index = self._port_index[port]
        prev_time = self._row[0]
        # Emit at most one row per millisecond.
        if time - prev_time > 0.001:
            self._dump_row()
            self._row[0] = time
        if 'bytes_acked' in data:
            acked = data['bytes_acked']
            self._row[port_index + 1] = max(self._row[port_index + 1], acked)
            self._max_seq = max(self._max_seq, acked)

    def visit_packet(self, time, packet):
        """Generates the time sequence data.

        See the outparser.Visitor interface.
        """
        if (IP not in packet and IPv6 not in packet) or TCP not in packet:
            return
        iph = packet[IP] if IP in packet else packet[IPv6]
        tcph = packet[TCP]
        # Receiver packets are keyed by destination port, sender packets
        # by source port.
        port = tcph.dport if iph.src == self._rcv_ip else tcph.sport
        # Ignore unknown ports and reset packets.
        if port not in self._ports or tcph.flags & 0x4:
            return
        # If it has been more than one millisecond since we
        # have created the current row, dump the row.
        prev_time = self._row[0]
        if time - prev_time > 0.001:
            self._dump_row()
            # Store the time with the resolution of 1ms.
            self._row[0] = int(time * 1000) / 1000.0
        if iph.src == self._rcv_ip:
            self._process_rcv(iph, tcph)
        else:
            self._process_snd(iph, tcph)

    def _dump_row(self):
        """Dumps the content of the row in the html file."""
        self._html_file.write(json.dumps(self._row))
        self._html_file.write(',')

    def _process_snd(self, iph, tcph):
        """Handles the send side data and updates the sequence number.

        Args:
            iph: The parsed IP header.
            tcph: The parsed TCP header.
        """
        port = tcph.sport
        if port not in self._ports:
            return
        seq = tcph.seq
        # The first sequence number seen establishes this flow's baseline.
        if port not in self._min_seq:
            self._min_seq[port] = seq
        seq = tcp.diff_seq(seq, self._min_seq[port])
        self._max_seq = max(self._max_seq, seq)
        port_index = self._port_index[port]
        self._row[port_index] = max(self._row[port_index], seq)

    def _process_rcv(self, iph, tcph):
        """Handles the receive side data and updates the sequence number.

        Args:
            iph: The parsed IP header.
            tcph: The parsed TCP header.
        """
        port = tcph.dport
        if port not in self._ports:
            return
        # Make sure we never use the stored win scale on SYNs,
        # since SYNs can be retransmitted.
        if tcph.flags & 0x2:
            win_scale = 0
            opts = tcp.options(tcph)
            if 'WScale' in opts:
                self._win_scale[port] = opts['WScale']
        else:
            win_scale = self._win_scale[port]
        port_index = self._port_index[port]
        min_seq = self._min_seq[port] if port in self._min_seq else 0
        ack = tcp.diff_seq(tcph.ack, min_seq)
        self._row[port_index + 1] = ack
        # BUG FIX: right edge of the advertised receive window.  Only the
        # 16-bit window field is shifted by the scale (RFC 7323); the ack
        # is added afterwards.  The unparenthesized form
        # ``ack + tcph.window << win_scale`` shifted the whole sum because
        # '+' binds tighter than '<<' in Python.
        win = ack + (tcph.window << win_scale)
        self._row[port_index + 2] = win
        max_sack = -1
        for _, end in tcp.sacks(tcph):
            max_sack = max(max_sack, tcp.diff_seq(end, min_seq))
        self._row[port_index + 3] = max_sack if max_sack != -1 else None
class KlogPageGenerator(outparser.Visitor):
    """Generates the klog graphs.

    We create 3 js files (ie, bw.js, rtt.js, and mode.js) for each port
    and then include those inside klog.html.

    Attributes:
        _exp_dir: The experiment directory.
        _port_titles: The legend title for the port.
        _js_files: The dictionary from ports to the list of javascript
                   files for that port (bw-port.js, rtt-port.js,
                   mode-port.js).
    """

    def __init__(self):
        super(KlogPageGenerator, self).__init__()
        self._exp_dir = None
        self._port_titles = {}
        self._js_files = {}

    def begin(self, exp, exp_dir, rcv_ip):
        """Stores the experiment directory.

        See the outparser.Visitor interface.
        """
        self._exp_dir = exp_dir

    def end(self):
        """Writes the klog files and includes all the generated
        javascripts.

        See the outparser.Visitor interface.
        """
        html_file = open(os.path.join(self._exp_dir, 'klog.html'), 'w')
        html_file.write(templates.LOG_HEAD % {'title': 'Klog Graphs'})
        html_file.write(templates.KLOG_VAR % {})
        ports = sorted(self._port_titles.keys())
        labels = []
        for port in ports:
            labels.append(self._port_titles[port])
            if port not in self._js_files:
                continue
            files = self._js_files[port]
            for f in files:
                # All the files are setting an array. So we
                # can simply close the array.
                f.write('];')
                f.close()
                html_file.write(
                    '<script src="%s"></script>' % os.path.basename(f.name))
        html_file.write('<script>ports=%s</script>' % labels)
        html_file.write(templates.LOG_TAIL % {})
        html_file.write(templates.KLOG_TAIL % {})
        html_file.close()

    def visit_conn(self, ip, port, tool, cc, params, start, dur, tput):
        """Stores the port titles.

        See the outparser.Visitor interface.
        """
        # Only netperf connections produce klog-based graphs.
        if tool == 'netperf':
            self._port_titles[int(port)] = '%s %s:%s' % (cc, ip, port)

    def visit_klog(self, time, line, match):
        """Appends one klog sample to the port's bw/rtt/mode js arrays.

        See the outparser.Visitor interface.
        """
        port = int(match['port'])
        if port not in self._port_titles:
            return
        if port not in self._js_files:
            port_title = self._port_titles[port]
            # Create the files and write the assignment operator for bws,
            # rtts, and modes.
            bw_js = open(os.path.join(self._exp_dir, 'bws-%s.js' % port),
                         'w')
            bw_js.write('bws["%s"]=[' % port_title)
            rtt_js = open(os.path.join(self._exp_dir, 'rtts-%s.js' % port),
                          'w')
            rtt_js.write('rtts["%s"]=[' % port_title)
            mode_js = open(os.path.join(self._exp_dir, 'modes-%s.js' % port),
                           'w')
            mode_js.write('modes["%s"]=[' % port_title)
            self._js_files[port] = (bw_js, rtt_js, mode_js)
        else:
            bw_js, rtt_js, mode_js = self._js_files[port]
        # Bandwidth metrics are multiplied by 1000 to convert from the kbps
        # output by BBR to bps for more intuitive plot labels.
        bw_js.write(json.dumps([time,
                                1000 * int(match.get('bw', 0)),
                                1000 * int(match.get('pacing_bw', 0)),
                                1000 * int(match.get('sample_bw', 0)),
                                1000 * int(match.get('bw_lo', 0)),
                                int(match.get('snd_cwnd', 0)),
                                int(match.get('extra_acked', 0)),
                                int(match.get('inflight', 0)),
                                int(match.get('inflight_lo', 0)),
                                int(match.get('inflight_hi', 0)),
                                ]))
        bw_js.write(',')
        # The x1000 fields are permille values; divide by 10 for percent.
        ecn_percent = int(match.get('ecn_x1000', 0)) / 10.0
        loss_percent = int(match.get('lr_x1000', 0)) / 10.0
        rtt_js.write(json.dumps([time,
                                 ecn_percent,
                                 loss_percent,
                                 int(match.get('rtt', 0)),
                                 int(match.get('mrtt', 0)),
                                 ]))
        rtt_js.write(',')
        mode = match.get('mode', None)
        state = match.get('castate', None)
        cycle = match.get('cycle_idx', None)
        # Each mode/state/cycle gets its own constant y-level so the plot
        # shows them as separate horizontal bands.
        mode_row = [time,
                    1 if mode == 'G' else None,  # Growing: BBR_MODE_STARTUP
                    2 if mode == 'D' else None,  # Drain: BBR_MODE_DRAIN
                    3 if mode == 'W' else None,  # Window: BBR_MODE_PROBE_BW
                    4 if mode == 'M' else None,  # MinRTT: BBR_MODE_PROBE_RTT
                    5 if mode == '@' else None,  # Undo
                    6 if state == 'O' else None,  # Open
                    7 if state == 'D' else None,  # Disorder
                    8 if state == 'C' else None,  # Cwnd reduction (ECN)
                    9 if state == 'R' else None,  # Recovery
                    10 if state == 'L' else None,  # Loss
                    11 if cycle == '1' else None,  # BBR_BW_PROBE_DOWN
                    12 if cycle == '2' else None,  # BBR_BW_PROBE_CRUISE
                    13 if cycle == '3' else None,  # BBR_BW_PROBE_REFILL
                    14 if cycle == '0' else None,  # BBR_BW_PROBE_UP
                    ]
        mode_js.write(json.dumps(mode_row))
        mode_js.write(',')
class SummaryPageGenerator(outparser.Visitor):
    """Generates the summary page.

    Attributes:
        _has_xplot: Whether the generator has generated xplots.
        _summary_file: The summary page file.
        _exp: The experiment object.
        _ccs: The dictionary of connection information.
        _metrics: The RTT and retx metrics of each connection.
    """

    def __init__(self, has_xplot):
        super(SummaryPageGenerator, self).__init__()
        self._has_xplot = has_xplot
        self._summary_file = None
        self._exp = None
        self._ccs = {}
        self._metrics = {}

    def begin(self, exp, exp_dir, rcv_ip):
        """Opens the summary page for this experiment.

        See the outparser.Visitor interface.
        """
        self._exp = exp
        links = ''
        if self._has_xplot:
            links += '<a href="xplots.tbz2">xplots</a>'
        self._summary_file = open(os.path.join(exp_dir, 'summary.html'),
                                  'w')
        self._summary_file.write(templates.SUMMARY_HEAD % {'links': links})

    def visit_conn(self, ip, port, tool, cc, params, start, dur, tput):
        """Stores the connection info.

        See the outparser.Visitor interface.
        """
        # Group connections by congestion-control scheme.
        if cc not in self._ccs:
            infos = []
            self._ccs[cc] = infos
        else:
            infos = self._ccs[cc]
        infos.append((ip, port, start, dur, params, parse_float(tput)))

    def visit_metric(self, metric):
        """Stores the RTT and retx metrics to be used in generating the
        page.

        See the outparser.Visitor interface.
        """
        if metric.name().endswith('rtt') or metric.name() == 'retx':
            self._metrics[metric.name()] = metric

    def end(self):
        """Writes the content in the summary file and closes the file.

        See the outparser.Visitor interface.
        """
        # Dump the summary of the experiment information.
        self._summary_file.write('<div id="exp_info"><h>Configuration</h>')
        for line in self._exp.pretty_str().split('\n'):
            param, val = line.split('=', 1)
            # Do not include conn in the configuration.
            if param == 'conn':
                continue
            val = val.strip().replace(', ', ',<br/>')
            self._summary_file.write('<b>%s=</b>%s<br/>' % (param, val))
        self._summary_file.write('</div>')
        # Dump the connection information.
        cc_elems = ''
        cc_footers = ''
        # NOTE: iteritems() is Python 2 only.
        for cc, infos in self._ccs.iteritems():
            cc_elem = '<div class="cctitle">%s</div>' % cc
            cc_elem += '''
<table width="500">
<thead>
<tr>
<th style="width:120px">Address</th>
<th style="width:60px">TPut</th>
<th style="width:60px">Retx</th>
<th style="width:60px">MedRTT</th>
<th style="width:60px">P95RTT</th>
<th style="width:30px">Start</th>
<th style="width:25px">Dur</th>
<th style="width:60px">Params</th>
</tr>
</thead>
<tbody>
'''
            for addr, port, start, dur, params, tput in infos:
                cc_elem += ('<tr><td>%s:%s</td><td>%s Mbps</td>'
                            '<td>%.2f%%</td><td>%sms</td>'
                            '<td>%sms</td><td>%s</td><td>%s</td>'
                            '<td class="params" title="%s">%s</td>'
                            '</tr>') % (addr, port, tput,
                                        self._metrics['retx'].get(port)*100,
                                        self._metrics['med_rtt'].get(port),
                                        self._metrics['p95_rtt'].get(port),
                                        start, dur, params, params)
            cc_elem += '</tbody></table>'
            # Median and average throughput per congestion-control scheme.
            stputs = sorted([info[-1] for info in infos])
            l = len(stputs)
            if l % 2:
                # NOTE: relies on Python 2 integer '/' to pick the middle
                # element.
                median = stputs[(l - 1) / 2]
            else:
                median = (stputs[l / 2] + stputs[l/2 - 1]) / 2.0
            avg = sum(stputs) / l
            cc_footers += ('<div class="col">Mdn: %s<br/>Avg: %s'
                           '</div>') % (median, avg)
            cc_elems += '<div class="col">%s</div>' % cc_elem
        self._summary_file.write('<div id="ccs"><h>Results</h>')
        self._summary_file.write('<div class="row">')
        self._summary_file.write(cc_elems)
        self._summary_file.write('</div><div class="row">')
        self._summary_file.write(cc_footers)
        self._summary_file.write('</div></div>')
        self._summary_file.write(templates.SUMMARY_TAIL % {})
        self._summary_file.close()
class DashboardPageGenerator(outparser.Visitor):
    """Generates the experiment's dashboard page."""

    def begin(self, exp, exp_dir, rcv_ip):
        """Generates the dashboard for this experiment.

        Writes the static dashboard template to <exp_dir>/index.html.
        See the outparser.Visitor interface.
        """
        # Use a context manager so the file handle is closed even if the
        # write raises (the original open/write/close leaked it on error).
        with open(os.path.join(exp_dir, 'index.html'), 'w') as dbh:
            dbh.write(templates.DASHBOARD % {})
def gen_exp(exp, exp_dir, has_xplot=False, skip_pcap_scan=False):
    """Generates all the pages for the experiment.

    Runs every visitor over the connection info, pcap/ss-log data and kernel
    logs found under exp_dir, then publishes metrics and evaluates the
    experiment's check() against them.

    Args:
        exp: The experiment object.
        exp_dir: The experiment's output directory.
        has_xplot: Whether the xplot is generated for the experiment.
        skip_pcap_scan: Whether to skip pcap scan (uses ss.log entries
            instead of scanning packets).

    Returns:
        The tuple of (metrics, test case errors).
    """
    visitors = [
        # Order is important here. Keep MetricPublishers at the head of the list
        # and non-publisher Visitors at the end, so that metrics are published
        # before the visitors are ended.
        KlogMetricsPublisher(),
        RetxRateMetricPublisher(),
        ConvergenceMetricPublisher(),
        RTTMetricPublisher(),
        SerialDelayMetricPublisher(),
        TputMetricPublisher(),
        AppLatencyMetricPublisher(),
        UtilMetricAndPageGenerator(),
        TimeSeqPageGenerator(),
        SummaryPageGenerator(has_xplot),
        KlogPageGenerator(),
        DashboardPageGenerator(),
        KlogCompressor(),
    ]
    _dump_js_files(exp_dir)
    _merge_pcaps(exp_dir)
    _merge_sysouts(exp_dir)
    rcv_ip = outparser.RecvInfo(os.path.join(exp_dir, 'R', 'recv.info')).ip
    conn_info = outparser.ConnInfo([os.path.join(d, f)
                                    for d, f in all_files(exp_dir,
                                                          name='conn.info')])
    pcaps = []
    for i in range(exp.nsenders()):
        snd_dir = os.path.join(exp_dir, str(i))
        snd_pcaps = [os.path.join(d, f)
                     for d, f in all_files(snd_dir, regex=r'.*\.pcap$')]
        # If the machine has eth1 or eth2 interfaces, we have a bonding/slave
        # config. Otherwise, we have one physical interface that are not
        # bonded. In the former case, we use the pcap of the slaves and for the
        # latter we use pcaps from the physical interface eth0.
        is_bonded = len([f for f in snd_pcaps
                         if f.endswith('eth1.pcap') or f.endswith('eth2.pcap')])
        if not is_bonded:
            pcaps += snd_pcaps
        else:
            pcaps += [f for f in snd_pcaps if not f.endswith('eth0.pcap')]
    pcap_parser = outparser.Pcap(pcaps)
    klogs = [os.path.join(d, f) for d, f in all_files(exp_dir,
                                                      name='kern-debug.log')]
    klog_parser = outparser.KernLog(klogs)
    for visitor in visitors:
        visitor.begin(exp, exp_dir, rcv_ip)
    for port in conn_info.ports():
        ip, tool, cc, start, dur, tput, params = conn_info.conn_info(port)
        for visitor in visitors:
            visitor.visit_conn(ip, port, tool, cc, params, start, dur, tput)
    # Maps sender port -> time of its first SYN, relative to experiment start.
    start_times = {}
    if not skip_pcap_scan:
        exp_start = False
        exp_start_time = 0
        for time, packet in pcap_parser.packets():
            if IP not in packet and IPv6 not in packet:
                continue
            if IPv6 not in packet:
                ip = packet[IP]
            else:
                ip = packet[IPv6]
            if TCP in packet:
                l4_hdr = packet[TCP]
            elif UDP in packet:
                l4_hdr = packet[UDP]
            else:
                continue
            port = l4_hdr.dport if ip.src == rcv_ip else l4_hdr.sport
            # Whether this is SYN sent by sender or not
            sender_syn = ip.src != rcv_ip and TCP in packet \
                and (l4_hdr.flags & 0x2)
            # Process pkt only if experiment has started (from sender
            # perspective) i.e. SYN packet sent by atleast one sender
            if not exp_start:
                if not sender_syn:
                    continue
                exp_start_time = time
                exp_start = True
            # Adjust time relative to start of the experiment
            time -= exp_start_time
            # We need to store the port start time for adjusting the klog times.
            if port not in start_times and sender_syn:
                start_times[port] = time
            for visitor in visitors:
                visitor.visit_packet(time, packet)
    else:
        ss_logs = []
        for i in range(exp.nsenders()):
            ss_log = os.path.join(exp_dir, str(i))
            ss_log = os.path.join(ss_log, 'ss.log')
            if os.path.exists(ss_log):
                ss_logs.append(ss_log)
        sslog_parser = outparser.SsLog(ss_logs)
        for time, data in sslog_parser.entries():
            if 'port' in data:
                port = data['port']
                if port not in start_times:
                    start_times[port] = time
            for visitor in visitors:
                visitor.visit_ss_log(time, data)
    for time, line, match in klog_parser.lines():
        # Kernel log times are relative to the kernel log entries. We need
        # to add the start times based on pcap data in order to get a timestamp
        # that is relative to the beginning of the experiment. Thus, we use
        # "time + start_time" instead of time.
        port = int(match['port'])
        start_time = start_times[port] if port in start_times else 0
        for visitor in visitors:
            visitor.visit_klog(time + start_time, line, match)
    metrics = {}
    for visitor in visitors:
        # Publishers sit at the head of the visitor list, so by the time a
        # later (non-publisher) visitor is ended here, the metrics published
        # by earlier visitors have already been delivered to it.
        for mt in metrics.values():
            visitor.visit_metric(mt)
        visitor.end()
        if isinstance(visitor, metric.MetricPublisher):
            for mt in visitor.publish_metrics():
                metrics[mt.name()] = mt
    _dump_metrics(exp_dir, metrics)
    _log_metrics(exp, metrics)
    case = TestCase()
    errs = []
    try:
        exp.check(exp, metrics, case)
    # 'except Exception as e' works on Python 2.6+ and 3; the original
    # 'except Exception, e' form is Python-2-only syntax.
    except Exception as e:
        errs.append(str(e))
    errs += case.errors()
    if not errs:
        sys.stderr.write(shell.colorize('%s\t[PASSED]\n' % exp, shell.GREEN))
    else:
        sys.stderr.write(shell.colorize('%s\t[FAILED]\n' % exp, shell.RED))
        for err in errs:
            sys.stderr.write(shell.colorize('\terror: %s\n' % err, shell.RED))
    return metrics, errs
def print_usage():
    """Prints the help information to stdout."""
    # Single-argument print() is valid in both Python 2 (as a parenthesized
    # expression) and Python 3; the original bare print statement is py2-only.
    print('''gen.py [options] [DATA_DIR]
By default we use LATEST as the data directory.
options:
-v: verbose output
-x: generate xplots
-q: not open browser after experiment.''')
def main():
    """Parses command-line options and runs page/plot generation threads.

    Returns:
        0 on success, -1 on invalid options.
    """
    opts, args = getopt.getopt(sys.argv[1:], 'vt:xq')
    has_xplot = False
    # NOTE(review): open_page is parsed from -q but never read afterwards in
    # this function — confirm whether browser opening was meant to happen here.
    open_page = True
    for opt, val in opts:
        if opt == '-v':
            # Verbosity is handled by log.setup_logging(opts) below.
            continue
        elif opt == '-x':
            has_xplot = True
        elif opt == '-q':
            open_page = False
        else:
            print_usage()
            return -1
    log.setup_logging(opts)
    data_dir = 'LATEST' if not args else args[0]
    threads = []
    if has_xplot:
        t = threading.Thread(target=gen_xplots, args=[data_dir])
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    # Bug fix: the original 'return ret' referenced a name that is never
    # assigned, raising NameError on every successful run. Return 0 instead.
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
|
|
from CommonServerPython import *
import os
import demistomock as demisto
# Dotted import path of return_error in the module under test; patched by the
# tests below so error calls can be counted and their messages captured.
RETURN_ERROR_TARGET = 'HTMLDocsAutomation.return_error'
def test_get_yaml_obj(mocker):
    """Checks get_yaml_obj against a valid yml, an invalid yml and a
    missing file.

    demisto.getFilePath is stubbed per scenario, and return_error is patched
    so its call count and last message can be asserted instead of aborting.
    """
    from HTMLDocsAutomation import get_yaml_obj
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    # sanity
    file_path = os.path.join('test_data', 'ANYRUN_yml.txt')
    mocker.patch.object(demisto, 'getFilePath',
                        return_value={'path': file_path})
    data = get_yaml_obj('12345')
    # error count should not change
    assert return_error_mock.call_count == 0
    # call_args last call with a tuple of args list and kwargs
    assert data['commonfields']['id'] == 'ANYRUN'

    # invalid yml
    mocker.patch.object(demisto, 'getFilePath',
                        return_value={'path': os.path.join('test_data', 'not_yml_file.txt')})
    get_yaml_obj('234')
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert err_msg == 'Failed to open integration file: not a yml file'

    # no such file
    mocker.patch.object(demisto, 'getFilePath', side_effect=ValueError('no such file'))
    get_yaml_obj('234')
    assert return_error_mock.call_count == 2
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert err_msg == 'Failed to open integration file: no such file'
def test_extract_command():
    """Checks splitting a command line into the command name and its args."""
    from HTMLDocsAutomation import extract_command

    def check(line, expected_cmd, expected_args):
        # Same assertions as before: command name, arg count, then each value.
        cmd, args = extract_command(line)
        assert cmd == expected_cmd
        assert len(expected_args) == len(args)
        for key, value in expected_args.items():
            assert args[key] == value

    # no args
    check('!no-args-command', '!no-args-command', {})
    # sanity
    check('!command ip=8.8.8.8', '!command', {'ip': '8.8.8.8'})
    # edge cases: dashes, spaces, backticks and triple-quoted values
    check('!command SomeParam=8.8.8.8 dash-arg="args" special_chars="1qazxsw2 EW3- *3d" '
          'backTick=`hello "hello" \'hello\'` triple_quotes="""this is a multi quotes"""',
          '!command',
          {
              'SomeParam': '8.8.8.8',
              'dash-arg': 'args',
              'special_chars': '1qazxsw2 EW3- *3d',
              'backTick': 'hello "hello" \'hello\'',
              'triple_quotes': 'this is a multi quotes',
          })
    # multiline triple-quoted value
    check('!command SomeParam="""hello\nthis is multiline"""',
          '!command',
          {'SomeParam': 'hello\nthis is multiline'})
def test_generate_commands_section():
    """Checks the generated HTML commands section.

    Deprecated commands must be skipped, and one error is expected per
    command that has no recorded example.
    """
    from HTMLDocsAutomation import generate_commands_section
    yml_data = {
        'script': {
            'commands': [
                {'deprecated': True,
                 'name': 'deprecated-cmd',
                 'description': 'desc'},
                {'deprecated': False,
                 'name': 'non-deprecated-cmd',
                 'description': 'desc1'},
                {'name': 'non-deprecated-cmd2',
                 'description': 'desc2.'}
            ]
        }
    }
    section, errors = generate_commands_section(yml_data, {}, True)
    expected_section = '''<h2>Commands</h2>
<p>
You can execute these commands from the Demisto CLI, as part of an automation, or in a playbook.
After you successfully execute a command, a DBot message appears in the War Room with the command details.
</p>
<ol>
<li><a href="#non-deprecated-cmd" target="_self">desc1: non-deprecated-cmd</a></li>
<li><a href="#non-deprecated-cmd2" target="_self">desc2: non-deprecated-cmd2</a></li>
</ol>
<h3 id="non-deprecated-cmd">1. non-deprecated-cmd</h3>
<hr>
<p>desc1</p>
<h5>Base Command</h5>
<p>
<code>non-deprecated-cmd</code>
</p>
<h5>Required Permissions</h5>
<p>The following permissions are required for this command.</p>
<ul>
<li>permission 1</li>
<li>permission 2</li>
</ul>
<h5>Input</h5>
There are no input arguments for this command.
<p> </p>
<h5>Context Output</h5>
There are no context output for this command.
<p> </p>
<h5>Command Example</h5>
<p>
<code> </code>
</p>
<h5>Human Readable Output</h5>
<p>
<!-- remove the following comments to manually add an image: -->
<!--
<a href="insert URL to your image" target="_blank" rel="noopener noreferrer"><img src="insert URL to your image"
alt="image" width="749" height="412"></a>
-->
</p>
<h3 id="non-deprecated-cmd2">2. non-deprecated-cmd2</h3>
<hr>
<p>desc2.</p>
<h5>Base Command</h5>
<p>
<code>non-deprecated-cmd2</code>
</p>
<h5>Required Permissions</h5>
<p>The following permissions are required for this command.</p>
<ul>
<li>permission 1</li>
<li>permission 2</li>
</ul>
<h5>Input</h5>
There are no input arguments for this command.
<p> </p>
<h5>Context Output</h5>
There are no context output for this command.
<p> </p>
<h5>Command Example</h5>
<p>
<code> </code>
</p>
<h5>Human Readable Output</h5>
<p>
<!-- remove the following comments to manually add an image: -->
<!--
<a href="insert URL to your image" target="_blank" rel="noopener noreferrer"><img src="insert URL to your image"
alt="image" width="749" height="412"></a>
-->
</p>
'''
    assert section == expected_section
    assert len(errors) == 2  # no example for both commands
def test_to_html_table():
    """Checks that a header list plus row data render to the expected
    fixed-width HTML table."""
    from HTMLDocsAutomation import to_html_table
    data = [
        ['hello', 'hello', 'hello'],
        ['world', 'world', 'world'],
        ['!', '!', '!'],
    ]
    expected = '''<table style="width:750px" border="2" cellpadding="6">
<thead>
<tr>
<th><strong>header1</strong></th>
<th><strong>header2</strong></th>
<th><strong>header3</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td>hello</td>
<td>hello</td>
<td>hello</td>
</tr>
<tr>
<td>world</td>
<td>world</td>
<td>world</td>
</tr>
<tr>
<td>!</td>
<td>!</td>
<td>!</td>
</tr>
</tbody>
</table>
'''
    assert to_html_table(['header1', 'header2', 'header3'], data) == expected
def test_human_readable_example_to_html():
    """Checks conversion of markdown (table, headline, plain text) to HTML.

    Also verifies that concatenated markdown sections convert to the
    concatenation of their individual conversions.
    """
    from HTMLDocsAutomation import human_readable_example_to_html
    data = [
        {
            'header1': 'hello',
            'header2': 'hello',
        },
        {
            'header1': 'world',
            'header2': 'world',
        },
    ]
    md = tableToMarkdown('Title', data, headers=['header1', 'header2'])
    expected = '''<h3>Title</h3>
<table style="width:750px" border="2" cellpadding="6">
<thead>
<tr>
<th><strong>header1</strong></th>
<th><strong>header2</strong></th>
</tr>
</thead>
<tbody>
<tr>
<td> hello </td>
<td> hello </td>
</tr>
<tr>
<td> world </td>
<td> world </td>
</tr>
</tbody>
</table>
'''
    assert human_readable_example_to_html(md) == expected
    md = md + '\n# Headline\nsome text\nanother line of text\n' + md
    expected = expected + '\n<h1>Headline</h1>\n<p>\nsome text\nanother line of text\n</p>\n' + expected
    assert human_readable_example_to_html(md) == expected
    # md = '''Key | Value
    # - | -
    # city | Mountain View
    # country | US
    # hostname | dns.google
    # ip | 8.8.8.8
    # loc | 37.3860,-122.0838
    # org | AS15169 Google LLC
    # postal | 94035
    # readme | https://ipinfo.io/missingauth
    # region | California
    # {"lat": 37.386, "lng": -122.0838}'''
    #
    # print(human_readable_example_to_html(md))
|
|
"""Miscellaneous utility functions and classes."""
import bitarray
import collections
import io
import types
class Error(Exception):
    """Base class for all errors raised by this module."""
    pass
class IncompleteDataError(Error):
    """Raised when the complete data is requested but chunks are missing."""
    pass
class UnexpectedChunkError(Error):
    """Base class for errors about a chunk that does not fit the assembly."""
    pass
class ChunkPastEndError(UnexpectedChunkError):
    """Raised when a chunk's offset lies past the known end of the data."""
    pass
class ChunkCollisionError(UnexpectedChunkError):
    """Raised when a chunk was already added with different content."""
    pass
class UnexpectedChunkLengthError(ChunkCollisionError):
    """Raised when the length of the added chunk is too small or large.

    This exception is used when the length of the chunk is legal but not
    expected according to the parameters of DataAssembler. For example, the
    last chunk must be sized to fit the known data length, and every other
    chunk must be sized exactly as the alignment value. When the length is
    illegal, e.g. longer than the alignment value, ValueError should be
    raised instead.

    As the length mismatch is an indication of potential chunk collisions,
    this exception subclasses ChunkCollisionError.
    """
    pass
class DataChunk(collections.namedtuple('DataChunk', ['data', 'offset'])):
    """A piece of the original data: (data bytes, starting byte offset)."""
    pass
class DataAssembler:
    """Assembler for DataChunk objects.

    DataChunk represents a part of the entire data. DataAssembler can
    reconstruct the entire data by assembling multiple DataChunk objects into
    a single buffer. Each chunk is expected to be obtained by splitting the
    original data for every alignment bytes.
    """

    def __init__(self, alignment, storage=None, length=None):
        """Initialize DataAssembler for accepting chunks of given alignment.

        DataAssembler object takes ownership of provided storage object.
        Therefore, the storage should not be modified externally.

        Args:
            alignment (int): Alignment of each chunk. Offset of each chunk
                added must be divisible by this number. Also, every chunk but
                the last must have the same length as alignment.
            storage (optional): binary file object used as data storage for
                the assembler. Must support both read and write. If not
                provided, io.BytesIO is used.
            length (int, optional): Length of the entire data. Used to
                perform extra validations.

        Raises:
            TypeError: If alignment (or a provided length) is not an int.
            ValueError: If alignment is not positive or length is negative.
        """
        if not isinstance(alignment, int):
            # Bug fix: the original message had no '{}' placeholder, so the
            # method name passed to .format() was silently dropped; now the
            # message matches the '{}: ...' style of the length checks below.
            raise TypeError("{}: alignment "
                            "must be int".format(self.__init__.__name__))
        elif alignment <= 0:
            raise ValueError('alignment must be positive integer')
        if length is not None:
            if not isinstance(length, int):
                raise TypeError("{}: length "
                                "must be int or None".format(self.__init__.__name__))
            elif length < 0:
                raise ValueError("{}: length "
                                 "cannot be negative".format(self.__init__.__name__))
        self._alignment = alignment
        # One bit per alignment-sized slot; a set bit means that chunk has
        # been received. Empty until the total length is known.
        bitarray_length = self._bitarray_length(length) if length is not None else 0
        self._has_chunk = bitarray.bitarray(bitarray_length)
        self._storage = storage or io.BytesIO()
        self._length = length
        self._has_chunk.setall(False)

    def _bitarray_length(self, data_length):
        """Returns how many alignment-sized slots cover data_length bytes."""
        assert data_length is not None
        return 1 + (data_length - 1) // self._alignment

    @property
    def alignment(self):
        """int: Chunk alignment in bytes."""
        return self._alignment

    @property
    def complete(self):
        """bool: True when every known chunk slot has been filled."""
        return self._has_chunk.all()

    @property
    def length(self):
        """int or None: Total data length in bytes, if known."""
        return self._length

    @length.setter
    def length(self, value):
        # The length may be supplied late, but only once; a conflicting
        # second value is rejected.
        if self._length is None:
            if value is not None:
                self._length = value
                self._extend_bitmap(self._bitarray_length(value))
        elif value != self._length:
            raise ValueError('length cannot be changed once set')

    def getbytes(self, incomplete=False):
        """Returns the assembled data as bytes.

        Args:
            incomplete (bool): If True, return whatever has been written so
                far (zero-padded up to the known length) even when chunks
                are missing.

        Raises:
            IncompleteDataError: If incomplete is False and data is missing.
            RuntimeError: If the storage cannot seek back to the start.
        """
        if not incomplete and not self.complete:
            raise IncompleteDataError('cannot return incomplete data')
        curr = self._storage.seek(0)
        if curr != 0:
            raise RuntimeError('seek(0) failed')
        result = self._storage.read()
        if self._length is not None and len(result) < self._length:
            assert not self.complete
            # pad the results to fit the requested length.
            result += b'\x00' * (self._length - len(result))
        return result

    def add(self, data, offset):
        """Adds a chunk of bytes at the given byte offset.

        Re-adding an identical chunk is a no-op; a differing duplicate
        raises ChunkCollisionError (via _data_already_added).
        """
        self._verify_chunk_params(data, offset)
        if self._data_already_added(data, offset):
            return
        self._update_bitmap(data, offset)
        self._write_data(data, offset)

    def _verify_chunk_params(self, data, offset):
        """Validates offset alignment and chunk length against expectations."""
        length = len(data)
        if offset < 0:
            raise ValueError('offset must be non-negative')
        elif self._length is not None and offset >= self._length:
            raise ChunkPastEndError('chunk at offset {} is '
                                    'past the end of expected range'.format(offset))
        if offset % self._alignment != 0:
            raise ValueError('offset not aligned by {} bytes'.format(self._alignment))
        if length > self._alignment:
            raise ValueError('length of the chunk must not exceed the alignment')
        chunk_index = offset // self._alignment
        if self._is_last_chunk(chunk_index):
            if self._length is not None and offset + length != self._length:
                # This is the last chunk, which should fill the buffer without
                # gap or overflow.
                raise UnexpectedChunkLengthError(
                    'incorrect length {} for the last chunk'.format(length))
        elif length != self._alignment:
            # Not the last chunk; data length must match alignment.
            raise UnexpectedChunkLengthError(
                'length of every chunk but the last must match alignment.')

    def _is_last_chunk(self, chunk_index):
        """True if chunk_index is at (or beyond) the last known slot."""
        return chunk_index >= len(self._has_chunk) - 1

    def _update_bitmap(self, data, offset):
        """Marks the chunk as received; infers total length from a short tail."""
        assert offset >= 0
        length = len(data)
        chunk_index = offset // self._alignment
        if chunk_index >= len(self._has_chunk):
            assert self._length is None
            # Extend the bitarray to ensure chunk_index is a valid index.
            self._extend_bitmap(chunk_index + 1)
        self._has_chunk[chunk_index] = True
        if self._length is None and length < self._alignment:
            # This must be the last chunk; now we know the length.
            self._length = offset + length

    def _extend_bitmap(self, length):
        """Grows the received-chunk bitmap to the given slot count."""
        assert length >= len(self._has_chunk)
        extension = length - len(self._has_chunk)
        self._has_chunk.extend(False for i in range(extension))

    def _data_already_added(self, data, offset):
        """Returns True if this exact chunk was seen before.

        If we have seen the chunk before, check whether content matches.
        It's an error if they don't match.
        """
        assert offset % self._alignment == 0
        chunk_index = offset // self._alignment
        try:
            if not self._has_chunk[chunk_index]:
                return False
        except IndexError:
            return False
        curr_data = self._read_data(offset, self._alignment)
        if data != curr_data:
            raise ChunkCollisionError('chunk at offset {} has been already added '
                                      'with different content'.format(offset))
        return True

    def _read_data(self, offset, length):
        """Reads length bytes from the storage at the given offset."""
        curr = self._storage.seek(offset)
        assert curr == offset
        return self._storage.read(length)

    def _write_data(self, data, offset):
        """Writes the chunk at offset, zero-filling any gap if seeking short."""
        curr = self._storage.seek(offset)
        if curr < offset:
            # Seek to the offset failed somehow. Try padding to fill in the gap.
            self._storage.write(b'\x00' * (offset - curr))
            if self._storage.tell() != offset:
                raise RuntimeError('failed to seek to offset {}'.format(offset))
        assert self._storage.tell() == offset
        self._storage.write(data)

    def add_chunk(self, chunk):
        """Adds a DataChunk (or any (data, offset) pair)."""
        self.add(*chunk)
|
|
# -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
from distutils.version import LooseVersion
import gc
import os
import os.path as op
import sys
import warnings
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import matplotlib
import mne
from mne.utils import linkcode_resolve # noqa, analysis:ignore
# Fail fast: the gallery configuration below relies on sphinx-gallery >= 0.2.
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
    raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
                      '%s' % (sphinx_gallery.__version__,))
# Use a non-interactive backend so the docs build does not need a display.
matplotlib.use('agg')

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.linkcode',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.graphviz',
    'numpydoc',
    'sphinx_gallery.gen_gallery',
    'sphinx_fontawesome',
    'gen_commands',
    'gh_substitutions',
    'mne_substitutions',
    'sphinx_bootstrap_theme',
    'sphinx_bootstrap_divs',
    'sphinxcontrib.bibtex',
    'sphinxcontrib.bibtex2',
]

# URLs the link checker cannot reach reliably (403s, flaky hosts, TLS
# problems); each entry documents the failure it works around.
linkcheck_ignore = [
    'https://doi.org/10.1088/0031-9155/57/7/1937',  # noqa 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta
    'https://doi.org/10.1088/0031-9155/51/7/008',  # noqa 403 Client Error: Forbidden for url: https://iopscience.iop.org/article/10.1088/0031-9155/51/7/008
    'https://sccn.ucsd.edu/wiki/.*',  # noqa HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),))
    'https://docs.python.org/dev/howto/logging.html',  # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    'https://docs.python.org/3/library/.*',  # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
    'https://hal.archives-ouvertes.fr/hal-01848442/',  # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False  # saves a bit of time

autosummary_generate = True
autodoc_default_options = {'inherited-members': None}

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
                                                              td.isoformat())

nitpicky = True
# Docstring fragments (inherited from dict/set method signatures) that are
# not real cross-reference targets; silence them in nitpicky mode.
nitpick_ignore = [
    ("py:class", "None. Remove all items from D."),
    ("py:class", "a set-like object providing a view on D's items"),
    ("py:class", "a set-like object providing a view on D's keys"),
    ("py:class", "v, remove specified key and return the corresponding value."),  # noqa: E501
    ("py:class", "None. Update D from dict/iterable E and F."),
    ("py:class", "an object providing a view on D's values"),
    ("py:class", "a shallow copy of D"),
]
suppress_warnings = ['image.nonlocal_uri']  # we intentionally link outside

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'navbar_title': ' ',  # we replace this with an image
    'source_link_position': "nav",  # default
    'bootswatch_theme': "flatly",  # yeti paper lumen
    'navbar_sidebarrel': False,  # Render the next/prev links in navbar?
    'navbar_pagenav': False,
    'navbar_class': "navbar",
    'bootstrap_version': "3",  # default
    'navbar_links': [
        ("Install", "install/index"),
        ("Overview", "overview/index"),
        ("Tutorials", "auto_tutorials/index"),
        ("Examples", "auto_examples/index"),
        ("Glossary", "glossary"),
        ("API", "python_reference"),
    ],
}

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
    'contributing.html',
    'documentation.html',
    'getting_started.html',
    'install_mne_python.html',
]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# variables to pass to HTML templating engine
# NOTE(review): the env-var default here is the bool False (int(False) == 0
# when BUILD_DEV_HTML is unset); a string default like '0' would be clearer —
# confirm before changing.
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True, 'use_twitter': True,
                'use_media_buttons': True, 'build_dev_html': build_dev_html}

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'
# -- Options for LaTeX output ---------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    # ('index', 'MNE.tex', u'MNE Manual',
    #  u'MNE Contributors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

trim_doctests_flags = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/devdocs', None),
    'scipy': ('https://scipy.github.io/devdocs', None),
    'matplotlib': ('https://matplotlib.org', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest', None),
    'joblib': ('https://joblib.readthedocs.io/en/latest', None),
    'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
    'nibabel': ('https://nipy.org/nibabel', None),
    'nilearn': ('http://nilearn.github.io', None),
    'surfer': ('https://pysurfer.github.io/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'seaborn': ('https://seaborn.pydata.org/', None),
    'statsmodels': ('https://www.statsmodels.org/dev', None),
    'patsy': ('https://patsy.readthedocs.io/en/latest', None),
    # There are some problems with dipy's redirect:
    # https://github.com/nipy/dipy/issues/1955
    'dipy': ('https://dipy.org/documentation/latest',
             'https://dipy.org/documentation/1.1.1./objects.inv/'),
    'mne_realtime': ('https://mne.tools/mne-realtime', None),
    'picard': ('https://pierreablin.github.io/picard/', None),
}

##############################################################################
# sphinxcontrib-bibtex

bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''

##############################################################################
# sphinx-gallery

examples_dirs = ['../examples', '../tutorials']
gallery_dirs = ['auto_examples', 'auto_tutorials']
os.environ['_MNE_BUILDING_DOC'] = 'true'

# Scrapers collect figures from gallery examples; mayavi and pyvista are
# optional and only enabled when their setup below succeeds.
scrapers = ('matplotlib',)
try:
    mlab = mne.utils._import_mlab()
    # Do not pop up any mayavi windows while running the
    # examples. These are very annoying since they steal the focus.
    mlab.options.offscreen = True
    # hack to initialize the Mayavi Engine
    mlab.test_plot3d()
    mlab.close()
except Exception:
    # Mayavi is optional; build without its scraper if it fails to set up.
    pass
else:
    scrapers += ('mayavi',)
try:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        import pyvista
    # NOTE(review): OFF_SCREEN is set to False here even though mayavi above
    # is configured offscreen — confirm this is intentional.
    pyvista.OFF_SCREEN = False
except Exception:
    pass
else:
    scrapers += ('pyvista',)
if any(x in scrapers for x in ('pyvista', 'mayavi')):
    from traits.api import push_exception_handler
    push_exception_handler(reraise_exceptions=True)
    report_scraper = mne.report._ReportScraper()
    scrapers += (report_scraper,)
else:
    report_scraper = None
if 'pyvista' in scrapers:
    brain_scraper = mne.viz._brain._BrainScraper()
    scrapers = list(scrapers)
    # Run the brain scraper just before the generic pyvista scraper.
    scrapers.insert(scrapers.index('pyvista'), brain_scraper)
    scrapers = tuple(scrapers)
def append_attr_meth_examples(app, what, name, obj, options, lines):
    """Append SG examples backreferences to method and attr docstrings."""
    # NumpyDoc nicely embeds method and attribute docstrings for us, but it
    # does not respect the autodoc templates that would otherwise insert
    # the .. include:: lines, so we append the mini-gallery directives
    # ourselves. Eventually this could perhaps live in sphinx-gallery.
    if what not in ('attribute', 'method'):
        return
    examples_path = op.join(op.dirname(__file__), 'generated',
                            '%s.examples' % (name,))
    if os.path.getsize(examples_path) == 0:
        # No examples reference this attribute/method; leave docstring alone.
        return
    lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
def setup(app):
    """Set up the Sphinx app."""
    # Hook the backreference appender into autodoc processing.
    app.connect('autodoc-process-docstring', append_attr_meth_examples)
    scraper = report_scraper
    if scraper is not None:
        # Give the report scraper access to the app so it can copy its
        # generated files into the build output when the build finishes.
        scraper.app = app
        app.connect('build-finished', scraper.copyfiles)
class Resetter(object):
    """Callable whose repr is constant, keeping the Sphinx env hash static."""

    def __repr__(self):
        # No memory address in the repr, so the build-environment hash does
        # not change between builds.
        return '<{0}>'.format(type(self).__name__)

    def __call__(self, gallery_conf, fname):
        """Reset global state between gallery example scripts."""
        import matplotlib.pyplot as plt
        reset_warnings(gallery_conf, fname)
        # in case users have interactive mode turned on in matplotlibrc,
        # turn it off here (otherwise the build can be very slow)
        plt.ioff()
        gc.collect()
def reset_warnings(gallery_conf, fname):
    """Ensure we are future compatible and ignore silly warnings.

    Both parameters are unused; the signature matches what sphinx-gallery
    passes to reset callables (see ``reset_modules`` below) — TODO confirm.
    """
    # In principle, our examples should produce no warnings.
    # Here we cause warnings to become errors, with a few exceptions.
    # This list should be considered alongside
    # setup.cfg -> [tool:pytest] -> filterwarnings
    # remove tweaks from other module imports or example runs
    warnings.resetwarnings()
    # restrict
    warnings.filterwarnings('error')
    # allow these, but show them
    warnings.filterwarnings('always', '.*non-standard config type: "foo".*')
    warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*')
    warnings.filterwarnings('always', '.*cannot make axes width small.*')
    warnings.filterwarnings('always', '.*Axes that are not compatible.*')
    warnings.filterwarnings('always', '.*FastICA did not converge.*')
    warnings.filterwarnings(  # xhemi morph (should probably update sample)
        'always', '.*does not exist, creating it and saving it.*')
    warnings.filterwarnings('default', module='sphinx')  # internal warnings
    warnings.filterwarnings(
        'always', '.*converting a masked element to nan.*')  # matplotlib?
    # allow these warnings, but don't show them
    warnings.filterwarnings(
        'ignore', '.*OpenSSL\\.rand is deprecated.*')
    warnings.filterwarnings('ignore', '.*is currently using agg.*')
    warnings.filterwarnings(  # SciPy-related warning (maybe 1.2.0 will fix it)
        'ignore', '.*the matrix subclass is not the recommended.*')
    warnings.filterwarnings(  # some joblib warning
        'ignore', '.*semaphore_tracker: process died unexpectedly.*')
    warnings.filterwarnings(  # needed until SciPy 1.2.0 is released
        'ignore', '.*will be interpreted as an array index.*', module='scipy')
    # DeprecationWarnings raised by third-party packages that we cannot fix.
    for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
                'Using or importing the ABCs from',  # internal modules on 3.7
                r"it will be an error for 'np\.bool_'",  # ndimage
                "DocumenterBridge requires a state object",  # sphinx dev
                "'U' mode is deprecated",  # sphinx io
                r"joblib is deprecated in 0\.21",  # nilearn
                'The usage of `cmp` is deprecated and will',  # sklearn/pytest
                'scipy.* is deprecated and will be removed in',  # dipy
                r'Converting `np\.character` to a dtype is deprecated',  # vtk
                r'sphinx\.util\.smartypants is deprecated',
                'is a deprecated alias for the builtin',  # NumPy
                ):
        warnings.filterwarnings(  # deal with other modules having bad imports
            'ignore', message=".*%s.*" % key, category=DeprecationWarning)
    warnings.filterwarnings(  # deal with bootstrap-theme bug
        'ignore', message=".*modify script_files in the theme.*",
        category=Warning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
        category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
    warnings.filterwarnings(  # deal with other modules having bad imports
        'ignore', message=".*ufunc size changed.*", category=RuntimeWarning)
    warnings.filterwarnings(  # realtime
        'ignore', message=".*unclosed file.*", category=ResourceWarning)
    warnings.filterwarnings('ignore', message='Exception ignored in.*')
    # allow this ImportWarning, but don't show it
    warnings.filterwarnings(
        'ignore', message="can't resolve package from", category=ImportWarning)
    warnings.filterwarnings(
        'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
# Apply the warning filters to this doc-build process itself, not only to
# gallery example runs (the arguments are unused by reset_warnings).
reset_warnings(None, None)
sphinx_gallery_conf = {
    'doc_module': ('mne',),
    'reference_url': dict(mne=None),
    'examples_dirs': examples_dirs,
    # Explicit ordering of example/tutorial subsections in the built docs.
    'subsection_order': ExplicitOrder(['../examples/io/',
                                       '../examples/simulation/',
                                       '../examples/preprocessing/',
                                       '../examples/visualization/',
                                       '../examples/time_frequency/',
                                       '../examples/stats/',
                                       '../examples/decoding/',
                                       '../examples/connectivity/',
                                       '../examples/forward/',
                                       '../examples/inverse/',
                                       '../examples/realtime/',
                                       '../examples/datasets/',
                                       '../tutorials/intro/',
                                       '../tutorials/io/',
                                       '../tutorials/raw/',
                                       '../tutorials/preprocessing/',
                                       '../tutorials/epochs/',
                                       '../tutorials/evoked/',
                                       '../tutorials/time-freq/',
                                       '../tutorials/source-modeling/',
                                       '../tutorials/stats-sensor-space/',
                                       '../tutorials/stats-source-space/',
                                       '../tutorials/machine-learning/',
                                       '../tutorials/simulation/',
                                       '../tutorials/sample-datasets/',
                                       '../tutorials/discussions/',
                                       '../tutorials/misc/']),
    'gallery_dirs': gallery_dirs,
    'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
    'backreferences_dir': 'generated',
    'plot_gallery': 'True',  # Avoid annoying Unicode/bool default warning
    'download_section_examples': False,
    'thumbnail_size': (160, 112),
    'remove_config_comments': True,
    'min_reported_time': 1.,
    'abort_on_example_error': False,
    'reset_modules': ('matplotlib', Resetter()),  # called w/each script
    'image_scrapers': scrapers,
    'show_memory': not sys.platform.startswith('win'),
    'line_numbers': False,  # XXX currently (0.3.dev0) messes with style
    'within_subsection_order': FileNameSortKey,
    'capture_repr': ('_repr_html_',),
    'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
    'matplotlib_animations': True,
    'compress_images': ('images', 'thumbnails'),
}
##############################################################################
# numpydoc
# XXX This hack defines what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
# Do not add a toctree entry for every documented class member.
numpydoc_class_members_toctree = False
# Render class attributes using the Parameters-style field list.
numpydoc_attributes_as_param_list = True
# Cross-link the type names that appear in parameter descriptions.
numpydoc_xref_param_type = True
# Map short type names used in docstrings to their fully-qualified targets.
# NOTE: the original literal repeated the 'SourceMorph' and 'Forward' keys
# (silently overridden by Python); duplicates are removed here.
numpydoc_xref_aliases = {
    # Python
    'file-like': ':term:`file-like <python:file object>`',
    # Matplotlib
    'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
    'color': ':doc:`color <matplotlib:api/colors_api>`',
    'collection': ':doc:`collections <matplotlib:api/collections_api>`',
    'Axes': 'matplotlib.axes.Axes',
    'Figure': 'matplotlib.figure.Figure',
    'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
    'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
    # Mayavi
    'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
    'mlab.Figure': 'mayavi.core.api.Scene',
    # sklearn
    'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
    # joblib
    'joblib.Parallel': 'joblib.Parallel',
    # nibabel
    'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
    'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
    'SpatialImage': 'nibabel.spatialimages.SpatialImage',
    # MNE
    'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
    'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
    'SourceMorph': 'mne.SourceMorph',
    'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
    'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
    'AverageTFR': 'mne.time_frequency.AverageTFR',
    'EpochsTFR': 'mne.time_frequency.EpochsTFR',
    'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
    'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
    'DigMontage': 'mne.channels.DigMontage',
    'VectorSourceEstimate': 'mne.VectorSourceEstimate',
    'VolSourceEstimate': 'mne.VolSourceEstimate',
    'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
    'MixedSourceEstimate': 'mne.MixedSourceEstimate',
    'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
    'ConductorModel': 'mne.bem.ConductorModel',
    'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
    'InverseOperator': 'mne.minimum_norm.InverseOperator',
    'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
    'Xdawn': 'mne.preprocessing.Xdawn',
    'Report': 'mne.Report',
    'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
    'Vectorizer': 'mne.decoding.Vectorizer',
    'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
    'TemporalFilter': 'mne.decoding.TemporalFilter',
    'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
    'PSDEstimator': 'mne.decoding.PSDEstimator',
    'LinearModel': 'mne.decoding.LinearModel',
    'FilterEstimator': 'mne.decoding.FilterEstimator',
    'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
    'Beamformer': 'mne.beamformer.Beamformer',
    'Transform': 'mne.transforms.Transform',
}
# Names numpydoc cross-referencing must leave unlinked: ordinary words,
# array-shape placeholders, deliberately undocumented classes, and names
# that have no documentation target.
numpydoc_xref_ignore = {
    # plain words that appear inside type descriptions
    'instance', 'instances', 'of', 'default', 'shape', 'or', 'with',
    'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in', 'dtype',
    'object', 'self.verbose',
    # shape / dimension placeholders
    'm', 'n', 'p', 'q', 'M', 'N', 'Tx', 'obj', 'wsize', 'nchan',
    'n_vertices', 'n_faces', 'n_channels', 'n_events', 'n_colors',
    'n_times', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
    'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
    'n_signals', 'n_step', 'n_freqs',
    'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
    'n_samples', 'n_permutations', 'n_points', 'n_features',
    'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
    'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators',
    'n_tasks', 'nd_features', 'n_classes', 'n_targets', 'n_slices',
    'n_hpi', 'n_fids', 'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero',
    'n_events_out', 'n_segments', 'n_orient_inv', 'n_orient_fwd',
    'n_orient', 'n_dipoles_lcmv', 'n_dipoles_fwd',
    # raw reader classes that are undocumented on purpose
    'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF',
    'RawBTi', 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF',
    # words that show up via sklearn subclasses
    'mapping', 'to', 'any',
    # unlinkable
    'mayavi.mlab.pipeline.surface',
    'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
}
|
|
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Viewfinder asset id prefixes and helpers.
"""
__author__ = '[email protected] (Andy Kimball)'
import struct
from collections import namedtuple
from tornado import gen
from viewfinder.backend.base import base64hex, util
from viewfinder.backend.base.exceptions import InvalidRequestError, PermissionError
from viewfinder.backend.db.device import Device
class IdPrefix(object):
  """An asset-id is base-64 hex encoded and then prefixed with a single
  char that identifies the type of id. The prefix must be unique and
  listed below.
  """
  Activity = 'a'
  Comment = 'c'
  Episode = 'e'
  Operation = 'o'
  Photo = 'p'
  Post = 't'
  Viewpoint = 'v'

  @staticmethod
  def GetAssetName(prefix):
    """Return name of the asset that has the specified prefix (or None)."""
    if not hasattr(IdPrefix, '_prefixes'):
      # Lazily build the reverse map {prefix char: asset name}.
      IdPrefix._prefixes = {}
      for slot in dir(IdPrefix):
        if not slot.startswith('_'):
          # BUG FIX: the original assigned to "prefix" here, clobbering the
          # argument before the lookup below, so the first call always
          # looked up the last attribute scanned instead of the caller's
          # prefix. Use a distinct loop variable.
          value = getattr(IdPrefix, slot)
          if isinstance(value, str):
            assert value not in IdPrefix._prefixes
            IdPrefix._prefixes[value] = slot
    return IdPrefix._prefixes.get(prefix, None)

  @staticmethod
  def IsValid(prefix):
    """Return true if "prefix" is a uniquely defined id prefix."""
    return IdPrefix.GetAssetName(prefix) is not None
# (client_id, server_id) pair that disambiguates assets created by a device;
# server_id stays None unless the server had to add extra uniqueness.
AssetIdUniquifier = namedtuple('AssetIdUniquifier', ['client_id', 'server_id'])
"""All asset ids must be globally unique, even across viewpoints or users.
All asset ids contain a device id and a unique numeric id generated by
that device (i.e. the client_id). If this is enough to guarantee
uniqueness, then the "server_id" field will be None. However, if the
server generates an asset, it may need to specify a server-derived byte
string id (i.e. the server_id) in order to provide the required uniqueness.
"""
def ConstructAssetId(id_prefix, device_id, uniquifier):
  """Constructs an asset id that does not have a timestamp part. The
  asset id is base-64 hex encoded so that it sorts the same way as
  its binary representation and can be safely included in URLs. The
  "id_prefix" is appended to the resulting id. The binary format of
  the asset id is as follows:

    device_id (var-length numeric): id of the generating device
    client_id (var-length numeric): unique id generated by the device
    server_id (byte str): optionally generated by the server
  """
  assert IdPrefix.IsValid(id_prefix), id_prefix
  # device_id followed by the uniquifier, both in their binary encodings.
  packed = util.EncodeVarLengthNumber(device_id) + _EncodeUniquifier(uniquifier)
  # Base64-hex preserves byte ordering while staying URL-safe.
  return id_prefix + base64hex.B64HexEncode(packed, padding=False)
def ConstructTimestampAssetId(id_prefix, timestamp, device_id, uniquifier, reverse_ts=True):
"""Constructs an asset id that has a leading 4-byte encoded timestamp,
which may be reversed. The asset id is base-64 hex encoded so that it
sorts the same way as its binary representation and can be safely
included in URLs. The "id_prefix" is appended to the resulting id.
The binary format of the asset id is as follows:
timestamp (32 bits): whole seconds since Unix epoch
device_id (var-length numeric): id of the generating device
client_id (var-length numeric): unique id generated by the device
server_id (byte str): optionally generated by the server
"""
assert IdPrefix.IsValid(id_prefix), id_prefix
# Drop fractional seconds and possibly reverse the timestamp before converting to raw bytes.
assert timestamp < 1L << 32, timestamp
if reverse_ts:
timestamp = (1L << 32) - int(timestamp) - 1
byte_str = struct.pack('>I', timestamp)
assert len(byte_str) == 4, timestamp
# Append the encoded device_id.
byte_str += util.EncodeVarLengthNumber(device_id)
# Append the encoded asset-id uniquifier.
byte_str += _EncodeUniquifier(uniquifier)
# Base64-hex encode the bytes for URL-inclusion safety.
return id_prefix + base64hex.B64HexEncode(byte_str, padding=False)
def DeconstructAssetId(id_prefix, asset_id):
  """Deconstructs an asset id that was previously constructed according
  to the rules of "ConstructAssetId" (i.e. no timestamp).

  Returns a (device_id, uniquifier) tuple.
  """
  assert IdPrefix.IsValid(id_prefix), id_prefix
  assert asset_id[0] == id_prefix, asset_id
  # Undo the base-64 hex encoding to recover the raw bytes.
  raw = base64hex.B64HexDecode(asset_id[1:], padding=False)
  # Leading bytes are the device_id; the remainder is the uniquifier.
  device_id, consumed = util.DecodeVarLengthNumber(raw)
  return device_id, _DecodeUniquifier(raw[consumed:])
def DeconstructTimestampAssetId(id_prefix, asset_id, reverse_ts=True):
"""Deconstructs an asset id that was previously constructed according
to the rules of "ConstructTimestampAssetId" (i.e. includes timestamp).
Returns a tuple:
(timestamp, device_id, uniquifier)
"""
assert IdPrefix.IsValid(id_prefix), id_prefix
assert asset_id[0] == id_prefix, asset_id
# Decode the bytes, which must be base-64 hex encoded.
byte_str = base64hex.B64HexDecode(asset_id[1:], padding=False)
# Decode the 4-byte timestamp and reverse it if requested.
timestamp, = struct.unpack('>I', byte_str[:4])
if reverse_ts:
timestamp = (1L << 32) - timestamp - 1
# Decode the device_id and the uniquifier.
device_id, num_bytes = util.DecodeVarLengthNumber(byte_str[4:])
uniquifier = _DecodeUniquifier(byte_str[4 + num_bytes:])
# Return all parts as a tuple.
return timestamp, device_id, uniquifier
@gen.coroutine
def VerifyAssetId(client, user_id, device_id, prefix_id, asset_id, has_timestamp):
  """Verifies that "asset_id" conforms to the following requirements:

  1. The asset prefix must match "prefix_id" and the asset id's format must be valid.
  2. The embedded device_id must match "device_id", or must match another device owned by
     "user_id". A device can only create assets with ids that match itself.
  3. The asset_id's uniquifier cannot include a server_id part. Only the server can create
     uniquifiers with this part.

  Raises InvalidRequestError for a malformed id and PermissionError for an
  ownership or uniquifier violation.
  """
  # BUG FIX: resolve the asset's friendly name up-front. The original computed
  # it inside the try block, so an unknown prefix made GetAssetName return
  # None, ".lower()" raised, and the except handler then hit a NameError on
  # the unbound asset_name while formatting its message.
  asset_name = IdPrefix.GetAssetName(prefix_id)
  asset_name = asset_name.lower() if asset_name is not None else 'asset'
  try:
    if has_timestamp:
      _, embedded_device_id, uniquifier = DeconstructTimestampAssetId(prefix_id, asset_id)
    else:
      embedded_device_id, uniquifier = DeconstructAssetId(prefix_id, asset_id)
  except Exception:
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit propagate.
    raise InvalidRequestError('%s id "%s" does not have a valid format.' %
                              (asset_name, asset_id))
  if embedded_device_id != device_id:
    # Query the database to see if the client owns the embedded device id.
    device = yield gen.Task(Device.Query, client, user_id, embedded_device_id, None,
                            must_exist=False)
    if device is None:
      raise PermissionError('User %d and device %d do not have permission to create %s "%s".' %
                            (user_id, device_id, asset_name, asset_id))
  if uniquifier.server_id is not None:
    raise PermissionError('Clients do not have permission to create %s "%s".' %
                          (asset_name, asset_id))
def _EncodeUniquifier(uniquifier):
  """If "uniquifier" is an int or long, then assumes that there is no
  server_id component needed to make the asset id unique. Otherwise,
  expects "uniquifier" to be a (client_id, server_id) tuple.

  Returns the client_id and server_id encoded as a combined byte str.
  """
  if type(uniquifier) in (int, long):
    # Bare numeric uniquifier: just the client_id, no server part.
    return util.EncodeVarLengthNumber(uniquifier)
  client_id, server_id = uniquifier
  assert server_id is None or type(server_id) in (str, unicode), (server_id, type(server_id))
  encoded = util.EncodeVarLengthNumber(client_id)
  if server_id is None:
    return encoded
  return encoded + str(server_id)
def _DecodeUniquifier(byte_str):
  """Decodes the byte str produced by "_EncodeUniquifier" and returns
  the component parts as an AssetIdUniquifier tuple.
  """
  client_id, consumed = util.DecodeVarLengthNumber(byte_str)
  # Any trailing bytes form the optional server_id; empty means None.
  remainder = byte_str[consumed:]
  return AssetIdUniquifier(client_id, remainder if remainder else None)
|
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts video encoding result data from text files to visualization
data source."""
__author__ = "[email protected] (James Zern),"
__author__ += "[email protected] (Jim Bankoski)"
__author__ += "[email protected] (Harald Alvestrand)"
import encoder
import gviz_api
import math
import mpeg_settings
import numpy
import optimizer
import re
import string
import pick_codec
def bdsnr(metric_set1, metric_set2):
  """
  BJONTEGAARD Bjontegaard metric calculation
  Bjontegaard's metric computes the average gain in psnr between two
  rate-distortion curves [1].
  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2
  returns the calculated Bjontegaard metric 'dsnr'
  code adapted from code written by : (c) 2010 Giuseppe Valenzise
  http://www.mathworks.com/matlabcentral/fileexchange/27798-bjontegaard-metric/content/bjontegaard.m
  """
  # pylint: disable=too-many-locals
  # numpy seems to do tricks with its exports.
  # pylint: disable=no-member
  rate1 = [x[0] for x in metric_set1]
  psnr1 = [x[1] for x in metric_set1]
  rate2 = [x[0] for x in metric_set2]
  psnr2 = [x[1] for x in metric_set2]
  # List comprehensions rather than map() (which the original suppressed a
  # pylint warning for): identical results on Python 2, and numpy.polyfit
  # always receives real lists (map() is lazy on Python 3).
  log_rate1 = [math.log(x) for x in rate1]
  log_rate2 = [math.log(x) for x in rate2]
  # Best cubic poly fit for graph represented by log_ratex, psnr_x.
  poly1 = numpy.polyfit(log_rate1, psnr1, 3)
  poly2 = numpy.polyfit(log_rate2, psnr2, 3)
  # Integration interval: the overlapping log-rate range of both curves.
  min_int = max([min(log_rate1), min(log_rate2)])
  max_int = min([max(log_rate1), max(log_rate2)])
  # Integrate poly1, and poly2.
  p_int1 = numpy.polyint(poly1)
  p_int2 = numpy.polyint(poly2)
  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
  # Average improvement; guard against a zero-width integration interval.
  if max_int != min_int:
    avg_diff = (int2 - int1) / (max_int - min_int)
  else:
    avg_diff = 0.0
  return avg_diff
def bdrate(metric_set1, metric_set2):
  """
  BJONTEGAARD Bjontegaard metric calculation
  Bjontegaard's metric computes the average % saving in bitrate
  between two rate-distortion curves [1].
  rate1,psnr1 - RD points for curve 1
  rate2,psnr2 - RD points for curve 2
  adapted from code from: (c) 2010 Giuseppe Valenzise
  """
  # numpy plays games with its exported functions.
  # pylint: disable=no-member
  # pylint: disable=too-many-locals
  rate1 = [x[0] for x in metric_set1]
  psnr1 = [x[1] for x in metric_set1]
  rate2 = [x[0] for x in metric_set2]
  psnr2 = [x[1] for x in metric_set2]
  # List comprehensions rather than map(): identical on Python 2, and
  # numpy.polyfit always receives real lists (map() is lazy on Python 3).
  log_rate1 = [math.log(x) for x in rate1]
  log_rate2 = [math.log(x) for x in rate2]
  # Best cubic poly fit for graph represented by log_ratex, psnr_x.
  poly1 = numpy.polyfit(psnr1, log_rate1, 3)
  poly2 = numpy.polyfit(psnr2, log_rate2, 3)
  # Integration interval: the overlapping psnr range of both curves.
  min_int = max([min(psnr1), min(psnr2)])
  max_int = min([max(psnr1), max(psnr2)])
  # find integral
  p_int1 = numpy.polyint(poly1)
  p_int2 = numpy.polyint(poly2)
  # Calculate the integrated value over the interval we care about.
  int1 = numpy.polyval(p_int1, max_int) - numpy.polyval(p_int1, min_int)
  int2 = numpy.polyval(p_int2, max_int) - numpy.polyval(p_int2, min_int)
  # Calculate the average improvement (in log-rate space).
  avg_exp_diff = (int2 - int1) / (max_int - min_int)
  # In really bad formed data the exponent can grow too large.
  # clamp it.
  if avg_exp_diff > 200:
    avg_exp_diff = 200
  # Convert to a percentage.
  avg_diff = (math.exp(avg_exp_diff) - 1) * 100
  return avg_diff
def FillForm(string_for_substitution, dictionary_of_vars):
  """
  Substitute every //%%name%%// marker in the template with
  dictionary_of_vars[name].

  Fixes over the original: the marker name is matched non-greedily so two
  markers on one line stay separate, and plain str.replace is used for the
  substitution so values containing backslashes or other regex escape
  sequences are inserted verbatim (re.sub treated them as escapes).
  """
  return_string = string_for_substitution
  for name in re.findall(r"//%%(.*?)%%//", string_for_substitution):
    return_string = return_string.replace("//%%" + name + "%%//",
                                          dictionary_of_vars[name])
  return return_string
def HasMetrics(line):
  """
  Return True if a metrics-file line carries data.

  The metrics files produced by vpxenc start header lines with a "B";
  any other line with at least one whitespace-separated field is data.
  """
  # str.split() replaces the long-deprecated string.split() so this also
  # runs under Python 3; behavior is identical on Python 2.
  return line[0:1] != "B" and len(line.split()) > 0


def ParseMetricFile(file_name, metric_column):
  """
  Convert a metrics file into a set of numbers.

  This returns a sorted list of tuples with the first number
  being from the first column (bitrate) and the second being from
  metric_column (counting from 0). Lines that lack metric_column
  contribute 0 as the metric value.
  """
  metric_set1 = set()
  # "with" guarantees the file handle is closed (the original leaked it).
  with open(file_name, "r") as metric_file:
    for line in metric_file:
      if not HasMetrics(line):
        continue
      metrics = line.split()
      if metric_column < len(metrics):
        my_tuple = float(metrics[0]), float(metrics[metric_column])
      else:
        my_tuple = float(metrics[0]), 0
      metric_set1.add(my_tuple)
  return sorted(metric_set1)
def GraphBetter(metric_set1_sorted, metric_set2_sorted, use_set2_as_base):
  """
  Search through the sorted metric set for metrics on either side of
  the metric from file 1. Since both lists are sorted we really
  should not have to search through the entire range, but these
  are small lists.

  Returns the average relative bitrate difference between set 1 and the
  bitrate linearly interpolated from set 2 at the same metric value.
  Points in set 1 whose metric falls outside every set-2 interval are
  simply skipped (they contribute nothing to the average).
  """
  # pylint: disable=too-many-locals
  total_bitrate_difference_ratio = 0.0
  count = 0
  # TODO(hta): Replace whole thing with a call to numpy.interp()
  for bitrate, metric in metric_set1_sorted:
    for i in range(len(metric_set2_sorted) - 1):
      s2_bitrate_0, s2_metric_0 = metric_set2_sorted[i]
      s2_bitrate_1, s2_metric_1 = metric_set2_sorted[i + 1]
      # We have a point on either side of our metric range.
      if s2_metric_0 < metric <= s2_metric_1:
        # Calculate a slope.
        if s2_metric_1 - s2_metric_0 != 0:
          metric_slope = ((s2_bitrate_1 - s2_bitrate_0) /
                          (s2_metric_1 - s2_metric_0))
        else:
          metric_slope = 0
        # Linearly interpolate set 2's bitrate at this metric value.
        estimated_s2_bitrate = (s2_bitrate_0 + (metric - s2_metric_0) *
                                metric_slope)
        # Calculate percentage difference as given by base.
        if use_set2_as_base:
          bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
                                      estimated_s2_bitrate)
        else:
          bitrate_difference_ratio = ((bitrate - estimated_s2_bitrate) /
                                      bitrate)
        total_bitrate_difference_ratio += bitrate_difference_ratio
        count += 1
        # Only the first enclosing interval counts for this point.
        break
  # Calculate the average improvement between graphs.
  if count != 0:
    avg = total_bitrate_difference_ratio / count
  else:
    avg = 0.0
  return avg
def DataSetBetter(metric_set1, metric_set2, method):
  """
  Compares two data sets and determines which is better and by how
  much.

  The input metric set is sorted on bitrate.
  The first set is the one to compare, the second set is the baseline.
  "method" selects the comparison: 'avg', 'dsnr', or BD-rate (default).
  """
  if method == 'avg':
    # Be fair to both graphs by testing all the points in each.
    return 50 * (
        GraphBetter(metric_set1, metric_set2, use_set2_as_base=True) -
        GraphBetter(metric_set2, metric_set1, use_set2_as_base=False))
  if method == 'dsnr':
    return bdsnr(metric_set1, metric_set2)
  return bdrate(metric_set2, metric_set1)
def FileBetter(file_name_1, file_name_2, metric_column, method):
  """
  Compares two data files and determines which is better and by how
  much.

  metric_column selects which metric (column index) is compared.
  """
  # Parse each file into a sorted list of unique (bitrate, metric) tuples,
  # then delegate the actual comparison.
  return DataSetBetter(ParseMetricFile(file_name_1, metric_column),
                       ParseMetricFile(file_name_2, metric_column),
                       method)
def HtmlPage(page_template, page_title="", page_subtitle="",
             filestable="", snrs="", formatters=""):
  """
  Creates a HTML page by filling the template's //%%var%%// markers.
  """
  # pylint: disable=too-many-arguments
  # Only the variables actually used in the template are provided.
  substitutions = {
      'page_title': page_title,
      'page_subtitle': page_subtitle,
      'filestable_dpsnr': filestable['dsnr'],
      'filestable_avg': filestable['avg'],
      'filestable_drate': filestable['drate'],
      'snrs': snrs,
      'formatters': formatters,
  }
  return FillForm(page_template, substitutions)
def ListOneTarget(codecs, rate, videofile, do_score, datatable,
                  score_function=None):
  """Extend a datatable with the info about one video file's scores."""
  # pylint: disable=too-many-arguments
  for codec_name in codecs:
    if isinstance(codec_name, basestring):
      # Normal path: look the codec up by name and build an optimizer.
      my_optimizer = optimizer.Optimizer(pick_codec.PickCodec(codec_name),
                                         score_function=score_function)
    else:
      # For testing: allow direct context injection rather than picking
      # by name; the entry is then the optimizer itself.
      my_optimizer = codec_name
      codec_name = my_optimizer.context.codec.name
    best_encoding = my_optimizer.BestEncoding(rate, videofile)
    if do_score and not best_encoding.Result():
      best_encoding.Execute()
      best_encoding.Store()
    AddOneEncoding(codec_name, my_optimizer, best_encoding, videofile,
                   datatable)
def AddOneEncoding(codec_name, my_optimizer, this_encoding, videofile,
                   datatable):
  """Append one encoding's result record to the datatable."""
  assert this_encoding.Result()
  # Ignore results that score less than zero.
  if my_optimizer.Score(this_encoding) < 0.0:
    return
  # Datatable maps codec name -> {video basename -> [result info, ...]};
  # each result info records the ID of the configuration used, the target
  # bitrate, the command line, the score and the result.
  record = {'config_id': this_encoding.encoder.Hashname(),
            'target_bitrate': this_encoding.bitrate,
            'encode_command': this_encoding.EncodeCommandLine(),
            'score': my_optimizer.Score(this_encoding),
            'result': this_encoding.ResultWithoutFrameData()}
  per_codec = datatable.setdefault(codec_name, {})
  per_codec.setdefault(videofile.basename, []).append(record)
def ListMpegResults(codecs, do_score, datatable, score_function=None):
  """List all scores for all tests in the MPEG test set for a set of codecs."""
  # Sorting on target bitrate is necessary for graphs to display correctly.
  for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
    ListOneTarget(codecs, rate, encoder.Videofile(filename), do_score,
                  datatable, score_function)
def ListMpegSingleConfigResults(codecs, datatable, score_function=None):
  """Score every MPEG file/rate using each codec's single best overall encoder."""
  optimizers = {}
  best_encoders = {}
  for codec_name in codecs:
    my_optimizer = optimizer.Optimizer(
        pick_codec.PickCodec(codec_name),
        score_function=score_function, file_set=mpeg_settings.MpegFiles())
    optimizers[codec_name] = my_optimizer
    best_encoders[codec_name] = my_optimizer.BestOverallEncoder()
  # Sorted so graphs built from the datatable display correctly.
  for rate, filename in sorted(mpeg_settings.MpegFiles().AllFilesAndRates()):
    videofile = encoder.Videofile(filename)
    for codec_name in codecs:
      chosen = best_encoders[codec_name]
      if chosen:
        my_encoding = chosen.Encoding(rate, videofile)
        my_encoding.Recover()
        AddOneEncoding(codec_name, optimizers[codec_name], my_encoding,
                       videofile, datatable)
def ExtractBitrateAndPsnr(datatable, codec, filename):
  """Return the (bitrate, psnr) pairs recorded for one codec/file."""
  return [(record['result']['bitrate'], record['result']['psnr'])
          for record in datatable[codec][filename]]
def BuildComparisonTable(datatable, metric, baseline_codec, other_codecs):
  """Builds a table of comparison data for this metric.

  Returns a list of row dicts (one per video file, plus a final OVERALL
  row): each row maps 'file' to the file name and each codec name to that
  codec's metric improvement relative to baseline_codec.
  """
  # Find the metric files in the baseline codec.
  videofile_name_list = datatable[baseline_codec].keys()
  # Running totals used to compute the per-codec OVERALL average row.
  countoverall = {}
  sumoverall = {}
  for this_codec in other_codecs:
    countoverall[this_codec] = 0
    sumoverall[this_codec] = 0
  # Data holds the data for the visualization, name given comes from
  # gviz_api sample code.
  data = []
  for filename in videofile_name_list:
    row = {'file': filename}
    baseline_dataset = ExtractBitrateAndPsnr(datatable,
                                             baseline_codec,
                                             filename)
    # Read the metric file from each of the directories in our list.
    for this_codec in other_codecs:
      # If there is a metric in this_codec, calculate the overall difference
      # between it and the baseline codec's metric.
      if (this_codec in datatable and filename in datatable[this_codec]
          and filename in datatable[baseline_codec]):
        this_dataset = ExtractBitrateAndPsnr(datatable,
                                             this_codec,
                                             filename)
        overall = DataSetBetter(
            baseline_dataset, this_dataset, metric)
        if not math.isnan(overall):
          # TODO(hta): figure out when DataSetBetter generates NaN
          # NaN results are silently dropped from both the row and the
          # OVERALL average.
          row[this_codec] = overall
          sumoverall[this_codec] += overall
          countoverall[this_codec] += 1
    data.append(row)
  # Add the overall numbers.
  row = {"file": "OVERALL " + metric}
  for this_codec in other_codecs:
    if countoverall[this_codec]:
      row[this_codec] = sumoverall[this_codec] / countoverall[this_codec]
  data.append(row)
  return data
def BuildGvizDataTable(datatable, metric, baseline_codec, other_codecs):
  """Builds a Gviz DataTable giving this metric for the files and codecs."""
  # Column schema: file name plus one numeric column per compared codec.
  description = {'file': ('string', 'File')}
  for this_codec in other_codecs:
    description[this_codec] = ('number', this_codec)
  # Generate the gViz table from the per-file comparison rows.
  gviz_data_table = gviz_api.DataTable(description)
  gviz_data_table.LoadData(
      BuildComparisonTable(datatable, metric, baseline_codec, other_codecs))
  return gviz_data_table
def CrossPerformanceGvizTable(datatable, metric, codecs, criterion):
  """Build a square table of codecs and relative performance."""
  # pylint: disable=too-many-locals
  # File list is taken from the first codec; assumes the other codecs
  # cover (a subset of) the same files -- the inner guard re-checks.
  videofile_name_list = datatable[codecs[0]].keys()
  description = {}
  description['codec'] = ('string', 'Codec')
  data = []
  for codec in codecs:
    description[codec] = ('string', codec)
  for codec1 in codecs:
    lineitem = {'codec': codec1}
    for codec2 in codecs:
      if codec1 != codec2:
        count = 0
        overall = 0.0
        for filename in videofile_name_list:
          if (codec1 in datatable and filename in datatable[codec1]
              and codec2 in datatable and filename in datatable[codec2]):
            # Accumulate how much codec2 improves on codec1 for this file.
            overall += DataSetBetter(
                ExtractBitrateAndPsnr(datatable, codec2, filename),
                ExtractBitrateAndPsnr(datatable, codec1, filename), metric)
            count += 1
        if count > 0:
          # Each cell is (sort value, HTML link to the detailed result).
          display = ('<a href=/results/show_result.html?' +
                     'codec1=%s&codec2=%s&criterion=%s>%5.2f</a>') % (
                         codec2, codec1, criterion, overall / count)
          lineitem[codec2] = (overall / count, display)
    data.append(lineitem)
  gviz_data_table = gviz_api.DataTable(description)
  gviz_data_table.LoadData(data)
  return gviz_data_table
|
|
"""Parse tree transformation module.
Transforms Python source code into an abstract syntax tree (AST)
defined in the ast module.
The simplest ways to invoke this module are via parse and parseFile.
parse(buf) -> AST
parseFile(path) -> AST
"""
# Original version written by Greg Stein ([email protected])
# and Bill Tutt ([email protected])
# February 1997.
#
# Modifications and improvements for Python 2.0 by Jeremy Hylton and
# Mark Hammond
#
# Some fixes to try to have correct line number on almost all nodes
# (except Module, Discard and Stmt) added by Sylvain Thenault
#
# Portions of this file are:
# Copyright (C) 1997-1998 Greg Stein. All Rights Reserved.
#
# This module is provided under a BSD-ish license. See
# http://www.opensource.org/licenses/bsd-license.html
# and replace OWNER, ORGANIZATION, and YEAR as appropriate.
from compiler.ast import *
import parser
import symbol
import token
import sys
class WalkerError(StandardError):
    """Raised when the transformer meets a parse-tree node it cannot handle."""
    pass
from consts import CO_VARARGS, CO_VARKEYWORDS
from consts import OP_ASSIGN, OP_DELETE, OP_APPLY
def parseFile(path):
    """Parse the Python source file at *path* and return its AST."""
    source_file = open(path, "U")
    # The parser API tolerates files without a trailing newline, but not
    # strings without one; since we go through the string version of the
    # API, always append an extra newline to the file contents.
    source = source_file.read() + "\n"
    source_file.close()
    return parse(source)
def parse(buf, mode="exec"):
    """Return the AST for *buf*; *mode* is 'exec', 'single' or 'eval'."""
    if mode in ("exec", "single"):
        return Transformer().parsesuite(buf)
    if mode == "eval":
        return Transformer().parseexpr(buf)
    raise ValueError("compile() arg 3 must be"
                     " 'exec' or 'eval' or 'single'")
def asList(nodes):
    """Recursively flatten *nodes* into plain lists and tuples.

    Items exposing an asList() method are converted via that method;
    tuples and lists are converted recursively; everything else is
    passed through unchanged.
    """
    result = []
    for item in nodes:
        if hasattr(item, "asList"):
            result.append(item.asList())
        elif type(item) is type((None, None)):
            result.append(tuple(asList(item)))
        elif type(item) is type([]):
            result.append(asList(item))
        else:
            result.append(item)
    return result
def extractLineNo(ast):
    """Return the first line number found in parse-tree tuple *ast*.

    Terminal nodes are (token, text, lineno); non-terminals are searched
    depth-first. Returns None when no terminal is found.
    """
    if not isinstance(ast[1], tuple):
        # Terminal node: the line number sits in slot 2.
        return ast[2]
    for child in ast[1:]:
        if isinstance(child, tuple):
            found = extractLineNo(child)
            if found is not None:
                return found
    return None
def Node(*args):
kind = args[0]
if nodes.has_key(kind):
try:
return nodes[kind](*args[1:])
except TypeError:
print nodes[kind], len(args), args
raise
else:
raise WalkerEror, "Can't find appropriate Node type: %s" % str(args)
#return apply(ast.Node, args)
class Transformer:
"""Utility object for transforming Python parse trees.
Exposes the following methods:
tree = transform(ast_tree)
tree = parsesuite(text)
tree = parseexpr(text)
tree = parsefile(fileob | filename)
"""
def __init__(self):
    """Build the dispatch tables mapping grammar symbols to handler methods."""
    self._dispatch = {}
    # Route each grammar symbol to the same-named method, when one exists.
    for value, name in symbol.sym_name.items():
        if hasattr(self, name):
            self._dispatch[value] = getattr(self, name)
    self._dispatch[token.NEWLINE] = self.com_NEWLINE
    # Atoms are dispatched on their leading token rather than a symbol.
    self._atom_dispatch = {token.LPAR: self.atom_lpar,
                           token.LSQB: self.atom_lsqb,
                           token.LBRACE: self.atom_lbrace,
                           token.BACKQUOTE: self.atom_backquote,
                           token.NUMBER: self.atom_number,
                           token.STRING: self.atom_string,
                           token.NAME: self.atom_name,
                           }
    # Declared source encoding; set by compile_node() on encoding_decl.
    self.encoding = None
def transform(self, tree):
    """Transform an AST into a modified parse tree."""
    # Accept either a raw parser AST object or an already-tupled tree.
    if not isinstance(tree, (tuple, list)):
        tree = parser.ast2tuple(tree, line_info=1)
    return self.compile_node(tree)
def parsesuite(self, text):
    """Return a modified parse tree for the given suite text."""
    return self.transform(parser.suite(text))

def parseexpr(self, text):
    """Return a modified parse tree for the given expression text."""
    return self.transform(parser.expr(text))

def parsefile(self, file):
    """Return a modified parse tree for the contents of the given file."""
    # Accept either an open file object or a path string.
    if type(file) == type(''):
        file = open(file)
    return self.parsesuite(file.read())
# --------------------------------------------------------------
#
# PRIVATE METHODS
#
def compile_node(self, node):
### emit a line-number node?
n = node[0]
if n == symbol.encoding_decl:
self.encoding = node[2]
node = node[1]
n = node[0]
if n == symbol.single_input:
return self.single_input(node[1:])
if n == symbol.file_input:
return self.file_input(node[1:])
if n == symbol.eval_input:
return self.eval_input(node[1:])
if n == symbol.lambdef:
return self.lambdef(node[1:])
if n == symbol.funcdef:
return self.funcdef(node[1:])
if n == symbol.classdef:
return self.classdef(node[1:])
raise WalkerEror, ('unexpected node type', n)
def single_input(self, node):
    """Transform an interactive statement; a bare NEWLINE becomes Pass."""
    ### do we want to do anything about being "interactive" ?
    # NEWLINE | simple_stmt | compound_stmt NEWLINE
    first = node[0][0]
    if first == token.NEWLINE:
        return Pass()
    return self.com_stmt(node[0])
def file_input(self, nodelist):
    """Transform a whole module body into a Module node."""
    doc = self.get_docstring(nodelist, symbol.file_input)
    # When a docstring was extracted, skip its statement in the body.
    if doc is not None:
        i = 1
    else:
        i = 0
    stmts = []
    for node in nodelist[i:]:
        if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
            self.com_append_stmt(stmts, node)
    return Module(doc, Stmt(stmts))
def eval_input(self, nodelist):
    """Transform eval-mode input into an Expression node."""
    # from the built-in function input()
    ### is this sufficient?
    return Expression(self.com_node(nodelist[0]))
def decorator_name(self, nodelist):
    """Build the Name/Getattr chain for a dotted decorator name."""
    listlen = len(nodelist)
    assert listlen >= 1 and listlen % 2 == 1

    item = self.atom_name(nodelist)
    # Fold each ". NAME" pair onto the running expression.
    for i in range(1, listlen, 2):
        assert nodelist[i][0] == token.DOT
        assert nodelist[i + 1][0] == token.NAME
        item = Getattr(item, nodelist[i + 1][1])
    return item
def decorator(self, nodelist):
    """Transform one decorator line into its expression node."""
    # '@' dotted_name [ '(' [arglist] ')' ]
    assert len(nodelist) in (3, 5, 6)
    assert nodelist[0][0] == token.AT
    assert nodelist[-1][0] == token.NEWLINE

    assert nodelist[1][0] == symbol.dotted_name
    funcname = self.decorator_name(nodelist[1][1:])

    if len(nodelist) > 3:
        assert nodelist[2][0] == token.LPAR
        # Decorator with an argument list: build the call node.
        expr = self.com_call_function(funcname, nodelist[3])
    else:
        expr = funcname

    return expr
def decorators(self, nodelist):
    """Transform a run of decorator lines into a Decorators node."""
    # decorators: decorator ([NEWLINE] decorator)* NEWLINE
    items = []
    for dec_nodelist in nodelist:
        assert dec_nodelist[0] == symbol.decorator
        items.append(self.decorator(dec_nodelist[1:]))
    return Decorators(items)
def funcdef(self, nodelist):
    """Transform a function definition into a Function node."""
    #                    -6   -5    -4         -3  -2    -1
    # funcdef: [decorators] 'def' NAME parameters ':' suite
    # parameters: '(' [varargslist] ')'

    if len(nodelist) == 6:
        assert nodelist[0][0] == symbol.decorators
        decorators = self.decorators(nodelist[0][1:])
    else:
        assert len(nodelist) == 5
        decorators = None

    # Negative indices let the same code handle both forms (with and
    # without the optional decorators prefix).
    lineno = nodelist[-4][2]
    name = nodelist[-4][1]
    args = nodelist[-3][2]

    if args[0] == symbol.varargslist:
        names, defaults, flags = self.com_arglist(args[1:])
    else:
        names = defaults = ()
        flags = 0
    doc = self.get_docstring(nodelist[-1])

    # code for function
    code = self.com_node(nodelist[-1])

    if doc is not None:
        # The docstring is carried on the Function node, not in its body.
        assert isinstance(code, Stmt)
        assert isinstance(code.nodes[0], Discard)
        del code.nodes[0]
    return Function(decorators, name, names, defaults, flags, doc, code,
                    lineno=lineno)
def lambdef(self, nodelist):
    """Transform a lambda into a Lambda node."""
    # lambdef: 'lambda' [varargslist] ':' test
    if nodelist[2][0] == symbol.varargslist:
        names, defaults, flags = self.com_arglist(nodelist[2][1:])
    else:
        names = defaults = ()
        flags = 0

    # code for lambda
    code = self.com_node(nodelist[-1])

    return Lambda(names, defaults, flags, code, lineno=nodelist[1][2])
def classdef(self, nodelist):
    """Transform a class statement into a Class node."""
    # classdef: 'class' NAME ['(' testlist ')'] ':' suite
    name = nodelist[1][1]
    doc = self.get_docstring(nodelist[-1])
    if nodelist[2][0] == token.COLON:
        # No base list given.
        bases = []
    else:
        bases = self.com_bases(nodelist[3])

    # code for class
    code = self.com_node(nodelist[-1])

    if doc is not None:
        # The docstring is carried on the Class node, not in its body.
        assert isinstance(code, Stmt)
        assert isinstance(code.nodes[0], Discard)
        del code.nodes[0]
    return Class(name, bases, doc, code, lineno=nodelist[1][2])
def stmt(self, nodelist):
    """Dispatch a statement node; the result is always a Stmt."""
    return self.com_stmt(nodelist[0])

# These grammar levels are single-child wrappers and share the handler.
small_stmt = stmt
flow_stmt = stmt
compound_stmt = stmt
def simple_stmt(self, nodelist):
    """Collect ';'-separated small statements into a single Stmt."""
    # small_stmt (';' small_stmt)* [';'] NEWLINE
    stmts = []
    # Children alternate statement / separator, hence the stride of 2.
    for index in range(0, len(nodelist), 2):
        self.com_append_stmt(stmts, nodelist[index])
    return Stmt(stmts)
# These productions are consumed inline by funcdef()/com_arglist() and
# friends; reaching any of them through dispatch is a bug.
# Bug fix: every one of these previously raised the misspelled name
# "WalkerEror", which produced a NameError instead of WalkerError.
def parameters(self, nodelist):
    raise WalkerError

def varargslist(self, nodelist):
    raise WalkerError

def fpdef(self, nodelist):
    raise WalkerError

def fplist(self, nodelist):
    raise WalkerError

def dotted_name(self, nodelist):
    raise WalkerError

def comp_op(self, nodelist):
    raise WalkerError

def trailer(self, nodelist):
    raise WalkerError

def sliceop(self, nodelist):
    raise WalkerError

def argument(self, nodelist):
    raise WalkerError
# --------------------------------------------------------------
#
# STATEMENT NODES (invoked by com_node())
#
def expr_stmt(self, nodelist):
    """Transform an expression statement: bare, assignment or augmented."""
    # augassign testlist | testlist ('=' testlist)*
    en = nodelist[-1]
    exprNode = self.lookup_node(en)(en[1:])
    if len(nodelist) == 1:
        # Bare expression: its value is discarded.
        return Discard(exprNode, lineno=exprNode.lineno)
    if nodelist[1][0] == token.EQUAL:
        # One or more chained '=' targets, each of nodelist[0], [2], ...
        nodesl = []
        for i in range(0, len(nodelist) - 2, 2):
            nodesl.append(self.com_assign(nodelist[i], OP_ASSIGN))
        return Assign(nodesl, exprNode, lineno=nodelist[1][2])
    else:
        lval = self.com_augassign(nodelist[0])
        op = self.com_augassign_op(nodelist[1])
        return AugAssign(lval, op[1], exprNode, lineno=op[2])
    # Cleanup: an unreachable 'raise WalkerError, "can't get here"' that
    # followed this if/else (both branches return) has been removed.
def print_stmt(self, nodelist):
    """Transform print into Print (trailing comma) or Printnl."""
    # print ([ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ])
    items = []
    if len(nodelist) == 1:
        start = 1
        dest = None
    elif nodelist[1][0] == token.RIGHTSHIFT:
        # "print >>dest, ..." form.
        assert len(nodelist) == 3 \
               or nodelist[3][0] == token.COMMA
        dest = self.com_node(nodelist[2])
        start = 4
    else:
        dest = None
        start = 1
    for i in range(start, len(nodelist), 2):
        items.append(self.com_node(nodelist[i]))
    if nodelist[-1][0] == token.COMMA:
        # Trailing comma suppresses the newline.
        return Print(items, dest, lineno=nodelist[0][2])
    return Printnl(items, dest, lineno=nodelist[0][2])
def del_stmt(self, nodelist):
    """Transform a del statement into delete-mode assignment targets."""
    return self.com_assign(nodelist[1], OP_DELETE)

def pass_stmt(self, nodelist):
    """Transform a pass statement."""
    return Pass(lineno=nodelist[0][2])

def break_stmt(self, nodelist):
    """Transform a break statement."""
    return Break(lineno=nodelist[0][2])

def continue_stmt(self, nodelist):
    """Transform a continue statement."""
    return Continue(lineno=nodelist[0][2])
def return_stmt(self, nodelist):
    """Transform a return statement; bare return yields Const(None)."""
    # return: [testlist]
    if len(nodelist) < 2:
        return Return(Const(None), lineno=nodelist[0][2])
    return Return(self.com_node(nodelist[1]), lineno=nodelist[0][2])

def yield_stmt(self, nodelist):
    """Transform a yield statement."""
    return Yield(self.com_node(nodelist[1]), lineno=nodelist[0][2])
def raise_stmt(self, nodelist):
    """Transform a raise statement with up to three expressions."""
    # raise: [test [',' test [',' test]]]
    expr1 = expr2 = expr3 = None
    # Evaluate the optional parts from last to first, matching the
    # original dispatch order.
    if len(nodelist) > 5:
        expr3 = self.com_node(nodelist[5])
    if len(nodelist) > 3:
        expr2 = self.com_node(nodelist[3])
    if len(nodelist) > 1:
        expr1 = self.com_node(nodelist[1])
    return Raise(expr1, expr2, expr3, lineno=nodelist[0][2])
def import_stmt(self, nodelist):
    """Dispatch to import_name or import_from."""
    # import_stmt: import_name | import_from
    assert len(nodelist) == 1
    return self.com_node(nodelist[0])

def import_name(self, nodelist):
    """Transform "import a.b as c, d" into an Import node."""
    # import_name: 'import' dotted_as_names
    return Import(self.com_dotted_as_names(nodelist[1]),
                  lineno=nodelist[0][2])
def import_from(self, nodelist):
    """Transform "from x import ..." into a From node."""
    # import_from: 'from' dotted_name 'import' ('*' |
    #    '(' import_as_names ')' | import_as_names)
    assert nodelist[0][1] == 'from'
    assert nodelist[1][0] == symbol.dotted_name
    assert nodelist[2][1] == 'import'
    fromname = self.com_dotted_name(nodelist[1])
    if nodelist[3][0] == token.STAR:
        return From(fromname, [('*', None)],
                    lineno=nodelist[0][2])
    else:
        # Skip the optional parenthesis around the imported-names list.
        node = nodelist[3 + (nodelist[3][0] == token.LPAR)]
        return From(fromname, self.com_import_as_names(node),
                    lineno=nodelist[0][2])
def global_stmt(self, nodelist):
    """Transform a global declaration into a Global node."""
    # global: NAME (',' NAME)*
    names = [nodelist[i][1] for i in range(1, len(nodelist), 2)]
    return Global(names, lineno=nodelist[0][2])
def exec_stmt(self, nodelist):
    """Transform an exec statement with optional globals/locals."""
    # exec_stmt: 'exec' expr ['in' expr [',' expr]]
    expr1 = self.com_node(nodelist[1])
    if len(nodelist) >= 4:
        expr2 = self.com_node(nodelist[3])
        if len(nodelist) >= 6:
            expr3 = self.com_node(nodelist[5])
        else:
            expr3 = None
    else:
        expr2 = expr3 = None
    return Exec(expr1, expr2, expr3, lineno=nodelist[0][2])
def assert_stmt(self, nodelist):
    """Transform an assert statement with an optional message."""
    # 'assert': test, [',' test]
    expr1 = self.com_node(nodelist[1])
    expr2 = None
    if (len(nodelist) == 4):
        expr2 = self.com_node(nodelist[3])
    return Assert(expr1, expr2, lineno=nodelist[0][2])
def if_stmt(self, nodelist):
    """Transform if/elif/else into an If node."""
    # if: test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
    tests = []
    # Each (test, suite) pair spans four children: KEYWORD test ':' suite.
    for i in range(0, len(nodelist) - 3, 4):
        testNode = self.com_node(nodelist[i + 1])
        suiteNode = self.com_node(nodelist[i + 3])
        tests.append((testNode, suiteNode))

    if len(nodelist) % 4 == 3:
        # Trailing 'else' ':' suite.
        elseNode = self.com_node(nodelist[-1])
##      elseNode.lineno = nodelist[-1][1][2]
    else:
        elseNode = None
    return If(tests, elseNode, lineno=nodelist[0][2])
def while_stmt(self, nodelist):
    """Transform a while loop with optional else clause."""
    # 'while' test ':' suite ['else' ':' suite]
    testNode = self.com_node(nodelist[1])
    bodyNode = self.com_node(nodelist[3])
    elseNode = None
    if len(nodelist) > 4:
        elseNode = self.com_node(nodelist[6])
    return While(testNode, bodyNode, elseNode, lineno=nodelist[0][2])
def for_stmt(self, nodelist):
    """Transform a for loop with optional else clause."""
    # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
    assignNode = self.com_assign(nodelist[1], OP_ASSIGN)
    listNode = self.com_node(nodelist[3])
    bodyNode = self.com_node(nodelist[5])
    elseNode = None
    if len(nodelist) > 8:
        elseNode = self.com_node(nodelist[8])
    return For(assignNode, listNode, bodyNode, elseNode,
               lineno=nodelist[0][2])
def try_stmt(self, nodelist):
    """Dispatch try statements to the except or finally handler."""
    # 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
    # | 'try' ':' suite 'finally' ':' suite
    if nodelist[3][0] == symbol.except_clause:
        return self.com_try_except(nodelist)
    return self.com_try_finally(nodelist)
def suite(self, nodelist):
    """Transform a statement suite (inline or indented block)."""
    # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
    if len(nodelist) == 1:
        return self.com_stmt(nodelist[0])

    body = []
    # Skip the NEWLINE/INDENT/DEDENT framing tokens.
    for child in nodelist:
        if child[0] == symbol.stmt:
            self.com_append_stmt(body, child)
    return Stmt(body)
# --------------------------------------------------------------
#
# EXPRESSION NODES (invoked by com_node())
#
def testlist(self, nodelist):
    """Transform a comma-separated expression list into a Tuple."""
    # testlist: expr (',' expr)* [',']
    # testlist_safe: test [(',' test)+ [',']]
    # exprlist: expr (',' expr)* [',']
    return self.com_binary(Tuple, nodelist)

# These productions have identical shape and share the handler.
testlist_safe = testlist # XXX
testlist1 = testlist
exprlist = testlist
def testlist_gexp(self, nodelist):
    """Transform a testlist that may actually be a generator expression."""
    if len(nodelist) == 2 and nodelist[1][0] == symbol.gen_for:
        # "expr for ..." form.
        test = self.com_node(nodelist[0])
        return self.com_generator_expression(test, nodelist[1])
    return self.testlist(nodelist)

def test(self, nodelist):
    """Transform an 'or' chain, or a lambda."""
    # and_test ('or' and_test)* | lambdef
    if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
        return self.lambdef(nodelist[0])
    return self.com_binary(Or, nodelist)
def and_test(self, nodelist):
    """Transform an 'and' chain."""
    # not_test ('and' not_test)*
    return self.com_binary(And, nodelist)

def not_test(self, nodelist):
    """Transform 'not' expressions; plain comparisons pass through."""
    # 'not' not_test | comparison
    result = self.com_node(nodelist[-1])
    if len(nodelist) == 2:
        return Not(result, lineno=nodelist[0][2])
    return result
def comparison(self, nodelist):
    """Transform a comparison chain into a single Compare node."""
    # comparison: expr (comp_op expr)*
    node = self.com_node(nodelist[0])
    if len(nodelist) == 1:
        return node

    results = []
    for i in range(2, len(nodelist), 2):
        nl = nodelist[i-1]

        # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
        #          | 'in' | 'not' 'in' | 'is' | 'is' 'not'
        n = nl[1]
        if n[0] == token.NAME:
            # NOTE: "type" here shadows the builtin; it holds the
            # comparison-operator string ('in', 'not in', 'is', 'is not').
            type = n[1]
            if len(nl) == 3:
                if type == 'not':
                    type = 'not in'
                else:
                    type = 'is not'
        else:
            type = _cmp_types[n[0]]

        # lineno ends up being that of the LAST operator in the chain.
        lineno = nl[1][2]
        results.append((type, self.com_node(nodelist[i])))

    # we need a special "compare" node so that we can distinguish
    #   3 < x < 5   from    (3 < x) < 5
    # the two have very different semantics and results (note that the
    # latter form is always true)
    return Compare(node, results, lineno=lineno)
def expr(self, nodelist):
    """Transform a bit-or expression chain."""
    # expr: xor_expr ('|' xor_expr)*
    return self.com_binary(Bitor, nodelist)

def xor_expr(self, nodelist):
    """Transform a bit-xor expression chain."""
    # xor_expr: and_expr ('^' and_expr)*   (comment corrected; it
    # previously repeated the xor_expr production)
    return self.com_binary(Bitxor, nodelist)

def and_expr(self, nodelist):
    """Transform a bit-and expression chain."""
    # and_expr: shift_expr ('&' shift_expr)*   (comment corrected; it
    # previously repeated the xor_expr production)
    return self.com_binary(Bitand, nodelist)
def shift_expr(self, nodelist):
    """Fold a shift chain into nested LeftShift/RightShift nodes."""
    # shift_expr ('<<'|'>>' shift_expr)*
    node = self.com_node(nodelist[0])
    for i in range(2, len(nodelist), 2):
        right = self.com_node(nodelist[i])
        if nodelist[i-1][0] == token.LEFTSHIFT:
            # NOTE(review): lineno is always taken from the FIRST
            # operator (nodelist[1]), even for later ones in the chain.
            node = LeftShift([node, right], lineno=nodelist[1][2])
        elif nodelist[i-1][0] == token.RIGHTSHIFT:
            node = RightShift([node, right], lineno=nodelist[1][2])
        else:
            raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
    return node
def arith_expr(self, nodelist):
    """Fold a +/- chain into nested Add/Sub nodes."""
    node = self.com_node(nodelist[0])
    for i in range(2, len(nodelist), 2):
        right = self.com_node(nodelist[i])
        if nodelist[i-1][0] == token.PLUS:
            # NOTE(review): lineno is always that of the first operator.
            node = Add([node, right], lineno=nodelist[1][2])
        elif nodelist[i-1][0] == token.MINUS:
            node = Sub([node, right], lineno=nodelist[1][2])
        else:
            raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
    return node
def term(self, nodelist):
    """Fold a *, /, %, // chain into nested binary nodes."""
    node = self.com_node(nodelist[0])
    for i in range(2, len(nodelist), 2):
        right = self.com_node(nodelist[i])
        t = nodelist[i-1][0]
        if t == token.STAR:
            node = Mul([node, right])
        elif t == token.SLASH:
            node = Div([node, right])
        elif t == token.PERCENT:
            node = Mod([node, right])
        elif t == token.DOUBLESLASH:
            node = FloorDiv([node, right])
        else:
            raise ValueError, "unexpected token: %s" % t
        # NOTE(review): lineno is always that of the first operator.
        node.lineno = nodelist[1][2]
    return node
def factor(self, nodelist):
    """Transform unary +, - and ~ operators."""
    elt = nodelist[0]
    t = elt[0]
    node = self.lookup_node(nodelist[-1])(nodelist[-1][1:])
    # need to handle (unary op)constant here...
    if t == token.PLUS:
        return UnaryAdd(node, lineno=elt[2])
    elif t == token.MINUS:
        return UnarySub(node, lineno=elt[2])
    elif t == token.TILDE:
        node = Invert(node, lineno=elt[2])
    # Falls through for '~' and for a bare operand (no unary operator).
    return node
def power(self, nodelist):
    """Transform an atom with trailers and an optional '**' exponent."""
    # power: atom trailer* ('**' factor)*
    node = self.com_node(nodelist[0])
    for i in range(1, len(nodelist)):
        elt = nodelist[i]
        if elt[0] == token.DOUBLESTAR:
            return Power([node, self.com_node(nodelist[i+1])],
                         lineno=elt[2])
        # Apply call/attribute/subscript trailers left to right.
        node = self.com_apply_trailer(node, elt)
    return node
def atom(self, nodelist):
    """Dispatch an atom node on its leading token.

    Cleanup: two unreachable statements that followed this return
    ("n.lineno = nodelist[0][2]" / "return n") were removed — they were
    dead code and referenced a never-bound name "n".
    """
    return self._atom_dispatch[nodelist[0][0]](nodelist)
def atom_lpar(self, nodelist):
    """'(' ... ')': empty tuple or the parenthesised expression."""
    if nodelist[1][0] == token.RPAR:
        return Tuple(())
    return self.com_node(nodelist[1])

def atom_lsqb(self, nodelist):
    """'[' ... ']': empty list or a list constructor/comprehension."""
    if nodelist[1][0] == token.RSQB:
        return List(())
    return self.com_list_constructor(nodelist[1])

def atom_lbrace(self, nodelist):
    """'{' ... '}': empty dict or a dictmaker."""
    if nodelist[1][0] == token.RBRACE:
        return Dict(())
    return self.com_dictmaker(nodelist[1])

def atom_backquote(self, nodelist):
    """'`expr`': repr() shorthand, a Backquote node."""
    return Backquote(self.com_node(nodelist[1]))
def atom_number(self, nodelist):
    """Turn a numeric literal token into a Const node."""
    ### need to verify this matches compile.c
    # eval() is acceptable here only because the tokenizer guarantees
    # the text is a numeric literal, never arbitrary input.
    k = eval(nodelist[0][1])
    return Const(k, lineno=nodelist[0][2])
def decode_literal(self, lit):
    """Evaluate a string literal, honouring the declared source encoding."""
    if self.encoding:
        # this is particularly fragile & a bit of a
        # hack... changes in compile.c:parsestr and
        # tokenizer.c must be reflected here.
        if self.encoding not in ['utf-8', 'iso-8859-1']:
            # Re-encode so the eval'd source matches the declared coding.
            lit = unicode(lit, 'utf-8').encode(self.encoding)
        return eval("# coding: %s\n%s" % (self.encoding, lit))
    else:
        return eval(lit)
def atom_string(self, nodelist):
    """Concatenate adjacent string literal tokens into one Const."""
    k = ''
    for node in nodelist:
        k += self.decode_literal(node[1])
    return Const(k, lineno=nodelist[0][2])

def atom_name(self, nodelist):
    """Turn a NAME token into a Name node."""
    return Name(nodelist[0][1], lineno=nodelist[0][2])
# --------------------------------------------------------------
#
# INTERNAL PARSING UTILITIES
#
# The use of com_node() introduces a lot of extra stack frames,
# enough to cause a stack overflow compiling test.test_parser with
# the standard interpreter recursionlimit. The com_node() is a
# convenience function that hides the dispatch details, but comes
# at a very high cost. It is more efficient to dispatch directly
# in the callers. In these cases, use lookup_node() and call the
# dispatched node directly.
def lookup_node(self, node):
    """Return the handler registered for this node's symbol."""
    return self._dispatch[node[0]]

# NOTE(review): _callers appears unused within this file — verify it has
# no external users before removing.
_callers = {}

def com_node(self, node):
    """Dispatch *node* to its handler (convenience wrapper)."""
    # Note: compile.c has handling in com_node for del_stmt, pass_stmt,
    # break_stmt, stmt, small_stmt, flow_stmt, simple_stmt,
    # and compound_stmt.
    # We'll just dispatch them.
    return self._dispatch[node[0]](node[1:])
def com_NEWLINE(self, *args):
    """Turn a stray NEWLINE token into a harmless no-op node."""
    # A ';' at the end of a line can make a NEWLINE token appear
    # here, Render it harmless. (genc discards ('discard',
    # ('const', xxxx)) Nodes)
    return Discard(Const(None))
def com_arglist(self, nodelist):
    """Transform a varargslist into (names, defaults, flags).

    Returns parameter names (nested tuples for tuple parameters),
    default-value nodes, and CO_VARARGS/CO_VARKEYWORDS flag bits.
    """
    # varargslist:
    #     (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
    #   | fpdef ['=' test] (',' fpdef ['=' test])* [',']
    # fpdef: NAME | '(' fplist ')'
    # fplist: fpdef (',' fpdef)* [',']
    names = []
    defaults = []
    flags = 0

    i = 0
    while i < len(nodelist):
        node = nodelist[i]
        if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
            if node[0] == token.STAR:
                node = nodelist[i+1]
                if node[0] == token.NAME:
                    # *args parameter; skip NAME and the following comma.
                    names.append(node[1])
                    flags = flags | CO_VARARGS
                    i = i + 3

            if i < len(nodelist):
                # should be DOUBLESTAR
                t = nodelist[i][0]
                if t == token.DOUBLESTAR:
                    node = nodelist[i+1]
                else:
                    raise ValueError, "unexpected token: %s" % t
                names.append(node[1])
                flags = flags | CO_VARKEYWORDS

            # Star/double-star parameters are always last.
            break

        # fpdef: NAME | '(' fplist ')'
        names.append(self.com_fpdef(node))

        i = i + 1
        if i >= len(nodelist):
            break

        if nodelist[i][0] == token.EQUAL:
            defaults.append(self.com_node(nodelist[i + 1]))
            i = i + 2
        elif len(defaults):
            # XXX This should be a syntax error.
            # Treat "(a=1, b)" as "(a=1, b=None)"
            defaults.append(Const(None))

        # Step over the separating comma.
        i = i + 1

    return names, defaults, flags
def com_fpdef(self, node):
    """Transform a formal parameter: a NAME or a nested fplist."""
    # fpdef: NAME | '(' fplist ')'
    if node[1][0] != token.LPAR:
        return node[1][1]
    return self.com_fplist(node[2])
def com_fplist(self, node):
    """Transform a parenthesised parameter list into a tuple of names."""
    # fplist: fpdef (',' fpdef)* [',']
    if len(node) == 2:
        return self.com_fpdef(node[1])
    # (Local renamed from "list", which shadowed the builtin.)
    return tuple([self.com_fpdef(node[i])
                  for i in range(1, len(node), 2)])
def com_dotted_name(self, node):
    """Join the NAME tokens of a dotted_name into one dotted string."""
    parts = []
    for n in node:
        # NAME tokens have token number 1; skip the DOT separators.
        if type(n) == type(()) and n[0] == 1:
            parts.append(n[1])
    return '.'.join(parts)
def com_dotted_as_name(self, node):
    """Transform "a.b [as c]" into a (dotted_name, alias_or_None) pair."""
    assert node[0] == symbol.dotted_as_name
    node = node[1:]
    dot = self.com_dotted_name(node[0][1:])
    if len(node) == 1:
        return dot, None
    assert node[1][1] == 'as'
    assert node[2][0] == token.NAME
    return dot, node[2][1]
def com_dotted_as_names(self, node):
    """Transform dotted_as_names into a list of (name, alias) pairs."""
    assert node[0] == symbol.dotted_as_names
    node = node[1:]
    # Children alternate dotted_as_name / comma; take every other one.
    return [self.com_dotted_as_name(child) for child in node[::2]]
def com_import_as_name(self, node):
    """Transform "name [as alias]" into a (name, alias_or_None) pair."""
    assert node[0] == symbol.import_as_name
    node = node[1:]
    assert node[0][0] == token.NAME
    if len(node) == 1:
        return node[0][1], None
    assert node[1][1] == 'as', node
    assert node[2][0] == token.NAME
    return node[0][1], node[2][1]
def com_import_as_names(self, node):
    """Transform import_as_names into a list of (name, alias) pairs."""
    assert node[0] == symbol.import_as_names
    node = node[1:]
    # Children alternate import_as_name / comma; take every other one.
    return [self.com_import_as_name(child) for child in node[::2]]
def com_bases(self, node):
    """Transform a class's base-class testlist into a list of nodes."""
    # Children alternate expression / comma, hence the stride of 2.
    return [self.com_node(node[i]) for i in range(1, len(node), 2)]
def com_try_finally(self, nodelist):
    """Transform try/finally into a TryFinally node."""
    # try_fin_stmt: "try" ":" suite "finally" ":" suite
    return TryFinally(self.com_node(nodelist[2]),
                      self.com_node(nodelist[5]),
                      lineno=nodelist[0][2])
def com_try_except(self, nodelist):
    """Transform try/except[/else] into a TryExcept node."""
    # try_except: 'try' ':' suite (except_clause ':' suite)* ['else' suite]
    #tryexcept: [TryNode, [except_clauses], elseNode)]
    stmt = self.com_node(nodelist[2])
    clauses = []
    elseNode = None
    for i in range(3, len(nodelist), 3):
        node = nodelist[i]
        if node[0] == symbol.except_clause:
            # except_clause: 'except' [expr [',' expr]] */
            if len(node) > 2:
                expr1 = self.com_node(node[2])
                if len(node) > 4:
                    # Bind target of "except E, name".
                    expr2 = self.com_assign(node[4], OP_ASSIGN)
                else:
                    expr2 = None
            else:
                expr1 = expr2 = None
            clauses.append((expr1, expr2, self.com_node(nodelist[i+2])))

        if node[0] == token.NAME:
            # The 'else' keyword introduces the else suite.
            elseNode = self.com_node(nodelist[i+2])
    # Fix: reuse the already-transformed try body ("stmt") instead of
    # transforming nodelist[2] a second time, as the original did.
    return TryExcept(stmt, clauses, elseNode,
                     lineno=nodelist[0][2])
def com_augassign_op(self, node):
    """Return the operator token of an augmented assignment."""
    assert node[0] == symbol.augassign
    return node[1]

def com_augassign(self, node):
    """Return node suitable for lvalue of augmented assignment

    Names, slices, and attributes are the only allowable nodes.
    """
    l = self.com_node(node)
    if l.__class__ in (Name, Slice, Subscript, Getattr):
        return l
    raise SyntaxError, "can't assign to %s" % l.__class__.__name__
def com_assign(self, node, assigning):
    """Return a node usable as an assignment target ("lvalue")."""
    # return a node suitable for use as an "lvalue"
    # loop to avoid trivial recursion
    while 1:
        t = node[0]
        if t == symbol.exprlist or t == symbol.testlist or t == symbol.testlist_gexp:
            if len(node) > 2:
                # Multiple comma-separated targets: tuple unpacking.
                return self.com_assign_tuple(node, assigning)
            # Single child: unwrap and keep descending.
            node = node[1]
        elif t in _assign_types:
            if len(node) > 2:
                raise SyntaxError, "can't assign to operator"
            node = node[1]
        elif t == symbol.power:
            if node[1][0] != symbol.atom:
                raise SyntaxError, "can't assign to operator"
            if len(node) > 2:
                # Apply all but the last trailer, then assign through
                # the final attribute/subscript trailer.
                primary = self.com_node(node[1])
                for i in range(2, len(node)-1):
                    ch = node[i]
                    if ch[0] == token.DOUBLESTAR:
                        raise SyntaxError, "can't assign to operator"
                    primary = self.com_apply_trailer(primary, ch)
                return self.com_assign_trailer(primary, node[-1],
                                               assigning)
            node = node[1]
        elif t == symbol.atom:
            t = node[1][0]
            if t == token.LPAR:
                node = node[2]
                if node[0] == token.RPAR:
                    raise SyntaxError, "can't assign to ()"
            elif t == token.LSQB:
                node = node[2]
                if node[0] == token.RSQB:
                    raise SyntaxError, "can't assign to []"
                return self.com_assign_list(node, assigning)
            elif t == token.NAME:
                return self.com_assign_name(node[1], assigning)
            else:
                raise SyntaxError, "can't assign to literal"
        else:
            raise SyntaxError, "bad assignment"
def com_assign_tuple(self, node, assigning):
    """Build an AssTuple from each assignable child of *node*."""
    targets = [self.com_assign(node[i], assigning)
               for i in range(1, len(node), 2)]
    return AssTuple(targets, lineno=extractLineNo(node))
def com_assign_list(self, node, assigning):
    """Build an AssList target, rejecting list comprehensions."""
    assigns = []
    for i in range(1, len(node), 2):
        if i + 1 < len(node):
            if node[i + 1][0] == symbol.list_for:
                raise SyntaxError, "can't assign to list comprehension"
            assert node[i + 1][0] == token.COMMA, node[i + 1]
        assigns.append(self.com_assign(node[i], assigning))
    return AssList(assigns, lineno=extractLineNo(node))
def com_assign_name(self, node, assigning):
    """Build an AssName target from a NAME token."""
    return AssName(node[1], assigning, lineno=node[2])

def com_assign_trailer(self, primary, node, assigning):
    """Dispatch an assignment trailer: attribute, subscript, or error."""
    t = node[1][0]
    if t == token.DOT:
        return self.com_assign_attr(primary, node[2], assigning)
    if t == token.LSQB:
        return self.com_subscriptlist(primary, node[2], assigning)
    if t == token.LPAR:
        raise SyntaxError, "can't assign to function call"
    raise SyntaxError, "unknown trailer type: %s" % t

def com_assign_attr(self, primary, node, assigning):
    """Build an AssAttr target for attribute assignment."""
    return AssAttr(primary, node[1], assigning, lineno=node[-1])
def com_binary(self, constructor, nodelist):
    "Compile 'NODE (OP NODE)*' into (type, [ node1, ..., nodeN ])."
    count = len(nodelist)
    if count == 1:
        # A single operand needs no constructor wrapper.
        n = nodelist[0]
        return self.lookup_node(n)(n[1:])
    # Operands sit at even indices; operators at odd ones are implied
    # by the constructor.
    items = [self.lookup_node(nodelist[i])(nodelist[i][1:])
             for i in range(0, count, 2)]
    return constructor(items, lineno=extractLineNo(nodelist))
def com_stmt(self, node):
    """Transform *node* and wrap the result in a Stmt when needed."""
    transformed = self.lookup_node(node)(node[1:])
    assert transformed is not None
    if isinstance(transformed, Stmt):
        return transformed
    return Stmt([transformed])
def com_append_stmt(self, stmts, node):
    """Transform *node* and append its statement(s) onto *stmts*."""
    transformed = self.lookup_node(node)(node[1:])
    assert transformed is not None
    if isinstance(transformed, Stmt):
        # Splice nested statements flat into the target list.
        stmts.extend(transformed.nodes)
    else:
        stmts.append(transformed)
# List displays: the list_for symbol exists only on Pythons with list
# comprehensions; pick the matching implementation at class-build time.
if hasattr(symbol, 'list_for'):
    def com_list_constructor(self, nodelist):
        # listmaker: test ( list_for | (',' test)* [','] )
        values = []
        for i in range(1, len(nodelist)):
            if nodelist[i][0] == symbol.list_for:
                # A comprehension: exactly one list_for follows the
                # first (and only) expression.
                assert len(nodelist[i:]) == 1
                return self.com_list_comprehension(values[0],
                                                   nodelist[i])
            elif nodelist[i][0] == token.COMMA:
                continue
            values.append(self.com_node(nodelist[i]))
        # NOTE(review): assumes a non-empty list here; "[]" is handled
        # earlier by atom_lsqb().
        return List(values, lineno=values[0].lineno)

    def com_list_comprehension(self, expr, node):
        # list_iter: list_for | list_if
        # list_for: 'for' exprlist 'in' testlist [list_iter]
        # list_if: 'if' test [list_iter]

        # XXX should raise SyntaxError for assignment

        lineno = node[1][2]
        fors = []
        while node:
            t = node[1][1]
            if t == 'for':
                assignNode = self.com_assign(node[2], OP_ASSIGN)
                listNode = self.com_node(node[4])
                newfor = ListCompFor(assignNode, listNode, [])
                newfor.lineno = node[1][2]
                fors.append(newfor)
                if len(node) == 5:
                    node = None
                else:
                    node = self.com_list_iter(node[5])
            elif t == 'if':
                test = self.com_node(node[2])
                newif = ListCompIf(test, lineno=node[1][2])
                # NOTE(review): relies on the grammar guaranteeing a
                # 'for' clause before any 'if'; otherwise "newfor"
                # would be unbound here.
                newfor.ifs.append(newif)
                if len(node) == 3:
                    node = None
                else:
                    node = self.com_list_iter(node[3])
            else:
                raise SyntaxError, \
                      ("unexpected list comprehension element: %s %d"
                       % (node, lineno))
        return ListComp(expr, fors, lineno=lineno)

    def com_list_iter(self, node):
        assert node[0] == symbol.list_iter
        return node[1]
else:
    def com_list_constructor(self, nodelist):
        # Pre-comprehension grammar: a plain comma-separated list.
        values = []
        for i in range(1, len(nodelist), 2):
            values.append(self.com_node(nodelist[i]))
        return List(values)
# Generator expressions exist only on Python 2.4+; define the handlers
# only when the grammar has the gen_for symbol.
if hasattr(symbol, 'gen_for'):
    def com_generator_expression(self, expr, node):
        # gen_iter: gen_for | gen_if
        # gen_for: 'for' exprlist 'in' test [gen_iter]
        # gen_if: 'if' test [gen_iter]

        lineno = node[1][2]
        fors = []
        while node:
            t = node[1][1]
            if t == 'for':
                assignNode = self.com_assign(node[2], OP_ASSIGN)
                genNode = self.com_node(node[4])
                newfor = GenExprFor(assignNode, genNode, [],
                                    lineno=node[1][2])
                fors.append(newfor)
                if (len(node)) == 5:
                    node = None
                else:
                    node = self.com_gen_iter(node[5])
            elif t == 'if':
                test = self.com_node(node[2])
                newif = GenExprIf(test, lineno=node[1][2])
                # NOTE(review): relies on the grammar guaranteeing a
                # 'for' clause before any 'if'; otherwise "newfor"
                # would be unbound here.
                newfor.ifs.append(newif)
                if len(node) == 3:
                    node = None
                else:
                    node = self.com_gen_iter(node[3])
            else:
                raise SyntaxError, \
                        ("unexpected generator expression element: %s %d"
                         % (node, lineno))
        fors[0].is_outmost = True
        return GenExpr(GenExprInner(expr, fors), lineno=lineno)

    def com_gen_iter(self, node):
        assert node[0] == symbol.gen_iter
        return node[1]
def com_dictmaker(self, nodelist):
    """Transform a dictmaker into a Dict of (key, value) node pairs."""
    # dictmaker: test ':' test (',' test ':' value)* [',']
    pairs = []
    # Each key/value pair spans four children: key ':' value ','.
    for i in range(1, len(nodelist), 4):
        key = self.com_node(nodelist[i])
        value = self.com_node(nodelist[i+2])
        pairs.append((key, value))
    return Dict(pairs)
def com_apply_trailer(self, primaryNode, nodelist):
    """Apply one trailer (call, attribute or subscript) to a primary."""
    t = nodelist[1][0]
    if t == token.LPAR:
        return self.com_call_function(primaryNode, nodelist[2])
    if t == token.DOT:
        return self.com_select_member(primaryNode, nodelist[2])
    if t == token.LSQB:
        return self.com_subscriptlist(primaryNode, nodelist[2], OP_APPLY)

    raise SyntaxError, 'unknown node type: %s' % t

def com_select_member(self, primaryNode, nodelist):
    """Transform attribute access into a Getattr node."""
    if nodelist[0] != token.NAME:
        raise SyntaxError, "member must be a name"
    return Getattr(primaryNode, nodelist[1], lineno=nodelist[2])
    def com_call_function(self, primaryNode, nodelist):
        """Build a CallFunc node for 'primary(...)'.

        Positional and keyword arguments are collected first; *args and
        **kwargs (STAR / DOUBLESTAR) terminate that scan and are handled in
        a second loop below.
        """
        if nodelist[0] == token.RPAR:
            # empty argument list: f()
            return CallFunc(primaryNode, [], lineno=extractLineNo(nodelist))
        args = []
        kw = 0
        len_nodelist = len(nodelist)
        for i in range(1, len_nodelist, 2):
            node = nodelist[i]
            if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
                break
            kw, result = self.com_argument(node, kw)
            if len_nodelist != 2 and isinstance(result, GenExpr) \
                and len(node) == 3 and node[2][0] == symbol.gen_for:
                # allow f(x for x in y), but reject f(x for x in y, 1)
                # should use f((x for x in y), 1) instead of f(x for x in y, 1)
                raise SyntaxError, 'generator expression needs parenthesis'
            args.append(result)
        else:
            # Loop was not broken out of by a star arg, so skip past the
            # last token we processed.
            i = i + 1
        if i < len_nodelist and nodelist[i][0] == token.COMMA:
            # need to accept an application that looks like "f(a, b,)"
            i = i + 1
        star_node = dstar_node = None
        while i < len_nodelist:
            # each star argument spans three children: token, child, COMMA
            tok = nodelist[i]
            ch = nodelist[i+1]
            i = i + 3
            if tok[0]==token.STAR:
                if star_node is not None:
                    raise SyntaxError, 'already have the varargs indentifier'
                star_node = self.com_node(ch)
            elif tok[0]==token.DOUBLESTAR:
                if dstar_node is not None:
                    raise SyntaxError, 'already have the kwargs indentifier'
                dstar_node = self.com_node(ch)
            else:
                raise SyntaxError, 'unknown node type: %s' % tok
        return CallFunc(primaryNode, args, star_node, dstar_node,
                        lineno=extractLineNo(nodelist))
    def com_argument(self, nodelist, kw):
        """Transform one call argument.

        Returns (is_keyword, node).  *kw* is a flag saying whether a keyword
        argument has already been seen (positional args may not follow one).
        """
        if len(nodelist) == 3 and nodelist[2][0] == symbol.gen_for:
            # bare generator expression as sole argument: f(x for x in y)
            test = self.com_node(nodelist[1])
            return 0, self.com_generator_expression(test, nodelist[2])
        if len(nodelist) == 2:
            # plain positional argument
            if kw:
                raise SyntaxError, "non-keyword arg after keyword arg"
            return 0, self.com_node(nodelist[1])
        # keyword argument: descend to the NAME on the left of '='
        result = self.com_node(nodelist[3])
        n = nodelist[1]
        while len(n) == 2 and n[0] != token.NAME:
            n = n[1]
        if n[0] != token.NAME:
            raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
        node = Keyword(n[1], result, lineno=n[2])
        return 1, node
def com_subscriptlist(self, primary, nodelist, assigning):
# slicing: simple_slicing | extended_slicing
# simple_slicing: primary "[" short_slice "]"
# extended_slicing: primary "[" slice_list "]"
# slice_list: slice_item ("," slice_item)* [","]
# backwards compat slice for '[i:j]'
if len(nodelist) == 2:
sub = nodelist[1]
if (sub[1][0] == token.COLON or \
(len(sub) > 2 and sub[2][0] == token.COLON)) and \
sub[-1][0] != symbol.sliceop:
return self.com_slice(primary, sub, assigning)
subscripts = []
for i in range(1, len(nodelist), 2):
subscripts.append(self.com_subscript(nodelist[i]))
return Subscript(primary, assigning, subscripts,
lineno=extractLineNo(nodelist))
def com_subscript(self, node):
# slice_item: expression | proper_slice | ellipsis
ch = node[1]
t = ch[0]
if t == token.DOT and node[2][0] == token.DOT:
return Ellipsis()
if t == token.COLON or len(node) > 2:
return self.com_sliceobj(node)
return self.com_node(ch)
    def com_sliceobj(self, node):
        """Build a Sliceobj node for an extended slice 'lower:upper:stride'.

        Missing bounds/strides become Const(None) placeholders.
        """
        # proper_slice: short_slice | long_slice
        # short_slice: [lower_bound] ":" [upper_bound]
        # long_slice: short_slice ":" [stride]
        # lower_bound: expression
        # upper_bound: expression
        # stride: expression
        #
        # Note: a stride may be further slicing...
        items = []
        if node[1][0] == token.COLON:
            # no lower bound given
            items.append(Const(None))
            i = 2
        else:
            items.append(self.com_node(node[1]))
            # i == 2 is a COLON
            i = 3
        if i < len(node) and node[i][0] == symbol.test:
            items.append(self.com_node(node[i]))
            i = i + 1
        else:
            # no upper bound given
            items.append(Const(None))
        # a short_slice has been built. look for long_slice now by looking
        # for strides...
        for j in range(i, len(node)):
            ch = node[j]
            if len(ch) == 2:
                # bare sliceop ':' with no stride expression
                items.append(Const(None))
            else:
                items.append(self.com_node(ch[2]))
        return Sliceobj(items, lineno=extractLineNo(node))
def com_slice(self, primary, node, assigning):
# short_slice: [lower_bound] ":" [upper_bound]
lower = upper = None
if len(node) == 3:
if node[1][0] == token.COLON:
upper = self.com_node(node[2])
else:
lower = self.com_node(node[1])
elif len(node) == 4:
lower = self.com_node(node[1])
upper = self.com_node(node[3])
return Slice(primary, assigning, lower, upper,
lineno=extractLineNo(node))
    def get_docstring(self, node, n=None):
        """Extract the docstring (a leading STRING constant) from a parse
        subtree, or return None if the subtree cannot carry one.
        """
        if n is None:
            n = node[0]
            node = node[1:]
        if n == symbol.suite:
            if len(node) == 1:
                return self.get_docstring(node[0])
            for sub in node:
                if sub[0] == symbol.stmt:
                    return self.get_docstring(sub)
            return None
        if n == symbol.file_input:
            for sub in node:
                if sub[0] == symbol.stmt:
                    return self.get_docstring(sub)
            return None
        if n == symbol.atom:
            if node[0][0] == token.STRING:
                s = ''
                for t in node:
                    # eval() decodes the quoted source literal (prefixes and
                    # escape sequences); input is trusted parser output.
                    s = s + eval(t[1])
                return s
            return None
        if n == symbol.stmt or n == symbol.simple_stmt \
           or n == symbol.small_stmt:
            return self.get_docstring(node[0])
        if n in _doc_nodes and len(node) == 1:
            # transparent wrapper node: descend into its single child
            return self.get_docstring(node[0])
        return None
# Node types that transparently wrap a single child; get_docstring()
# recurses through these when hunting for a leading STRING constant.
_doc_nodes = [
    symbol.expr_stmt,
    symbol.testlist,
    symbol.testlist_safe,
    symbol.test,
    symbol.and_test,
    symbol.not_test,
    symbol.comparison,
    symbol.expr,
    symbol.xor_expr,
    symbol.and_expr,
    symbol.shift_expr,
    symbol.arith_expr,
    symbol.term,
    symbol.factor,
    symbol.power,
    ]
# comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
#             | 'in' | 'not' 'in' | 'is' | 'is' 'not'
# Maps comparison-operator token codes to their operator strings.
# NOTE(review): token.EQUAL ('=') also maps to '==' — presumably supporting
# the ancient '=' comparison spelling from the grammar above; confirm.
_cmp_types = {
    token.LESS : '<',
    token.GREATER : '>',
    token.EQEQUAL : '==',
    token.EQUAL : '==',
    token.LESSEQUAL : '<=',
    token.GREATEREQUAL : '>=',
    token.NOTEQUAL : '!=',
    }
# Node types the transformer dispatch loop is allowed to visit directly.
_legal_node_types = [
    symbol.funcdef,
    symbol.classdef,
    symbol.stmt,
    symbol.small_stmt,
    symbol.flow_stmt,
    symbol.simple_stmt,
    symbol.compound_stmt,
    symbol.expr_stmt,
    symbol.print_stmt,
    symbol.del_stmt,
    symbol.pass_stmt,
    symbol.break_stmt,
    symbol.continue_stmt,
    symbol.return_stmt,
    symbol.raise_stmt,
    symbol.import_stmt,
    symbol.global_stmt,
    symbol.exec_stmt,
    symbol.assert_stmt,
    symbol.if_stmt,
    symbol.while_stmt,
    symbol.for_stmt,
    symbol.try_stmt,
    symbol.suite,
    symbol.testlist,
    symbol.testlist_safe,
    symbol.test,
    symbol.and_test,
    symbol.not_test,
    symbol.comparison,
    symbol.exprlist,
    symbol.expr,
    symbol.xor_expr,
    symbol.and_expr,
    symbol.shift_expr,
    symbol.arith_expr,
    symbol.term,
    symbol.factor,
    symbol.power,
    symbol.atom,
    ]
# Older grammars expose yield as its own statement symbol
if hasattr(symbol, 'yield_stmt'):
    _legal_node_types.append(symbol.yield_stmt)
# Wrapper node types traversed when locating an assignment target.
_assign_types = [
    symbol.test,
    symbol.and_test,
    symbol.not_test,
    symbol.comparison,
    symbol.expr,
    symbol.xor_expr,
    symbol.and_expr,
    symbol.shift_expr,
    symbol.arith_expr,
    symbol.term,
    symbol.factor,
    ]
import types
# Reverse lookup table: numeric grammar-symbol and token codes ->
# human-readable names, used by debug_tree() below.
_names = {}
for k, v in symbol.sym_name.items():
    _names[k] = v
for k, v in token.tok_name.items():
    _names[k] = v
def debug_tree(tree):
    """Return a copy of *tree* with numeric node/token codes replaced by
    their symbolic names, for human-readable debugging output.
    """
    l = []
    for elt in tree:
        # types.IntType / types.StringType exist only on Python 2; this
        # module predates Python 3.
        if type(elt) == types.IntType:
            l.append(_names.get(elt, elt))
        elif type(elt) == types.StringType:
            l.append(elt)
        else:
            # nested subtree: recurse
            l.append(debug_tree(elt))
    return l
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from concurrent import futures
from functools import partial
from six.moves.urllib.parse import urlparse
import json
import numpy as np
import os
import re
import six
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, RowGroupStatistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.compat import guid
from pyarrow.filesystem import (LocalFileSystem, _ensure_filesystem,
resolve_filesystem_and_path)
from pyarrow.util import _is_path_like, _stringify_path
# URI schemes whose prefix is stripped before handing the path to the
# corresponding filesystem implementation.
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
    """Normalize *path*: strip the scheme for hdfs URIs, otherwise return
    the stringified path unchanged."""
    path = _stringify_path(path)
    parsed_uri = urlparse(path)
    if parsed_uri.scheme in _URI_STRIP_SCHEMES:
        return parsed_uri.path
    else:
        # ARROW-4073: On Windows returning the path with the scheme
        # stripped removes the drive letter, if any
        return path
def _get_filesystem_and_path(passed_filesystem, path):
    """Return a (filesystem, path) pair for *path*.

    When no filesystem is supplied, both are inferred from the path itself;
    otherwise the given filesystem is validated/wrapped and the path is
    normalized (hdfs URI scheme stripped).
    """
    if passed_filesystem is None:
        return resolve_filesystem_and_path(path, passed_filesystem)
    fs = _ensure_filesystem(passed_filesystem)
    return fs, _parse_uri(path)
def _check_contains_null(val):
    """Return True if *val* (bytes or text) contains an embedded NUL
    character; False for any other value."""
    if isinstance(val, six.binary_type):
        for byte in val:
            # Python 2 iterates bytes as 1-char strings, Python 3 as ints;
            # choose the comparison target accordingly.
            if isinstance(byte, six.binary_type):
                compare_to = chr(0)
            else:
                compare_to = 0
            if byte == compare_to:
                return True
    elif isinstance(val, six.text_type):
        return u'\x00' in val
    return False
def _check_filters(filters):
    """
    Check if filters are well-formed.

    Returns the filters normalized to List[List[Tuple]] (a bare
    List[Tuple] is wrapped into a single conjunction).  Raises ValueError
    for empty filters and NotImplementedError for values containing
    embedded NULs, which cannot be passed through safely.
    """
    if filters is not None:
        if len(filters) == 0 or any(len(f) == 0 for f in filters):
            raise ValueError("Malformed filters")
        if isinstance(filters[0][0], six.string_types):
            # We have encountered the situation where we have one nesting level
            # too few:
            #   We have [(,,), ..] instead of [[(,,), ..]]
            filters = [filters]
        for conjunction in filters:
            for col, op, val in conjunction:
                if (
                    isinstance(val, list)
                    and all(_check_contains_null(v) for v in val)
                    or _check_contains_null(val)
                ):
                    raise NotImplementedError(
                        "Null-terminated binary strings are not supported as"
                        " filter values."
                    )
    return filters
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile(object):
    """
    Reader interface for a single Parquet file
    Parameters
    ----------
    source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
        Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader
    metadata : ParquetFileMetadata, default None
        Use existing metadata object, rather than reading from file.
    common_metadata : ParquetFileMetadata, default None
        Will be used in reads for pandas schema metadata if not found in the
        main file's metadata, no other uses at the moment
    memory_map : boolean, default True
        If the source is a file path, use a memory map to read file, which can
        improve performance in some environments
    """
    def __init__(self, source, metadata=None, common_metadata=None,
                 memory_map=True):
        # ParquetReader is the Cython-level reader; open() parses the footer
        self.reader = ParquetReader()
        self.reader.open(source, use_memory_map=memory_map, metadata=metadata)
        self.common_metadata = common_metadata
        self._nested_paths_by_prefix = self._build_nested_paths()
    def _build_nested_paths(self):
        """Map every dotted column-path prefix to the list of leaf column
        indices beneath it, so a name like 'a' selects 'a.b', 'a.c', ..."""
        paths = self.reader.column_paths
        result = defaultdict(list)
        def _visit_piece(i, key, rest):
            # register leaf index i under this prefix, then extend the prefix
            result[key].append(i)
            if len(rest) > 0:
                nested_key = '.'.join((key, rest[0]))
                _visit_piece(i, nested_key, rest[1:])
        for i, path in enumerate(paths):
            _visit_piece(i, path[0], path[1:])
        return result
    @property
    def metadata(self):
        # FileMetaData parsed from the file footer
        return self.reader.metadata
    @property
    def schema(self):
        # ParquetSchema of the file
        return self.metadata.schema
    @property
    def num_row_groups(self):
        # Number of row groups in the file
        return self.reader.num_row_groups
    def read_row_group(self, i, columns=None, use_threads=True,
                       use_pandas_metadata=False):
        """
        Read a single row group from a Parquet file
        Parameters
        ----------
        columns: list
            If not None, only these columns will be read from the row group. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'
        use_threads : boolean, default True
            Perform multi-threaded column reads
        use_pandas_metadata : boolean, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded
        Returns
        -------
        pyarrow.table.Table
            Content of the row group as a table (of columns)
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_row_group(i, column_indices=column_indices,
                                          use_threads=use_threads)
    def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
        """
        Read a Table from Parquet format
        Parameters
        ----------
        columns: list
            If not None, only these columns will be read from the file. A
            column name may be a prefix of a nested field, e.g. 'a' will select
            'a.b', 'a.c', and 'a.d.e'
        use_threads : boolean, default True
            Perform multi-threaded column reads
        use_pandas_metadata : boolean, default False
            If True and file has custom pandas schema metadata, ensure that
            index columns are also loaded
        Returns
        -------
        pyarrow.table.Table
            Content of the file as a table (of columns)
        """
        column_indices = self._get_column_indices(
            columns, use_pandas_metadata=use_pandas_metadata)
        return self.reader.read_all(column_indices=column_indices,
                                    use_threads=use_threads)
    def scan_contents(self, columns=None, batch_size=65536):
        """
        Read contents of file with a single thread for indicated columns and
        batch size. Number of rows in file is returned. This function is used
        for benchmarking
        Parameters
        ----------
        columns : list of integers, default None
            If None, scan all columns
        batch_size : int, default 64K
            Number of rows to read at a time internally
        Returns
        -------
        num_rows : number of rows in file
        """
        column_indices = self._get_column_indices(columns)
        return self.reader.scan_contents(column_indices,
                                         batch_size=batch_size)
    def _get_column_indices(self, column_names, use_pandas_metadata=False):
        """Resolve column names (possibly nested-field prefixes) to leaf
        column indices; optionally append pandas index columns."""
        if column_names is None:
            return None
        indices = []
        for name in column_names:
            if name in self._nested_paths_by_prefix:
                indices.extend(self._nested_paths_by_prefix[name])
        if use_pandas_metadata:
            # Prefer the file's own pandas metadata; fall back to
            # _common_metadata supplied at construction time.
            file_keyvalues = self.metadata.metadata
            common_keyvalues = (self.common_metadata.metadata
                                if self.common_metadata is not None
                                else None)
            if file_keyvalues and b'pandas' in file_keyvalues:
                index_columns = _get_pandas_index_columns(file_keyvalues)
            elif common_keyvalues and b'pandas' in common_keyvalues:
                index_columns = _get_pandas_index_columns(common_keyvalues)
            else:
                index_columns = []
            if indices is not None and index_columns:
                indices += map(self.reader.column_name_idx, index_columns)
        return indices
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
def _sanitize_schema(schema, flavor):
    """Return (schema, changed) with Spark-illegal field names replaced.

    Only the 'spark' flavor triggers sanitization; any other flavor returns
    the schema untouched with changed=False.
    """
    if 'spark' not in flavor:
        return schema, False
    fields = []
    changed = False
    for field in schema:
        clean_name = _sanitized_spark_field_name(field.name)
        if clean_name == field.name:
            fields.append(field)
        else:
            changed = True
            fields.append(pa.field(clean_name, field.type,
                                   field.nullable, field.metadata))
    return pa.schema(fields, metadata=schema.metadata), changed
def _sanitize_table(table, new_schema, flavor):
    """Rebuild *table* under *new_schema* when the 'spark' flavor applies;
    otherwise return the table unchanged."""
    # TODO: This will not handle prohibited characters in nested field names
    if 'spark' not in flavor:
        return table
    columns = [table[i].data for i in range(table.num_columns)]
    return pa.Table.from_arrays(columns, schema=new_schema)
_parquet_writer_arg_docs = """version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : boolean, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
by flavor argument. This take priority over the coerce_timestamps option.
coerce_timestamps : string, default None
Cast timestamps a particular resolution.
Valid values: {None, 'ms', 'us'}
allow_truncated_timestamps : boolean, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
'ms', do not raise an exception
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'LZO', 'BROTLI', 'LZ4', 'ZSTD'}
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options for compatibility
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed."""
class ParquetWriter(object):
    # __doc__ is assembled at class-creation time so the shared writer
    # argument docs stay in one place (_parquet_writer_arg_docs above).
    __doc__ = """
Class for incrementally building a Parquet file for Arrow tables
Parameters
----------
where : path or file-like object
schema : arrow Schema
{0}
""".format(_parquet_writer_arg_docs)
    def __init__(self, where, schema, filesystem=None,
                 flavor=None,
                 version='1.0',
                 use_dictionary=True,
                 compression='snappy',
                 use_deprecated_int96_timestamps=None, **options):
        if use_deprecated_int96_timestamps is None:
            # Use int96 timestamps for Spark
            if flavor is not None and 'spark' in flavor:
                use_deprecated_int96_timestamps = True
            else:
                use_deprecated_int96_timestamps = False
        self.flavor = flavor
        if flavor is not None:
            # May rename fields (e.g. spark-illegal characters); remember
            # whether it did so write_table can rebuild incoming tables.
            schema, self.schema_changed = _sanitize_schema(schema, flavor)
        else:
            self.schema_changed = False
        self.schema = schema
        self.where = where
        # If we open a file using a filesystem, store file handle so we can be
        # sure to close it when `self.close` is called.
        self.file_handle = None
        filesystem, path = resolve_filesystem_and_path(where, filesystem)
        if filesystem is not None:
            sink = self.file_handle = filesystem.open(path, 'wb')
        else:
            sink = where
        self.writer = _parquet.ParquetWriter(
            sink, schema,
            version=version,
            compression=compression,
            use_dictionary=use_dictionary,
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            **options)
        self.is_open = True
    def __del__(self):
        # Best-effort close; getattr guards against __init__ having failed
        # before is_open was set.
        if getattr(self, 'is_open', False):
            self.close()
    def __enter__(self):
        return self
    def __exit__(self, *args, **kwargs):
        self.close()
        # return false since we want to propagate exceptions
        return False
    def write_table(self, table, row_group_size=None):
        """Write *table* to the file as one (or more) row groups.

        Raises ValueError if the table schema does not match the schema
        this writer was created with.
        """
        if self.schema_changed:
            table = _sanitize_table(table, self.schema, self.flavor)
        assert self.is_open
        if not table.schema.equals(self.schema, check_metadata=False):
            msg = ('Table schema does not match schema used to create file: '
                   '\ntable:\n{0!s} vs. \nfile:\n{1!s}'.format(table.schema,
                                                               self.schema))
            raise ValueError(msg)
        self.writer.write_table(table, row_group_size=row_group_size)
    def close(self):
        """Finalize the Parquet footer and close any file handle we opened."""
        if self.is_open:
            self.writer.close()
            self.is_open = False
        if self.file_handle is not None:
            self.file_handle.close()
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece(object):
    """
    A single chunk of a potentially larger Parquet dataset to read. The
    arguments will indicate to read either a single row group or all row
    groups, and whether to add partition keys to the resulting pyarrow.Table
    Parameters
    ----------
    path : str or pathlib.Path
        Path to file in the file system where this piece is located
    open_file_func : callable
        Function to use for obtaining file handle to dataset piece
    partition_keys : list of tuples
        [(column name, ordinal index)]
    row_group : int, default None
        Row group to load. By default, reads all row groups
    """
    def __init__(self, path, open_file_func=partial(open, mode='rb'),
                 row_group=None, partition_keys=None):
        self.path = _stringify_path(path)
        self.open_file_func = open_file_func
        self.row_group = row_group
        self.partition_keys = partition_keys or []
    def __eq__(self, other):
        # Pieces compare equal when they address the same file slice with
        # the same partition keys; open_file_func is deliberately ignored.
        if not isinstance(other, ParquetDatasetPiece):
            return False
        return (self.path == other.path and
                self.row_group == other.row_group and
                self.partition_keys == other.partition_keys)
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        return ('{0}({1!r}, row_group={2!r}, partition_keys={3!r})'
                .format(type(self).__name__, self.path,
                        self.row_group,
                        self.partition_keys))
    def __str__(self):
        result = ''
        if len(self.partition_keys) > 0:
            partition_str = ', '.join('{0}={1}'.format(name, index)
                                      for name, index in self.partition_keys)
            result += 'partition[{0}] '.format(partition_str)
        result += self.path
        if self.row_group is not None:
            result += ' | row_group={0}'.format(self.row_group)
        return result
    def get_metadata(self):
        """
        Given a function that can create an open ParquetFile object, return the
        file's metadata
        """
        return self.open().metadata
    def open(self):
        """
        Returns instance of ParquetFile
        """
        reader = self.open_file_func(self.path)
        if not isinstance(reader, ParquetFile):
            # open_file_func returned a raw handle; wrap it
            reader = ParquetFile(reader)
        return reader
    def read(self, columns=None, use_threads=True, partitions=None,
             file=None, use_pandas_metadata=False):
        """
        Read this piece as a pyarrow.Table
        Parameters
        ----------
        columns : list of column names, default None
        use_threads : boolean, default True
            Perform multi-threaded column reads
        partitions : ParquetPartitions, default None
            Required when this piece carries partition_keys; supplies the
            dictionary of observed values for each partition level
        file : file-like object
            passed to ParquetFile
        Returns
        -------
        table : pyarrow.Table
        """
        if self.open_file_func is not None:
            reader = self.open()
        elif file is not None:
            reader = ParquetFile(file)
        else:
            # try to read the local path
            reader = ParquetFile(self.path)
        options = dict(columns=columns,
                       use_threads=use_threads,
                       use_pandas_metadata=use_pandas_metadata)
        if self.row_group is not None:
            table = reader.read_row_group(self.row_group, **options)
        else:
            table = reader.read(**options)
        if len(self.partition_keys) > 0:
            if partitions is None:
                raise ValueError('Must pass partition sets')
            # Here, the index is the categorical code of the partition where
            # this piece is located. Suppose we had
            #
            # /foo=a/0.parq
            # /foo=b/0.parq
            # /foo=c/0.parq
            #
            # Then we assign a=0, b=1, c=2. And the resulting Table pieces will
            # have a DictionaryArray column named foo having the constant index
            # value as indicated. The distinct categories of the partition have
            # been computed in the ParquetManifest
            for i, (name, index) in enumerate(self.partition_keys):
                # The partition code is the same for all values in this piece
                indices = np.array([index], dtype='i4').repeat(len(table))
                # This is set of all partition values, computed as part of the
                # manifest, so ['a', 'b', 'c'] as in our example above.
                dictionary = partitions.levels[i].dictionary
                arr = lib.DictionaryArray.from_arrays(indices, dictionary)
                col = lib.Column.from_array(name, arr)
                table = table.append_column(col)
        return table
class PartitionSet(object):
    """A data structure for cataloguing the observed Parquet partitions at a
    particular level. So if we have
    /foo=a/bar=0
    /foo=a/bar=1
    /foo=a/bar=2
    /foo=b/bar=0
    /foo=b/bar=1
    /foo=b/bar=2
    Then we have two partition sets, one for foo, another for bar. As we visit
    levels of the partition hierarchy, a PartitionSet tracks the distinct
    values and assigns categorical codes to use when reading the pieces
    """

    def __init__(self, name, keys=None):
        self.name = name
        self.keys = keys or []
        self.key_indices = {k: i for i, k in enumerate(self.keys)}
        self._dictionary = None

    def get_index(self, key):
        """
        Get the index of the partition value if it is known, otherwise assign
        one
        """
        try:
            return self.key_indices[key]
        except KeyError:
            # First sighting of this value: its code is the next slot.
            code = len(self.key_indices)
            self.keys.append(key)
            self.key_indices[key] = code
            return code

    @property
    def dictionary(self):
        """Arrow array of the observed keys, computed once and cached.

        Only integer and string partition types are supported right now;
        keys are stored as integers when every one of them parses as int.
        """
        if self._dictionary is None:
            if not self.keys:
                raise ValueError('No known partition keys')
            try:
                self._dictionary = lib.array([int(k) for k in self.keys])
            except ValueError:
                self._dictionary = lib.array(self.keys)
        return self._dictionary

    @property
    def is_sorted(self):
        """True when the keys were observed in sorted order."""
        return list(self.keys) == sorted(self.keys)
class ParquetPartitions(object):
    """Ordered collection of PartitionSet objects, one per nesting level of
    a partitioned dataset's directory hierarchy."""
    def __init__(self):
        self.levels = []
        self.partition_names = set()
    def __len__(self):
        return len(self.levels)
    def __getitem__(self, i):
        return self.levels[i]
    def get_index(self, level, name, key):
        """
        Record a partition value at a particular level, returning the distinct
        code for that value at that level. Example:
        partitions.get_index(1, 'foo', 'a') returns 0
        partitions.get_index(1, 'foo', 'b') returns 1
        partitions.get_index(1, 'foo', 'c') returns 2
        partitions.get_index(1, 'foo', 'a') returns 0
        Parameters
        ----------
        level : int
            The nesting level of the partition we are observing
        name : string
            The partition name
        key : string or int
            The partition value
        """
        if level == len(self.levels):
            # First time we see this level: create its PartitionSet, and
            # refuse a name already used by a different level.
            if name in self.partition_names:
                raise ValueError('{0} was the name of the partition in '
                                 'another level'.format(name))
            part_set = PartitionSet(name)
            self.levels.append(part_set)
            self.partition_names.add(name)
        return self.levels[level].get_index(key)
    def filter_accepts_partition(self, part_key, filter, level):
        """Return True if the partition identified by *part_key* (a
        (column, value_index) pair) satisfies the *filter* predicate
        (a (column, op, value) tuple) at the given level.

        Filters naming a different column always accept the partition.
        """
        p_column, p_value_index = part_key
        f_column, op, f_value = filter
        if p_column != f_column:
            return True
        f_type = type(f_value)
        if isinstance(f_value, set):
            if not f_value:
                raise ValueError("Cannot use empty set as filter value")
            if op not in {'in', 'not in'}:
                # BUG FIX: these three messages previously passed the format
                # argument as a second ValueError argument (logging style)
                # instead of %-interpolating it, so the placeholder was never
                # filled in.
                raise ValueError("Op '%s' not supported with set value" % op)
            if len(set([type(item) for item in f_value])) != 1:
                raise ValueError("All elements of set '%s' must be of"
                                 " same type" % (f_value,))
            f_type = type(next(iter(f_value)))
        # Coerce the stored partition value to the filter value's type so
        # e.g. string-encoded directory keys compare against ints correctly.
        p_value = f_type((self.levels[level]
                          .dictionary[p_value_index]
                          .as_py()))
        if op == "=" or op == "==":
            return p_value == f_value
        elif op == "!=":
            return p_value != f_value
        elif op == '<':
            return p_value < f_value
        elif op == '>':
            return p_value > f_value
        elif op == '<=':
            return p_value <= f_value
        elif op == '>=':
            return p_value >= f_value
        elif op == 'in':
            return p_value in f_value
        elif op == 'not in':
            return p_value not in f_value
        else:
            raise ValueError("'%s' is not a valid operator in predicates."
                             % (filter[1],))
class ParquetManifest(object):
    """
    Walks a (possibly hive-partitioned) dataset directory tree, collecting
    the dataset pieces, the distinct partition values per level, and the
    locations of any _metadata/_common_metadata sidecar files.
    """
    def __init__(self, dirpath, open_file_func=None, filesystem=None,
                 pathsep='/', partition_scheme='hive', metadata_nthreads=1):
        filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
        self.filesystem = filesystem
        self.open_file_func = open_file_func
        self.pathsep = pathsep
        self.dirpath = _stringify_path(dirpath)
        self.partition_scheme = partition_scheme
        self.partitions = ParquetPartitions()
        self.pieces = []
        self._metadata_nthreads = metadata_nthreads
        self._thread_pool = futures.ThreadPoolExecutor(
            max_workers=metadata_nthreads)
        self.common_metadata_path = None
        self.metadata_path = None
        self._visit_level(0, self.dirpath, [])
        # Due to concurrency, pieces will potentially be out of order if the
        # dataset is partitioned so we sort them to yield stable results
        self.pieces.sort(key=lambda piece: piece.path)
        if self.common_metadata_path is None:
            # _common_metadata is a subset of _metadata
            self.common_metadata_path = self.metadata_path
        self._thread_pool.shutdown()
    def _visit_level(self, level, base_path, part_keys):
        """Scan one directory level: record sidecar metadata files, collect
        data files, and recurse into partition subdirectories."""
        fs = self.filesystem
        _, directories, files = next(fs.walk(base_path))
        filtered_files = []
        for path in files:
            full_path = self.pathsep.join((base_path, path))
            if path.endswith('_common_metadata'):
                self.common_metadata_path = full_path
            elif path.endswith('_metadata'):
                self.metadata_path = full_path
            elif self._should_silently_exclude(path):
                continue
            else:
                filtered_files.append(full_path)
        # ARROW-1079: Filter out "private" directories starting with underscore
        filtered_directories = [self.pathsep.join((base_path, x))
                                for x in directories
                                if not _is_private_directory(x)]
        filtered_files.sort()
        filtered_directories.sort()
        if len(filtered_files) > 0 and len(filtered_directories) > 0:
            # data files may only live at the leaves of the partition tree
            raise ValueError('Found files in an intermediate '
                             'directory: {0}'.format(base_path))
        elif len(filtered_directories) > 0:
            self._visit_directories(level, filtered_directories, part_keys)
        else:
            self._push_pieces(filtered_files, part_keys)
    def _should_silently_exclude(self, file_name):
        """True for auxiliary files that are not dataset data."""
        return (file_name.endswith('.crc') or  # Checksums
                file_name.endswith('_$folder$') or  # HDFS directories in S3
                file_name.startswith('.') or  # Hidden files
                file_name in EXCLUDED_PARQUET_PATHS)
    def _visit_directories(self, level, directories, part_keys):
        """Recurse into partition subdirectories, registering each
        directory's partition value and descending a level."""
        futures_list = []
        for path in directories:
            head, tail = _path_split(path, self.pathsep)
            name, key = _parse_hive_partition(tail)
            index = self.partitions.get_index(level, name, key)
            dir_part_keys = part_keys + [(name, index)]
            # If you have less threads than levels, the wait call will block
            # indefinitely due to multiple waits within a thread.
            if level < self._metadata_nthreads:
                future = self._thread_pool.submit(self._visit_level,
                                                  level + 1,
                                                  path,
                                                  dir_part_keys)
                futures_list.append(future)
            else:
                self._visit_level(level + 1, path, dir_part_keys)
        if futures_list:
            futures.wait(futures_list)
    def _parse_partition(self, dirname):
        """Parse a partition directory name under the configured scheme."""
        if self.partition_scheme == 'hive':
            return _parse_hive_partition(dirname)
        else:
            raise NotImplementedError('partition schema: {0}'
                                      .format(self.partition_scheme))
    def _push_pieces(self, files, part_keys):
        """Record one ParquetDatasetPiece per leaf data file."""
        self.pieces.extend([
            ParquetDatasetPiece(path, partition_keys=part_keys,
                                open_file_func=self.open_file_func)
            for path in files
        ])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {0}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return tail.startswith('_') and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class ParquetDataset(object):
"""
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter
split_row_groups : boolean, default False
Divide files into pieces for each row group in the file
validate_schema : boolean, default True
Check that individual file schemas are all the same / compatible
filters : List[Tuple] or List[List[Tuple]] or None (default)
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some files of the dataset.
Predicates are expressed in disjunctive normal form (DNF). This means
    that the innermost tuple describes a single column predicate. These
    inner predicates are all combined with a conjunction (AND) into a
    larger predicate. The outermost list then combines all filters
    with a disjunction (OR). By this, we should be able to express all
    kinds of filters that are possible using boolean logic.
    This function also supports passing in as List[Tuple]. These predicates
    are evaluated as a conjunction. To express OR in predicates, one must
    use the (preferred) List[List[Tuple]] notation.
metadata_nthreads: int, default 1
How many threads to allow the thread pool which is used to read the
dataset metadata. Increasing this is helpful to read partitioned
datasets.
memory_map : boolean, default True
If the source is a file path, use a memory map to read each file in the
dataset if possible, which can improve performance in some environments
"""
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, memory_map=True):
a_path = path_or_paths
if isinstance(a_path, list):
a_path = a_path[0]
self.fs, _ = _get_filesystem_and_path(filesystem, a_path)
if isinstance(path_or_paths, list):
self.paths = [_parse_uri(path) for path in path_or_paths]
else:
self.paths = _parse_uri(path_or_paths)
self.memory_map = memory_map
self._open_file_func = self._get_open_file_func()
(self.pieces,
self.partitions,
self.common_metadata_path,
self.metadata_path) = _make_manifest(
path_or_paths, self.fs, metadata_nthreads=metadata_nthreads,
open_file_func=self._open_file_func)
if self.common_metadata_path is not None:
with self.fs.open(self.common_metadata_path) as f:
self.common_metadata = (ParquetFile(f, memory_map=memory_map)
.metadata)
else:
self.common_metadata = None
if metadata is None and self.metadata_path is not None:
with self.fs.open(self.metadata_path) as f:
self.metadata = ParquetFile(f, memory_map=memory_map).metadata
else:
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if validate_schema:
self.validate_schemas()
if filters is not None:
filters = _check_filters(filters)
self._filter(filters)
def validate_schemas(self):
if self.metadata is None and self.schema is None:
if self.common_metadata is not None:
self.schema = self.common_metadata.schema
else:
self.schema = self.pieces[0].get_metadata().schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all compatible
dataset_schema = self.schema.to_arrow_schema()
# Exclude the partition columns from the schema, they are provided
# by the path, not the DatasetPiece
if self.partitions is not None:
for partition_name in self.partitions.partition_names:
if dataset_schema.get_field_index(partition_name) != -1:
field_idx = dataset_schema.get_field_index(partition_name)
dataset_schema = dataset_schema.remove(field_idx)
for piece in self.pieces:
file_metadata = piece.get_metadata()
file_schema = file_metadata.schema.to_arrow_schema()
if not dataset_schema.equals(file_schema, check_metadata=False):
raise ValueError('Schema in {0!s} was different. \n'
'{1!s}\n\nvs\n\n{2!s}'
.format(piece, file_schema,
dataset_schema))
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table
Parameters
----------
columns : List[str]
Names of columns to read from the file
use_threads : boolean, default True
Perform multi-threaded column reads
use_pandas_metadata : bool, default False
Passed through to each dataset piece
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
tables = []
for piece in self.pieces:
table = piece.read(columns=columns, use_threads=use_threads,
partitions=self.partitions,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
return self.read(use_pandas_metadata=True, **kwargs)
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _get_open_file_func(self):
if self.fs is None or isinstance(self.fs, LocalFileSystem):
def open_file(path, meta=None):
return ParquetFile(path, metadata=meta,
memory_map=self.memory_map,
common_metadata=self.common_metadata)
else:
def open_file(path, meta=None):
return ParquetFile(self.fs.open(path, mode='rb'),
memory_map=self.memory_map,
metadata=meta,
common_metadata=self.common_metadata)
return open_file
def _filter(self, filters):
accepts_filter = self.partitions.filter_accepts_partition
def one_filter_accepts(piece, filter):
return all(accepts_filter(part_key, filter, level)
for level, part_key in enumerate(piece.partition_keys))
def all_filters_accept(piece):
return any(all(one_filter_accepts(piece, f) for f in conjunction)
for conjunction in filters)
self.pieces = [p for p in self.pieces if all_filters_accept(p)]
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
                   open_file_func=None):
    """Enumerate the contents of a dataset.

    Returns a 4-tuple ``(pieces, partitions, common_metadata_path,
    metadata_path)``; the last three are None unless a directory with the
    corresponding structure/side-car files was given.
    """
    partitions = None
    common_metadata_path = None
    metadata_path = None

    # Dask passes a directory as a list of length 1
    if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
        path_or_paths = path_or_paths[0]

    if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
        # A directory: walk it to discover pieces, hive partitions and any
        # _metadata / _common_metadata side-car files.
        manifest = ParquetManifest(path_or_paths, filesystem=fs,
                                   open_file_func=open_file_func,
                                   pathsep=fs.pathsep,
                                   metadata_nthreads=metadata_nthreads)
        pieces = manifest.pieces
        partitions = manifest.partitions
        common_metadata_path = manifest.common_metadata_path
        metadata_path = manifest.metadata_path
    else:
        # One or more explicit file paths: each must name an existing file.
        paths = (path_or_paths if isinstance(path_or_paths, list)
                 else [path_or_paths])
        if not paths:
            raise ValueError('Must pass at least one file path')

        pieces = []
        for path in paths:
            if not fs.isfile(path):
                raise IOError('Passed non-file path: {0}'
                              .format(path))
            pieces.append(
                ParquetDatasetPiece(path, open_file_func=open_file_func))

    return pieces, partitions, common_metadata_path, metadata_path
_read_table_docstring = """
{0}
Parameters
----------
source: str, pyarrow.NativeFile, or file-like object
If a string passed, can be a single file name or directory name. For
file-like objects, only read a single file. Use pyarrow.BufferReader to
read a file contained in a bytes or buffer-like object
columns: list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'
use_threads : boolean, default True
Perform multi-threaded column reads
metadata : FileMetaData
If separately computed
memory_map : boolean, default True
If the source is a file path, use a memory map to read file, which can
improve performance in some environments
{1}
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
               use_pandas_metadata=False, memory_map=True,
               filesystem=None):
    # Docstring is attached below from _read_table_docstring.
    if _is_path_like(source):
        # Path (or URI): delegate to the resolved filesystem's reader,
        # which handles both single files and dataset directories.
        # NOTE(review): memory_map is not forwarded on this branch;
        # confirm whether fs.read_parquet supports it before wiring
        # it through.
        fs, path = _get_filesystem_and_path(filesystem, source)
        return fs.read_parquet(path, columns=columns,
                               use_threads=use_threads, metadata=metadata,
                               use_pandas_metadata=use_pandas_metadata)

    # File-like object / buffer: read it as a single Parquet file.
    # Fix: forward memory_map, which was previously accepted but
    # silently ignored.
    pf = ParquetFile(source, metadata=metadata, memory_map=memory_map)
    return pf.read(columns=columns, use_threads=use_threads,
                   use_pandas_metadata=use_pandas_metadata)


read_table.__doc__ = _read_table_docstring.format(
    'Read a Table from Parquet format',
    """use_pandas_metadata : boolean, default False
    If True and file has custom pandas schema metadata, ensure that
    index columns are also loaded""",
    """pyarrow.Table
    Content of the file as a table (of columns)""")
def read_pandas(source, columns=None, use_threads=True, memory_map=True,
                metadata=None):
    # Docstring is attached below from _read_table_docstring.
    # Fix: forward the caller's memory_map argument; it was previously
    # hard-coded to True, making memory_map=False a silent no-op.
    return read_table(source, columns=columns,
                      use_threads=use_threads,
                      metadata=metadata, memory_map=memory_map,
                      use_pandas_metadata=True)


read_pandas.__doc__ = _read_table_docstring.format(
    'Read a Table from Parquet format, also reading DataFrame\n'
    'index values if known in the file metadata',
    '',
    """pyarrow.Table
    Content of the file as a Table of Columns, including DataFrame
    indexes as columns""")
def write_table(table, where, row_group_size=None, version='1.0',
                use_dictionary=True, compression='snappy',
                use_deprecated_int96_timestamps=None,
                coerce_timestamps=None,
                allow_truncated_timestamps=False,
                flavor=None, filesystem=None, **kwargs):
    # Docstring is attached below from _parquet_writer_arg_docs.
    # 'chunk_size' is accepted as an alias for row_group_size.
    row_group_size = kwargs.pop('chunk_size', row_group_size)
    try:
        writer = ParquetWriter(
            where, table.schema,
            filesystem=filesystem,
            version=version,
            flavor=flavor,
            use_dictionary=use_dictionary,
            coerce_timestamps=coerce_timestamps,
            allow_truncated_timestamps=allow_truncated_timestamps,
            compression=compression,
            use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
            **kwargs)
        with writer:
            writer.write_table(table, row_group_size=row_group_size)
    except Exception:
        # Best-effort removal of a partially written output file before
        # re-raising; only possible when a path (not a stream) was given.
        if _is_path_like(where):
            try:
                os.remove(_stringify_path(where))
            except os.error:
                pass
        raise


write_table.__doc__ = """
Write a Table to Parquet format

Parameters
----------
table : pyarrow.Table
where: string or pyarrow.NativeFile
{0}
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None,
                     filesystem=None, preserve_index=True, **kwargs):
    """
    Wrapper around parquet.write_table for writing a Table to
    Parquet format by partitions.
    For each combination of partition columns and values,
    a subdirectories are created in the following
    manner:

    root_dir/
      group1=value1
        group2=value1
          <uuid>.parquet
        group2=value2
          <uuid>.parquet
      group1=valueN
        group2=value1
          <uuid>.parquet
        group2=valueN
          <uuid>.parquet

    Parameters
    ----------
    table : pyarrow.Table
    root_path : string,
        The root directory of the dataset
    filesystem : FileSystem, default None
        If nothing passed, paths assumed to be found in the local on-disk
        filesystem
    partition_cols : list,
        Column names by which to partition the dataset
        Columns are partitioned in the order they are given
    preserve_index : bool,
        Parameter for instantiating Table; preserve pandas index or not.
    **kwargs : dict, kwargs for write_table function.
    """
    fs, root_path = _get_filesystem_and_path(filesystem, root_path)

    _mkdir_if_not_exists(fs, root_path)

    if partition_cols is not None and len(partition_cols) > 0:
        # Partitioned write: round-trip through pandas so we can group rows
        # by the partition-column values.
        df = table.to_pandas()
        partition_keys = [df[col] for col in partition_cols]
        data_df = df.drop(partition_cols, axis='columns')
        data_cols = df.columns.drop(partition_cols)
        if len(data_cols) == 0:
            raise ValueError('No data left to save outside partition columns')

        subschema = table.schema

        # ARROW-4538: Remove index column from subschema in write_to_dataframe
        metadata = subschema.metadata
        has_pandas_metadata = (metadata is not None and b'pandas' in metadata)
        index_columns = []
        if has_pandas_metadata:
            pandas_metadata = json.loads(metadata[b'pandas'].decode('utf8'))
            index_columns = pandas_metadata['index_columns']

        # ARROW-2891: Ensure the output_schema is preserved when writing a
        # partitioned dataset
        for col in table.schema.names:
            if (col.startswith('__index_level_') or col in partition_cols or
                    col in index_columns):
                subschema = subschema.remove(subschema.get_field_index(col))

        # One output file per distinct combination of partition values,
        # written into a hive-style 'name=value/...' subdirectory tree.
        for keys, subgroup in data_df.groupby(partition_keys):
            if not isinstance(keys, tuple):
                keys = (keys,)
            subdir = '/'.join(
                ['{colname}={value}'.format(colname=name, value=val)
                 for name, val in zip(partition_cols, keys)])
            subtable = pa.Table.from_pandas(subgroup,
                                            preserve_index=preserve_index,
                                            schema=subschema,
                                            safe=False)
            prefix = '/'.join([root_path, subdir])
            _mkdir_if_not_exists(fs, prefix)
            outfile = guid() + '.parquet'
            full_path = '/'.join([prefix, outfile])
            with fs.open(full_path, 'wb') as f:
                write_table(subtable, f, **kwargs)
    else:
        # Unpartitioned write: a single uniquely named file under root_path.
        outfile = guid() + '.parquet'
        full_path = '/'.join([root_path, outfile])
        with fs.open(full_path, 'wb') as f:
            write_table(table, f, **kwargs)
def write_metadata(schema, where, version='1.0',
                   use_deprecated_int96_timestamps=False,
                   coerce_timestamps=None):
    """
    Write metadata-only Parquet file from schema

    Parameters
    ----------
    schema : pyarrow.Schema
    where: string or pyarrow.NativeFile
    version : {"1.0", "2.0"}, default "1.0"
        The Parquet format version, defaults to 1.0
    use_deprecated_int96_timestamps : boolean, default False
        Write nanosecond resolution timestamps to INT96 Parquet format
    coerce_timestamps : string, default None
        Cast timestamps to a particular resolution.
        Valid values: {None, 'ms', 'us'}
    """
    # Opening a writer and closing it without writing any row groups
    # produces a footer-only file containing just the schema.
    writer = ParquetWriter(
        where, schema, version=version,
        use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
        coerce_timestamps=coerce_timestamps)
    writer.close()
def read_metadata(where, memory_map=False):
    """
    Read the FileMetadata stored in the footer of a single Parquet file.

    Parameters
    ----------
    where : string (filepath) or file-like object
    memory_map : boolean, default False
        Create memory map when the source is a file path

    Returns
    -------
    metadata : FileMetadata
    """
    parquet_file = ParquetFile(where, memory_map=memory_map)
    return parquet_file.metadata
def read_schema(where, memory_map=False):
    """
    Read the effective Arrow schema from a Parquet file's metadata.

    Parameters
    ----------
    where : string (filepath) or file-like object
    memory_map : boolean, default False
        Create memory map when the source is a file path

    Returns
    -------
    schema : pyarrow.Schema
    """
    parquet_schema = ParquetFile(where, memory_map=memory_map).schema
    return parquet_schema.to_arrow_schema()
|
|
import mock
import nengo
import numpy as np
import pytest
from nengo_spinnaker.builder import builder, ensemble
from nengo_spinnaker.builder.ports import InputPort, OutputPort
from nengo_spinnaker import operators
class TestBuildEnsembleLIF(object):
    """Tests for building LIF ensembles into model params and operators."""

    @pytest.mark.parametrize("n_neurons, size_in", [(100, 1), (300, 4)])
    def test_build_ensemble_lif(self, n_neurons, size_in):
        """Test building LIF ensembles."""
        # Create a Nengo ensemble to build
        ens = nengo.Ensemble(n_neurons, size_in, add_to_container=False)

        # Create a model
        model = builder.Model()
        model.seeds[ens] = 1

        # Build the ensemble
        ensemble.build_ensemble(model, ens)

        # Check that the built ensemble was inserted into the params and that
        # the parameters are (loosely) as expected.
        assert model.params[ens].eval_points is not None
        assert (model.params[ens].encoders.shape ==
                model.params[ens].scaled_encoders.shape ==
                (n_neurons, size_in))
        assert (model.params[ens].intercepts.shape ==
                model.params[ens].max_rates.shape ==
                model.params[ens].gain.shape ==
                model.params[ens].bias.shape == (n_neurons, ))

        # Check that a new object was inserted into the objects dictionary
        assert isinstance(model.object_operators[ens],
                          operators.EnsembleLIF)

    def test_with_encoders_and_gain_bias(self):
        """Test that the encoders we provide are used (albeit scaled)"""
        # Create a Nengo ensemble to build
        ens = nengo.Ensemble(1, 1, add_to_container=False)
        ens.radius = 10.0
        ens.encoders = np.array([[1.0]])
        ens.gain = np.array([0.5])
        ens.bias = np.array([0.0])

        # Create a model
        model = builder.Model()
        model.seeds[ens] = 1

        # Build the ensemble
        ensemble.build_ensemble(model, ens)

        # Check that parameters are (loosely) as expected.
        # (All arrays here are single-element, so the elementwise == results
        # are valid truth values for a bare assert.)
        assert model.params[ens].encoders == ens.encoders
        assert model.params[ens].gain == ens.gain
        assert model.params[ens].bias == ens.bias
        # Scaled encoders should be encoders * gain / radius.
        assert model.params[ens].scaled_encoders == ens.encoders * (0.5 / 10)

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_only_gain(self):
        """Build an ensemble with only gain specified."""
        # Create a Nengo ensemble to build
        ens = nengo.Ensemble(1, 1, add_to_container=False)
        ens.gain = np.array([0.5])

        # Create a model
        model = builder.Model()
        model.seeds[ens] = 1

        # Build the ensemble
        ensemble.build_ensemble(model, ens)

        # Check that parameters are (loosely) as expected.
        assert model.params[ens].gain == ens.gain  # pragma : no cover

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_only_bias(self):
        """Build an ensemble with only bias specified."""
        # Create a Nengo ensemble to build
        ens = nengo.Ensemble(1, 1, add_to_container=False)
        ens.bias = np.array([-0.5])

        # Create a model
        model = builder.Model()
        model.seeds[ens] = 1

        # Build the ensemble
        ensemble.build_ensemble(model, ens)

        # Check that parameters are (loosely) as expected.
        assert model.params[ens].bias == ens.bias  # pragma : no cover
@pytest.mark.xfail(reason="Unimplemented functionality")
def test_neurons_source():
    """Test that neurons sources are sane."""
    # NOTE(review): defined at module level (it takes no `self`) even though
    # it sits amongst the Test* classes — presumably intentional; confirm.
    with nengo.Network():
        a = nengo.Ensemble(100, 2)
        b = nengo.Ensemble(100, 4)
        a_b = nengo.Connection(a.neurons, b.neurons, transform=np.eye(100))

    # Create a model with the Ensemble for a in it
    model = builder.Model()
    a_ens = operators.EnsembleLIF(a)
    model.object_operators[a] = a_ens

    # Get the source, check that an appropriate target is return
    source = ensemble.get_neurons_source(model, a_b)
    assert source.target.obj is a_ens
    assert source.target.port is ensemble.EnsembleOutputPort.neurons
class TestEnsembleSource(object):
    """Tests for retrieving signal sources for connections from Ensembles."""

    def test_normal_source(self):
        """A plain connection should source from the standard output port."""
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(200, 4)
            a_b = nengo.Connection(a, b[1:3])

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        a_ens = operators.EnsembleLIF(a)
        model.object_operators[a] = a_ens

        source = ensemble.get_ensemble_source(model, a_b)
        assert source.target.obj is a_ens
        assert source.target.port is OutputPort.standard

    def test_decoder_learnt_source(self):
        """A PES-modulated connection should source from the learnt port."""
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 2)
            e = nengo.Ensemble(100, 2)

            a_b = nengo.Connection(a, b)
            a_b.learning_rule_type = nengo.PES()
            # Error connection; created for its effect on the network.
            e_l = nengo.Connection(e, a_b.learning_rule)

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        a_ens = operators.EnsembleLIF(a)
        model.object_operators[a] = a_ens

        source = ensemble.get_ensemble_source(model, a_b)
        assert source.target.obj is a_ens
        assert source.target.port is ensemble.EnsembleOutputPort.learnt
class TestEnsembleSink(object):
    """Tests for retrieving signal sinks for connections into Ensembles."""

    def test_normal_sink(self):
        """Test that sinks for most connections into Ensembles do nothing
        special.
        """
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(200, 4)
            a_b = nengo.Connection(a, b[1:3])

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_ensemble_sink(model, a_b)
        assert sink.target.obj is b_ens
        assert sink.target.port is InputPort.standard

    def test_encoder_learnt_sink(self):
        """A Voja-modulated connection should sink at the learnt input."""
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 2)

            a_b = nengo.Connection(a, b)
            a_b.learning_rule_type = nengo.Voja()

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        sink = ensemble.get_ensemble_sink(model, a_b)
        assert sink.target.obj is b_ens
        assert sink.target.port is ensemble.EnsembleInputPort.learnt

    def test_decoder_learning_rule_sink(self):
        """An error connection to a PES rule should sink at the rule on the
        pre-synaptic ensemble's operator.
        """
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 2)
            e = nengo.Ensemble(100, 2)

            a_b = nengo.Connection(a, b)
            a_b.learning_rule_type = nengo.PES()
            e_l = nengo.Connection(e, a_b.learning_rule)

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        a_ens = operators.EnsembleLIF(a)
        model.object_operators[a] = a_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_learning_rule_sink(model, e_l)
        assert sink.target.obj is a_ens
        assert sink.target.port is a_b.learning_rule

    def test_encoder_learning_rule_sink(self):
        """An error connection to a Voja rule should sink at the rule on the
        post-synaptic ensemble's operator.
        """
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 2)
            e = nengo.Ensemble(100, 1)

            a_b = nengo.Connection(a, b)
            a_b.learning_rule_type = nengo.Voja()
            e_l = nengo.Connection(e, a_b.learning_rule)

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_learning_rule_sink(model, e_l)
        assert sink.target.obj is b_ens
        assert sink.target.port is a_b.learning_rule

    def test_normal_sink_for_passthrough_node(self):
        """Connections from passthrough Nodes use the standard input port."""
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Node(None, size_in=4)
            b = nengo.Ensemble(200, 4)
            a_b = nengo.Connection(a, b)

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_ensemble_sink(model, a_b)
        assert sink.target.obj is b_ens
        assert sink.target.port is InputPort.standard

    def test_normal_sink_for_process_node(self):
        """Connections from process-driven Nodes use the standard port."""
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Node(nengo.processes.WhiteNoise(), size_out=4)
            b = nengo.Ensemble(200, 4)
            a_b = nengo.Connection(a, b)

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_ensemble_sink(model, a_b)
        assert sink.target.obj is b_ens
        assert sink.target.port is InputPort.standard

    def test_constant_node_sink_with_slice(self):
        """Test that connections from constant valued Nodes to Ensembles are
        optimised out correctly.
        """
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Node([0.5, 1.0])
            b = nengo.Ensemble(200, 2)
            a_b = nengo.Connection(a[0], b[1])

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Check that no sink is created but that the direct input is modified
        assert np.all(b_ens.direct_input == np.zeros(2))
        assert ensemble.get_ensemble_sink(model, a_b) is None
        # a[0] (0.5) routed into dimension 1 of b's direct input.
        assert np.all(b_ens.direct_input == [0.0, 0.5])

    def test_constant_node_sink_with_function(self):
        """Test that connections from constant valued Nodes to Ensembles are
        optimised out correctly.
        """
        # Create a network and standard model
        with nengo.Network():
            a = nengo.Node([0.5, 1.0])
            b = nengo.Ensemble(200, 2)
            a_b = nengo.Connection(a, b, function=lambda x: x**2,
                                   transform=[[0.0, -1.0], [-1.0, 0.0]])

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Check that no sink is created but that the direct input is modified
        assert np.all(b_ens.direct_input == np.zeros(2))
        assert ensemble.get_ensemble_sink(model, a_b) is None
        # transform @ [0.5, 1.0]**2 == [-1.0, -0.25]
        assert np.all(b_ens.direct_input == [-1.0, -0.25])
class TestNeuronSinks(object):
    """Tests for retrieving sinks for connections terminating at neurons."""

    @pytest.mark.parametrize("source", ("neurons", "value"))
    def test_arbitrary_neuron_sink(self, source):
        """Test that standard connections to neurons return an appropriate
        sink.

        We have no plan to support arbitrary connections to neurons, but we
        allow them at this stage because they may later become global
        inhibition connections when we optimise out passthrough Nodes.
        """
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 4)

            if source == "neurons":
                # Neuron-to-neuron connection.
                a_b = nengo.Connection(a.neurons, b.neurons,
                                       transform=np.eye(100))
            else:
                # Value-to-neuron connection.
                a_b = nengo.Connection(a, b.neurons,
                                       transform=[[1.0, 0.5]]*99 +
                                                 [[0.5, 1.0]])

        # Create a model with the Ensemble for b in it
        model = builder.Model()
        b_ens = operators.EnsembleLIF(b)
        model.object_operators[b] = b_ens

        # Get the sink, check that an appropriate target is return
        sink = ensemble.get_neurons_sink(model, a_b)
        assert sink.target.obj is b_ens
        assert sink.target.port is ensemble.EnsembleInputPort.neurons
class TestBuildFromEnsembleConnection(object):
    """Test the construction of parameters that describe connections from
    Ensembles.
    """

    def test_standard_build(self):
        """Test relatively standard build."""
        # Create the network
        with nengo.Network():
            a = nengo.Ensemble(200, 3)
            b = nengo.Node(lambda t, x: None, size_in=2)
            a_b = nengo.Connection(a[:2], b, transform=np.array([[0.5, 0],
                                                                 [0.0, 0.0]]))

        # Create the model and built the pre-synaptic Ensemble
        model = builder.Model()
        model.rng = np.random
        model.seeds[a] = 1
        model.seeds[a_b] = 2
        ensemble.build_ensemble(model, a)

        # Now build the connection and check that the params seem sensible
        tparams = ensemble.build_from_ensemble_connection(model, a_b)
        assert tparams.full_decoders.shape == (2, 200)
        # The second output row has an all-zero transform row, so its full
        # decoders must also be all zero.
        assert np.all(tparams.full_decoders[1, :] == 0.0)

        # Check that the params stored in the model are correct
        params = model.params[a_b]
        assert params.decoders.shape == (200, 2)
        assert np.all(params.transform == a_b.transform)
        assert np.all(params.eval_points == model.params[a].eval_points)
        assert params.solver_info is not None

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_weights_built(self):
        """Test a build using a weights-based solver."""
        # Create the network
        with nengo.Network():
            a = nengo.Ensemble(200, 2)
            b = nengo.Ensemble(400, 2)
            a_b = nengo.Connection(
                a, b, solver=nengo.solvers.Lstsq(weights=True)
            )

        # Create the model and built the pre-synaptic Ensemble
        model = builder.Model()
        model.rng = np.random
        model.seeds[a] = 1
        model.seeds[b] = 2
        model.seeds[a_b] = 3
        ensemble.build_ensemble(model, a)
        ensemble.build_ensemble(model, b)

        # Now build the connection and check that the params seem sensible
        ensemble.build_from_ensemble_connection(model, a_b)

        # Check that the params stored in the model are correct
        params = model.params[a_b]
        assert params.decoders.shape == (200, 400)
class TestBuildFromNeuronsConnection(object):
    """Test the construction of parameters that describe connections from
    Neurons.
    """

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_standard_build(self):
        """Neuron-to-neuron connections should have no decoders and an
        identity transform by default."""
        # Create the network
        with nengo.Network():
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 3)
            a_b = nengo.Connection(a.neurons, b.neurons)

        # Get the connection parameters
        params = ensemble.build_from_neurons_connection(None, a_b)
        assert params.decoders is None
        assert np.all(params.transform == np.eye(100))
        assert params.eval_points is None
        assert params.solver_info is None
class TestProbeEnsemble(object):
    """Test probing ensembles."""

    @pytest.mark.parametrize("with_slice", [False, True])
    def test_probe_output_with_sampling(self, with_slice):
        """Test that probing the output of an Ensemble generates a new
        connection and a new object.
        """
        with nengo.Network() as net:
            a = nengo.Ensemble(100, 3)

            if not with_slice:
                p = nengo.Probe(a, sample_every=0.0023)
            else:
                # Probe only a slice of the ensemble's output.
                p = nengo.Probe(a[0:1], sample_every=0.0023)

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Check that a new connection was added and built
        assert len(list(model.connection_map.get_signals())) == 1

        # Check that a new object was added to the model
        vs = model.object_operators[p]
        assert isinstance(vs, operators.ValueSink)
        assert vs.probe is p

    def test_probe_output_no_sampling(self):
        """Test that probing the output of an Ensemble generates a new
        connection and a new object.
        """
        with nengo.Network() as net:
            a = nengo.Ensemble(100, 3)
            p = nengo.Probe(a)

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Check that a new object was added to the model
        vs = model.object_operators[p]
        # No sample_every given: the sink samples every simulation step.
        assert vs.sample_every == 1

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_probe_input(self):
        """Test probing the input of an Ensemble."""
        with nengo.Network():
            a = nengo.Ensemble(100, 3)
            p = nengo.Probe(a, "input")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.rng = np.random
        model.seeds[p] = 1

        # Build the probe
        ensemble.build_ensemble_probe(model, p)
class TestProbeNeurons(object):
    """Test probing neurons."""

    def test_probe_spikes(self):
        """Check that probing spikes modifies the local_probes list on the
        operator, but does nothing else.
        """
        with nengo.Network() as net:
            a = nengo.Ensemble(300, 1)
            p = nengo.Probe(a.neurons, "spikes")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Assert that we added the probe to the list of local probes and
        # nothing else
        assert model.object_operators[a].local_probes == [p]
        assert len(model.object_operators) == 1
        assert list(model.connection_map.get_signals()) == []

    def test_probe_spike_slice(self):
        """A sliced neuron probe is still handled as a local probe."""
        with nengo.Network() as net:
            a = nengo.Ensemble(300, 1)
            p = nengo.Probe(a.neurons[:100], "spikes")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Assert that we added the probe to the list of local probes and
        # nothing else
        assert model.object_operators[a].local_probes == [p]
        assert len(model.object_operators) == 1
        assert list(model.connection_map.get_signals()) == []

    def test_probe_voltage(self):
        """Check that probing voltage modifies the local_probes list on the
        operator, but does nothing else.
        """
        with nengo.Network() as net:
            a = nengo.Ensemble(300, 1)
            p = nengo.Probe(a.neurons, "voltage")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Assert that we added the probe to the list of local probes and
        # nothing else
        assert model.object_operators[a].local_probes == [p]
        assert len(model.object_operators) == 1
        assert list(model.connection_map.get_signals()) == []

    @pytest.mark.xfail(reason="Unimplemented functionality")
    def test_refractory_time(self):
        """Check that probing refractory time modifies the local_probes list on
        the operator, but does nothing else.
        """
        with nengo.Network() as net:
            a = nengo.Ensemble(300, 1)
            p = nengo.Probe(a.neurons, "refractory_time")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Assert that we added the probe to the list of local probes and
        # nothing else
        assert model.object_operators[a].local_probes == [p]
        assert len(model.object_operators) == 1
        # NOTE(review): other tests use model.connection_map.get_signals();
        # `connections_signals` looks like a stale attribute name — confirm
        # (masked for now because this test is marked xfail).
        assert len(model.connections_signals) == 0
class TestProbeLearningRules(object):
    """Test probing of Voja learnt encoders."""

    def test_probe_voja_scaled_encoders(self):
        """Probing scaled encoders should attach a local probe to the
        post-synaptic ensemble's operator."""
        # Create a network and standard model
        with nengo.Network() as net:
            a = nengo.Ensemble(100, 2)
            b = nengo.Ensemble(100, 2)

            a_b = nengo.Connection(a, b)
            a_b.learning_rule_type = nengo.Voja()

            p = nengo.Probe(a_b.learning_rule, "scaled_encoders")

        # Create an empty model to build the probe into
        model = builder.Model()
        model.build(net)

        # Assert that we added the probe to the list of local probes and
        # nothing else
        assert model.object_operators[b].local_probes == [p]
        # Only the two ensemble operators are present.
        assert len(model.object_operators) == 2
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import BaseHTTPServer
import itertools
import json
import logging
import mimetypes
import os
import pkgutil
import re
import urllib
import urlparse
from collections import namedtuple
from datetime import date, datetime
from textwrap import dedent
import pystache
from six.moves import range
from pants.base.build_environment import get_buildroot
from pants.base.mustache import MustacheRenderer
from pants.base.run_info import RunInfo
from pants.pantsd.process_manager import ProcessManager
from pants.stats.statsdb import StatsDBFactory
logger = logging.getLogger(__name__)
# Google Prettyprint plugin files.
# Use a raw string: the previous non-raw pattern contained the invalid
# escape sequence '\.' (deprecated in Python 3) and needless triple quotes.
PPP_RE = re.compile(r"^lang-.*\.js$")
class PantsHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A handler that demultiplexes various pants reporting URLs."""

  def __init__(self, settings, renderer, request, client_address, server):
    # NOTE: BaseHTTPRequestHandler.__init__ handles the request synchronously,
    # so all handler state must be set up before delegating to it (below).
    self._settings = settings  # An instance of ReportingServer.Settings.
    self._root = self._settings.root
    self._renderer = renderer  # A MustacheRenderer used by all page handlers.
    self._client_address = client_address
    # The underlying handlers for specific URL prefixes.
    self._GET_handlers = [
      ('/runs/', self._handle_runs),  # Show list of known pants runs.
      ('/run/', self._handle_run),  # Show a report for a single pants run.
      ('/stats/', self._handle_stats),  # Show a stats analytics page.
      ('/statsdata/', self._handle_statsdata),  # Get JSON stats data.
      ('/browse/', self._handle_browse),  # Browse filesystem under build root.
      ('/content/', self._handle_content),  # Show content of file.
      ('/assets/', self._handle_assets),  # Statically serve assets (css, js etc.)
      ('/poll', self._handle_poll),  # Handle poll requests for raw file content.
      ('/latestrunid', self._handle_latest_runid),  # Return id of latest pants run.
      ('/favicon.ico', self._handle_favicon)  # Return favicon.
    ]
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)

  def do_GET(self):
    """GET method implementation for BaseHTTPRequestHandler."""
    if not self._client_allowed():
      return

    try:
      (_, _, path, query, _) = urlparse.urlsplit(self.path)
      params = urlparse.parse_qs(query)
      # Give each handler a chance to respond.
      for prefix, handler in self._GET_handlers:
        if self._maybe_handle(prefix, handler, path, params):
          return
      # If no path specified, default to showing the list of all runs.
      if path == '/':
        self._handle_runs('', {})
        return

      self._send_content('Invalid GET request {}'.format(self.path), 'text/html', code=400)
    except (IOError, ValueError):
      pass  # Printing these errors gets annoying, and there's nothing to do about them anyway.
      #sys.stderr.write('Invalid GET request {}'.format(self.path))

  def _handle_runs(self, relpath, params):
    """Show a listing of all pants runs since the last clean-all."""
    runs_by_day = self._partition_runs_by_day()
    args = self._default_template_args('run_list')
    args['runs_by_day'] = runs_by_day
    self._send_content(self._renderer.render_name('base', args), 'text/html')

  # HTML scaffolding for a collapsible section; the content div is filled in
  # client-side. This is a runtime format string: {id}, {title} and
  # {class_prefix} are substituted via str.format below.
  _collapsible_fmt_string = dedent("""
    <div class="{class_prefix}" id="{id}">
      <div class="{class_prefix}-header toggle-header" id="{id}-header">
        <div class="{class_prefix}-header-icon toggle-header-icon" onclick="pants.collapsible.toggle('{id}')">
          <i id="{id}-icon" class="visibility-icon icon-large icon-caret-right hidden"></i>
        </div>
        <div class="{class_prefix}-header-text toggle-header-text">
          [<span id="{id}-header-text">{title}</span>]
        </div>
      </div>
      <div class="{class_prefix}-content toggle-content nodisplay" id="{id}-content"></div>
    </div>
  """)

  def _handle_run(self, relpath, params):
    """Show the report for a single pants run.

    relpath is the run id (or 'latest' for the most recent run).
    """
    args = self._default_template_args('run')
    run_id = relpath
    run_info = self._get_run_info_dict(run_id)
    if run_info is None:
      args['no_such_run'] = relpath
      if run_id == 'latest':
        args['is_latest'] = 'none'
    else:
      report_abspath = run_info['default_report']
      report_relpath = os.path.relpath(report_abspath, self._root)
      report_dir = os.path.dirname(report_relpath)
      self_timings_path = os.path.join(report_dir, 'self_timings')
      cumulative_timings_path = os.path.join(report_dir, 'cumulative_timings')
      artifact_cache_stats_path = os.path.join(report_dir, 'artifact_cache_stats')
      run_info['timestamp_text'] = \
        datetime.fromtimestamp(float(run_info['timestamp'])).strftime('%H:%M:%S on %A, %B %d %Y')

      timings_and_stats = '\n'.join([
        self._collapsible_fmt_string.format(id='cumulative-timings-collapsible',
                                            title='Cumulative timings', class_prefix='aggregated-timings'),
        self._collapsible_fmt_string.format(id='self-timings-collapsible',
                                            title='Self timings', class_prefix='aggregated-timings'),
        self._collapsible_fmt_string.format(id='artifact-cache-stats-collapsible',
                                            title='Artifact cache stats', class_prefix='artifact-cache-stats')
      ])

      args.update({'run_info': run_info,
                   'report_path': report_relpath,
                   'self_timings_path': self_timings_path,
                   'cumulative_timings_path': cumulative_timings_path,
                   'artifact_cache_stats_path': artifact_cache_stats_path,
                   'timings_and_stats': timings_and_stats})
      if run_id == 'latest':
        args['is_latest'] = run_info['id']
    self._send_content(self._renderer.render_name('base', args), 'text/html')

  def _handle_stats(self, relpath, params):
    """Show stats for pants runs in the statsdb."""
    args = self._default_template_args('stats')
    self._send_content(self._renderer.render_name('base', args), 'text/html')

  def _handle_statsdata(self, relpath, params):
    """Show stats for pants runs in the statsdb."""
    statsdb = StatsDBFactory.global_instance().get_db()
    statsdata = list(statsdb.get_aggregated_stats_for_cmd_line('cumulative_timings', '%'))
    self._send_content(json.dumps(statsdata), 'application/json')

  def _handle_browse(self, relpath, params):
    """Handle requests to browse the filesystem under the build root."""
    abspath = os.path.normpath(os.path.join(self._root, relpath))
    if not abspath.startswith(self._root):
      raise ValueError  # Prevent using .. to get files from anywhere other than root.
    if os.path.isdir(abspath):
      self._serve_dir(abspath, params)
    elif os.path.isfile(abspath):
      self._serve_file(abspath, params)

  def _handle_content(self, relpath, params):
    """Render file content for pretty display."""
    abspath = os.path.normpath(os.path.join(self._root, relpath))
    if os.path.isfile(abspath):
      with open(abspath, 'r') as infile:
        content = infile.read()
    else:
      content = 'No file found at {}'.format(abspath)
    content_type = mimetypes.guess_type(abspath)[0] or 'text/plain'
    if not content_type.startswith('text/') and not content_type == 'application/xml':
      # Binary file. Display it as hex, split into lines.
      n = 120  # Display lines of this max size.
      content = repr(content)[1:-1]  # Will escape non-printables etc, dropping surrounding quotes.
      content = '\n'.join([content[i:i + n] for i in range(0, len(content), n)])
      prettify = False
      prettify_extra_langs = []
    else:
      prettify = True
      if self._settings.assets_dir:
        prettify_extra_dir = os.path.join(self._settings.assets_dir, 'js', 'prettify_extra_langs')
        prettify_extra_langs = [{'name': x} for x in os.listdir(prettify_extra_dir)]
      else:
        # TODO: Find these from our package, somehow.
        prettify_extra_langs = []
    linenums = True
    args = {'prettify_extra_langs': prettify_extra_langs, 'content': content,
            'prettify': prettify, 'linenums': linenums}
    self._send_content(self._renderer.render_name('file_content', args), 'text/html')

  def _handle_assets(self, relpath, params):
    """Statically serve assets: js, css etc."""
    if self._settings.assets_dir:
      abspath = os.path.normpath(os.path.join(self._settings.assets_dir, relpath))
      with open(abspath, 'r') as infile:
        content = infile.read()
    else:
      content = pkgutil.get_data(__name__, os.path.join('assets', relpath))
    content_type = mimetypes.guess_type(relpath)[0] or 'text/plain'
    self._send_content(content, content_type)

  def _handle_poll(self, relpath, params):
    """Handle poll requests for raw file contents."""
    # NOTE(review): assumes the 'q' query parameter is always present;
    # params.get('q')[0] raises TypeError if it is missing. Confirm callers.
    request = json.loads(params.get('q')[0])
    ret = {}
    # request is a polling request for multiple files. For each file:
    #  - id is some identifier assigned by the client, used to differentiate the results.
    #  - path is the file to poll.
    #  - pos is the last byte position in that file seen by the client.
    for poll in request:
      _id = poll.get('id', None)
      path = poll.get('path', None)
      pos = poll.get('pos', 0)
      if path:
        abspath = os.path.normpath(os.path.join(self._root, path))
        if os.path.isfile(abspath):
          with open(abspath, 'r') as infile:
            if pos:
              infile.seek(pos)
            content = infile.read()
            ret[_id] = content
    self._send_content(json.dumps(ret), 'application/json')

  def _handle_latest_runid(self, relpath, params):
    """Handle request for the latest run id.

    Used by client-side javascript to detect when there's a new run to display.
    """
    latest_runinfo = self._get_run_info_dict('latest')
    if latest_runinfo is None:
      self._send_content('none', 'text/plain')
    else:
      self._send_content(latest_runinfo['id'], 'text/plain')

  def _handle_favicon(self, relpath, params):
    """Statically serve the favicon out of the assets dir."""
    self._handle_assets('favicon.ico', params)

  def _partition_runs_by_day(self):
    """Split the runs by day, so we can display them grouped that way."""
    run_infos = self._get_all_run_infos()
    for x in run_infos:
      ts = float(x['timestamp'])
      x['time_of_day_text'] = datetime.fromtimestamp(ts).strftime('%H:%M:%S')

    def date_text(dt):
      # Human-friendly label for a date relative to today.
      delta_days = (date.today() - dt).days
      if delta_days == 0:
        return 'Today'
      elif delta_days == 1:
        return 'Yesterday'
      elif delta_days < 7:
        return dt.strftime('%A')  # Weekday name.
      else:
        d = dt.day % 10
        suffix = 'st' if d == 1 else 'nd' if d == 2 else 'rd' if d == 3 else 'th'
        return dt.strftime('%B %d') + suffix  # E.g., October 30th.

    keyfunc = lambda x: datetime.fromtimestamp(float(x['timestamp']))
    sorted_run_infos = sorted(run_infos, key=keyfunc, reverse=True)
    return [{'date_text': date_text(dt), 'run_infos': [x for x in infos]}
            for dt, infos in itertools.groupby(sorted_run_infos, lambda x: keyfunc(x).date())]

  def _get_run_info_dict(self, run_id):
    """Get the RunInfo for a run, as a dict, or None if no such run exists."""
    run_info_path = os.path.join(self._settings.info_dir, run_id, 'info')
    if os.path.exists(run_info_path):
      # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
      return RunInfo(run_info_path).get_as_dict()
    else:
      return None

  def _get_all_run_infos(self):
    """Find the RunInfos for all runs since the last clean-all."""
    info_dir = self._settings.info_dir
    if not os.path.isdir(info_dir):
      return []
    paths = [os.path.join(info_dir, x) for x in os.listdir(info_dir)]

    # We copy the RunInfo as a dict, so we can add stuff to it to pass to the template.
    # We filter only those that have a timestamp, to avoid a race condition with writing
    # that field.
    return filter(lambda d: 'timestamp' in d, [RunInfo(os.path.join(p, 'info')).get_as_dict()
                                               for p in paths if os.path.isdir(p) and not os.path.islink(p)])

  def _serve_dir(self, abspath, params):
    """Show a directory listing."""
    relpath = os.path.relpath(abspath, self._root)
    breadcrumbs = self._create_breadcrumbs(relpath)
    entries = [{'link_path': os.path.join(relpath, e), 'name': e} for e in os.listdir(abspath)]
    args = self._default_template_args('dir')
    args.update({'root_parent': os.path.dirname(self._root),
                 'breadcrumbs': breadcrumbs,
                 'entries': entries,
                 'params': params})
    self._send_content(self._renderer.render_name('base', args), 'text/html')

  def _serve_file(self, abspath, params):
    """Show a file.

    The actual content of the file is rendered by _handle_content.
    """
    relpath = os.path.relpath(abspath, self._root)
    breadcrumbs = self._create_breadcrumbs(relpath)
    link_path = urlparse.urlunparse([None, None, relpath, None, urllib.urlencode(params), None])
    args = self._default_template_args('file')
    args.update({'root_parent': os.path.dirname(self._root),
                 'breadcrumbs': breadcrumbs,
                 'link_path': link_path})
    self._send_content(self._renderer.render_name('base', args), 'text/html')

  def _send_content(self, content, content_type, code=200):
    """Send content to client."""
    self.send_response(code)
    self.send_header('Content-Type', content_type)
    self.send_header('Content-Length', str(len(content)))
    self.end_headers()
    self.wfile.write(content)

  def _client_allowed(self):
    """Check if client is allowed to connect to this server.

    Sends a forbidden response (and returns False) for disallowed clients.
    """
    client_ip = self._client_address[0]
    if not client_ip in self._settings.allowed_clients and \
       not 'ALL' in self._settings.allowed_clients:
      self._send_content('Access from host {} forbidden.'.format(client_ip), 'text/html')
      return False
    return True

  def _maybe_handle(self, prefix, handler, path, params, data=None):
    """Apply the handler if the prefix matches.

    Returns True iff the handler was invoked.
    """
    if path.startswith(prefix):
      relpath = path[len(prefix):]
      if data:
        handler(relpath, params, data)
      else:
        handler(relpath, params)
      return True
    else:
      return False

  def _create_breadcrumbs(self, relpath):
    """Create filesystem browsing breadcrumb navigation.

    That is, make each path segment into a clickable element that takes you to that dir.
    """
    if relpath == '.':
      breadcrumbs = []
    else:
      path_parts = [os.path.basename(self._root)] + relpath.split(os.path.sep)
      path_links = ['/'.join(path_parts[1:i + 1]) for i, name in enumerate(path_parts)]
      breadcrumbs = [{'link_path': link_path, 'name': name}
                     for link_path, name in zip(path_links, path_parts)]
    return breadcrumbs

  def _default_template_args(self, content_template):
    """Initialize template args."""
    def include(text, args):
      template_name = pystache.render(text, args)
      return self._renderer.render_name(template_name, args)
    # Our base template calls include on the content_template.
    ret = {'content_template': content_template}
    ret['include'] = lambda text: include(text, ret)
    return ret

  def log_message(self, fmt, *args):
    """Silence BaseHTTPRequestHandler's logging."""
class ReportingServer(object):
  """Reporting Server HTTP server."""

  class Settings(namedtuple('Settings', ['info_dir', 'template_dir', 'assets_dir', 'root',
                                         'allowed_clients'])):
    """Configuration bundle for the reporting server.

    info_dir: path to dir containing RunInfo files.
    template_dir: location of mustache template files. If None, the templates
                  embedded in our package are used.
    assets_dir: location of assets (js, css etc.) If None, the assets
                embedded in our package are used.
    root: build root.
    allowed_clients: list of ips or ['ALL'].
    """

  def __init__(self, port, settings):
    template_renderer = MustacheRenderer(settings.template_dir, __name__)

    class BoundHandler(PantsHandler):
      """PantsHandler with this server's settings and renderer pre-bound."""

      def __init__(self, request, client_address, server):
        PantsHandler.__init__(self, settings, template_renderer, request, client_address, server)

    self._httpd = BaseHTTPServer.HTTPServer(('', port), BoundHandler)
    # This controls how often handle_request yields, not the network timeout.
    self._httpd.timeout = 0.1

  def server_port(self):
    """Return the port the underlying HTTPServer actually bound to."""
    return self._httpd.server_port

  def start(self):
    """Serve requests until the process exits."""
    self._httpd.serve_forever()
class ReportingServerManager(ProcessManager):
  """Daemonizes and supervises a ReportingServer child process."""

  def __init__(self, context=None, options=None):
    ProcessManager.__init__(self, name='reporting_server')
    self.context = context
    self.options = options

  def post_fork_child(self):
    """Post-fork() child callback for ProcessManager.daemonize()."""
    opts = self.options
    # The server locates run-specific info dirs by scanning the subdirectories
    # of info_dir, which is simply the parent of the current run's info dir.
    info_dir = os.path.dirname(self.context.run_tracker.run_info_dir)
    settings = ReportingServer.Settings(info_dir=info_dir,
                                        template_dir=opts.template_dir,
                                        assets_dir=opts.assets_dir,
                                        root=get_buildroot(),
                                        allowed_clients=opts.allowed_clients)
    server = ReportingServer(opts.port, settings)

    # Let the parent know which port we bound, then serve until killed.
    self.write_socket(server.server_port())
    server.start()
|
|
import sys
import os
import numpy as np
import cv2
from scipy.signal import convolve2d
from scipy.ndimage.filters import gaussian_filter
import math
import part0
import part1
import part2
import part3
import run
def sobel_filter_x():
    '''Return the 3x3 Sobel kernel for gradients in the x direction.'''
    # The Sobel-x kernel is separable: the outer product of the smoothing
    # column [1, 2, 1] with the differencing row [-1, 0, 1].
    return np.outer([1, 2, 1], [-1, 0, 1])
def sobel_filter_y():
    '''Return the 3x3 Sobel kernel for gradients in the y direction.'''
    # Separable form: differencing column [-1, 0, 1] times the smoothing
    # row [1, 2, 1] (the transpose of the Sobel-x kernel).
    return np.outer([-1, 0, 1], [1, 2, 1])
def transform_xy_theta(dx, dy):
    '''Transform from xy gradients to edge direction.

    Input:
    dx, dy - the gradient images generated by applying sobel filters to an
    image. They both have shape (rows, cols) and dtype float.

    Output:
    theta - a numpy array of shape (rows, cols) and dtype float, where
    theta[i,j] is the inverse tangent of dy[i,j]/dx[i,j], in the range
    of [-pi/2, pi/2] radians.

    Note: dx is patched in place (zeros become 0.001) so the division is
    always well-defined; callers should not rely on dx's zero entries after
    this call.
    '''
    # To avoid dividing by zero, replace zeros in the denominator with a
    # small value. (dx is the denominator of dy/dx, so dx — not dy — is the
    # array that must be patched.)
    dx[dx == 0] = 0.001

    # Previously this was an unimplemented stub that returned None, which
    # crashed run_edges; np.arctan maps onto [-pi/2, pi/2] elementwise.
    return np.arctan(dy / dx)
def transform_xy_mag(dx, dy):
    '''Transform from xy gradients to edge magnitude.

    Input:
    dx, dy - the gradient images generated by applying sobel filters to an
    image. They both have shape (rows, cols) and dtype float.

    Output:
    mag - a numpy array of shape (rows, cols) and dtype float.
    Each location mag[i,j] contains the magnitude of the gradient, which
    is sqrt(dx[i,j]^2 + dy[i,j]^2).
    '''
    # Previously an unimplemented stub returning None, which crashed
    # run_edges; this is the elementwise Euclidean gradient magnitude.
    return np.sqrt(np.square(dx) + np.square(dy))
def get_color(theta, mag):
    '''Return the color for a given edge theta and magnitude.

    Given the local edge orientation and magnitude, return the corresponding
    (b, g, r) tuple. The intensity of the color is given by the magnitude
    (stronger edges are brighter), clamped to [0, 255].

    Bug fix: theta exactly equal to 0.375*pi previously matched no branch
    and the function returned None; that boundary is now treated as
    vertical (yellow), so every theta yields a color.
    '''
    boundaries = np.array([0.375, 0.125, -0.125, -0.375]) * math.pi

    # crop the magnitude to 0, 255 range.
    if mag < 0:
        mag = 0
    if mag > 255:
        mag = 255

    # | (vertical) yellow -- '>=' includes the upper boundary angle.
    if theta >= boundaries[0] or theta < boundaries[3]:
        return (0, mag, mag)
    # \ green
    if boundaries[3] <= theta < boundaries[2]:
        return (0, mag, 0)
    # -- blue
    if boundaries[2] <= theta < boundaries[1]:
        return (mag, 0, 0)
    # / red -- the remaining range [boundaries[1], boundaries[0]).
    return (0, 0, mag)
def run_edges(image):
    ''' This function finds and colors all edges in the given image.

    image: a BGR or grayscale numpy array. Returns a uint8 color image of
    the same height/width where each pixel encodes local edge orientation
    (hue) and strength (brightness) via get_color.
    '''
    # Convert image to gray
    if len(image.shape) > 2:
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        grayimage = image

    # blur so the gradient operation is less noisy.
    # uses a gaussian filter with sigma = 2
    grayimage = gaussian_filter(grayimage, 2).astype(float)

    # Filter with x and y sobel filters
    # NOTE(review): convolve2d defaults to mode='full', so dx/dy are two
    # rows/cols larger than the input; the indexing below therefore samples
    # theta/mag with a border offset. Confirm whether mode='same' was
    # intended.
    dx = convolve2d(grayimage, sobel_filter_x())
    dy = convolve2d(grayimage, sobel_filter_y())

    # Convert to orientation and magnitude images
    theta = transform_xy_theta(dx, dy)
    mag = transform_xy_mag(dx, dy)

    outimg = np.zeros((image.shape[0], image.shape[1], 3), dtype = np.uint8)

    # Fill with corresponding color.
    for r in range(outimg.shape[0]):
        for c in range(outimg.shape[1]):
            outimg[r,c,:] = get_color(theta[r,c], mag[r,c])
    return outimg
def test():
    '''This script will perform a unit test on your function, and provide useful
    output.

    Builds small hand-checked (dx, dy) gradient pairs together with their
    expected theta and mag images, runs transform_xy_theta/transform_xy_mag
    on each, and compares type, shape, dtype and values (tolerance 0.05).
    Returns True on success, False at the first failure. Progress is printed
    only when run as a script (the __main__ guards keep autograder output
    quiet).
    '''
    dxs = []
    dys = []
    thetas = []
    mags = []

    # 2x2 fixture: unit gradients along single axes.
    y = np.array([[ 0, 1],
                  [-1, 0]], dtype = float)
    dys.append(y)
    x = np.array([[ 1, 0],
                  [ 0,-1]], dtype = float)
    dxs.append(x)
    theta = np.array([[ 0.       , math.pi/2],
                      [-math.pi/2, 0.       ]], dtype = float)
    thetas.append(theta)
    mag = np.array([[ 1, 1],
                    [ 1, 1]], dtype = float)
    mags.append(mag)

    # 3x3 fixture: mixed-direction gradients (diagonals included).
    y = np.array([[ 0, 0, 0],
                  [ 1, 1, 1],
                  [-1,-1,-1]], dtype = float)
    dys.append(y)
    x = np.array([[ 0, 1,-1],
                  [ 0, 1,-1],
                  [ 0, 1,-1]], dtype = float)
    dxs.append(x)
    theta = np.array([[ 0,          0,          0        ],
                      [ math.pi/2,  math.pi/4, -math.pi/4],
                      [-math.pi/2, -math.pi/4,  math.pi/4]], dtype = float)
    thetas.append(theta)
    mag= np.array([[ 0, 1,     1    ],
                   [ 1, 1.414, 1.414],
                   [ 1, 1.414, 1.414]], dtype = float)
    mags.append(mag)

    for dx, dy, theta, mag in zip(dxs, dys, thetas, mags):
        if __name__ == "__main__":
            print "dx:\n{}\n, dy:\n{}\n".format(dx, dy)
        usr_theta = transform_xy_theta(dx, dy)
        usr_mag = transform_xy_mag(dx, dy)
        for usr_out, true_out, name in zip((usr_theta, usr_mag), (theta, mag), ('theta', 'mag')):
            if not type(usr_out) == type(true_out):
                if __name__ == "__main__":
                    print "Error- {} has type {}. Expected type is {}.".format(
                        name, type(usr_out), type(true_out))
                return False
            if not usr_out.shape == true_out.shape:
                if __name__ == "__main__":
                    print "Error- {} has shape {}. Expected shape is {}.".format(
                        name, usr_out.shape, true_out.shape)
                return False
            if not usr_out.dtype == true_out.dtype:
                if __name__ == "__main__":
                    print "Error- {} has dtype {}. Expected dtype is {}.".format(
                        name, usr_out.dtype, true_out.dtype)
                return False
            if not np.all(np.abs(usr_out - true_out) < .05):
                if __name__ == "__main__":
                    print "Error- {} has value:\n{}\nExpected value:\n{}".format(
                        name, usr_out, true_out)
                return False
            if __name__ == "__main__":
                print "{} passed.".format(name)
    if __name__ == "__main__":
        print "Success."
    return True
if __name__ == "__main__":
    # Run the self-test first; only process images if it passes.
    print "Performing unit tests. Your functions will be accepted if your result is\
 within 0.05 of the correct output."
    np.set_printoptions(precision=3)

    if not test():
        print "Unit test failed. Halting"
        sys.exit()

    # Batch-apply run_edges to every recognized image under images/source,
    # writing results to images/filtered.
    sourcefolder = os.path.abspath(os.path.join(os.curdir, 'images', 'source'))
    outfolder = os.path.abspath(os.path.join(os.curdir, 'images', 'filtered'))

    print 'Searching for images in {} folder'.format(sourcefolder)

    # Extensions recognized by opencv
    exts = ['.bmp', '.pbm', '.pgm', '.ppm', '.sr', '.ras', '.jpeg', '.jpg',
            '.jpe', '.jp2', '.tiff', '.tif', '.png']

    # For every image in the source directory
    for dirname, dirnames, filenames in os.walk(sourcefolder):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext in exts:
                print "Reading image {}.".format(filename)
                img = cv2.imread(os.path.join(dirname, filename))

                print "Applying edges."
                outimg = run_edges(img)

                outpath = os.path.join(outfolder, name + 'edges' + ext)

                print "Writing image {}.".format(outpath)
                cv2.imwrite(outpath, outimg)
|
|
#--------------------------------------------------
# BMP2NC_PY
# Convert bmp file to NC code (python version)
# [email protected]
#---------------------------------------------------
from bmpy import *
import time
import math
# Z height (mm) for safe rapid travel above the work piece
z_safe_height = 3
# Z height (mm) when plunged in to drill/cut
z_drill_height = -3
# if the XY move is at least jump_distance, lift to the safe height, travel to the target, then plunge to drill
jump_distance = 0.1
#------------------------------
# help
#------------------------------
def help():
print "Usage: " + sys.argv[0] + " bmp_filename x_mm r g b rmm"
print 'example : python draw_pocket_edge.py d.bmp 255 0 0 2'
#------------------------------
# distance_p2p
#------------------------------
def distance_p2p(x1,y1,x2,y2,mm_per_pt):
    # Euclidean distance (in mm) between two bitmap points, where
    # mm_per_pt converts point indices to millimetres.
    delta_x = (x1 - x2) * mm_per_pt
    delta_y = (y1 - y2) * mm_per_pt
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
#----------------------------------
# gen_gcode
# x1,y2 -- target bmp bitmap index
# x2,y2 -- last bmp bitmap index
#----------------------------------
def gen_gcode(x1,y1,x2,y2,height):
    '''Print G-code moving the tool from bitmap point (x2, y2) to (x1, y1).

    x1, y1 -- target bmp bitmap index
    x2, y2 -- last bmp bitmap index
    height -- bitmap height in points; used to flip the y axis so that
              bitmap row 0 maps to the top of the work piece.

    Relies on module globals mm_per_pt, jump_distance, z_safe_height and
    z_drill_height; mm_per_pt is set by the __main__ section before any
    call. Output goes to stdout, one G1 command per line.
    '''
    d = distance_p2p(x1,y1,x2,y2,mm_per_pt)
    #print 'd:',d
    if d >= jump_distance:
        # Long hop: retract to the safe height at the old position, travel
        # to the target at the safe height, then plunge to drilling depth.
        s = 'G1 X%.3f Y%.3f Z%.3f\n' % (x2*mm_per_pt,(height-1-y2)*mm_per_pt,z_safe_height)
        s += 'G1 X%.3f Y%.3f Z%.3f\n' % (x1*mm_per_pt,(height-1-y1)*mm_per_pt,z_safe_height)
        s += 'G1 X%.3f Y%.3f Z%.3f' % (x1*mm_per_pt,(height-1-y1)*mm_per_pt,z_drill_height)
        print s
    else:
        # Short hop: stay at drilling depth and cut straight to the target.
        s = 'G1 X%.3f Y%.3f Z%.3f' % (x1*mm_per_pt,(height-1-y1)*mm_per_pt,z_drill_height)
        print s
    pass
#------------------------------
# find_new_available_pt
#------------------------------
def find_new_available_pt(bitmap,last_x,last_y,x_limit,y_limit):
    '''Locate the next black point to machine.

    First probes the eight neighbours of (last_x, last_y) in a fixed
    priority order (right, down, left, up, then the four diagonals); if
    none is black, falls back to a full raster scan for any point whose
    red channel is 0. Returns (0, x, y) on success or (-1, 0, 0) when no
    candidate remains.
    '''
    # Neighbour offsets in the original priority order.
    steps = ((1, 0), (0, 1), (-1, 0), (0, -1),
             (1, -1), (1, 1), (-1, -1), (-1, 1))
    for step_x, step_y in steps:
        nx = last_x + step_x
        ny = last_y + step_y
        if 0 <= nx < x_limit and 0 <= ny < y_limit and bitmap[ny][nx] == (0,0,0):
            return 0, nx, ny

    # No adjacent black point: scan the whole bitmap for any point whose
    # first (red) channel is zero.
    for ny in range(y_limit):
        for nx in range(x_limit):
            if bitmap[ny][nx][0] == 0:
                return 0, nx, ny
    return -1, 0, 0
#------------------------------
# find_pts_in_range
#------------------------------
def find_pts_in_range(bmp,center_x,center_y,r_mm):
    '''Collect every bitmap coordinate within r_mm of the centre point.

    Returns a list of (x, y) index tuples whose physical distance from
    (center_x, center_y) is at most r_mm.

    NOTE(review): relies on the module-global mm_per_pt being assigned by
    the __main__ section before this is called.
    '''
    hits = []
    for row in xrange(bmp.height):
        for col in xrange(bmp.width):
            if distance_p2p(col, row, center_x, center_y, mm_per_pt) <= r_mm:
                hits.append((col, row))
    return hits
#------------------------------
# main
#------------------------------
if __name__ == "__main__":
    mm_per_pt = 0
    r_mm = 2
    # NOTE(review): requires exactly 8 argv entries although only
    # sys.argv[1..6] are read below — confirm the intended argument count.
    if len(sys.argv) == 8:
        image_name = sys.argv[1]
        x_mm = float(sys.argv[2])
        draw_r = int(sys.argv[3])
        draw_g = int(sys.argv[4])
        draw_b = int(sys.argv[5])
        r_mm = float(sys.argv[6])
    else:
        help()
        sys.exit(1)

    bmp = BMPy(image_name)
    #bmp.draw_line( (0x00,0x00,0x00),0,0,20,30)
    x_min = bmp.width-1
    x_max = 0
    #bmp.draw_line((255,0,0),0,0,10,10)
    bmp.save_to('new1.bmp')

    #
    # make picture points become black & white
    #
    for y in xrange(bmp.height):
        for x in xrange(bmp.width):
            # NOTE(review): the tuple is unpacked as (r,b,g) but compared
            # against draw_r/draw_g/draw_b — the b/g order looks swapped;
            # verify against BMPy's channel layout.
            (r,b,g) = bmp.bitmap[y][x]
            #if r > 128:
            if (r == draw_r) and (g == draw_g) and (b == draw_b):
                bmp.bitmap[y][x] = (0,0,0)
                # Track the horizontal extent of the drawing for scaling.
                if x < x_min:
                    x_min = x
                if x > x_max:
                    x_max = x
            else:
                bmp.bitmap[y][x] = (255,255,255)
    bmp.save_to('new2.bmp')

    #
    # caculate ?mm/point
    #
    mm_per_pt = x_mm/(x_max-x_min)
    #print 'x_mm:',x_mm
    #print 'x_min:',x_min
    #print 'x_max:',x_max
    #print 'mm_per_pt:',mm_per_pt

    print 'G0 X0 Y0 Z%.3f' %(z_safe_height)

    #
    # generate nc point from bmp point mapping
    #
    current_x = 0
    current_y = 0
    current_z = z_safe_height
    last_x = 0
    last_y = 0
    last_z = z_safe_height
    pts_all = []
    # NOTE(review): bitmap2 is an alias of bmp.bitmap, not a copy, so the
    # red marks below are written into the same structure being scanned.
    bitmap2 = bmp.bitmap
    while True:
        retcode, current_x,current_y = find_new_available_pt(bmp.bitmap,last_x,last_y,bmp.width,bmp.height)
        if retcode == 0:
            #print 'loop ....',current_x,current_y
            #gen_gcode(current_x,current_y,last_x,last_y,bmp.height)
            #bmp.bitmap[current_y][current_x] = (255,255,255)
            pts = find_pts_in_range(bmp,current_x,current_y,r_mm)
            #print 'pts:',pts
            # Paint the covered neighbourhood red, then consume the point.
            for (x,y) in pts:
                bitmap2[y][x] = (255,0,0)
            #bitmap2[current_y][current_x] = (255,0,0)
            bmp.bitmap[current_y][current_x] = (255,255,255)
            last_x = current_x
            last_y = current_y
        else:
            break
    bmp.bitmap = bitmap2
    #print 'bmp.bitmap:',bmp.bitmap
    bmp.save_to('new2.bmp')
|
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with MongoDB `ObjectIds
<http://dochub.mongodb.org/core/objectids>`_.
"""
import binascii
import calendar
import datetime
try:
import hashlib
_md5func = hashlib.md5
except ImportError: # for Python < 2.5
import md5
_md5func = md5.new
import os
import random
import socket
import struct
import threading
import time
from bson.errors import InvalidId
from bson.py3compat import (PY3, b, binary_type, text_type,
bytes_from_hex, string_types)
from bson.tz_util import utc
EMPTY = b("")
ZERO = b("\x00")
def _machine_bytes():
    """Get the machine portion of an ObjectId.

    Returns the first 3 bytes of the md5 digest of this host's name.
    """
    hostname = socket.gethostname()
    digest = _md5func()
    if PY3:
        # gethostname() returns unicode on python 3, while hash update()
        # requires bytes.
        digest.update(hostname.encode())
    else:
        # On python 2 the hostname is already a byte string; calling
        # encode() would fail for non-ascii names.
        digest.update(hostname)
    return digest.digest()[:3]
class ObjectId(object):
    """A MongoDB ObjectId.

    Internally a 12-byte string: 4-byte timestamp, 3-byte machine id,
    2-byte pid, 3-byte incrementing counter.
    """

    # Process-wide counter for the 3-byte "inc" field; starts at a random
    # value so concurrent processes diverge immediately.
    _inc = random.randint(0, 0xFFFFFF)
    # Serializes increments of _inc across threads.
    _inc_lock = threading.Lock()

    # 3-byte machine identifier, computed once per process.
    _machine_bytes = _machine_bytes()

    # NOTE(review): a bare string here declares a single slot named '__id'
    # (name-mangled to _ObjectId__id); it is not a tuple of slot names.
    __slots__ = ('__id')

    # BSON type number for ObjectId.
    _type_marker = 7

    def __init__(self, oid=None):
        """Initialize a new ObjectId.

        If `oid` is ``None``, create a new (unique) ObjectId. If `oid`
        is an instance of (:class:`basestring` (:class:`str` or :class:`bytes`
        in python 3), :class:`ObjectId`) validate it and use that. Otherwise,
        a :class:`TypeError` is raised. If `oid` is invalid,
        :class:`~bson.errors.InvalidId` is raised.

        :Parameters:
          - `oid` (optional): a valid ObjectId (12 byte binary or 24 character
            hex string)

        .. versionadded:: 1.2.1
           The `oid` parameter can be a ``unicode`` instance (that contains
           only hexadecimal digits).

        .. mongodoc:: objectids
        """
        if oid is None:
            self.__generate()
        else:
            self.__validate(oid)

    @classmethod
    def from_datetime(cls, generation_time):
        """Create a dummy ObjectId instance with a specific generation time.

        This method is useful for doing range queries on a field
        containing :class:`ObjectId` instances.

        .. warning::
           It is not safe to insert a document containing an ObjectId
           generated using this method. This method deliberately
           eliminates the uniqueness guarantee that ObjectIds
           generally provide. ObjectIds generated with this method
           should be used exclusively in queries.

        `generation_time` will be converted to UTC. Naive datetime
        instances will be treated as though they already contain UTC.

        An example using this helper to get documents where ``"_id"``
        was generated before January 1, 2010 would be:

        >>> gen_time = datetime.datetime(2010, 1, 1)
        >>> dummy_id = ObjectId.from_datetime(gen_time)
        >>> result = collection.find({"_id": {"$lt": dummy_id}})

        :Parameters:
          - `generation_time`: :class:`~datetime.datetime` to be used
            as the generation time for the resulting ObjectId.

        .. versionchanged:: 1.8
           Properly handle timezone aware values for
           `generation_time`.

        .. versionadded:: 1.6
        """
        if generation_time.utcoffset() is not None:
            # Normalize aware datetimes to UTC before taking the timestamp.
            generation_time = generation_time - generation_time.utcoffset()
        ts = calendar.timegm(generation_time.timetuple())
        # Timestamp in the first 4 bytes; the remaining 8 bytes zeroed.
        oid = struct.pack(">i", int(ts)) + ZERO * 8
        return cls(oid)

    @classmethod
    def is_valid(cls, oid):
        """Checks if a `oid` string is valid or not.

        :Parameters:
          - `oid`: the object id to validate

        .. versionadded:: 2.3
        """
        try:
            ObjectId(oid)
            return True
        except (InvalidId, TypeError):
            return False

    def __generate(self):
        """Generate a new value for this ObjectId.
        """
        oid = EMPTY

        # 4 bytes current time
        oid += struct.pack(">i", int(time.time()))

        # 3 bytes machine
        oid += ObjectId._machine_bytes

        # 2 bytes pid
        oid += struct.pack(">H", os.getpid() % 0xFFFF)

        # 3 bytes inc
        # Take the low 3 bytes of the packed 4-byte counter, under the lock
        # so no two threads in this process reuse a value.
        ObjectId._inc_lock.acquire()
        oid += struct.pack(">i", ObjectId._inc)[1:4]
        ObjectId._inc = (ObjectId._inc + 1) % 0xFFFFFF
        ObjectId._inc_lock.release()

        self.__id = oid

    def __validate(self, oid):
        """Validate and use the given id for this ObjectId.

        Raises TypeError if id is not an instance of
        (:class:`basestring` (:class:`str` or :class:`bytes`
        in python 3), ObjectId) and InvalidId if it is not a
        valid ObjectId.

        :Parameters:
          - `oid`: a valid ObjectId
        """
        if isinstance(oid, ObjectId):
            self.__id = oid.__id
        elif isinstance(oid, string_types):
            if len(oid) == 12:
                # 12 characters: must already be the raw binary form.
                if isinstance(oid, binary_type):
                    self.__id = oid
                else:
                    raise InvalidId("%s is not a valid ObjectId" % oid)
            elif len(oid) == 24:
                # 24 characters: hex representation.
                try:
                    self.__id = bytes_from_hex(oid)
                except (TypeError, ValueError):
                    raise InvalidId("%s is not a valid ObjectId" % oid)
            else:
                raise InvalidId("%s is not a valid ObjectId" % oid)
        else:
            raise TypeError("id must be an instance of (%s, %s, ObjectId), "
                            "not %s" % (binary_type.__name__,
                                        text_type.__name__, type(oid)))

    @property
    def binary(self):
        """12-byte binary representation of this ObjectId.
        """
        return self.__id

    @property
    def generation_time(self):
        """A :class:`datetime.datetime` instance representing the time of
        generation for this :class:`ObjectId`.

        The :class:`datetime.datetime` is timezone aware, and
        represents the generation time in UTC. It is precise to the
        second.

        .. versionchanged:: 1.8
           Now return an aware datetime instead of a naive one.

        .. versionadded:: 1.2
        """
        t = struct.unpack(">i", self.__id[0:4])[0]
        return datetime.datetime.fromtimestamp(t, utc)

    def __getstate__(self):
        """return value of object for pickling.
        needed explicitly because __slots__() defined.
        """
        return self.__id

    def __setstate__(self, value):
        """explicit state set from pickling
        """
        # Provide backwards compatability with OIDs
        # pickled with pymongo-1.9 or older.
        if isinstance(value, dict):
            oid = value["_ObjectId__id"]
        else:
            oid = value
        # ObjectIds pickled in python 2.x used `str` for __id.
        # In python 3.x this has to be converted to `bytes`
        # by encoding latin-1.
        if PY3 and isinstance(oid, text_type):
            self.__id = oid.encode('latin-1')
        else:
            self.__id = oid

    def __str__(self):
        # 24-character lowercase hex representation.
        if PY3:
            return binascii.hexlify(self.__id).decode()
        return binascii.hexlify(self.__id)

    def __repr__(self):
        return "ObjectId('%s')" % (str(self),)

    def __eq__(self, other):
        if isinstance(other, ObjectId):
            return self.__id == other.__id
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, ObjectId):
            return self.__id != other.__id
        return NotImplemented

    def __lt__(self, other):
        # Ordering is bytewise on the 12-byte value, so ObjectIds sort
        # roughly by creation time.
        if isinstance(other, ObjectId):
            return self.__id < other.__id
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, ObjectId):
            return self.__id <= other.__id
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, ObjectId):
            return self.__id > other.__id
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, ObjectId):
            return self.__id >= other.__id
        return NotImplemented

    def __hash__(self):
        """Get a hash value for this :class:`ObjectId`.

        .. versionadded:: 1.1
        """
        return hash(self.__id)
|
|
"""
Created on 7 Sep 2020
@author: Bruno Beloff ([email protected])
NB: 1 kPa = 10 mBar
https://www.sensirion.com/en/download-center/carbon-dioxide-sensors-co2/co2-sensor/
https://github.com/Sensirion/embedded-scd/releases/tag/2.1.0
"""
import time
from scs_core.data.datum import Decode, Encode
from scs_core.gas.scd30.scd30_datum import SCD30Datum
from scs_core.gas.scd30.scd30_baseline import SCD30Baseline
from scs_dfe.gas.scd30.pca9543a import PCA9543A
from scs_host.bus.i2c import I2C
from scs_host.lock.lock import Lock
# --------------------------------------------------------------------------------------------------------------------
class SCD30(object):
"""
classdocs
"""
DEFAULT_AMBIENT_PRESSURE = 101.3 # kPa
MIN_SAMPLING_INTERVAL = 2 # seconds
MAX_SAMPLING_INTERVAL = 1800 # seconds
MIN_FORCED_CALIB = 400 # ppm
MAX_FORCED_CALIB = 2000 # ppm
MIN_PRESSURE = 70.0 # kPa
MAX_PRESSURE = 140.0 # kPa
# ----------------------------------------------------------------------------------------------------------------
__I2C_ADDR = 0x61
__CMD_START_PERIODIC_MEASUREMENT = 0x0010
__CMD_STOP_PERIODIC_MEASUREMENT = 0x0104
__CMD_GET_DATA_READY = 0x0202
__CMD_READ_MEASUREMENT = 0x0300
__CMD_MEASUREMENT_INTERVAL = 0x4600
__CMD_TEMPERATURE_OFFSET = 0x5403
__CMD_ALTITUDE = 0x5102
__CMD_AUTO_SELF_CALIBRATION = 0x5306
__CMD_FORCED_RECALIBRATION = 0x5204
__CMD_READ_SERIAL_NUMBER = 0xd033
__CMD_READ_FIRMWARE_VERSION = 0xd100
__CMD_RESET = 0xd304
__SERIAL_NUM_WORDS = 16
__CMD_DELAY = 0.01
__RESET_DELAY = 2.0
__CRC8_POLYNOMIAL = 0x31
__CRC8_INIT = 0xff
__CRC8_LEN = 1
__LOCK_TIMEOUT = 2.0
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def null_datum(cls):
return SCD30Datum(None, None, None)
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, baseline: SCD30Baseline):
"""
Constructor
"""
self.__selector = PCA9543A() # PCA9543A
self.__addr = self.__I2C_ADDR # int
self.__baseline = baseline # SCD30Baseline
self.__ambient_pressure_kpa = None # float
# ----------------------------------------------------------------------------------------------------------------
# sample...
def sample(self):
while not self.get_data_ready():
time.sleep(0.1)
return self.read_measurement()
def get_data_ready(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_GET_DATA_READY)
words = self.__read_words(1)
finally:
self.release_lock()
return bool(Decode.int(words[0], '>'))
def read_measurement(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_READ_MEASUREMENT)
words = self.__read_words(6)
finally:
self.release_lock()
co2 = Decode.float(words[0] + words[1], '>')
temp = Decode.float(words[2] + words[3], '>')
humid = Decode.float(words[4] + words[5], '>')
corrected_co2 = co2 + self.__baseline.sensor_baseline.offset
return SCD30Datum(corrected_co2, temp, humid)
# ----------------------------------------------------------------------------------------------------------------
# period...
def start_periodic_measurement(self, ambient_pressure_kpa=None):
if ambient_pressure_kpa is None:
ambient_pressure_kpa = self.DEFAULT_AMBIENT_PRESSURE
if not (self.MIN_PRESSURE <= ambient_pressure_kpa <= self.MAX_PRESSURE):
raise ValueError(ambient_pressure_kpa)
ambient_pressure_mbar = int(ambient_pressure_kpa * 10.0)
try:
self.obtain_lock()
self.__cmd(self.__CMD_START_PERIODIC_MEASUREMENT, arg=ambient_pressure_mbar)
self.__ambient_pressure_kpa = ambient_pressure_kpa
finally:
self.release_lock()
def stop_periodic_measurement(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_STOP_PERIODIC_MEASUREMENT)
finally:
self.release_lock()
def get_measurement_interval(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_MEASUREMENT_INTERVAL)
words = self.__read_words(1)
finally:
self.release_lock()
return Decode.int(words[0], '>')
def set_measurement_interval(self, interval):
if not (self.MIN_SAMPLING_INTERVAL <= interval <= self.MAX_SAMPLING_INTERVAL):
raise ValueError(interval)
try:
self.obtain_lock()
self.__cmd(self.__CMD_MEASUREMENT_INTERVAL, arg=interval)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# temperature_offset...
def get_temperature_offset(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_TEMPERATURE_OFFSET)
words = self.__read_words(1)
finally:
self.release_lock()
return Decode.int(words[0], '>') / 100
def set_temperature_offset(self, temp_offset):
int_offset = int(round(temp_offset * 100))
try:
self.obtain_lock()
self.__cmd(self.__CMD_TEMPERATURE_OFFSET, arg=int_offset)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# altitude...
def get_altitude(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_ALTITUDE)
words = self.__read_words(1)
finally:
self.release_lock()
return Decode.int(words[0], '>')
def set_altitude(self, altitude):
try:
self.obtain_lock()
self.__cmd(self.__CMD_ALTITUDE, arg=altitude)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# auto_self_calib...
def get_auto_self_calib(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_AUTO_SELF_CALIBRATION)
words = self.__read_words(1)
finally:
self.release_lock()
return bool(Decode.int(words[0], '>'))
def set_auto_self_calib(self, on):
auto_self_calib = 1 if on else 0
try:
self.obtain_lock()
self.__cmd(self.__CMD_AUTO_SELF_CALIBRATION, arg=auto_self_calib)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# forced_calib...
def get_forced_calib(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_FORCED_RECALIBRATION)
words = self.__read_words(1)
finally:
self.release_lock()
return Decode.int(words[0], '>')
def set_forced_calib(self, concentration_ppm):
if not (self.MIN_FORCED_CALIB <= concentration_ppm <= self.MAX_FORCED_CALIB):
raise ValueError(concentration_ppm)
try:
self.obtain_lock()
self.__cmd(self.__CMD_FORCED_RECALIBRATION, arg=concentration_ppm)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# reset...
def reset(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_RESET)
time.sleep(self.__RESET_DELAY)
finally:
self.release_lock()
# ----------------------------------------------------------------------------------------------------------------
# sensor...
def get_serial_no(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_READ_SERIAL_NUMBER)
words = self.__read_words(self.__SERIAL_NUM_WORDS)
finally:
self.release_lock()
return ''.join([chr(byte) for word in words for byte in word])
def get_firmware_version(self):
try:
self.obtain_lock()
self.__cmd(self.__CMD_READ_FIRMWARE_VERSION)
words = self.__read_words(1)
finally:
self.release_lock()
major = words[0][0]
minor = words[0][1]
return major, minor
# ----------------------------------------------------------------------------------------------------------------
def obtain_lock(self):
Lock.acquire(self.__lock_name, self.__LOCK_TIMEOUT)
self.__selector.enable(True, False)
time.sleep(0.001)
def release_lock(self):
self.__selector.enable(False, False)
time.sleep(0.001)
Lock.release(self.__lock_name)
@property
def __lock_name(self):
return "%s-0x%02x" % (self.__class__.__name__, self.__I2C_ADDR)
# ----------------------------------------------------------------------------------------------------------------
def __cmd(self, cmd, arg=None):
if arg:
values = list(Encode.int(arg, '>'))
values.append(self.__crc(values))
else:
values = ()
try:
I2C.Sensors.start_tx(self.__I2C_ADDR)
I2C.Sensors.write_addr16(cmd, *values)
finally:
I2C.Sensors.end_tx()
time.sleep(self.__CMD_DELAY)
def __read_words(self, word_count):
char_count = word_count * 3
words = []
chars = self.__read(char_count)
# print(["0x%02x" % char for char in chars])
for i in range(0, char_count, 3):
word = chars[i:i + 2]
crc = chars[i + 2]
if not self.__crc_check(word, crc):
raise ValueError(chars)
words.append(word)
return words
def __read(self, char_count):
try:
I2C.Sensors.start_tx(self.__I2C_ADDR)
chars = I2C.Sensors.read(char_count)
return chars
finally:
I2C.Sensors.end_tx()
def __crc_check(self, word, crc):
return crc == self.__crc(word)
def __crc(self, word):
crc = self.__CRC8_INIT
for byte in word:
crc ^= byte
for _ in range(8):
crc = 0xff & ((crc << 1) ^ self.__CRC8_POLYNOMIAL if crc & 0x80 else (crc << 1))
return crc
# ----------------------------------------------------------------------------------------------------------------
@property
def ambient_pressure_kpa(self):
return self.__ambient_pressure_kpa
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "SCD30:{selector:%s, baseline:%s, ambient_pressure_kpa:%s}" % \
(self.__selector, self.__baseline, self.ambient_pressure_kpa)
|
|
# Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from neutron.agent.common import ovs_lib
from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_dvr_process
from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge
from neutron.plugins.openvswitch.common import constants
class OVSTunnelBridge(ovs_bridge.OVSAgentBridge,
                      br_dvr_process.OVSDVRProcessMixin):
    """openvswitch agent tunnel bridge specific logic."""

    # Used by OVSDVRProcessMixin
    dvr_process_table_id = constants.DVR_PROCESS
    dvr_process_next_table_id = constants.PATCH_LV_TO_TUN

    def setup_default_table(self, patch_int_ofport, arp_responder_enabled):
        """Install the static flow pipeline of the tunnel bridge.

        Traffic from the integration bridge enters at patch_int_ofport and
        is classified through PATCH_LV_TO_TUN; tunnel-side tables default
        to drop until provision_local_vlan / setup_tunnel_port add flows.
        """
        # Table 0 (default) will sort incoming traffic depending on in_port
        self.add_flow(priority=1,
                      in_port=patch_int_ofport,
                      actions="resubmit(,%s)" %
                      constants.PATCH_LV_TO_TUN)
        self.add_flow(priority=0, actions="drop")

        if arp_responder_enabled:
            # ARP broadcast-ed request go to the local ARP_RESPONDER table to
            # be locally resolved
            # REVISIT(yamamoto): arp_op=arp.ARP_REQUEST
            self.add_flow(table=constants.PATCH_LV_TO_TUN,
                          priority=1,
                          proto='arp',
                          dl_dst="ff:ff:ff:ff:ff:ff",
                          actions=("resubmit(,%s)" %
                                   constants.ARP_RESPONDER))

        # PATCH_LV_TO_TUN table will handle packets coming from patch_int
        # unicasts go to table UCAST_TO_TUN where remote addresses are learnt
        self.add_flow(table=constants.PATCH_LV_TO_TUN,
                      priority=0,
                      dl_dst="00:00:00:00:00:00/01:00:00:00:00:00",
                      actions="resubmit(,%s)" % constants.UCAST_TO_TUN)

        # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding
        self.add_flow(table=constants.PATCH_LV_TO_TUN,
                      priority=0,
                      dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
                      actions="resubmit(,%s)" % constants.FLOOD_TO_TUN)

        # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id
        # for each tunnel type, and resubmit to table LEARN_FROM_TUN where
        # remote mac addresses will be learnt
        for tunnel_type in constants.TUNNEL_NETWORK_TYPES:
            self.add_flow(table=constants.TUN_TABLE[tunnel_type],
                          priority=0,
                          actions="drop")

        # LEARN_FROM_TUN table will have a single flow using a learn action to
        # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac
        # addresses (assumes that lvid has already been set by a previous flow)
        learned_flow = ("table=%s,"
                        "priority=1,"
                        "hard_timeout=300,"
                        "NXM_OF_VLAN_TCI[0..11],"
                        "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],"
                        "load:0->NXM_OF_VLAN_TCI[],"
                        "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],"
                        "output:NXM_OF_IN_PORT[]" %
                        constants.UCAST_TO_TUN)
        # Once remote mac addresses are learnt, output packet to patch_int
        self.add_flow(table=constants.LEARN_FROM_TUN,
                      priority=1,
                      actions="learn(%s),output:%s" %
                      (learned_flow, patch_int_ofport))

        # Egress unicast will be handled in table UCAST_TO_TUN, where remote
        # mac addresses will be learned. For now, just add a default flow that
        # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them
        # as broadcasts/multicasts
        self.add_flow(table=constants.UCAST_TO_TUN,
                      priority=0,
                      actions="resubmit(,%s)" %
                      constants.FLOOD_TO_TUN)

        if arp_responder_enabled:
            # If none of the ARP entries correspond to the requested IP, the
            # broadcast-ed packet is resubmitted to the flooding table
            self.add_flow(table=constants.ARP_RESPONDER,
                          priority=0,
                          actions="resubmit(,%s)" %
                          constants.FLOOD_TO_TUN)

        # FLOOD_TO_TUN will handle flooding in tunnels based on lvid,
        # for now, add a default drop action
        self.install_drop(table_id=constants.FLOOD_TO_TUN)

    def provision_local_vlan(self, network_type, lvid, segmentation_id,
                             distributed=False):
        """Map an incoming tunnel id to a local VLAN id for this network."""
        if distributed:
            table_id = constants.DVR_NOT_LEARN
        else:
            table_id = constants.LEARN_FROM_TUN
        self.add_flow(table=constants.TUN_TABLE[network_type],
                      priority=1,
                      tun_id=segmentation_id,
                      actions="mod_vlan_vid:%s,"
                      "resubmit(,%s)" %
                      (lvid, table_id))

    def reclaim_local_vlan(self, network_type, segmentation_id):
        """Remove the tunnel-id-to-local-VLAN mapping for this network."""
        self.delete_flows(table=constants.TUN_TABLE[network_type],
                          tun_id=segmentation_id)

    @staticmethod
    def _ofport_set_to_str(ports_set):
        # Render a set of ofports as a comma-separated output list.
        return ",".join(map(str, ports_set))

    def install_flood_to_tun(self, vlan, tun_id, ports, deferred_br=None):
        """(Re)write the flood flow for vlan: strip tag and output to ports."""
        br = deferred_br if deferred_br else self
        br.mod_flow(table=constants.FLOOD_TO_TUN,
                    dl_vlan=vlan,
                    actions="strip_vlan,set_tunnel:%s,output:%s" %
                    (tun_id, self._ofport_set_to_str(ports)))

    def delete_flood_to_tun(self, vlan, deferred_br=None):
        """Remove the flood flow for vlan."""
        br = deferred_br if deferred_br else self
        br.delete_flows(table=constants.FLOOD_TO_TUN, dl_vlan=vlan)

    def install_unicast_to_tun(self, vlan, tun_id, port, mac,
                               deferred_br=None):
        """Install a unicast flow for mac on vlan towards tunnel port."""
        br = deferred_br if deferred_br else self
        br.add_flow(table=constants.UCAST_TO_TUN,
                    priority=2,
                    dl_vlan=vlan,
                    dl_dst=mac,
                    actions="strip_vlan,set_tunnel:%s,output:%s" %
                    (tun_id, port))

    def delete_unicast_to_tun(self, vlan, mac, deferred_br=None):
        """Remove unicast flow(s); mac=None removes all flows for the vlan."""
        br = deferred_br if deferred_br else self
        if mac is None:
            br.delete_flows(table=constants.UCAST_TO_TUN,
                            dl_vlan=vlan)
        else:
            br.delete_flows(table=constants.UCAST_TO_TUN,
                            dl_vlan=vlan,
                            dl_dst=mac)

    def install_arp_responder(self, vlan, ip, mac, deferred_br=None):
        """Install an ARP responder flow answering for ip with mac on vlan."""
        br = deferred_br if deferred_br else self
        actions = constants.ARP_RESPONDER_ACTIONS % {
            'mac': netaddr.EUI(mac, dialect=netaddr.mac_unix),
            'ip': netaddr.IPAddress(ip),
        }
        br.add_flow(table=constants.ARP_RESPONDER,
                    priority=1,
                    proto='arp',
                    dl_vlan=vlan,
                    nw_dst='%s' % ip,
                    actions=actions)

    def delete_arp_responder(self, vlan, ip, deferred_br=None):
        """Remove ARP responder flow(s); ip=None removes all for the vlan."""
        br = deferred_br if deferred_br else self
        if ip is None:
            br.delete_flows(table=constants.ARP_RESPONDER,
                            proto='arp',
                            dl_vlan=vlan)
        else:
            br.delete_flows(table=constants.ARP_RESPONDER,
                            proto='arp',
                            dl_vlan=vlan,
                            nw_dst='%s' % ip)

    def setup_tunnel_port(self, network_type, port, deferred_br=None):
        """Classify traffic from a new tunnel port into its type's table."""
        br = deferred_br if deferred_br else self
        br.add_flow(priority=1,
                    in_port=port,
                    actions="resubmit(,%s)" %
                    constants.TUN_TABLE[network_type])

    def cleanup_tunnel_port(self, port, deferred_br=None):
        """Remove all flows matching on a removed tunnel port."""
        br = deferred_br if deferred_br else self
        br.delete_flows(in_port=port)

    def add_dvr_mac_tun(self, mac, port):
        """Output frames from a DVR mac directly, bypassing mac learning."""
        # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud
        # are not learnt, as they may result in flow explosions
        self.install_output(table_id=constants.DVR_NOT_LEARN,
                            priority=1,
                            eth_src=mac,
                            port=port)

    def remove_dvr_mac_tun(self, mac):
        """Remove the DVR mac bypass flow."""
        # REVISIT(yamamoto): match in_port as well?
        # NOTE(review): this passes table_id= while other delete_flows calls
        # in this class use table= - confirm delete_flows accepts both.
        self.delete_flows(table_id=constants.DVR_NOT_LEARN,
                          eth_src=mac)

    def deferred(self):
        """Return a deferred wrapper that batches flow operations."""
        return DeferredOVSTunnelBridge(self)
class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge):
    """Deferred variant of OVSTunnelBridge.

    Flow-manipulation methods named in _METHODS are proxied to the
    underlying bridge with deferred_br=self, so their flow changes go
    through the deferred (batched) machinery.
    """
    _METHODS = [
        'install_unicast_to_tun',
        'delete_unicast_to_tun',
        'install_flood_to_tun',
        'delete_flood_to_tun',
        'install_arp_responder',
        'delete_arp_responder',
        'setup_tunnel_port',
        'cleanup_tunnel_port',
    ]

    def __getattr__(self, name):
        if name not in self._METHODS:
            return super(DeferredOVSTunnelBridge, self).__getattr__(name)
        # Bind the real bridge's method, pre-filling deferred_br so the
        # call routes its flow mods back through this wrapper.
        target = getattr(self.br, name)
        return functools.partial(target, deferred_br=self)
|
|
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import itertools
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.hashcompat import md5_constructor
from django.utils.http import same_origin
from django.utils.log import getLogger
from django.utils.safestring import mark_safe
from django.utils.crypto import constant_time_compare
# Matches an opening <form> tag whose method attribute is POST (with or
# without quotes); used by CsrfResponseMiddleware to rewrite outgoing HTML.
_POST_FORM_RE = \
    re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
# Content types that may contain POST forms and are safe to rewrite.
_HTML_TYPES = ('text/html', 'application/xhtml+xml')
logger = getLogger('django.request')
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange
# Upper bound (exclusive) for random CSRF keys; Python 2 long literal.
_MAX_CSRF_KEY = 18446744073709551616L     # 2 << 63
# Human-readable rejection reasons, passed to the CSRF failure view.
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_COOKIE = "No CSRF or session cookie."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
    """Return the view callable used for CSRF rejections."""
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    """Generate a fresh random CSRF key (hex md5 digest)."""
    seed = "%s%s" % (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)
    return md5_constructor(seed).hexdigest()
def _make_legacy_session_token(session_id):
    """Derive the pre-Django-1.2 CSRF token from a session id."""
    return md5_constructor(settings.SECRET_KEY + session_id).hexdigest()
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a
    'Vary: Cookie' header to the outgoing response. For this reason, you
    may need to use this function lazily, as is done by the csrf context
    processor.
    """
    # Flag the cookie as used so process_response knows to (re)send it.
    request.META["CSRF_COOKIE_USED"] = True
    return request.META.get("CSRF_COOKIE", None)
def _sanitize_token(token):
    """Strip non-alphanumerics from token, returning a 'str' (Python 2).

    A cookie truncated to nothing yields a freshly generated key.
    """
    cleaned = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
    if cleaned:
        return cleaned
    # In case the cookie has been truncated to nothing at some point.
    return _get_new_csrf_key()
class CsrfViewMiddleware(object):
    """
    Middleware that requires a present and correct csrfmiddlewaretoken
    for POST requests that have a CSRF cookie, and sets an outgoing
    CSRF cookie.

    This middleware should be used in conjunction with the csrf_token template
    tag.
    """
    # The _accept and _reject methods currently only exist for the sake of the
    # requires_csrf_token decorator.
    def _accept(self, request):
        """Mark the request as checked and let it through."""
        # Avoid checking the request twice by adding a custom attribute to
        # request. This will be relevant when both decorator and middleware
        # are used.
        request.csrf_processing_done = True
        return None

    def _reject(self, request, reason):
        """Return the configured failure view's response for this request."""
        return _get_failure_view()(request, reason=reason)

    def process_view(self, request, callback, callback_args, callback_kwargs):
        """Ensure a CSRF cookie exists and, for POST, verify the token.

        Returns None to continue processing, or the failure view's
        response to reject the request.
        """
        if getattr(request, 'csrf_processing_done', False):
            return None

        # If the user doesn't have a CSRF cookie, generate one and store it in the
        # request, so it's available to the view. We'll store it in a cookie when
        # we reach the response.
        try:
            # In case of cookies from untrusted sources, we strip anything
            # dangerous at this point, so that the cookie + token will have the
            # same, sanitized value.
            request.META["CSRF_COOKIE"] = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
            cookie_is_new = False
        except KeyError:
            # No cookie, so create one. This will be sent with the next
            # response.
            request.META["CSRF_COOKIE"] = _get_new_csrf_key()
            # Set a flag to allow us to fall back and allow the session id in
            # place of a CSRF cookie for this request only.
            cookie_is_new = True

        # Wait until request.META["CSRF_COOKIE"] has been manipulated before
        # bailing out, so that get_token still works
        if getattr(callback, 'csrf_exempt', False):
            return None

        if request.method == 'POST':
            if getattr(request, '_dont_enforce_csrf_checks', False):
                # Mechanism to turn off CSRF checks for test suite. It comes after
                # the creation of CSRF cookies, so that everything else continues to
                # work exactly the same (e.g. cookies are sent etc), but before the
                # any branches that call reject()
                return self._accept(request)

            if request.is_secure():
                # Suppose user visits http://example.com/
                # An active network attacker,(man-in-the-middle, MITM) sends a
                # POST form which targets https://example.com/detonate-bomb/ and
                # submits it via javascript.
                #
                # The attacker will need to provide a CSRF cookie and token, but
                # that is no problem for a MITM and the session independent
                # nonce we are using. So the MITM can circumvent the CSRF
                # protection. This is true for any HTTP connection, but anyone
                # using HTTPS expects better! For this reason, for
                # https://example.com/ we need additional protection that treats
                # http://example.com/ as completely untrusted. Under HTTPS,
                # Barth et al. found that the Referer header is missing for
                # same-domain requests in only about 0.2% of cases or less, so
                # we can use strict Referer checking.
                referer = request.META.get('HTTP_REFERER')
                if referer is None:
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_REFERER, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_NO_REFERER)

                # Note that request.get_host() includes the port
                good_referer = 'https://%s/' % request.get_host()
                if not same_origin(referer, good_referer):
                    reason = REASON_BAD_REFERER % (referer, good_referer)
                    logger.warning('Forbidden (%s): %s' % (reason, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, reason)

            # If the user didn't already have a CSRF cookie, then fall back to
            # the Django 1.1 method (hash of session ID), so a request is not
            # rejected if the form was sent to the user before upgrading to the
            # Django 1.2 method (session independent nonce)
            if cookie_is_new:
                try:
                    session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
                    csrf_token = _make_legacy_session_token(session_id)
                except KeyError:
                    # No CSRF cookie and no session cookie. For POST requests,
                    # we insist on a CSRF cookie, and in this way we can avoid
                    # all CSRF attacks, including login CSRF.
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_COOKIE, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_NO_COOKIE)
            else:
                csrf_token = request.META["CSRF_COOKIE"]

            # check incoming token
            request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
            if request_csrf_token == "":
                # Fall back to X-CSRFToken, to make things easier for AJAX
                request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')

            # constant_time_compare avoids leaking token contents via timing.
            if not constant_time_compare(request_csrf_token, csrf_token):
                if cookie_is_new:
                    # probably a problem setting the CSRF cookie
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_CSRF_COOKIE, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_NO_CSRF_COOKIE)
                else:
                    logger.warning('Forbidden (%s): %s' % (REASON_BAD_TOKEN, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return self._reject(request, REASON_BAD_TOKEN)

        return self._accept(request)

    def process_response(self, request, response):
        """Send/refresh the CSRF cookie if the token was used this request."""
        if getattr(response, 'csrf_processing_done', False):
            return response

        # If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probably because a request middleware returned a response
        # (for example, contrib.auth redirecting to a login page).
        if request.META.get("CSRF_COOKIE") is None:
            return response

        if not request.META.get("CSRF_COOKIE_USED", False):
            return response

        # Set the CSRF cookie even if it's already set, so we renew the expiry timer.
        response.set_cookie(settings.CSRF_COOKIE_NAME,
                request.META["CSRF_COOKIE"], max_age = 60 * 60 * 24 * 7 * 52,
                domain=settings.CSRF_COOKIE_DOMAIN)
        # Content varies with the CSRF cookie, so set the Vary header.
        patch_vary_headers(response, ('Cookie',))
        response.csrf_processing_done = True
        return response
class CsrfResponseMiddleware(object):
    """
    DEPRECATED
    Middleware that post-processes a response to add a csrfmiddlewaretoken.

    This exists for backwards compatibility and as an interim measure until
    applications are converted to using the csrf_token template tag
    instead. It will be removed in Django 1.4.
    """
    def __init__(self):
        import warnings
        warnings.warn(
            "CsrfResponseMiddleware and CsrfMiddleware are deprecated; use CsrfViewMiddleware and the template tag instead (see CSRF documentation).",
            DeprecationWarning
        )

    def process_response(self, request, response):
        """Inject a hidden csrfmiddlewaretoken input into HTML POST forms."""
        if getattr(response, 'csrf_exempt', False):
            return response

        if response['Content-Type'].split(';')[0] in _HTML_TYPES:
            csrf_token = get_token(request)
            # If csrf_token is None, we have no token for this request, which probably
            # means that this is a response from a request middleware.
            if csrf_token is None:
                return response

            # ensure we don't add the 'id' attribute twice (HTML validity)
            # (Python 2: .next() consumes the id attribute on first use only.)
            idattributes = itertools.chain(("id='csrfmiddlewaretoken'",),
                                            itertools.repeat(''))
            def add_csrf_field(match):
                """Returns the matched <form> tag plus the added <input> element"""
                return mark_safe(match.group() + "<div style='display:none;'>" + \
                "<input type='hidden' " + idattributes.next() + \
                " name='csrfmiddlewaretoken' value='" + csrf_token + \
                "' /></div>")

            # Modify any POST forms
            response.content, n = _POST_FORM_RE.subn(add_csrf_field, response.content)
            if n > 0:
                # Content varies with the CSRF cookie, so set the Vary header.
                patch_vary_headers(response, ('Cookie',))

                # Since the content has been modified, any Etag will now be
                # incorrect. We could recalculate, but only if we assume that
                # the Etag was set by CommonMiddleware. The safest thing is just
                # to delete. See bug #9163
                del response['ETag']
        return response
class CsrfMiddleware(object):
    """
    Django middleware that adds protection against Cross Site
    Request Forgeries by adding hidden form fields to POST forms and
    checking requests for the correct value.

    Combines CsrfViewMiddleware and CsrfResponseMiddleware, which can be
    used independently. It is recommended to use only CsrfViewMiddleware
    and the csrf_token template tag in templates for inserting the token.
    """
    # Not implemented via inheritance because both component middlewares
    # define process_response.
    def __init__(self):
        self.response_middleware = CsrfResponseMiddleware()
        self.view_middleware = CsrfViewMiddleware()

    def process_response(self, request, resp):
        # Post-process the response first: it calls get_token(), which sets
        # the flag telling CsrfViewMiddleware.process_response that the CSRF
        # cookie needs to be sent.
        intermediate = self.response_middleware.process_response(request, resp)
        return self.view_middleware.process_response(request, intermediate)

    def process_view(self, request, callback, callback_args, callback_kwargs):
        return self.view_middleware.process_view(
            request, callback, callback_args, callback_kwargs)
|
|
"""
opentrons.protocols.parse: functions and state for parsing protocols
"""
import ast
import functools
import itertools
import json
import logging
import re
from io import BytesIO
from zipfile import ZipFile
from typing import Any, Dict, Optional, Union, Tuple, TYPE_CHECKING
import jsonschema # type: ignore
from opentrons.config import feature_flags as ff
from opentrons_shared_data import load_shared_data, protocol
from .types import (Protocol, PythonProtocol, JsonProtocol,
Metadata, APIVersion, MalformedProtocolError)
from .bundle import extract_bundle
if TYPE_CHECKING:
from opentrons_shared_data.labware.dev_types import LabwareDefinition
from opentrons_shared_data.protocol.dev_types import (
JsonProtocol as JsonProtocolDef
)
# Module-level logger for the protocol parser.
MODULE_LOG = logging.getLogger(__name__)
# match e.g. "2.0" but not "hi", "2", "2.0.1"
API_VERSION_RE = re.compile(r'^(\d+)\.(\d+)$')
# Highest JSON protocol schema version this parser understands.
MAX_SUPPORTED_JSON_SCHEMA_VERSION = 5
def _validate_v2_ast(protocol_ast: ast.Module):
    """Check that an apiLevel-2 protocol module is well formed.

    Requires exactly one top-level ``run`` function and no Protocol API
    v1 module imports.

    :raises MalformedProtocolError: if either requirement is violated
    """
    function_defs = [node for node in protocol_ast.body
                     if isinstance(node, ast.FunctionDef)]
    run_defs = [node for node in function_defs if node.name == 'run']
    # There must be precisely 1 one run function
    if len(run_defs) > 1:
        linestr = ', '.join(str(node.lineno) for node in run_defs)
        raise MalformedProtocolError(
            f'More than one run function is defined (lines {linestr})')
    if not run_defs:
        raise MalformedProtocolError(
            "No function 'run(ctx)' defined")
    if infer_version_from_imports(protocol_ast):
        raise MalformedProtocolError(
            'Protocol API v1 modules such as robot, instruments, and labware '
            'may not be imported in Protocol API V2 protocols'
        )
def version_from_string(vstr: str) -> APIVersion:
    """ Parse an API version from a string

    :param str vstr: The version string to parse
    :returns APIVersion: The parsed version
    :raises ValueError: if the version string is the wrong format
    """
    matches = API_VERSION_RE.match(vstr)
    if not matches:
        # Message fix: previously read "It should major.minor" - the
        # missing verb made the user-facing hint ungrammatical.
        raise ValueError(
            f'apiLevel {vstr} is incorrectly formatted. It should '
            'be major.minor, where both major and minor are numbers.')
    return APIVersion(
        major=int(matches.group(1)), minor=int(matches.group(2)))
def _parse_json(
        protocol_contents: str, filename: str = None) -> JsonProtocol:
    """ Parse a protocol known or at least suspected to be json """
    loaded = json.loads(protocol_contents)
    schema_version, checked = validate_json(loaded)
    return JsonProtocol(
        text=protocol_contents,
        filename=filename,
        contents=checked,
        schema_version=schema_version)
def _parse_python(
    protocol_contents: str,
    filename: str = None,
    bundled_labware: Dict[str, 'LabwareDefinition'] = None,
    bundled_data: Dict[str, bytes] = None,
    bundled_python: Dict[str, str] = None,
    extra_labware: Dict[str, 'LabwareDefinition'] = None,
) -> PythonProtocol:
    """ Parse a protocol known or at least suspected to be python """
    # Bundled (zipped) protocols keep their entry point under a well-known
    # name; plain files are parsed under their own filename.
    display_name = filename or '<protocol>'
    if display_name.endswith('.zip'):
        ast_filename = 'protocol.ot2.py'
    else:
        ast_filename = display_name
    parsed = ast.parse(protocol_contents, filename=ast_filename)
    metadata = extract_metadata(parsed)
    protocol = compile(parsed, filename=ast_filename, mode='exec')
    version = get_version(metadata, parsed)
    # v2 protocols carry extra structural requirements (one run() function,
    # no v1 imports); enforce them before handing back the code object.
    if version >= APIVersion(2, 0):
        _validate_v2_ast(parsed)
    return PythonProtocol(
        text=protocol_contents,
        filename=getattr(protocol, 'co_filename', '<protocol>'),
        contents=protocol,
        metadata=metadata,
        api_level=version,
        bundled_labware=bundled_labware,
        bundled_data=bundled_data,
        bundled_python=bundled_python,
        extra_labware=extra_labware)
def _parse_bundle(bundle: ZipFile, filename: str = None) -> PythonProtocol:  # noqa: C901
    """ Parse a bundled Python protocol """
    # Bundles are a v2-only feature; refuse them outright on v1 robots.
    if not ff.use_protocol_api_v2():
        raise RuntimeError(
            'Uploading a bundled protocol requires the robot to be set to '
            'Protocol API V2. Enable the \'Use Protocol API version 2\' '
            'toggle in the robot\'s Advanced Settings and restart the robot')
    contents = extract_bundle(bundle)
    parsed = _parse_python(
        contents.protocol, filename,
        contents.bundled_labware,
        contents.bundled_data,
        contents.bundled_python)
    if parsed.api_level < APIVersion(2, 0):
        raise RuntimeError('Bundled protocols must use Protocol API V2, ' +
                           f'got {parsed.api_level}')
    return parsed
def parse(
    protocol_file: Union[str, bytes],
    filename: str = None,
    extra_labware: Dict[str, 'LabwareDefinition'] = None,
    extra_data: Dict[str, bytes] = None
) -> Protocol:
    """ Parse a protocol from text.

    :param protocol_file: The protocol file, or for single-file protocols, a
                          string of the protocol contents.
    :param filename: The name of the protocol. Optional, but helps with
                     deducing the kind of protocol (e.g. if it ends with
                     '.json' we can treat it like json)
    :param extra_labware: Any extra labware defs that should be given to the
                          protocol. Ignored if the protocol is json or zipped
                          python.
    :param extra_data: Any extra data files that should be provided to the
                       protocol. Ignored if the protocol is json or zipped
                       python.
    :return types.Protocol: The protocol holder, a named tuple that stores the
                            data in the protocol for later simulation or
                            execution.
    """
    if filename and filename.endswith('.zip'):
        if not isinstance(protocol_file, bytes):
            raise RuntimeError('Please update your Run App version to '
                               'support uploading a .zip file')
        with ZipFile(BytesIO(protocol_file)) as bundle:
            result = _parse_bundle(bundle, filename)
        return result
    else:
        if isinstance(protocol_file, bytes):
            protocol_str = protocol_file.decode('utf-8')
        else:
            protocol_str = protocol_file

        if filename and filename.endswith('.json'):
            return _parse_json(protocol_str, filename)
        elif filename and filename.endswith('.py'):
            return _parse_python(
                protocol_str, filename, extra_labware=extra_labware,
                bundled_data=extra_data)

        # our jsonschema says the top level json kind is object. Strip
        # leading whitespace so a pretty-printed JSON file beginning with a
        # newline is still detected as JSON. (protocol_str is always str
        # here — bytes were decoded above — so the old b'{' comparison was
        # dead code and is dropped.)
        if protocol_str.lstrip().startswith('{'):
            return _parse_json(protocol_str, filename)
        else:
            return _parse_python(
                protocol_str, filename, extra_labware=extra_labware,
                bundled_data=extra_data)
def extract_metadata(parsed: ast.Module) -> Metadata:
    """ Pull the module-level ``metadata`` dict literal out of a protocol AST.

    Returns an empty dict when no such assignment exists; when several
    exist, the last one wins (matching runtime assignment order).
    """
    metadata: Metadata = {}
    # XXX This seems brittle and could probably do with
    # - enough love that we can remove the type: ignores
    # - some thought about exactly what types are allowed in metadata
    for node in parsed.body:
        if not isinstance(node, ast.Assign):
            continue
        target = node.targets[0]
        if isinstance(target, ast.Name) \
                and target.id == 'metadata' \
                and isinstance(node.value, ast.Dict):
            metadata = {
                k.s: v.s  # type: ignore
                for k, v in zip(node.value.keys, node.value.values)}
    return metadata
@functools.lru_cache(1)
def infer_version_from_imports(parsed: ast.Module) -> Optional[APIVersion]:
    """ Guess the protocol API generation from its imports alone.

    Returns APIVersion(1, 0) when the module imports any v1-only
    opentrons name (robot, instruments, modules, containers), otherwise
    None.

    NOTE(review): lru_cache(1) keys on the ast.Module object's identity
    and keeps the most recent module alive until the next call — fine
    for a single-entry cache, but worth confirming it was intentional.
    """
    # Imports in the form of `import opentrons.robot` will have an entry in
    # parsed.body[i].names[j].name in the form "opentrons.robot". Find those
    # imports and transform them to strip away the 'opentrons.' part.
    ot_imports = ['.'.join(name.name.split('.')[1:]) for name in
                  itertools.chain.from_iterable(
                      [obj.names for obj in parsed.body
                       if isinstance(obj, ast.Import)])
                  if 'opentrons' in name.name]
    # Imports in the form of `from opentrons import robot` (with or without an
    # `as ___` statement) will have an entry in parsed.body[i].module
    # containing "opentrons"
    ot_from_imports = [
        name.name for name in
        itertools.chain.from_iterable(
            [obj.names for obj in parsed.body
             if isinstance(obj, ast.ImportFrom)
             and obj.module
             and 'opentrons' in obj.module])
    ]
    # If any of these are populated, filter for entries with v1-specific terms
    opentrons_imports = set(ot_imports + ot_from_imports)
    v1_markers = set(('robot', 'instruments', 'modules', 'containers'))
    v1evidence = v1_markers.intersection(opentrons_imports)
    if v1evidence:
        return APIVersion(1, 0)
    else:
        return None
def version_from_metadata(metadata: Metadata) -> APIVersion:
    """ Build an API version from metadata, if we can.

    If there is no apiLevel key, raise a KeyError.
    If the apiLevel value is malformed, raise a ValueError.
    """
    try:
        requested_level = str(metadata['apiLevel'])
    except KeyError:
        raise KeyError('apiLevel')
    # Historical protocols used a bare '1'; map it to 1.0 explicitly.
    if requested_level == '1':
        return APIVersion(1, 0)
    return version_from_string(requested_level)
def get_version(metadata: Metadata, parsed: ast.Module) -> APIVersion:
    """
    Infer protocol API version based on a combination of metadata and imports.

    If a protocol specifies its API version using the 'apiLevel' key of a top-
    level dict variable named `metadata`, the value for that key will be
    returned as the version.

    If that variable does not exist or if it does not contain the 'apiLevel'
    key, the API version will be inferred from the imports. A script with an
    import containing 'robot', 'instruments', or 'modules' will be assumed to
    be an APIv1 protocol. If none of these are present, it is assumed to be an
    APIv2 protocol (note that 'labware' is not in this list, as there is a
    valid APIv2 import named 'labware').
    """
    try:
        return version_from_metadata(metadata)
    except KeyError:
        # No apiLevel key in metadata; fall through to import inference.
        pass
    from_imports = infer_version_from_imports(parsed)
    if from_imports is None:
        raise RuntimeError(
            'If this is not an API v1 protocol, you must specify the target '
            'api level in the apiLevel key of the metadata. For instance, '
            'metadata={"apiLevel": "2.0"}')
    return from_imports
def _get_protocol_schema_version(protocol_json: Dict[Any, Any]) -> int:
# v3 and above uses `schemaVersion: integer`
version = protocol_json.get('schemaVersion')
if version:
return int(version)
# v1 uses 1.x.x and v2 uses 2.x.x
legacyKebabVersion = protocol_json.get('protocol-schema')
# No minor/patch schemas ever were released,
# do not permit protocols with nonexistent schema versions to load
if legacyKebabVersion == '1.0.0':
return 1
elif legacyKebabVersion == '2.0.0':
return 2
elif legacyKebabVersion:
raise RuntimeError(
f'No such schema version: "{legacyKebabVersion}". Did you mean ' +
'"1.0.0" or "2.0.0"?')
# no truthy value for schemaVersion or protocol-schema
raise RuntimeError(
'Could not determine schema version for protocol. ' +
'Make sure there is a version number under "schemaVersion"')
def _get_schema_for_protocol(version_num: int) -> protocol.Schema:
    """ Retrieve the json schema for a protocol schema version
    """
    # TODO(IL, 2020/03/05): use $otSharedSchema, but maybe wait until
    # deprecating v1/v2 JSON protocols?
    if version_num > MAX_SUPPORTED_JSON_SCHEMA_VERSION:
        raise RuntimeError(
            f'JSON Protocol version {version_num} is not yet ' +
            'supported in this version of the API')
    try:
        raw_schema = load_shared_data(f'protocol/schemas/{version_num}.json')
    except FileNotFoundError:
        raw_schema = None  # type: ignore
    if not raw_schema:
        raise RuntimeError('JSON Protocol schema "{}" does not exist'
                           .format(version_num))
    return json.loads(raw_schema.decode('utf-8'))
def validate_json(
        protocol_json: Dict[Any, Any]) -> Tuple[int, 'JsonProtocolDef']:
    """ Validates a json protocol and returns its schema version """
    # Check if this is actually a labware definition masquerading as a
    # protocol: labware files validate cleanly against the labware schema.
    labware_schema_v2 = json.loads(load_shared_data(
        'labware/schemas/2.json').decode('utf-8'))
    try:
        jsonschema.validate(protocol_json, labware_schema_v2)
    except jsonschema.ValidationError:
        pass
    else:
        # Validation *succeeding* means the upload is labware, not a protocol.
        MODULE_LOG.error("labware uploaded instead of protocol")
        raise RuntimeError(
            'The file you are trying to open is a JSON labware definition, '
            'and therefore can not be opened here. Please try '
            'uploading a JSON protocol file instead.')
    # this is now either a protocol or something corrupt
    version_num = _get_protocol_schema_version(protocol_json)
    if version_num <= 2:
        raise RuntimeError(
            f'JSON protocol version {version_num} is '
            'deprecated. Please upload your protocol into Protocol '
            'Designer and save it to migrate the protocol to a later '
            'version. This error might mean a labware '
            'definition was specified instead of a protocol.')
    if version_num > MAX_SUPPORTED_JSON_SCHEMA_VERSION:
        raise RuntimeError(
            f'The protocol you are trying to open is a JSONv{version_num} '
            'protocol and is not supported by your current robot server '
            'version. Please update your OT-2 App and robot server to the '
            'latest version and try again.'
        )
    protocol_schema = _get_schema_for_protocol(version_num)
    # instruct schema how to resolve all $ref's used in protocol schemas
    resolver = jsonschema.RefResolver(
        protocol_schema.get('$id', ''),
        protocol_schema,
        store={
            "opentronsLabwareSchemaV2": labware_schema_v2
        })
    # do the validation
    try:
        jsonschema.validate(protocol_json, protocol_schema, resolver=resolver)
    except jsonschema.ValidationError:
        MODULE_LOG.exception("JSON protocol validation failed")
        raise RuntimeError(
            'This may be a corrupted file or a JSON file that is not an '
            'Opentrons JSON protocol.')
    else:
        return version_num, protocol_json  # type: ignore
|
|
#!/usr/bin/python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Automatically update TensorFlow version in source files
#
# Usage:
# ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc1
# ./tensorflow/tools/ci_build/update_version.py --nightly
#
"""Update version of TensorFlow script."""
# pylint: disable=superfluous-parens
import argparse
import os
import re
import subprocess
import time
# File parameters.
TF_SRC_DIR = "tensorflow"
VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
README_MD = "./README.md"
TENSORFLOW_BZL = "%s/tensorflow.bzl" % TF_SRC_DIR
# Paths whose existence is verified before any rewrite happens.
RELEVANT_FILES = [TF_SRC_DIR, VERSION_H, SETUP_PY, README_MD]

# Version type parameters.
NIGHTLY_VERSION = 1
REGULAR_VERSION = 0
def check_existence(filename):
  """Raise RuntimeError unless *filename* (file or dir) exists."""
  if os.path.exists(filename):
    return
  raise RuntimeError("%s not found. Are you under the TensorFlow source root"
                     " directory?" % filename)
def check_all_files():
  """Verify every file this script rewrites is present before starting."""
  for required_path in RELEVANT_FILES:
    check_existence(required_path)
def replace_string_in_line(search, replace, filename):
  """Rewrite *filename* in place, substituting regex *search* with *replace*."""
  with open(filename, "r") as source:
    original = source.read()
  updated = re.sub(search, replace, original)
  with open(filename, "w") as sink:
    sink.write(updated)
class Version(object):
  """SemVer-style version holder used to rewrite TF version strings.

  Attributes:
    major/minor/patch: version component strings
    identifier_string: pre-release suffix including leading '-' (may be "")
    version_type: REGULAR_VERSION or NIGHTLY_VERSION
    string: the assembled "major.minor.patch<suffix>" form
  """

  def __init__(self, major, minor, patch, identifier_string, version_type):
    """Constructor.

    Args:
      major: major string eg. (1)
      minor: minor string eg. (3)
      patch: patch string eg. (1)
      identifier_string: extension string eg. (-rc0)
      version_type: version parameter ((REGULAR|NIGHTLY)_VERSION)
    """
    self.major = major
    self.minor = minor
    self.patch = patch
    self.identifier_string = identifier_string
    self.version_type = version_type
    self._update_string()

  def _update_string(self):
    # Cache the assembled form; refreshed whenever a component changes.
    self.string = "{}.{}.{}{}".format(
        self.major, self.minor, self.patch, self.identifier_string)

  def __str__(self):
    return self.string

  def set_identifier_string(self, identifier_string):
    self.identifier_string = identifier_string
    self._update_string()

  @property
  def pep_440_str(self):
    # PEP 440 forbids the '-' separator, so strip it from the result.
    if self.version_type == REGULAR_VERSION:
      raw = "{}.{}.{}{}".format(
          self.major, self.minor, self.patch, self.identifier_string)
    else:
      # Nightly versions drop the patch component entirely.
      raw = "{}.{}.{}".format(
          self.major, self.minor, self.identifier_string)
    return raw.replace("-", "")

  @staticmethod
  def parse_from_string(string, version_type):
    """Returns version object from Semver string.

    Args:
      string: version string
      version_type: version parameter

    Raises:
      RuntimeError: If the version string is not valid.
    """
    # Check validity of new version string.
    if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string):
      raise RuntimeError("Invalid version string: %s" % string)
    major, minor, extension = string.split(".", 2)
    # Split "patch-identifier" into its two pieces, if an identifier exists.
    patch, dash, identifier = extension.partition("-")
    return Version(major, minor, patch, dash + identifier, version_type)
def get_current_semver_version():
  """Returns a Version object of current version.

  Returns:
    version: Version object of current SemVer string based on information from
    core/public/version.h
  """
  # Get current version information. Bug fix: the file handle was opened
  # without ever being closed; use a context manager instead.
  with open(VERSION_H, "r") as version_file:
    for line in version_file:
      major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line)
      minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line)
      patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line)
      extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line)
      if major_match:
        old_major = major_match.group(1)
      if minor_match:
        old_minor = minor_match.group(1)
      if patch_match:
        old_patch_num = patch_match.group(1)
      if extension_match:
        old_extension = extension_match.group(1)
        # The suffix define comes after the numeric defines in version.h,
        # so we can stop scanning once it is found.
        break

  # A -dev suffix marks a nightly build.
  if "dev" in old_extension:
    version_type = NIGHTLY_VERSION
  else:
    version_type = REGULAR_VERSION

  return Version(old_major,
                 old_minor,
                 old_patch_num,
                 old_extension,
                 version_type)
def update_version_h(old_version, new_version):
  """Update tensorflow/core/public/version.h."""
  # Each entry: a define template plus old/new values; rewritten in order.
  defines = [
      ("#define TF_MAJOR_VERSION %s", old_version.major, new_version.major),
      ("#define TF_MINOR_VERSION %s", old_version.minor, new_version.minor),
      ("#define TF_PATCH_VERSION %s", old_version.patch, new_version.patch),
      ("#define TF_VERSION_SUFFIX \"%s\"",
       old_version.identifier_string, new_version.identifier_string),
  ]
  for template, old_value, new_value in defines:
    replace_string_in_line(template % old_value, template % new_value,
                           VERSION_H)
def update_setup_dot_py(old_version, new_version):
  """Update setup.py."""
  old_line = "_VERSION = '%s'" % old_version.string
  new_line = "_VERSION = '%s'" % new_version.string
  replace_string_in_line(old_line, new_line, SETUP_PY)
def update_readme(old_version, new_version):
  """Update README."""
  pep_440_str = new_version.pep_440_str
  # Bug fix: `[[:alnum:]]` is a POSIX character class, which Python's `re`
  # module does not support (it parses as a literal class of the characters
  # '[', ':', 'a', 'l', 'n', 'u', 'm' followed by ']'), so the old pattern
  # could never match a version like "1.4.0-rc1". Use an explicit
  # alphanumeric class instead.
  replace_string_in_line(r"%s\.%s\.([0-9A-Za-z]+)-" % (old_version.major,
                                                       old_version.minor),
                         "%s-" % pep_440_str, README_MD)
def update_tensorflow_bzl(old_version, new_version):
  """Update tensorflow.bzl."""
  def bare_version(version):
    # "major.minor.patch" with no pre-release suffix.
    return "%s.%s.%s" % (version.major, version.minor, version.patch)
  replace_string_in_line('VERSION = "%s"' % bare_version(old_version),
                         'VERSION = "%s"' % bare_version(new_version),
                         TENSORFLOW_BZL)
def major_minor_change(old_version, new_version):
  """Check if a major or minor change occurred."""
  # Idiom fix: return the boolean expression directly instead of the
  # `if cond: return True / return False` pattern.
  return (old_version.major != new_version.major or
          old_version.minor != new_version.minor)
def check_for_lingering_string(lingering_string):
  """Check for given lingering strings.

  Greps TF_SRC_DIR for `lingering_string` (with '.' escaped) and prints
  any hits for manual review; grep exiting non-zero (no matches) is
  treated as "nothing found".
  """
  formatted_string = lingering_string.replace(".", r"\.")
  try:
    linger_str_output = subprocess.check_output(
        ["grep", "-rnoH", formatted_string, TF_SRC_DIR])
    # NOTE(review): grep output typically ends in a newline, so split("\n")
    # leaves a trailing '' entry and this list is non-empty whenever grep
    # succeeds — presumably acceptable since this path only warns; confirm.
    linger_strs = linger_str_output.decode("utf8").split("\n")
  except subprocess.CalledProcessError:
    linger_strs = []

  if linger_strs:
    print("WARNING: Below are potentially instances of lingering old version "
          "string \"%s\" in source directory \"%s/\" that are not "
          "updated by this script. Please check them manually!"
          % (lingering_string, TF_SRC_DIR))
    for linger_str in linger_strs:
      print(linger_str)
  else:
    print("No lingering old version strings \"%s\" found in source directory"
          " \"%s/\". Good." % (lingering_string, TF_SRC_DIR))
def check_for_old_version(old_version, new_version):
  """Check for old version references."""
  stale_strings = [old_version.string, old_version.pep_440_str]
  # A major/minor bump also leaves "rX.Y" branch names behind.
  if major_minor_change(old_version, new_version):
    stale_strings.append("r%s.%s" % (old_version.major, old_version.minor))
  for stale in stale_strings:
    check_for_lingering_string(stale)
def main():
  """This script updates all instances of version in the tensorflow directory.

  Requirements:
    version: The version tag
    OR
    nightly: Create a nightly tag with current date

  Raises:
    RuntimeError: If the script is not being run from tf source dir
  """
  parser = argparse.ArgumentParser(description="Cherry picking automation.")

  # Arg information
  parser.add_argument("--version",
                      help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>",
                      default="")
  parser.add_argument("--nightly",
                      help="disable the service provisioning step",
                      action="store_true")

  args = parser.parse_args()

  check_all_files()
  old_version = get_current_semver_version()

  if args.nightly:
    if args.version:
      # Explicit version plus --nightly: tag it with today's -dev suffix.
      new_version = Version.parse_from_string(args.version, NIGHTLY_VERSION)
      new_version.set_identifier_string("-dev" + time.strftime("%Y%m%d"))
    else:
      # No version given: reuse the current version with a -dev suffix.
      new_version = Version(old_version.major,
                            str(old_version.minor),
                            old_version.patch,
                            "-dev" + time.strftime("%Y%m%d"),
                            NIGHTLY_VERSION)
  else:
    # NOTE(review): with neither --nightly nor --version, the empty string
    # reaches parse_from_string, which raises RuntimeError — presumably the
    # intended input validation; confirm.
    new_version = Version.parse_from_string(args.version, REGULAR_VERSION)

  update_version_h(old_version, new_version)
  update_setup_dot_py(old_version, new_version)
  update_readme(old_version, new_version)
  update_tensorflow_bzl(old_version, new_version)

  # Print transition details.
  print("Major: %s -> %s" % (old_version.major, new_version.major))
  print("Minor: %s -> %s" % (old_version.minor, new_version.minor))
  print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch))

  check_for_old_version(old_version, new_version)


if __name__ == "__main__":
  main()
|
|
from winnow.utils import deep_copy_dict as deepcopy
import jsonschema
from decimal import Decimal
import os
from winnow.options import OptionsSet
from winnow import inline
from winnow import utils
from winnow import validation
from winnow.exceptions import OptionsExceptionFailedValidation, OptionsExceptionReferenceError
from winnow.constants import *
def add_doc(target, doc, validation=True):
    """Attach a defensive copy of doc to target, validating by default."""
    copied = deepcopy(doc)
    _set_doc(target, copied, validation=validation)
def allows(source_a, source_b):
    """True when source_a's inlined option set permits source_b's."""
    inlined_a = OptionsSet(inline.get_inlined_options(source_a))
    inlined_b = OptionsSet(inline.get_inlined_options(source_b))
    return inlined_a.allows(inlined_b)


def is_allowed_by(source_a, source_b):
    """Converse of allows(): True when source_b permits source_a."""
    return allows(source_b, source_a)


def disallowed_keys(source_a, source_b):
    """Option keys of source_b that source_a's option set rules out."""
    inlined_a = OptionsSet(inline.get_inlined_options(source_a))
    inlined_b = OptionsSet(inline.get_inlined_options(source_b))
    return inlined_b.disallowed_keys(inlined_a)


def filter_allows(filter_source, possible):
    """Keep only the candidates that filter_source allows."""
    return [candidate for candidate in possible
            if allows(filter_source, candidate)]


def filter_allowed_by(filter_source, possible):
    """Keep only the candidates that filter_source is allowed by."""
    return [candidate for candidate in possible
            if is_allowed_by(filter_source, candidate)]
def merge(source_a, source_b, target, doc, validation=True):
    """Merge the option sets of two sources into target.

    target receives a copy of `doc` whose options are the merge of
    source_a's and source_b's; validation errors accumulated on either
    input doc are carried through onto the result, and the merge is
    recorded in target's history (replayed from source_a).
    """
    doc_a = source_a.get_doc()
    doc_b = source_b.get_doc()
    # get the options from both sources (copies: merging mutates them)
    options_a = deepcopy(source_a.get_options_dict())
    options_b = deepcopy(source_b.get_options_dict())
    errors = []
    # carry any errors through
    errors_a = doc_a.get("errors")
    if errors_a:
        errors += errors_a
    errors_b = doc_b.get("errors")
    if errors_b:
        errors += errors_b
    merged_options = inline._merge_option_dicts(source_a, options_a, options_b, doc_a, doc_b, errors=errors)
    # put this merged options into a copy of the doc
    new_doc = deepcopy(doc)
    new_doc[OPTIONS_KEY] = merged_options
    if errors:
        new_doc["errors"] = errors
    # history: replay source_a's history, then record this merge step
    target.clone_history_from(source_a)
    _add_start_if_needed(source_a, target)
    _set_doc(target, new_doc, validation=validation)
    target.add_history_action(action=HISTORY_ACTION_MERGE,
                              input=source_b,
                              output_type=doc.get("type"))
def scope(source, scopes, target, doc):
    """Copy doc onto target with options outside `scopes` trimmed away."""
    scoped_doc = deepcopy(doc)
    _trim_out_off_scope(scoped_doc[OPTIONS_KEY], set(scopes))
    target.clone_history_from(source)
    _add_start_if_needed(source, target)
    _set_doc(target, scoped_doc)
def default_choices(source, scopes):
    """Return the default option choices for source, limited to `scopes`."""
    doc = source.get_doc()
    # Work on a copy: inlining and trimming both mutate in place.
    options_dict = deepcopy(source.get_options_dict())
    # expand refs, then drop anything outside the requested scopes
    ref_hashes = {}
    inline.inline_refs(options_dict, doc, source, ref_hashes)
    _trim_out_off_scope(options_dict, set(scopes))
    # wrap in an options set and pull out its defaults
    return OptionsSet(options_dict).default().store
def quantify(source, target, doc, validation=True):
    """Copy doc onto target, ensuring a 'quantity' option exists.

    If the source options already define 'quantity' it is kept; otherwise
    a default numeric::range quantity (1..100, default 1) is injected.
    """
    default_quantity = {
        u"type": u"numeric::range",
        u"name": u"Quantity",
        u"default": Decimal("1"),
        u"max": Decimal("100"),
        u"min": Decimal("1"),
    }
    # Bug fix: setdefault() previously mutated the dict returned by
    # source.get_options_dict() in place, injecting a quantity option into
    # the *source* as a side effect. Every sibling operation (merge,
    # default_choices) copies before mutating; do the same here.
    options_dict = deepcopy(source.get_options_dict())
    options_dict.setdefault(u'quantity', default_quantity)
    new_doc = deepcopy(doc)
    new_doc[OPTIONS_KEY] = options_dict
    target.clone_history_from(source)
    _add_start_if_needed(source, target)
    _set_doc(target, new_doc, validation=validation)
    target.add_history_action(action=HISTORY_ACTION_QUANTIFY,
                              output_type=doc.get("type"))
def _trim_out_off_scope(node, scopes):
    """Recursively delete dict children whose "scopes" don't intersect `scopes`.

    Mutates `node` in place. A child dict carrying a "scopes" key is removed
    entirely when its scope set is disjoint from the requested set.
    """
    if isinstance(node, dict):
        # NOTE(review): deleting from node while iterating node.keys() is
        # only safe on Python 2, where keys() returns a list snapshot; a
        # Python 3 port would need list(node.keys()).
        for key in node.keys():
            child = node[key]
            if isinstance(child, dict):
                if "scopes" in child.keys():
                    child_scopes = set(child["scopes"])
                    if scopes.isdisjoint(child_scopes):
                        del node[key]
                else:
                    # no scopes declared: keep it, but trim its descendants
                    _trim_out_off_scope(child, scopes)
            if isinstance(child, list):
                _trim_out_off_scope(child, scopes)
    # recursively trim inside lists
    if isinstance(node, list):
        # iterate a copy (node[:]) because matching children are removed
        for i, child in enumerate(node[:]):
            if isinstance(child, dict):
                if "scopes" in child.keys():
                    child_scopes = set(child["scopes"])
                    if scopes.isdisjoint(child_scopes):
                        node.remove(child)
                    else:
                        _trim_out_off_scope(child, scopes)
            if isinstance(child, list):
                _trim_out_off_scope(child, scopes)
def expand(source, target, validation=True):
    """Inline all refs of source's doc into target; returns the ref hashes."""
    expanded_doc = deepcopy(source.get_doc())
    target.clone_history_from(source)
    ## inline references
    ref_hashes = {}
    inline.inline_refs(expanded_doc, expanded_doc, source, ref_hashes)
    _set_doc(target, expanded_doc, validation=validation)
    return ref_hashes
def _patch_upstream(source, target, options_set):
    """Walk the upstream chain, patching options_set at each ancestor.

    Returns the fully patched OptionsSet once a doc without an "upstream"
    key is reached. When target is not None, each patch step is recorded
    in its history.
    """
    doc = source.get_doc()
    if u"upstream" in doc.keys():
        upstream_delegate = source.get_upstream()
        if upstream_delegate is None:
            raise OptionsExceptionReferenceError("Winnow Reference Error: Cannot find upstream reference %s" % doc[u"upstream"])
    else:
        # base of the chain: nothing left to patch against
        return options_set
    upstream_options = OptionsSet(upstream_delegate.get_options_dict())
    # presumably fills gaps from the upstream set — see OptionsSet.patch
    patched_options_set = options_set.patch(upstream_options)
    if target is not None:
        _add_start_if_needed(source, target)
        target.add_history_action(action=HISTORY_ACTION_PATCH,
                                  input=upstream_delegate,
                                  output_type=doc.get("type"))
    return _patch_upstream(upstream_delegate, target, patched_options_set)
def _add_start_if_needed(source, target):
    """Seed target's history with a START action the first time it is used."""
    if not target.history_is_empty():
        return
    doc = source.get_doc()
    target.add_history_action(action=HISTORY_ACTION_START,
                              input=source,
                              output_type=doc.get("type"))
def asset_props(doc, dl_base=None):
    """Collect asset descriptors referenced anywhere inside doc."""
    # Without a base path there is nothing to resolve assets against.
    if doc.get("path") is None:
        return []
    relpaths = []
    _walk_dict_for_assets(doc, relpaths)
    return [asset_from_relpath(doc, relpath, dl_base=dl_base)
            for relpath in relpaths]
def asset_from_relpath(doc, relpath, dl_base=None):
    """Build an asset descriptor for relpath, resolved against doc['path'].

    Anything containing '://' is treated as an absolute URL and passed
    through untouched; doc's own "base" wins over dl_base when present.
    """
    if '://' in relpath:
        resolved = relpath
    else:
        resolved = os.path.normpath("%s/%s" % (doc['path'], relpath))
    return {
        "path": resolved,
        "source": doc['source'],
        "base": doc.get("base", dl_base),
        "relpath": relpath,
    }
def _walk_dict_for_assets(node, found):
if isinstance(node, dict):
if u"asset" in node.keys():
found.append(node[u"asset"])
else:
for k, v in node.iteritems():
_walk_dict_for_assets(v, found)
if isinstance(node, list):
for v in node:
_walk_dict_for_assets(v, found)
def validate(doc):
    """Validate doc against the winnow schema.

    Raises OptionsExceptionFailedValidation wrapping the underlying
    jsonschema.ValidationError on failure.
    """
    try:
        validation.validate(doc)
    # Portability fix: `except X, e` is Python-2-only syntax; the `as`
    # form below is valid on Python 2.6+ and Python 3.
    except jsonschema.ValidationError as e:
        raise OptionsExceptionFailedValidation(e)
def _set_doc(target, doc, validation=True):
    """Attach doc to target (validating first unless disabled) plus its hash."""
    if validation:
        validate(doc)
    serialized = utils.json_dumps(doc)
    target.set_doc(doc)
    target.set_doc_hash(utils.get_doc_hash(serialized))
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Interactive shell based on Django:
#
# Copyright (c) 2005, the Lawrence Journal-World
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CLI interface for nova management.
"""
from __future__ import print_function
import os
import sys
import netaddr
from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova import availability_zones
from nova.cells import rpc_driver
from nova.compute import flavors
from nova import config
from nova import context
from nova import db
from nova.db import migration
from nova import exception
from nova.openstack.common import cliutils
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova import quota
from nova import servicegroup
from nova import version
CONF = cfg.CONF
# Pull in option definitions registered by other nova modules so this CLI
# can read them from the loaded configuration.
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('multi_host', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('vlan_start', 'nova.network.manager')
CONF.import_opt('vpn_start', 'nova.network.manager')
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('public_interface', 'nova.network.linux_net')
# Shared quota engine used by the quota-management commands below.
QUOTAS = quota.QUOTAS
# Decorators for actions
def args(*args, **kwargs):
    """Decorator that records an argparse argument spec on the function.

    Specs accumulate on func.args; each application inserts at the front,
    so the most recently applied (outermost) decorator's spec lists first.
    """
    def _decorator(func):
        func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
        return func
    return _decorator
def param2id(object_id):
    """Helper function to convert various volume id types to internal id.

    args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10'
    """
    # EC2-style ids contain a dash; plain ids pass straight through.
    if '-' not in object_id:
        return object_id
    return ec2utils.ec2_vol_id_to_uuid(object_id)
class VpnCommands(object):
    """Class for managing VPNs."""

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    @args('--ip', metavar='<IP Address>', help='IP Address')
    @args('--port', metavar='<Port>', help='Port')
    def change(self, project_id, ip, port):
        """Change the ip and port for a vpn.

        this will update all networks associated with a project
        not sure if that's the desired behavior or not, patches accepted
        """
        # TODO(tr3buchet): perhaps this shouldn't update all networks
        # associated with a project in the future
        admin_context = context.get_admin_context()
        networks = db.project_get_networks(admin_context, project_id)
        for network in networks:
            # Every network in the project gets the same VPN endpoint.
            db.network_update(admin_context,
                              network['id'],
                              {'vpn_public_address': ip,
                               'vpn_public_port': int(port)})
class ShellCommands(object):
    """Interactive shell launchers (bpython -> ipython -> plain python)."""

    def bpython(self):
        """Runs a bpython shell.

        Falls back to Ipython/python shell if unavailable
        """
        self.run('bpython')

    def ipython(self):
        """Runs an Ipython shell.

        Falls back to Python shell if unavailable
        """
        self.run('ipython')

    def python(self):
        """Runs a python shell.

        Falls back to Python shell if unavailable
        """
        self.run('python')

    @args('--shell', metavar='<bpython|ipython|python >',
          help='Python shell')
    def run(self, shell=None):
        """Runs a Python interactive interpreter.

        Tries the requested shell first, degrading along the
        bpython -> ipython -> python chain when an import fails.
        """
        if not shell:
            shell = 'bpython'

        if shell == 'bpython':
            try:
                import bpython
                bpython.embed()
            except ImportError:
                shell = 'ipython'
        if shell == 'ipython':
            try:
                import IPython
                # Explicitly pass an empty list as arguments, because
                # otherwise IPython would use sys.argv from this script.
                # NOTE(review): IPython.Shell was removed in IPython 0.11;
                # this path presumably targets older IPython — confirm.
                shell = IPython.Shell.IPShell(argv=[])
                shell.mainloop()
            except ImportError:
                shell = 'python'

        if shell == 'python':
            import code
            try:
                # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try',
                # because we already know 'readline' was imported successfully.
                readline.parse_and_bind("tab:complete")
            code.interact()

    @args('--path', metavar='<path>', help='Script path')
    def script(self, path):
        """Runs the script from the specified path with flags set properly.

        arguments: path
        """
        # NOTE(review): exec of an arbitrary file — operator-supplied paths
        # only; never expose this to untrusted input.
        exec(compile(open(path).read(), path, 'exec'), locals(), globals())
def _db_error(caught_exception):
    """Print a DB error plus a hint to run 'nova-manage db sync', then exit."""
    print(caught_exception)
    hint = _("The above error may show that the database has not "
             "been created.\nPlease create a database using "
             "'nova-manage db sync' before running this command.")
    print(hint)
    exit(1)
class ProjectCommands(object):
    """Class for managing projects."""

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    @args('--user', dest='user_id', metavar='<User name>',
          help='User name')
    @args('--key', metavar='<key>', help='Key')
    @args('--value', metavar='<value>', help='Value')
    def quota(self, project_id, user_id=None, key=None, value=None):
        """
        Create, update or display quotas for project/user

        If no quota key is provided, the quota will be displayed.
        If a valid quota key is provided and it does not exist,
        it will be created. Otherwise, it will be updated.

        Returns 2 on an invalid key or value; None on success.
        """
        ctxt = context.get_admin_context()
        if user_id:
            quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
        else:
            user_id = None
            quota = QUOTAS.get_project_quotas(ctxt, project_id)
        # if key is None, that means we need to show the quotas instead
        # of updating them
        if key:
            settable_quotas = QUOTAS.get_settable_quotas(ctxt,
                                                         project_id,
                                                         user_id=user_id)
            if key in quota:
                minimum = settable_quotas[key]['minimum']
                maximum = settable_quotas[key]['maximum']
                if value.lower() == 'unlimited':
                    value = -1
                if int(value) < -1:
                    print(_('Quota limit must be -1 or greater.'))
                    return(2)
                if ((int(value) < minimum) and
                    (maximum != -1 or (maximum == -1 and int(value) != -1))):
                    print(_('Quota limit must greater than %s.') % minimum)
                    return(2)
                if maximum != -1 and int(value) > maximum:
                    print(_('Quota limit must less than %s.') % maximum)
                    return(2)
                # EAFP: create the quota row, fall back to update if it
                # already exists.
                try:
                    db.quota_create(ctxt, project_id, key, value,
                                    user_id=user_id)
                except exception.QuotaExists:
                    db.quota_update(ctxt, project_id, key, value,
                                    user_id=user_id)
            else:
                print(_('%(key)s is not a valid quota key. Valid options are: '
                        '%(options)s.') % {'key': key,
                                           'options': ', '.join(quota)})
                return(2)
        print_format = "%-36s %-10s %-10s %-10s"
        print(print_format % (
                    _('Quota'),
                    _('Limit'),
                    _('In Use'),
                    _('Reserved')))
        # Retrieve the quota after update
        if user_id:
            quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id)
        else:
            quota = QUOTAS.get_project_quotas(ctxt, project_id)
        for key, value in quota.iteritems():
            # BUG FIX: test for None *before* comparing with 0.  The old
            # order ("limit < 0 or limit is None") made the None check
            # unreachable on Python 2 (None < 0 is True there) and raises
            # TypeError on Python 3.
            if value['limit'] is None or value['limit'] < 0:
                value['limit'] = 'unlimited'
            print(print_format % (key, value['limit'], value['in_use'],
                                  value['reserved']))

    @args('--project', dest='project_id', metavar='<Project name>',
          help='Project name')
    def scrub(self, project_id):
        """Deletes data associated with project."""
        # Disassociate all networks from the project, then destroy its
        # security groups.
        admin_context = context.get_admin_context()
        networks = db.project_get_networks(admin_context, project_id)
        for network in networks:
            db.network_disassociate(admin_context, network['id'])
        groups = db.security_group_get_by_project(admin_context, project_id)
        for group in groups:
            db.security_group_destroy(admin_context, group['id'])
# Backwards-compatible alias: the deprecated 'account' CLI category maps to
# the same command implementations as 'project'.
AccountCommands = ProjectCommands
class FixedIpCommands(object):
    """Class for managing fixed ip."""
    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Lists all fixed ips (optionally by host).

        Prints one row per fixed IP whose network still exists; returns 2
        if the lookup fails.
        """
        ctxt = context.get_admin_context()
        try:
            if host is None:
                fixed_ips = db.fixed_ip_get_all(ctxt)
            else:
                fixed_ips = db.fixed_ip_get_by_host(ctxt, host)
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        instances = db.instance_get_all(context.get_admin_context())
        instances_by_uuid = {}
        # Index instances by uuid so each fixed ip below is a dict lookup
        # instead of a scan over all instances.
        for instance in instances:
            instances_by_uuid[instance['uuid']] = instance
        print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
                                           _('IP address'),
                                           _('hostname'),
                                           _('host')))
        all_networks = {}
        try:
            # use network_get_all to retrieve all existing networks
            # this is to ensure that IPs associated with deleted networks
            # will not throw exceptions.
            for network in db.network_get_all(context.get_admin_context()):
                all_networks[network.id] = network
        except exception.NoNetworksFound:
            # do not have any networks, so even if there are IPs, these
            # IPs should have been deleted ones, so return.
            print(_('No fixed IP found.'))
            return
        has_ip = False
        for fixed_ip in fixed_ips:
            hostname = None
            host = None
            # Silently skip IPs whose network has been deleted (see NOTE
            # above about network_get_all).
            network = all_networks.get(fixed_ip['network_id'])
            if network:
                has_ip = True
                if fixed_ip.get('instance_uuid'):
                    instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
                    if instance:
                        hostname = instance['hostname']
                        host = instance['host']
                    else:
                        print(_('WARNING: fixed ip %s allocated to missing'
                                ' instance') % str(fixed_ip['address']))
                print("%-18s\t%-15s\t%-15s\t%s" % (
                        network['cidr'],
                        fixed_ip['address'],
                        hostname, host))
        if not has_ip:
            print(_('No fixed IP found.'))
    @args('--address', metavar='<ip address>', help='IP address')
    def reserve(self, address):
        """Mark fixed ip as reserved

        arguments: address
        """
        return self._set_reserved(address, True)
    @args('--address', metavar='<ip address>', help='IP address')
    def unreserve(self, address):
        """Mark fixed ip as free to use

        arguments: address
        """
        return self._set_reserved(address, False)
    def _set_reserved(self, address, reserved):
        # Shared helper for reserve/unreserve: flips the 'reserved' flag on
        # the fixed ip row; prints an error and returns 2 if the address is
        # unknown.
        ctxt = context.get_admin_context()
        try:
            fixed_ip = db.fixed_ip_get_by_address(ctxt, address)
            if fixed_ip is None:
                raise exception.NotFound('Could not find address')
            db.fixed_ip_update(ctxt, fixed_ip['address'],
                               {'reserved': reserved})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
class FloatingIpCommands(object):
    """Class for managing floating ip."""

    @staticmethod
    def address_to_hosts(addresses):
        """
        Iterate over hosts within an address range.

        If an explicit range specifier is missing, the parameter is
        interpreted as a specific individual address.
        """
        try:
            return [netaddr.IPAddress(addresses)]
        except ValueError:
            # Not a single address; treat it as a CIDR range with guard
            # clauses for ranges that are too small or absurdly large.
            net = netaddr.IPNetwork(addresses)
            if net.size < 4:
                reason = _("/%s should be specified as single address(es) "
                           "not in cidr format") % net.prefixlen
                raise exception.InvalidInput(reason=reason)
            if net.size >= 1000000:
                # NOTE(dripton): If we generate a million IPs and put them in
                # the database, the system will slow to a crawl and/or run
                # out of memory and crash. This is clearly a misconfiguration.
                reason = _("Too many IP addresses will be generated. Please "
                           "increase /%s to reduce the number generated."
                           ) % net.prefixlen
                raise exception.InvalidInput(reason=reason)
            return net.iter_hosts()

    @args('--ip_range', metavar='<range>', help='IP range')
    @args('--pool', metavar='<pool>', help='Optional pool')
    @args('--interface', metavar='<interface>', help='Optional interface')
    def create(self, ip_range, pool=None, interface=None):
        """Creates floating ips for zone by range."""
        admin_context = context.get_admin_context()
        pool = pool or CONF.default_floating_pool
        interface = interface or CONF.public_interface
        # Generator so huge ranges are never fully materialized here.
        rows = ({'address': str(addr), 'pool': pool, 'interface': interface}
                for addr in self.address_to_hosts(ip_range))
        try:
            db.floating_ip_bulk_create(admin_context, rows)
        except exception.FloatingIpExists as exc:
            # NOTE(simplylizz): Maybe logging would be better here
            # instead of printing, but logging isn't used here and I
            # don't know why.
            print('error: %s' % exc)
            return(1)

    @args('--ip_range', metavar='<range>', help='IP range')
    def delete(self, ip_range):
        """Deletes floating ips by range."""
        admin_context = context.get_admin_context()
        rows = ({'address': str(addr)}
                for addr in self.address_to_hosts(ip_range))
        db.floating_ip_bulk_destroy(admin_context, rows)

    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Lists all floating ips (optionally by host).

        Note: if host is given, only active floating IPs are returned
        """
        ctxt = context.get_admin_context()
        try:
            if host is None:
                floating_ips = db.floating_ip_get_all(ctxt)
            else:
                floating_ips = db.floating_ip_get_all_by_host(ctxt, host)
        except exception.NoFloatingIpsDefined:
            print(_("No floating IP addresses have been defined."))
            return
        for floating_ip in floating_ips:
            instance_uuid = None
            fixed_id = floating_ip['fixed_ip_id']
            if fixed_id:
                # Resolve the attached instance via the fixed ip record.
                fixed_ip = db.fixed_ip_get(ctxt, fixed_id)
                instance_uuid = fixed_ip['instance_uuid']
            print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'],
                                          floating_ip['address'],
                                          instance_uuid,
                                          floating_ip['pool'],
                                          floating_ip['interface']))
class NetworkCommands(object):
    """Class for managing networks."""
    @args('--label', metavar='<label>', help='Label for network (ex: public)')
    @args('--fixed_range_v4', dest='cidr', metavar='<x.x.x.x/yy>',
          help='IPv4 subnet (ex: 10.0.0.0/8)')
    @args('--num_networks', metavar='<number>',
          help='Number of networks to create')
    @args('--network_size', metavar='<number>',
          help='Number of IPs per network')
    @args('--vlan', dest='vlan_start', metavar='<vlan id>', help='vlan id')
    @args('--vpn', dest='vpn_start', help='vpn start')
    @args('--fixed_range_v6', dest='cidr_v6',
          help='IPv6 subnet (ex: fe80::/64')
    @args('--gateway', help='gateway')
    @args('--gateway_v6', help='ipv6 gateway')
    @args('--bridge', metavar='<bridge>',
          help='VIFs on this network are connected to this bridge')
    @args('--bridge_interface', metavar='<bridge interface>',
          help='the bridge is connected to this interface')
    @args('--multi_host', metavar="<'T'|'F'>",
          help='Multi host')
    @args('--dns1', metavar="<DNS Address>", help='First DNS')
    @args('--dns2', metavar="<DNS Address>", help='Second DNS')
    @args('--uuid', metavar="<network uuid>", help='Network UUID')
    @args('--fixed_cidr', metavar='<x.x.x.x/yy>',
          help='IPv4 subnet for fixed IPS (ex: 10.20.0.0/16)')
    @args('--project_id', metavar="<project id>",
          help='Project id')
    @args('--priority', metavar="<number>", help='Network interface priority')
    def create(self, label=None, cidr=None, num_networks=None,
               network_size=None, multi_host=None, vlan_start=None,
               vpn_start=None, cidr_v6=None, gateway=None,
               gateway_v6=None, bridge=None, bridge_interface=None,
               dns1=None, dns2=None, project_id=None, priority=None,
               uuid=None, fixed_cidr=None):
        """Creates fixed ips for host by range."""
        # Collect every non-empty CLI argument into kwargs; locals() here
        # contains exactly this method's parameters plus 'self'.
        kwargs = dict(((k, v) for k, v in locals().iteritems()
                       if v and k != "self"))
        if multi_host is not None:
            # Convert the CLI's 'T'/'F' flag into a real boolean.
            kwargs['multi_host'] = multi_host == 'T'
        net_manager = importutils.import_object(CONF.network_manager)
        net_manager.create_networks(context.get_admin_context(), **kwargs)
    def list(self):
        """List all created networks."""
        _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
        print(_fmt % (_('id'),
                      _('IPv4'),
                      _('IPv6'),
                      _('start address'),
                      _('DNS1'),
                      _('DNS2'),
                      _('VlanID'),
                      _('project'),
                      _("uuid")))
        try:
            # Since network_get_all can throw exception.NoNetworksFound
            # for this command to show a nice result, this exception
            # should be caught and handled as such.
            networks = db.network_get_all(context.get_admin_context())
        except exception.NoNetworksFound:
            print(_('No networks found'))
        else:
            for network in networks:
                print(_fmt % (network.id,
                              network.cidr,
                              network.cidr_v6,
                              network.dhcp_start,
                              network.dns1,
                              network.dns2,
                              network.vlan,
                              network.project_id,
                              network.uuid))
    @args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to delete')
    @args('--uuid', metavar='<uuid>', help='UUID of network to delete')
    def delete(self, fixed_range=None, uuid=None):
        """Deletes a network.

        Requires exactly one of fixed_range or uuid; with the Neutron
        manager only uuid is accepted.
        """
        if fixed_range is None and uuid is None:
            raise Exception(_("Please specify either fixed_range or uuid"))
        net_manager = importutils.import_object(CONF.network_manager)
        if "NeutronManager" in CONF.network_manager:
            if uuid is None:
                raise Exception(_("UUID is required to delete "
                                  "Neutron Networks"))
            if fixed_range:
                raise Exception(_("Deleting by fixed_range is not supported "
                                  "with the NeutronManager"))
        # delete the network
        net_manager.delete_network(context.get_admin_context(),
                                   fixed_range, uuid)
    @args('--fixed_range', metavar='<x.x.x.x/yy>', help='Network to modify')
    @args('--project', metavar='<project name>',
          help='Project name to associate')
    @args('--host', metavar='<host>', help='Host to associate')
    @args('--disassociate-project', action="store_true", dest='dis_project',
          default=False, help='Disassociate Network from Project')
    @args('--disassociate-host', action="store_true", dest='dis_host',
          default=False, help='Disassociate Host from Project')
    def modify(self, fixed_range, project=None, host=None,
               dis_project=None, dis_host=None):
        """Associate/Disassociate Network with Project and/or Host

        arguments: network project host
        leave any field blank to ignore it
        """
        admin_context = context.get_admin_context()
        network = db.network_get_by_cidr(admin_context, fixed_range)
        net = {}
        # User can choose the following actions each for project and host.
        # 1) Associate (set not None value given by project/host parameter)
        # 2) Disassociate (set None by disassociate parameter)
        # 3) Keep unchanged (project/host key is not added to 'net')
        if dis_project:
            net['project_id'] = None
        if dis_host:
            net['host'] = None
        # The --disassociate-X are boolean options, but if they user
        # mistakenly provides a value, it will be used as a positional argument
        # and be erroneously interepreted as some other parameter (e.g.
        # a project instead of host value). The safest thing to do is error-out
        # with a message indicating that there is probably a problem with
        # how the disassociate modifications are being used.
        if dis_project or dis_host:
            if project or host:
                error_msg = "ERROR: Unexpected arguments provided. Please " \
                    "use separate commands."
                print(error_msg)
                return(1)
            db.network_update(admin_context, network['id'], net)
            return
        if project:
            net['project_id'] = project
        if host:
            net['host'] = host
        db.network_update(admin_context, network['id'], net)
class VmCommands(object):
    """Class for mangaging VM instances."""
    @args('--host', metavar='<host>', help='Host')
    def list(self, host=None):
        """Show a list of all instances.

        When host is given, only instances on that host are listed.
        """
        # Header row; widths must match the per-instance format below.
        print(("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
               " %-10s %-10s %-10s %-5s" % (_('instance'),
                                            _('node'),
                                            _('type'),
                                            _('state'),
                                            _('launched'),
                                            _('image'),
                                            _('kernel'),
                                            _('ramdisk'),
                                            _('project'),
                                            _('user'),
                                            _('zone'),
                                            _('index'))))
        if host is None:
            instances = db.instance_get_all(context.get_admin_context())
        else:
            instances = db.instance_get_all_by_host(
                            context.get_admin_context(), host)
        for instance in instances:
            # Flavor data is embedded in the instance's system metadata.
            instance_type = flavors.extract_flavor(instance)
            print(("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
                   " %-10s %-10s %-10s %-5d" % (instance['display_name'],
                                                instance['host'],
                                                instance_type['name'],
                                                instance['vm_state'],
                                                instance['launched_at'],
                                                instance['image_ref'],
                                                instance['kernel_id'],
                                                instance['ramdisk_id'],
                                                instance['project_id'],
                                                instance['user_id'],
                                                instance['availability_zone'],
                                                instance['launch_index'])))
class ServiceCommands(object):
    """Enable and disable running services."""
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def list(self, host=None, service=None):
        """
        Show a list of all running services. Filter by host & service name.
        """
        servicegroup_api = servicegroup.API()
        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        services = availability_zones.set_availability_zones(ctxt, services)
        # Optional filtering by host and/or service binary name.
        if host:
            services = [s for s in services if s['host'] == host]
        if service:
            services = [s for s in services if s['binary'] == service]
        print_format = "%-16s %-36s %-16s %-10s %-5s %-10s"
        print(print_format % (
                    _('Binary'),
                    _('Host'),
                    _('Zone'),
                    _('Status'),
                    _('State'),
                    _('Updated_At')))
        for svc in services:
            # ":-)" means the service heartbeat is current, "XXX" means it
            # is considered down by the servicegroup API.
            alive = servicegroup_api.service_is_up(svc)
            art = (alive and ":-)") or "XXX"
            active = 'enabled'
            if svc['disabled']:
                active = 'disabled'
            print(print_format % (svc['binary'], svc['host'],
                                  svc['availability_zone'], active, art,
                                  svc['updated_at']))
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def enable(self, host, service):
        """Enable scheduling for a service.

        Returns 2 if the (host, service) pair is unknown.
        """
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': False})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        print((_("Service %(service)s on host %(host)s enabled.") %
               {'service': service, 'host': host}))
    @args('--host', metavar='<host>', help='Host')
    @args('--service', metavar='<service>', help='Nova service')
    def disable(self, host, service):
        """Disable scheduling for a service.

        Returns 2 if the (host, service) pair is unknown.
        """
        ctxt = context.get_admin_context()
        try:
            svc = db.service_get_by_args(ctxt, host, service)
            db.service_update(ctxt, svc['id'], {'disabled': True})
        except exception.NotFound as ex:
            print(_("error: %s") % ex)
            return(2)
        print((_("Service %(service)s on host %(host)s disabled.") %
               {'service': service, 'host': host}))
    def _show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.

        :param context: security context
        :param host: hostname
        :returns:
            example format is below::

                {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
                D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                    'vcpus_used': 12, 'memory_mb_used': 10240,
                    'local_gb_used': 64}

        """
        # Getting compute node info and related instances info
        service_ref = db.service_get_by_compute_host(context, host)
        instance_refs = db.instance_get_all_by_host(context,
                                                    service_ref['host'])
        # Getting total available/used resource
        # NOTE(review): assumes exactly one compute_node entry per service;
        # only the first is reported.
        compute_ref = service_ref['compute_node'][0]
        resource = {'vcpus': compute_ref['vcpus'],
                    'memory_mb': compute_ref['memory_mb'],
                    'local_gb': compute_ref['local_gb'],
                    'vcpus_used': compute_ref['vcpus_used'],
                    'memory_mb_used': compute_ref['memory_mb_used'],
                    'local_gb_used': compute_ref['local_gb_used']}
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}
        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            vcpus = [i['vcpus'] for i in instance_refs
                     if i['project_id'] == project_id]
            mem = [i['memory_mb'] for i in instance_refs
                   if i['project_id'] == project_id]
            root = [i['root_gb'] for i in instance_refs
                    if i['project_id'] == project_id]
            ephemeral = [i['ephemeral_gb'] for i in instance_refs
                         if i['project_id'] == project_id]
            usage[project_id] = {'vcpus': sum(vcpus),
                                 'memory_mb': sum(mem),
                                 'root_gb': sum(root),
                                 'ephemeral_gb': sum(ephemeral)}
        return {'resource': resource, 'usage': usage}
    @args('--host', metavar='<host>', help='Host')
    def describe_resource(self, host):
        """Describes cpu/memory/hdd info for host.

        :param host: hostname.

        Returns 2 on lookup failure.
        """
        try:
            result = self._show_host_resources(context.get_admin_context(),
                                               host=host)
        except exception.NovaException as ex:
            print (_("error: %s") % ex)
            return 2
        if not isinstance(result, dict):
            print(_('An unexpected error has occurred.'))
            print(_('[Result]'), result)
        else:
            # Printing a total and used_now
            # (NOTE)The host name width 16 characters
            print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {"a": _('HOST'),
                                                         "b": _('PROJECT'),
                                                         "c": _('cpu'),
                                                         "d": _('mem(mb)'),
                                                         "e": _('hdd')})
            print(('%(a)-16s(total)%(b)26s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus'],
                    "c": result['resource']['memory_mb'],
                    "d": result['resource']['local_gb']}))
            print(('%(a)-16s(used_now)%(b)23s%(c)8s%(d)8s' %
                   {"a": host,
                    "b": result['resource']['vcpus_used'],
                    "c": result['resource']['memory_mb_used'],
                    "d": result['resource']['local_gb_used']}))
            # Printing a used_max
            cpu_sum = 0
            mem_sum = 0
            hdd_sum = 0
            for p_id, val in result['usage'].items():
                cpu_sum += val['vcpus']
                mem_sum += val['memory_mb']
                hdd_sum += val['root_gb']
                hdd_sum += val['ephemeral_gb']
            print('%(a)-16s(used_max)%(b)23s%(c)8s%(d)8s' % {"a": host,
                                                             "b": cpu_sum,
                                                             "c": mem_sum,
                                                             "d": hdd_sum})
            for p_id, val in result['usage'].items():
                print('%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {
                        "a": host,
                        "b": p_id,
                        "c": val['vcpus'],
                        "d": val['memory_mb'],
                        "e": val['root_gb'] + val['ephemeral_gb']})
class HostCommands(object):
    """List hosts."""

    def list(self, zone=None):
        """Show a list of all physical hosts. Filter by zone.

        args: [zone]
        """
        print("%-25s\t%-15s" % (_('host'),
                                _('zone')))
        ctxt = context.get_admin_context()
        services = availability_zones.set_availability_zones(
            ctxt, db.service_get_all(ctxt))
        if zone:
            services = [srv for srv in services
                        if srv['availability_zone'] == zone]
        # De-duplicate by host, keeping the first service seen per host.
        seen_hosts = set()
        for srv in services:
            if srv['host'] in seen_hosts:
                continue
            seen_hosts.add(srv['host'])
            print("%-25s\t%-15s" % (srv['host'], srv['availability_zone']))
class DbCommands(object):
    """Class for managing the database."""

    def __init__(self):
        pass

    @args('--version', metavar='<version>', help='Database version')
    def sync(self, version=None):
        """Sync the database up to the most recent version."""
        return migration.db_sync(version)

    def version(self):
        """Print the current database version."""
        print(migration.db_version())

    @args('--max_rows', metavar='<number>',
          help='Maximum number of deleted rows to archive')
    def archive_deleted_rows(self, max_rows):
        """Move up to max_rows deleted rows from production tables to shadow
        tables.
        """
        if max_rows is not None:
            max_rows = int(max_rows)
            if max_rows < 0:
                print(_("Must supply a positive value for max_rows"))
                return(1)
        db.archive_deleted_rows(context.get_admin_context(), max_rows)
class FlavorCommands(object):
    """Class for managing flavors.

    Note instance type is a deprecated synonym for flavor.
    """
    def _print_flavors(self, val):
        # Render one flavor dict as a single human-readable line.
        is_public = ('private', 'public')[val["is_public"] == 1]
        print(("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
            "FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
            val["name"], val["memory_mb"], val["vcpus"], val["root_gb"],
            val["ephemeral_gb"], val["flavorid"], val["swap"],
            val["rxtx_factor"], is_public, val["extra_specs"]))
    @args('--name', metavar='<name>',
            help='Name of flavor')
    @args('--memory', metavar='<memory size>', help='Memory size')
    @args('--cpu', dest='vcpus', metavar='<num cores>', help='Number cpus')
    @args('--root_gb', metavar='<root_gb>', help='Root disk size')
    @args('--ephemeral_gb', metavar='<ephemeral_gb>',
            help='Ephemeral disk size')
    @args('--flavor', dest='flavorid', metavar='<flavor  id>',
            help='Flavor ID')
    @args('--swap', metavar='<swap>', help='Swap')
    @args('--rxtx_factor', metavar='<rxtx_factor>', help='rxtx_factor')
    @args('--is_public', metavar='<is_public>',
            help='Make flavor accessible to the public')
    def create(self, name, memory, vcpus, root_gb, ephemeral_gb=0,
               flavorid=None, swap=0, rxtx_factor=1.0, is_public=True):
        """Creates flavors.

        Returns 1 on invalid input, 2 on duplicate flavor, 3 on other
        errors; prints a confirmation on success.
        """
        try:
            flavors.create(name, memory, vcpus, root_gb,
                           ephemeral_gb=ephemeral_gb, flavorid=flavorid,
                           swap=swap, rxtx_factor=rxtx_factor,
                           is_public=is_public)
        except exception.InvalidInput as e:
            print(_("Must supply valid parameters to create flavor"))
            print(e)
            return 1
        except exception.InstanceTypeExists:
            print(_("Flavor exists."))
            print(_("Please ensure flavor name and flavorid are "
                    "unique."))
            print(_("Currently defined flavor names and flavorids:"))
            print()
            self.list()
            return 2
        except Exception:
            print(_("Unknown error"))
            return 3
        else:
            print(_("%s created") % name)
    @args('--name', metavar='<name>', help='Name of flavor')
    def delete(self, name):
        """Marks flavors as deleted.

        Returns 1 if the flavor is unknown, 2 on DB errors, 3 otherwise.
        """
        try:
            flavors.destroy(name)
        except exception.InstanceTypeNotFound:
            print(_("Valid flavor name is required"))
            return 1
        except db_exc.DBError as e:
            print(_("DB Error: %s") % e)
            return(2)
        except Exception:
            return(3)
        else:
            print(_("%s deleted") % name)
    @args('--name', metavar='<name>', help='Name of flavor')
    def list(self, name=None):
        """Lists all active or specific flavors."""
        try:
            if name is None:
                inst_types = flavors.get_all_flavors()
            else:
                inst_types = flavors.get_flavor_by_name(name)
        except db_exc.DBError as e:
            _db_error(e)
        # NOTE(review): values()[0] and iteritems() are Python 2 only; this
        # distinguishes the "dict of flavors" case from a single flavor dict.
        if isinstance(inst_types.values()[0], dict):
            for k, v in inst_types.iteritems():
                self._print_flavors(v)
        else:
            self._print_flavors(inst_types)
    @args('--name', metavar='<name>', help='Name of flavor')
    @args('--key', metavar='<key>', help='The key of the key/value pair')
    @args('--value', metavar='<value>', help='The value of the key/value pair')
    def set_key(self, name, key, value=None):
        """Add key/value pair to specified flavor's extra_specs.

        Returns 2 if the flavor name is unknown.
        """
        try:
            try:
                inst_type = flavors.get_flavor_by_name(name)
            except exception.InstanceTypeNotFoundByName as e:
                print(e)
                return(2)
            ctxt = context.get_admin_context()
            ext_spec = {key: value}
            db.flavor_extra_specs_update_or_create(
                            ctxt,
                            inst_type["flavorid"],
                            ext_spec)
            print((_("Key %(key)s set to %(value)s on instance "
                     "type %(name)s") %
                   {'key': key, 'value': value, 'name': name}))
        except db_exc.DBError as e:
            _db_error(e)
    @args('--name', metavar='<name>', help='Name of flavor')
    @args('--key', metavar='<key>', help='The key to be deleted')
    def unset_key(self, name, key):
        """Delete the specified extra spec for flavor.

        Returns 2 if the flavor name is unknown.
        """
        try:
            try:
                inst_type = flavors.get_flavor_by_name(name)
            except exception.InstanceTypeNotFoundByName as e:
                print(e)
                return(2)
            ctxt = context.get_admin_context()
            db.flavor_extra_specs_delete(
                        ctxt,
                        inst_type["flavorid"],
                        key)
            print((_("Key %(key)s on flavor %(name)s unset") %
                   {'key': key, 'name': name}))
        except db_exc.DBError as e:
            _db_error(e)
class AgentBuildCommands(object):
    """Class for managing agent builds."""

    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--version', metavar='<version>', help='version')
    @args('--url', metavar='<url>', help='url')
    @args('--md5hash', metavar='<md5hash>', help='md5hash')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def create(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Creates a new agent build."""
        values = {'hypervisor': hypervisor,
                  'os': os,
                  'architecture': architecture,
                  'version': version,
                  'url': url,
                  'md5hash': md5hash}
        db.agent_build_create(context.get_admin_context(), values)

    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def delete(self, os, architecture, hypervisor='xen'):
        """Deletes an existing agent build."""
        ctxt = context.get_admin_context()
        existing = db.agent_build_get_by_triple(ctxt,
                                                hypervisor, os, architecture)
        db.agent_build_destroy(ctxt, existing['id'])

    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: None)')
    def list(self, hypervisor=None):
        """Lists all agent builds.

        arguments: <none>
        """
        fmt = "%-10s  %-8s  %12s  %s"
        ctxt = context.get_admin_context()
        # Group builds by hypervisor before printing.
        by_hypervisor = {}
        for agent_build in db.agent_build_get_all(ctxt):
            by_hypervisor.setdefault(agent_build.hypervisor,
                                     []).append(agent_build)
        for key, buildlist in by_hypervisor.iteritems():
            if hypervisor and key != hypervisor:
                continue
            print(_('Hypervisor: %s') % key)
            print(fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32))
            for agent_build in buildlist:
                print(fmt % (agent_build.os, agent_build.architecture,
                             agent_build.version, agent_build.md5hash))
                print('    %s' % agent_build.url)
            print()

    @args('--os', metavar='<os>', help='os')
    @args('--architecture', dest='architecture',
          metavar='<architecture>', help='architecture')
    @args('--version', metavar='<version>', help='version')
    @args('--url', metavar='<url>', help='url')
    @args('--md5hash', metavar='<md5hash>', help='md5hash')
    @args('--hypervisor', metavar='<hypervisor>',
          help='hypervisor(default: xen)')
    def modify(self, os, architecture, version, url, md5hash,
               hypervisor='xen'):
        """Update an existing agent build."""
        ctxt = context.get_admin_context()
        existing = db.agent_build_get_by_triple(ctxt,
                                                hypervisor, os, architecture)
        db.agent_build_update(ctxt, existing['id'],
                              {'version': version,
                               'url': url,
                               'md5hash': md5hash})
class GetLogCommands(object):
    """Get logging information."""

    def errors(self):
        """Get all of the errors from the log files.

        Scans every *.log file under CONF.log_dir (newest lines first) and
        prints each line containing " ERROR " with its line number.
        """
        error_found = 0
        if CONF.log_dir:
            logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
            # BUG FIX: the old loop variable 'file' shadowed the builtin and
            # the opened handle was never closed, leaking one file descriptor
            # per log file scanned.
            for log_name in logs:
                log_file = os.path.join(CONF.log_dir, log_name)
                with open(log_file, "r") as f:
                    lines = [line.strip() for line in f]
                lines.reverse()
                print_name = 0
                for index, line in enumerate(lines):
                    if line.find(" ERROR ") > 0:
                        error_found += 1
                        # Print the file name header once, before the first
                        # error found in this file.
                        if print_name == 0:
                            print(log_file + ":-")
                            print_name = 1
                        # lines is reversed, so recover the original number.
                        linenum = len(lines) - index
                        print((_('Line %(linenum)d : %(line)s') %
                               {'linenum': linenum, 'line': line}))
        if error_found == 0:
            print(_('No errors in logfiles!'))

    @args('--num_entries', metavar='<number of entries>',
          help='number of entries(default: 10)')
    def syslog(self, num_entries=10):
        """Get <num_entries> of the nova syslog events.

        Returns 1 if no system log file can be located.
        """
        entries = int(num_entries)
        count = 0
        log_file = ''
        if os.path.exists('/var/log/syslog'):
            log_file = '/var/log/syslog'
        elif os.path.exists('/var/log/messages'):
            log_file = '/var/log/messages'
        else:
            print(_('Unable to find system log file!'))
            return(1)
        # BUG FIX: close the log file when done instead of leaking it.
        with open(log_file, "r") as f:
            lines = [line.strip() for line in f]
        lines.reverse()
        print(_('Last %s nova syslog entries:-') % (entries))
        for line in lines:
            if line.find("nova") > 0:
                count += 1
                print("%s" % (line))
            if count == entries:
                break
        if count == 0:
            print(_('No nova entries in syslog!'))
class CellCommands(object):
    """Commands for managing cells."""

    @args('--name', metavar='<name>', help='Name for the new cell')
    @args('--cell_type', metavar='<parent|child>',
          help='Whether the cell is a parent or child')
    @args('--username', metavar='<username>',
          help='Username for the message broker in this cell')
    @args('--password', metavar='<password>',
          help='Password for the message broker in this cell')
    @args('--hostname', metavar='<hostname>',
          help='Address of the message broker in this cell')
    @args('--port', metavar='<number>',
          help='Port number of the message broker in this cell')
    @args('--virtual_host', metavar='<virtual_host>',
          help='The virtual host of the message broker in this cell')
    @args('--woffset', metavar='<float>')
    @args('--wscale', metavar='<float>')
    def create(self, name, cell_type='child', username=None, password=None,
               hostname=None, port=None, virtual_host=None,
               woffset=None, wscale=None):
        """Create a new cell record; returns 2 on an invalid cell type."""
        if cell_type not in ('parent', 'child'):
            print("Error: cell type must be 'parent' or 'child'")
            return(2)
        # Set up the transport URL for the cell's message broker.
        transport_url = rpc_driver.unparse_transport_url({
            'username': username,
            'password': password,
            'hostname': hostname,
            'port': int(port),
            'virtual_host': virtual_host,
        })
        record = {'name': name,
                  'is_parent': cell_type == 'parent',
                  'transport_url': transport_url,
                  'weight_offset': float(woffset),
                  'weight_scale': float(wscale)}
        db.cell_create(context.get_admin_context(), record)

    @args('--cell_name', metavar='<cell_name>',
          help='Name of the cell to delete')
    def delete(self, cell_name):
        """Remove the named cell from the database."""
        db.cell_delete(context.get_admin_context(), cell_name)

    def list(self):
        """Print a table of all cells in the database."""
        ctxt = context.get_admin_context()
        fmt = "%3s  %-10s  %-6s  %-10s  %-15s  %-5s  %-10s"
        divider = fmt % ('-' * 3, '-' * 10, '-' * 6, '-' * 10, '-' * 15,
                         '-' * 5, '-' * 10)
        print(fmt % ('Id', 'Name', 'Type', 'Username', 'Hostname',
                     'Port', 'VHost'))
        print(divider)
        for cell in db.cell_get_all(ctxt):
            transport = rpc_driver.parse_transport_url(cell.transport_url)
            print(fmt % (cell.id, cell.name,
                         'parent' if cell.is_parent else 'child',
                         transport['username'], transport['hostname'],
                         transport['port'], transport['virtual_host']))
        print(divider)
# Mapping from CLI category name (the first positional argument of
# nova-manage) to the class implementing that category's commands.
CATEGORIES = {
    'account': AccountCommands,
    'agent': AgentBuildCommands,
    'cell': CellCommands,
    'db': DbCommands,
    'fixed': FixedIpCommands,
    'flavor': FlavorCommands,
    'floating': FloatingIpCommands,
    'host': HostCommands,
    # Deprecated, remove in Icehouse
    'instance_type': FlavorCommands,
    'logs': GetLogCommands,
    'network': NetworkCommands,
    'project': ProjectCommands,
    'service': ServiceCommands,
    'shell': ShellCommands,
    'vm': VmCommands,
    'vpn': VpnCommands,
}
def methods_of(obj):
    """Get all callable methods of an object that don't start with underscore.

    :param obj: any object whose public callables should be listed.
    :returns: a list of (method_name, method) tuples, in ``dir()`` order.
    """
    result = []
    for name in dir(obj):
        if name.startswith('_'):
            # Skip private/dunder attributes without evaluating them.
            continue
        # Fetch the attribute once; the original called getattr() twice per
        # name, re-triggering any descriptor/property on the second call.
        attr = getattr(obj, name)
        if callable(attr):
            result.append((name, attr))
    return result
def add_command_parsers(subparsers):
    """Register a sub-parser for every command category in CATEGORIES.

    Also registers the special 'version' and 'bash-completion' parsers.
    Invoked by oslo.config via the SubCommandOpt handler defined below.
    """
    parser = subparsers.add_parser('version')
    parser = subparsers.add_parser('bash-completion')
    parser.add_argument('query_category', nargs='?')
    for category in CATEGORIES:
        command_object = CATEGORIES[category]()
        parser = subparsers.add_parser(category)
        parser.set_defaults(command_object=command_object)
        category_subparsers = parser.add_subparsers(dest='action')
        # One sub-parser per public method of the command class.
        for (action, action_fn) in methods_of(command_object):
            parser = category_subparsers.add_parser(action)
            action_kwargs = []
            # Each @args decorator stored an (args, kwargs) pair on the
            # method; replay them onto the argparse sub-parser.
            for args, kwargs in getattr(action_fn, 'args', []):
                # FIXME(markmc): hack to assume dest is the arg name without
                # the leading hyphens if no dest is supplied
                kwargs.setdefault('dest', args[0][2:])
                # Prefix dest with 'action_kwarg_' so action arguments
                # cannot collide with other oslo.config option names.
                if kwargs['dest'].startswith('action_kwarg_'):
                    action_kwargs.append(
                        kwargs['dest'][len('action_kwarg_'):])
                else:
                    action_kwargs.append(kwargs['dest'])
                    kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
                parser.add_argument(*args, **kwargs)
            parser.set_defaults(action_fn=action_fn)
            parser.set_defaults(action_kwargs=action_kwargs)
            # Remaining positionals are collected and passed to the action.
            parser.add_argument('action_args', nargs='*')
# Top-level positional 'category' option; oslo.config calls
# add_command_parsers() to attach the per-category sub-parsers.
category_opt = cfg.SubCommandOpt('category',
                                 title='Command categories',
                                 help='Available categories',
                                 handler=add_command_parsers)
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    try:
        config.parse_args(sys.argv)
        logging.setup("nova")
    except cfg.ConfigFilesNotFoundError:
        # Config file exists but is unreadable: try re-exec'ing ourselves
        # via sudo as the file's owner, otherwise tell the operator to
        # run as root and bail out with exit code 2.
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_('sudo failed, continuing as if nothing happened'))
        print(_('Please re-run nova-manage as root.'))
        return(2)
    if CONF.category.name == "version":
        print(version.version_string_with_package())
        return(0)
    if CONF.category.name == "bash-completion":
        # Emit the completion words: category names, or the action names
        # of a known category.
        if not CONF.category.query_category:
            print(" ".join(CATEGORIES.keys()))
        elif CONF.category.query_category in CATEGORIES:
            fn = CATEGORIES[CONF.category.query_category]
            command_object = fn()
            actions = methods_of(command_object)
            print(" ".join([k for (k, v) in actions]))
        return(0)
    fn = CONF.category.action_fn
    # NOTE: Python 2 code path — CLI args arrive as byte strings (see the
    # basestring check below) and are decoded to unicode here.
    fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args]
    fn_kwargs = {}
    for k in CONF.category.action_kwargs:
        v = getattr(CONF.category, 'action_kwarg_' + k)
        if v is None:
            # Unset options are simply not passed to the action.
            continue
        if isinstance(v, basestring):
            v = v.decode('utf-8')
        fn_kwargs[k] = v
    # call the action with the remaining arguments
    # check arguments
    try:
        cliutils.validate_args(fn, *fn_args, **fn_kwargs)
    except cliutils.MissingArgs as e:
        # NOTE(mikal): this isn't the most helpful error message ever. It is
        # long, and tells you a lot of things you probably don't want to know
        # if you just got a single arg wrong.
        print(fn.__doc__)
        CONF.print_help()
        print(e)
        return(1)
    try:
        ret = fn(*fn_args, **fn_kwargs)
        rpc.cleanup()
        return(ret)
    except Exception:
        print(_("Command failed, please check log for more info"))
        raise
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
# Optional per-call callback: receives (pipeline_response, deserialized,
# response_headers) and may transform the value returned to the caller.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the request builders below; client-side
# validation is disabled so the service performs validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    pipeline_run_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single pipeline run resource."""
    api_version = "2021-06-01-preview"
    accept = "application/json"

    # Expand the ARM template path with the serialized (validated) segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        pipelineRunName=_SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # Query string and headers (callers may pre-seed them via kwargs).
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_create_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    pipeline_run_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PUT request for the create long-running operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-06-01-preview"
    accept = "application/json"

    # Expand the ARM template path with the serialized (validated) segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        pipelineRunName=_SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # Query string and headers (callers may pre-seed them via kwargs).
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    pipeline_run_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP DELETE request for the delete long-running operation."""
    api_version = "2021-06-01-preview"

    # Expand the ARM template path with the serialized (validated) segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        pipelineRunName=_SERIALIZER.url("pipeline_run_name", pipeline_run_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # Only the api-version query parameter; DELETE sends no body or Accept.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=params,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists pipeline runs for a registry."""
    api_version = "2021-06-01-preview"
    accept = "application/json"

    # Expand the ARM template path with the serialized (validated) segments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        registryName=_SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    )

    # Query string and headers (callers may pre-seed them via kwargs).
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
class PipelineRunsOperations(object):
    """PipelineRunsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerregistry.v2021_06_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Alias so callers can reach the model classes via the operation group.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated service client; see class docstring.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        registry_name: str,
        pipeline_run_name: str,
        **kwargs: Any
    ) -> "_models.PipelineRun":
        """Gets the detailed information for a given pipeline run.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param pipeline_run_name: The name of the pipeline run.
        :type pipeline_run_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PipelineRun, or the result of cls(response)
        :rtype: ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRun
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PipelineRun"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            pipeline_run_name=pipeline_run_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PipelineRun', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'}  # type: ignore

    def _create_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        pipeline_run_name: str,
        pipeline_run_create_parameters: "_models.PipelineRun",
        **kwargs: Any
    ) -> "_models.PipelineRun":
        """Issue the initial PUT of the create LRO and deserialize the first response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PipelineRun"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(pipeline_run_create_parameters, 'PipelineRun')

        request = build_create_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            pipeline_run_name=pipeline_run_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 (already exists/updated) and 201 (created) both carry a body.
        if response.status_code == 200:
            deserialized = self._deserialize('PipelineRun', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('PipelineRun', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'}  # type: ignore

    @distributed_trace
    def begin_create(
        self,
        resource_group_name: str,
        registry_name: str,
        pipeline_run_name: str,
        pipeline_run_create_parameters: "_models.PipelineRun",
        **kwargs: Any
    ) -> LROPoller["_models.PipelineRun"]:
        """Creates a pipeline run for a container registry with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param pipeline_run_name: The name of the pipeline run.
        :type pipeline_run_name: str
        :param pipeline_run_create_parameters: The parameters for creating a pipeline run.
        :type pipeline_run_create_parameters:
         ~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRun
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either PipelineRun or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRun]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PipelineRun"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial PUT. cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                pipeline_run_name=pipeline_run_name,
                pipeline_run_create_parameters=pipeline_run_create_parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response once polling completes.
            response = pipeline_response.http_response
            deserialized = self._deserialize('PipelineRun', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Select the polling strategy: default ARM polling, no polling, or
        # a caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        registry_name: str,
        pipeline_run_name: str,
        **kwargs: Any
    ) -> None:
        """Issue the initial DELETE of the delete LRO; returns nothing on success."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            pipeline_run_name=pipeline_run_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 while in progress, 204 when already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'}  # type: ignore

    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        registry_name: str,
        pipeline_run_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a pipeline run from a container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param pipeline_run_name: The name of the pipeline run.
        :type pipeline_run_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial DELETE, keeping the raw
            # pipeline response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                pipeline_run_name=pipeline_run_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        # Select the polling strategy: default ARM polling, no polling, or
        # a caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns/{pipelineRunName}'}  # type: ignore

    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        **kwargs: Any
    ) -> Iterable["_models.PipelineRunListResult"]:
        """Lists all the pipeline runs for the specified container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PipelineRunListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2021_06_01_preview.models.PipelineRunListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PipelineRunListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the request from the operation template.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                # Subsequent pages: reuse the server-provided next_link URL.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Split one page into (next_link, iterator over page items).
            deserialized = self._deserialize("PipelineRunListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/pipelineRuns'}  # type: ignore
|
|
from nose.tools import * # flake8: noqa
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import ProjectFactory
from tests.factories import AuthUserFactory
from tests.factories import PrivateLinkFactory
from website.models import Node
class ViewOnlyTestCase(ApiTestCase):
    """Shared fixture for view-only-link tests.

    Creates a project creator plus read/write contributors, two private and
    two public projects, and anonymous / non-anonymous view-only links on
    one project of each visibility.
    """

    def setUp(self):
        super(ViewOnlyTestCase, self).setUp()

        # Users: the creator, an unrelated viewer, and read/write contributors.
        self.creation_user = AuthUserFactory()
        self.viewing_user = AuthUserFactory()
        self.contributing_read_user = AuthUserFactory()
        self.contributing_write_user = AuthUserFactory()

        # Ids a non-anonymized contributor listing is allowed to expose.
        self.valid_contributors = [
            self.creation_user._id,
            self.contributing_read_user._id,
            self.contributing_write_user._id,
        ]

        # Private project with both an anonymous and a non-anonymous link.
        self.private_node_one = ProjectFactory(is_public=False, creator=self.creation_user, title="Private One")
        self.private_node_one.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
        self.private_node_one.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
        self.private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
        self.private_node_one_anonymous_link.nodes.append(self.private_node_one)
        self.private_node_one_anonymous_link.save()
        self.private_node_one_private_link = PrivateLinkFactory(anonymous=False)
        self.private_node_one_private_link.nodes.append(self.private_node_one)
        self.private_node_one_private_link.save()
        self.private_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_one._id)

        # Private project with no view-only links.
        self.private_node_two = ProjectFactory(is_public=False, creator=self.creation_user, title="Private Two")
        self.private_node_two.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
        self.private_node_two.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
        self.private_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_two._id)

        # Public project with both an anonymous and a non-anonymous link.
        self.public_node_one = ProjectFactory(is_public=True, creator=self.creation_user, title="Public One")
        self.public_node_one.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
        self.public_node_one.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
        self.public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
        self.public_node_one_anonymous_link.nodes.append(self.public_node_one)
        self.public_node_one_anonymous_link.save()
        self.public_node_one_private_link = PrivateLinkFactory(anonymous=False)
        self.public_node_one_private_link.nodes.append(self.public_node_one)
        self.public_node_one_private_link.save()
        self.public_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_one._id)

        # Public project with no view-only links.
        self.public_node_two = ProjectFactory(is_public=True, creator=self.creation_user, title="Public Two")
        self.public_node_two.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
        self.public_node_two.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
        self.public_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_two._id)

    def tearDown(self):
        # Bug fix: the original skipped super().tearDown(), so the
        # ApiTestCase base-class cleanup never ran between tests.
        super(ViewOnlyTestCase, self).tearDown()
        Node.remove()
class TestNodeDetailViewOnlyLinks(ViewOnlyTestCase):
    """View-only-link behavior on the node detail endpoint."""

    def test_private_node_with_link_works_when_using_link(self):
        # A view-only link must grant the same read view a READ
        # contributor would get.
        res_normal = self.app.get(self.private_node_one_url, auth=self.contributing_read_user.auth)
        assert_equal(res_normal.status_code, 200)
        res_linked = self.app.get(self.private_node_one_url, {'view_only': self.private_node_one_private_link.key})
        assert_equal(res_linked.status_code, 200)
        assert_equal(res_linked.json, res_normal.json)

    def test_private_node_with_link_unauthorized_when_not_using_link(self):
        res = self.app.get(self.private_node_one_url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_private_node_with_link_anonymous_does_not_expose_contributor_id(self):
        # Anonymous links must blank out contributor ids.
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_equal(contributor['id'], '')

    def test_private_node_with_link_non_anonymous_does_expose_contributor_id(self):
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'], self.valid_contributors)

    def test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id(self):
        # NOTE(review): despite the method name, this request uses the
        # NON-anonymous link key and asserts ids ARE exposed — confirm
        # whether the anonymous key was intended here.
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        }, auth=self.creation_user.auth)
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'], self.valid_contributors)

    def test_public_node_with_link_anonymous_does_not_expose_user_id(self):
        res = self.app.get(self.public_node_one_url, {
            'view_only': self.public_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_equal(contributor['id'], '')

    def test_public_node_with_link_non_anonymous_does_expose_contributor_id(self):
        res = self.app.get(self.public_node_one_url, {
            'view_only': self.public_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'], self.valid_contributors)

    def test_public_node_with_link_unused_does_expose_contributor_id(self):
        # Public project without any view_only param: ids stay visible.
        res = self.app.get(self.public_node_one_url, {
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'], self.valid_contributors)

    def test_view_only_link_does_not_grant_write_permission(self):
        # A view-only link is read-only; a PATCH through it must be rejected.
        payload = {
            'data': {
                'attributes': {
                    'title': 'Cannot touch this' },
                'id': self.private_node_one._id,
                'type': 'nodes',
            }
        }
        res = self.app.patch_json_api(self.private_node_one_url, payload, {
            'view_only': self.private_node_one_private_link.key,
        }, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_view_only_link_from_anther_project_does_not_grant_view_permission(self):
        # A link minted for a different project must not unlock this one.
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.public_node_one_private_link.key,
        }, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_private_project_logs_with_anonymous_link_does_not_expose_user_id(self):
        # No contributor id may appear anywhere in the raw logs response.
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': self.private_node_one_anonymous_link.key,
        })
        assert_equal(res.status_code, 200)
        body = res.body
        assert_not_in(self.contributing_write_user._id, body)
        assert_not_in(self.contributing_read_user._id, body)
        assert_not_in(self.creation_user._id, body)

    def test_private_project_with_anonymous_link_does_not_expose_registrations_or_forks(self):
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_anonymous_link.key,
        })
        assert_equal(res.status_code, 200)
        relationships = res.json['data']['relationships']
        if 'embeds' in res.json['data']:
            embeds = res.json['data']['embeds']
        else:
            embeds = {}
        assert_not_in('registrations', relationships)
        assert_not_in('forks', relationships, 'Add forks view to blacklist in hide_view_when_anonymous().')
        assert_not_in('registrations', embeds)
        assert_not_in('forks', embeds, 'Add forks view to blacklist in hide_view_when_anonymous().')

    def test_bad_view_only_link_does_not_modify_permissions(self):
        # A bogus key neither grants access anonymously...
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': 'thisisnotarealprivatekey',
        }, expect_errors=True)
        assert_equal(res.status_code, 401)
        # ...nor revokes the access an authenticated contributor has.
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': 'thisisnotarealprivatekey',
        }, auth=self.creation_user.auth)
        assert_equal(res.status_code, 200)
class TestNodeListViewOnlyLinks(ViewOnlyTestCase):
    """View-only-link behaviour on the node *list* endpoint."""

    def test_private_link_does_not_show_node_in_list(self):
        """A view-only key does not surface its private node in the listing."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_private_link.key,
        })
        assert_equal(res.status_code, 200)
        listed_ids = [node['id'] for node in res.json['data']]
        assert_not_in(self.private_node_one._id, listed_ids)

    def test_anonymous_link_does_not_show_contributor_id_in_node_list(self):
        """With an anonymous key, every embedded contributor id is blanked."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        checked = 0
        for node in res.json['data']:
            for contributor in node['embeds']['contributors']['data']:
                checked += 1
                assert_equal(contributor['id'], '')
        # Guard against vacuous success: at least one contributor must be seen.
        assert_not_equal(checked, 0)

    def test_non_anonymous_link_does_show_contributor_id_in_node_list(self):
        """With a plain (non-anonymous) key, contributor ids remain visible."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        checked = 0
        for node in res.json['data']:
            for contributor in node['embeds']['contributors']['data']:
                checked += 1
                assert_in(contributor['id'], self.valid_contributors)
        # Guard against vacuous success: at least one contributor must be seen.
        assert_not_equal(checked, 0)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
# pylint: disable=unused-argument, redefined-builtin
"""Bitserial Conv2D operators"""
import tvm
from tvm import te
from .pad import pad
from .utils import get_pad_tuple
from .bitserial_util import bitpack
from ..utils import get_const_tuple
def bitserial_conv2d_nchw(
    data,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Bitserial Conv2D operator.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [num_filter, in_channel, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two or four ints
        padding size, [pad_height, pad_width], [pad_top, pad_left, pad_down, pad_right]
    activation_bits: int
        number of bits used for activations/input elements
    weight_bits: int
        number of bits used for weight elements
    out_dtype: str
        return type of convolution
    pack_dtype: str
        bit packing type
    unipolar: bool
        if binarization style is in unipolar 1/0 format, instead of bipolar -1/+1 format
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    assert isinstance(stride, int) or len(stride) == 2
    # Pack activation bits along the channel axis; the bit planes become a new
    # axis 2, giving [batch, in_channel_packed, bits, in_height, in_width].
    Input_q = bitpack(data, activation_bits, pack_axis=1, bit_axis=2, pack_type=pack_dtype)
    if len(kernel.shape) == 4:
        # A raw 4-D kernel is packed here; a 5-D kernel is assumed pre-packed.
        Filter_q = bitpack(kernel, weight_bits, pack_axis=1, bit_axis=4, pack_type=pack_dtype)
    else:
        Filter_q = kernel
    # NOTE: these unpackings rebind activation_bits/weight_bits to the packed
    # shape extents, deliberately shadowing the integer arguments above.
    batch, in_channel, activation_bits, in_height, in_width = Input_q.shape
    num_filter, _, kernel_h, kernel_w, weight_bits = Filter_q.shape
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        # A 4-element padding is already (top, left, down, right).
        TPAD, LPAD, DPAD, RPAD = padding
    # Pad only the spatial axes (3 and 4) of the packed input.
    pad_before = [0, 0, 0, TPAD, LPAD]
    pad_after = [0, 0, 0, DPAD, RPAD]
    PadInput_q = pad(Input_q, pad_before, pad_after, name="pad_temp")
    # compute the output shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = num_filter
    out_height = (in_height - kernel_h + TPAD + DPAD) // stride_h + 1
    out_width = (in_width - kernel_w + LPAD + RPAD) // stride_w + 1
    # Reduction axes: input channels, the kernel window, and both bit planes.
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    b1 = te.reduce_axis((0, activation_bits), name="b1")
    b2 = te.reduce_axis((0, weight_bits), name="b2")
    if unipolar:
        # Unipolar (1/0) weights: popcount(x & w) counts agreeing set bits and
        # popcount(x & ~w) counts disagreeing ones; their difference is the
        # signed contribution of this bit-plane pair, weighted by 2**(b1+b2).
        def _conv(nn, ff, yy, xx):
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    (
                        tvm.tir.popcount(
                            PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                            & Filter_q[ff, rc, ry, rx, b2]
                        )
                        - tvm.tir.popcount(
                            PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                            & ~Filter_q[ff, rc, ry, rx, b2]
                        )
                    )
                    << (b1b2)
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            ).astype(out_dtype)
    else:
        # Bipolar encoding: a single popcount of the AND suffices.
        def _conv(nn, ff, yy, xx):
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    tvm.tir.popcount(
                        PadInput_q[nn, rc, b1, yy * stride_h + ry, xx * stride_w + rx]
                        & Filter_q[ff, rc, ry, rx, b2]
                    )
                    << (b1b2)
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            ).astype(out_dtype)
    return te.compute(
        (batch, out_channel, out_height, out_width),
        _conv,
        name="Conv2dOutput",
        tag="bitserial_conv2d_nchw",
    )
def bitserial_conv2d_nhwc(
    data,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    pack_dtype="uint32",
    out_dtype="int16",
    unipolar=True,
):
    """Bitserial Conv2D operator.
    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_height, in_width, in_channel]
    kernel : tvm.te.Tensor
        4-D with shape [filter_height, filter_width, in_channel, num_filter]
    stride : int or a list/tuple of two ints
        stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of two or four ints
        padding size, [pad_height, pad_width], [pad_top, pad_left, pad_down, pad_right]
    activation_bits: int
        number of bits used for activations/input elements
    weight_bits: int
        number of bits used for weight elements
    out_dtype: str
        return type of convolution
    pack_dtype: str
        bit packing type
    unipolar: bool
        if binarization style is in unipolar 1/0 format, instead of bipolar -1/+1 format
    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, out_height, out_width, out_channel]
    """
    assert isinstance(stride, int) or len(stride) == 2
    # Pack activation bits along the channel axis (3); bit planes become axis 4.
    Input_q = bitpack(data, activation_bits, pack_axis=3, bit_axis=4, pack_type=pack_dtype)
    if len(kernel.shape) == 4:
        # Raw HWIO kernel: pack here.  The packed layout places num_filter at
        # axis 3 and the bit planes at axis 4.
        Filter_q = bitpack(kernel, weight_bits, pack_axis=2, bit_axis=4, pack_type=pack_dtype)
        kernel_h, kernel_w, _, num_filter, _ = get_const_tuple(Filter_q.shape)
    else:
        # Pre-packed kernel uses a different axis order: bits at axis 3,
        # num_filter at axis 4.
        Filter_q = kernel
        kernel_h, kernel_w, _, _, num_filter = get_const_tuple(Filter_q.shape)
    batch, in_height, in_width, in_channel_q, _ = get_const_tuple(Input_q.shape)
    if isinstance(padding, int) or (isinstance(padding, (tuple, list)) and len(padding) == 2):
        TPAD, LPAD, DPAD, RPAD = get_pad_tuple(padding, kernel)
    else:
        # A 4-element padding is already (top, left, down, right).
        TPAD, LPAD, DPAD, RPAD = padding
    # Pad only the spatial axes (1 and 2) of the packed input.
    pad_before = [0, TPAD, LPAD, 0, 0]
    pad_after = [0, DPAD, RPAD, 0, 0]
    # compute the output shape
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride
    out_channel = num_filter
    out_height = (in_height - kernel_h + TPAD + DPAD) // stride_h + 1
    out_width = (in_width - kernel_w + LPAD + RPAD) // stride_w + 1
    PadInput_q = pad(Input_q, pad_before, pad_after, name="PaddedInput")
    # Reduction axes: packed input channels, kernel window, and both bit planes.
    rc = te.reduce_axis((0, in_channel_q), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")
    b1 = te.reduce_axis((0, activation_bits), name="b1")
    b2 = te.reduce_axis((0, weight_bits), name="b2")
    if unipolar:
        # Unipolar (1/0) weights: difference of matching and mismatching
        # popcounts gives the signed contribution, weighted by 2**(b1+b2).
        def _conv(nn, yy, xx, ff):
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    (
                        tvm.tir.popcount(
                            PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                            & Filter_q[ry, rx, rc, ff, b2]
                        )
                        - tvm.tir.popcount(
                            PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                            & ~Filter_q[ry, rx, rc, ff, b2]
                        )
                    )
                    << b1b2
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            )
    else:
        # Bipolar encoding: a single popcount of the AND suffices.
        def _conv(nn, yy, xx, ff):
            b1b2 = (b1 + b2).astype(out_dtype)
            return te.sum(
                (
                    tvm.tir.popcount(
                        PadInput_q[nn, yy * stride_h + ry, xx * stride_w + rx, rc, b1]
                        & Filter_q[ry, rx, rc, ff, b2]
                    )
                    << b1b2
                ).astype(out_dtype),
                axis=[rc, ry, rx, b2, b1],
            )
    conv = te.compute(
        (batch, out_height, out_width, out_channel),
        _conv,
        name="Conv2dOutput",
        tag="bitserial_conv2d_nhwc",
    )
    return conv
@tvm.target.generic_func
def bitserial_conv2d_legalize(attrs, inputs, types):
    """Legalizes Bitserial Conv2D op.

    Generic fallback for the target-dispatched legalization hook; specific
    targets may register an override to rewrite the op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr, or None to keep the op unchanged
    """
    # not to change by default
    return None
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for Federation Extension."""
import re
import jsonschema
from oslo.utils import timeutils
import six
from keystone.common import config
from keystone import exception
from keystone.i18n import _
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
MAPPING_SCHEMA = {
"type": "object",
"required": ['rules'],
"properties": {
"rules": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"required": ['local', 'remote'],
"additionalProperties": False,
"properties": {
"local": {
"type": "array"
},
"remote": {
"minItems": 1,
"type": "array",
"items": {
"type": "object",
"oneOf": [
{"$ref": "#/definitions/empty"},
{"$ref": "#/definitions/any_one_of"},
{"$ref": "#/definitions/not_any_of"}
],
}
}
}
}
}
},
"definitions": {
"empty": {
"type": "object",
"required": ['type'],
"properties": {
"type": {
"type": "string"
},
},
"additionalProperties": False,
},
"any_one_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'any_one_of'],
"properties": {
"type": {
"type": "string"
},
"any_one_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
},
"not_any_of": {
"type": "object",
"additionalProperties": False,
"required": ['type', 'not_any_of'],
"properties": {
"type": {
"type": "string"
},
"not_any_of": {
"type": "array"
},
"regex": {
"type": "boolean"
}
}
}
}
}
def validate_mapping_structure(ref):
    """Check ``ref`` against MAPPING_SCHEMA, raising on any violation.

    All schema errors are collected (sorted for a deterministic message)
    and reported in a single ValidationError.
    """
    validator = jsonschema.Draft4Validator(MAPPING_SCHEMA)
    collected = [error.message + "\n"
                 for error in sorted(validator.iter_errors(ref), key=str)]
    if collected:
        raise exception.ValidationError(''.join(collected))
def validate_expiration(token_ref):
    """Raise Unauthorized if the federation token is already expired."""
    has_expired = timeutils.utcnow() > token_ref['expires']
    if has_expired:
        raise exception.Unauthorized(_('Federation token is expired'))
def validate_groups(group_ids, mapping_id, identity_api):
    """Ensure the mapping produced groups and that each group exists.

    :raises exception.MissingGroups: when the mapping yielded no groups
    :raises exception.MappedGroupNotFound: when a mapped group is unknown
    """
    if not group_ids:
        raise exception.MissingGroups(mapping_id=mapping_id)
    for gid in group_ids:
        try:
            identity_api.get_group(gid)
        except exception.GroupNotFound:
            raise exception.MappedGroupNotFound(group_id=gid,
                                                mapping_id=mapping_id)
def get_assertion_params_from_env(context):
    """Yield (key, value) pairs from the request environment whose keys
    start with the configured federation assertion prefix.
    """
    prefix = CONF.federation.assertion_prefix
    environment = context['environment']
    for key, value in environment.items():
        if key.startswith(prefix):
            yield (key, value)
class RuleProcessor(object):
    """A class to process assertions and mapping rules."""
    class _EvalType(object):
        """Mapping rule evaluation types."""
        # Rule matches when at least one listed value appears in the assertion.
        ANY_ONE_OF = 'any_one_of'
        # Rule matches when none of the listed values appear in the assertion.
        NOT_ANY_OF = 'not_any_of'
    def __init__(self, rules):
        """Initialize RuleProcessor.
        Example rules can be found at:
        :class:`keystone.tests.mapping_fixtures`
        :param rules: rules from a mapping
        :type rules: dict
        """
        self.rules = rules
    def process(self, assertion_data):
        """Transform assertion to a dictionary of user name and group ids
        based on mapping rules.
        This function will iterate through the mapping rules to find
        assertions that are valid.
        :param assertion_data: an assertion containing values from an IdP
        :type assertion_data: dict
        Example assertion_data::
            {
                'Email': '[email protected]',
                'UserName': 'testacct',
                'FirstName': 'Test',
                'LastName': 'Account',
                'orgPersonType': 'Tester'
            }
        :returns: dictionary with user and group_ids
        The expected return structure is::
            {
                'name': 'foobar',
                'group_ids': ['abc123', 'def456']
            }
        :raises exception.Unauthorized: if no rule maps a user name
        """
        # Assertions will come in as string key-value pairs, and will use a
        # semi-colon to indicate multiple values, i.e. groups.
        # This will create a new dictionary where the values are arrays, and
        # any multiple values are stored in the arrays.
        # Non-string values are silently dropped.
        assertion = dict((n, v.split(';')) for n, v in assertion_data.items()
                         if isinstance(v, six.string_types))
        identity_values = []
        for rule in self.rules:
            direct_maps = self._verify_all_requirements(rule['remote'],
                                                        assertion)
            # If the compare comes back as None, then the rule did not apply
            # to the assertion data, go on to the next rule
            if direct_maps is None:
                continue
            # If there are no direct mappings, then add the local mapping
            # directly to the array of saved values. However, if there is
            # a direct mapping, then perform variable replacement.
            if not direct_maps:
                identity_values += rule['local']
            else:
                for local in rule['local']:
                    new_local = self._update_local_mapping(local, direct_maps)
                    identity_values.append(new_local)
        mapped_properties = self._transform(identity_values)
        # A mapping that never produced a user name cannot authenticate.
        if mapped_properties.get('name') is None:
            raise exception.Unauthorized(_("Could not map user"))
        return mapped_properties
    def _transform(self, identity_values):
        """Transform local mappings, to an easier to understand format.
        Transform the incoming array to generate the return value for
        the process function. Generating content for Keystone tokens will
        be easier if some pre-processing is done at this level.
        :param identity_values: local mapping from valid evaluations
        :type identity_values: array of dict
        Example identity_values::
            [{'group': {'id': '0cd5e9'}, 'user': {'email': '[email protected]'}}]
        :returns: dictionary with user name and group_ids.
        """
        # initialize the group_ids as a set to eliminate duplicates
        user_name = None
        group_ids = set()
        for identity_value in identity_values:
            if 'user' in identity_value:
                # if a mapping outputs more than one user name, log it
                # (first name wins; subsequent ones are ignored)
                if user_name is not None:
                    LOG.warning(_('Ignoring user name %s'),
                                identity_value['user']['name'])
                else:
                    user_name = identity_value['user']['name']
            if 'group' in identity_value:
                group_ids.add(identity_value['group']['id'])
        return {'name': user_name, 'group_ids': list(group_ids)}
    def _update_local_mapping(self, local, direct_maps):
        """Replace any {0}, {1} ... values with data from the assertion.
        :param local: local mapping reference that needs to be updated
        :type local: dict
        :param direct_maps: list of identity values, used to update local
        :type direct_maps: list
        Example local::
            {'user': {'name': '{0} {1}', 'email': '{2}'}}
        Example direct_maps::
            ['Bob', 'Thompson', '[email protected]']
        :returns: new local mapping reference with replaced values.
        The expected return structure is::
            {'user': {'name': 'Bob Thompson', 'email': '[email protected]'}}
        """
        new = {}
        for k, v in six.iteritems(local):
            if isinstance(v, dict):
                # Recurse into nested mappings (e.g. {'user': {...}}).
                new_value = self._update_local_mapping(v, direct_maps)
            else:
                # str.format substitutes {0}, {1}, ... with the direct maps.
                new_value = v.format(*direct_maps)
            new[k] = new_value
        return new
    def _verify_all_requirements(self, requirements, assertion):
        """Go through the remote requirements of a rule, and compare against
        the assertion.
        If a value of ``None`` is returned, the rule with this assertion
        doesn't apply.
        If an array of zero length is returned, then there are no direct
        mappings to be performed, but the rule is valid.
        Otherwise, then it will return the values, in order, to be directly
        mapped, again, the rule is valid.
        :param requirements: list of remote requirements from rules
        :type requirements: list
        Example requirements::
            [
                {
                    "type": "UserName"
                },
                {
                    "type": "orgPersonType",
                    "any_one_of": [
                        "Customer"
                    ]
                }
            ]
        :param assertion: dict of attributes from an IdP
        :type assertion: dict
        Example assertion::
            {
                'UserName': ['testacct'],
                'LastName': ['Account'],
                'orgPersonType': ['Tester'],
                'Email': ['[email protected]'],
                'FirstName': ['Test']
            }
        :returns: list of direct mappings or None.
        """
        direct_maps = []
        for requirement in requirements:
            requirement_type = requirement['type']
            regex = requirement.get('regex', False)
            # 'any_one_of' requirements are pure filters: a mismatch rejects
            # the whole rule, a match contributes no direct-map values.
            any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
            if any_one_values is not None:
                if self._evaluate_requirement(any_one_values,
                                              requirement_type,
                                              self._EvalType.ANY_ONE_OF,
                                              regex,
                                              assertion):
                    continue
                else:
                    return None
            # 'not_any_of' requirements work the same way, inverted.
            not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
            if not_any_values is not None:
                if self._evaluate_requirement(not_any_values,
                                              requirement_type,
                                              self._EvalType.NOT_ANY_OF,
                                              regex,
                                              assertion):
                    continue
                else:
                    return None
            # If 'any_one_of' or 'not_any_of' are not found, then values are
            # within 'type'. Attempt to find that 'type' within the assertion.
            direct_map_values = assertion.get(requirement_type)
            if direct_map_values:
                direct_maps += direct_map_values
        return direct_maps
    def _evaluate_requirement(self, values, requirement_type,
                              eval_type, regex, assertion):
        """Evaluate the incoming requirement and assertion.
        If the requirement type does not exist in the assertion data, then
        return False. If regex is specified, then compare the values and
        assertion values. Otherwise, grab the intersection of the values
        and use that to compare against the evaluation type.
        :param values: list of allowed values, defined in the requirement
        :type values: list
        :param requirement_type: key to look for in the assertion
        :type requirement_type: string
        :param eval_type: determine how to evaluate requirements
        :type eval_type: string
        :param regex: perform evaluation with regex
        :type regex: boolean
        :param assertion: dict of attributes from the IdP
        :type assertion: dict
        :returns: boolean, whether requirement is valid or not.
        """
        assertion_values = assertion.get(requirement_type)
        if not assertion_values:
            return False
        if regex:
            # NOTE: in regex mode a NOT_ANY_OF requirement is satisfied
            # (returns True) as soon as any pattern matches — the inversion
            # relative to ANY_ONE_OF is applied by the caller's return-value
            # convention, not here.
            for value in values:
                for assertion_value in assertion_values:
                    if re.search(value, assertion_value):
                        return True
            return False
        any_match = bool(set(values).intersection(set(assertion_values)))
        if any_match and eval_type == self._EvalType.ANY_ONE_OF:
            return True
        if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
            return True
        return False
|
|
import logging
from asyncio import Future, iscoroutinefunction
from typing import Any, AsyncIterable, AsyncIterator, Awaitable, Callable, List, Optional, Tuple, TypeVar, cast
from expression.core import MailboxProcessor, TailCall, tailrec_async
from expression.system import AsyncDisposable, CancellationTokenSource, Disposable
from .msg import DisposableMsg, DisposeMsg, Msg
from .notification import MsgKind, Notification, OnCompleted, OnError, OnNext
from .types import AsyncObservable, AsyncObserver
from .utils import anoop
log = logging.getLogger(__name__)
TSource = TypeVar("TSource")
class AsyncIteratorObserver(AsyncObserver[TSource], AsyncIterable[TSource], AsyncDisposable):
    """An async observer that might be iterated asynchronously.

    Producer-side calls (``asend``/``athrow``/``aclose``) are serialized and
    handed to the consumer through a pair of futures: ``_push`` carries the
    next value (or exception) to the iterator, while ``_pull`` signals the
    producer that the value has been consumed.
    """

    def __init__(self, source: AsyncObservable[TSource]) -> None:
        super().__init__()
        self._push: Future[TSource] = Future()
        self._pull: Future[bool] = Future()
        # Producers queued behind the one currently holding the busy slot.
        self._awaiters: List[Future[bool]] = []
        self._subscription: Optional[AsyncDisposable] = None
        self._source = source
        self._busy = False

    async def asend(self, value: TSource) -> None:
        log.debug("AsyncIteratorObserver:asend(%s)", value)
        await self._serialize_access()
        self._push.set_result(value)
        await self._wait_for_pull()

    async def athrow(self, error: Exception) -> None:
        # Fix: the message previously had no placeholder for ``error``; the
        # stray argument made the logging module emit an internal
        # "not all arguments converted" error instead of the message.
        log.debug("AsyncIteratorObserver:athrow(%s)", error)
        await self._serialize_access()
        self._push.set_exception(error)
        await self._wait_for_pull()

    async def aclose(self) -> None:
        # Completion is surfaced to the iterator as StopAsyncIteration.
        await self._serialize_access()
        self._push.set_exception(StopAsyncIteration)
        await self._wait_for_pull()

    async def _wait_for_pull(self) -> None:
        """Block the producer until the iterator has taken the pushed value."""
        await self._pull
        self._pull = Future()
        self._busy = False

    async def _serialize_access(self) -> None:
        # Serialize producer events to the iterator: wait in line until the
        # current event has been consumed, then claim the busy slot.
        while self._busy:
            fut: Future[bool] = Future()
            self._awaiters.append(fut)
            await fut
            self._awaiters.remove(fut)
        self._busy = True

    async def wait_for_push(self) -> TSource:
        # Subscribe lazily on first pull so an un-iterated observer is free.
        if self._subscription is None:
            self._subscription = await self._source.subscribe_async(self)
        value = await self._push
        self._push = Future()
        self._pull.set_result(True)
        # Wake up only the first waiting producer; the rest stay queued.
        for awaiter in self._awaiters[:1]:
            awaiter.set_result(True)
        return value

    async def dispose_async(self) -> None:
        if self._subscription is not None:
            await self._subscription.dispose_async()
            self._subscription = None

    def __aiter__(self) -> AsyncIterator[TSource]:
        log.debug("AsyncIteratorObserver:__aiter__")
        return self

    async def __anext__(self) -> TSource:
        log.debug("AsyncIteratorObserver:__anext__()")
        return await self.wait_for_push()
class AsyncAnonymousObserver(AsyncObserver[TSource]):
    """An anonymous AsyncObserver.

    A sink whose behaviour is supplied by three optional anonymous
    coroutine functions — asend, athrow and aclose — used for listening
    to a source."""

    def __init__(
        self,
        asend: Callable[[TSource], Awaitable[None]] = anoop,
        athrow: Callable[[Exception], Awaitable[None]] = anoop,
        aclose: Callable[[], Awaitable[None]] = anoop,
    ) -> None:
        super().__init__()
        # Each handler must be a coroutine function so it can be awaited.
        for handler in (asend, athrow, aclose):
            assert iscoroutinefunction(handler)
        self._asend = asend
        self._athrow = athrow
        self._aclose = aclose

    async def asend(self, value: TSource) -> None:
        await self._asend(value)

    async def athrow(self, error: Exception) -> None:
        await self._athrow(error)

    async def aclose(self) -> None:
        await self._aclose()
class AsyncNotificationObserver(AsyncObserver[TSource]):
    """Observer created from an async notification processing function.

    Every event is wrapped in the corresponding ``Notification`` and
    forwarded to ``fn``."""

    def __init__(self, fn: Callable[[Notification[TSource]], Awaitable[None]]) -> None:
        self._fn = fn

    async def asend(self, value: TSource) -> None:
        notification: Notification[TSource] = OnNext(value)
        await self._fn(notification)

    async def athrow(self, error: Exception) -> None:
        notification: Notification[TSource] = OnError(error)
        await self._fn(notification)

    async def aclose(self) -> None:
        await self._fn(OnCompleted)
def noop() -> AsyncObserver[Any]:
    """Return an observer whose three callbacks all do nothing."""
    return AsyncAnonymousObserver(asend=anoop, athrow=anoop, aclose=anoop)
def safe_observer(obv: AsyncObserver[TSource], disposable: AsyncDisposable) -> AsyncObserver[TSource]:
    """Safe observer that wraps the given observer. Makes sure that
    invocations are serialized and that the Rx grammar is not violated:
    `(OnNext*(OnError|OnCompleted)?)`
    I.e one or more OnNext, then terminates with a single OnError or
    OnCompleted.
    Args:
        obv: Observer to serialize access to
        disposable: Disposable to dispose when the observer closes.
    """
    # All notifications are funneled through a single mailbox agent, which
    # both serializes delivery and stops the loop after a terminal event —
    # messages posted after termination are received but never forwarded.
    async def worker(inbox: MailboxProcessor[Notification[TSource]]):
        async def message_loop(running: bool) -> None:
            while running:
                msg = await inbox.receive()
                if msg.kind == MsgKind.ON_NEXT:
                    try:
                        await msg.accept_observer(obv)
                    except Exception as ex:
                        # A failing asend is converted to athrow, then the
                        # stream terminates.
                        await obv.athrow(ex)
                        running = False
                elif msg.kind == MsgKind.ON_ERROR:
                    # Dispose before delivering the terminal notification.
                    await disposable.dispose_async()
                    await msg.accept_observer(obv)
                    running = False
                else:
                    # ON_COMPLETED: dispose, then close the observer.
                    await disposable.dispose_async()
                    await obv.aclose()
                    running = False
        await message_loop(running=True)
    agent = MailboxProcessor.start(worker)
    # The returned observer only posts; the agent does the actual delivery.
    async def asend(value: TSource) -> None:
        agent.post(OnNext(value))
    async def athrow(ex: Exception) -> None:
        agent.post(OnError(ex))
    async def aclose() -> None:
        agent.post(OnCompleted)
    return AsyncAnonymousObserver(asend, athrow, aclose)
def auto_detach_observer(
    obv: AsyncObserver[TSource],
) -> Tuple[AsyncObserver[TSource], Callable[[Awaitable[AsyncDisposable]], Awaitable[AsyncDisposable]]]:
    """Wrap ``obv`` so that its subscription is disposed automatically.

    Returns a pair ``(safe_obv, auto_detach)``: ``safe_obv`` is a
    serialized, Rx-grammar-safe view of ``obv`` whose termination cancels
    the agent below, and ``auto_detach`` registers an awaited subscription
    so it gets disposed when the observer completes or errors.
    """
    cts = CancellationTokenSource()
    token = cts.token
    # Agent that collects registered disposables and, on DisposeMsg (or
    # cancellation), disposes them all and stops.
    async def worker(inbox: MailboxProcessor[Msg[TSource]]):
        @tailrec_async
        async def message_loop(disposables: List[AsyncDisposable]):
            if token.is_cancellation_requested:
                return
            cmd = await inbox.receive()
            if isinstance(cmd, DisposableMsg):
                disposables.append(cmd.disposable)
            else:
                # Any non-Disposable message is treated as "dispose all".
                for disp in disposables:
                    await disp.dispose_async()
                return
            return TailCall(disposables)
        await message_loop([])
    agent = MailboxProcessor.start(worker, token)
    # Disposing the canceller tears down the agent and all registrations.
    async def cancel():
        cts.cancel()
        agent.post(DisposeMsg)
    canceller = AsyncDisposable.create(cancel)
    safe_obv = safe_observer(obv, canceller)
    # Auto-detaches (disposes) the disposable when the observer completes with success or error.
    async def auto_detach(async_disposable: Awaitable[AsyncDisposable]):
        disposable = await async_disposable
        agent.post(DisposableMsg(disposable))
        return disposable
    return safe_obv, auto_detach
class AsyncAwaitableObserver(Future[TSource], AsyncObserver[TSource], Disposable):
    """An async awaitable observer.
    Both a future and async observer. The future resolves with the last
    value before the observer is closed. A close without any values sent
    is the same as cancelling the future."""
    def __init__(
        self,
        asend: Callable[[TSource], Awaitable[None]] = anoop,
        athrow: Callable[[Exception], Awaitable[None]] = anoop,
        aclose: Callable[[], Awaitable[None]] = anoop,
    ) -> None:
        super().__init__()
        # All three handlers must be coroutine functions so they can be awaited.
        assert iscoroutinefunction(asend)
        self._asend = asend
        assert iscoroutinefunction(athrow)
        self._athrow = athrow
        assert iscoroutinefunction(aclose)
        self._aclose = aclose
        # Most recent value seen; reported through the future on aclose().
        self._last_value: Optional[TSource] = None
        # Once stopped, all further events are ignored (Rx grammar).
        self._is_stopped = False
        self._has_value = False
    async def asend(self, value: TSource) -> None:
        log.debug("AsyncAwaitableObserver:asend(%s)", str(value))
        if self._is_stopped:
            log.debug("AsyncAwaitableObserver:asend(), Closed!!")
            return
        # Remember the value; the future only resolves on aclose().
        self._last_value = value
        self._has_value = True
        await self._asend(value)
    async def athrow(self, error: Exception) -> None:
        log.debug("AsyncAwaitableObserver:athrow()")
        if self._is_stopped:
            log.debug("AsyncAwaitableObserver:athrow(), Closed!!")
            return
        self._is_stopped = True
        # Fail the future before notifying the downstream handler.
        self.set_exception(error)
        await self._athrow(error)
    async def aclose(self) -> None:
        log.debug("AsyncAwaitableObserver:aclose")
        if self._is_stopped:
            log.debug("AsyncAwaitableObserver:aclose(), Closed!!")
            return
        self._is_stopped = True
        if self._has_value:
            # Resolve the future with the last value observed.
            self.set_result(cast("TSource", self._last_value))
        else:
            # No values were ever sent: closing equals cancelling the future.
            self.cancel()
        await self._aclose()
    def dispose(self) -> None:
        log.debug("AsyncAwaitableObserver:dispose()")
        # Disposing only mutes future events; it does not resolve the future.
        self._is_stopped = True
|
|
# Copyright 2014: Dassault Systemes
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import random
import ddt
import jsonschema
import mock
from rally.plugins.openstack.context.quotas import quotas
from tests.unit import test
QUOTAS_PATH = "rally.plugins.openstack.context.quotas."
@ddt.ddt
class QuotasTestCase(test.TestCase):
def setUp(self):
super(QuotasTestCase, self).setUp()
self.unlimited = -1
self.context = {
"config": {
},
"tenants": {
"t1": {"credential": mock.MagicMock()},
"t2": {"credential": mock.MagicMock()}},
"admin": {"credential": mock.MagicMock()},
"task": mock.MagicMock()
}
def test_quotas_schemas(self):
ctx = copy.deepcopy(self.context)
ctx["config"]["quotas"] = {
"cinder": {
"volumes": self.unlimited,
"snapshots": self.unlimited,
"gigabytes": self.unlimited
},
"nova": {
"instances": self.unlimited,
"cores": self.unlimited,
"ram": self.unlimited,
"floating_ips": self.unlimited,
"fixed_ips": self.unlimited,
"metadata_items": self.unlimited,
"injected_files": self.unlimited,
"injected_file_content_bytes": self.unlimited,
"injected_file_path_bytes": self.unlimited,
"key_pairs": self.unlimited,
"security_groups": self.unlimited,
"security_group_rules": self.unlimited
},
"neutron": {
"network": self.unlimited,
"subnet": self.unlimited,
"port": self.unlimited,
"router": self.unlimited,
"floatingip": self.unlimited,
"security_group": self.unlimited,
"security_group_rule": self.unlimited
}
}
for service in ctx["config"]["quotas"]:
for key in ctx["config"]["quotas"][service]:
# Test invalid values
ctx["config"]["quotas"][service][key] = self.unlimited - 1
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
pass
else:
self.fail("Invalid value %s must raise a validation error"
% ctx["config"]["quotas"][service][key])
ctx["config"]["quotas"][service][key] = 2.5
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
pass
else:
self.fail("Invalid value %s must raise a validation error"
% ctx["config"]["quotas"][service][key])
ctx["config"]["quotas"][service][key] = "-1"
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
pass
else:
self.fail("Invalid value %s must raise a validation error"
% ctx["config"]["quotas"][service][key])
# Test valid values
ctx["config"]["quotas"][service][key] = random.randint(0,
1000000)
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
self.fail("Positive integers are valid quota values")
ctx["config"]["quotas"][service][key] = self.unlimited
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
self.fail("%d is a valid quota value" % self.unlimited)
# Test additional keys are refused
ctx["config"]["quotas"][service]["additional"] = self.unlimited
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
pass
else:
self.fail("Additional keys must raise a validation error")
del ctx["config"]["quotas"][service]["additional"]
# Test valid keys are optional
ctx["config"]["quotas"][service] = {}
try:
quotas.Quotas.validate(ctx["config"]["quotas"])
except jsonschema.ValidationError:
self.fail("Valid quota keys are optional")
@mock.patch("rally.plugins.openstack.context."
"quotas.quotas.osclients.Clients")
@mock.patch("rally.plugins.openstack.context."
"quotas.cinder_quotas.CinderQuotas")
def test_cinder_quotas(self, mock_cinder_quotas, mock_clients):
ctx = copy.deepcopy(self.context)
ctx["config"]["quotas"] = {
"cinder": {
"volumes": self.unlimited,
"snapshots": self.unlimited,
"gigabytes": self.unlimited
}
}
tenants = ctx["tenants"]
cinder_quotas = ctx["config"]["quotas"]["cinder"]
with quotas.Quotas(ctx) as quotas_ctx:
quotas_ctx.setup()
expected_setup_calls = []
for tenant in tenants:
expected_setup_calls.append(mock.call()
.update(tenant,
**cinder_quotas))
mock_cinder_quotas.assert_has_calls(
expected_setup_calls, any_order=True)
mock_cinder_quotas.reset_mock()
expected_cleanup_calls = []
for tenant in tenants:
expected_cleanup_calls.append(mock.call().delete(tenant))
mock_cinder_quotas.assert_has_calls(
expected_cleanup_calls, any_order=True)
@mock.patch("rally.plugins.openstack.context."
"quotas.quotas.osclients.Clients")
@mock.patch("rally.plugins.openstack.context."
"quotas.nova_quotas.NovaQuotas")
def test_nova_quotas(self, mock_nova_quotas, mock_clients):
ctx = copy.deepcopy(self.context)
ctx["config"]["quotas"] = {
"nova": {
"instances": self.unlimited,
"cores": self.unlimited,
"ram": self.unlimited,
"floating-ips": self.unlimited,
"fixed-ips": self.unlimited,
"metadata_items": self.unlimited,
"injected_files": self.unlimited,
"injected_file_content_bytes": self.unlimited,
"injected_file_path_bytes": self.unlimited,
"key_pairs": self.unlimited,
"security_groups": self.unlimited,
"security_group_rules": self.unlimited,
}
}
nova_quotas = ctx["config"]["quotas"]["nova"]
with quotas.Quotas(ctx) as quotas_ctx:
quotas_ctx.setup()
expected_setup_calls = []
for tenant in ctx["tenants"]:
expected_setup_calls.append(mock.call()
.update(tenant,
**nova_quotas))
mock_nova_quotas.assert_has_calls(
expected_setup_calls, any_order=True)
mock_nova_quotas.reset_mock()
expected_cleanup_calls = []
for tenant in ctx["tenants"]:
expected_cleanup_calls.append(mock.call().delete(tenant))
mock_nova_quotas.assert_has_calls(
expected_cleanup_calls, any_order=True)
@mock.patch("rally.plugins.openstack.context."
"quotas.quotas.osclients.Clients")
@mock.patch("rally.plugins.openstack.context."
"quotas.neutron_quotas.NeutronQuotas")
def test_neutron_quotas(self, mock_neutron_quotas, mock_clients):
ctx = copy.deepcopy(self.context)
ctx["config"]["quotas"] = {
"neutron": {
"network": self.unlimited,
"subnet": self.unlimited,
"port": self.unlimited,
"router": self.unlimited,
"floatingip": self.unlimited,
"security_group": self.unlimited,
"security_group_rule": self.unlimited
}
}
neutron_quotas = ctx["config"]["quotas"]["neutron"]
with quotas.Quotas(ctx) as quotas_ctx:
quotas_ctx.setup()
expected_setup_calls = []
for tenant in ctx["tenants"]:
expected_setup_calls.append(mock.call()
.update(tenant,
**neutron_quotas))
mock_neutron_quotas.assert_has_calls(
expected_setup_calls, any_order=True)
mock_neutron_quotas.reset_mock()
expected_cleanup_calls = []
for tenant in ctx["tenants"]:
expected_cleanup_calls.append(mock.call().delete(tenant))
mock_neutron_quotas.assert_has_calls(
expected_cleanup_calls, any_order=True)
@mock.patch("rally.plugins.openstack.context."
"quotas.quotas.osclients.Clients")
@mock.patch("rally.plugins.openstack.context."
"quotas.nova_quotas.NovaQuotas")
@mock.patch("rally.plugins.openstack.context."
"quotas.cinder_quotas.CinderQuotas")
@mock.patch("rally.plugins.openstack.context."
"quotas.neutron_quotas.NeutronQuotas")
def test_no_quotas(self, mock_neutron_quotas, mock_cinder_quotas,
mock_nova_quotas, mock_clients):
ctx = copy.deepcopy(self.context)
if "quotas" in ctx["config"]:
del ctx["config"]["quotas"]
with quotas.Quotas(ctx) as quotas_ctx:
quotas_ctx.setup()
self.assertFalse(mock_cinder_quotas.update.called)
self.assertFalse(mock_nova_quotas.update.called)
self.assertFalse(mock_neutron_quotas.update.called)
self.assertFalse(mock_cinder_quotas.delete.called)
self.assertFalse(mock_nova_quotas.delete.called)
self.assertFalse(mock_neutron_quotas.delete.called)
@ddt.data(
{"quotas_ctxt": {"nova": {"cpu": 1}},
"quotas_class_path": "nova_quotas.NovaQuotas"},
{"quotas_ctxt": {"neutron": {"network": 2}},
"quotas_class_path": "neutron_quotas.NeutronQuotas"},
{"quotas_ctxt": {"cinder": {"volumes": 3}},
"quotas_class_path": "cinder_quotas.CinderQuotas"},
{"quotas_ctxt": {"manila": {"shares": 4}},
"quotas_class_path": "manila_quotas.ManilaQuotas"},
{"quotas_ctxt": {"designate": {"domains": 5}},
"quotas_class_path": "designate_quotas.DesignateQuotas"},
)
@ddt.unpack
def test_exception_during_cleanup(self, quotas_ctxt, quotas_class_path):
with mock.patch(QUOTAS_PATH + quotas_class_path) as mock_quotas:
mock_quotas.delete.side_effect = type(
"ExceptionDuringCleanup", (Exception, ), {})
ctx = copy.deepcopy(self.context)
ctx["config"]["quotas"] = quotas_ctxt
# NOTE(boris-42): ensure that cleanup didn't raise exceptions.
quotas.Quotas(ctx).cleanup()
self.assertEqual(mock_quotas.return_value.delete.call_count,
len(self.context["tenants"]))
|
|
import json
import os
import shutil
from datetime import date, timedelta
import mock
from django.conf import settings
from django.core import management
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon, Persona
from olympia.stats.management.commands import (
save_stats_to_file, serialize_stats)
from olympia.stats.management.commands.download_counts_from_file import is_valid_source # noqa
from olympia.stats.management.commands.update_counts_from_file import Command
from olympia.stats.models import (
DownloadCount, ThemeUpdateCount, UpdateCount, ThemeUserCount)
from olympia.zadmin.models import DownloadSource
# Root directory of the hive-export fixture files fed to the ADI
# management-command tests below.
hive_folder = os.path.join(settings.ROOT, 'src/olympia/stats/fixtures/files')
class FixturesFolderMixin(object):
    """Copy a dated fixtures folder into place before each test and
    remove it again afterwards.

    Subclasses must override ``date`` (name of the destination folder
    under ``hive_folder``) and ``source_folder`` (folder to copy from).
    """
    # You have to define these two values in your subclasses.
    date = 'YYYY-MM-DD'
    source_folder = 'dummy'

    def clean_up_files(self):
        """Remove the dated folder and everything in it, if present."""
        target = os.path.join(hive_folder, self.date)
        if not os.path.isdir(target):
            return
        for entry in os.listdir(target):
            os.unlink(os.path.join(target, entry))
        os.rmdir(target)

    def setUp(self):
        super(FixturesFolderMixin, self).setUp()
        # Start from a clean slate, then stage the fixture data.
        self.clean_up_files()
        shutil.copytree(os.path.join(hive_folder, self.source_folder),
                        os.path.join(hive_folder, self.date))

    def tearDown(self):
        self.clean_up_files()
        super(FixturesFolderMixin, self).tearDown()
class TestADICommand(FixturesFolderMixin, TestCase):
    """Tests for the ADI ("Add-on Daily Intelligence") management commands,
    driven by the hive-export fixture files for 2014-07-10."""
    fixtures = ('base/addon_3615', 'base/featured', 'addons/persona',
                'base/appversion.json')
    # Destination / source folders consumed by FixturesFolderMixin.
    date = '2014-07-10'
    source_folder = 'src'
    def setUp(self):
        super(TestADICommand, self).setUp()
        # Command instance used by the unit-level update_* tests below.
        self.command = Command()
    @mock.patch(
        'olympia.stats.management.commands.update_counts_from_file.'
        'save_stats_to_file')
    def test_update_counts_from_file(self, mock_save_stats_to_file):
        """Running the command on the fixtures yields one aggregated
        UpdateCount with the expected per-field breakdowns."""
        management.call_command('update_counts_from_file', hive_folder,
                                date=self.date)
        assert UpdateCount.objects.all().count() == 1
        update_count = UpdateCount.objects.last()
        assert update_count.count == 5
        assert update_count.date == date(2014, 7, 10)
        assert update_count.versions == {u'3.8': 2, u'3.7': 3}
        assert update_count.statuses == {u'userEnabled': 5}
        application = u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
        assert update_count.applications[application] == {u'3.6': 18}
        assert update_count.oses == {u'WINNT': 5}
        # Locale strings are not case-normalized: variants count separately.
        assert update_count.locales == {u'en-us': 1, u'en-US': 4}
        # save_stats_to_file is called with a non-saved model.
        update_count.id = None
        mock_save_stats_to_file.assert_called_once_with(update_count)
    def test_update_version(self):
        """update_version() records counts and trims over-long versions."""
        # Initialize the known addons and their versions.
        self.command.addons_versions = {3615: ['3.5', '3.6']}
        uc = UpdateCount(addon_id=3615)
        self.command.update_version(uc, '3.6', 123)
        assert uc.versions == {'3.6': 123}
        # Test very long version:
        self.command.update_version(uc, '1' * 33, 1)
        assert uc.versions == {'3.6': 123, '1' * 32: 1} # Trimmed.
    def test_update_status(self):
        """Only known statuses are recorded; unknown ones are dropped."""
        uc = UpdateCount(addon_id=3615)
        self.command.update_status(uc, 'foobar', 123) # Non-existent status.
        assert not uc.statuses
        self.command.update_status(uc, 'userEnabled', 123)
        assert uc.statuses == {'userEnabled': 123}
    def test_update_app(self):
        """Only known app GUIDs with well-formed versions are recorded."""
        firefox_guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
        uc = UpdateCount(addon_id=3615)
        self.command.update_app(uc, 'foobar', '1.0', 123) # Non-existent app.
        assert not uc.applications
        # Malformed versions.
        self.command.update_app(uc, firefox_guid, '3.0.1.2', 123)
        self.command.update_app(uc, firefox_guid, '3.0123', 123)
        self.command.update_app(uc, firefox_guid, '3.0c2', 123)
        self.command.update_app(uc, firefox_guid, 'a.b.c', 123)
        assert not uc.applications
        # Well formed versions.
        self.command.update_app(uc, firefox_guid, '1.0', 123)
        self.command.update_app(uc, firefox_guid, '1.0.1', 124)
        self.command.update_app(uc, firefox_guid, '1.0a1', 125)
        self.command.update_app(uc, firefox_guid, '1.0b2', 126)
        assert uc.applications == {firefox_guid: {
            '1.0': 123,
            '1.0.1': 124,
            '1.0a1': 125,
            '1.0b2': 126}}
    def test_update_os(self):
        """Only known OS names are recorded; unknown ones are dropped."""
        uc = UpdateCount(addon_id=3615)
        self.command.update_os(uc, 'foobar', 123) # Non-existent OS.
        assert not uc.oses
        self.command.update_os(uc, 'WINNT', 123)
        assert uc.oses == {'WINNT': 123}
    def test_update_locale(self):
        """Every locale from the language pack index is accepted."""
        current_locales = [ # Taken from the language pack index.
            'ach', 'af', 'ak', 'an', 'ar', 'as', 'ast', 'ast-ES', 'az',
            'bb-BK', 'be', 'bg', 'bn-BD', 'bn-IN', 'br', 'bs', 'ca',
            'ca-valencia', 'cs', 'csb', 'cy', 'cy-GB', 'da', 'de', 'dsb', 'el',
            'en-GB', 'en-ZA', 'eo', 'es-AR', 'es-CL', 'es-ES', 'es-MX', 'et',
            'eu', 'fa', 'ff', 'fi', 'fj-FJ', 'fr', 'fur-IT', 'fy-NL', 'ga-IE',
            'gd', 'gl', 'gu-IN', 'he', 'hi', 'hi-IN', 'hr', 'hsb', 'hu',
            'hy-AM', 'id', 'is', 'it', 'ja', 'kk', 'km', 'kn', 'ko', 'ku',
            'lg', 'lij', 'lt', 'lv', 'mai', 'mg', 'mk', 'ml', 'mr', 'ms',
            'nb-NO', 'nl', 'nn-NO', 'nr', 'nso', 'or', 'pa-IN', 'pl', 'pt-BR',
            'pt-PT', 'rm', 'ro', 'ru', 'si', 'sk', 'sl', 'son', 'sq', 'sr',
            'ss', 'st', 'sv-SE', 'sw', 'sw-TZ', 'ta', 'ta-IN', 'ta-LK', 'te',
            'th', 'tn', 'tr', 'ts', 'uk', 've', 'vi', 'wa', 'wo-SN', 'xh',
            'zap-MX-diiste', 'zh-CN', 'zh-TW', 'zu']
        uc = UpdateCount(addon_id=3615)
        self.command.update_locale(uc, 'foobar', 123) # Non-existent locale.
        assert not uc.locales
        for locale in current_locales:
            self.command.update_locale(uc, locale, 1)
        assert len(uc.locales) == len(current_locales)
    def test_trim_field(self):
        """trim_field() drops least-used keys until the serialized dict
        fits in the database column (2**16 - 1 bytes)."""
        uc = UpdateCount(addon_id=3615, count=1, date='2015-01-11')
        self.command.trim_field(uc.versions) # Empty field.
        assert not uc.versions
        uc.versions = {'3.6': 123, '3.7': 321}
        self.command.trim_field(uc.versions) # Small enough to fit in the db.
        assert uc.versions == {'3.6': 123, '3.7': 321} # Unchanged.
        very_long_key = 'x' * (2 ** 16)
        uc.versions[very_long_key] = 1
        self.command.trim_field(uc.versions) # Too big, must be trimmed.
        assert uc.versions == {'3.6': 123, '3.7': 321} # Keep the most used.
        uc.versions[very_long_key] = 1000 # Most used.
        self.command.trim_field(uc.versions) # Too big, must be trimmed.
        # Nothing left: least used removed, but still too big, so all the keys
        # were removed.
        assert uc.versions == {}
        # Make sure we can store a very large field in the database.
        long_key = 'x' * 65528 # This makes the dict barely fit in the db.
        uc.versions[long_key] = 1
        assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
        uc.save()
        uc = UpdateCount.objects.get(pk=uc.pk) # Reload
        # Fits in the database, so no truncation.
        assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
    @mock.patch(
        'olympia.stats.management.commands.download_counts_from_file.'
        'save_stats_to_file')
    def test_download_counts_from_file(self, mock_save_stats_to_file):
        """Downloads from known sources (full or prefix match) are counted
        into a single DownloadCount per add-on per day."""
        # Create the necessary "valid download sources" entries.
        DownloadSource.objects.create(name='search', type='full')
        DownloadSource.objects.create(name='coll', type='prefix')
        management.call_command('download_counts_from_file', hive_folder,
                                date=self.date)
        assert DownloadCount.objects.all().count() == 1
        download_count = DownloadCount.objects.last()
        assert download_count.count == 2
        assert download_count.date == date(2014, 7, 10)
        assert download_count.sources == {u'search': 1, u'collection': 1}
        # save_stats_to_file is called with a non-saved model.
        download_count.id = None
        mock_save_stats_to_file.assert_called_once_with(download_count)
    @mock.patch('olympia.stats.management.commands.save_stats_to_file')
    def test_theme_update_counts_from_file(self, mock_save_stats_to_file):
        """Theme update pings are aggregated per addon_id, merging the
        legacy persona_id based requests into the new addon_id ones."""
        management.call_command('theme_update_counts_from_file', hive_folder,
                                date=self.date)
        assert ThemeUpdateCount.objects.all().count() == 2
        tuc1 = ThemeUpdateCount.objects.get(addon_id=3615)
        assert tuc1.count == 2
        # Persona 813 has addon id 15663: we need the count to be the sum of
        # the "old" request on the persona_id 813 (only the one with the source
        # "gp") and the "new" request on the addon_id 15663.
        tuc2 = ThemeUpdateCount.objects.get(addon_id=15663)
        assert tuc2.count == 15
        assert mock_save_stats_to_file.call_count == 2
        # save_stats_to_file is called with a non-saved model.
        tuc1.id = None
        tuc2.id = None
        mock_save_stats_to_file.assert_has_calls(
            [mock.call(tuc1), mock.call(tuc2)])
    def test_update_theme_popularity_movers(self):
        """Popularity is the 7-day average of update counts; movers compares
        it to the average of the three weeks before that."""
        # Create ThemeUpdateCount entries for the persona 559 with addon_id
        # 15663 and the persona 575 with addon_id 15679 for the last 28 days.
        # We start from the previous day, as the theme_update_counts_from_*
        # scripts are gathering data for the day before.
        today = date.today()
        yesterday = today - timedelta(days=1)
        for i in range(28):
            d = yesterday - timedelta(days=i)
            ThemeUpdateCount.objects.create(addon_id=15663, count=i, date=d)
            ThemeUpdateCount.objects.create(addon_id=15679,
                                            count=i * 100, date=d)
        # Compute the popularity and movers.
        management.call_command('update_theme_popularity_movers')
        p1 = Persona.objects.get(pk=559)
        p2 = Persona.objects.get(pk=575)
        # The popularity is the average over the last 7 days, and as we created
        # entries with one more user per day in the past (or 100 more), the
        # calculation is "sum(range(7)) / 7" (or "sum(range(7)) * 100 / 7").
        assert p1.popularity == 3 # sum(range(7)) / 7
        assert p2.popularity == 300 # sum(range(7)) * 100 / 7
        # A ThemeUserCount row should have been created for each Persona with
        # today's date and the Persona popularity.
        t1 = ThemeUserCount.objects.get(addon_id=15663)
        t2 = ThemeUserCount.objects.get(addon_id=15679)
        assert t1.date == today
        assert t1.count == p1.popularity
        assert t2.date == today
        assert t2.count == p2.popularity
        # Three weeks avg (sum(range(21)) / 21) = 10 so (3 - 10) / 10.
        # The movers is computed with the following formula:
        # previous_3_weeks: the average over the 21 days before the last 7 days
        # movers: (popularity - previous_3_weeks) / previous_3_weeks
        # The calculation for the previous_3_weeks is:
        # previous_3_weeks: (sum(range(28) - sum(range(7))) * 100 / 21 == 1700.
        assert p1.movers == 0.0 # Because the popularity is <= 100.
        # We round the results to cope with floating point imprecision.
        assert round(p2.movers, 5) == round((300.0 - 1700) / 1700, 5)
    def test_is_valid_source(self):
        """is_valid_source() accepts exact matches from `fulls` and prefix
        matches from `prefixes`, and rejects everything else."""
        assert is_valid_source('foo',
                               fulls=['foo', 'bar'],
                               prefixes=['baz', 'cruux'])
        assert not is_valid_source('foob',
                                   fulls=['foo', 'bar'],
                                   prefixes=['baz', 'cruux'])
        assert is_valid_source('foobaz',
                               fulls=['foo', 'bar'],
                               prefixes=['baz', 'cruux'])
        assert not is_valid_source('ba',
                                   fulls=['foo', 'bar'],
                                   prefixes=['baz', 'cruux'])
class TestThemeADICommand(FixturesFolderMixin, TestCase):
    """Regression test for bug 1093699 (theme counts from hive files)."""
    date = '2014-11-06'
    fixtures = ['base/appversion.json']
    source_folder = '1093699'

    @mock.patch(
        'olympia.stats.management.commands.update_counts_from_file.'
        'save_stats_to_file')
    def test_update_counts_from_file_bug_1093699(self,
                                                 mock_save_stats_to_file):
        # The add-on referenced by the fixture rows must exist, otherwise
        # its counts are skipped.
        Addon.objects.create(guid='{fe9e9f88-42f0-40dc-970b-4b0e6b7a3d0b}',
                             type=amo.ADDON_THEME)
        management.call_command('update_counts_from_file', hive_folder,
                                date=self.date)
        assert UpdateCount.objects.all().count() == 1
        uc = UpdateCount.objects.last()
        assert uc.count == 1320
        # BUG FIX: was ``date(2014, 11, 06)`` — an integer literal with a
        # leading zero is a SyntaxError in Python 3; plain 6 is the same
        # value Python 2 parsed.
        assert uc.date == date(2014, 11, 6)
        assert (uc.versions ==
                {u'1.7.16': 1, u'userEnabled': 3, u'1.7.13': 2, u'1.7.11': 3,
                 u'1.6.0': 1, u'1.7.14': 1304, u'1.7.6': 6})
        assert (uc.statuses ==
                {u'Unknown': 3, u'userEnabled': 1259, u'userDisabled': 58})
        assert uc.oses == {u'WINNT': 1122, u'Darwin': 114, u'Linux': 84}
        assert uc.locales[u'es-ES'] == 20
        assert (uc.applications[u'{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}'] ==
                {u'2.0': 3})
        uc.id = None  # save_stats_to_file is called with a non-saved model.
        mock_save_stats_to_file.assert_called_once_with(uc)
def test_stats_from_model_theme_update_count():
    """serialize_stats() turns a ThemeUpdateCount into the expected JSON."""
    model = ThemeUpdateCount(addon_id=321, date='2016-01-18', count=123)
    expected = {
        'date': '2016-01-18',
        'addon': 321,
        'count': 123}
    assert json.loads(serialize_stats(model)) == expected
def test_stats_from_model_update_count():
    """serialize_stats() dumps every breakdown field of an UpdateCount."""
    model = UpdateCount(
        addon_id=321, date='2016-01-18',
        count=123,
        versions={u'3.8': 2, u'3.7': 3},
        statuses={u'userEnabled': 5},
        applications={u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}':
                      {u'3.6': 18}},
        oses={u'WINNT': 5},
        locales={u'en-us': 1, u'en-US': 4})
    expected = {
        'date': '2016-01-18',
        'addon': 321,
        'count': 123,
        'versions': {'3.7': 3, '3.8': 2},
        'oses': {'WINNT': 5},
        'applications': {
            '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}': {'3.6': 18}},
        'locales': {'en-US': 4, 'en-us': 1},
        'statuses': {'userEnabled': 5}}
    assert json.loads(serialize_stats(model)) == expected
def test_stats_from_model_download_count():
    """serialize_stats() includes the per-source breakdown for downloads."""
    model = DownloadCount(
        addon_id=321, date='2016-01-18', count=123,
        sources={u'search': 1, u'collection': 1})
    expected = {
        'date': '2016-01-18',
        'addon': 321,
        'count': 123,
        'sources': {'search': 1, 'collection': 1}}
    assert json.loads(serialize_stats(model)) == expected
@mock.patch('olympia.stats.management.commands.storage.save')
@mock.patch('olympia.stats.management.commands.ContentFile')
def test_save_stats_to_file(mock_ContentFile, mock_storage):
    """Stats are stored under the <addon>/<year>/<month>/... dated path."""
    mock_ContentFile.return_value = mock.sentinel.content
    tuc = ThemeUpdateCount(addon_id=321, date='2016-01-18', count=123)
    save_stats_to_file(tuc)
    mock_storage.assert_called_once_with(
        '321/2016/01/2016_01_18_themeupdatecount.json', mock.sentinel.content)
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
# Credit to Dr. Shyue Ping Ong for the template of the calculator
"""
This module implements a TEM pattern calculator.
"""
import json
import os
from collections import namedtuple
from fractions import Fraction
from functools import lru_cache
from typing import Dict, List, Tuple, cast, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import scipy.constants as sc
from pymatgen.analysis.diffraction.core import AbstractDiffractionPatternCalculator
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import latexify_spacegroup, unicodeify_spacegroup
# Fitted coefficients of the atomic X-ray scattering factors, keyed by
# element symbol; each value is a list of coefficient pairs whose columns
# are consumed as coeffs[:, 0] / coeffs[:, 1] in x_ray_factors().
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
    ATOMIC_SCATTERING_PARAMS = json.load(f)
# Module authorship metadata.
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.22"
__maintainer__ = "Jason Liang"
__email__ = "[email protected], [email protected]"
__date__ = "03/31/2020"
class TEMCalculator(AbstractDiffractionPatternCalculator):
"""
Computes the TEM pattern of a crystal structure for multiple Laue zones.
Code partially inspired from XRD calculation implementation. X-ray factor to electron factor
conversion based on the International Table of Crystallography.
#TODO: Could add "number of iterations", "magnification", "critical value of beam",
"twin direction" for certain materials, "sample thickness", and "excitation error s"
"""
    def __init__(
        self,
        symprec: float = None,
        voltage: float = 200,
        beam_direction: Tuple[int, int, int] = (0, 0, 1),
        camera_length: int = 160,
        debye_waller_factors: Dict[str, float] = None,
        cs: float = 1,
    ) -> None:
        """
        Args:
            symprec (float): Symmetry precision for structure refinement. If
                set to 0, no refinement is done. Otherwise, refinement is
                performed using spglib with provided precision.
            voltage (float): The wavelength is a function of the TEM microscope's
                voltage. By default, set to 200 kV. Units in kV.
            beam_direction (tuple): The direction of the electron beam fired onto the sample.
                By default, set to [0,0,1], which corresponds to the normal direction
                of the sample plane.
            camera_length (int): The distance from the sample to the projected diffraction pattern.
                By default, set to 160 cm. Units in cm.
            debye_waller_factors ({element symbol: float}): Allows the
                specification of Debye-Waller factors. Note that these
                factors are temperature dependent.
            cs (float): the chromatic aberration coefficient. set by default to 1 mm.
        """
        self.symprec = symprec  # None (or 0) skips refinement in get_pattern().
        self.voltage = voltage  # kV
        self.beam_direction = beam_direction
        self.camera_length = camera_length  # cm
        # Guard against sharing a mutable default: use a fresh dict when None.
        self.debye_waller_factors = debye_waller_factors or {}
        self.cs = cs  # mm
@lru_cache(1)
def wavelength_rel(self) -> float:
"""
Calculates the wavelength of the electron beam with relativistic kinematic effects taken
into account.
Args:
none
Returns:
Relativistic Wavelength (in angstroms)
"""
wavelength_rel = (
sc.h
/ np.sqrt(
2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c ** 2))
)
* (10 ** 10)
)
return wavelength_rel
@classmethod
def generate_points(cls, coord_left: int = -10, coord_right: int = 10) -> np.ndarray:
"""
Generates a bunch of 3D points that span a cube.
Args:
coord_left (int): The minimum coordinate value.
coord_right (int): The maximum coordinate value.
Returns:
Numpy 2d array
"""
points = [0, 0, 0]
coord_values = np.arange(coord_left, coord_right + 1)
points[0], points[1], points[2] = np.meshgrid(coord_values, coord_values, coord_values)
points_matrix = (np.ravel(points[i]) for i in range(0, 3))
result = np.vstack(list(points_matrix)).transpose()
return result
def zone_axis_filter(
self, points: Union[List[Tuple[int, int, int]], np.ndarray], laue_zone: int = 0
) -> Union[List[Tuple[int, int, int]]]:
"""
Filters out all points that exist within the specified Laue zone according to the zone axis rule.
Args:
points (np.ndarray): The list of points to be filtered.
laue_zone (int): The desired Laue zone.
Returns:
list of 3-tuples
"""
if any(isinstance(n, tuple) for n in points):
return list(points)
if len(points) == 0:
return []
filtered = np.where(np.dot(np.array(self.beam_direction), np.transpose(points)) == laue_zone)
result = points[filtered]
result_tuples = cast(List[Tuple[int, int, int]], [tuple(x) for x in result.tolist()])
return result_tuples
def get_interplanar_spacings(
self, structure: Structure, points: Union[List[Tuple[int, int, int]], np.ndarray]
) -> Dict[Tuple[int, int, int], float]:
"""
Args:
structure (Structure): the input structure.
points (tuple): the desired hkl indices.
Returns:
Dict of hkl to its interplanar spacing, in angstroms (float).
"""
points_filtered = self.zone_axis_filter(points)
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array(list(map(lambda x: structure.lattice.d_hkl(x), points_filtered)))
interplanar_spacings = dict(zip(points_filtered, interplanar_spacings_val))
return interplanar_spacings
def bragg_angles(
self, interplanar_spacings: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Gets the Bragg angles for every hkl point passed in (where n = 1).
Args:
interplanar_spacings (dict): dictionary of hkl to interplanar spacing
Returns:
dict of hkl plane (3-tuple) to Bragg angle in radians (float)
"""
plane = list(interplanar_spacings.keys())
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
bragg_angles = dict(zip(plane, bragg_angles_val))
return bragg_angles
def get_s2(self, bragg_angles: Dict[Tuple[int, int, int], float]) -> Dict[Tuple[int, int, int], float]:
"""
Calculates the s squared parameter (= square of sin theta over lambda) for each hkl plane.
Args:
bragg_angles (Dict): The bragg angles for each hkl plane.
Returns:
Dict of hkl plane to s2 parameter, calculates the s squared parameter
(= square of sin theta over lambda).
"""
plane = list(bragg_angles.keys())
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
s2 = dict(zip(plane, s2_val))
return s2
def x_ray_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates x-ray factors, which are required to calculate atomic scattering factors. Method partially inspired
by the equivalent process in the xrd module.
Args:
structure (Structure): The input structure.
bragg_angles (Dict): Dictionary of hkl plane to Bragg angle.
Returns:
dict of atomic symbol to another dict of hkl plane to x-ray factor (in angstroms).
"""
x_ray_factors = {}
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
scattering_factors_for_atom = {}
for atom in atoms:
coeffs = np.array(ATOMIC_SCATTERING_PARAMS[atom.symbol])
for plane in bragg_angles:
scattering_factor_curr = atom.Z - 41.78214 * s2[plane] * np.sum(
coeffs[:, 0] * np.exp(-coeffs[:, 1] * s2[plane]), axis=None
)
scattering_factors_for_atom[plane] = scattering_factor_curr
x_ray_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return x_ray_factors
def electron_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates atomic scattering factors for electrons using the Mott-Bethe formula (1st order Born approximation).
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict from atomic symbol to another dict of hkl plane to factor (in angstroms)
"""
electron_scattering_factors = {}
x_ray_factors = self.x_ray_factors(structure, bragg_angles)
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
prefactor = 0.023934
scattering_factors_for_atom = {}
for atom in atoms:
for plane in bragg_angles:
scattering_factor_curr = prefactor * (atom.Z - x_ray_factors[atom.symbol][plane]) / s2[plane]
scattering_factors_for_atom[plane] = scattering_factor_curr
electron_scattering_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return electron_scattering_factors
    def cell_scattering_factors(
        self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
    ) -> Dict[Tuple[int, int, int], complex]:
        """
        Calculates the scattering factor for the whole cell: the structure
        factor F_hkl = sum over sites of f_e * exp(2*pi*i * g.r).

        Args:
            structure (Structure): The input structure.
            bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.

        Returns:
            dict of hkl plane (3-tuple) to complex scattering factor
            (in angstroms).

        Note:
            The site occupancy ``occu`` is currently unused below, so
            partially occupied sites are not weighted — TODO confirm
            whether that is intentional.
        """
        cell_scattering_factors = {}
        electron_scattering_factors = self.electron_scattering_factors(structure, bragg_angles)
        scattering_factor_curr = 0
        for plane in bragg_angles:
            for site in structure:
                for sp, occu in site.species.items():
                    # Phase g . r from the site's fractional coordinates.
                    g_dot_r = np.dot(np.array(plane), np.transpose(site.frac_coords))
                    scattering_factor_curr += electron_scattering_factors[sp.symbol][plane] * np.exp(
                        2j * np.pi * g_dot_r
                    )
            cell_scattering_factors[plane] = scattering_factor_curr
            scattering_factor_curr = 0
        return cell_scattering_factors
def cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Calculates cell intensity for each hkl plane. For simplicity's sake, take I = |F|**2.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to cell intensity
"""
csf = self.cell_scattering_factors(structure, bragg_angles)
plane = bragg_angles.keys()
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
cell_intensity = dict(zip(plane, cell_intensity_val))
return cell_intensity
def get_pattern(
self,
structure: Structure,
scaled: bool = None,
two_theta_range: Tuple[float, float] = None,
) -> pd.DataFrame:
"""
Returns all relevant TEM DP info in a pandas dataframe.
Args:
structure (Structure): The input structure.
scaled (boolean): Required value for inheritance, does nothing in TEM pattern
two_theta_range (Tuple): Required value for inheritance, does nothing in TEM pattern
Returns:
PandasDataFrame
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
field_names = [
"Position",
"(hkl)",
"Intensity (norm)",
"Film radius",
"Interplanar Spacing",
]
rows_list = []
for dot in tem_dots:
dict1 = {
"Pos": dot.position,
"(hkl)": dot.hkl,
"Intnsty (norm)": dot.intensity,
"Film rad": dot.film_radius,
"Interplanar Spacing": dot.d_spacing,
}
rows_list.append(dict1)
df = pd.DataFrame(rows_list, columns=field_names)
return df
def normalized_cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Normalizes the cell_intensity dict to 1, for use in plotting.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to normalized cell intensity
"""
normalized_cell_intensity = {}
cell_intensity = self.cell_intensity(structure, bragg_angles)
max_intensity = max(cell_intensity.values())
norm_factor = 1 / max_intensity
for plane in cell_intensity:
normalized_cell_intensity[plane] = cell_intensity[plane] * norm_factor
return normalized_cell_intensity
def is_parallel(
self,
structure: Structure,
plane: Tuple[int, int, int],
other_plane: Tuple[int, int, int],
) -> bool:
"""
Checks if two hkl planes are parallel in reciprocal space.
Args:
structure (Structure): The input structure.
plane (3-tuple): The first plane to be compared.
other_plane (3-tuple): The other plane to be compared.
Returns:
boolean
"""
phi = self.get_interplanar_angle(structure, plane, other_plane)
return phi in (180, 0) or np.isnan(phi)
def get_first_point(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], float]:
"""
Gets the first point to be plotted in the 2D DP, corresponding to maximum d/minimum R.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of a hkl plane to max interplanar distance.
"""
max_d = -100.0
max_d_plane = (0, 0, 1)
points = self.zone_axis_filter(points)
spacings = self.get_interplanar_spacings(structure, points)
for plane in sorted(spacings.keys()):
if spacings[plane] > max_d:
max_d_plane = plane
max_d = spacings[plane]
return {max_d_plane: max_d}
@classmethod
def get_interplanar_angle(cls, structure: Structure, p1: Tuple[int, int, int], p2: Tuple[int, int, int]) -> float:
"""
Returns the interplanar angle (in degrees) between the normal of two crystal planes.
Formulas from International Tables for Crystallography Volume C pp. 2-9.
Args:
structure (Structure): The input structure.
p1 (3-tuple): plane 1
p2 (3-tuple): plane 2
Returns:
float
"""
a, b, c = structure.lattice.a, structure.lattice.b, structure.lattice.c
alpha, beta, gamma = (
np.deg2rad(structure.lattice.alpha),
np.deg2rad(structure.lattice.beta),
np.deg2rad(structure.lattice.gamma),
)
v = structure.lattice.volume
a_star = b * c * np.sin(alpha) / v
b_star = a * c * np.sin(beta) / v
c_star = a * b * np.sin(gamma) / v
cos_alpha_star = (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
cos_beta_star = (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
cos_gamma_star = (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
r1_norm = np.sqrt(
p1[0] ** 2 * a_star ** 2
+ p1[1] ** 2 * b_star ** 2
+ p1[2] ** 2 * c_star ** 2
+ 2 * p1[0] * p1[1] * a_star * b_star * cos_gamma_star
+ 2 * p1[0] * p1[2] * a_star * c_star * cos_beta_star
+ 2 * p1[1] * p1[2] * b_star * c_star * cos_gamma_star
)
r2_norm = np.sqrt(
p2[0] ** 2 * a_star ** 2
+ p2[1] ** 2 * b_star ** 2
+ p2[2] ** 2 * c_star ** 2
+ 2 * p2[0] * p2[1] * a_star * b_star * cos_gamma_star
+ 2 * p2[0] * p2[2] * a_star * c_star * cos_beta_star
+ 2 * p2[1] * p2[2] * b_star * c_star * cos_gamma_star
)
r1_dot_r2 = (
p1[0] * p2[0] * a_star ** 2
+ p1[1] * p2[1] * b_star ** 2
+ p1[2] * p2[2] * c_star ** 2
+ (p1[0] * p2[1] + p2[0] * p1[1]) * a_star * b_star * cos_gamma_star
+ (p1[0] * p2[2] + p2[0] * p1[1]) * a_star * c_star * cos_beta_star
+ (p1[1] * p2[2] + p2[1] * p1[2]) * b_star * c_star * cos_alpha_star
)
phi = np.arccos(r1_dot_r2 / (r1_norm * r2_norm))
return np.rad2deg(phi)
@classmethod
def get_plot_coeffs(
cls,
p1: Tuple[int, int, int],
p2: Tuple[int, int, int],
p3: Tuple[int, int, int],
) -> np.ndarray:
"""
Calculates coefficients of the vector addition required to generate positions for each DP point
by the Moore-Penrose inverse method.
Args:
p1 (3-tuple): The first point. Fixed.
p2 (3-tuple): The second point. Fixed.
p3 (3-tuple): The point whose coefficients are to be calculted.
Returns:
Numpy array
"""
a = np.array([[p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]]])
b = np.array([[p3[0], p3[1], p3[2]]]).T
a_pinv = np.linalg.pinv(a)
x = np.dot(a_pinv, b)
return np.ravel(x)
def get_positions(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], np.ndarray]:
    """
    Calculates all the positions of each hkl point in the 2D diffraction pattern by vector addition.
    Distance in centimeters.
    Args:
        structure (Structure): The input structure.
        points (list): All points to be checked.
    Returns:
        dict of hkl plane to xy-coordinates.
    """
    positions = {}
    points = self.zone_axis_filter(points)
    # first is the max_d, min_r
    first_point_dict = self.get_first_point(structure, points)
    for point in first_point_dict:
        first_point = point
        first_d = first_point_dict[point]
    spacings = self.get_interplanar_spacings(structure, points)
    # second is the first non-parallel-to-first-point vector when sorted.
    # note 000 is "parallel" to every plane vector.
    for plane in sorted(spacings.keys()):
        second_point, second_d = plane, spacings[plane]
        if not self.is_parallel(structure, first_point, second_point):
            break
    p1 = first_point
    p2 = second_point
    # Temporarily remove the basis points (and the direct beam) so the
    # vector-addition loop below only handles the remaining planes.
    if (0, 0, 0) in points:
        points.remove((0, 0, 0))
    points.remove(first_point)
    points.remove(second_point)
    # Direct beam sits at the origin; each basis reflection is placed at
    # radius r = relativistic wavelength * camera length / d-spacing.
    positions[(0, 0, 0)] = np.array([0, 0])
    r1 = self.wavelength_rel() * self.camera_length / first_d
    positions[first_point] = np.array([r1, 0])
    r2 = self.wavelength_rel() * self.camera_length / second_d
    phi = np.deg2rad(self.get_interplanar_angle(structure, first_point, second_point))
    positions[second_point] = np.array([r2 * np.cos(phi), r2 * np.sin(phi)])
    # Every other point is a linear combination of the two basis positions.
    for plane in points:
        coeffs = self.get_plot_coeffs(p1, p2, plane)
        pos = np.array(
            [
                coeffs[0] * positions[first_point][0] + coeffs[1] * positions[second_point][0],
                coeffs[0] * positions[first_point][1] + coeffs[1] * positions[second_point][1],
            ]
        )
        positions[plane] = pos
    # Restore the removed entries so the caller's list is left unchanged.
    points.append((0, 0, 0))
    points.append(first_point)
    points.append(second_point)
    return positions
def tem_dots(self, structure: Structure, points) -> List:
    """
    Generates all TEM_dot as named tuples that will appear on the 2D diffraction pattern.
    Args:
        structure (Structure): The input structure.
        points (list): All points to be checked.
    Returns:
        list of TEM_dots
    """
    # Create the namedtuple type once; the original rebuilt the class on
    # every loop iteration, which is wasteful and yields distinct types.
    dot = namedtuple("dot", ["position", "hkl", "intensity", "film_radius", "d_spacing"])
    interplanar_spacings = self.get_interplanar_spacings(structure, points)
    bragg_angles = self.bragg_angles(interplanar_spacings)
    cell_intensity = self.normalized_cell_intensity(structure, bragg_angles)
    positions = self.get_positions(structure, points)
    # film_radius does not depend on the plane, so compute it once.
    film_radius = 0.91 * (10 ** -3 * self.cs * self.wavelength_rel() ** 3) ** Fraction("1/4")
    dots = []
    for plane in cell_intensity:
        dots.append(
            dot(
                positions[plane],
                plane,
                cell_intensity[plane],
                film_radius,
                interplanar_spacings[plane],
            )
        )
    return dots
def get_plot_2d(self, structure: Structure) -> go.Figure:
    """
    Generates the 2D diffraction pattern of the input structure.
    Args:
        structure (Structure): The input structure.
    Returns:
        Figure
    """
    if self.symprec:
        # Symmetry-refine the structure before generating the pattern.
        finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
        structure = finder.get_refined_structure()
    points = self.generate_points(-10, 11)
    tem_dots = self.tem_dots(structure, points)
    xs = []
    ys = []
    hkls = []
    intensities = []
    for dot in tem_dots:
        xs.append(dot.position[0])
        ys.append(dot.position[1])
        hkls.append(str(dot.hkl))
        intensities.append(dot.intensity)
    # Pretty-print the hkl labels for the hover text.
    hkls = list(map(unicodeify_spacegroup, list(map(latexify_spacegroup, hkls))))
    data = [
        # Diffraction spots, shaded black (intensity 0) to white (intensity 1).
        go.Scatter(
            x=xs,
            y=ys,
            text=hkls,
            hoverinfo="text",
            mode="markers",
            marker=dict(
                size=8,
                cmax=1,
                cmin=0,
                color=intensities,
                colorscale=[[0, "black"], [1.0, "white"]],
            ),
            showlegend=False,
        ),
        # The direct beam at the origin, drawn larger and always white.
        go.Scatter(
            x=[0],
            y=[0],
            text="(0, 0, 0): Direct beam",
            hoverinfo="text",
            mode="markers",
            marker=dict(size=14, cmax=1, cmin=0, color="white"),
            showlegend=False,
        ),
    ]
    layout = go.Layout(
        title="2D Diffraction Pattern<br>Beam Direction: " + "".join(str(e) for e in self.beam_direction),
        font=dict(size=14, color="#7f7f7f"),
        hovermode="closest",
        # Axes are hidden: the pattern is displayed like a film plate.
        xaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        yaxis=dict(
            range=[-4, 4],
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks="",
            showticklabels=False,
        ),
        width=550,
        height=550,
        paper_bgcolor="rgba(100,110,110,0.5)",
        plot_bgcolor="black",
    )
    fig = go.Figure(data=data, layout=layout)
    return fig
def get_plot_2d_concise(self, structure: Structure) -> go.Figure:
    """
    Generates the concise 2D diffraction pattern of the input structure of a smaller size and without layout.
    Does not display.
    Args:
        structure (Structure): The input structure.
    Returns:
        Figure
    """
    if self.symprec:
        # Symmetry-refine the structure first.
        structure = SpacegroupAnalyzer(structure, symprec=self.symprec).get_refined_structure()
    tem_dots = self.tem_dots(structure, self.generate_points(-10, 11))
    xs, ys, hkls, intensities = [], [], [], []
    for d in tem_dots:
        if d.hkl == (0, 0, 0):
            continue  # omit the direct beam in the concise view
        xs.append(d.position[0])
        ys.append(d.position[1])
        hkls.append(d.hkl)
        intensities.append(d.intensity)
    spots = go.Scatter(
        x=xs,
        y=ys,
        text=hkls,
        mode="markers",
        hoverinfo="skip",
        marker=dict(
            size=4,
            cmax=1,
            cmin=0,
            color=intensities,
            colorscale=[[0, "black"], [1.0, "white"]],
        ),
        showlegend=False,
    )
    # Identical hidden-axis settings for both axes.
    hidden_axis = dict(
        range=[-4, 4],
        showgrid=False,
        zeroline=False,
        showline=False,
        ticks="",
        showticklabels=False,
    )
    layout = go.Layout(
        xaxis=hidden_axis,
        yaxis=hidden_axis,
        plot_bgcolor="black",
        margin={"l": 0, "r": 0, "t": 0, "b": 0},
        width=121,
        height=121,
    )
    fig = go.Figure(data=[spots], layout=layout)
    fig.layout.update(showlegend=False)
    return fig
|
|
# -*- coding: utf-8 -*-
import psycopg2
import sys
import re
class Paper:
    """One DBLP publication record parsed incrementally from the text dump.

    Fields are filled in by repeated calls to update(), one line at a time.
    Bug fix: the attributes were previously CLASS-level, so the mutable
    `references` and `keywords` lists were shared by every Paper instance
    (the 3-reference cap applied across all papers). They are now set per
    instance in __init__.
    """

    def __init__(self):
        self.pid = None        # database paper id
        self.title = None
        self.author = None
        self.aid = None        # database author id
        self.wid = None        # database writes-relation id
        self.year = None
        self.venue = None
        self.vid = None        # database venue id
        self.index = None      # dataset #index value
        self.references = []   # at most 3 referenced paper indices
        self.abstract = None
        self.keywords = []     # up to 5 lowercased words from the abstract

    def make_keywords(self):
        """Derive keywords from the abstract: strip punctuation, keep the
        first 5 words when there are more than 7, and lowercase them."""
        if self.abstract:
            tmp_abstract = self.abstract.replace(',', '').replace('.', '')
            # list(...) keeps this working on both Python 2 and 3.
            not_empty_words = list(filter(None, tmp_abstract.split(' ')))
            if len(not_empty_words) > 7:
                not_empty_words = not_empty_words[0:5]
            self.keywords = [w.lower() for w in not_empty_words]

    def update(self, line):
        """Consume one line of the dump, dispatching on its '#x' prefix."""
        line = line.strip()
        if line.startswith('#*'):
            self.title = line[2:]
        if line.startswith('#@'):
            self.author = line[2:]
        if line.startswith('#t'):
            self.year = line[2:]
        if line.startswith('#c'):
            self.venue = line[2:]
        if line.startswith('#i'):
            self.index = line[2:]
        if line.startswith('#%'):
            if line[2:] != '':
                # keep only the first 3 references per paper
                if len(self.references) < 3:
                    self.references.append(line[2:])
        if line.startswith('#!'):
            abstract = line[2:]
            if abstract == '':
                # fall back to the title when the abstract is empty
                abstract = self.title
            self.abstract = abstract
            self.make_keywords()

    def printme(self):
        """Debug helper: print the dataset index of this paper."""
        print(self.index)
        print('')
class DB:
    """Context-manager wrapper around a psycopg2 connection to `stonedb`.

    Each get_or_create_* method looks a row up and inserts it (with a
    hand-rolled MAX(id)+1 key) when absent, returning the row id.
    Fixes: Python-2-only `except Exception, e` syntax removed (the handler
    only re-raised, losing the traceback via `raise e`), and a dead
    MAX(pid) query removed from get_or_create_paper.
    """

    def __init__(self):
        self.connection = psycopg2.connect(database="stonedb", user="postgres")
        self.cursor = self.connection.cursor()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit whatever is pending, then release the resources.
        self.connection.commit()
        self.cursor.close()
        self.connection.close()

    def _next_id(self, id_column, table):
        """Emulate a sequence with MAX(id)+1.

        `id_column` and `table` are internal constants, never user input,
        so interpolating them into the SQL text is safe here.
        """
        self.cursor.execute("SELECT MAX(%s) FROM %s;" % (id_column, table))
        return (self.cursor.fetchone()[0] or 0) + 1

    def get_or_create_author(self, a):
        """Return the id of author `a`, inserting the row if needed."""
        cursor = self.cursor
        cursor.execute("SELECT * FROM author WHERE name = %s;", (a,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("aid", "author")
        cursor.execute("INSERT INTO author (aid, name) VALUES (%s, %s)", (new_id, a,))
        return new_id

    def get_or_create_venue(self, v):
        """Return the id of venue `v`, inserting the row if needed."""
        cursor = self.cursor
        cursor.execute("SELECT * FROM venue WHERE name = %s;", (v,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("vid", "venue")
        cursor.execute("INSERT INTO venue (vid, name) VALUES (%s, %s)", (new_id, v,))
        return new_id

    def get_or_create_paper(self, p):
        """Return the id of paper `p`; unlike the other tables the key is
        the dataset's own #index value rather than MAX(pid)+1."""
        cursor = self.cursor
        cursor.execute("SELECT * FROM paper WHERE pid = %s;", (p.index,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        cursor.execute(
            "INSERT INTO paper (pid, title, year, venue_id) VALUES (%s, %s, %s, %s)",
            (p.index, p.title, p.year, p.vid)
        )
        return p.index

    def get_or_create_writes(self, p):
        """Record that author p.aid wrote paper p.pid; return the row id."""
        cursor = self.cursor
        cursor.execute("SELECT * FROM writes WHERE wid = %s;", (p.wid,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("wid", "writes")
        cursor.execute(
            "INSERT INTO writes (wid, paper_id, author_id) VALUES (%s, %s, %s)",
            (new_id, p.pid, p.aid)
        )
        return new_id

    def get_or_create_keyword(self, kw):
        """Return the id of keyword `kw`, inserting the row if needed."""
        cursor = self.cursor
        cursor.execute("SELECT * FROM keyword WHERE value = %s;", (kw,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("kid", "keyword")
        cursor.execute(
            "INSERT INTO keyword (kid, value) VALUES (%s, %s)",
            (new_id, kw)
        )
        return new_id

    def get_or_create_contains(self, p, kwid):
        """Link paper p.pid to keyword kwid.

        NOTE(review): the lookup always queries cid = 0 (same as the
        original), so an existing link is effectively never found and a
        fresh row is inserted each time — looks like a latent bug; kept
        as-is to preserve behavior, but worth confirming.
        """
        cursor = self.cursor
        cursor.execute("SELECT * FROM contains WHERE cid = %s;", (0,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("cid", "contains")
        cursor.execute(
            "INSERT INTO contains (cid, paper_id, keyword_id) VALUES (%s, %s, %s)",
            (new_id, p.pid, kwid)
        )
        return new_id

    def get_or_create_references(self, p, to_id):
        """Record a citation from p.pid to to_id.

        NOTE(review): same rid = 0 lookup oddity as get_or_create_contains.
        """
        cursor = self.cursor
        cursor.execute("SELECT * FROM refs WHERE rid = %s;", (0,))
        row = cursor.fetchone()
        if row is not None:
            return row[0]
        new_id = self._next_id("rid", "refs")
        cursor.execute(
            "INSERT INTO refs (rid, from_id, to_id) VALUES (%s, %s, %s)",
            (new_id, p.pid, to_id)
        )
        return new_id

    def insert_paper(self, p):
        """Persist a fully-parsed Paper and all its relations.

        The original wrapped this in `except Exception, e: raise e`, which
        is Python-2-only syntax and truncated the traceback; exceptions now
        simply propagate.
        """
        p.vid = self.get_or_create_venue(p.venue)
        p.pid = self.get_or_create_paper(p)
        p.aid = self.get_or_create_author(p.author)
        p.wid = self.get_or_create_writes(p)
        for kw in p.keywords:
            kwid = self.get_or_create_keyword(kw)
            self.get_or_create_contains(p, kwid)
        for ref_id in p.references:
            self.get_or_create_references(p, ref_id)
def read_dataset():
    """Stream the DBLP publications dump into the database.

    Records are blank-line separated; each field line is dispatched by
    Paper.update(). Commits in batches of 3000 lines and stops after
    ~1e6 lines.
    """
    # do sed 's/#index/#i/' publications.txt > publications_new.txt before use
    # script
    data_file = '/home/abcdw/tmp/dblp/DBLP_Citation_2014_May/publications_new.txt'
    with DB() as db:
        with open(data_file) as f:
            i = 0
            papers_number = 0
            p = Paper()
            for line in f:
                i = i + 1
                if i % 3000 == 0:
                    # Commit in batches to keep transactions small; log progress.
                    db.connection.commit()
                    print(papers_number)
                if i > 1000007:
                    break
                p.update(line)
                if line.strip() == '':
                    # Blank line terminates a record: persist it and start fresh.
                    db.insert_paper(p)
                    papers_number = papers_number + 1
                    p = Paper()


if __name__ == '__main__':
    # Guard added so importing this module no longer triggers the import run.
    read_dataset()
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
EyeLinkSession.py
Created by Tomas Knapen on 2011-04-27.
Copyright (c) 2011 __MyCompanyName__. All rights reserved.
"""
import os, sys, pickle, math, thread, time
from subprocess import *
import scipy as sp
import scipy.stats as stats
import numpy as np
import matplotlib.pylab as pl
from matplotlib.backends.backend_pdf import PdfPages
from IPython import embed as shell
from tables import *
import pp
from DataContainer import DataContainer
from DataAnalyzer import DataAnalyzer
def simpleaxis(ax):
    """Hide the top/right spines of `ax` and keep ticks on the bottom/left."""
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
def spine_shift(ax, shift = 10):
    """Move the left/bottom spines outward by `shift` points, hide the rest.

    Args:
        ax: a matplotlib Axes (anything with a `spines` mapping).
        shift: outward offset in points for the visible spines.
    Raises:
        ValueError: if a spine lives at an unrecognized location.
    """
    # .items() instead of the Python-2-only .iteritems(); behavior identical.
    for loc, spine in ax.spines.items():
        if loc in ['left', 'bottom']:
            spine.set_position(('outward', shift))  # outward by `shift` points
        elif loc in ['right', 'top']:
            spine.set_color('none')  # don't draw spine
        else:
            raise ValueError('unknown spine location: %s' % loc)
# Dimension of the ODE state vector: [H1, H2, A1, A2, C].
nr_variables = 5
def npS( input, mu ):
    """Apply a Naka-Rushton nonlinearity to the array `input` IN PLACE.

    x -> x^NRa / (x^NRa + NRs^NRa), with negative values clamped to 0 first.
    Bug fix: the original rebound the local name (`input = pow(...)`) instead
    of writing through the array, so callers — which discard the return value
    (see the `npS(op[:,0], mu)` calls in run_sim) — only ever saw the negative
    clamp, never the sigmoid itself.
    """
    input[input < 0] = 0.0
    input[:] = pow(input, mu['NRa']) / (pow(input, mu['NRa']) + pow(mu['NRs'], mu['NRa']))
def func(t, y, mu):
    """Right-hand side of the 5-D rivalry ODE, in the form pygsl's odeiv expects.

    State vector y = [H1, H2, A1, A2, C]: two competing populations, their
    adaptation variables, and a shared transient-inhibition signal C.
    `mu` is the parameter dictionary. Returns dydt as a pygsl array.
    Imports are local so the function can be shipped to pp workers.
    """
    import pygsl._numobj
    from pygsl import odeiv, Float

    def S( input, NRa, NRs ):
        # Naka-Rushton sigmoid; zero for negative drive.
        if input >= 0. :
            return pow(input,NRa)/(pow(input,NRa) + pow(NRs,NRa))
        else:
            return 0.

    dydt = pygsl._numobj.zeros((5,), Float) * 1.0
    # defining variables based on indices on y
    H1, H2 = 0,1
    A1, A2 = 2,3
    C = 4
    # Earlier (symmetric) variants of the coupling, kept for reference:
    # dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - mu['gamma'] * S(y[H2], mu['NRa'], mu['NRs']);
    # dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - mu['gamma'] * S(y[H1], mu['NRa'], mu['NRs']);
    # dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - mu['gamma'] * S(y[H2], mu['NRa'], mu['NRs']) - mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh']);
    # dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - mu['gamma'] * S(y[H1], mu['NRa'], mu['NRs']) - mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh']);
    # dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - (mu['gamma'] + mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh'])) * S(y[H2], mu['NRa'], mu['NRs']);
    # dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - (mu['gamma'] + mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh'])) * S(y[H1], mu['NRa'], mu['NRs']);
    # NOTE(review): the active equations are ASYMMETRIC — only H1's inhibition
    # is modulated by C; H2 uses plain gamma. Looks like a deliberate
    # mid-experiment variant (see history above), but worth confirming.
    dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - (mu['gamma'] + mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh'])) * S(y[H2], mu['NRa'], mu['NRs']);
    dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - (mu['gamma'] * S(y[H1], mu['NRa'], mu['NRs']));
    dydt[A1] = ( -pow(y[A1],mu['exponent']) + ( mu['alpha'] * S(y[H1], mu['NRa'], mu['NRs']) ) ) / mu['tau'];
    dydt[A2] = ( -pow(y[A2],mu['exponent']) + ( mu['alpha'] * S(y[H2], mu['NRa'], mu['NRs']) ) ) / mu['tau'];
    dydt[C] = (mu['var_inh'] * (S(dydt[H1], mu['NRa'], mu['NRs']) + S(dydt[H2], mu['NRa'], mu['NRs'])) -y[C]) / mu['tau_inh']
    return dydt
def run_sim(mu, nr_timepoints, func, npS):
    """Integrate the rivalry ODE for `nr_timepoints` steps with injected noise.

    All imports are local because this function is submitted to parallel-python
    (pp) workers, which serialize it without module globals.
    Returns [mu, op]: the parameter dict and the (nr_timepoints x 5) state array,
    with the Naka-Rushton nonlinearity applied to the two activity columns.
    """
    import pygsl._numobj
    import pygsl
    from pygsl import odeiv, Float
    import numpy
    import scipy.stats as stats
    import numpy as np
    dimension = 5
    # RKF45 stepper with adaptive step-size control.
    step = odeiv.step_rkf45(dimension, func, None, mu)
    control = odeiv.control_y_new(step, 1e-6, 1e-6)
    evolve = odeiv.evolve(step, control, dimension)
    h = 1
    t1 = float(nr_timepoints)
    # initial values - all 0.
    y = pygsl._numobj.array((0.5, 0.5, 0.0, 0.01, 0.0))
    op = numpy.zeros((nr_timepoints, dimension))
    iters = 0
    # Pre-generate Gaussian-smoothed noise for the transient signal C.
    smooth_width = 200
    noise_signal = numpy.random.randn(nr_timepoints + smooth_width) * mu['var_inh_noise_level']
    kern = stats.norm.pdf( np.linspace(-3.25,3.25,smooth_width) )
    # NOTE(review): smooth_width/2 relies on Python 2 integer division.
    sm_noise = np.convolve( noise_signal, kern / kern.sum(), 'full' )[smooth_width/2:nr_timepoints+smooth_width/2]
    # sm_signal = np.convolve( noise_signal, np.ones((smooth_width))/float(smooth_width), 'valid' )
    for t in numpy.linspace(0, t1, nr_timepoints):
        t, h, y = evolve.apply(t, t1, h, y)
        op[iters] = y
        # add noise to instantaneous activity:
        # y += numpy.concatenate((numpy.random.randn(2) * mu['noise_level'], [0.0, 0.0, 0.0]))
        # add noise to novel interaction
        # y += numpy.concatenate(([0.0, 0.0, 0.0, 0.0], numpy.random.randn(1) * mu['noise_level']))
        # add noise to activities and to novel interaction
        # y += numpy.array([numpy.random.randn(1) * mu['noise_level'] * mu['var_inh_noise_infl']/y[4], numpy.random.randn(1) * mu['noise_level'] * mu['var_inh_noise_infl']/y[4], 0.0, 0.0, y[4] * numpy.random.randn(1) * mu['var_inh_noise_level']])
        # add noise only to novel interaction, but graded by the inverse of its value.
        # y += numpy.concatenate(([0.0, 0.0, 0.0, 0.0], numpy.random.randn(1) * mu['noise_level']))
        # add noise to both populations and transient signal
        y += numpy.array([numpy.random.randn(1) * mu['noise_level'], numpy.random.randn(1) * mu['noise_level'], 0.0, 0.0, sm_noise[iters]])
        iters += 1
    op = numpy.array(op)
    # naka rushton on activities:
    npS(op[:,0], mu)
    npS(op[:,1], mu)
    # return both output and parameter dictionary
    return [mu, op]
# mu parameters based on dictionary
mu = {'XL': 0.98, 'XR': 1.0, 'beta': 0.24, 'gamma': 3.0, 'exponent': 1.0, 'alpha': 4.0, 'tau': 100.0, 'NRa': 2.0, 'NRs': 1.0, 'noise_level': 0.0025, 'var_inh': 120.0, 'tau_inh': 25, 'var_inh_infl': 0.8, 'NRa_var_inh': 3.0, 'NRs_var_inh': 1.0, 'var_inh_noise_level': 0.005, 'var_inh_noise_infl': 0.0}
nr_timepoints = 20000

file_name = 'data/C_inhibition'
# Results grid: population-noise x inhibition-noise x var_inh_infl value x 6 measures.
corr_res = np.zeros((4,2,7,6))
pnl_range = np.linspace(0.0001, 0.0003, corr_res.shape[0]) # was between 0.00027 and 0.00028
inl_range = np.linspace(0.008, 0.02, corr_res.shape[1]) # was between 0.0009 and the same, I believe
simulate = True

# Sweep both noise levels; for each pair, run one batch of simulations over
# the var_inh_infl values in parallel and analyze/plot the results.
for i, population_noise_level in enumerate(pnl_range):
    for j, inhibition_noise_level in enumerate(inl_range):
        mu['var_inh_noise_level'] = inhibition_noise_level
        mu['noise_level'] = population_noise_level
        # Run name encodes both noise levels (dots stripped for HDF5 keys).
        rn = 'pnl' + '_' + str(population_noise_level) + '_inl' + '_' + str(inhibition_noise_level)
        which_var = 'var_inh_infl'
        which_values = np.linspace(0.0,100.0,corr_res.shape[2])
        # Create an instance of callback class
        nr_simulations = which_values.shape[0]
        dc = DataContainer(file_name + '.hdf5')
        da = DataAnalyzer(dc)
        if simulate:
            dc.setup_for_simulation(nr_timepoints = nr_timepoints, nr_simulations = nr_simulations, nr_variables = nr_variables)
            # running these in parallel
            # Creates jobserver with automatically detected number of workers
            job_server = pp.Server(ppservers=())
            # Execute the same task with different amount of active workers and measure the time
            for index in xrange(nr_simulations):
                mu[which_var] = which_values[index]
                job_server.submit(run_sim, (mu, nr_timepoints, func, npS), callback=dc.save_to_array)
            #wait for jobs in all groups to finish
            job_server.wait()
            job_server.destroy()
            dc.save_to_hdf_file(run_name = rn.replace('.',''))
        da.plot_activities(plot_file_name = file_name + '_act_' + rn + '.pdf', run_name = rn.replace('.',''), sort_variable = which_var)
        da.all_time_courses_to_percepts(run_name = rn.replace('.',''), sort_variable = which_var, plot_file_name = file_name + '_' + rn + '.pdf')
        # da.transition_related_averaging(run_name = rn.replace('.',''), sort_variable = which_var)
        # corr_res[i,j,:,:] = da.correlation_results
        # fig = pl.figure()
        # ax = fig.add_subplot(111)
        # cax = ax.imshow(corr_res[i], extent = (which_values[0],which_values[-1],inl_range[0],inl_range[-1]), vmin = 0, vmax = 1)
        # cbar = fig.colorbar(cax, ticks=[0, 0.5, 1])
        # cbar.ax.set_yticklabels(['0', '0.5', '1'])# vertically oriented colorbar
        # ax.set_ylabel('inhibition noise level', fontsize=9)
        # ax.set_xlabel('variable inhibition strength', fontsize=9)
        # pl.savefig('data/im_' + str(population_noise_level) + '.pdf')
        # pl.close()

pl.show()
shell()

# for a run of 1x10:
# Summarize correlation results across the 10 repeats (mean +/- sem).
cr_m = corr_res.squeeze().mean(axis = 0)
cr_s = corr_res.squeeze().std(axis = 0) / math.sqrt(10)
f2 = pl.figure(figsize = (8,4))
s = f2.add_subplot(111) # , aspect = 'equal')
# s.set_title('simulation results\ncorrelations between C and percept duration\nfor %s' % sort_variable)
s.set_xlabel('Strength of transient signal [C] influence')
s.set_ylabel('Spearman\'s $\rho$')
s.axhline(0,-0.5,45.5, linewidth = 0.25)
s.plot(which_values, cr_m[:,0], 'k--', label = 'percept duration / C')
s.plot(which_values, cr_m[:,2], 'b--', label = 'percept duration / $\sigma$ H')
s.plot(which_values, cr_m[:,4], 'r--', label = '$\sigma$ H / C')
# Shaded sem bands around each mean curve.
pl.fill_between(which_values, cr_m[:,0] + cr_s[:,0], cr_m[:,0] - cr_s[:,0], color = 'k', alpha = 0.2)
pl.fill_between(which_values, cr_m[:,2] + cr_s[:,2], cr_m[:,2] - cr_s[:,2], color = 'b', alpha = 0.2)
pl.fill_between(which_values, cr_m[:,4] + cr_s[:,4], cr_m[:,4] - cr_s[:,4], color = 'r', alpha = 0.2)
s.axis([-0.5,45.5, -0.7, 0.9])
leg = s.legend(fancybox = True)
leg.get_frame().set_alpha(0.5)
if leg:
    for t in leg.get_texts():
        t.set_fontsize('x-small') # the legend text fontsize
    for l in leg.get_lines():
        l.set_linewidth(3.5) # the legend line width
simpleaxis(s)
spine_shift(s)
pl.savefig(file_name + '_corr.pdf')

#
# # shell()
|
|
"""Test the DLT implementation.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import dlt
def test_exceptions():
    """Placeholder: error cases for the DLT API are not covered yet."""
    # TODO make some error cases here
    pass
def test_3d():
    """End-to-end test: DLT camera calibration from 4 views of a cube, then
    3D reconstruction, checked against previously blessed values."""
    # 3D (x, y, z) coordinates (in cm) of the corner of a cube (the measurement error is at least 0.2 cm)
    xyz = [[0, 0, 0],
           [0, 12.3, 0],
           [14.5, 12.3, 0],
           [14.5, 0, 0],
           [0, 0, 14.5],
           [0, 12.3, 14.5],
           [14.5, 12.3, 14.5],
           [14.5, 0, 14.5]]
    # 2D (u, v) coordinates (in pixels) of 4 different views of the cube
    uv1 = [[1302, 1147],
           [1110, 976],
           [1411, 863],
           [1618, 1012],
           [1324, 812],
           [1127, 658],
           [1433, 564],
           [1645, 704]]
    uv2 = [[1094, 1187],
           [1130, 956],
           [1514, 968],
           [1532, 1187],
           [1076, 854],
           [1109, 647],
           [1514, 659],
           [1523, 860]]
    uv3 = [[1073, 866],
           [1319, 761],
           [1580, 896],
           [1352, 1016],
           [1064, 545],
           [1304, 449],
           [1568, 557],
           [1313, 668]]
    uv4 = [[1205, 1511],
           [1193, 1142],
           [1601, 1121],
           [1631, 1487],
           [1157, 1550],
           [1139, 1124],
           [1628, 1100],
           [1661, 1520]]
    # calibration results (blessed = previously validated reference values)
    err1_blessed = 2.57965902006
    err2_blessed = 3.04214261951
    err3_blessed = 6.16791729721
    err4_blessed = 2.79210779319
    L1_blessed = np.array([2.95265206e+01, -8.97370130e+00, -6.96531802e-01, 1.30358419e+03,
                           -4.06246400e+00, -8.02186056e+00, -2.44358618e+01, 1.14686150e+03,
                           4.94180751e-03, 6.18568358e-03, -1.68242140e-03, 1.00000000e+00])
    L2_blessed = np.array([3.19407422e+01, 1.26911035e+01, -4.63671185e+00, 1.09701804e+03,
                           1.86877074e+00, -9.99243817e+00, -2.56231471e+01, 1.18695817e+03,
                           1.43560285e-03, 9.01401595e-03, -2.88449313e-03, 1.00000000e+00])
    L3_blessed = np.array([1.16209215e+01, 2.44307350e+01, -8.06307139e-01, 1.07849968e+03,
                           5.33446749e+00, -5.99924577e+00, -2.22602954e+01, 8.68588147e+02,
                           -4.81341554e-03, 3.71965408e-03, 3.40587076e-04, 1.00000000e+00])
    L4_blessed = np.array([3.04486953e+01, 2.06678879e+00, -1.52883726e+01, 1.20481687e+03,
                           -7.87459694e-01, -2.66125606e+01, -1.32016005e+01, 1.50953468e+03,
                           6.16247151e-04, 2.74227353e-03, -1.03889378e-02, 1.00000000e+00])
    # reconstruction results
    error_cm_blessed = 0.108730880148
    xyz1234_blessed = np.array([[-8.09297218e-02, -7.60766130e-02, 8.18317612e-02],
                                [6.14967987e-02, 1.23308395e+01, -3.34614720e-02],
                                [1.43971386e+01, 1.22842067e+01, -1.01040774e-01],
                                [1.46310434e+01, 6.92701815e-02, 5.15954438e-02],
                                [9.68520833e-03, 6.59756252e-02, 1.44007915e+01],
                                [1.07361971e-02, 1.22785425e+01, 1.45588380e+01],
                                [1.45309228e+01, 1.23050727e+01, 1.45759737e+01],
                                [1.44428869e+01, -6.01772394e-02, 1.44702910e+01]])
    nd = 3  # number of dimensions
    nc = 4  # number of cameras
    npts = 8  # number of data points in each image
    # perform the calibrations (one per view)
    L1, err1 = dlt.calibrate(nd, xyz, uv1)
    L2, err2 = dlt.calibrate(nd, xyz, uv2)
    L3, err3 = dlt.calibrate(nd, xyz, uv3)
    L4, err4 = dlt.calibrate(nd, xyz, uv4)
    # perform reconstruction: each 3D point from its 4 image projections
    xyz1234 = np.zeros((len(xyz), 3))
    L1234 = [L1, L2, L3, L4]
    for i in range(npts):
        xyz1234[i, :] = dlt.reconstruct(nd, nc, L1234, [uv1[i], uv2[i], uv3[i], uv4[i]])
    xyz = np.array(xyz)
    # mean Euclidean reconstruction error, in cm
    error_cm = np.mean(np.sqrt(((xyz1234 - xyz)**2).sum(axis=1)))
    # check datatypes
    assert(isinstance(xyz1234, np.ndarray))
    assert(isinstance(L1, np.ndarray))
    # check calibration values
    assert(np.allclose(L1, L1_blessed))
    assert(np.allclose(L2, L2_blessed))
    assert(np.allclose(L3, L3_blessed))
    assert(np.allclose(L4, L4_blessed))
    # check calibration errors
    assert(np.allclose(err1, err1_blessed))
    assert(np.allclose(err2, err2_blessed))
    assert(np.allclose(err3, err3_blessed))
    assert(np.allclose(err4, err4_blessed))
    # check reconstruction values
    assert(np.allclose(xyz1234_blessed, xyz1234))
    # check reconstruction error
    assert(np.allclose(error_cm, error_cm_blessed))
    # plot the images and reconstruction
    uv1 = np.asarray(uv1)
    uv2 = np.asarray(uv2)
    uv3 = np.asarray(uv3)
    uv4 = np.asarray(uv4)
    #TODO: make this its own module function
    # plot image points (one subplot per camera view)
    fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
    ax[0, 0].plot(uv1[:, 0], uv1[:, 1], 'o')
    ax[0, 1].plot(uv2[:, 0], uv2[:, 1], 'o')
    ax[1, 0].plot(uv3[:, 0], uv3[:, 1], 'o')
    ax[1, 1].plot(uv4[:, 0], uv4[:, 1], 'o')
    fig.tight_layout()
    for _ax in ax.flatten():
        _ax.set_xticklabels([])
        _ax.set_yticklabels([])
    #TODO: make this its own module
    # plot reconstruction: measured points ('o') vs reconstructed ('x')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], 'o')
    ax.plot(xyz1234[:, 0], xyz1234[:, 1], xyz1234[:, 2], 'x')
    plt.show()
def t():
    # NOTE(review): legacy Python 2 demo kept from the original DLT script.
    # It references names that are NOT defined at module scope here
    # (DLTcalib, DLTrecon, nd, nc, xyz, uv1-uv4) and uses print statements,
    # so it cannot run as-is under Python 3 — retained for reference only.
    L1, err1 = DLTcalib(nd, xyz, uv1)
    print 'Camera calibration parameters based on view #1:'
    print L1
    print 'Error of the calibration of view #1 (in pixels):'
    print err1
    L2, err2 = DLTcalib(nd, xyz, uv2)
    print 'Camera calibration parameters based on view #2:'
    print L2
    print 'Error of the calibration of view #2 (in pixels):'
    print err2
    L3, err3 = DLTcalib(nd, xyz, uv3)
    print 'Camera calibration parameters based on view #3:'
    print L3
    print 'Error of the calibration of view #3 (in pixels):'
    print err3
    L4, err4 = DLTcalib(nd, xyz, uv4)
    print 'Camera calibration parameters based on view #4:'
    print L4
    print 'Error of the calibration of view #4 (in pixels):'
    print err4
    # 3D reconstruction from all four calibrated views
    xyz1234 = np.zeros((len(xyz),3))
    L1234 = [L1,L2,L3,L4]
    for i in range(len(uv1)):
        xyz1234[i,:] = DLTrecon( nd, nc, L1234, [uv1[i],uv2[i],uv3[i],uv4[i]] )
    print 'Reconstruction of the same 8 points based on 4 views and the camera calibration parameters:'
    print xyz1234
    print 'Mean error of the point reconstruction using the DLT (error in cm):'
    print np.mean(np.sqrt(np.sum((np.array(xyz1234)-np.array(xyz))**2,1)))
    print ''
    print 'Test of the 2D DLT'
    print '2D (x, y) coordinates (in cm) of the corner of a square (the measurement error is at least 0.2 cm):'
    xy = [[0,0], [0,12.3], [14.5,12.3], [14.5,0]]
    print np.asarray(xy)
    print '2D (u, v) coordinates (in pixels) of 2 different views of the square:'
    uv1 = [[1302,1147],[1110,976],[1411,863],[1618,1012]]
    uv2 = [[1094,1187],[1130,956],[1514,968],[1532,1187]]
    print 'uv1:'
    print np.asarray(uv1)
    print 'uv2:'
    print np.asarray(uv2)
    print ''
    print 'Use 2 views to perform a 2D calibration of the camera with 4 points of the square:'
    nd=2
    nc=2
    L1, err1 = DLTcalib(nd, xy, uv1)
    print 'Camera calibration parameters based on view #1:'
    print L1
    print 'Error of the calibration of view #1 (in pixels):'
    print err1
    L2, err2 = DLTcalib(nd, xy, uv2)
    print 'Camera calibration parameters based on view #2:'
    print L2
    print 'Error of the calibration of view #2 (in pixels):'
    print err2
    # 2D reconstruction from both views
    xy12 = np.zeros((len(xy),2))
    L12 = [L1,L2]
    for i in range(len(uv1)):
        xy12[i,:] = DLTrecon( nd, nc, L12, [uv1[i],uv2[i]] )
    print 'Reconstruction of the same 4 points based on 2 views and the camera calibration parameters:'
    print xy12
    print 'Mean error of the point reconstruction using the DLT (error in cm):'
    print np.mean(np.sqrt(np.sum((np.array(xy12)-np.array(xy))**2,1)))
    print ''
    print 'Use only one view to perform a 2D calibration of the camera with 4 points of the square:'
    nd=2
    nc=1
    L1, err1 = DLTcalib(nd, xy, uv1)
    print 'Camera calibration parameters based on view #1:'
    print L1
    print 'Error of the calibration of view #1 (in pixels):'
    print err1
    # 2D reconstruction from a single view
    xy1 = np.zeros((len(xy),2))
    for i in range(len(uv1)):
        xy1[i,:] = DLTrecon( nd, nc, L1, uv1[i] )
    print 'Reconstruction of the same 4 points based on one view and the camera calibration parameters:'
    print xy1
    print 'Mean error of the point reconstruction using the DLT (error in cm):'
    print np.mean(np.sqrt(np.sum((np.array(xy1)-np.array(xy))**2,1)))
|
|
##
# Copyright (c) 2014-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
group attendee tests
"""
from twext.enterprise.jobqueue import JobItem
from twext.python.filepath import CachingFilePath as FilePath
from twext.who.directory import DirectoryService
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.trial import unittest
from twistedcaldav.config import config
from twistedcaldav.ical import Component, normalize_iCalStr
from txdav.caldav.datastore.sql_directory import GroupAttendeeRecord
from txdav.caldav.datastore.test.util import populateCalendarsFrom, CommonCommonTests, \
DateTimeSubstitutionsMixin
from txdav.who.directory import CalendarDirectoryRecordMixin
from txdav.who.groups import GroupCacher
import os
class GroupAttendeeTestBase(CommonCommonTests, DateTimeSubstitutionsMixin, unittest.TestCase):
    """
    GroupAttendeeReconciliation tests
    """

    @inlineCallbacks
    def setUp(self):
        # Build a store/directory pair backed by the group-accounts XML
        # fixture, then pre-create the calendar homes in `requirements`.
        yield super(GroupAttendeeTestBase, self).setUp()

        accountsFilePath = FilePath(
            os.path.join(os.path.dirname(__file__), "accounts")
        )
        yield self.buildStoreAndDirectory(
            accounts=accountsFilePath.child("groupAccounts.xml"),
        )
        yield self.populate()

        self.setupDateTimeValues()

        self.paths = {}

    def configure(self):
        # Enable group-attendee expansion with zero delays so the tests
        # observe the expansion synchronously.
        super(GroupAttendeeTestBase, self).configure()
        config.GroupAttendees.Enabled = True
        config.GroupAttendees.ReconciliationDelaySeconds = 0
        config.GroupAttendees.AutoUpdateSecondsFromNow = 0

    @inlineCallbacks
    def populate(self):
        yield populateCalendarsFrom(self.requirements, self.storeUnderTest())

    # Calendar homes to pre-create (all start empty).
    requirements = {
        "user01" : None,
        "user02" : None,
        "user06" : None,
        "user07" : None,
        "user08" : None,
        "user09" : None,
        "user10" : None,
    }

    @inlineCallbacks
    def _verifyObjectResourceCount(self, home, expected_count):
        # Assert the number of object resources in `home`'s "calendar".
        cal6 = yield self.calendarUnderTest(name="calendar", home=home)
        count = yield cal6.countObjectResources()
        self.assertEqual(count, expected_count)

    def _assertICalStrEqual(self, iCalStr1, iCalStr2):
        # Compare two iCalendar strings while ignoring the ordering of
        # ATTENDEE properties and of their MEMBER parameter values.

        def orderAttendeePropAndMemberValues(event):
            for component in event.subcomponents(ignore=True):
                # remove all values and add them again
                # this is sort of a hack, better pycalendar has ordering
                attendees = sorted(list(component.properties("ATTENDEE")), key=lambda x: x.value())
                component.removeProperty("ATTENDEE")
                for attendeeProp in attendees:
                    if attendeeProp.hasParameter("MEMBER"):
                        parameterValues = tuple(attendeeProp.parameterValues("MEMBER"))
                        for paramterValue in parameterValues:
                            attendeeProp.removeParameterValue("MEMBER", paramterValue)
                        attendeeProp.setParameter("MEMBER", sorted(parameterValues))
                    component.addProperty(attendeeProp)
            return event

        self.assertEqualCalendarData(
            orderAttendeePropAndMemberValues(Component.fromString(normalize_iCalStr(iCalStr1))),
            orderAttendeePropAndMemberValues(Component.fromString(normalize_iCalStr(iCalStr2)))
        )
class GroupAttendeeTests(GroupAttendeeTestBase):
    @inlineCallbacks
    def test_simplePUT(self):
        """
        Test that group attendee is expanded on PUT
        """
        calendar = yield self.calendarUnderTest(name="calendar", home="user01")

        # Event stored by user01 inviting group02 directly.
        data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        # Expected stored form: the group attendee expanded into its
        # member users 06/07/08, each carrying MEMBER="urn:x-uid:group02".
        data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        # Group members have no copies before the PUT.
        yield self._verifyObjectResourceCount("user06", 0)
        yield self._verifyObjectResourceCount("user07", 0)

        vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
        yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
        yield self.commit()

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

        # Scheduling should have delivered the invite to the group members.
        yield self._verifyObjectResourceCount("user06", 1)
        yield self._verifyObjectResourceCount("user07", 1)
@inlineCallbacks
def test_unknownPUT(self):
"""
Test unknown group with CUTYPE=X-SERVER-GROUP handled
"""
calendar = yield self.calendarUnderTest(name="calendar", home="user01")
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd1}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE;CUTYPE=X-SERVER-GROUP:urn:uuid:FFFFFFFF-EEEE-DDDD-CCCC-BBBBBBBBBBBB
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd1}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CUTYPE=X-SERVER-GROUP;SCHEDULE-STATUS=3.7:urn:uuid:FFFFFFFF-EEEE-DDDD-CCCC-BBBBBBBBBBBB
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
@inlineCallbacks
def test_primaryAttendeeInGroupPUT(self):
"""
Test that primary attendee also in group remains primary
"""
calendar = yield self.calendarUnderTest(name="calendar", home="user01")
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
@inlineCallbacks
def test_nestedPUT(self):
"""
Test that nested groups are expanded expanded on PUT
"""
yield self._verifyObjectResourceCount("user06", 0)
yield self._verifyObjectResourceCount("user07", 0)
yield self._verifyObjectResourceCount("user08", 0)
yield self._verifyObjectResourceCount("user09", 0)
yield self._verifyObjectResourceCount("user10", 0)
calendar = yield self.calendarUnderTest(name="calendar", home="user01")
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:urn:x-uid:group04
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 04;CUTYPE=X-SERVER-GROUP;SCHEDULE-STATUS=2.7:urn:x-uid:group04
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group04";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group04";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group04";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group04";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
ATTENDEE;CN=User 10;[email protected];MEMBER="urn:x-uid:group04";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user10
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
yield self._verifyObjectResourceCount("user06", 1)
yield self._verifyObjectResourceCount("user07", 1)
yield self._verifyObjectResourceCount("user08", 1)
yield self._verifyObjectResourceCount("user09", 1)
yield self._verifyObjectResourceCount("user10", 1)
@inlineCallbacks
def test_multiGroupPUT(self):
"""
Test that expanded users in two primary groups have groups in MEMBERS param
"""
yield self._verifyObjectResourceCount("user06", 0)
yield self._verifyObjectResourceCount("user07", 0)
yield self._verifyObjectResourceCount("user08", 0)
yield self._verifyObjectResourceCount("user09", 0)
calendar = yield self.calendarUnderTest(name="calendar", home="user01")
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
ATTENDEE:MAILTO:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=Group 03;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group03
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR"""
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
yield self._verifyObjectResourceCount("user06", 1)
yield self._verifyObjectResourceCount("user07", 1)
yield self._verifyObjectResourceCount("user08", 1)
yield self._verifyObjectResourceCount("user09", 1)
@inlineCallbacks
def test_groupPutOldEvent(self):
"""
Test that old event with group attendee is expanded but not linked to group update
"""
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back2}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back2}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
calendar = yield self.calendarUnderTest(name="calendar", home="user02")
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
self.assertEqual(len(groupsToRefresh), 0)
wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
if len(wps): # This is needed because the test currently fails and does actually create job items we have to wait for
yield self.commit()
yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)
self.assertEqual(len(wps), 0)
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
class GroupAttendeeReconciliationTests(GroupAttendeeTestBase):
    @inlineCallbacks
    def test_groupChange(self):
        """
        Test that group attendees are changed when the group changes.
        """
        data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        data_get_3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        data_get_4 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SEQUENCE:2
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        # Stub that makes every group appear to have no members.
        @inlineCallbacks
        def expandedMembers(self, records=None, seen=None):
            yield None
            returnValue(set())

        unpatchedExpandedMembers = CalendarDirectoryRecordMixin.expandedMembers
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 0)

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        self.assertEqual(len(wps), 0)

        # PUT while the group is (apparently) empty: no member attendees.
        calendar = yield self.calendarUnderTest(name="calendar", home="user02")
        vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
        yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
        yield self.commit()

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))
        yield self._verifyObjectResourceCount("user01", 0)
        yield self.commit()

        # Restore real membership; the group now needs a refresh and the
        # refresh schedules one reconciliation work item.
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 1)
        self.assertEqual(list(groupsToRefresh)[0], "group01")

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), 1)
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        # Event now includes user01 as a member attendee (SEQUENCE bumped).
        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_3.format(**self.dtsubs))
        yield self._verifyObjectResourceCount("user01", 1)
        yield self.commit()

        # Empty the group again; the member attendee is removed on refresh.
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), 1)
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_4.format(**self.dtsubs))

        # The removed member's copy of the event is cancelled, not deleted.
        cal = yield self.calendarUnderTest(name="calendar", home="user01")
        cobjs = yield cal.objectResources()
        self.assertEqual(len(cobjs), 1)
        comp = yield cobjs[0].componentForUser()
        self.assertTrue("STATUS:CANCELLED" in str(comp))
    @inlineCallbacks
    def test_multieventGroupChange(self):
        """
        Test that every event associated with a group changes when the
        group changes.
        """
        # NOTE: these templates use both positional ({0} = organizer digit)
        # and keyword ({nowDate_fwd20} etc.) substitutions, filled via
        # data.format(i, **self.dtsubs) below.
        data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event {0}
UID:event{0}@ninevah.local
ORGANIZER:MAILTO:user0{0}@example.com
ATTENDEE:mailto:user0{0}@example.com
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:event{0}@ninevah.local
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 0{0};EMAIL=user0{0}@example.com;RSVP=TRUE:urn:x-uid:user0{0}
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 0{0};EMAIL=user0{0}@example.com:urn:x-uid:user0{0}
SUMMARY:event {0}
END:VEVENT
END:VCALENDAR
"""

        data_get_3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:event{0}@ninevah.local
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 0{0};EMAIL=user0{0}@example.com;RSVP=TRUE:urn:x-uid:user0{0}
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 0{0};EMAIL=user0{0}@example.com:urn:x-uid:user0{0}
SEQUENCE:1
SUMMARY:event {0}
END:VEVENT
END:VCALENDAR
"""

        data_get_4 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:event{0}@ninevah.local
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 0{0};EMAIL=user0{0}@example.com;RSVP=TRUE:urn:x-uid:user0{0}
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 0{0};EMAIL=user0{0}@example.com:urn:x-uid:user0{0}
SEQUENCE:2
SUMMARY:event {0}
END:VEVENT
END:VCALENDAR
"""

        # Stub that makes every group appear to have no members.
        @inlineCallbacks
        def expandedMembers(self, records=None, seen=None):
            yield None
            returnValue(set())

        unpatchedExpandedMembers = CalendarDirectoryRecordMixin.expandedMembers
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 0)

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        self.assertEqual(len(wps), 0)

        userRange = range(6, 10)  # must be single-digit users, with calendar homes provisioned in the test requirements

        # Each user in the range organizes one event inviting group01.
        for i in userRange:
            calendar = yield self.calendarUnderTest(name="calendar", home="user0{0}".format(i))
            vcalendar = Component.fromString(data_put_1.format(i, **self.dtsubs))
            yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
            yield self.commit()

            cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user0{0}".format(i))
            vcalendar = yield cobj.component()
            self._assertICalStrEqual(vcalendar, data_get_2.format(i, **self.dtsubs))

        yield self._verifyObjectResourceCount("user01", 0)
        yield self.commit()

        # Restore real membership: the refresh must reconcile one event per
        # organizer, i.e. one work item per user in the range.
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 1)
        self.assertEqual(list(groupsToRefresh)[0], "group01")

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), len(userRange))
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        for i in userRange:
            cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user0{0}".format(i))
            vcalendar = yield cobj.component()
            self._assertICalStrEqual(vcalendar, data_get_3.format(i, **self.dtsubs))

        yield self._verifyObjectResourceCount("user01", len(userRange))
        yield self.commit()

        # Empty the group again; all events drop the member attendee.
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), len(userRange))
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        for i in userRange:
            cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user0{0}".format(i))
            vcalendar = yield cobj.component()
            self._assertICalStrEqual(vcalendar, data_get_4.format(i, **self.dtsubs))

        # All of the removed member's copies are cancelled.
        cal = yield self.calendarUnderTest(name="calendar", home="user01")
        cobjs = yield cal.objectResources()
        self.assertEqual(len(cobjs), len(userRange))
        for cobj in cobjs:
            comp = yield cobj.componentForUser()
            self.assertTrue("STATUS:CANCELLED" in str(comp))
    @inlineCallbacks
    def test_groupChangeOldEvent(self):
        """
        Test that group attendee changes are not applied to old events.
        """
        data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_put_2 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        # Stub that makes every group appear to have no members.
        @inlineCallbacks
        def expandedMembers(self, records=None, seen=None):
            yield None
            returnValue(set())

        unpatchedExpandedMembers = CalendarDirectoryRecordMixin.expandedMembers

        groupCacher = GroupCacher(self.transactionUnderTest().directoryService())

        # Future event with a group attendee: expanded at PUT time.
        calendar = yield self.calendarUnderTest(name="calendar", home="user02")
        vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
        yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
        yield self.commit()

        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 1)
        self.assertEqual(list(groupsToRefresh)[0], "group01")

        # Membership is unchanged, so the refresh produces no work items.
        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), 0)

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
        yield self._verifyObjectResourceCount("user01", 1)

        # Rewrite the event into the past; group expansion is dropped.
        vcalendar = Component.fromString(data_put_2.format(**self.dtsubs))
        yield cobj.setComponent(vcalendar)
        yield self.commit()

        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

        # Old events are not linked to the group, so no refresh work occurs.
        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        self.assertEqual(len(wps), 0)
        yield self.commit()
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        # finally, simulate an event that has become old
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
        group = yield self.transactionUnderTest().groupByUID("group01")
        # Stale membership hash forces the reconciliation work item to run.
        yield GroupAttendeeRecord.create(
            self.transactionUnderTest(),
            resourceID=cobj._resourceID,
            groupID=group.groupID,
            membershipHash="None",
        )
        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        self.assertEqual(len(wps), 1)
        yield self.commit()
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        # The work item ran, but the old event's data is left untouched.
        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))
        '''
        cal = yield self.calendarUnderTest(name="calendar", home="user01")
        cobjs = yield cal.objectResources()
        for cobj in cobjs:
            print("comp = %s" % ((yield cobj.componentForUser())))
        '''
    @inlineCallbacks
    def test_groupChangeOldNoMasterEvent(self):
        """
        Test that group attendee changes are not applied to old events with
        no master event.
        """
        yield None

    # Twisted trial marker: the test above is expected to fail until its
    # fixture data is written.
    test_groupChangeOldNoMasterEvent.todo = "Create test data"
    @inlineCallbacks
    def test_groupChangeOldRecurringEvent(self):
        """
        Test that group attendee changes are not applied to old recurring
        events.
        """
        data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_put_2 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
RRULE:FREQ=DAILY;UNTIL={nowDate_back1}T100000
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

        data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY;UNTIL={nowDate_back1}T100000
SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

        # Stub that makes every group appear to have no members.
        @inlineCallbacks
        def expandedMembers(self, records=None, seen=None):
            yield None
            returnValue(set())

        unpatchedExpandedMembers = CalendarDirectoryRecordMixin.expandedMembers

        groupCacher = GroupCacher(self.transactionUnderTest().directoryService())

        # Recurring event spanning past-to-future: expanded at PUT time.
        calendar = yield self.calendarUnderTest(name="calendar", home="user02")
        vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
        yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
        yield self.commit()

        groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
        self.assertEqual(len(groupsToRefresh), 1)
        self.assertEqual(list(groupsToRefresh)[0], "group01")

        # Membership is unchanged, so the refresh produces no work items.
        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        yield self.commit()
        self.assertEqual(len(wps), 0)

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
        yield self._verifyObjectResourceCount("user01", 1)

        # Truncate the recurrence so the whole event lies in the past.
        vcalendar = Component.fromString(data_put_2.format(**self.dtsubs))
        yield cobj.setComponent(vcalendar)
        yield self.commit()

        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        if len(wps):  # This is needed because the test currently fails and does actually create job items we have to wait for
            yield self.commit()
            yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)
        self.assertEqual(len(wps), 0)

        # finally, simulate an event that has become old
        self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
        group = yield self.transactionUnderTest().groupByUID("group01")
        # Stale membership hash forces the reconciliation work item to run.
        yield GroupAttendeeRecord.create(
            self.transactionUnderTest(),
            resourceID=cobj._resourceID,
            groupID=group.groupID,
            membershipHash="None",
        )
        wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
        self.assertEqual(len(wps), 1)
        yield self.commit()
        yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

        # The work item ran, but the old recurring event is left untouched.
        cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
        vcalendar = yield cobj.component()
        self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))
@inlineCallbacks
def test_groupChangeSmallerSpanningEvent(self):
"""
Test that group attendee changes not applied to old recurring events
"""
data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""
data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
data_get_1_user01 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected]:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
"""
data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
{start}DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SEQUENCE:2
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
data_get_3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
{uid}DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}{rule}SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""
data_get_2_user01 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:[email protected]
{start}DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected]:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SEQUENCE:2
STATUS:CANCELLED
SUMMARY:event 1
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
"""
data_get_3_user01 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
{uid}DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected]:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}{rule}SEQUENCE:1
SUMMARY:event 1
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
"""
@inlineCallbacks
def expandedMembers(self, records=None, seen=None):
yield None
returnValue(set())
groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
calendar = yield self.calendarUnderTest(name="calendar", home="user02")
vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
yield self.commit()
cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
vcalendar = yield cobj.component()
self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))
cal = yield self.calendarUnderTest(name="calendar", home="user01")
cobjs = yield cal.objectResources()
self.assertEqual(len(cobjs), 1)
vcalendar = yield cobjs[0].componentForUser()
self._assertICalStrEqual(vcalendar, data_get_1_user01.format(**self.dtsubs))
user01_cname = cobjs[0].name()
cal = yield self.calendarUnderTest(name="inbox", home="user01")
cobjs = yield cal.objectResources()
self.assertEqual(len(cobjs), 1)
yield cobjs[0].remove()
self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)
wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
yield self.commit()
self.assertEqual(len(wps), 1)
yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)
cal = yield self.calendarUnderTest(name="calendar", home="user02")
cobjs = yield cal.objectResources()
for cobj in cobjs:
vcalendar = yield cobj.component()
for component in vcalendar.subcomponents(ignore=True):
props = {
"relatedTo": component.getProperty("RELATED-TO"),
"start": component.getProperty("DTSTART"),
"rule": component.getProperty("RRULE"),
"uid": component.getProperty("UID"),
}
break
props.update(self.dtsubs)
if cobj.name() == "data1.ics":
self._assertICalStrEqual(vcalendar, data_get_2.format(**props))
props_orig = props
else:
self._assertICalStrEqual(vcalendar, data_get_3.format(**props))
props_new = props
cal = yield self.calendarUnderTest(name="calendar", home="user01")
cobjs = yield cal.objectResources()
for cobj in cobjs:
vcalendar = yield cobj.componentForUser()
if cobj.name() == user01_cname:
self._assertICalStrEqual(vcalendar, data_get_2_user01.format(**props_orig))
else:
self._assertICalStrEqual(vcalendar, data_get_3_user01.format(**props_new))
cal = yield self.calendarUnderTest(name="inbox", home="user01")
cobjs = yield cal.objectResources()
self.assertEqual(len(cobjs), 1)
comp = yield cobjs[0].componentForUser()
self.assertTrue("METHOD:CANCEL" in str(comp))
@inlineCallbacks
def test_groupChangeLargerSpanningEvent(self):
    """
    Test that group attendee changes not applied to old recurring events
    """
    # Recurring event from 20 days in the past to 20 days in the future,
    # created by user02 with group01 as an attendee.
    data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected organizer copy while the group appears empty: no expanded
    # individual members, group attendee has SCHEDULE-STATUS=2.7.
    data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    # Expected "future" half after the split: user01 is expanded into the
    # attendee list; DTSTART/RELATED-TO are filled in from the actual split.
    data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
{start}DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SEQUENCE:2
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    # Expected "past" half after the split: membership change NOT applied,
    # so no expanded individual attendees; UID/RRULE come from the split.
    data_get_3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
{uid}DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}{rule}SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    # Expected attendee (user01) copy of the future half.
    data_get_2_user01 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
BEGIN:VEVENT
UID:[email protected]
{start}DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected]:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
{relatedTo}RRULE:FREQ=DAILY;UNTIL={nowDate_fwd20}T100000
SEQUENCE:2
SUMMARY:event 1
TRANSP:TRANSPARENT
END:VEVENT
END:VCALENDAR
"""

    # Stub that makes every group look empty while the event is created.
    @inlineCallbacks
    def expandedMembers(self, records=None, seen=None):
        yield None
        returnValue(set())

    unpatchedExpandedMembers = CalendarDirectoryRecordMixin.expandedMembers
    self.patch(CalendarDirectoryRecordMixin, "expandedMembers", expandedMembers)

    groupCacher = GroupCacher(self.transactionUnderTest().directoryService())

    # Create the event while the group appears empty.
    calendar = yield self.calendarUnderTest(name="calendar", home="user02")
    vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
    yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
    yield self.commit()

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

    # user01 is not yet an expanded attendee, so no resource in its home.
    yield self._verifyObjectResourceCount("user01", 0)

    # Restore real group membership and refresh; this should split the
    # event so the change only applies to future instances.
    self.patch(CalendarDirectoryRecordMixin, "expandedMembers", unpatchedExpandedMembers)
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
    yield self.commit()
    self.assertEqual(len(wps), 1)
    yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

    cal = yield self.calendarUnderTest(name="calendar", home="user02")
    cobjs = yield cal.objectResources()
    for cobj in cobjs:
        vcalendar = yield cobj.component()
        # Pull per-split values (RELATED-TO, DTSTART, RRULE, UID) from the
        # first subcomponent so the expected data can be parameterized.
        for component in vcalendar.subcomponents(ignore=True):
            props = {
                "relatedTo": component.getProperty("RELATED-TO"),
                "start": component.getProperty("DTSTART"),
                "rule": component.getProperty("RRULE"),
                "uid": component.getProperty("UID"),
            }
            break
        props.update(self.dtsubs)
        if cobj.name() == "data1.ics":
            # Original resource name keeps the "future" half.
            self._assertICalStrEqual(vcalendar, data_get_2.format(**props))
            props_orig = props
        else:
            # New resource holds the unchanged "past" half.
            self._assertICalStrEqual(vcalendar, data_get_3.format(**props))

    # user01 only sees the future half of the split.
    cal = yield self.calendarUnderTest(name="calendar", home="user01")
    cobjs = yield cal.objectResources()
    self.assertEqual(len(cobjs), 1)
    vcalendar = yield cobjs[0].componentForUser()
    self._assertICalStrEqual(vcalendar, data_get_2_user01.format(**props_orig))
@inlineCallbacks
def test_groupRemovalFromDirectory(self):
    """
    Test that removing a group from the directory also removes the expanded attendees.
    This needs to make sure that an attendee in two groups is NOT removed if only one
    of those groups is removed
    """
    # No group-member homes should contain any resources yet.
    yield self._verifyObjectResourceCount("user06", 0)
    yield self._verifyObjectResourceCount("user07", 0)
    yield self._verifyObjectResourceCount("user08", 0)
    yield self._verifyObjectResourceCount("user09", 0)
    yield self._verifyObjectResourceCount("user10", 0)

    calendar = yield self.calendarUnderTest(name="calendar", home="user01")

    # Future event with three group attendees; user07/user08 belong to both
    # group02 and group03, so removing group02 must not remove them.
    data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
ATTENDEE:MAILTO:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected data after creation: members of group02 and group03 expanded.
    data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=Group 03;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group03
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR"""

    # Expected data after group02 disappears from the directory: user06 is
    # dropped, user07/user08 remain via group03 only; group02 gets
    # SCHEDULE-STATUS=3.7 (invalid calendar user).
    data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=3.7:urn:x-uid:group02
ATTENDEE;CN=Group 03;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group03
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    # Expected data after group02 comes back: user06 is re-expanded.
    data_get_3 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=Group 03;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group03
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SEQUENCE:2
SUMMARY:event 1
END:VEVENT
END:VCALENDAR"""

    unpatchedRecordWithUID = DirectoryService.recordWithUID

    # Directory stub that makes group02 look deleted.
    @inlineCallbacks
    def recordWithUID(self, uid, timeoutSeconds=None):
        if uid == "group02":
            result = None
        else:
            result = yield unpatchedRecordWithUID(self, uid)
        returnValue(result)

    vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
    yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
    yield self.commit()

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

    # Each expanded member received a copy of the event.
    yield self._verifyObjectResourceCount("user06", 1)
    yield self._verifyObjectResourceCount("user07", 1)
    yield self._verifyObjectResourceCount("user08", 1)
    yield self._verifyObjectResourceCount("user09", 1)

    # cache group
    groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 3)

    # Membership unchanged, so no work proposals and no event change.
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group02")
    yield self.commit()
    self.assertEqual(len(wps), 0)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

    # remove group members run cacher again
    self.patch(DirectoryService, "recordWithUID", recordWithUID)
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group02")
    yield self.commit()
    self.assertEqual(len(wps), 1)
    yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

    # user06 (only in group02) sees the event cancelled.
    cal = yield self.calendarUnderTest(name="calendar", home="user06")
    cobjs = yield cal.objectResources()
    self.assertEqual(len(cobjs), 1)
    comp = yield cobjs[0].componentForUser()
    self.assertTrue("STATUS:CANCELLED" in str(comp))
    yield self.commit()

    # add group members back, run cacher
    self.patch(DirectoryService, "recordWithUID", unpatchedRecordWithUID)
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group02")
    self.assertEqual(len(wps), 1)
    yield self.commit()
    yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_3.format(**self.dtsubs))

    # user06's copy is no longer cancelled.
    cal = yield self.calendarUnderTest(name="calendar", home="user06")
    cobjs = yield cal.objectResources()
    self.assertEqual(len(cobjs), 1)
    comp = yield cobjs[0].componentForUser()
    self.assertFalse("STATUS:CANCELLED" in str(comp))
    yield self.commit()
@inlineCallbacks
def test_groupRemovalFromEvent(self):
    """
    Test that removing a group from the calendar data also removes the expanded attendees.
    This needs to make sure that an attendee in two groups is NOT removed if only one of
    those groups is removed
    """
    # No group-member homes should contain any resources yet.
    yield self._verifyObjectResourceCount("user06", 0)
    yield self._verifyObjectResourceCount("user07", 0)
    yield self._verifyObjectResourceCount("user08", 0)
    yield self._verifyObjectResourceCount("user09", 0)
    yield self._verifyObjectResourceCount("user10", 0)

    calendar = yield self.calendarUnderTest(name="calendar", home="user01")

    # Future event inviting both group02 and group03; user07/user08 belong
    # to both groups.
    data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected data after creation: members of both groups expanded.
    data_get_1 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=Group 03;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group03
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02","urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
ATTENDEE;CN=User 09;[email protected];MEMBER="urn:x-uid:group03";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user09
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SUMMARY:event 1
END:VEVENT
END:VCALENDAR"""

    # Updated event that drops group03 from the attendee list.
    data_put_2 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected data after the update: user09 (group03 only) is gone;
    # user07/user08 remain as members of group02 only.
    data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 01;[email protected];RSVP=TRUE:urn:x-uid:user01
ATTENDEE;CN=Group 02;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group02
ATTENDEE;CN=User 06;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user06
ATTENDEE;CN=User 07;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user07
ATTENDEE;CN=User 08;[email protected];MEMBER="urn:x-uid:group02";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user08
CREATED:20060101T150000Z
ORGANIZER;CN=User 01;[email protected]:urn:x-uid:user01
SEQUENCE:1
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
    yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)
    yield self.commit()

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

    # Each expanded member received a copy of the event.
    yield self._verifyObjectResourceCount("user06", 1)
    yield self._verifyObjectResourceCount("user07", 1)
    yield self._verifyObjectResourceCount("user08", 1)
    yield self._verifyObjectResourceCount("user09", 1)

    # cache groups
    groupCacher = GroupCacher(self.transactionUnderTest().directoryService())
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 2)

    # Membership unchanged, so refreshing produces no work proposals.
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group02")
    yield self.commit()
    self.assertEqual(len(wps), 0)
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group03")
    yield self.commit()
    self.assertEqual(len(wps), 0)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_1.format(**self.dtsubs))

    # Rewrite the event without group03.
    vcalendar = Component.fromString(data_put_2.format(**self.dtsubs))
    yield cobj.setComponent(vcalendar)
    yield self.commit()

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user01")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

    # All homes still hold one resource (user09's copy is cancelled, not
    # deleted).
    yield self._verifyObjectResourceCount("user06", 1)
    yield self._verifyObjectResourceCount("user07", 1)
    yield self._verifyObjectResourceCount("user08", 1)
    yield self._verifyObjectResourceCount("user09", 1)

    # groups did not change so no work proposals
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group02")
    self.assertEqual(len(wps), 0)
    wps = yield groupCacher.refreshGroup(self.transactionUnderTest(), "group03")
    self.assertEqual(len(wps), 0)

    cal = yield self.calendarUnderTest(name="calendar", home="user09")
    cobjs = yield cal.objectResources()
    self.assertEqual(len(cobjs), 1)
    comp = yield cobjs[0].componentForUser()
    self.assertTrue("STATUS:CANCELLED" in str(comp))
    yield self.commit()
@inlineCallbacks
def test_groupAttendeesWhenFullyInFutureEventInTrash(self):
    """
    Test that group attendee link is severed while an event is in the trash
    and is relinked when recovered. In this case, the event only has
    instances in the future.
    """
    self.patch(config, "EnableTrashCollection", True)

    # Single future event created by user02 with group01 as an attendee.
    data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected data after the group refresh expands user01.
    data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_fwd20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    groupCacher = GroupCacher(self.transactionUnderTest().directoryService())

    calendar = yield self.calendarUnderTest(name="calendar", home="user02")
    vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
    yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)

    # Creating the event links group01 in the group_attendee table.
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    self.assertEqual(list(groupsToRefresh)[0], "group01")
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 1)
    yield self.commit()

    yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
    yield self.commit()
    yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

    yield self._verifyObjectResourceCount("user01", 1)

    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    yield self.commit()

    # Trashing the event severs the group attendee link.
    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
    yield cobj.remove()
    yield self.commit()
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 0)
    yield self.commit()

    # With the event in the trash, the group will not be refreshed
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 0)
    yield self.commit()

    home = yield self.homeUnderTest(name="user02")
    trash = yield home.getTrash()
    names = yield trash.listObjectResources()
    cobj = yield trash.calendarObjectWithName(names[0])
    yield cobj.fromTrash()
    yield self.commit()

    # With the event recovered from the trash, the group will be refreshed
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    yield self.commit()
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 1)
    yield self.commit()
@inlineCallbacks
def test_groupAttendeesWhenSplitEventInTrash(self):
    """
    Test that group attendee link is severed while an event is in the trash
    and is relinked when recovered. In this case, the event has instances
    in the past and future.
    """
    self.patch(config, "EnableTrashCollection", True)

    # Unbounded daily recurrence starting 20 days in the past, so the
    # event spans past and future instances.
    data_put_1 = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART:{nowDate_back20}T100000Z
RRULE:FREQ=DAILY
DURATION:PT1H
SUMMARY:event 1
UID:[email protected]
ORGANIZER:MAILTO:[email protected]
ATTENDEE:mailto:[email protected]
ATTENDEE:MAILTO:[email protected]
END:VEVENT
END:VCALENDAR"""

    # Expected data after the group refresh expands user01.
    data_get_2 = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
BEGIN:VEVENT
UID:[email protected]
DTSTART:{nowDate_back20}T100000Z
DURATION:PT1H
ATTENDEE;CN=User 02;[email protected];RSVP=TRUE:urn:x-uid:user02
ATTENDEE;CN=Group 01;CUTYPE=X-SERVER-GROUP;[email protected];SCHEDULE-STATUS=2.7:urn:x-uid:group01
ATTENDEE;CN=User 01;[email protected];MEMBER="urn:x-uid:group01";PARTSTAT=NEEDS-ACTION;RSVP=TRUE;SCHEDULE-STATUS=1.2:urn:x-uid:user01
CREATED:20060101T150000Z
ORGANIZER;CN=User 02;[email protected]:urn:x-uid:user02
RRULE:FREQ=DAILY
SUMMARY:event 1
END:VEVENT
END:VCALENDAR
"""

    groupCacher = GroupCacher(self.transactionUnderTest().directoryService())

    calendar = yield self.calendarUnderTest(name="calendar", home="user02")
    vcalendar = Component.fromString(data_put_1.format(**self.dtsubs))
    yield calendar.createCalendarObjectWithName("data1.ics", vcalendar)

    # Creating the event links group01 in the group_attendee table.
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    self.assertEqual(list(groupsToRefresh)[0], "group01")
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 1)
    yield self.commit()

    yield groupCacher.refreshGroup(self.transactionUnderTest(), "group01")
    yield self.commit()
    yield JobItem.waitEmpty(self._sqlCalendarStore.newTransaction, reactor, 60)

    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
    vcalendar = yield cobj.component()
    self._assertICalStrEqual(vcalendar, data_get_2.format(**self.dtsubs))

    yield self._verifyObjectResourceCount("user01", 1)

    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    yield self.commit()

    # Trashing the event severs the group attendee link.
    cobj = yield self.calendarObjectUnderTest(name="data1.ics", calendar_name="calendar", home="user02")
    yield cobj.remove()
    yield self.commit()
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 0)
    yield self.commit()

    # With the event in the trash, the group will not be refreshed
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 0)
    yield self.commit()

    home = yield self.homeUnderTest(name="user02")
    trash = yield home.getTrash()
    names = yield trash.listObjectResources()
    cobj = yield trash.calendarObjectWithName(names[0])
    yield cobj.fromTrash()
    yield self.commit()

    # With the event recovered from the trash, the group will be refreshed
    groupsToRefresh = yield groupCacher.groupsToRefresh(self.transactionUnderTest())
    self.assertEqual(len(groupsToRefresh), 1)
    yield self.commit()
    rows = yield self.transactionUnderTest().execSQL("select * from group_attendee", [])
    self.assertEquals(len(rows), 1)
    yield self.commit()
|
|
"""
Egg loading utilities.
"""
import cPickle
import os.path
import pickle
import sys
import zipfile
import pkg_resources
from openmdao.util.eggobserver import EggObserver
from openmdao.util.eggsaver import SAVE_CPICKLE, SAVE_PICKLE
from openmdao.util.log import NullLogger, LOG_DEBUG2
__all__ = ('load', 'load_from_eggfile', 'load_from_eggpkg',
'check_requirements')
def load_from_eggfile(filename, entry_group, entry_name, logger=None,
                      observer=None):
    """
    Extracts files in egg to a subdirectory matching the saved object name.
    Then loads object graph state by invoking the given entry point.
    Returns the root object.

    filename: string
        Name of egg file.

    entry_group: string
        Name of group.

    entry_name: string
        Name of entry point in group.

    logger: Logger
        Used for recording progress, etc.

    observer: callable
        Called via an :class:`EggObserver`.
    """
    logger = logger or NullLogger()
    observer = EggObserver(observer, logger)
    logger.debug('Loading %s from %s in %s...',
                 entry_name, filename, os.getcwd())

    egg_dir, dist = _dist_from_eggfile(filename, logger, observer)

    # Just being defensive, '.' is typically in the path.
    if '.' not in sys.path:  # pragma no cover
        sys.path.append('.')

    # The entry point expects to run from the extraction directory; always
    # restore the original working directory, even if loading fails.
    orig_dir = os.getcwd()
    os.chdir(egg_dir)
    try:
        return _load_from_distribution(dist, entry_group, entry_name, None,
                                       logger, observer)
    finally:
        os.chdir(orig_dir)
def load_from_eggpkg(package, entry_group, entry_name, instance_name=None,
                     logger=None, observer=None):
    """
    Load object graph state by invoking the given package entry point.
    Returns the root object.

    package: string
        Name of package to load from.

    entry_group: string
        Name of group.

    entry_name: string
        Name of entry point in group.

    instance_name: string
        Name for instance loaded.

    logger: Logger
        Used for recording progress, etc.

    observer: callable
        Called via an :class:`EggObserver`.
    """
    # Fall back to a do-nothing logger and wrap the raw observer callable.
    logger = NullLogger() if not logger else logger
    observer = EggObserver(observer, logger)
    logger.debug('Loading %s from %s in %s...',
                 entry_name, package, os.getcwd())

    # Locate the installed distribution; surface a lookup failure loudly.
    try:
        dist = pkg_resources.get_distribution(package)
    except pkg_resources.DistributionNotFound as exc:
        logger.error('Distribution not found: %s', exc)
        raise exc

    return _load_from_distribution(dist, entry_group, entry_name,
                                   instance_name, logger, observer)
def _load_from_distribution(dist, entry_group, entry_name, instance_name,
                            logger, observer):
    """ Invoke entry point in distribution and return result. """
    # Dump every advertised entry point for debugging.
    logger.log(LOG_DEBUG2, ' entry points:')
    entry_map = dist.get_entry_map()
    for group_name in sorted(entry_map):
        logger.log(LOG_DEBUG2, ' group %s:' % group_name)
        for entry_pt in entry_map[group_name].values():
            logger.log(LOG_DEBUG2, ' %s', entry_pt)

    info = dist.get_entry_info(entry_group, entry_name)
    if info is None:
        msg = "No '%s' '%s' entry point." % (entry_group, entry_name)
        logger.error(msg)
        raise RuntimeError(msg)

    # Drop any previously imported module of the same name so the entry
    # point's module is freshly loaded from this distribution.
    if info.module_name in sys.modules:
        logger.log(LOG_DEBUG2, " removing existing '%s' in sys.modules",
                   info.module_name)
        del sys.modules[info.module_name]

    try:
        loader = dist.load_entry_point(entry_group, entry_name)
        return loader(name=instance_name, observer=observer.observer)
    # Difficult to generate egg in test process that causes this.
    except pkg_resources.DistributionNotFound as exc:  # pragma no cover
        observer.exception('Distribution not found: %s' % exc)
        # Report which requirement is missing before propagating.
        check_requirements(dist.requires(), logger=logger, indent_level=1)
        raise exc
    # Difficult to generate egg in test process that causes this.
    except pkg_resources.VersionConflict as exc:  # pragma no cover
        observer.exception('Version conflict: %s' % exc)
        check_requirements(dist.requires(), logger=logger, indent_level=1)
        raise exc
    # Difficult to generate egg in test process that causes this.
    except Exception as exc:  # pragma no cover
        observer.exception('Loader exception:')
        logger.exception('')
        raise exc
def _dist_from_eggfile(filename, logger, observer):
    """
    Create distribution by unpacking egg file.

    Extracts `filename` into the current working directory and returns
    ``(name, dist)`` where `name` is the egg's top-level package name and
    `dist` is a :class:`pkg_resources.Distribution` built from the
    extracted metadata.

    filename: string
        Path to the egg file.
    logger: Logger
        Used for recording progress, etc.
    observer: EggObserver
        Notified of extraction progress and errors.

    Raises ValueError if `filename` is missing or not a zipfile, and
    RuntimeError if any recorded 'orphan' module cannot be imported.
    """
    if not os.path.exists(filename):
        msg = "'%s' not found." % filename
        observer.exception(msg)
        raise ValueError(msg)
    if not zipfile.is_zipfile(filename):
        msg = "'%s' is not an egg/zipfile." % filename
        observer.exception(msg)
        raise ValueError(msg)
    # Extract files.
    archive = zipfile.ZipFile(filename, 'r', allowZip64=True)
    try:
        # First line of top_level.txt names the egg's top-level package.
        name = archive.read('EGG-INFO/top_level.txt').split('\n')[0]
        logger.log(LOG_DEBUG2, " name '%s'", name)
        if observer.observer is not None:
            # Collect totals.
            # Floats so the progress ratios below use true division.
            total_files = 0.
            total_bytes = 0.
            for info in archive.infolist():
                fname = info.filename
                # Just being defensive.
                if not fname.startswith(name) and \
                   not fname.startswith('EGG-INFO'):  # pragma no cover
                    continue
                total_files += 1
                total_bytes += info.file_size
        else:
            total_files = 1.  # Avoid divide-by-zero.
            total_bytes = 1.
        files = 0.
        size = 0.
        for info in archive.infolist():
            fname = info.filename
            # Just being defensive.
            if not fname.startswith(name) and \
               not fname.startswith('EGG-INFO'):  # pragma no cover
                continue
            # Report fractional progress before extracting each member.
            observer.extract(fname, files / total_files, size / total_bytes)
            dirname = os.path.dirname(fname)
            if dirname == 'EGG-INFO':
                # Extract EGG-INFO as subdirectory.
                archive.extract(fname, name)
            else:
                archive.extract(fname)
                if sys.platform != 'win32':
                    # Set permissions, extract() doesn't.
                    # Upper 16 bits of external_attr hold the Unix mode.
                    rwx = (info.external_attr >> 16) & 0777
                    if rwx:
                        os.chmod(fname, rwx)  # Only if something valid.
            files += 1
            size += info.file_size
    finally:
        archive.close()

    # Create distribution from extracted files.
    location = os.getcwd()
    egg_info = os.path.join(location, name, 'EGG-INFO')
    provider = pkg_resources.PathMetadata(location, egg_info)
    dist = pkg_resources.Distribution.from_location(location,
                                                    os.path.basename(filename),
                                                    provider)

    logger.log(LOG_DEBUG2, ' project_name: %s', dist.project_name)
    logger.log(LOG_DEBUG2, ' version: %s', dist.version)
    logger.log(LOG_DEBUG2, ' py_version: %s', dist.py_version)
    logger.log(LOG_DEBUG2, ' platform: %s', dist.platform)
    logger.log(LOG_DEBUG2, ' requires:')
    for req in dist.requires():
        logger.log(LOG_DEBUG2, ' %s', req)

    # If any module didn't have a distribution, check that we can import it.
    if provider.has_metadata('openmdao_orphans.txt'):
        errors = 0
        orphan_names = []
        for mod in provider.get_metadata_lines('openmdao_orphans.txt'):
            mod = mod.strip()
            logger.log(LOG_DEBUG2, " checking for 'orphan' module: %s", mod)
            try:
                __import__(mod)
            # Difficult to generate a distribution that can't be reloaded.
            except ImportError:  # pragma no cover
                logger.error("Can't import %s, which didn't have a known"
                             " distribution when the egg was written.", mod)
                orphan_names.append(mod)
                errors += 1
        # Difficult to generate a distribution that can't be reloaded.
        if errors:  # pragma no cover
            plural = 's' if errors > 1 else ''
            msg = "Couldn't import %d 'orphan' module%s: %s." \
                  % (errors, plural, orphan_names)
            observer.exception(msg)
            raise RuntimeError(msg)

    return (name, dist)
def check_requirements(required, logger=None, indent_level=0):
    """
    Display requirements (if logger debug level enabled) and note conflicts.
    Returns a list of unavailable requirements.

    required: list
        List of package requirements.

    logger: Logger
        Used for recording progress, etc.

    indent_level: int
        Used to improve readability of log messages.
    """
    logger = logger or NullLogger()
    unavailable = []

    def _walk(reqs, depth, seen, workset):
        # Indentation grows with recursion depth for readable logs.
        pad = ' ' * depth
        pad2 = ' ' * (depth + 1)
        for requirement in reqs:
            logger.log(LOG_DEBUG2, '%schecking %s', pad, requirement)
            try:
                found = workset.find(requirement)
            # Difficult to generate a distribution that can't be reloaded.
            except pkg_resources.VersionConflict:  # pragma no cover
                conflicting = workset.by_key[requirement.key]
                logger.debug('%sconflicts with %s %s', pad2,
                             conflicting.project_name, conflicting.version)
                unavailable.append(requirement)
                continue
            # Difficult to generate a distribution that can't be reloaded.
            if found is None:  # pragma no cover
                logger.debug('%sno distribution found', pad2)
                unavailable.append(requirement)
            else:
                logger.log(LOG_DEBUG2, '%s%s %s', pad2,
                           found.project_name, found.version)
                # Recurse into each distribution's own requirements once.
                if found not in seen:
                    seen.add(found)
                    _walk(found.requires(), depth + 1, seen, workset)

    _walk(required, indent_level, set(), pkg_resources.WorkingSet())
    return unavailable
def load(instream, fmt=SAVE_CPICKLE, package=None, logger=None):
    """
    Load object(s) from an input stream (or filename).
    If `instream` is a string that is not an existing filename or
    absolute path, then it is searched for using :mod:`pkg_resources`.
    Returns the root object.

    instream: file or string
        Stream or filename to load from.
    fmt: int
        Format of state data (SAVE_CPICKLE or SAVE_PICKLE).
    package: string
        Name of package to use.
    logger: Logger
        Used for recording progress, etc.

    Raises ValueError for an unparseable state filename, IOError if the
    resource cannot be found, and RuntimeError for an unsupported `fmt`.
    """
    logger = logger or NullLogger()

    new_stream = False
    if isinstance(instream, basestring):
        if not os.path.exists(instream) and not os.path.isabs(instream):
            # Try to locate via pkg_resources.
            if not package:
                # Default the package to the filename's stem
                # (e.g. 'mypkg.state' -> package 'mypkg').
                dot = instream.rfind('.')
                if dot < 0:
                    raise ValueError("Bad state filename '%s'." % instream)
                package = instream[:dot]
            logger.debug("Looking for '%s' in package '%s'", instream, package)
            path = pkg_resources.resource_filename(package, instream)
            if not os.path.exists(path):
                raise IOError("State file '%s' not found." % instream)
            instream = path
            # The state file assumes a sys.path.
            package_dir = os.path.dirname(path)
            if not package_dir in sys.path:
                sys.path.append(package_dir)
        # Binary mode for pickle formats; universal-newline text otherwise.
        if fmt is SAVE_CPICKLE or fmt is SAVE_PICKLE:
            mode = 'rb'
        else:
            mode = 'rU'
        instream = open(instream, mode)
        new_stream = True
    try:
        if fmt is SAVE_CPICKLE:
            top = cPickle.load(instream)
        elif fmt is SAVE_PICKLE:
            top = pickle.load(instream)
        else:
            raise RuntimeError("Can't load object using format '%s'" % fmt)
    finally:
        # Only close streams this function opened itself.
        if new_stream:
            instream.close()
    return top
|
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import os
import re
import shutil
import stat
import sys
import tempfile
import TestCommon
from TestCommon import __all__
# Re-export everything TestCommon exports, plus the TestGyp factory
# defined at the bottom of this module.
__all__.extend([
  'TestGyp',
])
class TestGypBase(TestCommon.TestCommon):
  """
  Class for controlling end-to-end tests of gyp generators.

  Instantiating this class will create a temporary directory and
  arrange for its destruction (via the TestCmd superclass) and
  copy all of the non-gyptest files in the directory hierarchy of the
  executing script.

  The default behavior is to test the 'gyp' or 'gyp.bat' file in the
  current directory. An alternative may be specified explicitly on
  instantiation, or by setting the TESTGYP_GYP environment variable.

  This class should be subclassed for each supported gyp generator
  (format). Various abstract methods below define calling signatures
  used by the test scripts to invoke builds on the generated build
  configuration and to run executables generated by those builds.
  """

  build_tool = None
  build_tool_list = []

  # Platform-specific artifact prefixes/suffixes, taken from TestCommon
  # (e.g. '.exe' on Windows, 'lib'/'.so' on Linux for shared libraries).
  _exe = TestCommon.exe_suffix
  _obj = TestCommon.obj_suffix
  shobj_ = TestCommon.shobj_prefix
  _shobj = TestCommon.shobj_suffix
  lib_ = TestCommon.lib_prefix
  _lib = TestCommon.lib_suffix
  dll_ = TestCommon.dll_prefix
  _dll = TestCommon.dll_suffix

  # Constants to represent different targets.
  ALL = '__all__'
  DEFAULT = '__default__'

  # Constants for different target types.
  EXECUTABLE = '__executable__'
  STATIC_LIB = '__static_lib__'
  SHARED_LIB = '__shared_lib__'

  def __init__(self, gyp=None, *args, **kw):
    # Directory of the invoking test script; used below to copy its
    # tree into the temporary work area.
    self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))

    # Resolve which gyp driver to run: explicit arg, then the
    # TESTGYP_GYP environment variable, then a platform default.
    if not gyp:
      gyp = os.environ.get('TESTGYP_GYP')
      if not gyp:
        if sys.platform == 'win32':
          gyp = 'gyp.bat'
        else:
          gyp = 'gyp'
    self.gyp = os.path.abspath(gyp)

    self.initialize_build_tool()

    if not kw.has_key('match'):
      kw['match'] = TestCommon.match_exact

    # Put test output in out/testworkarea by default.
    # Use temporary names so there are no collisions.
    workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
    # Create work area if it doesn't already exist.
    try:
      os.makedirs(workdir)
    except OSError:
      pass
    kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)

    # 'formats' restricts which generators the test applies to; a
    # leading '!' excludes a format.
    formats = kw.get('formats', [])
    if kw.has_key('formats'):
      del kw['formats']

    super(TestGypBase, self).__init__(*args, **kw)

    excluded_formats = set([f for f in formats if f[0] == '!'])
    included_formats = set(formats) - excluded_formats
    if ('!'+self.format in excluded_formats or
        included_formats and self.format not in included_formats):
      msg = 'Invalid test for %r format; skipping test.\n'
      self.skip_test(msg % self.format)

    self.copy_test_configuration(self.origin_cwd, self.workdir)
    self.set_configuration(None)

    # Set $HOME so that gyp doesn't read the user's actual
    # ~/.gyp/include.gypi file, which may contain variables
    # and other settings that would change the output.
    os.environ['HOME'] = self.workpath()
    # Clear $GYP_DEFINES for the same reason.
    if 'GYP_DEFINES' in os.environ:
      del os.environ['GYP_DEFINES']

  def built_file_must_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name does not exist.
    """
    return self.must_exist(self.built_file_path(name, type, **kw))

  def built_file_must_not_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name exists.
    """
    return self.must_not_exist(self.built_file_path(name, type, **kw))

  def built_file_must_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    do not match the specified contents.
    """
    return self.must_match(self.built_file_path(name, **kw), contents)

  def built_file_must_not_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    match the specified contents.
    """
    return self.must_not_match(self.built_file_path(name, **kw), contents)

  def copy_test_configuration(self, source_dir, dest_dir):
    """
    Copies the test configuration from the specified source_dir
    (the directory in which the test script lives) to the
    specified dest_dir (a temporary working directory).

    This ignores all files and directories that begin with
    the string 'gyptest', and all '.svn' subdirectories.
    """
    for root, dirs, files in os.walk(source_dir):
      if '.svn' in dirs:
        dirs.remove('.svn')
      # Rebinding dirs/files filters what os.walk descends into.
      dirs = [ d for d in dirs if not d.startswith('gyptest') ]
      files = [ f for f in files if not f.startswith('gyptest') ]
      for dirname in dirs:
        source = os.path.join(root, dirname)
        destination = source.replace(source_dir, dest_dir)
        os.mkdir(destination)
        if sys.platform != 'win32':
          shutil.copystat(source, destination)
      for filename in files:
        source = os.path.join(root, filename)
        destination = source.replace(source_dir, dest_dir)
        shutil.copy2(source, destination)

  def initialize_build_tool(self):
    """
    Initializes the .build_tool attribute.

    Searches the .build_tool_list for an executable name on the user's
    $PATH. The first tool on the list is used as-is if nothing is found
    on the current $PATH.
    """
    for build_tool in self.build_tool_list:
      if not build_tool:
        continue
      if os.path.isabs(build_tool):
        self.build_tool = build_tool
        return
      build_tool = self.where_is(build_tool)
      if build_tool:
        self.build_tool = build_tool
        return
    # Fallback: first entry verbatim, even though it wasn't found.
    if self.build_tool_list:
      self.build_tool = self.build_tool_list[0]

  def relocate(self, source, destination):
    """
    Renames (relocates) the specified source (usually a directory)
    to the specified destination, creating the destination directory
    first if necessary.

    Note: Don't use this as a generic "rename" operation. In the
    future, "relocating" parts of a GYP tree may affect the state of
    the test to modify the behavior of later method calls.
    """
    destination_dir = os.path.dirname(destination)
    if not os.path.exists(destination_dir):
      self.subdir(destination_dir)
    os.rename(source, destination)

  def report_not_up_to_date(self):
    """
    Reports that a build is not up-to-date.

    This provides common reporting for formats that have complicated
    conditions for checking whether a build is up-to-date. Formats
    that expect exact output from the command (make, scons) can
    just set stdout= when they call the run_build() method.
    """
    print "Build is not up-to-date:"
    print self.banner('STDOUT ')
    print self.stdout()
    stderr = self.stderr()
    if stderr:
      print self.banner('STDERR ')
      print stderr

  def run_gyp(self, gyp_file, *args, **kw):
    """
    Runs gyp against the specified gyp_file with the specified args.
    """
    # TODO: --depth=. works around Chromium-specific tree climbing.
    depth = '.'
    if 'depth' in kw:
      depth = kw['depth']
      del kw['depth']
    args = ('--depth='+depth, '--format='+self.format, gyp_file) + args
    return self.run(program=self.gyp, arguments=args, **kw)

  def run(self, *args, **kw):
    """
    Executes a program by calling the superclass .run() method.

    This exists to provide a common place to filter out keyword
    arguments implemented in this layer, without having to update
    the tool-specific subclasses or clutter the tests themselves
    with platform-specific code.
    """
    # SYMROOT is only meaningful to the Xcode subclass; drop it here.
    if kw.has_key('SYMROOT'):
      del kw['SYMROOT']
    super(TestGypBase, self).run(*args, **kw)

  def set_configuration(self, configuration):
    """
    Sets the configuration, to be used for invoking the build
    tool and testing potential built output.
    """
    self.configuration = configuration

  def configuration_dirname(self):
    # Strip any '|platform' suffix (MSVS style 'Debug|Win32').
    if self.configuration:
      return self.configuration.split('|')[0]
    else:
      return 'Default'

  def configuration_buildname(self):
    # Full configuration name as passed to the build tool.
    if self.configuration:
      return self.configuration
    else:
      return 'Default'

  #
  # Abstract methods to be defined by format-specific subclasses.
  #

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a build of the specified target against the configuration
    generated from the specified gyp_file.

    A 'target' argument of None or the special value TestGyp.DEFAULT
    specifies the default argument for the underlying build tool.
    A 'target' argument of TestGyp.ALL specifies the 'all' target
    (if any) of the underlying build tool.
    """
    raise NotImplementedError

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type.
    """
    raise NotImplementedError

  def built_file_basename(self, name, type=None, **kw):
    """
    Returns the base name of the specified file name, of the specified type.

    A bare=True keyword argument specifies that prefixes and suffixes shouldn't
    be applied.
    """
    if not kw.get('bare'):
      if type == self.EXECUTABLE:
        name = name + self._exe
      elif type == self.STATIC_LIB:
        name = self.lib_ + name + self._lib
      elif type == self.SHARED_LIB:
        name = self.dll_ + name + self._dll
    return name

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable program built from a gyp-generated configuration.

    The specified name should be independent of any particular generator.
    Subclasses should find the output executable in the appropriate
    output build directory, tack on any necessary executable suffix, etc.
    """
    raise NotImplementedError

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified target is up to date.

    The subclass should implement this by calling build()
    (or a reasonable equivalent), checking whatever conditions
    will tell it the build was an "up to date" null build, and
    failing if it isn't.
    """
    raise NotImplementedError
class TestGypGypd(TestGypBase):
  """
  Subclass for testing the GYP 'gypd' generator (spit out the
  internal data structure as pretty-printed Python).

  No build tool is involved, so none of the abstract build/run
  methods are overridden here.
  """
  format = 'gypd'
class TestGypMake(TestGypBase):
  """
  Subclass for testing the GYP Make generator.
  """
  format = 'make'
  build_tool_list = ['make']
  # Make's aggregate target is literally named 'all'.
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a Make build using the Makefiles generated from the specified
    gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    if self.configuration:
      arguments.append('BUILDTYPE=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    # Sub-directory builds provide per-gyp Makefiles (i.e.
    # Makefile.gyp_filename), so use that if there is no Makefile.
    chdir = kw.get('chdir', '')
    if not os.path.exists(os.path.join(chdir, 'Makefile')):
      print "NO Makefile in " + os.path.join(chdir, 'Makefile')
      arguments.insert(0, '-f')
      arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Make target is up to date.
    """
    if target in (None, self.DEFAULT):
      message_target = 'all'
    else:
      message_target = target
    # Make prints this exact line for a null build; match it verbatim.
    kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Make.
    """
    configuration = self.configuration_dirname()
    libdir = os.path.join('out', configuration, 'lib')
    # TODO(piman): when everything is cross-compile safe, remove lib.target
    os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Make.

    Built files are in the subdirectory 'out/{configuration}'.
    The default is 'out/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.

    A subdir= keyword argument specifies a library subdirectory within
    the default 'obj.target'.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['out', configuration])
    # On darwin libraries land directly in the output dir, not in
    # obj.target/lib.target subdirectories.
    if type == self.STATIC_LIB and sys.platform != 'darwin':
      result.append('obj.target')
    elif type == self.SHARED_LIB and sys.platform != 'darwin':
      result.append('lib.target')
    subdir = kw.get('subdir')
    if subdir:
      result.append(subdir)
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypNinja(TestGypBase):
  """
  Subclass for testing the GYP Ninja generator.
  """
  format = 'ninja'
  build_tool_list = ['ninja']
  # Ninja uses 'all' both as the aggregate and the default target.
  ALL = 'all'
  DEFAULT = 'all'

  def run_gyp(self, gyp_file, *args, **kw):
    # Plain delegation to the base class; kept as an explicit override
    # (historical hook point for ninja-specific gyp arguments).
    TestGypBase.run_gyp(self, gyp_file, *args, **kw)

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a ninja build from the generated 'out/{configuration}' directory.
    """
    arguments = kw.get('arguments', [])[:]
    # Add a -C output/path to the command line.
    arguments.append('-C')
    arguments.append(os.path.join('out', self.configuration_dirname()))
    if target is None:
      target = 'all'
    arguments.append(target)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by ninja.
    """
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    if sys.platform == 'darwin':
      libdir = os.path.join('out', 'Default')
      if self.configuration:
        libdir = os.path.join('out', self.configuration)
      os.environ['DYLD_LIBRARY_PATH'] = libdir
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by ninja, under 'out/{configuration}'.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append('out')
    result.append(self.configuration_dirname())
    if type == self.STATIC_LIB:
      if sys.platform != 'darwin':
        result.append('obj')
    elif type == self.SHARED_LIB:
      if sys.platform != 'darwin' and sys.platform != 'win32':
        result.append('lib')
    subdir = kw.get('subdir')
    if subdir:
      result.append(subdir)
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a ninja build of the specified target is a null build.
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      stdout = self.stdout()
      # Ninja announces a null build with this exact phrase.
      if 'ninja: no work to do' not in stdout:
        self.report_not_up_to_date()
        self.fail_test()
    return result
class TestGypMSVS(TestGypBase):
  """
  Subclass for testing the GYP Visual Studio generator.
  """
  format = 'msvs'

  # Matches devenv's build summary line; group 1 is the up-to-date count.
  u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
  up_to_date_re = re.compile(u, re.M)

  # Initial None element will indicate to our .initialize_build_tool()
  # method below that 'devenv' was not found on %PATH%.
  #
  # Note: we must use devenv.com to be able to capture build output.
  # Directly executing devenv.exe only sends output to BuildLog.htm.
  build_tool_list = [None, 'devenv.com']

  def initialize_build_tool(self):
    """ Initializes the Visual Studio .build_tool and .uses_msbuild parameters.

    We use the value specified by GYP_MSVS_VERSION. If not specified, we
    search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
    Failing that, we search for likely deployment paths.
    """
    super(TestGypMSVS, self).initialize_build_tool()
    possible_roots = ['C:\\Program Files (x86)', 'C:\\Program Files',
                      'E:\\Program Files (x86)', 'E:\\Program Files']
    possible_paths = {
        '2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
        '2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
        '2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
    msvs_version = os.environ.get('GYP_MSVS_VERSION', 'auto')
    if msvs_version in possible_paths:
      # Check that the path to the specified GYP_MSVS_VERSION exists.
      path = possible_paths[msvs_version]
      for r in possible_roots:
        bt = os.path.join(r, path)
        if os.path.exists(bt):
          self.build_tool = bt
          # VS2010+ builds via MSBuild (string compare works for years).
          self.uses_msbuild = msvs_version >= '2010'
          return
      else:
        print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
               'but corresponding "%s" was not found.' % (msvs_version, path))
    if self.build_tool:
      # We found 'devenv' on the path, use that and try to guess the version.
      for version, path in possible_paths.iteritems():
        if self.build_tool.find(path) >= 0:
          self.uses_msbuild = version >= '2010'
          return
      else:
        # If not, assume not MSBuild.
        self.uses_msbuild = False
      return
    # Neither GYP_MSVS_VERSION nor the path help us out. Iterate through
    # the choices looking for a match.
    for version, path in possible_paths.iteritems():
      for r in possible_roots:
        bt = os.path.join(r, path)
        if os.path.exists(bt):
          self.build_tool = bt
          self.uses_msbuild = msvs_version >= '2010'
          return
    print 'Error: could not find devenv'
    sys.exit(1)

  def build(self, gyp_file, target=None, rebuild=False, **kw):
    """
    Runs a Visual Studio build using the configuration generated
    from the specified gyp_file.
    """
    configuration = self.configuration_buildname()
    if rebuild:
      build = '/Rebuild'
    else:
      build = '/Build'
    arguments = kw.get('arguments', [])[:]
    arguments.extend([gyp_file.replace('.gyp', '.sln'),
                      build, configuration])
    # Note: the Visual Studio generator doesn't add an explicit 'all'
    # target, so we just treat it the same as the default.
    if target not in (None, self.ALL, self.DEFAULT):
      arguments.extend(['/Project', target])
    if self.configuration:
      arguments.extend(['/ProjectConfig', self.configuration])
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Visual Studio target is up to date.

    Beware that VS2010 will behave strangely if you build under
    C:\USERS\yourname\AppData\Local. It will cause needless work. The ouptut
    will be "1 succeeded and 0 up to date". MSBuild tracing reveals that:
    "Project 'C:\Users\...\AppData\Local\...vcxproj' not up to date because
    'C:\PROGRAM FILES (X86)\MICROSOFT VISUAL STUDIO 10.0\VC\BIN\1033\CLUI.DLL'
    was modified at 02/21/2011 17:03:30, which is newer than '' which was
    modified at 01/01/0001 00:00:00.

    The workaround is to specify a workdir when instantiating the test, e.g.
    test = TestGyp.TestGyp(workdir='workarea')
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      stdout = self.stdout()
      # Up to date when exactly one project is reported up-to-date.
      m = self.up_to_date_re.search(stdout)
      up_to_date = m and m.group(1) == '1'
      if not up_to_date:
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Visual Studio.
    """
    configuration = self.configuration_dirname()
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Visual Studio.

    Built files are in a subdirectory that matches the configuration
    name. The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type == self.STATIC_LIB:
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypSCons(TestGypBase):
  """
  Subclass for testing the GYP SCons generator.
  """
  format = 'scons'
  build_tool_list = ['scons', 'scons.py']
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a scons build using the SCons configuration generated from the
    specified gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    dirname = os.path.dirname(gyp_file)
    if dirname:
      arguments.extend(['-C', dirname])
    if self.configuration:
      arguments.append('--mode=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified SCons target is up to date.
    """
    if target in (None, self.DEFAULT):
      up_to_date_targets = 'all'
    else:
      up_to_date_targets = target
    # SCons prints one "is up to date" line per requested target;
    # match the exact expected concatenation.
    up_to_date_lines = []
    for arg in up_to_date_targets.split():
      up_to_date_lines.append("scons: `%s' is up to date.\n" % arg)
    kw['stdout'] = ''.join(up_to_date_lines)
    arguments = kw.get('arguments', [])[:]
    # -Q suppresses SCons' progress messages so stdout matches exactly.
    arguments.append('-Q')
    kw['arguments'] = arguments
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by scons.
    """
    configuration = self.configuration_dirname()
    os.environ['LD_LIBRARY_PATH'] = os.path.join(configuration, 'lib')
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Scons.

    Built files are in a subdirectory that matches the configuration
    name. The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type in (self.STATIC_LIB, self.SHARED_LIB):
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypXcode(TestGypBase):
  """
  Subclass for testing the GYP Xcode generator.
  """
  format = 'xcode'
  build_tool_list = ['xcodebuild']

  # Regex matching xcodebuild's report of a shell-script build phase,
  # which can run even when the overall build is otherwise up to date.
  phase_script_execution = ("\n"
                            "PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
                            " cd /\\S+\n"
                            " /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
                            "(make: Nothing to be done for `all'\\.\n)?")

  strip_up_to_date_expressions = [
    # Various actions or rules can run even when the overall build target
    # is up to date. Strip those phases' GYP-generated output.
    re.compile(phase_script_execution, re.S),

    # The message from distcc_pump can trail the "BUILD SUCCEEDED"
    # message, so strip that, too.
    re.compile('__________Shutting down distcc-pump include server\n', re.S),
  ]

  # Endings of a null-build's output, by Xcode version.
  up_to_date_endings = (
    'Checking Dependencies...\n** BUILD SUCCEEDED **\n',  # Xcode 3.0/3.1
    'Check dependencies\n** BUILD SUCCEEDED **\n\n',  # Xcode 3.2
  )

  def build(self, gyp_file, target=None, **kw):
    """
    Runs an xcodebuild using the .xcodeproj generated from the specified
    gyp_file.
    """
    # Be sure we're working with a copy of 'arguments' since we modify it.
    # The caller may not be expecting it to be modified.
    arguments = kw.get('arguments', [])[:]
    arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
    if target == self.ALL:
      arguments.append('-alltargets',)
    elif target not in (None, self.DEFAULT):
      arguments.extend(['-target', target])
    if self.configuration:
      arguments.extend(['-configuration', self.configuration])
    symroot = kw.get('SYMROOT', '$SRCROOT/build')
    if symroot:
      arguments.append('SYMROOT='+symroot)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Xcode target is up to date.
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      output = self.stdout()
      # Remove output from phases that legitimately run on null builds,
      # then check for a known "nothing happened" ending.
      for expression in self.strip_up_to_date_expressions:
        output = expression.sub('', output)
      if not output.endswith(self.up_to_date_endings):
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by xcodebuild.
    """
    configuration = self.configuration_dirname()
    os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Xcode.

    Built files are in the subdirectory 'build/{configuration}'.
    The default is 'build/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['build', configuration])
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
# All generator-specific test classes, consulted by the TestGyp()
# factory below to map a format name to its implementation.
format_class_list = [
  TestGypGypd,
  TestGypMake,
  TestGypMSVS,
  TestGypNinja,
  TestGypSCons,
  TestGypXcode,
]
def TestGyp(*args, **kw):
  """
  Returns an appropriate TestGyp* instance for a specified GYP format.

  The format is taken from the 'format' keyword argument if present,
  otherwise from the TESTGYP_FORMAT environment variable. Remaining
  arguments are forwarded to the chosen class's constructor.
  Raises Exception for an unrecognized format.
  """
  format = kw.get('format')
  if format:
    del kw['format']
  else:
    format = os.environ.get('TESTGYP_FORMAT')
  for format_class in format_class_list:
    if format == format_class.format:
      return format_class(*args, **kw)
  raise Exception, "unknown format %r" % format
|
|
import inspect
from functools import partial
from typing import Callable, Dict, List, Optional, Tuple, Union, Any, Sequence
import numpy as np
import tensorflow as tf
from six import string_types
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
from odin.fuel import IterableDataset
from tensorflow.python import keras
from tensorflow.python.keras.layers import Layer, Activation, Flatten
from tensorflow_probability.python.distributions import Normal
from tensorflow_probability.python.distributions.pixel_cnn import \
_PixelCNNNetwork
from tensorflow_probability.python.layers import DistributionLambda
from typeguard import typechecked
from typing_extensions import Literal
from odin.bay.distributions import (Blockwise, Categorical, ContinuousBernoulli,
Distribution, Gamma,
JointDistributionSequential,
MixtureQuantizedLogistic, QuantizedLogistic,
VonMises, Bernoulli, Independent)
from odin.networks.base_networks import SequentialNetwork
# Public API of this module: network factories per dataset plus helpers.
__all__ = [
  'CenterAt0',
  'PixelCNNDecoder',
  'mnist_networks',
  'dsprites_networks',
  'shapes3d_networks',
  'cifar_networks',
  'svhn_networks',
  'cifar10_networks',
  'cifar20_networks',
  'cifar100_networks',
  'celeba_networks',
  'get_networks',
  'get_optimizer_info',
]
# ===========================================================================
# Helpers
# ===========================================================================
def _parse_distribution(
    input_shape: Tuple[int, int, int],
    distribution: Literal['qlogistic', 'mixqlogistic', 'bernoulli', 'gaussian'],
    n_components=10,
    n_channels=3) -> Tuple[int, DistributionLambda, Layer]:
  """Build the pixel-level observation distribution for an image decoder.

  Parameters
  ----------
  input_shape : (height, width, channels) of the decoded image.
  distribution : likelihood family for the pixels.
  n_components : number of mixture components ('mixqlogistic' only).
  n_channels : effectively unused — overwritten by ``input_shape[-1]`` below.

  Returns
  -------
  ``(n_params, observation, last_layer)`` — the number of distribution
  parameters per channel, the distribution layer (named ``'image'``), and a
  linear pass-through layer to append at the end of the decoder.
  """
  from odin.bay.layers import DistributionDense
  # NOTE(review): the n_channels argument is always clobbered here, so any
  # caller-provided value is ignored — confirm whether that is intended.
  n_channels = input_shape[-1]
  last_layer = Activation('linear')
  # === 1. Quantized logistic
  if distribution == 'qlogistic':
    n_params = 2  # (loc, scale) per channel
    observation = DistributionLambda(
        lambda params: QuantizedLogistic(
            *[
                # loc
                p if i == 0 else
                # Ensure scales are positive and do not collapse to near-zero
                tf.nn.softplus(p) + tf.cast(tf.exp(-7.), tf.float32)
                for i, p in enumerate(tf.split(params, 2, -1))],
            low=0,
            high=255,
            inputs_domain='sigmoid',
            reinterpreted_batch_ndims=3),
        convert_to_tensor_fn=Distribution.sample,
        name='image'
    )
  # === 2. Mixture Quantized logistic
  elif distribution == 'mixqlogistic':
    n_params = MixtureQuantizedLogistic.params_size(
        n_components=n_components,
        n_channels=n_channels) // n_channels
    observation = DistributionLambda(
        lambda params: MixtureQuantizedLogistic(params,
                                                n_components=n_components,
                                                n_channels=n_channels,
                                                inputs_domain='sigmoid',
                                                high=255,
                                                low=0),
        convert_to_tensor_fn=Distribution.mean,
        name='image')
  # === 3. Bernoulli
  elif distribution == 'bernoulli':
    n_params = 1  # a single logit per pixel
    observation = DistributionDense(
        event_shape=input_shape,
        posterior=lambda p: Independent(Bernoulli(logits=p), len(input_shape)),
        projection=False,
        name="image")
  # === 4. Gaussian
  elif distribution == 'gaussian':
    n_params = 2  # (loc, scale) per pixel
    observation = DistributionDense(
        event_shape=input_shape,
        posterior=lambda p: Independent(Normal(
            *tf.split(p, 2, -1)), len(input_shape)),
        projection=False,
        name="image")
  else:
    raise ValueError(f'No support for distribution {distribution}')
  return n_params, observation, last_layer
class CenterAt0(keras.layers.Layer):
  """Rescale images from the [0, 1] domain to the [-1, 1] domain.

  When ``div_255`` is set, inputs are first divided by 255 (so raw
  [0, 255] pixels are supported).  When ``enable`` is False the layer
  is an identity pass-through.
  """

  def __init__(self,
               enable: bool = True,
               div_255: bool = False,
               name: str = 'CenterAt0'):
    super().__init__(name=name)
    self.enable = bool(enable)
    self.div_255 = bool(div_255)

  def call(self, inputs, **kwargs):
    if not self.enable:
      return inputs
    x = inputs / 255. if self.div_255 else inputs
    return 2. * x - 1.

  def get_config(self):
    return dict(enable=self.enable, div_255=self.div_255)

  def __str__(self):
    return f'<Center [-1,1] enable:{self.enable} div255:{self.div_255}>'

  def __repr__(self):
    return self.__str__()
class LogNorm(keras.layers.Layer):
  """Library-size normalize then log-transform count vectors.

  Each sample is normalized to sum to one along the last axis, scaled by
  a constant factor (10000) and mapped through ``log(1 + x)``.  Identity
  pass-through when disabled.
  """

  def __init__(self, enable: bool = True, name: str = 'LogNorm'):
    super().__init__(name=name)
    self.scale_factor = 10000
    self.eps = 1e-8  # guards against division by an all-zero row
    self.enable = bool(enable)

  def call(self, x, **kwargs):
    if not self.enable:
      return x
    total = tf.reduce_sum(x, axis=-1, keepdims=True) + self.eps
    return tf.math.log1p(x / total * self.scale_factor)

  def get_config(self):
    return dict(enable=self.enable)
def _prepare_cnn(activation=tf.nn.elu):
  """Return ``(conv, deconv)`` layer factories sharing common defaults.

  Both are ``partial``s of Conv2D / Conv2DTranspose with 'same' padding,
  the given activation, and an initializer matched to the activation:
  He-normal for elu, He-uniform otherwise (better for the relu family).
  """
  if activation is tf.nn.elu:
    init = tf.initializers.HeNormal()
  else:
    # he_uniform suits leaky_relu / relu, and is also the fallback default
    init = tf.initializers.HeUniform()
  common = dict(padding='same',
                kernel_initializer=init,
                activation=activation)
  conv = partial(keras.layers.Conv2D, **common)
  deconv = partial(keras.layers.Conv2DTranspose, **common)
  return conv, deconv
class SkipSequential(keras.Model):
  """Sequential generator with skip connections from the latent input.

  For every ``Conv2DTranspose`` in ``layers``, the latent input is
  projected by a 1x1 convolution and added to that layer's pre-activation
  output; the layer's own activation is detached in ``__init__`` and
  applied after the sum.  All other layers are applied unchanged.

  WARNING: the constructor mutates the passed transpose-conv layers
  (sets their ``activation`` to linear) as a side effect.
  """

  def __init__(self, layers: Sequence[Layer] = (), name: str = 'SkipGenerator'):
    super().__init__(name=name)
    self.all_layers = list(layers)
    self.proj_layers = list()
    self.activation = list()
    linear = keras.activations.get('linear')
    for l in layers:
      if isinstance(l, keras.layers.Conv2DTranspose):
        # remember the original nonlinearity, then neutralize it so the
        # skip projection can be added before the activation in call()
        self.activation.append(l.activation)
        self.proj_layers.append(
            keras.layers.Conv2D(l.filters, (1, 1),
                                padding='same',
                                activation='linear',
                                name=f'{l.name}_proj'))
        l.activation = linear
      else:
        # placeholder entries keep the three lists index-aligned
        self.proj_layers.append(None)
        self.activation.append(None)

  def build(self, input_shape):
    # this is a simple logic why keras don't do this by default!
    self._build_input_shape = input_shape
    return super().build(input_shape)

  @property
  def input_shape(self):
    return self._build_input_shape

  def call(self, x, **kwargs):
    # broadcastable latent "image" of shape (batch, 1, 1, zdim), reused as
    # the source of every skip connection below
    z = tf.reshape(x, (-1, 1, 1, x.shape[-1]))
    for fn, proj, activation in zip(self.all_layers, self.proj_layers,
                                    self.activation):
      x = fn(x, **kwargs)
      # shortcut connection
      if proj is not None:
        z_proj = proj(z, **kwargs)
        x = activation(x + z_proj)
    return x
# ===========================================================================
# Basic Network
# ===========================================================================
@typechecked
def mnist_networks(
    qz: str = 'mvndiag',
    zdim: Optional[int] = None,
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
    is_semi_supervised: bool = False,
    is_hierarchical: bool = False,
    centerize_image: bool = True,
    skip_generator: bool = False,
    **kwargs,
) -> Dict[str, Layer]:
  """Network for MNIST dataset image size (28, 28, 1).

  Parameters
  ----------
  qz : posterior family name for the latent variable (e.g. 'mvndiag').
  zdim : latent dimension; defaults to 32 when None.
  activation : nonlinearity for all conv/deconv layers.
  is_semi_supervised : when True, adds a one-hot 'labels' posterior.
  is_hierarchical : when True, keeps BiConvLatents wrappers in the decoder;
      otherwise they are unwrapped to their plain conv layer.
  centerize_image : rescale inputs from [0, 1] to [-1, 1] at the encoder.
  skip_generator : use SkipSequential (latent skip connections) as decoder.

  Recognized ``kwargs``: ``n_channels`` (default 1), ``distribution``
  (default 'bernoulli'), ``labels_name`` (default 'digits').

  Returns
  -------
  dict with keys 'encoder', 'decoder', 'observation', 'latents'
  (and 'labels' when semi-supervised).
  """
  from odin.bay.random_variable import RVconf
  from odin.bay.vi import BiConvLatents
  n_channels = int(kwargs.get('n_channels', 1))
  proj_dim = 196
  input_shape = (28, 28, n_channels)
  if zdim is None:
    zdim = 32
  conv, deconv = _prepare_cnn(activation=activation)
  n_params, observation, last_layer = _parse_distribution(
      input_shape, kwargs.get('distribution', 'bernoulli'))
  encoder = SequentialNetwork(
      [
          keras.layers.InputLayer(input_shape),
          CenterAt0(enable=centerize_image),
          conv(32, 5, strides=1, name='encoder0'),  # 28, 28, 32
          conv(32, 5, strides=2, name='encoder1'),  # 14, 14, 32
          conv(64, 5, strides=1, name='encoder2'),  # 14, 14, 64
          conv(64, 5, strides=2, name='encoder3'),  # 7 , 7 , 64
          keras.layers.Flatten(),
          keras.layers.Dense(proj_dim, activation='linear', name='encoder_proj')
      ],
      name='Encoder',
  )
  layers = [
      keras.layers.Dense(proj_dim, activation='linear', name='decoder_proj'),
      keras.layers.Reshape((7, 7, proj_dim // 49)),  # 7, 7, 4
      deconv(64, 5, strides=2, name='decoder2'),  # 14, 14, 64
      BiConvLatents(conv(64, 5, strides=1, name='decoder3'),  # 14, 14, 64
                    encoder=encoder.layers[3],
                    filters=16, kernel_size=14, strides=7,
                    disable=True,
                    name='latents2'),
      deconv(32, 5, strides=2, name='decoder4'),  # 28, 28, 32
      conv(32, 5, strides=1, name='decoder5'),  # 28, 28, 32
      conv(n_channels * n_params, 1, strides=1, activation='linear',
           name='decoder6'),
      last_layer
  ]
  # unwrap BiConvLatents into their plain conv layer unless a hierarchical
  # model explicitly asked for them
  layers = [i.layer if isinstance(i, BiConvLatents) and not is_hierarchical
            else i
            for i in layers]
  if skip_generator:
    decoder = SkipSequential(layers=layers, name='SkipDecoder')
  else:
    decoder = SequentialNetwork(layers=layers, name='Decoder')
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    networks['labels'] = RVconf(
        10,
        'onehot',
        projection=True,
        name=kwargs.get('labels_name', 'digits'),
    ).create_posterior()
  return networks
# Dataset variants reusing the MNIST architecture unchanged:
fashionmnist_networks = partial(mnist_networks, labels_name='fashion')
binarizedmnist_networks = mnist_networks
omniglot_networks = partial(mnist_networks, n_channels=3)
# ===========================================================================
# CIFAR10
# ===========================================================================
class PixelCNNDecoder(keras.Model):
  """Decoder mapping a latent code to a PixelCNN++ autoregressive output.

  The latent vector is projected by a tanh dense layer, reshaped to the
  image shape, then refined by a small ``_PixelCNNNetwork``.
  """

  def __init__(self, input_shape, zdim, n_components, dtype, name):
    # BUG FIX: the name was previously passed positionally
    # (``super().__init__(name)``); keras.Model/Layer bind their first
    # positional argument to ``trainable``, not ``name``, so the model was
    # left auto-named.  Pass it as a keyword instead.
    super().__init__(name=name)
    # NOTE(review): ``zdim`` and ``dtype`` are accepted for interface
    # compatibility but unused by this implementation — confirm intent.
    # create the pixelcnn decoder
    self.pixelcnn = _PixelCNNNetwork(dropout_p=0.3,
                                     num_resnet=1,
                                     num_hierarchies=1,
                                     num_filters=32,
                                     num_logistic_mix=n_components,
                                     use_weight_norm=False)
    self.pixelcnn.build((None,) + input_shape)
    self.dense = keras.layers.Dense(units=int(np.prod(input_shape)),
                                    activation='tanh',
                                    name='decoder0')
    self.reshape = keras.layers.Reshape(input_shape)

  def call(self, inputs, training=None):
    h = self.dense(inputs)
    h = self.reshape(h)
    return self.pixelcnn(h, training=training)
@typechecked
def cifar_networks(
    qz: str = 'mvndiag',
    zdim: Optional[int] = None,
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
    is_semi_supervised: bool = False,
    is_hierarchical: bool = False,
    centerize_image: bool = True,
    skip_generator: bool = False,
    **kwargs,
) -> Dict[str, Layer]:
  """Network for CIFAR dataset image size (32, 32, 3).

  Recognized ``kwargs``: ``n_channels`` (default 3), ``n_classes``
  (default 10, used by the semi-supervised labels posterior) and
  ``distribution`` (default 'qlogistic').

  Returns
  -------
  dict with keys 'encoder', 'decoder', 'observation', 'latents'
  (and 'labels' when semi-supervised).
  """
  from odin.bay.random_variable import RVconf
  from odin.bay.vi.autoencoder.hierarchical_vae import BiConvLatents
  if zdim is None:
    zdim = 256
  n_channels = kwargs.get('n_channels', 3)
  input_shape = (32, 32, n_channels)
  conv, deconv = _prepare_cnn(activation=activation)
  n_classes = kwargs.get('n_classes', 10)
  proj_dim = 8 * 8 * 8
  ## output distribution
  n_params, observation, last_layer = _parse_distribution(
      input_shape, kwargs.get('distribution', 'qlogistic'))
  ## encoder
  encoder = SequentialNetwork(
      [
          CenterAt0(enable=centerize_image),
          conv(32, 4, strides=1, name='encoder0'),  # 32, 32, 32
          conv(32, 4, strides=2, name='encoder1'),  # 16, 16, 32
          conv(64, 4, strides=1, name='encoder2'),  # 16, 16, 64
          conv(64, 4, strides=2, name='encoder3'),  # 8, 8, 64
          keras.layers.Flatten(),
          keras.layers.Dense(proj_dim, activation='linear', name='encoder_proj')
      ],
      name='Encoder',
  )
  layers = [
      keras.layers.Dense(proj_dim, activation='linear', name='decoder_proj'),
      keras.layers.Reshape((8, 8, proj_dim // 64)),  # 8, 8, 4
      deconv(64, 4, strides=2, name='decoder1'),  # 16, 16, 64
      BiConvLatents(conv(64, 4, strides=1, name='decoder2'),  # 16, 16, 64
                    encoder=encoder.layers[3],
                    filters=32, kernel_size=8, strides=4,
                    disable=True,
                    name='latents1'),
      deconv(32, 4, strides=2, name='decoder3'),  # 32, 32, 32
      BiConvLatents(conv(32, 4, strides=1, name='decoder4'),  # 32, 32, 32
                    encoder=encoder.layers[1],
                    filters=16, kernel_size=8, strides=4,
                    disable=True,
                    name='latents2'),
      conv(n_channels * n_params,  # 32, 32, 3
           1,
           strides=1,
           activation='linear',
           name='decoder5'),
      last_layer
  ]
  # unwrap BiConvLatents into their plain conv layer unless a hierarchical
  # model explicitly asked for them
  layers = [i.layer if isinstance(i, BiConvLatents) and not is_hierarchical
            else i
            for i in layers]
  if skip_generator:
    decoder = SkipSequential(layers=layers, name='SkipDecoder')
  else:
    decoder = SequentialNetwork(layers=layers, name='Decoder')
  ## others
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  # create the observation of MixtureQuantizedLogistic
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    networks['labels'] = RVconf(n_classes,
                                'onehot',
                                projection=True,
                                name='labels').create_posterior()
  return networks
cifar10_networks = partial(cifar_networks, n_classes=10)
cifar20_networks = partial(cifar_networks, n_classes=20)
cifar100_networks = partial(cifar_networks, n_classes=100)
svhn_networks = partial(cifar_networks, n_classes=10)
# ===========================================================================
# dSprites
# ===========================================================================
def _dsprites_distribution(x: tf.Tensor) -> Blockwise:
  """Joint posterior over the five dSprites generative factors.

  The 9 units of ``x`` parameterize, in order: orientation (VonMises,
  2 params), scale (Gamma, 2 params), shape (Categorical, 3 logits),
  x_position (Bernoulli, 1 logit), y_position (Bernoulli, 1 logit).
  """
  # NOTE: tried Continuous Bernoulli for dSPrites, but leads to
  # more unstable training in semi-supervised learning.
  dtype = x.dtype
  py = JointDistributionSequential([
      VonMises(loc=x[..., 0],
               concentration=tf.math.softplus(x[..., 1]),
               name='orientation'),
      Gamma(concentration=tf.math.softplus(x[..., 2]),
            rate=tf.math.softplus(x[..., 3]),
            name='scale'),
      Categorical(logits=x[..., 4:7], dtype=dtype, name='shape'),
      Bernoulli(logits=x[..., 7], dtype=dtype, name='x_position'),
      Bernoulli(logits=x[..., 8], dtype=dtype, name='y_position'),
  ])
  return Blockwise(py, name='shapes2d')
@typechecked
def dsprites_networks(
    qz: str = 'mvndiag',
    zdim: Optional[int] = None,
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
    is_semi_supervised: bool = False,
    is_hierarchical: bool = False,
    centerize_image: bool = True,
    skip_generator: bool = False,
    **kwargs,
) -> Dict[str, Layer]:
  """Network for dSprites-style datasets, image size (64, 64, n_channels).

  Recognized ``kwargs``: ``n_channels`` (default 1), ``proj_dim``
  (default 128 for 1 channel, 256 otherwise) and ``distribution``
  (default 'bernoulli').

  Returns
  -------
  dict with keys 'encoder', 'decoder', 'observation', 'latents'
  (and 'labels' — a joint posterior over the 5 dSprites factors — when
  semi-supervised).
  """
  from odin.bay.random_variable import RVconf
  from odin.bay.vi.autoencoder import BiConvLatents
  if zdim is None:
    zdim = 10
  n_channels = int(kwargs.get('n_channels', 1))
  input_shape = (64, 64, n_channels)
  conv, deconv = _prepare_cnn(activation=activation)
  proj_dim = kwargs.get('proj_dim', None)
  if proj_dim is None:
    proj_dim = 128 if n_channels == 1 else 256
  else:
    proj_dim = int(proj_dim)
  n_params, observation, last_layer = _parse_distribution(
      input_shape, kwargs.get('distribution', 'bernoulli'))
  encoder = SequentialNetwork(
      [
          CenterAt0(enable=centerize_image),
          conv(32, 4, strides=2, name='encoder0'),
          conv(32, 4, strides=2, name='encoder1'),
          conv(64, 4, strides=2, name='encoder2'),
          conv(64, 4, strides=2, name='encoder3'),
          keras.layers.Flatten(),
          keras.layers.Dense(proj_dim, activation='linear', name='encoder_proj')
      ],
      name='Encoder',
  )
  layers = [
      keras.layers.Dense(proj_dim, activation='linear', name='decoder_proj'),
      keras.layers.Reshape((4, 4, proj_dim // 16)),
      BiConvLatents(deconv(64, 4, strides=2, name='decoder1'),
                    encoder=encoder.layers[3],
                    filters=32, kernel_size=8, strides=4,
                    disable=True,
                    name='latents2'),
      deconv(64, 4, strides=2, name='decoder2'),
      deconv(32, 4, strides=2, name='decoder3'),
      deconv(32, 4, strides=2, name='decoder4'),
      # NOTE: this last projection layer with linear activation is crucial
      # otherwise the distribution parameterized by this layer won't converge
      conv(n_channels * n_params,
           1,
           strides=1,
           activation='linear',
           name='decoder6'),
      last_layer
  ]
  # unwrap BiConvLatents into their plain conv layer unless a hierarchical
  # model explicitly asked for them
  layers = [i.layer if isinstance(i, BiConvLatents) and not is_hierarchical
            else i
            for i in layers]
  if skip_generator:
    decoder = SkipSequential(layers=layers, name='SkipDecoder')
  else:
    decoder = SequentialNetwork(layers=layers, name='Decoder')
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    from odin.bay.layers.dense_distribution import DistributionDense
    # TODO: update
    networks['labels'] = DistributionDense(event_shape=(5,),
                                           posterior=_dsprites_distribution,
                                           units=9,
                                           name='geometry2d')
  return networks
# alias: the 'dsprites0' dataset reuses the dSprites architecture unchanged
dsprites0_networks = dsprites_networks
# ===========================================================================
# Shapes 3D
# ===========================================================================
def _shapes3d_distribution(x: tf.Tensor) -> Blockwise:
  """Joint posterior over the six Shapes3D generative factors.

  The 11 units of ``x`` parameterize, in order: orientation (VonMises,
  2 params), scale (Gamma, 2 params), shape (Categorical, 4 logits),
  floor/wall/object hue (ContinuousBernoulli, 1 logit each).
  """
  dtype = x.dtype
  py = JointDistributionSequential([
      VonMises(loc=x[..., 0],
               concentration=tf.math.softplus(x[..., 1]),
               name='orientation'),
      Gamma(concentration=tf.math.softplus(x[..., 2]),
            rate=tf.math.softplus(x[..., 3]),
            name='scale'),
      Categorical(logits=x[..., 4:8], dtype=dtype, name='shape'),
      ContinuousBernoulli(logits=x[..., 8], name='floor_hue'),
      ContinuousBernoulli(logits=x[..., 9], name='wall_hue'),
      ContinuousBernoulli(logits=x[..., 10], name='object_hue'),
  ])
  return Blockwise(py, name='shapes3d')
def shapes3d_networks(qz: str = 'mvndiag',
                      zdim: Optional[int] = None,
                      activation: Union[Callable, str] = tf.nn.elu,
                      is_semi_supervised: bool = False,
                      is_hierarchical: bool = False,
                      centerize_image: bool = True,
                      skip_generator: bool = False,
                      small: bool = False,
                      **kwargs) -> Dict[str, Layer]:
  """Networks for Shapes3D: the dSprites backbone with 3 color channels,
  or the smaller CIFAR backbone when ``small=True``; a Bernoulli pixel
  likelihood in both cases.  Semi-supervised mode adds a joint posterior
  over the six Shapes3D factors."""
  shared = dict(qz=qz,
                zdim=6 if zdim is None else zdim,
                activation=activation,
                is_semi_supervised=False,
                is_hierarchical=is_hierarchical,
                centerize_image=centerize_image,
                skip_generator=skip_generator,
                distribution='bernoulli')
  if small:
    networks = cifar_networks(**shared)
  else:
    networks = dsprites_networks(n_channels=3, **shared)
  if is_semi_supervised:
    from odin.bay.layers import DistributionDense
    # TODO: update
    networks['labels'] = DistributionDense(event_shape=(6,),
                                           posterior=_shapes3d_distribution,
                                           units=11,
                                           name='geometry3d')
  return networks
# 'shapes3dsmall' forces the smaller CIFAR-style backbone; 'shapes3d0' is an alias
shapes3dsmall_networks = partial(shapes3d_networks, small=True)
shapes3d0_networks = shapes3d_networks
# ===========================================================================
# Halfmoons
# ===========================================================================
def _halfmoons_distribution(x: tf.Tensor) -> Blockwise:
  """Joint posterior over the four halfmoons factors.

  The 10 units of ``x`` parameterize: x, y and color (each a Gamma with
  two softplus-ed params) and shape (Categorical over 4 classes).
  """
  dtype = x.dtype
  py = JointDistributionSequential([
      Gamma(concentration=tf.math.softplus(x[..., 0]),
            rate=tf.math.softplus(x[..., 1]),
            name='x'),
      Gamma(concentration=tf.math.softplus(x[..., 2]),
            rate=tf.math.softplus(x[..., 3]),
            name='y'),
      Gamma(concentration=tf.math.softplus(x[..., 4]),
            rate=tf.math.softplus(x[..., 5]),
            name='color'),
      Categorical(logits=x[..., 6:10], dtype=dtype, name='shape'),
  ])
  # NOTE(review): the Blockwise name 'shapes3d' looks copy-pasted from
  # _shapes3d_distribution — confirm whether it should be 'halfmoons'.
  return Blockwise(py, name='shapes3d')
def halfmoons_networks(qz: str = 'mvndiag',
                       zdim: Optional[int] = None,
                       activation: Union[Callable, str] = tf.nn.elu,
                       is_semi_supervised: bool = False,
                       is_hierarchical: bool = False,
                       centerize_image: bool = True,
                       skip_generator: bool = False,
                       **kwargs) -> Dict[str, Layer]:
  """Networks for the halfmoons dataset: the dSprites backbone with
  3 color channels and a Bernoulli pixel likelihood.  Semi-supervised
  mode adds a joint posterior over the four halfmoons factors."""
  cfg = dict(qz=qz,
             zdim=5 if zdim is None else zdim,
             activation=activation,
             is_semi_supervised=False,
             is_hierarchical=is_hierarchical,
             centerize_image=centerize_image,
             skip_generator=skip_generator,
             distribution='bernoulli',
             n_channels=3)
  networks = dsprites_networks(**cfg)
  if is_semi_supervised:
    from odin.bay.layers import DistributionDense
    networks['labels'] = DistributionDense(event_shape=(4,),
                                           posterior=_halfmoons_distribution,
                                           units=10,
                                           name='geometry3d')
  return networks
# ===========================================================================
# CelebA
# ===========================================================================
def _celeba_distribution(x: tf.Tensor) -> Independent:
  """Binary-attribute posterior for CelebA.

  Each logit in ``x`` parameterizes an independent ContinuousBernoulli;
  the last axis is folded into the event shape.

  (Fixes: removed an unused ``dtype`` local; the return annotation said
  ``Blockwise`` although an ``Independent`` is returned.)
  """
  py = ContinuousBernoulli(logits=x)
  return Independent(py, 1, name='attributes')
def celeba_networks(qz: str = 'mvndiag',
                    zdim: Optional[int] = None,
                    activation: Union[Callable, str] = tf.nn.elu,
                    is_semi_supervised: bool = False,
                    is_hierarchical: bool = False,
                    centerize_image: bool = True,
                    skip_generator: bool = False,
                    n_labels: int = 18,
                    **kwargs):
  """Networks for CelebA, image size (64, 64, 3).

  Returns a dict with keys 'encoder', 'decoder', 'observation' and
  'latents' (plus a ContinuousBernoulli 'labels' posterior over
  ``n_labels`` binary attributes when ``is_semi_supervised=True``).
  """
  from odin.bay.random_variable import RVconf
  if zdim is None:
    zdim = 45
  input_shape = (64, 64, 3)
  n_components = 10  # for Mixture Quantized Logistic
  n_channels = input_shape[-1]
  conv, deconv = _prepare_cnn(activation=activation)
  proj_dim = 512
  encoder = SequentialNetwork(
      [
          CenterAt0(enable=centerize_image),
          conv(32, 4, strides=2, name='encoder0'),
          conv(32, 4, strides=2, name='encoder1'),
          conv(64, 4, strides=2, name='encoder2'),
          conv(64, 4, strides=1, name='encoder3'),
          keras.layers.Flatten(),
          keras.layers.Dense(proj_dim, activation='linear', name='encoder_proj')
      ],
      name='Encoder',
  )
  layers = [
      keras.layers.Dense(proj_dim, activation='linear', name='decoder_proj'),
      keras.layers.Reshape((8, 8, proj_dim // 64)),
      deconv(64, 4, strides=1, name='decoder1'),
      deconv(64, 4, strides=2, name='decoder2'),
      deconv(32, 4, strides=2, name='decoder3'),
      deconv(32, 4, strides=2, name='decoder4'),
      conv(2 * n_channels,
           # MixtureQuantizedLogistic.params_size(n_components, n_channels),
           1,
           strides=1,
           activation='linear',
           name='decoder5'),
  ]
  from odin.bay import BiConvLatents
  layers = [i.layer if isinstance(i, BiConvLatents) and not is_hierarchical
            else i
            for i in layers]
  if skip_generator:
    decoder = SkipSequential(layers=layers, name='SkipDecoder')
  else:
    decoder = SequentialNetwork(layers=layers, name='Decoder')
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  # BUG FIX: _parse_distribution returns a (n_params, observation, last_layer)
  # tuple; the original stored the whole tuple as the observation layer.
  # Unpack it so 'observation' is the actual distribution layer.
  _, observation, _ = _parse_distribution(input_shape, 'qlogistic')
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    from odin.bay.layers import DistributionDense
    networks['labels'] = DistributionDense(event_shape=n_labels,
                                           posterior=_celeba_distribution,
                                           units=n_labels,
                                           name='attributes')
  return networks
# ===========================================================================
# Gene Networks
# ===========================================================================
@typechecked
def cortex_networks(
    qz: str = 'mvndiag',
    zdim: Optional[int] = 10,
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
    is_semi_supervised: bool = False,
    is_hierarchical: bool = False,
    log_norm: bool = True,
    cnn: bool = False,
    units: Sequence[int] = (256, 256, 256),
    **kwargs,
) -> Dict[str, Layer]:
  """Network for the Cortex mRNA sequencing dataset (558 genes, 7 cell types).

  The observation is a negative binomial over gene counts; in
  semi-supervised mode a one-hot posterior over the 7 cell types
  ('celltype') is added.

  Parameters
  ----------
  log_norm : library-size normalize + log1p the input counts.
  cnn : use a 1-D convolutional encoder/decoder instead of dense layers.
  units : hidden layer sizes for the dense variant.
  """
  from odin.bay.random_variable import RVconf
  input_shape = (558,)
  n_labels = 7
  if zdim is None:
    zdim = 10
  ## dense network
  if not cnn:
    encoder = SequentialNetwork(
        [LogNorm(enable=log_norm)] + [
            keras.layers.Dense(u, activation=activation, name=f'encoder{i}')
            for i, u in enumerate(units)
        ],
        name='encoder',
    )
    decoder = SequentialNetwork(
        [
            keras.layers.Dense(u, activation=activation, name=f'decoder{i}')
            for i, u in enumerate(units)
        ],
        name='decoder',
    )
  ## cnn
  else:
    Conv1D = partial(keras.layers.Conv1D,
                     strides=2,
                     padding='same',
                     activation=activation)
    Conv1DTranspose = partial(keras.layers.Conv1DTranspose,
                              strides=2,
                              padding='same',
                              activation=activation)
    encoder = SequentialNetwork(
        [
            LogNorm(enable=log_norm),
            keras.layers.Lambda(
                lambda x: tf.expand_dims(x, axis=-1)),  # (n, 558, 1)
            Conv1D(32, 7, name='encoder0'),
            Conv1D(64, 5, name='encoder1'),
            Conv1D(128, 3, name='encoder2'),
            keras.layers.Flatten()
        ],
        name='encoder',
    )
    decoder = SequentialNetwork(
        [
            keras.layers.Dense(128, activation=activation, name='decoder0'),
            keras.layers.Lambda(
                lambda x: tf.expand_dims(x, axis=-1)),  # (n, 128, 1)
            Conv1DTranspose(128, 3, strides=1, name='decoder1'),
            Conv1DTranspose(64, 5, name='decoder3'),
            Conv1DTranspose(32, 7, name='decoder4'),
            Conv1DTranspose(1, 1, strides=1, name='decoder5'),
            keras.layers.Flatten()
        ],
        name='decoder',
    )
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  observation = RVconf(input_shape, "nb", projection=True,
                       name="mrna").create_posterior()
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    # use the n_labels constant (7 cell types) instead of a magic number
    networks['labels'] = RVconf(n_labels, 'onehot', projection=True,
                                name='celltype').create_posterior()
  return networks
@typechecked
def pbmc_networks(
    qz: str = 'mvndiag',
    zdim: Optional[int] = 32,
    activation: Callable[[tf.Tensor], tf.Tensor] = tf.nn.elu,
    is_semi_supervised: bool = False,
    is_hierarchical: bool = False,
    log_norm: bool = True,
    cnn: bool = True,
    units: Sequence[int] = (512, 512, 512),
    **kwargs,
) -> Dict[str, Layer]:
  """Network for the PBMC mRNA sequencing dataset (2019 genes).

  The observation is a zero-inflated negative binomial over gene counts;
  in semi-supervised mode a negative binomial posterior over 32 protein
  measurements ('adt') is added.

  Parameters
  ----------
  log_norm : library-size normalize + log1p the input counts.
  cnn : use a 1-D convolutional encoder/decoder instead of dense layers.
  units : hidden layer sizes for the dense variant.
  """
  from odin.bay.random_variable import RVconf
  input_shape = (2019,)
  n_labels = 32
  if zdim is None:
    zdim = 32
  ## dense network
  if not cnn:
    encoder = SequentialNetwork(
        [LogNorm(enable=log_norm)] + [
            keras.layers.Dense(u, activation=activation, name=f'encoder{i}')
            for i, u in enumerate(units)
        ],
        name='encoder',
    )
    decoder = SequentialNetwork(
        [
            keras.layers.Dense(u, activation=activation, name=f'decoder{i}')
            for i, u in enumerate(units)
        ],
        name='decoder',
    )
  ## conv network
  else:
    Conv1D = partial(keras.layers.Conv1D,
                     strides=2,
                     padding='same',
                     activation=activation)
    Conv1DTranspose = partial(keras.layers.Conv1DTranspose,
                              strides=2,
                              padding='same',
                              activation=activation)
    encoder = SequentialNetwork(
        [
            LogNorm(enable=log_norm),
            keras.layers.Lambda(
                lambda x: tf.expand_dims(x, axis=-1)),  # (n, 2019, 1)
            Conv1D(32, 7, name='encoder0'),
            Conv1D(64, 5, name='encoder1'),
            Conv1D(128, 3, name='encoder2'),
            Conv1D(128, 3, name='encoder3'),
            keras.layers.Flatten()
        ],
        name='encoder',
    )
    decoder = SequentialNetwork(
        [
            keras.layers.Dense(256, activation=activation, name='decoder0'),
            keras.layers.Lambda(
                lambda x: tf.expand_dims(x, axis=-1)),  # (n, 256, 1)
            Conv1DTranspose(128, 3, strides=1, name='decoder1'),
            Conv1DTranspose(128, 3, name='decoder2'),
            Conv1DTranspose(64, 5, name='decoder3'),
            Conv1DTranspose(32, 7, name='decoder4'),
            Conv1DTranspose(1, 1, strides=1, name='decoder5'),
            keras.layers.Flatten()
        ],
        name='decoder',
    )
  latents = RVconf((zdim,), qz, projection=True,
                   name="latents").create_posterior()
  observation = RVconf(input_shape, "zinb", projection=True,
                       name="mrna").create_posterior()
  networks = dict(encoder=encoder,
                  decoder=decoder,
                  observation=observation,
                  latents=latents)
  if is_semi_supervised:
    networks['labels'] = RVconf(n_labels, 'nb', projection=True,
                                name='adt').create_posterior()
  return networks
# ===========================================================================
# Utils
# ===========================================================================
# Aliases: maps a dataset name onto the canonical name used to look up a
# `*_networks` builder in this module (see get_networks below).
_DSNAME_MAP = dict(
    halfmnist='mnist'
)
def get_networks(dataset_name: Union[str, IterableDataset],
                 *,
                 is_semi_supervised: bool,
                 is_hierarchical: bool,
                 qz: str = 'mvndiag',
                 zdim: Optional[int] = None,
                 **kwargs) -> Dict[str, Layer]:
  """ Return dictionary of networks for encoder, decoder, observation, latents
  and labels (in case of semi-supervised learning).

  The builder is located by matching ``dataset_name`` against the text
  before the first underscore of every ``*_networks`` function (or
  ``partial``) defined at module level.  Raises ValueError when no
  builder matches.
  """
  if isinstance(dataset_name, IterableDataset):
    dataset_name = dataset_name.name.lower()
  # non-positive zdim means "use the builder's default"
  if zdim is not None and zdim <= 0:
    zdim = None
  dataset_name = str(dataset_name).lower().strip()
  dataset_name = _DSNAME_MAP.get(dataset_name, dataset_name)
  for k, fn in globals().items():
    if isinstance(k, string_types) and (inspect.isfunction(fn) or
                                        isinstance(fn, partial)):
      k = k.split('_')[0]
      if k == dataset_name:
        return fn(qz=qz,
                  zdim=zdim,
                  is_semi_supervised=is_semi_supervised,
                  is_hierarchical=is_hierarchical,
                  **kwargs)
  raise ValueError('Cannot find pre-implemented network for '
                   f'dataset with name="{dataset_name}"')
def get_optimizer_info(
    dataset_name: str,
    batch_size: int = 64,
) -> Dict[str, Any]:
  """Return information for optimizing networks of given datasets.

  Epoch counts and initial learning rates are per-dataset presets; the
  epoch count is converted to an iteration count using ``batch_size``.
  NOTE(review): an earlier comment said the presets assume batch_size=32,
  but the default here is 64 — confirm which is intended.

  Parameters
  ----------
  dataset_name : str
      name of datasets, e.g. 'mnist', 'dsprites', 'shapes3d'
  batch_size : int
      mini-batch size

  Returns
  -------
  Dict[str, Any]
      'max_iter' : int,
          number of iterations,
      'learning_rate' : `tf.optimizers.schedules.ExponentialDecay`
          staircase-decayed learning rate schedule

  Raises
  ------
  NotImplementedError for unknown dataset names.
  """
  dataset_name = str(dataset_name).strip().lower()
  dataset_name = _DSNAME_MAP.get(dataset_name, dataset_name)
  decay_rate = 0.996
  decay_steps = 10000
  init_lr = 1e-3
  ### image networks
  # NOTE: the substring matches below ('svhn', 'cifar', ...) are
  # order-sensitive — more specific names must be tested first.
  if dataset_name == 'halfmoons':
    n_epochs = 200
    n_samples = 3200
  elif dataset_name == 'mnist':
    n_epochs = 800
    n_samples = 55000
  elif dataset_name == 'fashionmnist':
    n_epochs = 1000
    n_samples = 55000
  elif dataset_name == 'omniglot':
    n_epochs = 1000
    n_samples = 19280
  elif 'svhn' in dataset_name:
    n_epochs = 2000
    n_samples = 69594
  elif 'cifar' in dataset_name:
    n_epochs = 2500
    n_samples = 48000
    init_lr = 5e-4
  # dsrpites datasets
  elif 'dsprites' in dataset_name:
    n_epochs = 400
    n_samples = 663552
  # sahpes datasets
  elif 'shapes3d' in dataset_name:
    n_epochs = 250 if 'small' in dataset_name else 400
    n_samples = 432000
    init_lr = 2e-4
  elif 'celeba' in dataset_name:
    n_epochs = 2000 if 'small' in dataset_name else 3000
    n_samples = 162770
    init_lr = 2e-4
  ### gene networks
  elif 'cortex' in dataset_name:
    n_epochs = 500
    n_samples = 5000
    init_lr = 1e-4
  elif 'pbmc' in dataset_name:
    n_epochs = 500
    n_samples = 5000
    init_lr = 1e-4
  else:
    raise NotImplementedError(
        f'No predefined optimizer information for dataset {dataset_name}')
  max_iter = int((n_samples / batch_size) * n_epochs)
  lr = tf.optimizers.schedules.ExponentialDecay(init_lr,
                                                decay_steps=decay_steps,
                                                decay_rate=decay_rate,
                                                staircase=True)
  return dict(max_iter=max_iter, learning_rate=lr)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``description`` text field to
    the ``Committee`` model (table ``committees_committee``)."""

    def forwards(self, orm):
        # Adding field 'Committee.description'
        db.add_column('committees_committee', 'description', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Committee.description'
        db.delete_column('committees_committee', 'description')

    # Frozen ORM snapshot auto-generated by South; used to reconstruct the
    # model state at this point in history. Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'committees.committee': {
            'Meta': {'object_name': 'Committee'},
            'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'chaired_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'replacing_in_committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"})
        },
        'committees.committeemeeting': {
            'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
            'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
            'date': ('django.db.models.fields.DateField', [], {}),
            'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
            'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
        },
        'committees.protocolpart': {
            'Meta': {'object_name': 'ProtocolPart'},
            'body': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'header': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parts'", 'to': "orm['committees.CommitteeMeeting']"}),
            'order': ('django.db.models.fields.IntegerField', [], {}),
            'speaker': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'protocol_parts'", 'null': 'True', 'to': "orm['persons.Person']"})
        },
        'committees.topic': {
            'Meta': {'object_name': 'Topic'},
            'committees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['committees.Committee']", 'symmetrical': 'False'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'editors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'editing_topics'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'log': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'meetings': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']", 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'events.event': {
            'Meta': {'object_name': 'Event'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'what': ('django.db.models.fields.TextField', [], {}),
            'when': ('django.db.models.fields.DateTimeField', [], {}),
            'where': ('django.db.models.fields.TextField', [], {}),
            'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'symmetrical': 'False'})
        },
        'laws.vote': {
            'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
            'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.DateTimeField', [], {}),
            'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
            'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'laws.voteaction': {
            'Meta': {'object_name': 'VoteAction'},
            'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
        },
        'links.link': {
            'Meta': {'object_name': 'Link'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_link'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['links.LinkType']", 'null': 'True', 'blank': 'True'}),
            'object_pk': ('django.db.models.fields.TextField', [], {}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
        },
        'links.linktype': {
            'Meta': {'object_name': 'LinkType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'mks.member': {
            'Meta': {'ordering': "['name']", 'object_name': 'Member'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
            'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.membership': {
            'Meta': {'object_name': 'Membership'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.party': {
            'Meta': {'ordering': "('-number_of_seats',)", 'object_name': 'Party'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'persons.person': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
        },
        'persons.title': {
            'Meta': {'object_name': 'Title'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'planet.blog': {
            'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
        },
        'tagging.tag': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
        },
        'tagging.taggeditem': {
            'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
        }
    }

    complete_apps = ['committees']
|
|
#!/usr/bin/python
import sys
import re
import unicodedata
from StringIO import StringIO
try:
from lxml import etree
LXML_PRESENT = True
except ImportError:
LXML_PRESENT = False
#################################################################################
"""Html2MdException
Base exception class for module.
"""
class Html2MdException(Exception):
    """Raised when an element cannot be converted to markdown."""
#################################################################################
"""TagHandler
Base class for objects that convert a tag into text
"""
class TagHandler(object):
    """Base class for objects that convert one HTML tag into text.

    Subclasses set a ``tag`` attribute and override :meth:`convert`.
    """

    def __init__(self, txtConverter):
        # Owning converter object; used to recurse into child elements.
        self._txtConverter = txtConverter

    def test(self, e):
        """Return True when this handler is responsible for element *e*."""
        return e.tag == self.tag

    def convert(self, e):
        """Convert element *e* to text.  Subclasses must override."""
        pass

    def getElementText(self, e):
        """Return the element's own text plus the converted child content."""
        own = e.text if e.text and not e.text.isspace() else ''
        return own + self._txtConverter.childHandler(e)

    def getElementAttributes(self, e):
        """Serialize all attributes of *e* as ``{@name=value}`` chunks."""
        chunks = []
        for name in e.attrib.keys():
            chunks.append("{@%s=%s}" % (name, e.attrib[name]))
        return ''.join(chunks)
#################################################################################
"""InlineTagHandler
Subclass of TagHandler- base class for inline tag objects like <em>, <code>,
<strong>, etc. due to common conversion processing
"""
class InlineTagHandler(TagHandler):
    """Base handler for inline tags (<em>, <strong>, <code>, ...).

    Wraps the element's text in the subclass-defined ``inlinechars``
    marker, preserving a single trailing space if one was present.
    """

    def convert(self, e):
        # EDGE CASE: nested inline elements (e.g. ***strong and em***) can
        # leave the outer element without text; in that case assume a child
        # element carries the text and convert the children instead.
        if e.text is None:
            raw = self._txtConverter.childHandler(e)
            trailing_space = False
        else:
            raw = e.text
            trailing_space = raw.endswith(' ')
        wrapped = "%s%s%s" % (self.inlinechars, raw.rstrip(), self.inlinechars)
        return wrapped + ' ' if trailing_space else wrapped
#################################################################################
"""FixedStringTagHandler
subclass of TagHandler, baseclass for tag objects that process tags which
return a fixed string, eg <br /> and <hr /> tags
"""
class FixedStringTagHandler(TagHandler):
    """Base handler for void tags (<br/>, <hr/>) mapping to a constant string."""

    def convert(self, e):
        # Element content is irrelevant; emit the fixed replacement text.
        return self.conversion_str
#################################################################################
"""InlineCodeHandler
Class for inline HTML elements like <em>text</em>
"""
class InlineCodeHandler(InlineTagHandler):
    """Handler for inline <code> spans (backtick markup)."""
    tag = 'code'
    inlinechars = '`'

    def test(self, e):
        # A <code> nested in <pre> is a code block, handled elsewhere.
        return e.tag == self.tag and e.getparent().tag != 'pre'
#################################################################################
"""EmHandler
Specific class for ``em`` tags.
"""
class EmHandler(InlineTagHandler):
    """Handler for <em>; wraps the text in single asterisks."""
    tag = 'em'
    inlinechars = '*'
#################################################################################
"""StrongHandler
Specific class for ``strong`` tags.
"""
class StrongHandler(InlineTagHandler):
    """Handler for <strong>; wraps the text in double asterisks."""
    tag = 'strong'
    inlinechars = '**'
#################################################################################
"""StrikeHandler
Specific class for ``strike`` tags- NOT IN MARKDOWN SPEC.
"""
class StrikeHandler(InlineTagHandler):
    """Handler for <strike>; hyphen markup (NOT in the markdown spec)."""
    tag = 'strike'
    inlinechars = '-'
#################################################################################
"""BrHandler
Specific class for ``br`` tags.
"""
class BrHandler(FixedStringTagHandler):
    """Handler for <br/>; emits a markdown line break."""
    tag = 'br'
    conversion_str = ' \n'
#################################################################################
"""HorizontalRuleHandler
Specific class for ``hr`` tags.
"""
class HorizontalRuleHandler(FixedStringTagHandler):
    """Handler for <hr/>; emits a markdown horizontal rule."""
    tag = 'hr'
    conversion_str = '* * *\n\n'
#################################################################################
"""AHandler
Specific class for ``a`` tags.
"""
class AHandler(TagHandler):
    """Handler for <a> anchors.

    Intra-document fragment links become inline markdown links; all other
    links use reference style, with the referenced URLs collected on the
    converter for emission at the end of the document.
    """
    tag = 'a'

    def convert(self, a):
        # An anchor without an href cannot be represented; let the caller
        # fall back to raw HTML.
        if 'href' not in a.attrib:
            raise Html2MdException
        href = a.attrib['href']
        label = self.getElementText(a)
        # Links within the current document use the inline form.
        if href.startswith('#'):
            return "[%s](%s)" % (label, href)
        ref = self._searchlink(href)
        if ref is None:
            # Allocate the next reference-link number.
            ref = self._txtConverter._reflinks
            self._txtConverter._reflinks += 1
        if 'title' not in a.attrib:
            a.set('title', '')
        # Remember the link so the converter can emit the reference list.
        self._txtConverter._links.append((ref, href, a.attrib['title']))
        return "[%s][%s]" % (label, ref)

    def _searchlink(self, linktext):
        """Return the index of an already-collected URL, or None."""
        for idx, entry in enumerate(self._txtConverter._links):
            if entry[1] == linktext:
                return idx
        return None
#################################################################################
"""ImgHandler
Specific class for ``img`` tags.
"""
class ImgHandler(TagHandler):
    """Handler for <img> tags; emits markdown image syntax.

    Raises Html2MdException when the element lacks the attributes needed
    to build a markdown image, letting the caller fall back to raw HTML.
    """
    tag = 'img'

    def convert(self, img):
        attrib_str = self.getElementAttributes(img)
        # BUG FIX: the original used `and`, so an element missing only one of
        # 'alt'/'src' slipped past the guard and crashed with KeyError below.
        # Both attributes are required to build the markdown image.
        if 'alt' not in img.attrib.keys() or 'src' not in img.attrib.keys():
            raise Html2MdException
        if 'title' not in img.attrib.keys():
            img.set('title', '')
        # BUG FIX: the format string was empty (""), which made the "%"
        # operation raise TypeError ("not all arguments converted").
        # Emit the markdown image form: ![alt{@extra=...}](src "title")
        img_text = "![%s%s](%s \"%s\")" % (img.attrib['alt'],
                                           attrib_str,
                                           img.attrib['src'],
                                           img.attrib['title'])
        return img_text

    def getElementAttributes(self, img):
        # alt/title/src are consumed by the markdown syntax itself; only
        # pass the remaining attributes through as {@name=value} chunks.
        attr_str = ''
        for a in img.attrib.keys():
            if a not in ['alt', 'title', 'src']:
                attr_str += "{@%s=%s}" % (a, img.attrib[a])
        return attr_str
#################################################################################
"""PHandler
Specific class for ``p`` tags.
"""
class PHandler(TagHandler):
    """Handler for <p> paragraphs."""
    tag = 'p'

    def convert(self, p):
        prefix = self.getElementAttributes(p)
        if prefix:
            prefix += '\n'
        # Paragraphs are separated by a blank line in markdown.
        return "%s%s\n\n" % (prefix, self.getElementText(p))
#################################################################################
"""HeadingHandler
Specific class for ``h1-6`` tags.
"""
class HeadingHandler(TagHandler):
    """Handler for <h1>..<h6>.

    Levels 1 and 2 use setext-style underlines ('=' / '-'); deeper levels
    use atx-style leading hashes.
    """
    tag = re.compile(r'^h(\d)$')

    def test(self, e):
        # Comment nodes have a non-string tag; the regex applies to strings only.
        if not isinstance(e.tag, str):
            return False
        match = self.tag.match(e.tag)
        if match is None:
            return False
        # Remember the heading level for convert().
        self._hlevel = match.group(1)
        return True

    def convert(self, h):
        body = self.getElementAttributes(h) + self.getElementText(h)
        if h.tag == 'h1':
            underline = '='
        elif h.tag == 'h2':
            underline = '-'
        else:
            # h3-h6: atx-style heading with N leading hashes.
            return '#' * int(self._hlevel) + body + '\n\n'
        return "%s\n%s\n\n" % (body, underline * len(body))
#################################################################################
"""BlockQuoteHandler
Specific class for ``blockquote`` tags.
"""
class BlockQuoteHandler(TagHandler):
    """Handler for <blockquote>; prefixes each line with '> '."""
    tag = 'blockquote'
    prepend_char = '> '

    def __init__(self, txtConverter):
        TagHandler.__init__(self, txtConverter)
        # Current quote nesting depth; -1 means "not inside a quote".
        self._level = -1

    def convert(self, bq):
        self._level += 1
        quoted = self.getElementText(bq).rstrip() + '\n'
        quoted = self._txtConverter.prepend(quoted, self.prepend_char)
        # Nested quotes need a trailing blank line to close the inner level.
        if self._level > 0:
            quoted += '\n'
        self._level -= 1
        return quoted
#################################################################################
"""PreHandler
Specific class for ``pre`` tags.
"""
class PreHandler(TagHandler):
    """Handler for <pre>; the real content lives in the child <code> element."""
    tag = 'pre'

    def convert(self, pre):
        # Delegate to the children (handled by CodeBlockHandler).
        return self._txtConverter.childHandler(pre)
#################################################################################
"""CodeBlockHandler
Specific class for ``code`` tags.
"""
class CodeBlockHandler(TagHandler):
    """Handler for code blocks: a <code> element directly under <pre>."""
    tag = 'code'
    prepend_char = '    '

    def test(self, e):
        # Only match <code> nested in <pre>; inline code is handled elsewhere.
        return e.tag == self.tag and e.getparent().tag == 'pre'

    def convert(self, cb):
        # Markdown code blocks are indented by the prefix on every line.
        return self._txtConverter.prepend(cb.text, self.prepend_char)
#################################################################################
"""OListHandler
Object that converts ordered list tags to text- serves as a base class for
the UListHandler as well
"""
class OListHandler(TagHandler):
    """Handler for <ol>; also the base class for UListHandler.

    List items are processed manually rather than via a dedicated <li>
    handler: calling getElementText on the list element itself would
    flatten all <li> children into one blob, making it error-prone to
    figure out where each item prefix belongs.  Since a list element
    always has children, iterating it directly is safe.
    """
    tag = 'ol'

    def convert(self, ol):
        self._txtConverter.listlevel += 1
        items = [self.getElementText(li) for li in ol]
        text = self.listloop(items)
        # Edge case: a nested list whose previous sibling is not a block
        # tag needs a separating newline before it.
        if self._txtConverter.listlevel > 0:
            sibling = ol.getprevious()
            if sibling is not None and \
                    not self._txtConverter._isblock(sibling):
                text = '\n' + text
        self._txtConverter.listlevel -= 1
        return text

    def listloop(self, listitems):
        """Number each item and indent it for the current nesting level."""
        text = ''
        for number, item in enumerate(listitems, start=1):
            # Items without wrapping <p> tags must still end with a newline.
            if not item.endswith('\n'):
                item += '\n'
            prefix = "%s." % number
            # Pad the marker to a fixed width of four columns.
            prefix += ' ' * (4 - len(prefix))
            text += self.formatListItem(item, prefix)
        return text

    def formatListItem(self, listitem, li_pre):
        """Prefix the first line with *li_pre*; indent continuation lines."""
        text = ''
        marker = li_pre
        for line in listitem.splitlines(1):
            if marker:
                # First line carries the bullet / number.
                text += ' ' * (4 * self._txtConverter.listlevel) + marker + \
                    line.lstrip()
                marker = ''
            elif not line.isspace():
                # Continuation lines are indented one level deeper.
                text += ' ' * (4 * (self._txtConverter.listlevel + 1)) + \
                    line.lstrip()
            else:
                # Most likely a bare linefeed; keep it untouched.
                text += line
        return text

    def getElementText(self, e):
        text = ''
        if e.text and not e.text.isspace():
            text = e.text
            # Edge case: an <li> with text whose first child is another
            # list needs a newline before the nested list starts.
            if e.tag == 'li' and len(e) != 0 and e[0].tag in ['ol', 'ul']:
                text += '\n'
        return text + self._txtConverter.childHandler(e)
#################################################################################
"""UListHandler
Subclass of OListHandler, for ``ul`` tags.
"""
class UListHandler(OListHandler):
    """Handler for <ul>; same machinery as <ol> with a '* ' bullet."""
    tag = 'ul'

    def listloop(self, listitems):
        bullet = '* '
        text = ''
        for item in listitems:
            # Items without wrapping <p> tags must still end with a newline.
            if not item.endswith('\n'):
                item += '\n'
            text += self.formatListItem(item, bullet)
        return text
#################################################################################
"""Html2Morkdown
Creates a converter object for turning HTML markup into markdown text.
"""
class Html2Markdown(object):
    """Convert an HTML fragment to markdown text.

    Each element is matched against an ordered list of TagHandler
    instances; the first handler whose ``test`` accepts the element
    performs the conversion.  Output is accumulated in ``_blocklist``
    and joined with blank lines at the end of :meth:`convert`.
    """
    # Tags whose output is appended to the previous block.
    _inlinetags = ['code', 'em', 'strong', 'br', 'strike', 'img', 'a']
    # Tags whose output starts a new block.
    _blocktags = ['p', 'blockquote', 'li', 'ul', 'ol', 'pre', 'h1', 'h2', 'h3',
                  'h4', 'h5', 'h6', 'hr']
    def __init__(self):
        # Handler order matters: more specific handlers come first (e.g.
        # CodeBlockHandler before InlineCodeHandler would be wrong — the
        # inline test excludes <pre> parents instead).
        self._taghandlers = []
        self._taghandlers.append(PHandler(self))
        self._taghandlers.append(BlockQuoteHandler(self))
        self._taghandlers.append(UListHandler(self))
        self._taghandlers.append(OListHandler(self))
        self._taghandlers.append(HeadingHandler(self))
        self._taghandlers.append(PreHandler(self))
        self._taghandlers.append(CodeBlockHandler(self))
        self._taghandlers.append(AHandler(self))
        self._taghandlers.append(InlineCodeHandler(self))
        self._taghandlers.append(EmHandler(self))
        self._taghandlers.append(StrongHandler(self))
        self._taghandlers.append(BrHandler(self))
        self._taghandlers.append(StrikeHandler(self))
        self._taghandlers.append(ImgHandler(self))
        self._taghandlers.append(HorizontalRuleHandler(self))
        # Current list nesting depth; -1 means "not inside a list".
        self.listlevel = -1
        # Finished markdown blocks, joined with '\n' at the end.
        self._blocklist = []
        # Next reference-link number to hand out.
        self._reflinks = 0
        # Collected (number, url, title) tuples for reference links.
        self._links = []
    def convert(self, html):
        """Convert the *html* fragment and return the markdown string."""
        try:
            nhtml = unicodedata.normalize('NFKD', html)
        except TypeError:
            # Not a unicode string; use it as-is.
            nhtml = html
        except UnicodeEncodeError:
            print repr(html)
            sys.exit()
        # this is a negative-lookahead re- we're looking for '&' that are
        # unescaped in the data, the lxml parser chokes on those
        i = 0
        for m in re.finditer(u'&(?!amp;|gt;|lt;|quot;|#\d+;)', nhtml):
            if m:
                # i*4 compensates for the offset shift caused by each
                # earlier replacement growing the string.
                nhtml = nhtml[:m.start()+(i*4)] + u'&' + nhtml[m.end()+(i*4):]
                i = i + 1
        # print nhtml
        # Wrap in a synthetic root tag so a fragment parses as one tree.
        root = etree.fromstring("<post>%s</post>" % nhtml)
        # if the 'post' tag has text, then grab it and add it as the first
        # block before proceeding to process the children
        if root.text and not root.text.isspace():
            self._blocklist.append(root.text.rstrip() + '\n')
        # process the children of root- we don't use `childhandler` because that
        # would return a large blob of text. This way we can control the block
        # spacing
        for element in root:
            links_snapshot = len(self._links)
            try:
                text = self._tagHandler(element)
                # print text
                if text:
                    if self._isblock(element):
                        self._blocklist.append(text.rstrip() + '\n')
                    else:
                        # some kind of inline tag so we'll for now we'll just
                        # append to the previous block
                        self._blocklist[-1] = self._blocklist[-1].rstrip() + \
                            ' ' + text.rstrip() + '\n'
            except Html2MdException:
                # Conversion failed: roll back any links collected for this
                # element and fall back to the raw serialized HTML.
                while len(self._links) != links_snapshot:
                    self._links.pop()
                    self._reflinks -= 1
                self._blocklist.append(etree.tostring(element, pretty_print=True).rstrip() + '\n')
        # now add any referenced links as the final block
        # NOTE(review): links_snapshot here holds the value from the LAST
        # loop iteration only — confirm this is the intended condition.
        if links_snapshot < len(self._links):
            self._blocklist.append(self._refLinkText(links_snapshot))
        return '\n'.join(self._blocklist)
    def childHandler(self, element):
        """Convert all children of *element* and return the joined text."""
        text = ''
        if len(element) != 0:
            for child in element:
                try:
                    # print "Child: %s" % child.tag
                    text += self._tagHandler(child)
                except Html2MdException:
                    raise
        return text
    def checkTail(self, element):
        """Return the element's tail text, or '' when absent/whitespace."""
        if element.tail and not element.tail.isspace():
            # print "TAIL: %s" % element.tag
            # return element.tail.lstrip('\n')
            return element.tail
        else:
            return ''
    def prepend(self, text, pretext):
        """Prefix every line of *text* with *pretext* (quote/code indent)."""
        # print text
        rtext = ''
        for line in text.splitlines():
            rtext += "%s%s\n" % (pretext, line)
        return rtext
    ############################################################################
    """_tagHandler
    Scans the `_taghandlers` list for a handler of the element type
    based on the element's tag. Calls the `convert` method of the
    handler object.
    If no handler is found, then perform a simple check to see if the
    element is a 'comment'
    Otherwise return the tag as a string
    """
    def _tagHandler(self, element):
        for handler in self._taghandlers:
            if handler.test(element):
                text = handler.convert(element)
                break
        else:
            # No handler matched (for/else): special-case comment nodes.
            if element.tag is etree.Comment:
                text = "+ + +"
                if element.text != "more":
                    text += ' ' + element.text.replace('more ', '') + " + + +"
                text += "\n\n"
            else:
                # certain elements are better printed using HTML method than XML
                # NOTE: Should use my own serializer rather than relying on
                # tostring
                if element.tag in ['iframe']:
                    text = etree.tostring(element, pretty_print=True, method="html")
                else:
                    text = etree.tostring(element, pretty_print=True)
                return text.replace(u'&', u'&')
        return text + self.checkTail(element)
    def _refLinkText(self, first_link):
        """Format the collected reference links starting at *first_link*."""
        text = ''
        for ref in self._links[first_link:]:
            text += "  [%s]: %s" % (ref[0], ref[1])
            if ref[2]:
                text += ''' "%s"''' % (ref[2])
            text += '\n'
        # Clear the collected links once emitted.
        del self._links[:]
        return text
    def _isblock(self, e):
        """Return True when element *e* produces a block-level chunk."""
        if e.tag in self._blocktags:
            return True
        # A <code> under <pre> is a code block even though 'code' is inline.
        if e.tag == 'code' and e.getparent().tag == 'pre':
            return True
        # we need to pick up comments- but their tag is not a string, but a
        # function so, until this stops working, we'll check the tag instance
        # for a string and return True if it isn't
        if not isinstance(e.tag, str):
            return True
        return False
################################################################################
"""convert
Convenience function for instantiating an Html2Markdown object and
converting the given HTML.
"""
def convert(html):
    """Convert *html* to markdown using a fresh Html2Markdown instance."""
    return Html2Markdown().convert(html)
|
|
"""
This module downloads a package from a given URL using one of potentially
many different methods. We currently support the github web API and
simple HTTP(S). The github method is more developed and returns meta data
about the project (the commit hash and message), but support for other
methods, e.g. download a tar file that was uploaded to a web server,
are not precluded.
Private downloads are supported with the HTTP Authorization header.
For github, we need to use the github API to request a token to access
the owner's private repository. That part is not implemented here.
"""
import base64
import json
import os
import re
import shutil
import subprocess
import tarfile
import tempfile
import pycurl
import six
from paradrop.base import settings
# Raw strings fix the invalid escape sequences (\w, \-, \S) that the
# original plain strings carried (a DeprecationWarning/SyntaxWarning on
# modern Python), and github\.com now matches a literal dot only.
# Captures: scheme, owner, repository name, optional ".git" suffix.
github_re = re.compile(r"^(http|https)://github\.com/([\w\-]+)/([\w\-.]+?)(\.git)?$")
# Matches any http(s) URL.
general_url_re = re.compile(r"(http://|https://)(\S+)")
# Matches the trailing commit hash of a "<repo>-<hash>" directory name,
# as produced by github tarballs.
hash_re = re.compile(r"^.*-([0-9a-f]+)$")
class Downloader(object):
    """Base class for package downloaders.

    Subclasses implement download() and meta().  Instances are intended to
    be used as context managers so the temporary working directory is
    removed on all exit paths.
    """

    def __init__(self, url, user=None, secret=None, repo_owner=None, repo_name=None):
        self.url = url
        self.user = user
        self.secret = secret
        self.repo_owner = repo_owner
        self.repo_name = repo_name

        self.checkout = None
        self.commitHash = None
        self.commitMessage = None

        # Created lazily by __enter__ or fetch().
        self.workDir = None

    def __enter__(self):
        self.workDir = tempfile.mkdtemp()
        return self

    def __exit__(self, type, value, traceback):
        if self.workDir is not None:
            shutil.rmtree(self.workDir)
            self.workDir = None

    def download(self):
        # Subclasses must retrieve the package and return the run directory.
        raise NotImplementedError

    def meta(self):
        # Subclasses must return a dictionary of meta data.
        raise NotImplementedError

    def fetch(self):
        """
        Download the project.

        Returns the full path to the temporary directory containing the project
        and a dictionary containing meta data.
        """
        if self.workDir is None:
            self.workDir = tempfile.mkdtemp()

        runDir = self.download()
        meta = self.meta()
        return runDir, meta

    def extract(self):
        """
        Extract the downloaded archive (self.tarFile) into the work directory.

        Returns the directory that contains the chute configuration file or
        Dockerfile.  Raises an Exception when the archive holds unsafe paths
        or no recognizable entry point.
        """
        # BUG FIX: the original never closed the archive handle; use a
        # context manager so it is released on every path.
        with tarfile.open(self.tarFile) as tar:
            # Look for a Dockerfile and also check for dangerous paths (.. or /).
            runPath = None
            for member in tar:
                path = os.path.normpath(member.name)
                if path.startswith(".."):
                    raise Exception("Archive contains a forbidden path: {}".format(path))
                elif os.path.isabs(path):
                    raise Exception("Archive contains an absolute path: {}".format(path))
                elif path.endswith(settings.CHUTE_CONFIG_FILE):
                    runPath = path
                elif path.endswith("Dockerfile"):
                    runPath = path
                elif self.commitHash is None:
                    # Github tarballs name the top directory "<repo>-<hash>".
                    match = hash_re.match(path)
                    if match is not None:
                        self.commitHash = match.group(1)

            tar.extractall(path=self.workDir)

        if runPath is None:
            raise Exception("Repository does not contain {} or Dockerfile".format(
                settings.CHUTE_CONFIG_FILE))

        relRunDir = os.path.dirname(runPath)
        runDir = os.path.join(self.workDir, relRunDir)
        return runDir
class GitSSHDownloader(Downloader):
    """Downloads a project by cloning a git repository over SSH."""

    def __init__(self, url, checkout="master", **kwargs):
        """
        checkout: branch, tag, or commit hash to checkout (default: "master").
        """
        super(GitSSHDownloader, self).__init__(url, **kwargs)
        # None or the empty string fall back to the default, "master".
        self.checkout = checkout if checkout else "master"

    def download(self):
        """Clone the repository into the work directory and check out the
        requested revision.  Returns the work directory path."""
        env = os.environ.copy()

        key_file = os.path.join(settings.KEY_DIR, "node.key")
        if os.path.isfile(key_file):
            # TODO: Set up a way for the node to securely retrieve the host key
            # from the server and add it to the known hosts file. This would
            # probably need to go through the web server, which uses HTTPS.
            env['GIT_SSH_COMMAND'] = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i {}".format(key_file)

        clone_cmd = ["git", "clone", self.url, self.workDir]
        subprocess.call(clone_cmd, env=env)

        checkout_cmd = ["git", "-C", self.workDir, "checkout", self.checkout]
        subprocess.call(checkout_cmd)

        return self.workDir

    def meta(self):
        """No meta data is available for plain git-over-SSH downloads."""
        return {}
class GithubDownloader(Downloader):
    """Downloads a repository tarball through the github.com web interface.

    HTTP basic auth (user/secret) is attached when both are provided, which
    allows fetching private repositories with an access token.
    """

    def __init__(self, url, checkout="master", **kwargs):
        """
        checkout: branch, tag, or commit hash to checkout (default: "master").
        """
        super(GithubDownloader, self).__init__(url, **kwargs)
        if checkout:
            self.checkout = checkout
        else:
            # Interpret None or empty string as the default, "master".
            self.checkout = "master"

    def _create_curl_conn(self, url):
        """
        Create a cURL connection object with useful default settings.
        """
        headers = []
        if self.user is not None and self.secret is not None:
            # NOTE(review): b64encode on a str is Python 2 behavior; Python 3
            # would require bytes here — confirm the target interpreter.
            b64cred = base64.b64encode("{}:{}".format(self.user, self.secret))
            headers.append("Authorization: Basic {}".format(b64cred))

        conn = pycurl.Curl()

        if len(headers) > 0:
            conn.setopt(pycurl.HTTPHEADER, headers)

        conn.setopt(pycurl.URL, url)

        # github often redirects
        conn.setopt(pycurl.FOLLOWLOCATION, 1)

        return conn

    def download(self):
        # Fetch the tarball for the requested branch/tag/commit and extract
        # it; extract() also picks up the commit hash from the top directory.
        url = "https://github.com/{}/{}/tarball/{}".format(
            self.repo_owner, self.repo_name, self.checkout)
        conn = self._create_curl_conn(url)

        self.tarFile = os.path.join(self.workDir, "source.tar.gz")
        with open(self.tarFile, "w") as output:
            conn.setopt(pycurl.WRITEFUNCTION, output.write)
            conn.perform()

        http_code = conn.getinfo(pycurl.HTTP_CODE)
        if http_code != 200:
            raise Exception("Error downloading archive: response {}".format(http_code))

        return self.extract()

    def meta(self):
        """
        Return repository meta data as a dictionary.
        """
        result = {}

        if self.commitHash is not None:
            result['CommitHash'] = self.commitHash
        if self.commitMessage is not None:
            result['CommitMessage'] = self.commitMessage

        # If set, self.commitHash may be more specific than self.checkout (e.g.
        # commit hash vs. branch name). It is better to use the most specific
        # one to query for meta data.
        checkout = self.commitHash
        if checkout is None:
            checkout = self.checkout

        url = "https://api.github.com/repos/{owner}/{repo}/commits/{sha}".format(
            owner=self.repo_owner, repo=self.repo_name, sha=checkout)
        conn = self._create_curl_conn(url)

        response = six.StringIO()
        conn.setopt(pycurl.WRITEFUNCTION, response.write)
        conn.perform()

        http_code = conn.getinfo(pycurl.HTTP_CODE)
        if http_code == 200:
            data = json.loads(response.getvalue())
            # NOTE(review): the full commit object is stored under 'Commit';
            # presumably callers read nested fields — verify expected schema.
            result['Commit'] = data['commit']
            result['CommitMessage'] = data['commit']['message']

        # On a non-200 response the partial result is returned silently.
        return result
class WebDownloader(Downloader):
    """Downloads a package archive from a plain HTTP(S) URL."""

    def _create_curl_conn(self, url):
        """
        Create a cURL connection object with useful default settings.
        """
        conn = pycurl.Curl()

        # Attach HTTP basic auth only when both credentials are present.
        if self.user is not None and self.secret is not None:
            b64cred = base64.b64encode("{}:{}".format(self.user, self.secret))
            conn.setopt(pycurl.HTTPHEADER,
                        ["Authorization: Basic {}".format(b64cred)])

        conn.setopt(pycurl.URL, url)

        # github often redirects
        conn.setopt(pycurl.FOLLOWLOCATION, 1)

        return conn

    def download(self):
        """Download the archive from self.url and extract it."""
        conn = self._create_curl_conn(self.url)

        self.tarFile = os.path.join(self.workDir, "source.tar")
        with open(self.tarFile, "w") as output:
            conn.setopt(pycurl.WRITEFUNCTION, output.write)
            conn.perform()

        status = conn.getinfo(pycurl.HTTP_CODE)
        if status != 200:
            raise Exception("Error downloading archive: response {}".format(status))

        return self.extract()

    def meta(self):
        """
        Return repository meta data as a dictionary.
        """
        return {}
def downloader(url, user=None, secret=None, **kwargs):
    """
    Return an appropriate Downloader for the given URL.

    This should be used in a "with ... as ..." statement to perform cleanup on
    all exit cases.

    Example:
    with downloader("https://github.com/...") as dl:
    path, meta = dl.fetch()
    # do some work on the repo here
    """
    # If the URL looks like a github.com repo, then use the github-specific
    # download method. Otherwise, use basic web download method.
    match = github_re.match(url)
    if match is not None:
        owner = match.group(2)
        name = match.group(3)
        # Default the auth user to the repository owner.
        if user is None:
            user = owner
        return GithubDownloader(url, user=user, secret=secret,
                                repo_owner=owner, repo_name=name, **kwargs)

    # If the URL starts with ssh://, then use the git SSH download method.
    if url.startswith("ssh://"):
        return GitSSHDownloader(url, **kwargs)

    return WebDownloader(url, user=user, secret=secret, **kwargs)
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python import tf2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.BatchNormalization', v1=[])
class BatchNormalizationV2(Layer):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalize the activations of the previous layer at each batch,
i.e. applies a transformation that maintains the mean activation
close to 0 and the activation standard deviation close to 1.
Arguments:
axis: Integer, the axis that should be normalized
(typically the features axis).
For instance, after a `Conv2D` layer with
`data_format="channels_first"`,
set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: If True, multiply by `gamma`.
If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
implementation if possible. If False, do not use the fused
implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
virtual_batch_size: An `int`. By default, `virtual_batch_size` is `None`,
which means batch normalization is performed across the whole batch. When
`virtual_batch_size` is not `None`, instead perform "Ghost Batch
Normalization", which creates virtual sub-batches which are each
normalized separately (with shared gamma, beta, and moving statistics).
Must divide the actual batch size during execution.
adjustment: A function taking the `Tensor` containing the (dynamic) shape of
the input tensor and returning a pair (scale, bias) to apply to the
normalized values (before gamma and beta), only during training. For
example, if axis==-1,
`adjustment = lambda shape: (
tf.random_uniform(shape[-1:], 0.93, 1.07),
tf.random_uniform(shape[-1:], -0.1, 0.1))`
will scale the normalized value by up to 7% up or down, then shift the
result by up to 0.1 (with independent scaling and bias for each feature
but shared across all examples), and finally apply gamma and/or beta. If
`None`, no adjustment is applied. Cannot be specified if
virtual_batch_size is specified.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
References:
- [Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
"""
# The BatchNormalizationV1 subclass sets this to False to use the V1 behavior.
_USE_V2_BEHAVIOR = True
def __init__(self,
             axis=-1,
             momentum=0.99,
             epsilon=1e-3,
             center=True,
             scale=True,
             beta_initializer='zeros',
             gamma_initializer='ones',
             moving_mean_initializer='zeros',
             moving_variance_initializer='ones',
             beta_regularizer=None,
             gamma_regularizer=None,
             beta_constraint=None,
             gamma_constraint=None,
             renorm=False,
             renorm_clipping=None,
             renorm_momentum=0.99,
             fused=None,
             trainable=True,
             virtual_batch_size=None,
             adjustment=None,
             name=None,
             **kwargs):
  """Initializes the layer; the arguments are documented in the class
  docstring."""
  super(BatchNormalizationV2, self).__init__(
      name=name, trainable=trainable, **kwargs)
  if isinstance(axis, list):
    # Copy so in-place normalization of the axes in build() cannot mutate
    # the caller's list.
    self.axis = axis[:]
  elif isinstance(axis, int):
    self.axis = axis
  else:
    # BUG FIX: the original formatted `type(self.axis)` here, but self.axis
    # is never assigned on this path, so the message construction raised
    # AttributeError instead of the intended TypeError.
    raise TypeError('axis must be int or list, type given: %s'
                    % type(axis))
  self.momentum = momentum
  self.epsilon = epsilon
  self.center = center
  self.scale = scale
  self.beta_initializer = initializers.get(beta_initializer)
  self.gamma_initializer = initializers.get(gamma_initializer)
  self.moving_mean_initializer = initializers.get(moving_mean_initializer)
  self.moving_variance_initializer = initializers.get(
      moving_variance_initializer)
  self.beta_regularizer = regularizers.get(beta_regularizer)
  self.gamma_regularizer = regularizers.get(gamma_regularizer)
  self.beta_constraint = constraints.get(beta_constraint)
  self.gamma_constraint = constraints.get(gamma_constraint)
  self.renorm = renorm
  self.virtual_batch_size = virtual_batch_size
  self.adjustment = adjustment
  if self._USE_V2_BEHAVIOR:
    if fused:
      self._raise_if_fused_cannot_be_used()
    # We leave fused as None if self._fused_can_be_used()==True, since we
    # still may set it to False in self.build() if the input rank is not 4.
    elif fused is None and not self._fused_can_be_used():
      fused = False
  elif fused is None:
    fused = True
  self.supports_masking = True

  self.fused = fused
  self._bessels_correction_test_only = True

  if renorm:
    renorm_clipping = renorm_clipping or {}
    keys = ['rmax', 'rmin', 'dmax']
    if set(renorm_clipping) - set(keys):
      raise ValueError('renorm_clipping %s contains keys not in %s' %
                       (renorm_clipping, keys))
    self.renorm_clipping = renorm_clipping
    self.renorm_momentum = renorm_momentum
def _raise_if_fused_cannot_be_used(self):
"""Raises a ValueError if fused implementation cannot be used.
In addition to the checks done in this function, the input tensors rank must
be 4. The input rank check can only be done once the input shape is known.
"""
# Currently fused batch norm doesn't support renorm. It also only supports a
# channel dimension on axis 1 or 3, when no virtual batch size or adjustment
# is used.
if self.renorm:
raise ValueError('Passing both fused=True and renorm=True is '
'unsupported')
axis = [self.axis] if isinstance(self.axis, int) else self.axis
# Axis -3 is equivalent to 1, and axis -1 is equivalent to 3, because the
# input rank is required to be 4 (which is checked later).
if len(axis) > 1 or axis[0] not in (-3, -1, 1, 3):
raise ValueError('Passing fused=True is only supported when axis is 1 '
'or 3')
if self.virtual_batch_size is not None:
raise ValueError('Passing fused=True is unsupported when '
'virtual_batch_size is specified.')
if self.adjustment is not None:
raise ValueError('Passing fused=True is unsupported when '
'adjustment is specified.')
def _fused_can_be_used(self):
try:
self._raise_if_fused_cannot_be_used()
return True
except ValueError:
return False
def build(self, input_shape):
  """Creates the layer weights (gamma/beta, moving statistics, and optional
  renorm variables) once the input shape is known."""
  input_shape = tensor_shape.TensorShape(input_shape)
  if not input_shape.ndims:
    raise ValueError('Input has undefined rank:', input_shape)
  ndims = len(input_shape)

  # Convert axis to list and resolve negatives
  if isinstance(self.axis, int):
    self.axis = [self.axis]

  for idx, x in enumerate(self.axis):
    if x < 0:
      self.axis[idx] = ndims + x

  # Validate axes
  for x in self.axis:
    if x < 0 or x >= ndims:
      raise ValueError('Invalid axis: %d' % x)
  if len(self.axis) != len(set(self.axis)):
    raise ValueError('Duplicate axis: %s' % self.axis)

  if self.virtual_batch_size is not None:
    if self.virtual_batch_size <= 0:
      raise ValueError('virtual_batch_size must be a positive integer that '
                       'divides the true batch size of the input Tensor')
    # If using virtual batches, the first dimension must be the batch
    # dimension and cannot be the batch norm axis
    if 0 in self.axis:
      raise ValueError('When using virtual_batch_size, the batch dimension '
                       'must be 0 and thus axis cannot include 0')
    if self.adjustment is not None:
      raise ValueError('When using virtual_batch_size, adjustment cannot '
                       'be specified')

  # Decide whether the fused kernel can actually be used now that the rank
  # is known (fused requires a 4D input).
  if self.fused in (None, True):
    # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
    # output back to its original shape accordingly.
    if self._USE_V2_BEHAVIOR:
      if self.fused is None:
        self.fused = (ndims == 4)
      elif self.fused and ndims != 4:
        raise ValueError('Batch normalization layers with fused=True only '
                         'support 4D input tensors.')
    else:
      assert self.fused is not None
      self.fused = (ndims == 4 and self._fused_can_be_used())
    # TODO(chrisying): fused batch norm is currently not supported for
    # multi-axis batch norm and by extension virtual batches. In some cases,
    # it might be possible to use fused batch norm but would require reshaping
    # the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
    # particularly tricky. A compromise might be to just support the most
    # common use case (turning 5D w/ virtual batch to NCHW)

  if self.fused:
    if self.axis == [1]:
      self._data_format = 'NCHW'
    elif self.axis == [3]:
      self._data_format = 'NHWC'
    else:
      raise ValueError('Unsupported axis, fused batch norm only supports '
                       'axis == [1] or axis == [3]')

  # Raise parameters of fp16 batch norm to fp32
  if self.dtype == dtypes.float16 or self.dtype == dtypes.bfloat16:
    param_dtype = dtypes.float32
  else:
    param_dtype = self.dtype or dtypes.float32

  axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
  for x in axis_to_dim:
    if axis_to_dim[x] is None:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)
  self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)

  if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
    # Single axis batch norm (most common/default use-case)
    param_shape = (list(axis_to_dim.values())[0],)
  else:
    # Parameter shape is the original shape but with 1 in all non-axis dims
    param_shape = [axis_to_dim[i] if i in axis_to_dim
                   else 1 for i in range(ndims)]
    if self.virtual_batch_size is not None:
      # When using virtual batches, add an extra dim at index 1
      param_shape.insert(1, 1)
      for idx, x in enumerate(self.axis):
        self.axis[idx] = x + 1  # Account for added dimension

  if self.scale:
    self.gamma = self.add_weight(
        name='gamma',
        shape=param_shape,
        dtype=param_dtype,
        initializer=self.gamma_initializer,
        regularizer=self.gamma_regularizer,
        constraint=self.gamma_constraint,
        trainable=True)
  else:
    self.gamma = None
    if self.fused:
      # The fused kernel always needs a gamma tensor; substitute ones.
      self._gamma_const = array_ops.constant(
          1.0, dtype=param_dtype, shape=param_shape)

  if self.center:
    self.beta = self.add_weight(
        name='beta',
        shape=param_shape,
        dtype=param_dtype,
        initializer=self.beta_initializer,
        regularizer=self.beta_regularizer,
        constraint=self.beta_constraint,
        trainable=True)
  else:
    self.beta = None
    if self.fused:
      # The fused kernel always needs a beta tensor; substitute zeros.
      self._beta_const = array_ops.constant(
          0.0, dtype=param_dtype, shape=param_shape)

  try:
    # Disable variable partitioning when creating the moving mean and variance
    if hasattr(self, '_scope') and self._scope:
      partitioner = self._scope.partitioner
      self._scope.set_partitioner(None)
    else:
      partitioner = None
    self.moving_mean = self.add_weight(
        name='moving_mean',
        shape=param_shape,
        dtype=param_dtype,
        initializer=self.moving_mean_initializer,
        synchronization=tf_variables.VariableSynchronization.ON_READ,
        trainable=False,
        aggregation=tf_variables.VariableAggregation.MEAN)

    self.moving_variance = self.add_weight(
        name='moving_variance',
        shape=param_shape,
        dtype=param_dtype,
        initializer=self.moving_variance_initializer,
        synchronization=tf_variables.VariableSynchronization.ON_READ,
        trainable=False,
        aggregation=tf_variables.VariableAggregation.MEAN)

    if self.renorm:
      # Create variables to maintain the moving mean and standard deviation.
      # These are used in training and thus are different from the moving
      # averages above. The renorm variables are colocated with moving_mean
      # and moving_variance.
      # NOTE: below, the outer `with device` block causes the current device
      # stack to be cleared. The nested ones use a `lambda` to set the desired
      # device and ignore any devices that may be set by the custom getter.
      def _renorm_variable(name, shape):
        # Helper: a non-trainable, ON_READ-synchronized, zero-initialized
        # variable for the renorm statistics.
        var = self.add_weight(
            name=name,
            shape=shape,
            dtype=param_dtype,
            initializer=init_ops.zeros_initializer(),
            synchronization=tf_variables.VariableSynchronization.ON_READ,
            trainable=False,
            aggregation=tf_variables.VariableAggregation.MEAN)
        return var

      with distribution_strategy_context.get_distribution_strategy(
      ).colocate_vars_with(self.moving_mean):
        self.renorm_mean = _renorm_variable('renorm_mean', param_shape)
        self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
      # We initialize renorm_stddev to 0, and maintain the (0-initialized)
      # renorm_stddev_weight. This allows us to (1) mix the average
      # stddev with the minibatch stddev early in training, and (2) compute
      # the unbiased average stddev by dividing renorm_stddev by the weight.
      with distribution_strategy_context.get_distribution_strategy(
      ).colocate_vars_with(self.moving_variance):
        self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape)
        self.renorm_stddev_weight = _renorm_variable('renorm_stddev_weight',
                                                     ())
  finally:
    if partitioner:
      self._scope.set_partitioner(partitioner)
  self.built = True
def _assign_moving_average(self, variable, value, momentum):
  """Moves `variable` toward `value` by a (1 - momentum) step.

  Returns the assign_sub op implementing
  variable -= (variable - value) * (1 - momentum).
  """
  with ops.name_scope(None, 'AssignMovingAvg',
                      [variable, value, momentum]) as scope:
    # TODO(apassos,srbs,skyewm): the colocation constraints here are disabled
    # because of a bug which leads cond_v2 to skip rewriting them creating
    # conflicts.
    if tf2.enabled():
      # A do-nothing context manager (colocation intentionally disabled).
      cm = contextlib.contextmanager(lambda: (yield))()
    else:
      cm = ops.colocate_with(variable)
    with cm:
      decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
      if decay.dtype != variable.dtype.base_dtype:
        decay = math_ops.cast(decay, variable.dtype.base_dtype)
      update_delta = (
          variable - math_ops.cast(value, variable.dtype)) * decay
      return state_ops.assign_sub(variable, update_delta, name=scope)
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  # Substitute the constant tensors created in build() when center/scale
  # are disabled — the fused kernel always requires beta and gamma.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = tf_utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = tf_utils.constant_value(training)
  if training_value is None:
    # `training` is a tensor: pick the momentum dynamically; momentum 1.0
    # makes the moving-average update a no-op at inference time.
    momentum = tf_utils.smart_cond(training,
                                   lambda: self.momentum,
                                   lambda: 1.0)
  else:
    momentum = ops.convert_to_tensor(self.momentum)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              momentum)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, momentum)
    self.add_update(mean_update, inputs=True)
    self.add_update(variance_update, inputs=True)

  return output
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm.

  Returns a tuple (r, d, new_mean, new_variance) per the batch
  renormalization scheme (arXiv:1702.03275).
  """
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  # Clip r and d per the user-supplied renorm_clipping limits (missing keys
  # leave that side unclipped).
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0.
  r = tf_utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
  d = tf_utils.smart_cond(training,
                          lambda: d,
                          lambda: array_ops.zeros_like(d))

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    value = array_ops.identity(value)
    def _do_update():
      """Updates the var and weight, returns their updated ratio."""
      # Update the variables without zero debiasing. The debiasing will be
      # accomplished by dividing the exponential moving average by the weight.
      # For example, after a single update, the moving average would be
      # (1-decay) * value. and the weight will be 1-decay, with their ratio
      # giving the value.
      # Make sure the weight is not updated until before r and d computation.
      with ops.control_dependencies([value]):
        weight_value = array_ops.constant(1., dtype=weight.dtype)
      new_var = self._assign_moving_average(var, value, self.renorm_momentum)
      new_weight = self._assign_moving_average(weight, weight_value,
                                               self.renorm_momentum)
      # TODO(yuefengz): the updates to var and weighted can not be batched
      # together if we fetch their updated values here. Consider calculating
      # new values and delaying the updates.
      return new_var / new_weight
    def _fake_update():
      return array_ops.identity(var)
    return tf_utils.smart_cond(training, _do_update, _fake_update)

  # TODO(yuefengz): colocate the operations
  new_mean = _update_renorm_variable(self.renorm_mean,
                                     self.renorm_mean_weight, mean)
  new_stddev = _update_renorm_variable(self.renorm_stddev,
                                       self.renorm_stddev_weight, stddev)
  # Make sqrt(moving_variance + epsilon) = new_stddev.
  new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)
def _moments(self, inputs, reduction_axes, keep_dims):
  """Computes the mean and variance of `inputs` along `reduction_axes`."""
  mean, variance = nn.moments(inputs, reduction_axes, keep_dims=keep_dims)
  return mean, variance
def call(self, inputs, training=None):
  """Applies batch normalization to `inputs`.

  Dispatches to the fused kernel when enabled; otherwise computes (or reads)
  the moments, optionally applies renorm/adjustment corrections, updates the
  moving statistics when training, and normalizes the input.
  """
  if training is None:
    training = K.learning_phase()

  in_eager_mode = context.executing_eagerly()
  if self.virtual_batch_size is not None:
    # Virtual batches (aka ghost batches) can be simulated by reshaping the
    # Tensor and reusing the existing batch norm implementation
    original_shape = [-1] + inputs.shape.as_list()[1:]
    expanded_shape = [self.virtual_batch_size, -1] + original_shape[1:]

    # Will cause errors if virtual_batch_size does not divide the batch size
    inputs = array_ops.reshape(inputs, expanded_shape)

    def undo_virtual_batching(outputs):
      outputs = array_ops.reshape(outputs, original_shape)
      return outputs

  if self.fused:
    outputs = self._fused_batch_norm(inputs, training=training)
    if self.virtual_batch_size is not None:
      # Currently never reaches here since fused_batch_norm does not support
      # virtual batching
      outputs = undo_virtual_batching(outputs)
    return outputs

  # Compute the axes along which to reduce the mean / variance
  input_shape = inputs.get_shape()
  ndims = len(input_shape)
  reduction_axes = [i for i in range(ndims) if i not in self.axis]
  if self.virtual_batch_size is not None:
    del reduction_axes[1]  # Do not reduce along virtual batch dim

  # Broadcasting only necessary for single-axis batch norm where the axis is
  # not the last dimension
  broadcast_shape = [1] * ndims
  broadcast_shape[self.axis[0]] = input_shape.dims[self.axis[0]].value
  def _broadcast(v):
    # Reshape v so it broadcasts against `inputs` when needed.
    if (v is not None and
        len(v.get_shape()) != ndims and
        reduction_axes != list(range(ndims - 1))):
      return array_ops.reshape(v, broadcast_shape)
    return v

  scale, offset = _broadcast(self.gamma), _broadcast(self.beta)

  def _compose_transforms(scale, offset, then_scale, then_offset):
    # Fold an existing affine transform (then_*) into a new (scale, offset).
    if then_scale is not None:
      scale *= then_scale
      offset *= then_scale
    if then_offset is not None:
      offset += then_offset
    return (scale, offset)

  # Determine a boolean value for `training`: could be True, False, or None.
  training_value = tf_utils.constant_value(training)
  if training_value is not False:
    if self.adjustment:
      adj_scale, adj_bias = self.adjustment(array_ops.shape(inputs))
      # Adjust only during training.
      adj_scale = tf_utils.smart_cond(training,
                                      lambda: adj_scale,
                                      lambda: array_ops.ones_like(adj_scale))
      adj_bias = tf_utils.smart_cond(training,
                                     lambda: adj_bias,
                                     lambda: array_ops.zeros_like(adj_bias))
      scale, offset = _compose_transforms(adj_scale, adj_bias, scale, offset)

    # Some of the computations here are not necessary when training==False
    # but not a constant. However, this makes the code simpler.
    keep_dims = self.virtual_batch_size is not None or len(self.axis) > 1
    mean, variance = self._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    moving_mean = self.moving_mean
    moving_variance = self.moving_variance

    mean = tf_utils.smart_cond(training,
                               lambda: mean,
                               lambda: moving_mean)
    variance = tf_utils.smart_cond(training,
                                   lambda: variance,
                                   lambda: moving_variance)

    if self.virtual_batch_size is not None:
      # This isn't strictly correct since in ghost batch norm, you are
      # supposed to sequentially update the moving_mean and moving_variance
      # with each sub-batch. However, since the moving statistics are only
      # used during evaluation, it is more efficient to just update in one
      # step and should not make a significant difference in the result.
      new_mean = math_ops.reduce_mean(mean, axis=1, keepdims=True)
      new_variance = math_ops.reduce_mean(variance, axis=1, keepdims=True)
    else:
      new_mean, new_variance = mean, variance

    if self.renorm:
      r, d, new_mean, new_variance = self._renorm_correction_and_moments(
          new_mean, new_variance, training)
      # When training, the normalized values (say, x) will be transformed as
      # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
      # = x * (r * gamma) + (d * gamma + beta) with renorm.
      r = _broadcast(array_ops.stop_gradient(r, name='renorm_r'))
      d = _broadcast(array_ops.stop_gradient(d, name='renorm_d'))
      scale, offset = _compose_transforms(r, d, scale, offset)

    def _do_update(var, value):
      # Skip moving-average updates for frozen layers in eager mode.
      if in_eager_mode and not self.trainable:
        return

      return self._assign_moving_average(var, value, self.momentum)

    mean_update = tf_utils.smart_cond(
        training,
        lambda: _do_update(self.moving_mean, new_mean),
        lambda: self.moving_mean)
    variance_update = tf_utils.smart_cond(
        training,
        lambda: _do_update(self.moving_variance, new_variance),
        lambda: self.moving_variance)
    if not context.executing_eagerly():
      self.add_update(mean_update, inputs=True)
      self.add_update(variance_update, inputs=True)

  else:
    # Constant training=False: always use the moving statistics.
    mean, variance = self.moving_mean, self.moving_variance

  mean = math_ops.cast(mean, inputs.dtype)
  variance = math_ops.cast(variance, inputs.dtype)
  if offset is not None:
    offset = math_ops.cast(offset, inputs.dtype)
  outputs = nn.batch_normalization(inputs,
                                   _broadcast(mean),
                                   _broadcast(variance),
                                   offset,
                                   scale,
                                   self.epsilon)
  # If some components of the shape got lost due to adjustments, fix that.
  outputs.set_shape(input_shape)

  if self.virtual_batch_size is not None:
    outputs = undo_virtual_batching(outputs)
  return outputs
def compute_output_shape(self, input_shape):
  """Computes the output shape of the layer.

  Batch normalization is shape-preserving, so the output shape is
  identical to the input shape.

  Arguments:
    input_shape: Shape of the layer's input.

  Returns:
    The same shape object that was passed in.
  """
  output_shape = input_shape
  return output_shape
def get_config(self):
  """Returns the layer configuration as a JSON-serializable dict.

  The dict contains the constructor arguments needed to re-create this
  layer, with initializers/regularizers/constraints serialized to their
  Keras config form.  Merged with the base layer's config on return.
  """
  config = {
      'axis': self.axis,
      'momentum': self.momentum,
      'epsilon': self.epsilon,
      'center': self.center,
      'scale': self.scale,
      'beta_initializer': initializers.serialize(self.beta_initializer),
      'gamma_initializer': initializers.serialize(self.gamma_initializer),
      'moving_mean_initializer':
          initializers.serialize(self.moving_mean_initializer),
      'moving_variance_initializer':
          initializers.serialize(self.moving_variance_initializer),
      'beta_regularizer': regularizers.serialize(self.beta_regularizer),
      'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
      'beta_constraint': constraints.serialize(self.beta_constraint),
      'gamma_constraint': constraints.serialize(self.gamma_constraint)
  }
  # Only add TensorFlow-specific parameters if they are set, so as to preserve
  # model compatibility with external Keras.
  if self.renorm:
    config['renorm'] = True
    config['renorm_clipping'] = self.renorm_clipping
    config['renorm_momentum'] = self.renorm_momentum
  if self.virtual_batch_size is not None:
    config['virtual_batch_size'] = self.virtual_batch_size
  # Note: adjustment is not serializable.
  if self.adjustment is not None:
    logging.warning('The `adjustment` function of this `BatchNormalization` '
                    'layer cannot be serialized and has been omitted from '
                    'the layer config. It will not be included when '
                    're-creating the layer from the saved config.')
  # Base config keys are listed first; this layer's keys win on collision.
  base_config = super(BatchNormalizationV2, self).get_config()
  return dict(list(base_config.items()) + list(config.items()))
def _replace_in_v2_docstring(old, new):
  """Returns BatchNormalizationV2's docstring with `old` replaced by `new`.

  Raises:
    ValueError: If `old` does not occur in the V2 docstring (guards against
      the docstring drifting out of sync with this substitution).
  """
  doc = BatchNormalizationV2.__doc__
  if old not in doc:
    raise ValueError('Could not find following string in BatchNormalizationV2 '
                     'docstring: "{}"'.format(old))
  return doc.replace(old, new)
@tf_export(v1=['keras.layers.BatchNormalization'])  # pylint: disable=missing-docstring
class BatchNormalizationV1(BatchNormalizationV2):
  # V1 differs from V2 only in the `fused` argument's semantics, so the
  # docstring is derived from V2's with just that paragraph rewritten.
  __doc__ = _replace_in_v2_docstring(
      '''
fused: if `True`, use a faster, fused implementation, or raise a ValueError
if the fused implementation cannot be used. If `None`, use the faster
implementation if possible. If False, do not used the fused
implementation.''',
      '''
fused: if `None` or `True`, use a faster, fused implementation if possible.
If `False`, use the system recommended implementation.''')

  # Keep the pre-2.0 behavior (e.g. fused handling) in this V1 alias.
  _USE_V2_BEHAVIOR = False
# Resolve the public `BatchNormalization` name to the V2 implementation when
# TF 2.x behavior is enabled, and to the V1 implementation otherwise.
if tf2.enabled():
  BatchNormalization = BatchNormalizationV2
else:
  BatchNormalization = BatchNormalizationV1
|
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
This file is used to make the input matrix for the munkres algorithm.
It is like a helper file for munkres_test.py file
Description
===========
The input file with student information is read and each line in file is *yielded*.
The area/project to committee file is read and stored in form of list in aoe_comm_list.
The *some_name* function takes three parameters as input: *alpha* for load balancing, *gamma*
for test performance, and *pref_graph*, which signifies how fast the importance of student
preferences decreases. Pref_graph is hard coded in the algorithm in the form of an array. We can see from the array that the weight significantly decreases for the third and fourth preference.
It is set like that because we want maximum students to get either their first or second
preference. Depending on the parameters that we set, the function assigns appropriate weights
to all rows and columns, which correspond to students and committees respectively.
The average outdegree is calculated to identify the edges with more potential to do load
balancing and increase their weights accordingly so that the likelihood of them being in the
final assignment increases. The edges are then divided in two groups depending on the average
outdegree and appropriate weights are assigned to them to complete the matrix.
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2015 Rathijeet Bhave
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
#------------------------------------------------------------------------------------------
# Info about the module
__author__ = "Rathijeet Bhave, [email protected]"
__copyright__ = "(c) 2008 Rathijeet Bhave"
__license__ = "BSD-style license"
#------------------------------------------------------------------------------------------
import csv
import itertools
import sys
#class student:
#def __init__(self,s_no,name,marks,pref):
#self.marks=marks
#self.name=name
#self.pref=pref
#self.s_no = s_no
#
#def create_stud_database():
#stud_list = []
#with open('stud_and_marks.csv','r') as f:
#output = csv.reader(f,delimiter = ',')
#for i,row in enumerate(output):
#stud_list.append(student(i,row[0],row[1],row[2:]))
##for stud in stud_list:
##print stud.marks,stud.name,stud.pref,stud.s_no+1
#stud_list.sort(key = lambda student:student.marks)
def get_stud_row():
    """
    Yield rows of student data one by one.

    Reads ``final_stud_file.csv``: comma separated values with the first
    column as the student's serial number and the remaining four columns
    as the student's four preferences.

    :rtype: list
    :return: one row of the input file per iteration.
    """
    with open('final_stud_file.csv', 'r') as f:
        for record in csv.reader(f, delimiter=','):
            yield record
def get_aoe_comm_list():
    """
    Return a list of lists, each with an area/project of interest as first
    element followed by all committees that cover it.

    Reads ``aoi_comm_new.csv``: space-delimited rows whose first column is
    the area/project of interest and whose remaining columns are the
    committees covering it.

    :rtype: list of lists
    :return: all areas/projects of interest with their committees.
    """
    with open('aoi_comm_new.csv', 'r') as f:
        reader = csv.reader(f, delimiter=' ')
        return [entry for entry in reader]
def some_name(alpha, max_count, gamma):
    """
    Form the weight matrix that acts as input to munkres.py.

    :Parameters:
        alpha : Integer between 1-9
            How much importance is given to load balancing versus student
            preferences: 1 means maximum importance to load balancing,
            9 means maximum importance to student preferences.
            **WARNING**: Code does not work for values outside one to nine.
        max_count : Integer
            Number of matrix columns (seats) allotted per committee.
        gamma : Integer
            Scales how strongly a student's serial number (test performance
            proxy) discounts that student's edge weights.

    :rtype: list of lists
    :return: A matrix (list of lists) to be used as input for the
        weight-maximisation (munkres) algorithm.
    """
    alf = int(alpha)
    gamma = int(gamma)
    no_of_committees = 20
    no_of_students = 148
    list_alpha, list_one_minus_alpha = segregate_edges()
    aoe_comm_dict, avg_outdegree = find_avg_outdegree()
    # One row per student; max_count columns per committee, all seeded to 1.
    matrix = [[1] * no_of_committees * max_count for _ in range(no_of_students)]
    aoe_comm_list = get_aoe_comm_list()
    # Weight per preference rank; drops sharply after the second preference so
    # most students land their first or second choice.
    pref_graph = [500, 400, 100, 10]
    for row in get_stud_row():
        # Lower serial numbers (better performers) get a larger base weight.
        test_graph = 20 - gamma * int(row[0]) / 10
        for aoe in row[1:]:
            index_in_comm_list = find_aoe_comm_index(aoe_comm_list, aoe)
            try:
                for comm in aoe_comm_list[index_in_comm_list][1:]:
                    for i in range(1, len(pref_graph) + 1):
                        if row[i] == aoe:
                            if (row[0], aoe) in list_alpha:
                                # Low-outdegree edge: scale by alpha.
                                weight = test_graph * pref_graph[i - 1] * alf * (len(aoe_comm_list[index_in_comm_list]) - 1)
                                if matrix[int(row[0]) - 1][(int(comm) - 1) * max_count] < weight:
                                    for j in range(max_count):
                                        matrix[int(row[0]) - 1][(int(comm) - 1) * max_count + j] = weight
                                break
                            elif (row[0], aoe) in list_one_minus_alpha:
                                # High-outdegree edge: scale by (10 - alpha).
                                weight = test_graph * pref_graph[i - 1] * (10 - alf) * (len(aoe_comm_list[index_in_comm_list]) - 1)
                                if matrix[int(row[0]) - 1][(int(comm) - 1) * max_count] < weight:
                                    for j in range(max_count):
                                        matrix[int(row[0]) - 1][(int(comm) - 1) * max_count + j] = weight
                                break
            except TypeError:
                # find_aoe_comm_index() returns None for unknown areas, so
                # indexing with it raises TypeError; skip that preference.
                pass
    return matrix
def find_avg_outdegree():
    """
    Find the average outdegree of the ``aoi_comm_new.csv`` file, which
    indicates which areas/projects of interest have maximum potential for
    load balancing.  Also builds a dictionary mapping each area/project of
    interest to the number of committees covering it.

    :rtype: Tuple
    :return: A tuple of (area -> committee-count dictionary, average
        outdegree).
    """
    aoe_comm_dict = {}
    with open('aoi_comm_new.csv', 'r') as f:
        output = csv.reader(f, delimiter=' ')
        for row in output:
            # Outdegree = number of committees listed after the area name.
            # Duplicate area names keep only the last row, as before.
            aoe_comm_dict[row[0]] = len(row[1:])
    # NOTE: `/` keeps the original division semantics (integer division on
    # Python 2, float on Python 3) to avoid changing downstream comparisons.
    avg_outdegree = sum(aoe_comm_dict.values()) / len(aoe_comm_dict)
    return (aoe_comm_dict, avg_outdegree)
def segregate_edges():
    """
    Find which edges can be used to achieve more load balancing, so they can
    be given more weight.  Segregation is based on the outdegree of the area
    of interest given as a student preference.

    :rtype: Tuple
    :return: A tuple of two lists of (serial-number, area) pairs:
        (edges with outdegree <= average, edges with outdegree > average).
    """
    aoe_comm_dict, avg_outdegree = find_avg_outdegree()
    list_alpha = []
    list_one_minus_alpha = []
    with open('final_stud_file.csv', 'r') as f:
        output = csv.reader(f, delimiter=',')
        for row in output:
            for num in row[1:]:
                try:
                    if aoe_comm_dict[num] > avg_outdegree:
                        list_one_minus_alpha.append((row[0], num))
                    else:
                        list_alpha.append((row[0], num))
                except KeyError:
                    # Preference not present in the area->committee file;
                    # ignore it, matching the original best-effort behavior.
                    pass
    print('\n')
    return (list_alpha, list_one_minus_alpha)
def find_aoe_comm_index(aoe_comm_list, elem):
    """
    Find the index of a specific area/project of interest in the list of
    lists read from the area/committee file.

    :rtype: Integer (or None when *elem* is not present)
    :return: The index of the area/project of interest in the given list.
    """
    index = None
    for position, entry in enumerate(aoe_comm_list):
        if entry[0] == elem:
            index = position
            break
    return index
def make_matrix():
    # Entry point invoked from the __main__ guard below.
    #some_name(alpha,max_count,gamma)
    #find_avg_outdegree()
    #segregate_edges()
    # NOTE(review): create_stud_database() is only defined in the
    # commented-out block at the top of this file, so this call raises
    # NameError at runtime -- confirm which helper was actually intended
    # (the commented lines above suggest some_name()).
    create_stud_database()
# Script entry point; the argv-driven invocation of some_name() is disabled.
if __name__ == '__main__':
    #some_name(sys.argv[1],sys.argv[2],sys.argv[3])
    make_matrix()
    #get_stud_row()
    #get_aoe_comm_list()
|
|
from unittest import mock
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo import search
from olympia.amo.tests import ESTestCaseWithAddons, TestCase
from olympia.tags.models import Tag
class TestESIndexing(ESTestCaseWithAddons):
    # This needs to be in its own class for data isolation.

    def test_indexed_count(self):
        # Did all the right addons get indexed?
        count = Addon.search().filter(type=1, is_disabled=False).count()
        # Created in the setUpClass.
        assert (
            count
            == 4
            == (
                Addon.objects.filter(
                    disabled_by_user=False, status__in=amo.VALID_ADDON_STATUSES
                ).count()
            )
        )

    def test_get_es_not_mocked(self):
        # With a real ES test case, get_es() must return a genuine client,
        # not the mock used by the plain TestCase classes below.
        es = search.get_es()
        assert not issubclass(es.__class__, mock.Mock)
class TestNoESIndexing(TestCase):
    # Plain TestCase: Elasticsearch is mocked, so nothing really gets indexed.

    def test_no_es(self):
        assert not getattr(self, 'es', False), 'TestCase should not have "es" attribute'

    def test_not_indexed(self):
        addon = Addon.objects.create(
            type=amo.ADDON_EXTENSION, status=amo.STATUS_APPROVED
        )
        # count() goes through the mocked ES client, so the "count" is a
        # Mock instance rather than an int.
        assert issubclass(
            Addon.search().filter(id__in=addon.id).count().__class__, mock.Mock
        )

    def test_get_es_mocked(self):
        es = search.get_es()
        assert issubclass(es.__class__, mock.Mock)
class TestESWithoutMakingQueries(TestCase):
    # These tests test methods that don't directly call ES, so they work using
    # the faster TestCase class where ES is mocked.  They only inspect the
    # query body produced by _build_query().

    def test_clone(self):
        # Doing a filter creates a new ES object.
        qs = Addon.search()
        qs2 = qs.filter(type=1)
        assert 'bool' not in qs._build_query()['query']
        assert 'filter' in qs2._build_query()['query']['bool']

    def test_filter(self):
        qs = Addon.search().filter(type=1)
        assert qs._build_query()['query']['bool']['filter'] == ([{'term': {'type': 1}}])

    def test_in_filter(self):
        # `__in` lookups become a `terms` filter.
        qs = Addon.search().filter(type__in=[1, 2])
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'terms': {'type': [1, 2]}}]
        )

    def test_and(self):
        qs = Addon.search().filter(type=1, category__in=[1, 2])
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}]
        assert len(filters) == 2
        assert {'term': {'type': 1}} in filters
        assert {'terms': {'category': [1, 2]}} in filters

    def test_query(self):
        qs = Addon.search().query(type=1)
        assert qs._build_query()['query'] == ({'term': {'type': 1}})

    def test_query_match(self):
        qs = Addon.search().query(name__match='woo woo')
        assert qs._build_query()['query'] == ({'match': {'name': 'woo woo'}})

    def test_query_multiple_and_range(self):
        qs = Addon.search().query(type=1, status__gte=1)
        query = qs._build_query()['query']
        # Query:
        # {'bool': {'must': [{'term': {'type': 1}},
        #                    {'range': {'status': {'gte': 1}}}, ]}}
        assert list(query.keys()) == ['bool']
        assert list(query['bool'].keys()) == ['must']
        assert {'term': {'type': 1}} in query['bool']['must']
        assert {'range': {'status': {'gte': 1}}} in query['bool']['must']

    def test_query_fuzzy(self):
        fuzz = {'boost': 2, 'value': 'woo'}
        qs = Addon.search().query(type=1, status__fuzzy=fuzz)
        query = qs._build_query()['query']
        # Query:
        # {'bool': {'must': [{'fuzzy': {'status': fuzz}},
        #                    {'term': {'type': 1}}, ]}})
        assert list(query.keys()) == ['bool']
        assert list(query['bool'].keys()) == ['must']
        assert {'term': {'type': 1}} in query['bool']['must']
        assert {'fuzzy': {'status': fuzz}} in query['bool']['must']

    def test_order_by_desc(self):
        qs = Addon.search().order_by('-rating')
        assert qs._build_query()['sort'] == [{'rating': 'desc'}]

    def test_order_by_asc(self):
        qs = Addon.search().order_by('rating')
        assert qs._build_query()['sort'] == ['rating']

    def test_order_by_multiple(self):
        qs = Addon.search().order_by('-rating', 'id')
        assert qs._build_query()['sort'] == [{'rating': 'desc'}, 'id']

    def test_slice(self):
        # Slicing maps onto ES `from`/`size` pagination.
        qs = Addon.search()[5:12]
        assert qs._build_query()['from'] == 5
        assert qs._build_query()['size'] == 7

    def test_slice_stop(self):
        qs = Addon.search()[:6]
        assert qs._build_query()['size'] == 6

    def test_slice_stop_zero(self):
        qs = Addon.search()[:0]
        assert qs._build_query()['size'] == 0

    def test_gte(self):
        qs = Addon.search().filter(type__in=[1, 2], status__gte=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'gte': 4}}},
        # ]
        assert len(filters)
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'gte': 4}}} in filters

    def test_lte(self):
        qs = Addon.search().filter(type__in=[1, 2], status__lte=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'lte': 4}}},
        # ]
        assert len(filters) == 2
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'lte': 4}}} in filters

    def test_gt(self):
        qs = Addon.search().filter(type__in=[1, 2], status__gt=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'terms': {'type': [1, 2]}},
        #     {'range': {'status': {'gt': 4}}},
        # ]
        assert len(filters) == 2
        assert {'terms': {'type': [1, 2]}} in filters
        assert {'range': {'status': {'gt': 4}}} in filters

    def test_lt(self):
        qs = Addon.search().filter(type__in=[1, 2], status__lt=4)
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [
        #     {'range': {'status': {'lt': 4}}},
        #     {'terms': {'type': [1, 2]}},
        # ]
        assert len(filters)
        assert {'range': {'status': {'lt': 4}}} in filters
        assert {'terms': {'type': [1, 2]}} in filters

    def test_lt2(self):
        qs = Addon.search().filter(status__lt=4)
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'range': {'status': {'lt': 4}}}]
        )

    def test_range(self):
        qs = Addon.search().filter(date__range=('a', 'b'))
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'range': {'date': {'gte': 'a', 'lte': 'b'}}}]
        )

    def test_prefix(self):
        qs = Addon.search().query(name__startswith='woo')
        assert qs._build_query()['query'] == ({'prefix': {'name': 'woo'}})

    def test_values(self):
        # 'id' is always included in the requested _source fields.
        qs = Addon.search().values('name')
        assert qs._build_query()['_source'] == ['id', 'name']

    def test_values_dict(self):
        qs = Addon.search().values_dict('name')
        assert qs._build_query()['_source'] == ['id', 'name']

    def test_empty_values_dict(self):
        qs = Addon.search().values_dict()
        assert qs._build_query()['_source'] == ['id']

    def test_extra_values(self):
        qs = Addon.search().extra(values=['name'])
        assert qs._build_query()['_source'] == ['id', 'name']

        qs = Addon.search().values('status').extra(values=['name'])
        assert qs._build_query()['_source'] == ['id', 'status', 'name']

    def test_extra_values_dict(self):
        qs = Addon.search().extra(values_dict=['name'])
        assert qs._build_query()['_source'] == ['id', 'name']

        qs = Addon.search().values_dict('status').extra(values_dict=['name'])
        assert qs._build_query()['_source'] == ['id', 'status', 'name']

    def test_extra_order_by(self):
        qs = Addon.search().extra(order_by=['-rating'])
        assert qs._build_query()['sort'] == [{'rating': 'desc'}]

        qs = Addon.search().order_by('-id').extra(order_by=['-rating'])
        assert qs._build_query()['sort'] == [{'id': 'desc'}, {'rating': 'desc'}]

    def test_extra_query(self):
        qs = Addon.search().extra(query={'type': 1})
        assert qs._build_query()['query'] == ({'term': {'type': 1}})

        qs = Addon.search().filter(status=1).extra(query={'type': 1})
        filtered = qs._build_query()['query']['bool']
        assert filtered['must'] == ([{'term': {'type': 1}}])
        assert filtered['filter'] == [{'term': {'status': 1}}]

    def test_extra_filter(self):
        qs = Addon.search().extra(filter={'category__in': [1, 2]})
        assert qs._build_query()['query']['bool']['filter'] == (
            [{'terms': {'category': [1, 2]}}]
        )

        qs = Addon.search().filter(type=1).extra(filter={'category__in': [1, 2]})
        filters = qs._build_query()['query']['bool']['filter']
        # Filters:
        # [{'term': {'type': 1}}, {'terms': {'category': [1, 2]}}]
        assert len(filters) == 2
        assert {'term': {'type': 1}} in filters
        assert {'terms': {'category': [1, 2]}} in filters

    def test_source(self):
        qs = Addon.search().source('versions')
        assert qs._build_query()['_source'] == ['id', 'versions']
class TestES(ESTestCaseWithAddons):
    # These tests execute real queries against the test ES index populated
    # by ESTestCaseWithAddons.

    def test_getitem(self):
        addons = list(Addon.search())
        assert addons[0] == Addon.search()[0]

    def test_iter(self):
        qs = Addon.search().filter(type=1, is_disabled=False)
        assert len(qs) == len(list(qs))

    def test_count(self):
        assert Addon.search().count() == 6

    def test_count_uses_cached_results(self):
        # If the results cache is populated, count() must not hit ES again.
        qs = Addon.search()
        qs._results_cache = mock.Mock()
        qs._results_cache.count = mock.sentinel.count
        assert qs.count() == mock.sentinel.count

    def test_len(self):
        qs = Addon.search()
        qs._results_cache = [1]
        assert len(qs) == 1

    def test_values_result(self):
        addons = [{'id': a.id, 'slug': a.slug} for a in self._addons]
        qs = Addon.search().values_dict('slug').order_by('id')
        assert list(qs) == addons

    def test_values_dict_result(self):
        addons = [{'id': a.id, 'slug': a.slug} for a in self._addons]
        qs = Addon.search().values_dict('slug').order_by('id')
        assert list(qs) == list(addons)

    def test_empty_values_dict_result(self):
        qs = Addon.search().values_dict()
        assert list(qs[0].keys()) == ['id']

    def test_object_result(self):
        qs = Addon.search().filter(id=self._addons[0].id)[:1]
        assert self._addons[:1] == list(qs)

    def test_object_result_slice(self):
        addon = self._addons[0]
        qs = Addon.search().filter(id=addon.id)
        assert addon == qs[0]

    def test_extra_bad_key(self):
        # extra() only accepts a fixed set of keys; anything else asserts.
        with self.assertRaises(AssertionError):
            Addon.search().extra(x=1)

    def test_aggregations(self):
        Tag.objects.get_or_create(tag_text='sky')[0].add_tag(self._addons[0])
        Tag.objects.get_or_create(tag_text='sky')[0].add_tag(self._addons[1])
        Tag.objects.get_or_create(tag_text='sky')[0].add_tag(self._addons[2])
        Tag.objects.get_or_create(tag_text='earth')[0].add_tag(self._addons[0])
        Tag.objects.get_or_create(tag_text='earth')[0].add_tag(self._addons[1])
        Tag.objects.get_or_create(tag_text='ocean')[0].add_tag(self._addons[0])
        self.reindex(Addon)

        qs = Addon.search().aggregate(tags={'terms': {'field': 'tags'}})
        results = list(qs)
        assert len(results) == 6
        # Aggregation buckets come back sorted by doc_count, descending.
        assert qs.aggregations == {
            'tags': [
                {'doc_count': 3, 'key': 'sky'},
                {'doc_count': 2, 'key': 'earth'},
                {'doc_count': 1, 'key': 'ocean'},
            ]
        }
|
|
"""
The :mod:`compat` module provides support for backwards compatibility with older versions of django/python.
"""
import django
# cStringIO only if it's available, otherwise StringIO
try:
import cStringIO as StringIO
except ImportError:
import StringIO
# parse_qs from 'urlparse' module unless python 2.5, in which case from 'cgi'
try:
# python >= 2.6
from urlparse import parse_qs
except ImportError:
# python < 2.6
from cgi import parse_qs
# django.test.client.RequestFactory (Required for Django < 1.3)
try:
    from django.test.client import RequestFactory
except ImportError:
    from django.test import Client
    from django.core.handlers.wsgi import WSGIRequest

    # From: http://djangosnippets.org/snippets/963/
    # Lovely stuff
    class RequestFactory(Client):
        """
        Class that lets you create mock :obj:`Request` objects for use in testing.

        Usage::

            rf = RequestFactory()
            get_request = rf.get('/hello/')
            post_request = rf.post('/submit/', {'foo': 'bar'})

        This class re-uses the :class:`django.test.client.Client` interface. Of which
        you can find the docs here__.

        __ http://www.djangoproject.com/documentation/testing/#the-test-client

        Once you have a `request` object you can pass it to any :func:`view` function,
        just as if that :func:`view` had been hooked up using a URLconf.
        """
        def request(self, **request):
            """
            Similar to parent class, but returns the :obj:`request` object as soon as it
            has created it.
            """
            # Minimal WSGI environ; Client defaults and caller-supplied
            # **request entries override these baseline values.
            environ = {
                'HTTP_COOKIE': self.cookies,
                'PATH_INFO': '/',
                'QUERY_STRING': '',
                'REQUEST_METHOD': 'GET',
                'SCRIPT_NAME': '',
                'SERVER_NAME': 'testserver',
                'SERVER_PORT': 80,
                'SERVER_PROTOCOL': 'HTTP/1.1',
            }
            environ.update(self.defaults)
            environ.update(request)
            return WSGIRequest(environ)
# django.views.generic.View (Django >= 1.3)
try:
    from django.views.generic import View
    if not hasattr(View, 'head'):
        # First implementation of Django class-based views did not include head method
        # in base View class - https://code.djangoproject.com/ticket/15668
        class ViewPlusHead(View):
            def head(self, request, *args, **kwargs):
                return self.get(request, *args, **kwargs)
        View = ViewPlusHead
except ImportError:
    # Backport of Django 1.3's class-based View for Django <= 1.2.
    # NOTE: uses Python-2-only idioms (iteritems, u'' literals) on purpose;
    # this branch only runs on old Django, which implies Python 2.
    from django import http
    from django.utils.functional import update_wrapper
    # from django.utils.log import getLogger
    # from django.utils.decorators import classonlymethod
    # logger = getLogger('django.request') - We'll just drop support for logger if running Django <= 1.2
    # Might be nice to fix this up sometime to allow djangorestframework.compat.View to match 1.3's View more closely

    class View(object):
        """
        Intentionally simple parent class for all views. Only implements
        dispatch-by-method and simple sanity checking.
        """

        http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']

        def __init__(self, **kwargs):
            """
            Constructor. Called in the URLconf; can contain helpful extra
            keyword arguments, and other things.
            """
            # Go through keyword arguments, and either save their values to our
            # instance, or raise an error.
            for key, value in kwargs.iteritems():
                setattr(self, key, value)

        # @classonlymethod - We'll just us classmethod instead if running Django <= 1.2
        @classmethod
        def as_view(cls, **initkwargs):
            """
            Main entry point for a request-response process.
            """
            # sanitize keyword arguments
            for key in initkwargs:
                if key in cls.http_method_names:
                    raise TypeError(u"You tried to pass in the %s method name as a "
                                    u"keyword argument to %s(). Don't do that."
                                    % (key, cls.__name__))
                if not hasattr(cls, key):
                    raise TypeError(u"%s() received an invalid keyword %r" % (
                        cls.__name__, key))

            def view(request, *args, **kwargs):
                # A fresh instance per request keeps views thread-safe.
                self = cls(**initkwargs)
                return self.dispatch(request, *args, **kwargs)

            # take name and docstring from class
            update_wrapper(view, cls, updated=())
            # and possible attributes set by decorators
            # like csrf_exempt from dispatch
            update_wrapper(view, cls.dispatch, assigned=())
            return view

        def dispatch(self, request, *args, **kwargs):
            # Try to dispatch to the right method; if a method doesn't exist,
            # defer to the error handler. Also defer to the error handler if the
            # request method isn't on the approved list.
            if request.method.lower() in self.http_method_names:
                handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed
            self.request = request
            self.args = args
            self.kwargs = kwargs
            return handler(request, *args, **kwargs)

        def http_method_not_allowed(self, request, *args, **kwargs):
            allowed_methods = [m for m in self.http_method_names if hasattr(self, m)]
            #logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path),
            #    extra={
            #        'status_code': 405,
            #        'request': self.request
            #    }
            #)
            return http.HttpResponseNotAllowed(allowed_methods)

        def head(self, request, *args, **kwargs):
            # HEAD falls back to GET; the response body is discarded by WSGI.
            return self.get(request, *args, **kwargs)
# PUT, DELETE do not require CSRF until 1.4. They should. Make it better.
if django.VERSION >= (1, 4):
from django.middleware.csrf import CsrfViewMiddleware
else:
import hashlib
import re
import random
import logging
import urlparse
from django.conf import settings
from django.core.urlresolvers import get_callable
# logging.NullHandler only exists from Python 2.7; fall back to a no-op
# handler so importing this module never triggers "no handlers" warnings.
try:
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
logger = logging.getLogger('django.request')
if not logger.handlers:
    logger.addHandler(NullHandler())
def same_origin(url1, url2):
    """
    Checks if two URLs are 'same-origin'
    """
    # Origin is (scheme, netloc); path/query/fragment are irrelevant.
    p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
    return p1[0:2] == p2[0:2]
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match:
    instead of short-circuiting on the first mismatch, every character pair
    is XOR-ed and the differences are OR-ed together.  (The length check
    itself is not constant time, matching the original behavior.)
    """
    if len(val1) != len(val2):
        return False
    diff = 0
    for a, b in zip(val1, val2):
        diff |= ord(a) ^ ord(b)
    return diff == 0
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63
# Canned rejection reasons passed to the CSRF failure view by _reject().
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
    """
    Returns the view to be used for CSRF rejections
    """
    # Resolved lazily so settings.CSRF_FAILURE_VIEW can change under test.
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    # New token = MD5 of (random 64-bit value + SECRET_KEY).
    # NOTE(review): MD5 is weak by modern standards; presumably kept to match
    # the upstream Django snippet this backport was copied from -- confirm
    # before changing, as it affects the token format.
    return hashlib.md5("%s%s" % (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
    header to the outgoing response. For this reason, you may need to use this
    function lazily, as is done by the csrf context processor.
    """
    request.META["CSRF_COOKIE_USED"] = True
    return request.META.get("CSRF_COOKIE", None)
def _sanitize_token(token):
    # Strip a cookie value down to a usable CSRF token.
    # Allow only alphanum, and ensure we return a 'str' for the sake of the post
    # processing middleware.
    token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
    if token == "":
        # In case the cookie has been truncated to nothing at some point.
        return _get_new_csrf_key()
    else:
        return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
    # Avoid checking the request twice by adding a custom attribute to
    # request. This will be relevant when both decorator and middleware
    # are used.
    request.csrf_processing_done = True
    return None
def _reject(self, request, reason):
    # Delegate to the configured CSRF failure view, passing the reason why
    # the request was rejected (one of the REASON_* constants above).
    return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's available to the view.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
# Assume that anything not defined as 'safe' by RC2616 needs protection.
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues to
# work exactly the same (e.g. cookies are sent etc), but before the
# any branches that call reject()
return self._accept(request)
if request.is_secure():
# Suppose user visits http://example.com/
# An active network attacker,(man-in-the-middle, MITM) sends a
# POST form which targets https://example.com/detonate-bomb/ and
# submits it via javascript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that is no problem for a MITM and the session independent
# nonce we are using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://example.com/ we need additional protection that treats
# http://example.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
logger.warning('Forbidden (%s): %s' % (REASON_NO_REFERER, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_REFERER)
# Note that request.get_host() includes the port
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
logger.warning('Forbidden (%s): %s' % (reason, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
logger.warning('Forbidden (%s): %s' % (REASON_NO_CSRF_COOKIE, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_CSRF_COOKIE)
# check non-cookie token for match
request_csrf_token = ""
if request.method == "POST":
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
logger.warning('Forbidden (%s): %s' % (REASON_BAD_TOKEN, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
# Markdown is optional
try:
    import markdown

    class CustomSetextHeaderProcessor(markdown.blockprocessors.BlockProcessor):
        """
        Class for markdown < 2.1

        Override `markdown`'s :class:`SetextHeaderProcessor`, so that
        ``====`` headers are <h2> and ``----`` headers are <h3>.
        We use <h1> for the resource name.
        """
        import re
        # Detect Setext-style header. Must be first 2 lines of block.
        RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)

        def test(self, parent, block):
            return bool(self.RE.match(block))

        def run(self, parent, blocks):
            lines = blocks.pop(0).split('\n')
            # Determine level. ``=`` is 1 and ``-`` is 2.
            if lines[1].startswith('='):
                level = 2
            else:
                level = 3
            h = markdown.etree.SubElement(parent, 'h%d' % level)
            h.text = lines[0].strip()
            if len(lines) > 2:
                # Block contains additional lines. Add to master blocks for later.
                blocks.insert(0, '\n'.join(lines[2:]))

    def apply_markdown(text):
        """
        Simple wrapper around :func:`markdown.markdown` to set the base level
        of '#' style headers to <h2>.
        """
        extensions = ['headerid(level=2)']
        # BUGFIX: this was previously ``safe_mode = False,`` -- the stray
        # trailing comma made it the one-element tuple ``(False,)``, which is
        # truthy and therefore silently *enabled* markdown's safe mode.
        safe_mode = False
        if markdown.version_info < (2, 1):
            output_format = markdown.DEFAULT_OUTPUT_FORMAT
            md = markdown.Markdown(extensions=markdown.load_extensions(extensions),
                                   safe_mode=safe_mode,
                                   output_format=output_format)
            md.parser.blockprocessors['setextheader'] = CustomSetextHeaderProcessor(md.parser)
        else:
            md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
        return md.convert(text)
except ImportError:
    apply_markdown = None
# Yaml is optional: when PyYAML is not installed, ``yaml`` is left as None
# so callers can detect the missing dependency.
try:
    import yaml
except ImportError:
    yaml = None
import unittest

# Backport of unittest.skip for Python < 2.7.
#
# BUGFIX: the previous detection was ``import unittest.skip``, which always
# raises ImportError because ``skip`` is an attribute of the unittest module,
# not a submodule -- so the backport unconditionally replaced the stdlib
# implementation even on Python >= 2.7.  Feature-detect the attribute instead.
if not hasattr(unittest, 'skip'):  # python < 2.7
    from unittest import TestCase
    import functools

    def skip(reason):
        # Pasted from py27/lib/unittest/case.py
        """
        Unconditionally skip a test.
        """
        def decorator(test_item):
            # Wrap plain functions in a no-op; TestCase subclasses are
            # marked directly so the runner can skip every test in them.
            if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
                @functools.wraps(test_item)
                def skip_wrapper(*args, **kwargs):
                    pass
                test_item = skip_wrapper
            test_item.__unittest_skip__ = True
            test_item.__unittest_skip_why__ = reason
            return test_item
        return decorator

    unittest.skip = skip
|
|
#!/usr/bin/python
# Usage: Train_Evaluate_7MLP-100PCA.py <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>
# NB: Normally, use quotes around each parameter, as they are strings with commas and stuff
# The script assumes that the dataset is in the same dir as the script, and writes its output to a file <label>.RData
# The output contains the trained model (model), args = commandArgs(trailingOnly=TRUE)
import sys
import os
import numpy
import pandas
import json
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.layers import LSTM, TimeDistributed, Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score, f1_score, cohen_kappa_score
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn import decomposition
# --- Command-line argument handling (Python 2 script) ----------------------
print 'Argument List:', str(sys.argv)
# Script name plus exactly 5 positional arguments are required.
if(len(sys.argv)!=6):
    sys.exit("Wrong number of arguments. Usage:\nTrain_Evaluate_LSTM-50PCA.py <label> <target-variable> <data-sources> <train-set-sessions> <test-set-sessions>")
label = sys.argv[1]  # run label; used as the prefix for all output files
target = sys.argv[2] # either 'Activity' or 'Social'
datasourcestring = sys.argv[3] # Comma separated combinations of all,et,acc,aud,vid
trainstring = sys.argv[4]  # comma-separated list of training session ids
teststring = sys.argv[5]   # comma-separated list of test session ids
# # For tests
# label='LSTM-100PCA-DeepDrop_et_GM_LOSO_Activity_1'
# target='Activity'
# datasourcestring='et'
# trainstring='case1-day1-session2-teacher1,case1-day1-session3-teacher1,case1-day1-session4-teacher1,case2-day1-session1-teacher2,case2-day1-session2-teacher2,case2-day2-session1-teacher2,case2-day2-session2-teacher2,case2-day3-session1-teacher2,case2-day3-session2-teacher2,case2-day4-session1-teacher2,case2-day4-session2-teacher2'
# teststring='case1-day1-session1-teacher1'
# We parse the data sources to take into account, and the sessions for the
# train and test sets.  Column index ranges refer to completeDataset.csv.
features = list(range(0, 2))  # These are only the session and timestamp
sources = datasourcestring.split(",")
for source in sources:
    if source == 'all':
        features.extend(range(2, 7557))
        break
    elif source == 'et':
        features.extend(range(2, 12))       # eyetracking features
    elif source == 'acc':
        features.extend(range(12, 152))     # accelerometer features
    elif source == 'aud':
        features.extend(range(152, 6557))   # audio (openSMILE) features
    elif source == 'vid':
        features.extend(range(6557, 7557))  # video (VGG) features
    else:
        sys.exit("Wrong data sources. Possible values: all,et,acc,aud,vid")
features.extend(range(7557, 7559))  # Add Activity and Social label columns
print("Selected features: "+str(len(features)))
sessiontrain = trainstring.split(",")  # session ids to train on
sessiontest = teststring.split(",")    # session ids to test on
# BUGFIX: the original check used the bitwise operator ``|``; since ``|``
# binds tighter than ``==`` it evaluated
# ``len(sessiontrain) == (0 | len(sessiontest)) == 0`` instead of testing
# each length against zero.  Use the boolean ``or``.
if len(sessiontrain) == 0 or len(sessiontest) == 0:
    sys.exit("Wrong train/test sessions specification. Should be a comma-separated string with the sessions identificators")
# Directory containing this script; the dataset is expected alongside it and
# all output files are written there too.
path = os.path.dirname(os.path.realpath(sys.argv[0]))
# READING AND PREPARING THE DATA
processeddatadir = path
#processeddatadir = "../src/models" # TODO: Change this for the actual script
datafile = os.path.join(processeddatadir,'completeDataset.csv')
gzdatafile = os.path.join(processeddatadir,'completeDataset.csv.gz')
fulldata = pandas.DataFrame()
# Prefer the plain CSV; fall back to the gzipped copy; abort if neither exists.
if(os.path.isfile(datafile)):
    fulldata = pandas.read_csv(datafile, sep=',', quotechar='"')
elif(os.path.isfile(gzdatafile)):
    fulldata = pandas.read_csv(gzdatafile, compression='gzip', sep=',', quotechar='"')
else:
    sys.exit("Data not available in the script's folder")
# Drop the useless first column (the CSV row index)
fulldata.drop(fulldata.columns[[0]],axis=1,inplace=True)
def cleanAct(value):
    """Collapse an Activity label: NaN/None and OFF/TDT/TEC map to 'Other'."""
    if pandas.isnull(value):
        return 'Other'
    if value in ('OFF', 'TDT', 'TEC'):
        return 'Other'
    return value
def cleanSoc(value):
    """Collapse a Social label: NaN/None maps to 'Other', anything else kept."""
    return 'Other' if pandas.isnull(value) else value
# We only look for predicting 4 states of activity and 3 of social, the rest (incl.NA) we bunch in 'Other' (so in the end it is a 5- and 4-class classification problem)
fulldata['Activity'] = fulldata['Activity.win'].map(cleanAct)
fulldata['Social'] = fulldata['Social.win'].map(cleanSoc)
# Drop the original (windowed) label columns now that they have been cleaned
fulldata.drop(fulldata.columns[[2,3,4]],axis=1,inplace=True)
print(fulldata.columns.values[0:5],"...",fulldata.columns.values[-5:])
#fulldata.head(3)
# Now the column indices match what is expected in the arguments parsed above
# * [,0]: ''session id''
# * [,1]: ''timestamp'' within the session (in ms)
# * [,2:12]: ''eyetracking'' features (mean/sd pupil diameter, nr. of long fixations, avg. saccade speed, fixation duration, fixation dispersion, saccade duration, saccade amplitude, saccade length, saccade velocity)
# * [,12:152]: ''accelerometer'' features, including X, Y, Z (mean, sd, max, min, median, and 30 FFT coefficients of each of them) and jerk (mean, sd, max, min, median, and 30 FFT coefficients of each of it)
# * [,152:6557]: ''audio'' features extracted from an audio snippet of the 10s window, using openSMILE. Includes features about whether there is someone speaking (153:163), emotion recognition models (164:184), and brute-force audio spectrum features and characteristics used in various audio recognition challenges/tasks (185:6557)
# * [,6557:7557]: ''video'' features extracted from an image taken in the middle of the window (the 1000 values of the last layer when passing the immage through a VGG pre-trained model)
# * [,7557:7559]: ''Activity,Social'' labels we want to predict
# SELECTING THE DATASET FEATURES (DATA SOURCES BEING TRIED)
# NOTE(review): DataFrame.ix is deprecated (removed in modern pandas);
# .iloc would be needed here -- confirm the pandas version this script pins.
data = fulldata.ix[:,features]
# We drop the non-needed target variable
if target == 'Activity':
    data.drop('Social',axis=1,inplace=True)
elif target == 'Social':
    data.drop('Activity',axis=1,inplace=True)
print(data.shape)
#data.head(3)
# SPLITTING THE DATA into train/test by session id
test = data.loc[data['session'].isin(sessiontest)]
train = data.loc[data['session'].isin(sessiontrain)]
print(test.shape)
print(train.shape)
# Removing null values: keep only rows where every column is non-null
test = test[test.notnull().all(axis=1)]
train = train[train.notnull().all(axis=1)]
print("Removing null and NAs...")
print(test.shape)
print(train.shape)
# Columns [2, ncol-1) are the features; the last column is the target label.
X_train = train.values[:,range(2,train.shape[1]-1)].astype(float)
Y_train = train.values[:,(train.shape[1]-1)]
X_test = test.values[:,range(2,test.shape[1]-1)].astype(float)
Y_test = test.values[:,(test.shape[1]-1)]
Y_total = data.values[:,(data.shape[1]-1)]
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape)
#######################################################
# DO OTHER DATA TRANSFORMATIONS NEEDED, e.g. PCA, SELECT K-BEST FEATURES, etc (NORMALLY, ON THE TRAIN SET ONLY, TO BE APPLIED LATER TO THE TEST SET)
print("Transforming data... scaling only")
# Number of output classes for the chosen target variable
outs = len(data[target].unique())
# We standardize on the basis of the training data only, then apply the same
# scaler to the test set (avoids information leakage from the test split).
scaler = StandardScaler().fit(X_train)
X_train_st = scaler.transform(X_train)
X_test_st = scaler.transform(X_test)
# # Removing zero variance features
# selector = VarianceThreshold()
# selector.fit(X_train_st)
# X_train_nz = selector.transform(X_train_st)
# X_test_nz = selector.transform(X_test_st)
# idx = numpy.where(selector.variances_ > threshold)[0] # to get the indices
# TODO: Remove highly correlated ones (according to Cohen's d?)
## From http://lucystatistics.blogspot.com.ee/2016/03/dimension-reduction.html
# c = df.corr().abs()
# so1=argsort(np.array(c))
# s = c.unstack()
# so2 = s.order(kind="quicksort")
# if X_train.shape[1]>k:
#     # Apply 100-component pca
#     print("PCA with "+str(k)+" components")
#     pca = decomposition.PCA(n_components=k)
#     X_train_pca = pca.fit_transform(X_train_st)
#     X_test_pca = pca.transform(X_test_st)
#     print 'Variance explained:'
#     print pca.explained_variance_ratio_
#     print 'Total variance explained by '+str(k)+' components:'
#     print sum(pca.explained_variance_ratio_)
# else:
#     k=X_train.shape[1]
#     pca = decomposition.PCA(n_components=k)
#     X_train_pca = pca.fit_transform(X_train_st)
#     X_test_pca = pca.transform(X_test_st)
#######################################################
# PREPARING THE DATA FOR KERAS TRAINING
# One hot encoding of the response variable (using dummy variables)
# encode class values as integers
# NOTE: the encoder is fitted on Y_total (all sessions) so train and test
# share the same label-to-integer mapping even if some class is absent
# from one of the splits.
encoder = LabelEncoder()
encoder.fit(Y_total)
encoded_Y_train = encoder.transform(Y_train)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y_train = to_categorical(encoded_Y_train)
#encoder.fit(Y_test)
encoded_Y_test = encoder.transform(Y_test)
# convert integers to dummy variables (i.e. one hot encoded)
dummy_y_test = to_categorical(encoded_Y_test)
##################### MODEL BUILDING --- CUSTOMIZE THIS ################
# Apply dropout regularization, it is overfitting!
def create_deeper_dropout_decay_PCA(k, outs):
    """Build a 6-hidden-layer MLP with dropout after every layer.

    :param k: number of input features (e.g. after PCA)
    :param outs: number of output classes
    :returns: a compiled keras Sequential model
    """
    model = Sequential()
    # Dropout directly on the input layer
    model.add(Dropout(0.2, input_shape=(k,)))
    # Funnel architecture: 300 -> 300 -> 80 -> 80 -> 20 -> 20, each layer
    # tanh-activated and followed by 20% dropout.
    for width in (300, 300, 80, 80, 20, 20):
        model.add(Dense(width, init='uniform', activation='tanh'))
        model.add(Dropout(0.2))
    model.add(Dense(outs, init='uniform', activation='sigmoid'))
    # Compile with a larger learning rate and momentum (plus decay), as
    # recommended by the original dropout paper.
    sgd = SGD(lr=0.1, momentum=0.8, decay=0.0001, nesterov=False)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
# LUIS
def create_LSTM_PCA(batch_size = 1, trainShape1=100, outs=5):
    """Build a 3-layer stateful LSTM with a small dense head.

    :param batch_size: fixed batch size (required for stateful LSTMs)
    :param trainShape1: number of input features per timestep
    :param outs: number of output classes
    :returns: a compiled keras Sequential model
    """
    net = Sequential()
    # Stateful LSTM stack: 200 -> 100 -> 50 units; only the last layer
    # collapses the sequence (return_sequences=False).
    net.add(LSTM(200, batch_input_shape=(batch_size, 1, trainShape1),
                 return_sequences=True, stateful=True))
    net.add(Dropout(0.2))
    net.add(LSTM(100, return_sequences=True, stateful=True))
    net.add(Dropout(0.2))
    net.add(LSTM(50, return_sequences=False, stateful=True))
    net.add(Dropout(0.2))
    # Dense head: 50 -> 20 tanh layers, then softmax over the classes.
    for width in (50, 20):
        net.add(Dense(width, activation='tanh'))
        net.add(Dropout(0.2))
    net.add(Dense(outs, activation='softmax'))
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# LUKASZ
def create_LSTM(hidden = 50, lstm_layers = 1, input_dim = 100, output_dim = 5):
    """Build a stack of LSTM layers with a time-distributed sigmoid output.

    :param hidden: LSTM units per layer
    :param lstm_layers: total number of LSTM layers
    :param input_dim: number of input features per timestep
    :param output_dim: number of output classes
    :returns: a compiled keras Sequential model
    """
    net = Sequential()
    net.add(LSTM(input_shape=(input_dim,), input_dim=input_dim,
                 output_dim=hidden, return_sequences=True))
    # Any additional LSTM layers beyond the first
    for _ in range(lstm_layers - 1):
        net.add(LSTM(output_dim=hidden, return_sequences=True))
    net.add(TimeDistributed(Dense(output_dim, activation='sigmoid')))
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
def create_LSTM_DeepDrop(hidden = 50, lstm_layers = 1, input_dim = 100, output_dim = 5, mlp_layers = 1, dropout=None):
    """Build a stack of LSTMs plus an optional time-distributed MLP head.

    :param hidden: units per LSTM / hidden MLP layer
    :param lstm_layers: total number of LSTM layers
    :param input_dim: number of input features per timestep
    :param output_dim: number of output classes
    :param mlp_layers: total MLP layers (1 means just the softmax output)
    :param dropout: dropout rate applied after the LSTM stack and after each
        hidden MLP layer, or None to disable dropout entirely
    :returns: a compiled keras Sequential model
    """
    net = Sequential()
    net.add(LSTM(input_shape=(input_dim,), input_dim=input_dim,
                 output_dim=hidden, return_sequences=True))
    for _ in range(lstm_layers - 1):
        net.add(LSTM(output_dim=hidden, return_sequences=True))
    if dropout is not None:
        net.add(Dropout(dropout))
    # Optional hidden MLP layers between the LSTM stack and the output
    # (range() is empty when mlp_layers <= 1, matching the original guard).
    for _ in range(mlp_layers - 1):
        net.add(TimeDistributed(Dense(hidden, activation='tanh')))
        if dropout is not None:
            net.add(Dropout(dropout))
    net.add(TimeDistributed(Dense(output_dim, activation='softmax')))
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# evaluate baseline model with standardized dataset
# fix random seed for reproducibility
seed = 66
numpy.random.seed(seed)
#estimators = []
#estimators.append(('standardize', StandardScaler()))
#estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, nb_epoch=10, batch_size=10, verbose=1)))
# We define a pipeline of estimators, in which first the scaler is fitted to the data, then the MLP is applied
#pipeline = Pipeline(estimators)
#kfold = StratifiedKFold(y=Y_train, n_folds=3, shuffle=True, random_state=seed)
#model = create_baseline()
#model = create_deeper_dropout_decay_PCA(k, outs)
# Build the 3-layer LSTM + 2-layer MLP head used in this experiment
model = create_LSTM_DeepDrop(hidden=200, lstm_layers=3, input_dim = X_train.shape[1], output_dim = outs, mlp_layers=2, dropout=0.3)
#print model.summary()
#############################################################################
# To save the best model
# serialize model to JSON (architecture only; weights go to the .hdf5 file)
model_json = model.to_json()
modelfile = os.path.join(path,label+".model.json")
with open(modelfile, "w") as json_file:
    json_file.write(model_json)
filepath=os.path.join(path,label+".weights.hdf5")
# Define that the accuracy in cv is monitored, and that weights are stored in a file when max accuracy is achieved
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
def slice_timeseries(X,Y,length=32):
    """Cut aligned (samples, cols) arrays into non-overlapping windows.

    Trailing samples that do not fill a complete window are discarded.

    :param X: 2-D feature array, shape (samples, n_features)
    :param Y: 2-D label array, shape (samples, n_classes)
    :param length: window length in samples
    :returns: (X, Y) reshaped to (n_windows, length, n_cols)
    """
    # TODO: consider more randomized slicing
    windows = X.shape[0] // length
    cutoff = windows * length
    X = X[:cutoff, :].reshape((windows, length, X.shape[1]))
    Y = Y[:cutoff, :].reshape((windows, length, Y.shape[1]))
    return X, Y
# Slice the standardized matrices into fixed-length windows for training
# (train windows of 16 samples, test windows of 32).
X_train_st_reshaped, dummy_y_train_reshaped = slice_timeseries(X_train_st, dummy_y_train, length=16)
X_test_st_reshaped, dummy_y_test_reshaped = slice_timeseries(X_test_st, dummy_y_test, length=32)
# Fit the model
history = model.fit(X_train_st_reshaped, dummy_y_train_reshaped, validation_data=(X_test_st_reshaped,dummy_y_test_reshaped),
          nb_epoch=2000, batch_size=16, verbose=0, callbacks=callbacks_list)
#results = cross_val_score(pipeline, X_train, dummy_y_train, cv=kfold)
#print("Standardized data Acc (in CV training data): %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# evaluate the model
#scores = pipeline.evaluate(X_test, dummy_y_test)
#print pipeline.metrics_names[1]
#print scores[1]*100
# For other metrics, see http://machinelearningmastery.com/metrics-evaluate-machine-learning-algorithms-python/
# Other performance/accuracy metrics
# Predict on the whole (unsliced) test set as one long sequence of windows.
Y_pred = model.predict(X_test_st.reshape((1,) + X_test_st.shape ))[0,:,:]
# Accuracy
print('Accuracy:')
acc = accuracy_score(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1))
print(acc)
# Confusion matrix
cm = confusion_matrix(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1))
numpy.set_printoptions(precision=2)
print('Confusion matrix:')
print(cm)
# AUC
roc = None
# roc_auc_score can raise (e.g. when a class is missing from y_true);
# in that case roc stays None and is reported as such.
try:
    roc = roc_auc_score(dummy_y_test, Y_pred, average='macro')
except:
    pass
print('AUC score:')
print(roc)
# F1
f1= f1_score(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1), average='macro')
print('F1 score:')
print(f1)
# KAppa?
kappa = cohen_kappa_score(numpy.argmax(dummy_y_test, axis=1), numpy.argmax(Y_pred, axis=1))
print('Kappa score:')
print(kappa)
#import matplotlib
#matplotlib.use('Agg')
#import matplotlib.pyplot as plt
# summarize history for accuracy
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
#plt.title('model accuracy')
#plt.ylabel('accuracy')
#plt.xlabel('epoch')
#plt.legend(['train','test'], loc='upper left')
#plt.show()
# summarize history for loss
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('model loss')
#plt.ylabel('loss')
#plt.xlabel('epoch')
#plt.legend(['train','test'], loc='upper left')
#plt.show()
# The models are stored already... what about the performance metrics?
# Store them in a csv (one row per run, keyed by the run label)
perfdata = [
    {'label': label, 'cm': json.dumps(cm.tolist()), 'acc': acc, 'auc': roc, 'f1': f1, 'kappa': kappa}
]
# Later, to recover cm:
# cm = np.array(json.loads(text))
df = pandas.DataFrame(perfdata)
filename = os.path.join(path,label+".perf.csv")
df.to_csv(filename, index=False, encoding='utf-8')
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import abc
import argparse
import os
import six
from stevedore import extension
from heatclient.openstack.common.apiclient import exceptions
# Registry of auth-system plugin classes discovered via stevedore entry
# points, keyed by entry-point name.  Repopulated by discover_auth_systems().
_discovered_plugins = {}
def discover_auth_systems():
    """Discover the available auth-systems.

    This won't take into account the old style auth-systems.
    """
    global _discovered_plugins
    _discovered_plugins = {}
    def add_plugin(ext):
        # Callback invoked by stevedore for each discovered extension.
        _discovered_plugins[ext.name] = ext.plugin
    ep_namespace = "heatclient.openstack.common.apiclient.auth"
    mgr = extension.ExtensionManager(ep_namespace)
    mgr.map(add_plugin)
def load_auth_system_opts(parser):
    """Load options needed by the available auth-systems into a parser.

    This function will try to populate the parser with options from the
    available plugins.
    """
    common_group = parser.add_argument_group("Common auth options")
    BaseAuthPlugin.add_common_opts(common_group)
    # One dedicated argument group per discovered plugin.
    for name, plugin_cls in six.iteritems(_discovered_plugins):
        plugin_group = parser.add_argument_group(
            "Auth-system '%s' options" % name,
            conflict_handler="resolve")
        plugin_cls.add_opts(plugin_group)
def load_plugin(auth_system):
    """Instantiate the plugin registered under *auth_system*.

    :raises: AuthSystemNotFound if no plugin was discovered under that name
    """
    plugin_class = _discovered_plugins.get(auth_system)
    if plugin_class is None:
        raise exceptions.AuthSystemNotFound(auth_system)
    return plugin_class(auth_system=auth_system)
def load_plugin_from_args(args):
    """Load required plugin and populate it with options.

    Try to guess auth system if it is not specified. Systems are tried in
    alphabetical order.

    :type args: argparse.Namespace
    :raises: AuthPluginOptionsMissing
    """
    auth_system = args.os_auth_system
    if auth_system:
        plugin = load_plugin(auth_system)
        plugin.parse_opts(args)
        plugin.sufficient_options()
        return plugin
    # No explicit auth system given: probe every discovered plugin in
    # alphabetical order and return the first whose options are satisfied.
    for name in sorted(six.iterkeys(_discovered_plugins)):
        candidate = _discovered_plugins[name]()
        candidate.parse_opts(args)
        try:
            candidate.sufficient_options()
        except exceptions.AuthPluginOptionsMissing:
            continue
        return candidate
    raise exceptions.AuthPluginOptionsMissing(["auth_system"])
@six.add_metaclass(abc.ABCMeta)
class BaseAuthPlugin(object):
    """Base class for authentication plugins.

    An authentication plugin needs to override at least the authenticate
    method to be a valid plugin.
    """
    # Name of the auth system this plugin implements; set by subclasses
    # or passed to __init__.
    auth_system = None
    # All option names the plugin understands; subclasses extend this.
    opt_names = []
    # Options shared by every plugin; registered once via add_common_opts().
    common_opt_names = [
        "auth_system",
        "username",
        "password",
        "tenant_name",
        "token",
        "auth_url",
    ]

    def __init__(self, auth_system=None, **kwargs):
        self.auth_system = auth_system or self.auth_system
        # Pre-populate self.opts from the given keyword values; options not
        # supplied default to None.
        self.opts = dict((name, kwargs.get(name))
                         for name in self.opt_names)

    @staticmethod
    def _parser_add_opt(parser, opt):
        """Add an option to parser in two variants.

        :param opt: option name (with underscores)
        """
        dashed_opt = opt.replace("_", "-")
        env_var = "OS_%s" % opt.upper()
        arg_default = os.environ.get(env_var, "")
        arg_help = "Defaults to env[%s]." % env_var
        # Preferred dashed spelling, e.g. --os-auth-url
        parser.add_argument(
            "--os-%s" % dashed_opt,
            metavar="<%s>" % dashed_opt,
            default=arg_default,
            help=arg_help)
        # Hidden underscored alias kept for backwards compatibility,
        # e.g. --os_auth_url
        parser.add_argument(
            "--os_%s" % opt,
            metavar="<%s>" % dashed_opt,
            help=argparse.SUPPRESS)

    @classmethod
    def add_opts(cls, parser):
        """Populate the parser with the options for this plugin.
        """
        for opt in cls.opt_names:
            # use `BaseAuthPlugin.common_opt_names` since it is never
            # changed in child classes
            if opt not in BaseAuthPlugin.common_opt_names:
                cls._parser_add_opt(parser, opt)

    @classmethod
    def add_common_opts(cls, parser):
        """Add options that are common for several plugins.
        """
        for opt in cls.common_opt_names:
            cls._parser_add_opt(parser, opt)

    @staticmethod
    def get_opt(opt_name, args):
        """Return option name and value.

        :param opt_name: name of the option, e.g., "username"
        :param args: parsed arguments
        """
        return (opt_name, getattr(args, "os_%s" % opt_name, None))

    def parse_opts(self, args):
        """Parse the actual auth-system options if any.

        This method is expected to populate the attribute `self.opts` with a
        dict containing the options and values needed to make authentication.
        """
        self.opts.update(dict(self.get_opt(opt_name, args)
                              for opt_name in self.opt_names))

    def authenticate(self, http_client):
        """Authenticate using plugin defined method.

        The method usually analyses `self.opts` and performs
        a request to authentication server.

        :param http_client: client object that needs authentication
        :type http_client: HTTPClient
        :raises: AuthorizationFailure
        """
        # Fail fast on missing options before performing any I/O.
        self.sufficient_options()
        self._do_authenticate(http_client)

    @abc.abstractmethod
    def _do_authenticate(self, http_client):
        """Protected method for authentication.
        """

    def sufficient_options(self):
        """Check if all required options are present.

        :raises: AuthPluginOptionsMissing
        """
        missing = [opt
                   for opt in self.opt_names
                   if not self.opts.get(opt)]
        if missing:
            raise exceptions.AuthPluginOptionsMissing(missing)

    @abc.abstractmethod
    def token_and_endpoint(self, endpoint_type, service_type):
        """Return token and endpoint.

        :param service_type: Service type of the endpoint
        :type service_type: string
        :param endpoint_type: Type of endpoint.
                              Possible values: public or publicURL,
                              internal or internalURL,
                              admin or adminURL
        :type endpoint_type: string
        :returns: tuple of token and endpoint strings
        :raises: EndpointException
        """
|
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
from docutils.parsers import rst
from rally.cli import cliutils
from rally.cli import main
import utils
class Parser(object):
    """A simplified interface of argparse.ArgumentParser.

    Instead of building a real CLI parser it records everything registered
    on it (sub-parsers, defaults, arguments) so the documentation generator
    can introspect Rally's command tree.
    """

    def __init__(self):
        # name -> {"description", "help", "fclass", "parser"}
        self.parsers = {}
        # the single nested Parser created by add_subparsers()
        self.subparser = None
        # values registered through set_defaults()
        self.defaults = {}
        # list of (args, kwargs) tuples from add_argument()
        self.arguments = []

    def add_parser(self, name, help=None, description=None,
                   formatter_class=None):
        """Record a sub-command parser and return it."""
        child = Parser()
        self.parsers[name] = {"description": description,
                              "help": help,
                              "fclass": formatter_class,
                              "parser": child}
        return child

    def set_defaults(self, command_object=None, action_fn=None,
                     action_kwargs=None):
        """Remember the non-None default values for this (sub-)command."""
        for key, value in (("command_object", command_object),
                           ("action_fn", action_fn),
                           ("action_kwargs", action_kwargs)):
            if value:
                self.defaults[key] = value

    def add_subparsers(self, dest):
        """Create and return the single nested sub-parser container."""
        # NOTE(andreykurilin): there is only one expected call
        if self.subparser:
            raise ValueError("Can't add one more subparser.")
        self.subparser = Parser()
        return self.subparser

    def add_argument(self, *args, **kwargs):
        """Record an argument spec, skipping the special "action_args"."""
        if "action_args" in args:
            return
        self.arguments.append((args, kwargs))
# Maps a resource name (argument dest) to the command(s) whose last
# successful run provides its default UUID in the Rally environment.
DEFAULT_UUIDS_CMD = {
    "deployment": ["rally deployment create"],
    "task": ["rally task start"],
    "verification": ["rally verify start", "rally verify import_results"]
}
def compose_note_about_default_uuids(argument, dest):
    """Build a docs note explaining where *argument*'s default UUID comes from."""
    # TODO(andreykurilin): add references to commands
    commands = "``, ``".join(DEFAULT_UUIDS_CMD[dest])
    return utils.note(
        "The default value for the ``%(arg)s`` argument is taken from "
        "the Rally environment. Usually, the default value is equal to"
        " the UUID of the last successful run of ``%(cmd)s``, if the "
        "``--no-use`` argument was not used." % {"arg": argument,
                                                 "cmd": commands})
def compose_use_cmd_hint_msg(cmd):
    """Build a docs hint pointing at the corresponding "use" command."""
    anchor = cmd.replace(" ", "-")
    return utils.hint(
        "You can set the default value by executing ``%(cmd)s <uuid>``"
        " (ref__).\n\n __ #%(ref)s" % {"cmd": cmd, "ref": anchor})
def make_arguments_section(category_name, cmd_name, arguments, defaults):
    """Build the "Command arguments" docs elements for one CLI command."""
    elements = [utils.paragraph("**Command arguments**:")]
    for args, kwargs in arguments:
        # for future changes...
        # :param args: a single command argument which can represented by
        #     several names(for example, --uuid and --task-id) in cli.
        # :type args: tuple
        # :param kwargs: description of argument. Have next format:
        #     {"dest": "action_kwarg_<name of keyword argument in code>",
        #      "help": "just a description of argument"
        #      "metavar": "[optional] metavar of argument. Example:"
        #                 "Example: argument '--file'; metavar 'path' ",
        #      "type": "[optional] class object of argument's type",
        #      "required": "[optional] boolean value"}
        # :type kwargs: dict
        # Strip the "action_kwarg_" prefix to recover the code-level name.
        dest = kwargs.get("dest").replace("action_kwarg_", "")
        description = []
        if cmd_name != "use":
            # lets add notes about specific default values and hint about
            # "use" command with reference
            if dest in ("deployment", "task"):
                description.append(compose_note_about_default_uuids(
                    args[0], dest))
                description.append(
                    compose_use_cmd_hint_msg("rally %s use" % dest))
            elif dest == "verification":
                description.append(compose_note_about_default_uuids(
                    args[0], dest))
                description.append(
                    compose_use_cmd_hint_msg("rally verify use"))
        description.append(kwargs.get("help"))
        action = kwargs.get("action")
        # Flag-style arguments (store_true etc.) get no type/default lines.
        if not action:
            arg_type = kwargs.get("type")
            if arg_type:
                description.append("**Type**: %s" % arg_type.__name__)
            # UUID-ish arguments already carry the environment-default note.
            skip_default = dest in ("deployment",
                                    "task_id",
                                    "verification")
            if not skip_default and dest in defaults:
                description.append("**Default**: %s" % defaults[dest])
        metavar = kwargs.get("metavar")
        ref = "%s_%s_%s" % (category_name, cmd_name, args[0].replace("-", ""))
        if metavar:
            args = ["%s %s" % (arg, metavar) for arg in args]
        elements.extend(utils.make_definition(", ".join(args),
                                              ref, description))
    return elements
def get_defaults(func):
    """Return a map of argument:default_value for specified function.

    Only arguments that actually declare a default value are included.
    """
    # inspect.getargspec() is deprecated since Python 3.0 and removed in
    # Python 3.11 (and it raises for functions with keyword-only args);
    # prefer getfullargspec() when it is available, keeping the old call
    # as a fallback for Python 2.
    try:
        spec = inspect.getfullargspec(func)
    except AttributeError:  # Python 2
        spec = inspect.getargspec(func)
    if spec.defaults:
        return dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
    return {}
def make_command_section(category_name, name, parser):
    """Build the docs section for a single ``rally <category> <command>``."""
    section = utils.subcategory("rally %s %s" % (category_name, name))
    section.extend(utils.parse_text(parser["description"]))
    arguments = parser["parser"].arguments
    if arguments:
        action_fn = parser["parser"].defaults["action_fn"]
        section.extend(make_arguments_section(
            category_name, name, arguments, get_defaults(action_fn)))
    return section
def make_category_section(name, parser):
    """Build the reST section describing one CLI command category."""
    category_obj = utils.category("Category: %s" % name)
    # NOTE(andreykurilin): we are re-using `_add_command_parsers` method from
    # `rally.cli.cliutils`, but, since it was designed to print help message,
    # generated description for categories contains specification for all
    # sub-commands. We don't need information about sub-commands at this point,
    # so let's skip "generated description" and take it directly from category
    # class.
    description = parser.defaults["command_object"].__doc__
    # TODO(andreykurilin): write a decorator which will mark cli-class as
    # deprecated without changing its docstring.
    if description.startswith("[Deprecated"):
        closing = description.find("]")
        deprecation_msg = description[1:closing]
        description = description[closing + 1:].strip()
        category_obj.append(utils.warning(deprecation_msg))
    category_obj.extend(utils.parse_text(description))
    subparsers = parser.subparser.parsers
    for command in sorted(subparsers):
        category_obj.append(
            make_command_section(name, command, subparsers[command]))
    return category_obj
class CLIReferenceDirective(rst.Directive):
    """Sphinx directive that renders the rally CLI reference.

    Accepts an optional ``:group:`` option to limit output to one category.
    """

    optional_arguments = 1
    option_spec = {"group": str}

    def run(self):
        parser = Parser()
        categories = copy.copy(main.categories)
        if "group" in self.options:
            wanted = self.options["group"]
            categories = {cat_name: category
                          for cat_name, category in categories.items()
                          if cat_name == wanted}
        cliutils._add_command_parsers(categories, parser)
        return [make_category_section(cat_name,
                                      parser.parsers[cat_name]["parser"])
                for cat_name in sorted(categories)]
def setup(app):
    """Register the ``make_cli_reference`` directive with Sphinx."""
    app.add_directive("make_cli_reference", CLIReferenceDirective)
|
|
from collections import defaultdict
from django.conf import settings
from django.template.defaultfilters import linebreaksbr
from django.urls import reverse
from django.utils.translation import ugettext as _
from celery import chord
from celery.task import task
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import get_url_base
from corehq import toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.dbaccessors import get_apps_in_domain
from corehq.apps.app_manager.util import is_linked_app
from corehq.apps.app_manager.views.utils import update_linked_app
from corehq.apps.hqwebapp.tasks import send_html_email_async
from corehq.apps.linked_domain.const import (
FEATURE_FLAG_DATA_MODEL_TOGGLES,
MODEL_APP,
MODEL_KEYWORD,
MODEL_REPORT,
)
from corehq.apps.linked_domain.dbaccessors import get_upstream_domain_link
from corehq.apps.linked_domain.exceptions import DomainLinkError
from corehq.apps.linked_domain.keywords import (
create_linked_keyword,
update_keyword,
)
from corehq.apps.linked_domain.models import (
KeywordLinkDetail,
ReportLinkDetail,
)
from corehq.apps.linked_domain.ucr import (
create_linked_ucr,
get_downstream_report,
update_linked_ucr,
)
from corehq.apps.linked_domain.updates import update_model_type
from corehq.apps.linked_domain.util import (
pull_missing_multimedia_for_app_and_notify,
)
from corehq.apps.reminders.views import KeywordsListView
from corehq.apps.sms.models import Keyword
from corehq.apps.userreports.models import ReportConfiguration
from corehq.apps.users.models import CouchUser
from corehq.privileges import RELEASE_MANAGEMENT
@task(queue='linked_domain_queue')
def pull_missing_multimedia_for_app_and_notify_task(domain, app_id, email=None, force=False):
    """Celery wrapper: pull missing multimedia for a linked app and notify via email."""
    pull_missing_multimedia_for_app_and_notify(domain, app_id, email, force)
@task(queue='linked_domain_queue')
def push_models(upstream_domain, models, downstream_domains, build_apps, username):
    """Celery entry point: release *models* from the upstream domain to each downstream domain."""
    manager = ReleaseManager(upstream_domain, username)
    manager.release(models, downstream_domains, build_apps)
class ReleaseManager:
    """Coordinates releasing linked-domain content ("models") from an upstream
    domain to downstream (linked) domains.

    Accumulates per-domain success/error messages in parallel 'html' and
    plain-'text' renderings, which are later combined into a summary email.
    """

    def __init__(self, upstream_domain, username):
        self.upstream_domain = upstream_domain
        self.user = CouchUser.get_by_username(username)
        self._reset()

    def _reset(self):
        # Messages are tracked twice (html + text), keyed by downstream
        # domain name; each value is a list of message strings.
        self.errors_by_domain = {'html': defaultdict(list), 'text': defaultdict(list)}
        self.successes_by_domain = {'html': defaultdict(list), 'text': defaultdict(list)}

    def results(self):
        """Return the (successes, errors) message dicts accumulated so far."""
        return self.successes_by_domain, self.errors_by_domain

    def add_error(self, domain, html, text=None):
        # When no plain-text variant is given, reuse the html one.
        text = text or html
        self.errors_by_domain['html'][domain].append(html)
        self.errors_by_domain['text'][domain].append(text)

    def add_success(self, domain, html, text=None):
        # When no plain-text variant is given, reuse the html one.
        text = text or html
        self.successes_by_domain['html'][domain].append(html)
        self.successes_by_domain['text'][domain].append(text)

    def update_successes(self, successes):
        """Merge another manager's successes dict into this one (used by the chord callback)."""
        self._update_messages(self.successes_by_domain, successes)

    def update_errors(self, errors):
        """Merge another manager's errors dict into this one (used by the chord callback)."""
        self._update_messages(self.errors_by_domain, errors)

    def _update_messages(self, attr, messages):
        for fmt in ('html', 'text'):
            for domain, msgs in messages[fmt].items():
                attr[fmt][domain].extend(msgs)

    def get_error_domain_count(self):
        # Count of downstream domains that saw at least one error.
        return len(self.errors_by_domain['html'])

    def get_success_domain_count(self):
        # Count of downstream domains that saw at least one success.
        return len(self.successes_by_domain['html'])

    def _get_errors(self, domain, html=True):
        return self.errors_by_domain['html' if html else 'text'][domain]

    def _get_successes(self, domain, html=True):
        return self.successes_by_domain['html' if html else 'text'][domain]

    def release(self, models, downstream_domains, build_apps=False):
        """Fan out one release_domain task per downstream domain, with an
        email-summary callback once all of them finish (celery chord)."""
        self._reset()
        header = [
            release_domain.si(self.upstream_domain, downstream_domain, self.user.username, models, build_apps)
            for downstream_domain in downstream_domains
        ]
        callback = send_linked_domain_release_email.s(self.upstream_domain, self.user.username,
                                                      models, downstream_domains)
        chord(header)(callback)

    def get_email_message(self, models, linked_domains, html=True):
        """Render the summary email body; html=True wraps newlines with <br>."""
        error_domain_count = self.get_error_domain_count()
        separator = "\n"
        message = _("""
Release complete. {} project(s) succeeded. {}
The following content was released:
{}
The following linked project spaces received content:
        """).format(
            self.get_success_domain_count(),
            _("{} project(s) encountered errors.").format(error_domain_count) if error_domain_count else "",
            separator.join(["- {}".format(m['name']) for m in models])
        ).strip()
        for linked_domain in sorted(linked_domains):
            if not self._get_errors(linked_domain, html):
                message += _("{}- {} updated successfully").format(separator, linked_domain)
            else:
                message += _("{}- {} encountered errors:").format(separator, linked_domain)
                # List errors first, then successes, for the failing domain.
                for msg in self._get_errors(linked_domain, html) + self._get_successes(linked_domain, html):
                    message += separator + " - " + msg
        return linebreaksbr(message) if html else message

    def _release_app(self, domain_link, model, user, build_and_release=False):
        """Update the downstream copy of an app; optionally build + release it.

        Returns an (html, text) error tuple on failure, None on success.
        """
        if toggles.MULTI_MASTER_LINKED_DOMAINS.enabled(domain_link.linked_domain):
            return self._error_tuple(_("Multi master flag is in use"))
        app_id = model['detail']['app_id']
        found = False
        error_prefix = ""
        try:
            for linked_app in get_apps_in_domain(domain_link.linked_domain, include_remote=False):
                if is_linked_app(linked_app) and linked_app.family_id == app_id:
                    found = True
                    app = update_linked_app(linked_app, app_id, user.user_id)
            if not found:
                return self._error_tuple(_("Could not find app"))
            if build_and_release:
                # From here on a failure means the update itself succeeded,
                # so prefix any error message accordingly.
                error_prefix = _("Updated app but did not build or release: ")
                build = app.make_build()
                build.is_released = True
                build.save(increment_version=False)
        except Exception as e:  # intentionally broad
            return self._error_tuple(error_prefix + str(e))

    def _release_report(self, domain_link, model, user_id):
        """Update (creating if permitted) the downstream copy of a UCR report.

        Returns an (html, text) error tuple on failure, None on success.
        """
        report_id = model['detail']['report_id']
        linked_report = get_downstream_report(domain_link.linked_domain, report_id)
        if not linked_report:
            if domain_has_privilege(self.upstream_domain, RELEASE_MANAGEMENT):
                try:
                    linked_report_info = create_linked_ucr(domain_link, report_id)
                    linked_report = linked_report_info.report
                except DomainLinkError as e:
                    return self._error_tuple(str(e))
            else:
                # No privilege to auto-link: point the user at the report
                # edit page so they can link it manually.
                report = ReportConfiguration.get(report_id)
                if report.report_meta.created_by_builder:
                    view = 'edit_report_in_builder'
                else:
                    view = 'edit_configurable_report'
                url = get_url_base() + reverse(view, args=[domain_link.master_domain, report_id])
                return self._error_tuple(
                    _('Could not find report. <a href="{}">Click here</a> and click "Link Report" to link this '
                      + 'report.').format(url),
                    text=_('Could not find report. Please check that the report has been linked.'),
                )
        # have not hit an error case, so update the ucr
        update_linked_ucr(domain_link, linked_report.get_id)
        domain_link.update_last_pull(
            MODEL_REPORT,
            user_id,
            model_detail=ReportLinkDetail(report_id=linked_report.get_id).to_json(),
        )

    def _release_flag_dependent_model(self, domain_link, model, user, feature_flag):
        """Release a model type gated by a feature flag; errors if the flag is off downstream."""
        if not feature_flag.enabled(domain_link.linked_domain):
            return self._error_tuple(_("Feature flag for {} is not enabled").format(model['name']))
        return self._release_model(domain_link, model, user)

    def _release_keyword(self, domain_link, model, user_id):
        """Update (creating if permitted) the downstream copy of an SMS keyword.

        Returns an (html, text) error tuple on failure, None on success.
        """
        upstream_id = model['detail']['keyword_id']
        try:
            linked_keyword_id = (Keyword.objects.values_list('id', flat=True)
                                 .get(domain=domain_link.linked_domain, upstream_id=upstream_id))
        except Keyword.DoesNotExist:
            if domain_has_privilege(self.upstream_domain, RELEASE_MANAGEMENT):
                linked_keyword_id = create_linked_keyword(domain_link, upstream_id)
            else:
                return self._error_tuple(
                    _('Could not find linked keyword in {domain}. '
                      'Please check that the keyword has been linked from the '
                      '<a href="{keyword_url}">Keyword Page</a>.').format(
                        domain=domain_link.linked_domain,
                        keyword_url=(
                            get_url_base() + reverse(
                                KeywordsListView.urlname, args=[domain_link.master_domain]
                            ))
                    ),
                    _('Could not find linked keyword. Please check the keyword has been linked.'),
                )
        update_keyword(domain_link, linked_keyword_id)
        domain_link.update_last_pull(
            MODEL_KEYWORD,
            user_id,
            model_detail=KeywordLinkDetail(keyword_id=str(linked_keyword_id)).to_json(),
        )

    def _release_model(self, domain_link, model, user):
        # Generic path for model types without bespoke release logic.
        update_model_type(domain_link, model['type'], model_detail=model['detail'])
        domain_link.update_last_pull(model['type'], user._id, model_detail=model['detail'])

    def _error_tuple(self, html, text=None):
        # Normalize an error into an (html, text) pair.
        text = text or html
        return (html, text)
@task(queue='linked_domain_queue')
def release_domain(upstream_domain, downstream_domain, username, models, build_apps=False):
    """Release the given models from *upstream_domain* to one downstream domain.

    Returns the (successes, errors) message dicts built by ReleaseManager;
    the chord callback aggregates these across all downstream domains.
    """
    manager = ReleaseManager(upstream_domain, username)
    domain_link = get_upstream_domain_link(downstream_domain)
    if not domain_link or domain_link.master_domain != upstream_domain:
        # Bug fix: the format args were swapped, producing
        # "Project space <upstream> is no longer linked to <downstream>".
        # The downstream project space is the one that lost its link.
        manager.add_error(downstream_domain, _("Project space {} is no longer linked to {}. No content "
                                               "was released to it.").format(downstream_domain, upstream_domain))
        return manager.results()
    for model in models:
        errors = None
        try:
            if model['type'] == MODEL_APP:
                errors = manager._release_app(domain_link, model, manager.user, build_apps)
            elif model['type'] == MODEL_REPORT:
                errors = manager._release_report(domain_link, model, manager.user._id)
            elif model['type'] in FEATURE_FLAG_DATA_MODEL_TOGGLES:
                errors = manager._release_flag_dependent_model(domain_link, model, manager.user,
                                                               FEATURE_FLAG_DATA_MODEL_TOGGLES[model['type']])
            elif model['type'] == MODEL_KEYWORD:
                errors = manager._release_keyword(domain_link, model, manager.user._id)
            else:
                # Consistency: assign like the other branches (the generic
                # release returns None, i.e. success, just as before).
                errors = manager._release_model(domain_link, model, manager.user)
        except Exception as e:  # intentionally broad
            errors = [str(e), str(e)]
            notify_exception(None, "Exception pushing linked domains: {}".format(e))
        if errors:
            # errors is an (html, text) pair from the _release_* helpers.
            manager.add_error(
                domain_link.linked_domain,
                _("Could not update {}: {}").format(model['name'], errors[0]),
                text=_("Could not update {}: {}").format(model['name'], errors[1]))
        else:
            manager.add_success(domain_link.linked_domain, _("Updated {} successfully").format(model['name']))
    return manager.results()
@task(queue='linked_domain_queue')
def send_linked_domain_release_email(results, upstream_domain, username, models, downstream_domains):
    """Chord callback: aggregate per-domain release results and email a summary.

    ``results`` is the output of the release_domain tasks — a list of
    (successes, errors) pairs, or a single pair when only one task ran.
    """
    manager = ReleaseManager(upstream_domain, username)
    # chord sends a list of results only if there were multiple tasks
    if len(downstream_domains) == 1:
        results = [results]
    for result in results:
        (successes, errors) = result
        manager.update_successes(successes)
        manager.update_errors(errors)
    subject = _("Linked project release complete.")
    if manager.get_error_domain_count():
        subject += _(" Errors occurred.")
    # Fall back to the username when no explicit email is set
    # (NOTE(review): presumably the username is an address — confirm).
    email = manager.user.email or manager.user.username
    send_html_email_async(
        subject,
        email,
        manager.get_email_message(models, downstream_domains, html=True),
        text_content=manager.get_email_message(models, downstream_domains, html=False),
        email_from=settings.DEFAULT_FROM_EMAIL
    )
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import tlvs
# NOTE: auto-generated by pyangbind's PythonClass plugin; do not edit by
# hand — regenerate from the YANG model instead.
class sid_label_binding(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: State parameters relating to the SID/Label binding sub-TLV
    of the extended prefix LSA
    """

    # __slots__ avoids per-instance __dict__; "__state"/"__tlvs" are the
    # name-mangled private containers exposed via the properties below.
    __slots__ = ("_path_helper", "_extmethods", "__state", "__tlvs")

    _yang_name = "sid-label-binding"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        self.__tlvs = YANGDynClass(
            base=tlvs.tlvs,
            is_container="container",
            yang_name="tlvs",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # A single positional argument is treated as a source object to
        # copy changed elements from.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were actually modified on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a data tree; otherwise
        # return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "opaque-lsa",
                "extended-prefix",
                "tlvs",
                "tlv",
                "sid-label-binding",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state (container)

        YANG Description: State parameters relating to the SID/Label binding sub-TLV
        of the extended prefix LSA
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters relating to the SID/Label binding sub-TLV
        of the extended prefix LSA
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to a fresh (unchanged) default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    def _get_tlvs(self):
        """
        Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs (container)

        YANG Description: TLVs contained within the SID/Label Binding sub-TLV of the
        SID/Label Binding TLV
        """
        return self.__tlvs

    def _set_tlvs(self, v, load=False):
        """
        Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_tlvs is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_tlvs() directly.

        YANG Description: TLVs contained within the SID/Label Binding sub-TLV of the
        SID/Label Binding TLV
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=tlvs.tlvs,
                is_container="container",
                yang_name="tlvs",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """tlvs must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__tlvs = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_tlvs(self):
        # Restore the element to a fresh (unchanged) default container.
        self.__tlvs = YANGDynClass(
            base=tlvs.tlvs,
            is_container="container",
            yang_name="tlvs",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property access to the private containers.
    state = __builtin__.property(_get_state)
    tlvs = __builtin__.property(_get_tlvs)

    _pyangbind_elements = OrderedDict([("state", state), ("tlvs", tlvs)])
from . import state
from . import tlvs
# NOTE: auto-generated by pyangbind's PythonClass plugin; do not edit by
# hand — regenerate from the YANG model instead.  This variant is generated
# from the openconfig-network-instance-l2 module but is otherwise identical
# to the class above.
class sid_label_binding(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: State parameters relating to the SID/Label binding sub-TLV
    of the extended prefix LSA
    """

    # __slots__ avoids per-instance __dict__; "__state"/"__tlvs" are the
    # name-mangled private containers exposed via the properties below.
    __slots__ = ("_path_helper", "_extmethods", "__state", "__tlvs")

    _yang_name = "sid-label-binding"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )
        self.__tlvs = YANGDynClass(
            base=tlvs.tlvs,
            is_container="container",
            yang_name="tlvs",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        # A single positional argument is treated as a source object to
        # copy changed elements from.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were actually modified on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a data tree; otherwise
        # return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "opaque-lsa",
                "extended-prefix",
                "tlvs",
                "tlv",
                "sid-label-binding",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state (container)

        YANG Description: State parameters relating to the SID/Label binding sub-TLV
        of the extended prefix LSA
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters relating to the SID/Label binding sub-TLV
        of the extended prefix LSA
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the element to a fresh (unchanged) default container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    def _get_tlvs(self):
        """
        Getter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs (container)

        YANG Description: TLVs contained within the SID/Label Binding sub-TLV of the
        SID/Label Binding TLV
        """
        return self.__tlvs

    def _set_tlvs(self, v, load=False):
        """
        Setter method for tlvs, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_tlvs is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_tlvs() directly.

        YANG Description: TLVs contained within the SID/Label Binding sub-TLV of the
        SID/Label Binding TLV
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=tlvs.tlvs,
                is_container="container",
                yang_name="tlvs",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """tlvs must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=tlvs.tlvs, is_container='container', yang_name="tlvs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )

        self.__tlvs = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_tlvs(self):
        # Restore the element to a fresh (unchanged) default container.
        self.__tlvs = YANGDynClass(
            base=tlvs.tlvs,
            is_container="container",
            yang_name="tlvs",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Read-only property access to the private containers.
    state = __builtin__.property(_get_state)
    tlvs = __builtin__.property(_get_tlvs)

    _pyangbind_elements = OrderedDict([("state", state), ("tlvs", tlvs)])
|
|
# Copyright 2017-present Kensho Technologies, LLC.
import string
from typing import cast
import unittest
from graphql import parse
from graphql.utilities.build_ast_schema import build_ast_schema
import six
from ..compiler.compiler_frontend import graphql_to_ir
from ..exceptions import GraphQLCompilationError, GraphQLParsingError, GraphQLValidationError
from ..schema import TypeEquivalenceHintsType
from .test_helpers import get_schema
class IrGenerationErrorTests(unittest.TestCase):
"""Ensure illegal inputs raise proper exceptions."""
def setUp(self) -> None:
"""Initialize the test schema once for all tests, and disable max diff limits."""
self.maxDiff = None
self.schema = get_schema()
    def test_repeated_field_name(self) -> None:
        """Repeating a field name within a single scope must be rejected."""
        # Same property field selected twice in one scope.
        repeated_property_field = """{
            Animal {
                name @output(out_name: "name")
                name
            }
        }"""
        # Same property field twice, each occurrence carrying directives.
        repeated_property_field_with_directives = """{
            Animal {
                name @output(out_name: "name")
                name @filter(op_name: "=", value: ["$wanted"])
            }
        }"""
        # Same vertex field traversed twice in one scope.
        repeated_vertex_field = """{
            Animal {
                out_Animal_ParentOf {
                    name @output(out_name: "child_name")
                }
                out_Animal_ParentOf {
                    uuid @output(out_name: "child_uuid")
                }
            }
        }"""

        for graphql in (
            repeated_property_field,
            repeated_property_field_with_directives,
            repeated_vertex_field,
        ):
            with self.assertRaises(GraphQLCompilationError):
                graphql_to_ir(self.schema, graphql)
    def test_output_source_directive_constraints(self) -> None:
        """@output_source placement restrictions must be enforced."""
        # @output_source is only legal on the last traversed vertex field.
        output_source_not_on_last_vertex_element = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_ParentOf @output_source {
                    name @output(out_name: "parent_name")
                }
                out_Animal_FedAt {
                    name @output(out_name: "event_name")
                    event_date @output(out_name: "event_date")
                }
            }
        }"""
        # @output_source may not be applied to a property field.
        output_source_on_non_vertex_element = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_FedAt {
                    name @output(out_name: "event_name") @output_source
                    event_date @output(out_name: "event_date")
                }
            }
        }"""
        # At most one @output_source per query.
        multiple_output_sources = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_FedAt @output_source {
                    name @output(out_name: "event_name") @output_source
                    event_date @output(out_name: "event_date")
                }
            }
        }"""
        # @output_source cannot be combined with @optional...
        output_source_on_optional_vertex = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_FedAt @output_source @optional {
                    name @output(out_name: "event_name")
                    event_date @output(out_name: "event_date")
                }
            }
        }"""
        # ...nor with @fold.
        output_source_on_fold_vertex = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_FedAt @output_source @fold {
                    name @output(out_name: "event_name")
                    event_date @output(out_name: "event_date")
                }
            }
        }"""

        for graphql in (
            output_source_not_on_last_vertex_element,
            output_source_on_non_vertex_element,
            multiple_output_sources,
            output_source_on_optional_vertex,
            output_source_on_fold_vertex,
        ):
            with self.assertRaises(GraphQLCompilationError):
                graphql_to_ir(self.schema, graphql)
    def test_optional_directive_constraints(self) -> None:
        """@optional placement and combination restrictions must be enforced."""
        # @optional is only legal on vertex fields, not property fields.
        optional_on_property_field = """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_ParentOf {
                    name @output(out_name: "parent_name") @optional
                }
            }
        }"""
        # The root vertex cannot be optional.
        optional_on_root_vertex = """{
            Animal @optional {
                name @output(out_name: "uuid")
            }
        }"""
        # @output_source, @recurse and @fold are all illegal *inside* an
        # @optional traversal...
        output_source_inside_optional_block = """{
            Animal {
                out_Animal_ParentOf @optional {
                    out_Animal_FedAt @output_source {
                        uuid @output(out_name: "uuid")
                    }
                }
            }
        }"""
        recurse_traversal_inside_optional_block = """{
            Animal {
                out_Animal_ParentOf @optional {
                    out_Animal_FedAt @recurse(depth: 3) {
                        uuid @output(out_name: "uuid")
                    }
                }
            }
        }"""
        fold_traversal_inside_optional_block = """{
            Animal {
                out_Animal_ParentOf @optional {
                    out_Animal_FedAt @fold {
                        uuid @output(out_name: "uuid")
                    }
                }
            }
        }"""
        # ...and also illegal on the *same* vertex field as @optional.
        optional_on_output_source_vertex_field = """{
            Animal {
                out_Animal_ParentOf @optional @output_source {
                    uuid @output(out_name: "uuid")
                }
            }
        }"""
        optional_on_recurse_vertex_field = """{
            Animal {
                out_Animal_ParentOf @optional @recurse(depth: 3) {
                    uuid @output(out_name: "uuid")
                }
            }
        }"""
        optional_on_fold_vertex_field = """{
            Animal {
                out_Animal_ParentOf @optional @fold {
                    uuid @output(out_name: "uuid")
                }
            }
        }"""

        for graphql in (
            optional_on_property_field,
            optional_on_root_vertex,
            output_source_inside_optional_block,
            recurse_traversal_inside_optional_block,
            fold_traversal_inside_optional_block,
            optional_on_output_source_vertex_field,
            optional_on_recurse_vertex_field,
            optional_on_fold_vertex_field,
        ):
            with self.assertRaises(GraphQLCompilationError):
                graphql_to_ir(self.schema, graphql)
def test_starts_with_op_filter_missing_value_argument(self) -> None:
graphql_input = """{
Animal {
name @filter(op_name: "starts_with")
@output(out_name: "animal_name")
}
}"""
with self.assertRaises(GraphQLValidationError):
graphql_to_ir(self.schema, graphql_input)
def test_fold_directive_constraints(self) -> None:
    """Ensure every invalid use of the @fold directive raises GraphQLCompilationError."""
    # @fold is not allowed on property fields.
    fold_on_property_field = """{
        Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
            out_Animal_ParentOf {
                name @output(out_name: "parent_name") @fold
            }
        }
    }"""
    # @fold is not allowed on the root vertex field.
    fold_on_root_vertex = """{
        Animal @fold {
            name @output(out_name: "uuid")
        }
    }"""
    # A fold scope may not output at more than one traversal level.
    multi_level_outputs_inside_fold_block = """{
        Animal {
            out_Animal_ParentOf @fold {
                uuid @output(out_name: "uuid")
                out_Animal_FedAt {
                    uuid @output(out_name: "uuid")
                }
            }
        }
    }"""
    # No further traversal is allowed after the output level inside a fold.
    traversal_inside_fold_block_after_output = """{
        Animal {
            out_Animal_ParentOf @fold {
                in_Animal_ParentOf {
                    uuid @output(out_name: "uuid")
                    out_Animal_FedAt {
                        uuid
                    }
                }
            }
        }
    }"""
    # A fold scope must contain at least one output or filter.
    no_outputs_or_filters_inside_fold_block = """{
        Animal {
            uuid @output(out_name: "uuid")
            out_Animal_ParentOf @fold {
                name
            }
        }
    }"""
    # Outputting a list-typed property inside a fold is not supported.
    list_output_inside_fold_block = """{
        Animal {
            uuid @output(out_name: "uuid")
            out_Animal_ParentOf @fold {
                alias @output(out_name: "disallowed_folded_list_output")
            }
        }
    }"""
    # @fold scopes may not be nested inside one another.
    fold_within_fold = """{
        Animal {
            out_Animal_ParentOf @fold {
                out_Animal_FedAt @fold {
                    uuid @output(out_name: "uuid")
                }
            }
        }
    }"""
    # @optional may not appear inside a @fold scope.
    optional_within_fold = """{
        Animal {
            out_Animal_ParentOf @fold {
                out_Animal_FedAt @optional {
                    uuid @output(out_name: "uuid")
                }
            }
        }
    }"""
    # @recurse may not appear inside a @fold scope.
    recurse_within_fold = """{
        Animal {
            out_Animal_ParentOf @fold {
                in_Animal_ParentOf @recurse(depth: 2) {
                    uuid @output(out_name: "uuid")
                }
            }
        }
    }"""
    # @output_source may not appear inside a @fold scope.
    output_source_within_fold = """{
        Animal {
            out_Animal_ParentOf @fold {
                out_Animal_FedAt @output_source {
                    uuid @output(out_name: "uuid")
                }
            }
        }
    }"""
    # A fold scope may not branch into multiple vertex fields.
    multiple_vertex_fields_within_fold = """{
        Animal {
            out_Animal_ParentOf @fold {
                out_Animal_FedAt {
                    uuid @output(out_name: "uuid")
                }
                in_Animal_ParentOf {
                    name
                }
            }
        }
    }"""
    # Branching is also disallowed deeper inside the fold traversal.
    multiple_vertex_fields_within_fold_after_traverse = """{
        Animal {
            out_Animal_ParentOf @fold {
                in_Animal_ParentOf {
                    out_Animal_FedAt {
                        uuid @output(out_name: "uuid")
                    }
                    out_Animal_OfSpecies {
                        name
                    }
                }
            }
        }
    }"""
    # The _x_count meta field is only legal inside a @fold scope.
    use_of_count_outside_of_fold = """{
        Animal {
            name @output(out_name: "name")
            out_Animal_ParentOf {
                _x_count @output(out_name: "child_count")
            }
        }
    }"""
    # _x_count may only be used at the innermost scope of the fold.
    use_of_count_before_innermost_scope = """{
        Species {
            name @output(out_name: "name")
            in_Animal_OfSpecies @fold {
                _x_count @output(out_name: "fold_size")
                out_Animal_LivesIn {
                    name @filter(op_name: "=", value: ["$location"])
                }
            }
        }
    }"""
    # _x_count may not appear at more than one level of the same fold.
    multiple_uses_of_count_in_one_fold_1 = """{
        Species {
            name @output(out_name: "name")
            in_Animal_OfSpecies @fold {
                _x_count @output(out_name: "fold_size")
                out_Animal_LivesIn {
                    _x_count @filter(op_name: "=", value: ["$num_animals"])
                }
            }
        }
    }"""
    # Same restriction, with filter and output positions swapped.
    multiple_uses_of_count_in_one_fold_2 = """{
        Species {
            name @output(out_name: "name")
            in_Animal_OfSpecies @fold {
                _x_count @filter(op_name: "=", value: ["$num_animals"])
                out_Animal_LivesIn {
                    _x_count @output(out_name: "fold_size")
                }
            }
        }
    }"""
    all_test_cases = (
        fold_on_property_field,
        fold_on_root_vertex,
        multi_level_outputs_inside_fold_block,
        traversal_inside_fold_block_after_output,
        no_outputs_or_filters_inside_fold_block,
        list_output_inside_fold_block,
        fold_within_fold,
        optional_within_fold,
        recurse_within_fold,
        output_source_within_fold,
        multiple_vertex_fields_within_fold,
        multiple_vertex_fields_within_fold_after_traverse,
        use_of_count_outside_of_fold,
        use_of_count_before_innermost_scope,
        multiple_uses_of_count_in_one_fold_1,
        multiple_uses_of_count_in_one_fold_2,
    )
    for graphql in all_test_cases:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, graphql)
def test_output_directive_constraints(self) -> None:
    """Ensure invalid uses of @output raise the expected error type.

    Each test case is an (expected exception type, query text) pair.
    """
    # @output is only allowed on property fields, not vertex fields.
    output_on_vertex_field = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_ParentOf @output(out_name: "parent") {
                    uuid
                }
            }
        }""",
    )
    # The required "out_name" argument must be present.
    output_without_name = (
        GraphQLValidationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output
            }
        }""",
    )
    # "out_name" must be a non-empty string.
    output_with_empty_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output(out_name: "")
            }
        }""",
    )
    # "out_name" must be a valid identifier, so it may not start with a digit.
    output_with_name_starting_with_digit = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output(out_name: "1uuid")
            }
        }""",
    )
    # The same "out_name" may not be used twice in one query.
    output_with_duplicated_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                uuid @output(out_name: "uuid")
                out_Animal_ParentOf {
                    uuid @output(out_name: "uuid")
                }
            }
        }""",
    )
    # "out_name" may not contain quote or backslash characters.
    output_with_illegal_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output(out_name: "name'\\\\\\"")
            }
        }""",
    )
    # Names with a triple-underscore prefix are reserved for internal use.
    output_with_reserved_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output(out_name: "___animal_name")
            }
        }""",
    )
    for expected_error, graphql in (
        output_on_vertex_field,
        output_without_name,
        output_with_duplicated_name,
        output_with_illegal_name,
        output_with_empty_name,
        output_with_name_starting_with_digit,
        output_with_reserved_name,
    ):
        with self.assertRaises(expected_error):
            graphql_to_ir(self.schema, graphql)
def test_tag_directive_constraints(self) -> None:
    """Ensure invalid uses of @tag raise the expected error type.

    Each test case is an (expected exception type, query text) pair.
    """
    # @tag is only allowed on property fields, not vertex fields.
    tag_on_vertex_field = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                out_Animal_ParentOf @tag(tag_name: "role") {
                    uuid @output(out_name: "uuid")
                }
                in_Animal_ParentOf {
                    name @filter(op_name: "!=", value: ["%role"])
                }
            }
        }""",
    )
    # The required "tag_name" argument must be present.
    tag_without_name = (
        GraphQLValidationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @tag
                uuid @output(out_name: "uuid")
            }
        }""",
    )
    # The same "tag_name" may not be declared twice in one query.
    tag_with_duplicated_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @output(out_name: "name")
                uuid @tag(tag_name: "uuid")
                out_Animal_ParentOf {
                    uuid @tag(tag_name: "uuid")
                }
                in_Animal_ParentOf {
                    uuid @filter(op_name: "!=", value: ["%uuid"])
                }
            }
        }""",
    )
    # "tag_name" may not contain quote or backslash characters.
    tag_with_illegal_name = (
        GraphQLCompilationError,
        """{
            Animal @filter(op_name: "name_or_alias", value: ["$animal_name"]) {
                name @tag(tag_name: "name'\\\\\\"")
                uuid @output(out_name: "uuid")
            }
        }""",
    )
    # @tag may not appear inside a @fold scope.
    tag_within_fold_scope = (
        GraphQLCompilationError,
        """{
            Animal {
                out_Animal_ParentOf @fold {
                    name @tag(tag_name: "name")
                    uuid @output(out_name: "uuid")
                }
                in_Animal_ParentOf {
                    name @filter(op_name: "!=", value: ["%name"])
                }
            }
        }""",
    )
    # @tag may not be applied to the _x_count meta field.
    tag_on_count_field = (
        GraphQLCompilationError,
        """{
            Animal {
                name @output(out_name: "name")
                out_Animal_ParentOf {
                    _x_count @tag(tag_name: "count")
                }
                in_Animal_ParentOf {
                    _x_count @filter(op_name: "=", value: ["%count"])
                }
            }
        }""",
    )
    # Every declared tag must be consumed by some filter.
    unused_tag = (
        GraphQLCompilationError,
        """{
            Animal {
                name @tag(tag_name: "name")
                uuid @output(out_name: "uuid")
            }
        }""",
    )
    errors_and_inputs = (
        tag_on_vertex_field,
        tag_without_name,
        tag_with_duplicated_name,
        tag_with_illegal_name,
        tag_within_fold_scope,
        tag_on_count_field,
        unused_tag,
    )
    for expected_error, graphql in errors_and_inputs:
        with self.assertRaises(expected_error):
            graphql_to_ir(self.schema, graphql)
def test_recurse_directive_constraints(self) -> None:
    """Ensure invalid uses of @recurse raise the expected error type.

    Each test case is an (expected exception type, query text) pair.
    """
    # @recurse is only allowed on vertex fields, not property fields.
    recurse_on_property_field = (
        GraphQLCompilationError,
        """{
            Animal {
                name @recurse(depth: 3)
                uuid @output(out_name: "uuid")
            }
        }""",
    )
    # @recurse is not allowed on the root vertex field.
    recurse_on_root_vertex = (
        GraphQLCompilationError,
        """{
            Animal @recurse(depth: 3) {
                name @output(out_name: "name")
            }
        }""",
    )
    # The recursion depth must be at least 1.
    recurse_with_illegal_depth = (
        GraphQLCompilationError,
        """{
            Animal {
                out_Animal_ParentOf @recurse(depth: 0) {
                    name @output(out_name: "name")
                }
            }
        }""",
    )
    # @recurse may not be combined with @fold on the same vertex field.
    recurse_at_fold_scope = (
        GraphQLCompilationError,
        """{
            Animal {
                out_Animal_ParentOf @recurse(depth: 3) @fold {
                    name @output(out_name: "name")
                }
            }
        }""",
    )
    # @recurse may not appear inside a @fold scope.
    recurse_within_fold_scope = (
        GraphQLCompilationError,
        """{
            Animal {
                out_Animal_ParentOf @fold {
                    out_Animal_ParentOf @recurse(depth: 3) {
                        name @output(out_name: "name")
                    }
                }
            }
        }""",
    )
    # Note that out_Animal_ImportantEvent is a union of Event | BirthEvent, hence this query
    # attempts to recurse on Animal at depth 0, and then on an Event for depth 1 and 2.
    recurse_on_union_edge_without_parent_type = (
        GraphQLCompilationError,
        """{
            Animal {
                out_Animal_ImportantEvent @recurse(depth: 2) {
                    ... on Event {
                        name @output(out_name: "event_name")
                    }
                }
            }
        }""",
    )
    # Note that "color" is a property on Animal, but not on Species.
    # However, @recurse emits both the starting vertex (0-depth) as well as deeper vertices,
    # so this query is invalid. The "in_Animal_OfSpecies" vertex field is not of union type,
    # so this fails the type safety check for the @recurse directive.
    recurse_with_type_mismatch = (
        GraphQLCompilationError,
        """{
            Species {
                in_Animal_OfSpecies @recurse(depth: 3) {
                    color @output(out_name: "color")
                }
            }
        }""",
    )
    for expected_error, graphql in (
        recurse_on_property_field,
        recurse_on_root_vertex,
        recurse_with_illegal_depth,
        recurse_at_fold_scope,
        recurse_within_fold_scope,
        recurse_with_type_mismatch,
        recurse_on_union_edge_without_parent_type,
    ):
        with self.assertRaises(expected_error):
            graphql_to_ir(self.schema, graphql)
def test_filter_directive_bad_op_name(self) -> None:
    """Ensure a @filter with an unrecognized "op_name" fails to compile."""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(
            self.schema,
            """{
                Event @filter(op_name: "non_existent", value: ["$a"]) {
                    name @output(out_name: "name")
                }
            }""",
        )
def test_filter_directive_undeclared_argument(self) -> None:
    """Ensure a @filter referencing a tag ("%...") that was never declared fails."""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(
            self.schema,
            """{
                Event @filter(op_name: "name_or_alias", value: ["%not_there"]) {
                    name @output(out_name: "name")
                }
            }""",
        )
def test_filter_directive_literal_argument(self) -> None:
    """Ensure a @filter value that is neither a variable nor a tag fails to compile."""
    # Literal arguments are currently not supported, and instead raise errors.
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(
            self.schema,
            """{
                Event @filter(op_name: "name_or_alias", value: ["literal"]) {
                    name @output(out_name: "name")
                }
            }""",
        )
def test_filter_directive_non_list_value_argument(self) -> None:
    """Ensure a @filter whose "value" argument is not a list fails validation."""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(
            self.schema,
            """{
                Event @filter(op_name: "name_or_alias", value: "$not_a_list") {
                    name @output(out_name: "name")
                }
            }""",
        )
def test_filter_directive_wrong_location(self) -> None:
    """Ensure filter operators applied at an unsupported field kind fail to compile."""
    invalid_graphql_inputs = [
        # 'name_or_alias' must be on a vertex field that has 'name' and 'alias' properties,
        # Event vertex fields and property fields (like 'name') do not satisfy this.
        """{
            Animal {
                name @filter(op_name: "name_or_alias", value: ["$foo"])
                description @output(out_name: "description_text")
            }
        }""",
        # '=' must be on a property field, not a vertex field
        """{
            Event @filter(op_name: "=", value: ["$foo"]) {
                name @output(out_name: "name")
            }
        }""",
        # 'between' must be on a property field
        """{
            Event @filter(op_name: "between", value: ["$foo"]) {
                name @output(out_name: "name")
            }
        }""",
    ]
    for graphql in invalid_graphql_inputs:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, graphql)
def test_filter_directive_bad_arg_counts(self) -> None:
    """Ensure each filter op compiles with its exact arg count and errors otherwise."""

    def generate_args_string(num_args: int) -> str:
        """Generate a GraphQL array with the given args, as a string."""
        if num_args == 0:
            return "[]"
        variable_names = string.ascii_lowercase
        # Each argument becomes a one-letter variable, so we can't exceed the alphabet.
        if num_args >= len(variable_names):
            raise AssertionError("Invalid test data, too many variables to represent.")
        args = (variable_names[i] for i in six.moves.xrange(num_args))
        array_contents = ",".join('"${}"'.format(x) for x in args)
        return "[{}]".format(array_contents)

    expected_arg_counts = [
        # Using % rather than .format() because GraphQL uses lots of curly braces,
        # which are annoying to escape the way .format() likes them.
        (
            1,
            """{
                Animal @filter(op_name: "name_or_alias", value: %s) {
                    name @output(out_name: "name")
                }
            }""",
        ),
        (
            1,
            """{
                Event {
                    name @filter(op_name: "=", value: %s) @output(out_name: "name")
                }
            }""",
        ),
        (
            2,
            """{
                Event {
                    name @output(out_name: "name")
                    event_date @filter(op_name: "between", value: %s)
                }
            }""",
        ),
    ]
    for expected_arg_count, base_query in expected_arg_counts:
        # Try using just the right number of arguments, expect no error.
        args = generate_args_string(expected_arg_count)
        graphql_to_ir(self.schema, base_query % (args))

        # Try using one argument fewer or too many, expect an error.
        for num_args in (expected_arg_count - 1, expected_arg_count + 1):
            args = generate_args_string(num_args)
            with self.assertRaises(GraphQLCompilationError):
                graphql_to_ir(self.schema, base_query % (args))
def test_simple_bad_graphql(self) -> None:
    """Ensure structurally-invalid query documents raise the expected error type."""
    bad_graphqls = (
        # Not parseable as GraphQL at all.
        (GraphQLParsingError, "not really graphql"),
        # graphql that doesn't match the graph schema
        (
            GraphQLValidationError,
            """{
                NonExistentVertex {
                    name @output(out_name: "name")
                }
            }""",
        ),
        # more than one graphql definition block
        (
            GraphQLValidationError,
            """{
                Animal {
                    name @output(out_name: "animal_name")
                }
            }
            {
                Species {
                    name @output(out_name: "species_name")
                }
            }""",
        ),
        # more than one root selection
        (
            GraphQLCompilationError,
            """{
                Animal {
                    name @output(out_name: "animal_name")
                }
                Species {
                    name @output(out_name: "species_name")
                }
            }""",
        ),
    )
    for expected_error, graphql in bad_graphqls:
        with self.assertRaises(expected_error):
            graphql_to_ir(self.schema, graphql)
def test_duplicate_directive_on_same_field(self) -> None:
    """Ensure repeating a non-repeatable directive on one field fails validation."""
    # self-consistency check: the non-duplicate version of the query compiles fine
    graphql_to_ir(
        self.schema,
        """{
            Event {
                name @tag(tag_name: "name")
                event_date @output(out_name: "date")
                description @filter(op_name: "has_substring", value: ["%name"])
            }
        }""",
    )
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(
            self.schema,
            """{
                Event {
                    name @tag(tag_name: "name") @tag(tag_name: "name2")
                    event_date @output(out_name: "date")
                    description @filter(op_name: "has_substring", value: ["%name"])
                                @filter(op_name: "has_substring", value: ["%name2"])
                }
            }""",
        )
def test_property_field_after_vertex_field(self) -> None:
    """Ensure property fields listed after vertex fields in a scope fail to compile."""
    # self-consistency check: the correctly-ordered version of the query compiles fine
    graphql_to_ir(
        self.schema,
        """{
            FeedingEvent {
                name @output(out_name: "name")
                event_date @tag(tag_name: "date")
                in_Animal_FedAt {
                    name @output(out_name: "animal")
                }
                in_Event_RelatedEvent {
                    ... on Event {
                        event_date @filter(op_name: "=", value: ["%date"])
                    }
                }
            }
        }""",
    )
    # In both invalid queries, a property field appears after a vertex field
    # within the same selection set.
    invalid_queries = (
        """{
            FeedingEvent {
                name @output(out_name: "name")
                in_Animal_FedAt {
                    name @output(out_name: "animal")
                }
                event_date @tag(tag_name: "date")
                in_Event_RelatedEvent {
                    ... on Event {
                        event_date @filter(op_name: "=", value: ["%date"])
                    }
                }
            }
        }""",
        """{
            FeedingEvent {
                in_Animal_FedAt {
                    name @output(out_name: "animal")
                }
                name @output(out_name: "name")
                event_date @tag(tag_name: "date")
                in_Event_RelatedEvent {
                    ... on Event {
                        event_date @filter(op_name: "=", value: ["%date"])
                    }
                }
            }
        }""",
    )
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_fragment_and_fields_in_same_selection(self) -> None:
    """Ensure mixing an inline fragment with sibling fields in one scope fails."""
    invalid_graphql_queries = (
        # Property field and fragment in the same selection set.
        """{
            Animal {
                name @output(out_name: "animal_name")
                out_Entity_Related {
                    name @output(out_name: "related_name")
                    ... on Animal {
                        out_Animal_OfSpecies {
                            name @output(out_name: "related_animal_species")
                        }
                    }
                }
            }
        }""",
        # Vertex field and fragment in the same selection set.
        """{
            Animal {
                name @output(out_name: "animal_name")
                out_Entity_Related {
                    out_Entity_Related {
                        name @output(out_name: "second_order_related_name")
                    }
                    ... on Animal {
                        name @output(out_name: "related_animal_name")
                    }
                }
            }
        }""",
        # Both types of fields, and a fragment in the same selection set.
        """{
            Animal {
                name @output(out_name: "animal_name")
                out_Entity_Related {
                    name @output(out_name: "related_name")
                    out_Entity_Related {
                        name @output(out_name: "second_order_related_name")
                    }
                    ... on Animal {
                        out_Animal_OfSpecies {
                            name @output(out_name: "related_animal_species")
                        }
                    }
                }
            }
        }""",
    )
    for invalid_graphql in invalid_graphql_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_directive_on_fragment(self) -> None:
    """Ensure a directive that is not allowed on inline fragments fails validation."""
    invalid_graphql = """{
        Animal {
            name @output(out_name: "animal_name")
            out_Entity_Related {
                ... on Animal @optional {
                    name @output(out_name: "related_name")
                    out_Animal_OfSpecies {
                        name @output(out_name: "related_animal_species")
                    }
                }
            }
        }
    }"""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_more_than_one_fragment_in_scope(self) -> None:
    """Ensure two inline fragments within the same scope fail to compile."""
    invalid_graphql = """{
        Animal {
            name @output(out_name: "animal_name")
            out_Entity_Related {
                ... on Animal {
                    name @output(out_name: "related_animal_name")
                    out_Animal_OfSpecies {
                        name @output(out_name: "related_animal_species")
                    }
                }
                ... on Species {
                    name @output(out_name: "related_species")
                }
            }
        }
    }"""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_fragment_on_nonexistent_type(self) -> None:
    """Ensure a type coercion to a type absent from the schema fails validation."""
    invalid_graphql = """{
        Animal {
            name @output(out_name: "animal_name")
            out_Entity_Related {
                ... on NonExistentType {
                    name @output(out_name: "related_animal_name")
                    out_Animal_OfSpecies {
                        name @output(out_name: "related_animal_species")
                    }
                }
            }
        }
    }"""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_filter_on_union_type_field(self) -> None:
    """Ensure filtering on a union-typed vertex field fails to compile."""
    # Filtering cannot be applied on a union type, because we don't yet know
    # what fields are available at the location.
    invalid_graphql = """{
        Species {
            name @output(out_name: "species_name")
            out_Species_Eats @filter(op_name: "name_or_alias", value: ["$wanted"]) {
                ... on Food {
                    name @output(out_name: "food_name")
                }
            }
        }
    }"""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_no_output_fields(self) -> None:
    """Ensure a query with no @output directives anywhere fails to compile."""
    # The GraphQL query must have at least one field marked @output, otherwise
    # why query at all if you don't want any results?
    invalid_graphql = """{
        Species {
            out_Species_Eats {
                ... on Food {
                    name @filter(op_name: "=", value: ["$food_name"])
                }
            }
        }
    }"""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_type_coercion_immediately_after_query_root(self) -> None:
    """Ensure a type coercion directly inside the query root fails to compile."""
    # The below pattern of applying a type coercion immediately after specifying a wider
    # type in the query root is nonsensical. Make sure we raise an appropriate error.
    invalid_graphql = """{
        Entity {
            ... on Animal {
                name @output(out_name: "animal")
            }
        }
    }"""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(self.schema, invalid_graphql)
def test_filter_graphql_type_validation(self) -> None:
    """Ensure filter ops applied to fields of an incompatible GraphQL type fail."""
    invalid_queries = [
        # The "=" filter requires a GraphQL leaf type on its left side,
        # but the "alias" field is a List of String. This should cause an error.
        """{
            Animal {
                name @output(out_name: "animal")
                alias @filter(op_name: "=", value: ["$wanted"])
            }
        }""",
        # The "in_collection" filter requires a GraphQL leaf type on its left side,
        # but the "alias" field is a List of String. This should cause an error.
        """{
            Animal {
                name @output(out_name: "animal")
                alias @filter(op_name: "in_collection", value: ["$wanted"])
            }
        }""",
        # The "has_substring" filter requires a GraphQLString type on its left side,
        # but the "alias" field is a List of String. This should cause an error.
        """{
            Animal {
                name @output(out_name: "animal")
                alias @filter(op_name: "has_substring", value: ["$wanted"])
            }
        }""",
        # The "between" filter requires a GraphQL leaf type on its left side,
        # but the "alias" field is a List of String. This should cause an error.
        """{
            Animal {
                name @output(out_name: "animal")
                alias @filter(op_name: "between", value: ["$left", "$right"])
            }
        }""",
        # The "contains" filter requires a GraphQLList on its left side,
        # but the "name" field is a String, not a list. This should cause an error.
        """{
            Animal {
                name @output(out_name: "animal")
                     @filter(op_name: "contains", value: ["$wanted"])
            }
        }""",
    ]
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_input_type_validation(self) -> None:
    """Ensure a variable reused at conflicting inferred types fails to compile."""
    invalid_queries = [
        # The inferred types for "wanted" conflict between String and ID.
        """{
            Animal {
                name @filter(op_name: "=", value: ["$wanted"]) @output(out_name: "name")
                uuid @filter(op_name: "=", value: ["$wanted"])
            }
        }""",
        # The inferred types for "wanted" conflict between String and ID.
        """{
            Animal {
                name @output(out_name: "name")
                uuid @filter(op_name: "=", value: ["$wanted"])
                alias @filter(op_name: "contains", value: ["$wanted"])
            }
        }""",
    ]
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_tag_type_validation(self) -> None:
    """Ensure a tag consumed at a type conflicting with its declaration fails."""
    invalid_queries = [
        # The inferred types for "tagged" conflict between String and ID.
        """{
            Animal {
                name @tag(tag_name: "tagged") @output(out_name: "name")
                out_Animal_ParentOf {
                    uuid @filter(op_name: "=", value: ["%tagged"])
                }
            }
        }""",
        # The inferred types for "tagged" conflict between String and ID.
        """{
            Animal {
                uuid @tag(tag_name: "tagged")
                out_Animal_ParentOf {
                    name @filter(op_name: "=", value: ["%tagged"]) @output(out_name: "name")
                }
            }
        }""",
        # The inferred types for "tagged" conflict between String and ID.
        """{
            Animal {
                uuid @tag(tag_name: "tagged") @output(out_name: "uuid")
                out_Animal_ParentOf {
                    alias @filter(op_name: "contains", value: ["%tagged"])
                }
            }
        }""",
    ]
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_invalid_variable_types(self) -> None:
    """Ensure variables of unsupported list-of-date(time) types fail to compile."""
    # Variables currently cannot represent lists of Dates or lists of DateTimes.
    # Ensure such use of Variables causes compilation errors.
    invalid_queries = [
        # $list_of_dates is, unsurprisingly, a Variable of type List of Date
        """{
            Animal {
                name @output(out_name: "name")
                birthday @filter(op_name: "in_collection", value: ["$list_of_dates"])
            }
        }""",
        # $list_of_datetimes is, unsurprisingly, a Variable of type List of DateTime
        """{
            Event {
                name @output(out_name: "name")
                event_date @filter(op_name: "in_collection", value: ["$list_of_datetimes"])
            }
        }""",
    ]
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_invalid_edge_degree_queries(self) -> None:
    """Ensure invalid uses of the "has_edge_degree" filter fail to compile."""
    invalid_queries = [
        # Can't filter with "has_edge_degree" on the root vertex field -- there's no edge.
        """{
            Animal @filter(op_name: "has_edge_degree", value: ["$degree"]) {
                name @output(out_name: "name")
            }
        }""",
        # Can't filter with "has_edge_degree" on a property field -- there's no edge.
        """{
            Animal {
                name @output(out_name: "name")
                     @filter(op_name: "has_edge_degree", value: ["$degree"])
            }
        }""",
        # Can't filter with "has_edge_degree" on a type coercion -- it has to be on the field.
        """{
            Animal {
                out_Entity_Related {
                    ... on Animal @filter(op_name: "has_edge_degree", value: ["$degree"]) {
                        name @output(out_name: "related")
                    }
                }
            }
        }""",
        # Can't filter with "has_edge_degree" with a tagged value that isn't of Int type.
        """{
            Animal {
                out_Animal_ParentOf {
                    name @output(out_name: "name") @tag(tag_name: "parent")
                }
                out_Animal_OfSpecies @filter(op_name: "has_edge_degree", value: ["%parent"]) {
                    name @output(out_name: "species")
                }
            }
        }""",
        # We currently do not support tagged values as "has_edge_degree" arguments.
        """{
            Animal {
                name @output(out_name: "animal_name")
                out_Animal_OfSpecies @optional {
                    limbs @tag(tag_name: "limb_count")
                }
                out_Animal_ParentOf
                        @filter(op_name: "has_edge_degree", value: ["%limb_count"]) {
                    name @output(out_name: "child_name")
                }
            }
        }""",
    ]
    for invalid_graphql in invalid_queries:
        with self.assertRaises(GraphQLCompilationError):
            graphql_to_ir(self.schema, invalid_graphql)
def test_missing_directives_in_schema(self) -> None:
    """Ensure that validators properly identify missing directives in the schema.

    The schema should contain all directives that are supported by the graphql compiler,
    even if they might not be used in the query. Hence we raise an error when the following
    directive is not declared in the schema: directive @recurse(depth: Int!) on FIELD.
    """
    incomplete_schema_text = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: String!, value: [String!]!) on FIELD | INLINE_FRAGMENT
    directive @tag(tag_name: String!) on FIELD
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD
    directive @optional on FIELD
    directive @fold on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    incomplete_schema = build_ast_schema(parse(incomplete_schema_text))
    # The query itself never uses @recurse; the error comes purely from the
    # schema lacking the directive declaration.
    query = """{
        Animal {
            name @output(out_name: "animal_name")
        }
    }"""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(incomplete_schema, query)
def test_incorrect_directive_locations_in_schema(self) -> None:
    """Ensure appropriate errors are raised if an unsupported directive is declared.

    The schema below declares "@nonexistent", which the compiler does not recognize.
    """
    schema_with_extra_directive = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: String!, value: [String!]!) on FIELD | INLINE_FRAGMENT
    directive @tag(tag_name: String!) on FIELD
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD
    directive @optional on FIELD
    directive @fold on FIELD
    directive @recurse(depth: Int!) on FIELD
    directive @nonexistent on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    parsed_schema_with_extra_directive = build_ast_schema(parse(schema_with_extra_directive))
    query = """{
        Animal {
            name @output(out_name: "animal_name")
        }
    }"""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(parsed_schema_with_extra_directive, query)
def test_directives_on_wrong_fields(self) -> None:
    """Ensure appropriate errors are raised if any directives have the wrong locations.

    Each schema below differs from the correct one in exactly one directive's
    declared locations, and each must fail validation.
    """
    # Change @tag from FIELD to INLINE_FRAGMENT
    schema_with_wrong_directive_on_inline_fragment = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: String!, value: [String!]!) on FIELD | INLINE_FRAGMENT
    directive @tag(tag_name: String!) on INLINE_FRAGMENT
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD
    directive @optional on FIELD
    directive @fold on FIELD
    directive @recurse(depth: Int!) on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    # Remove INLINE_FRAGMENT from @filter
    schema_with_directive_missing_location = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: String!, value: [String!]!) on FIELD
    directive @tag(tag_name: String!) on FIELD
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD
    directive @optional on FIELD
    directive @fold on FIELD
    directive @recurse(depth: Int!) on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    # Change @output_source from FIELD to FIELD | INLINE_FRAGMENT
    # Bug fix: this schema previously reused the name "schema_with_directive_missing_location",
    # shadowing the schema above so the "missing location" case was never actually tested.
    schema_with_directive_extra_location = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: String!, value: [String!]!) on FIELD | INLINE_FRAGMENT
    directive @tag(tag_name: String!) on FIELD
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD | INLINE_FRAGMENT
    directive @optional on FIELD
    directive @fold on FIELD
    directive @recurse(depth: Int!) on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    incorrect_schemas = [
        schema_with_wrong_directive_on_inline_fragment,
        schema_with_directive_missing_location,
        schema_with_directive_extra_location,
    ]
    query = """{
        Animal {
            name @output(out_name: "animal_name")
        }
    }"""
    for schema in incorrect_schemas:
        parsed_incorrect_schema = build_ast_schema(parse(schema))
        with self.assertRaises(GraphQLValidationError):
            graphql_to_ir(parsed_incorrect_schema, query)
def test_directives_with_incorrect_arguments(self) -> None:
    """Ensure that proper errors are raised if directives are provided with incorrect args."""
    # Change @filter arg from String! to Int!
    schema_with_incorrect_args = """
    schema {
        query: RootSchemaQuery
    }
    directive @filter(op_name: Int!, value: [String!]!) on FIELD | INLINE_FRAGMENT
    directive @tag(tag_name: String!) on INLINE_FRAGMENT
    directive @output(out_name: String!) on FIELD
    directive @output_source on FIELD
    directive @optional on FIELD
    directive @fold on FIELD
    directive @recurse(depth: Int!) on FIELD
    type Animal {
        name: String
    }
    type RootSchemaQuery {
        Animal: Animal
    }
    """
    parsed_incorrect_schema = build_ast_schema(parse(schema_with_incorrect_args))
    query = """{
        Animal {
            name @output(out_name: "animal_name")
        }
    }"""
    with self.assertRaises(GraphQLValidationError):
        graphql_to_ir(parsed_incorrect_schema, query)
def test_with_noninvertible_hints(self) -> None:
    """Ensure TypeError is raised when the hints are non-invertible."""
    valid_graphql_input = """{
        Animal {
            name @output(out_name: "animal_name")
            out_Entity_Related @fold {
                ... on Entity {
                    name @output(out_name: "related_entities")
                }
            }
        }
    }"""
    # Two keys map to the same value, so the hint mapping cannot be inverted.
    invalid_type_equivalence_hint_data = {
        "Event": "Union__BirthEvent__Event__FeedingEvent",
        "BirthEvent": "Union__BirthEvent__Event__FeedingEvent",
    }
    invalid_type_equivalence_hints: TypeEquivalenceHintsType = cast(
        TypeEquivalenceHintsType,
        {
            self.schema.get_type(key): self.schema.get_type(value)
            for key, value in invalid_type_equivalence_hint_data.items()
        },
    )
    with self.assertRaises(TypeError):
        graphql_to_ir(
            self.schema,
            valid_graphql_input,
            type_equivalence_hints=invalid_type_equivalence_hints,
        )
def test_filter_and_tag_on_same_field(self) -> None:
    """Ensure a field cannot filter on a tag declared on that same field."""
    invalid_graphql_input = """{
        Animal {
            out_Entity_Related {
                name @output(out_name: "related_name")
                     @tag(tag_name: "name")
                     @filter(op_name: "has_substring", value: ["%name"])
            }
        }
    }"""
    with self.assertRaises(GraphQLCompilationError):
        graphql_to_ir(self.schema, invalid_graphql_input, type_equivalence_hints=None)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Support for "safe" evaluation of Python expressions."""
import __builtin__
from compiler import ast, parse
from compiler.pycodegen import ExpressionCodeGenerator, ModuleCodeGenerator
import new
try:
set
except NameError:
from sets import ImmutableSet as frozenset
from sets import Set as set
from textwrap import dedent
from genshi.core import Markup
from genshi.template.base import TemplateRuntimeError
from genshi.util import flatten
__all__ = ['Code', 'Expression', 'Suite', 'LenientLookup', 'StrictLookup',
'Undefined', 'UndefinedError']
__docformat__ = 'restructuredtext en'
# Check for a Python 2.4 bug in the eval loop: on affected versions, executing
# a star import into a non-dict namespace raises SystemError. When the flag is
# set, star imports presumably get routed through _star_import_patch() below
# instead -- TODO confirm against the rest of this module.
has_star_import_bug = False
try:
    class _FakeMapping(object):
        __getitem__ = __setitem__ = lambda *a: None
    exec 'from sys import *' in {}, _FakeMapping()
except SystemError:
    has_star_import_bug = True
except TypeError:
    pass # Python 2.3: raises TypeError here rather than SystemError
del _FakeMapping
def _star_import_patch(mapping, modname):
"""This function is used as helper if a Python version with a broken
star-import opcode is in use.
"""
module = __import__(modname, None, None, ['__all__'])
if hasattr(module, '__all__'):
members = module.__all__
else:
members = [x for x in module.__dict__ if not x.startswith('_')]
mapping.update([(name, getattr(module, name)) for name in members])
class Code(object):
    """Abstract base class for the `Expression` and `Suite` classes."""
    # Subclasses are expected to define a class attribute ``mode``
    # ('eval' or 'exec') that controls parsing and compilation.
    __slots__ = ['source', 'code', 'ast', '_globals']

    def __init__(self, source, filename=None, lineno=-1, lookup='strict',
                 xform=None):
        """Create the code object, either from a string, or from an AST node.

        :param source: either a string containing the source code, or an AST
                       node
        :param filename: the (preferably absolute) name of the file containing
                         the code
        :param lineno: the number of the line on which the code was found
        :param lookup: the lookup class that defines how variables are looked
                       up in the context; can be either "strict" (the default),
                       "lenient", or a custom lookup class
        :param xform: the AST transformer that should be applied to the code;
                      if `None`, the appropriate transformation is chosen
                      depending on the mode
        """
        if isinstance(source, basestring):
            self.source = source
            node = _parse(source, mode=self.mode)
        else:
            assert isinstance(source, ast.Node), \
                'Expected string or AST node, but got %r' % source
            # No original text is available when built from an AST node.
            self.source = '?'
            if self.mode == 'eval':
                node = ast.Expression(source)
            else:
                node = ast.Module(None, source)
        self.ast = node
        self.code = _compile(node, self.source, mode=self.mode,
                             filename=filename, lineno=lineno, xform=xform)
        # NOTE: lookup=None selects the lenient lookup, even though the
        # documented default string is 'strict'.
        if lookup is None:
            lookup = LenientLookup
        elif isinstance(lookup, basestring):
            lookup = {'lenient': LenientLookup, 'strict': StrictLookup}[lookup]
        self._globals = lookup.globals

    def __getstate__(self):
        # Code objects can't be pickled directly, so serialize the raw fields
        # needed to reconstruct one via new.code() in __setstate__.
        state = {'source': self.source, 'ast': self.ast,
                 'lookup': self._globals.im_self}
        c = self.code
        state['code'] = (c.co_nlocals, c.co_stacksize, c.co_flags, c.co_code,
                         c.co_consts, c.co_names, c.co_varnames, c.co_filename,
                         c.co_name, c.co_firstlineno, c.co_lnotab, (), ())
        return state

    def __setstate__(self, state):
        self.source = state['source']
        self.ast = state['ast']
        self.code = new.code(0, *state['code'])
        self._globals = state['lookup'].globals

    def __eq__(self, other):
        # Equality is based on the compiled code object, not the source text.
        return (type(other) == type(self)) and (self.code == other.code)

    def __hash__(self):
        return hash(self.code)

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.source)
class Expression(Code):
    """Evaluates Python expressions used in templates.
    >>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
    >>> Expression('test').evaluate(data)
    'Foo'
    >>> Expression('items[0]').evaluate(data)
    1
    >>> Expression('items[-1]').evaluate(data)
    3
    >>> Expression('dict["some"]').evaluate(data)
    'thing'
    Similar to e.g. Javascript, expressions in templates can use the dot
    notation for attribute access to access items in mappings:
    >>> Expression('dict.some').evaluate(data)
    'thing'
    This also works the other way around: item access can be used to access
    any object attribute:
    >>> class MyClass(object):
    ...     myattr = 'Bar'
    >>> data = dict(mine=MyClass(), key='myattr')
    >>> Expression('mine.myattr').evaluate(data)
    'Bar'
    >>> Expression('mine["myattr"]').evaluate(data)
    'Bar'
    >>> Expression('mine[key]').evaluate(data)
    'Bar'
    All of the standard Python operators are available to template expressions.
    Built-in functions such as ``len()`` are also available in template
    expressions:
    >>> data = dict(items=[1, 2, 3])
    >>> Expression('len(items)').evaluate(data)
    3
    """
    __slots__ = []
    # 'eval' makes the base class compile the source as a single expression.
    mode = 'eval'
    def evaluate(self, data):
        """Evaluate the expression against the given data dictionary.
        :param data: a mapping containing the data to evaluate against
        :return: the result of the evaluation
        """
        # Marker consumed by traceback-filtering tools to hide this frame
        # from template error reports (a convention, not a language feature).
        __traceback_hide__ = 'before_and_this'
        _globals = self._globals(data)
        # The transformed code looks variables up through '__data__'.
        return eval(self.code, _globals, {'__data__': data})
class Suite(Code):
    """Executes Python statements used in templates.
    >>> data = dict(test='Foo', items=[1, 2, 3], dict={'some': 'thing'})
    >>> Suite("foo = dict['some']").execute(data)
    >>> data['foo']
    'thing'
    """
    __slots__ = []
    # 'exec' makes the base class compile the source as a statement suite.
    mode = 'exec'
    def execute(self, data):
        """Execute the suite in the given data dictionary.
        :param data: a mapping containing the data to execute in
        """
        # Marker consumed by traceback-filtering tools to hide this frame.
        __traceback_hide__ = 'before_and_this'
        _globals = self._globals(data)
        # `data` is used as the locals mapping, so assignments made by the
        # suite become visible directly in the caller's data dict.
        exec self.code in _globals, data
# Sentinel marking "no value given"; used instead of None because None is a
# perfectly legitimate template value.
UNDEFINED = object()
class UndefinedError(TemplateRuntimeError):
    """Exception thrown when a template expression attempts to access a variable
    not defined in the context.
    :see: `LenientLookup`, `StrictLookup`
    """
    def __init__(self, name, owner=UNDEFINED):
        # When an owner is supplied, the name was accessed as a member of it,
        # which yields a more specific error message.
        if owner is not UNDEFINED:
            message = '%s has no member named "%s"' % (repr(owner), name)
        else:
            message = '"%s" not defined' % name
        TemplateRuntimeError.__init__(self, message)
class Undefined(object):
    """Represents a reference to an undefined variable.
    Unlike the Python runtime, template expressions can refer to an undefined
    variable without causing a `NameError` to be raised. The result will be an
    instance of the `Undefined` class, which is treated the same as ``False`` in
    conditions, but raise an exception on any other operation:
    >>> foo = Undefined('foo')
    >>> bool(foo)
    False
    >>> list(foo)
    []
    >>> print foo
    undefined
    However, calling an undefined variable, or trying to access an attribute
    of that variable, will raise an exception that includes the name used to
    reference that undefined variable.
    >>> foo('bar')
    Traceback (most recent call last):
        ...
    UndefinedError: "foo" not defined
    >>> foo.bar
    Traceback (most recent call last):
        ...
    UndefinedError: "foo" not defined
    :see: `LenientLookup`
    """
    __slots__ = ['_name', '_owner']
    def __init__(self, name, owner=UNDEFINED):
        """Initialize the object.
        :param name: the name of the reference
        :param owner: the owning object, if the variable is accessed as a member
        """
        self._name = name
        self._owner = owner
    def __iter__(self):
        # Iterating an undefined value yields nothing instead of failing.
        return iter([])
    def __nonzero__(self):
        # Python 2 truth-value hook: undefined values are falsy.
        return False
    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self._name)
    def __str__(self):
        return 'undefined'
    def _die(self, *args, **kwargs):
        """Raise an `UndefinedError`."""
        # Frame-hiding marker for traceback filtering (convention).
        __traceback_hide__ = True
        raise UndefinedError(self._name, self._owner)
    # Any use beyond truth testing / iteration raises with the original name.
    __call__ = __getattr__ = __getitem__ = _die
class LookupBase(object):
    """Abstract base class for variable lookup implementations.

    The AST transformers rewrite template code so that every name,
    attribute and subscript access goes through the ``_lookup_*``
    callbacks injected by `globals`. Subclasses define `undefined` to
    decide what happens when a lookup fails.
    """
    def globals(cls, data):
        """Construct the globals dictionary to use as the execution context for
        the expression or suite.
        """
        return {
            '__data__': data,
            '_lookup_name': cls.lookup_name,
            '_lookup_attr': cls.lookup_attr,
            '_lookup_item': cls.lookup_item,
            '_star_import_patch': _star_import_patch,
            'UndefinedError': UndefinedError,
        }
    # classmethod() calls instead of decorators: kept for Python 2.3 compat.
    globals = classmethod(globals)
    def lookup_name(cls, data, name):
        # Resolution order: template data, then builtins, then `undefined`.
        __traceback_hide__ = True
        val = data.get(name, UNDEFINED)
        if val is UNDEFINED:
            val = BUILTINS.get(name, val)
            if val is UNDEFINED:
                val = cls.undefined(name)
        return val
    lookup_name = classmethod(lookup_name)
    def lookup_attr(cls, obj, key):
        # Attribute access falls back to item access (dot notation on
        # mappings); a genuine AttributeError on a declared class attribute
        # is re-raised unchanged.
        __traceback_hide__ = True
        try:
            val = getattr(obj, key)
        except AttributeError:
            if hasattr(obj.__class__, key):
                raise
            else:
                try:
                    val = obj[key]
                except (KeyError, TypeError):
                    val = cls.undefined(key, owner=obj)
        return val
    lookup_attr = classmethod(lookup_attr)
    def lookup_item(cls, obj, key):
        # `key` arrives as a tuple of subscripts; unwrap single subscripts.
        __traceback_hide__ = True
        if len(key) == 1:
            key = key[0]
        try:
            return obj[key]
        except (AttributeError, KeyError, IndexError, TypeError), e:
            # Item access falls back to attribute access for string keys.
            if isinstance(key, basestring):
                val = getattr(obj, key, UNDEFINED)
                if val is UNDEFINED:
                    val = cls.undefined(key, owner=obj)
                return val
            raise
    lookup_item = classmethod(lookup_item)
    def undefined(cls, key, owner=UNDEFINED):
        """Can be overridden by subclasses to specify behavior when undefined
        variables are accessed.
        :param key: the name of the variable
        :param owner: the owning object, if the variable is accessed as a member
        """
        raise NotImplementedError
    undefined = classmethod(undefined)
class LenientLookup(LookupBase):
    """Default variable lookup mechanism for expressions.
    When an undefined variable is referenced using this lookup style, the
    reference evaluates to an instance of the `Undefined` class:
    >>> expr = Expression('nothing', lookup='lenient')
    >>> undef = expr.evaluate({})
    >>> undef
    <Undefined 'nothing'>
    The same will happen when a non-existing attribute or item is accessed on
    an existing object:
    >>> expr = Expression('something.nil', lookup='lenient')
    >>> expr.evaluate({'something': dict()})
    <Undefined 'nil'>
    See the documentation of the `Undefined` class for details on the behavior
    of such objects.
    :see: `StrictLookup`
    """
    def undefined(cls, key, owner=UNDEFINED):
        """Return an ``Undefined`` object."""
        __traceback_hide__ = True
        # Defer the error: the placeholder only raises if actually used.
        return Undefined(key, owner=owner)
    undefined = classmethod(undefined)
class StrictLookup(LookupBase):
    """Strict variable lookup mechanism for expressions.
    Referencing an undefined variable using this lookup style will immediately
    raise an ``UndefinedError``:
    >>> expr = Expression('nothing', lookup='strict')
    >>> expr.evaluate({})
    Traceback (most recent call last):
        ...
    UndefinedError: "nothing" not defined
    The same happens when a non-existing attribute or item is accessed on an
    existing object:
    >>> expr = Expression('something.nil', lookup='strict')
    >>> expr.evaluate({'something': dict()})
    Traceback (most recent call last):
        ...
    UndefinedError: {} has no member named "nil"
    """
    def undefined(cls, key, owner=UNDEFINED):
        """Raise an ``UndefinedError`` immediately."""
        __traceback_hide__ = True
        # Fail fast instead of returning an Undefined placeholder.
        raise UndefinedError(key, owner=owner)
    undefined = classmethod(undefined)
def _parse(source, mode='eval'):
    """Parse *source* into a `compiler` AST for the given *mode*.

    For 'exec' suites, the body is re-indented so that a suite whose first
    line opens a block (ends with ':') but whose remaining lines were
    dedented by template extraction still parses.

    :param source: the source text (str or unicode)
    :param mode: 'eval' for expressions, 'exec' for statement suites
    :return: the parsed AST
    """
    source = source.strip()
    if mode == 'exec':
        lines = [line.expandtabs() for line in source.splitlines()]
        if lines:
            first = lines[0]
            rest = dedent('\n'.join(lines[1:])).rstrip()
            # Bug fix: guard against an empty body -- previously rest[0]
            # raised an IndexError for a one-line block opener like "if x:".
            if rest and first.rstrip().endswith(':') and not rest[0].isspace():
                rest = '\n'.join(['    %s' % line for line in rest.splitlines()])
            source = '\n'.join([first, rest])
    if isinstance(source, unicode):
        # The parser does not accept unicode directly: encode as UTF-8 and
        # prefix a BOM so the bytes are decoded correctly.
        source = '\xef\xbb\xbf' + source.encode('utf-8')
    return parse(source, mode)
def _compile(node, source=None, mode='eval', filename=None, lineno=-1,
             xform=None):
    # Pick the AST transformer: expressions get the variant that also
    # rewrites attribute/item access; everything else the template one.
    if xform is None:
        xform = {'eval': ExpressionASTTransformer}.get(mode,
                                                       TemplateASTTransformer)
    tree = xform().visit(node)
    if isinstance(filename, unicode):
        # unicode file names not allowed for code objects
        filename = filename.encode('utf-8', 'replace')
    elif not filename:
        filename = '<string>'
    tree.filename = filename
    if lineno <= 0:
        lineno = 1
    if mode == 'eval':
        gen = ExpressionCodeGenerator(tree)
        name = '<Expression %r>' % (source or '?')
    else:
        gen = ModuleCodeGenerator(tree)
        # Use the first source line (with an ellipsis marker when there are
        # more lines) as the code object's display name.
        lines = source.splitlines()
        if not lines:
            extract = ''
        else:
            extract = lines[0]
            if len(lines) > 1:
                extract += ' ...'
        name = '<Suite %r>' % (extract)
    gen.optimized = True
    code = gen.getCode()
    # We'd like to just set co_firstlineno, but it's readonly. So we need to
    # clone the code object while adjusting the line number
    # (leading 0 is the argument count; 0x0040 is CO_NOFREE in CPython 2).
    return new.code(0, code.co_nlocals, code.co_stacksize,
                    code.co_flags | 0x0040, code.co_code, code.co_consts,
                    code.co_names, code.co_varnames, filename, name, lineno,
                    code.co_lnotab, (), ())
# Builtins made available to template code, extended with the Markup and
# Undefined types used by the template engine.
BUILTINS = __builtin__.__dict__.copy()
BUILTINS.update({'Markup': Markup, 'Undefined': Undefined})
# Names treated as constants: never rewritten into context lookups.
CONSTANTS = frozenset(['False', 'True', 'None', 'NotImplemented', 'Ellipsis'])
class ASTTransformer(object):
    """General purpose base class for AST transformations.
    Every visitor method can be overridden to return an AST node that has been
    altered or replaced in some way.

    The visitors below mechanically rebuild each node kind of the (Python 2)
    `compiler.ast` module via `_clone`, recursing into child nodes with
    `visit`, so that subclasses only need to override the node kinds they
    care about.
    """
    def visit(self, node):
        # Dispatch on the node's class name; tuples of nodes are visited
        # element-wise, and None passes through unchanged.
        if node is None:
            return None
        if type(node) is tuple:
            return tuple([self.visit(n) for n in node])
        visitor = getattr(self, 'visit%s' % node.__class__.__name__,
                          self._visitDefault)
        return visitor(node)
    def _clone(self, node, *args):
        # Rebuild a node of the same class with new children, preserving the
        # original line number when one is present.
        lineno = getattr(node, 'lineno', None)
        node = node.__class__(*args)
        if lineno is not None:
            node.lineno = lineno
        if isinstance(node, (ast.Class, ast.Function, ast.Lambda)) or \
                hasattr(ast, 'GenExpr') and isinstance(node, ast.GenExpr):
            node.filename = '<string>' # workaround for bug in pycodegen
        return node
    def _visitDefault(self, node):
        # Unknown node kinds are returned untouched.
        return node
    def visitExpression(self, node):
        return self._clone(node, self.visit(node.node))
    def visitModule(self, node):
        return self._clone(node, node.doc, self.visit(node.node))
    def visitStmt(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes])
    # Classes, Functions & Accessors
    def visitCallFunc(self, node):
        return self._clone(node, self.visit(node.node),
            [self.visit(x) for x in node.args],
            node.star_args and self.visit(node.star_args) or None,
            node.dstar_args and self.visit(node.dstar_args) or None
        )
    def visitClass(self, node):
        return self._clone(node, node.name, [self.visit(x) for x in node.bases],
            node.doc, self.visit(node.code)
        )
    def visitFrom(self, node):
        if not has_star_import_bug or node.names != [('*', None)]:
            # This is a Python 2.4 bug. Only if we have a broken Python
            # version we have to apply the hack
            return node
        # Replace "from mod import *" with a call to _star_import_patch,
        # which copies the module's names into __data__ at runtime.
        new_node = ast.Discard(ast.CallFunc(
            ast.Name('_star_import_patch'),
            [ast.Name('__data__'), ast.Const(node.modname)], None, None
        ))
        if hasattr(node, 'lineno'): # No lineno in Python 2.3
            new_node.lineno = node.lineno
        return new_node
    def visitFunction(self, node):
        # Decorators only exist on Python versions that support them.
        args = []
        if hasattr(node, 'decorators'):
            args.append(self.visit(node.decorators))
        return self._clone(node, *args + [
            node.name,
            node.argnames,
            [self.visit(x) for x in node.defaults],
            node.flags,
            node.doc,
            self.visit(node.code)
        ])
    def visitGetattr(self, node):
        return self._clone(node, self.visit(node.expr), node.attrname)
    def visitLambda(self, node):
        node = self._clone(node, node.argnames,
            [self.visit(x) for x in node.defaults], node.flags,
            self.visit(node.code)
        )
        return node
    def visitSubscript(self, node):
        return self._clone(node, self.visit(node.expr), node.flags,
            [self.visit(x) for x in node.subs]
        )
    # Statements
    def visitAssert(self, node):
        return self._clone(node, self.visit(node.test), self.visit(node.fail))
    def visitAssign(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes],
            self.visit(node.expr)
        )
    def visitAssAttr(self, node):
        return self._clone(node, self.visit(node.expr), node.attrname,
            node.flags
        )
    def visitAugAssign(self, node):
        return self._clone(node, self.visit(node.node), node.op,
            self.visit(node.expr)
        )
    def visitDecorators(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes])
    def visitExec(self, node):
        return self._clone(node, self.visit(node.expr), self.visit(node.locals),
            self.visit(node.globals)
        )
    def visitFor(self, node):
        return self._clone(node, self.visit(node.assign), self.visit(node.list),
            self.visit(node.body), self.visit(node.else_)
        )
    def visitIf(self, node):
        return self._clone(node, [self.visit(x) for x in node.tests],
            self.visit(node.else_)
        )
    def _visitPrint(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes],
            self.visit(node.dest)
        )
    visitPrint = visitPrintnl = _visitPrint
    def visitRaise(self, node):
        return self._clone(node, self.visit(node.expr1), self.visit(node.expr2),
            self.visit(node.expr3)
        )
    def visitReturn(self, node):
        return self._clone(node, self.visit(node.value))
    def visitTryExcept(self, node):
        return self._clone(node, self.visit(node.body), self.visit(node.handlers),
            self.visit(node.else_)
        )
    def visitTryFinally(self, node):
        return self._clone(node, self.visit(node.body), self.visit(node.final))
    def visitWhile(self, node):
        return self._clone(node, self.visit(node.test), self.visit(node.body),
            self.visit(node.else_)
        )
    def visitWith(self, node):
        return self._clone(node, self.visit(node.expr),
            [self.visit(x) for x in node.vars], self.visit(node.body)
        )
    def visitYield(self, node):
        return self._clone(node, self.visit(node.value))
    # Operators
    def _visitBoolOp(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes])
    visitAnd = visitOr = visitBitand = visitBitor = visitBitxor = _visitBoolOp
    visitAssTuple = visitAssList = _visitBoolOp
    def _visitBinOp(self, node):
        return self._clone(node,
            (self.visit(node.left), self.visit(node.right))
        )
    visitAdd = visitSub = _visitBinOp
    visitDiv = visitFloorDiv = visitMod = visitMul = visitPower = _visitBinOp
    visitLeftShift = visitRightShift = _visitBinOp
    def visitCompare(self, node):
        return self._clone(node, self.visit(node.expr),
            [(op, self.visit(n)) for op, n in node.ops]
        )
    def _visitUnaryOp(self, node):
        return self._clone(node, self.visit(node.expr))
    visitUnaryAdd = visitUnarySub = visitNot = visitInvert = _visitUnaryOp
    visitBackquote = visitDiscard = _visitUnaryOp
    def visitIfExp(self, node):
        return self._clone(node, self.visit(node.test), self.visit(node.then),
            self.visit(node.else_)
        )
    # Identifiers, Literals and Comprehensions
    def visitDict(self, node):
        return self._clone(node,
            [(self.visit(k), self.visit(v)) for k, v in node.items]
        )
    def visitGenExpr(self, node):
        return self._clone(node, self.visit(node.code))
    def visitGenExprFor(self, node):
        return self._clone(node, self.visit(node.assign), self.visit(node.iter),
            [self.visit(x) for x in node.ifs]
        )
    def visitGenExprIf(self, node):
        return self._clone(node, self.visit(node.test))
    def visitGenExprInner(self, node):
        quals = [self.visit(x) for x in node.quals]
        return self._clone(node, self.visit(node.expr), quals)
    def visitKeyword(self, node):
        return self._clone(node, node.name, self.visit(node.expr))
    def visitList(self, node):
        return self._clone(node, [self.visit(n) for n in node.nodes])
    def visitListComp(self, node):
        quals = [self.visit(x) for x in node.quals]
        return self._clone(node, self.visit(node.expr), quals)
    def visitListCompFor(self, node):
        return self._clone(node, self.visit(node.assign), self.visit(node.list),
            [self.visit(x) for x in node.ifs]
        )
    def visitListCompIf(self, node):
        return self._clone(node, self.visit(node.test))
    def visitSlice(self, node):
        return self._clone(node, self.visit(node.expr), node.flags,
            node.lower and self.visit(node.lower) or None,
            node.upper and self.visit(node.upper) or None
        )
    def visitSliceobj(self, node):
        return self._clone(node, [self.visit(x) for x in node.nodes])
    def visitTuple(self, node):
        return self._clone(node, [self.visit(n) for n in node.nodes])
class TemplateASTTransformer(ASTTransformer):
    """Concrete AST transformer that implements the AST transformations needed
    for code embedded in templates.

    It maintains a stack of per-scope local-name sets (`self.locals`);
    names found on the stack are left alone, any other name reference is
    rewritten into a `_lookup_name` call against the template context.
    """
    def __init__(self):
        # Bottom of the stack: names that are always treated as constants.
        self.locals = [CONSTANTS]
    def visitConst(self, node):
        if isinstance(node.value, str):
            try: # If the string is ASCII, return a `str` object
                node.value.decode('ascii')
            except ValueError: # Otherwise return a `unicode` object
                # (UnicodeDecodeError is a subclass of ValueError.)
                return ast.Const(node.value.decode('utf-8'))
        return node
    def visitAssName(self, node):
        # Record assigned names in the innermost scope, but only inside a
        # nested scope (the stack bottom holds the constants).
        if len(self.locals) > 1:
            self.locals[-1].add(node.name)
        return node
    def visitAugAssign(self, node):
        # Augmented assignment to a context variable is rewritten into a
        # subscript on __data__, guarded so that an unset name raises
        # UndefinedError instead of KeyError.
        if isinstance(node.node, ast.Name) \
                and node.node.name not in flatten(self.locals):
            name = node.node.name
            node.node = ast.Subscript(ast.Name('__data__'), 'OP_APPLY',
                                      [ast.Const(name)])
            node.expr = self.visit(node.expr)
            return ast.If([
                (ast.Compare(ast.Const(name), [('in', ast.Name('__data__'))]),
                 ast.Stmt([node]))],
                ast.Stmt([ast.Raise(ast.CallFunc(ast.Name('UndefinedError'),
                                                 [ast.Const(name)]),
                                    None, None)]))
        else:
            return ASTTransformer.visitAugAssign(self, node)
    def visitClass(self, node):
        # The class name is local to the enclosing scope; the class body
        # opens a new scope on the stack.
        if len(self.locals) > 1:
            self.locals[-1].add(node.name)
        self.locals.append(set())
        try:
            return ASTTransformer.visitClass(self, node)
        finally:
            self.locals.pop()
    def visitFor(self, node):
        # Loop targets live in their own scope while the loop is visited.
        self.locals.append(set())
        try:
            return ASTTransformer.visitFor(self, node)
        finally:
            self.locals.pop()
    def visitFunction(self, node):
        if len(self.locals) > 1:
            self.locals[-1].add(node.name)
        # Function parameters are locals of the function body scope.
        self.locals.append(set(node.argnames))
        try:
            return ASTTransformer.visitFunction(self, node)
        finally:
            self.locals.pop()
    def visitGenExpr(self, node):
        self.locals.append(set())
        try:
            return ASTTransformer.visitGenExpr(self, node)
        finally:
            self.locals.pop()
    def visitLambda(self, node):
        # argnames may contain nested tuples (tuple parameters), so flatten.
        self.locals.append(set(flatten(node.argnames)))
        try:
            return ASTTransformer.visitLambda(self, node)
        finally:
            self.locals.pop()
    def visitListComp(self, node):
        self.locals.append(set())
        try:
            return ASTTransformer.visitListComp(self, node)
        finally:
            self.locals.pop()
    def visitName(self, node):
        # If the name refers to a local inside a lambda, list comprehension, or
        # generator expression, leave it alone
        if node.name not in flatten(self.locals):
            # Otherwise, translate the name ref into a context lookup
            func_args = [ast.Name('__data__'), ast.Const(node.name)]
            node = ast.CallFunc(ast.Name('_lookup_name'), func_args)
        return node
class ExpressionASTTransformer(TemplateASTTransformer):
    """Concrete AST transformer that implements the AST transformations needed
    for code embedded in templates.

    In addition to the name rewriting of the base class, expressions also
    route attribute and item access through the lookup callbacks, which is
    what enables dotted access on mappings and item access on attributes.
    """
    def visitGetattr(self, node):
        # obj.attr  ->  _lookup_attr(obj, 'attr')
        return ast.CallFunc(ast.Name('_lookup_attr'), [
            self.visit(node.expr),
            ast.Const(node.attrname)
        ])
    def visitSubscript(self, node):
        # obj[a, b]  ->  _lookup_item(obj, (a, b)); single subscripts are
        # unwrapped from the tuple inside lookup_item.
        return ast.CallFunc(ast.Name('_lookup_item'), [
            self.visit(node.expr),
            ast.Tuple([self.visit(sub) for sub in node.subs])
        ])
|
|
# -*- test-case-name: piped.processing.test.test_utilprocessors -*-
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
""" Utility processors that are useful in many contexts. """
import collections
import copy
import logging
import pdb
import pprint
import sys
import tempfile
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.python import failure, reflect
from zope import interface
from piped import exceptions, util, processing
from piped.processors import base
logger = logging.getLogger(__name__)
class MergeWithDictProcessor(base.Processor):
    """ Processor that merges the baton with the provided dictionary.

    Expects a kw-argument "dict", which is the dictionary to merge.
    """
    name = 'merge-with-dict'
    interface.classProvides(processing.IProcessor)

    def __init__(self, dict=None, merge_args=None, **kw):
        """
        :param dict: The dictionary merged into the baton.
        :param merge_args: Keyword arguments forwarded to `util.merge_dicts`.
        """
        super(MergeWithDictProcessor, self).__init__(**kw)
        # NOTE: the parameter is named "dict" for configuration compatibility,
        # which shadows the builtin. The previous implementation called
        # dict() after the shadowing, invoking the argument (or None) and
        # raising a TypeError whenever either argument was omitted -- use
        # literals here instead.
        self.dict = dict if dict is not None else {}
        self.merge_args = merge_args if merge_args is not None else {}
        # Defaulting to true because most of the time we expect the
        # dictionary we pass into a pipeline to be the same dictionary
        # *instance* that is sent to every processor.
        self.merge_args.setdefault('inline', True)

    def process(self, baton):
        """ Merge the configured dictionary into the baton and return it. """
        return util.merge_dicts(baton, self.dict, **self.merge_args)
class CallNamedAny(base.Processor):
    """ Calls a named anything. """
    interface.classProvides(processing.IProcessor)
    name = 'call-named-any'

    def __init__(self, name, output_path='result', args=tuple(), kwargs=None, **kw):
        """
        :param name: Fully qualified name of the callable to invoke.
        :param output_path: Baton path that receives the result. The empty
            string makes the result replace the baton entirely.
        :param args: Positional arguments passed to the callable.
        :param kwargs: Keyword arguments passed to the callable.
        """
        super(CallNamedAny, self).__init__(**kw)
        self.name = name
        self.output_path = output_path
        self.args = args
        self.kwargs = kwargs if kwargs is not None else dict()

    def process(self, baton):
        """ Resolve the named callable, invoke it and store the result. """
        target = reflect.namedAny(self.name)
        result = target(*self.args, **self.kwargs)
        if self.output_path == '':
            return result
        util.dict_set_path(baton, self.output_path, result)
        return baton
class CallbackDeferred(base.Processor):
    """ Callbacks a deferred. """
    interface.classProvides(processing.IProcessor)
    name = 'callback-deferred'

    def __init__(self, deferred_path='deferred', result=Ellipsis, result_path='result', **kw):
        """
        :param deferred_path: The path to the deferred inside the baton.
        :param result: The result used to callback the deferred. This takes precedence
            over ``result_path``
        :param result_path: A path to the result in the baton.
        """
        super(CallbackDeferred, self).__init__(**kw)
        self.deferred_path = deferred_path
        self.result = result
        self.result_path = result_path

    def process(self, baton):
        """ Fire the deferred found in the baton, then pass the baton on. """
        # Ellipsis acts as the "not configured" sentinel: fall back to the
        # value stored in the baton when no explicit result was given.
        outcome = self.result
        if outcome is Ellipsis:
            outcome = util.dict_get_path(baton, self.result_path)
        util.dict_get_path(baton, self.deferred_path).callback(outcome)
        return baton
class Shutdown(base.Processor):
    """ Stops the piped process. """
    interface.classProvides(processing.IProcessor)
    name = 'shutdown'
    def configure(self, runtime_environment):
        # Keep a reference to the application so its services can be stopped.
        self.application = runtime_environment.application
    @defer.inlineCallbacks
    def process(self, baton):
        # Stop all application services before halting the reactor, giving
        # them a chance to clean up.
        yield service.IService(self.application).stopService()
        yield reactor.stop()
        defer.returnValue(baton)
class CoroutineWrapper(base.Processor):
    """ Passes batons to the wrapped coroutine. """
    name = 'wrap-coroutine'

    def __init__(self, coroutine, **kw):
        """
        :param coroutine: A coroutine that receives each baton via ``send``
            and yields the processed result. It must already have been
            advanced to its first yield, since ``send`` with a non-None
            value fails on an unstarted generator.
        """
        super(CoroutineWrapper, self).__init__(**kw)
        self.coroutine = coroutine

    def process(self, baton):
        """ Send the baton into the coroutine and return what it yields. """
        return self.coroutine.send(baton)
class RemapProcessor(base.MappingProcessor):
    """ Remaps a dictionary.

    Expects to be instantiated with a dictionary that copies values at one path to another one.
    For example, giving the mapping `{'b.c': 'a'}` and the baton
    `dict(b=dict(c='d'))`, the output will be
    `dict(a='d', b=dict(c='d'))`.
    """
    name = 'remap'
    interface.classProvides(processing.IProcessor)

    def __init__(self, extend=False, copy=False, deep_copy=False, **kw):
        """
        :param extend: Append the value to a list at the output path instead
            of overwriting it.
        :param copy: Shallow-copy the value before storing it.
        :param deep_copy: Deep-copy the value before storing it; takes
            precedence over ``copy``.
        """
        super(RemapProcessor, self).__init__(**kw)
        self.extend = extend
        self.copy = copy
        self.deep_copy = deep_copy

    def process_mapping(self, input, input_path, output_path, baton, **additional_kwargs):
        """ Return the (possibly copied) value to store at *output_path*. """
        if self.deep_copy:
            result = copy.deepcopy(input)
        elif self.copy:
            result = copy.copy(input)
        else:
            result = input
        if self.extend:
            existing = util.dict_get_path(baton, output_path, list())
            result = existing + [result]
        return result
class BatonCleaner(base.Processor):
    """ Filters the baton by removing unwanted attributes.

    Expects at least one of the two keyword arguments:

    :param keep: List of attributes to keep. Any attributes not in
        this list will be removed.
    :param remove: List of attributes to remove. Any attribute in this
        list will be removed.
    """
    name = 'clean-baton'
    interface.classProvides(processing.IProcessor)

    def __init__(self, keep=None, remove=None, **kw):
        super(BatonCleaner, self).__init__(**kw)
        self.remove = set(remove or [])
        # Anything explicitly removed wins over "keep".
        self.keep = set(keep or []) - self.remove
        assert self.keep or self.remove, "Useless configuration -- nothing to remove or keep"

    def process(self, baton):
        for path in self.remove:
            util.dict_remove_path(baton, path)
        if self.keep:
            kept = dict()
            for path in self.keep:
                value = util.dict_get_path(baton, path, Ellipsis)
                # Ellipsis marks a missing path -- those are skipped.
                if value is not Ellipsis:
                    util.dict_set_path(kept, path, value)
            # Modify baton in-place so other references observe the change.
            baton.clear()
            baton.update(kept)
        return baton
class BatonCollector(base.Processor):
    """ Appends batons that pass through it to the list its
    instantiated with. Useful to e.g. inspect how a baton appears at
    various stages of the processing, or as a sink.
    """
    name = 'collect-batons'
    interface.classProvides(processing.IProcessor)

    def __init__(self, list=None, deepcopy=False, describer=None, **kw):
        """
        :param deepcopy: Whether to deepcopy the batons as they pass
            through. If enabled, this will show the batons as they
            were when they passed through --- if not, subsequent
            processors may have modified it.
        """
        super(BatonCollector, self).__init__(**kw)
        # The parameter is named "list" for configuration compatibility,
        # shadowing the builtin; a literal is used for the default.
        self.list = [] if list is None else list
        self.deepcopy = deepcopy
        assert describer is None or callable(describer), "describer must be a callable"
        self.describer = describer

    def process(self, baton):
        snapshot = util.safe_deepcopy(baton) if self.deepcopy else baton
        if self.describer:
            self.list.append(self.describer(snapshot))
        else:
            self.list.append(snapshot)
        return baton
class PrettyPrint(base.Processor):
    """ Prettyprints the baton before passing it on.

    No changes are made to the baton.
    """
    name = 'pretty-print'
    interface.classProvides(processing.IProcessor)

    def __init__(self, path='', formatter='baton: baton', namespace=None, *a, **kw):
        """
        :param path: Path into the baton whose value is printed.
        :param formatter: Lambda definition used to format the value.
        :param namespace: Extra names made available to the formatter.
        """
        super(PrettyPrint, self).__init__(*a, **kw)
        self.path = path
        self.formatter_definition = formatter
        self.namespace = namespace if namespace is not None else dict()

    def configure(self, runtime_environment):
        # Built at configure-time so the processor instance ("self") can be
        # bound into the formatter's namespace.
        self.formatter = util.create_lambda_function(self.formatter_definition, self=self, **self.namespace)

    def process(self, baton):
        pprint.pprint(self.formatter(util.dict_get_path(baton, self.path)))
        return baton
class PrintTraceback(base.Processor):
    """ Prints the currently active exception traceback. Useful for debugging.
    No changes are made to the baton.
    """
    name = 'print-failure-traceback'
    interface.classProvides(processing.IProcessor)
    def process(self, baton):
        # failure.Failure() with no arguments wraps the exception currently
        # being handled, so this processor is only useful in the
        # error-handling part of a pipeline.
        f = failure.Failure()
        f.printTraceback()
        return baton
class TrapFailure(base.Processor):
    """ Traps failures of the specified types.

    If the encountered exception is not one of the expected exception types, this
    processor will raise the original exception, preserving the traceback.
    """
    name = 'trap-failure'
    interface.classProvides(processing.IProcessor)

    def __init__(self, error_types, output_path=None, *a, **kw):
        """
        :param error_types: A single or a list of fully qualified exception class
            names that should be trapped.
        :param output_path: If one of the expected error types are trapped, this
            value will be set to the matching error type.
        """
        super(TrapFailure, self).__init__(*a, **kw)
        if not isinstance(error_types, (list, tuple)):
            error_types = [error_types]
        # Build a new list instead of assigning back into error_types: the
        # previous in-place assignment raised a TypeError when a tuple was
        # passed, and mutated the caller's list otherwise.
        self.error_types = [reflect.namedAny(error_type) for error_type in error_types]
        self.output_path = output_path

    def process(self, baton):
        # Failure() wraps the exception currently being handled; trap()
        # re-raises it unless it is one of the expected types.
        f = failure.Failure()
        f.trap(*self.error_types)
        return self.get_resulting_baton(baton, self.output_path, f)
class FlattenDictionaryList(base.InputOutputProcessor):
    """ Reduce a list of dictionaries to a list of values, given a key
    which occurs in the dictionaries.

    For example, if we have a list::
        l = [dict(author='J. Doe', email='[email protected]'), dict(author='J. Smith', id=42))]
    then processing that list given `key_path='author'` will result in::
        result = ['J. Doe', 'J. Smith']
    """
    interface.classProvides(processing.IProcessor)
    name = 'flatten-list-of-dictionaries'

    def __init__(self, key_path, uniquify=False, sort=True, **kw):
        """
        :param key_path: Path looked up in every dictionary of the input.
        :param uniquify: Drop duplicate values from the result.
        :param sort: Sort the resulting list.
        """
        super(FlattenDictionaryList, self).__init__(**kw)
        self.key_path = key_path
        self.uniquify = uniquify
        self.sort = sort

    def process_input(self, input, baton):
        # Falsy lookups (missing paths, empty values) are discarded.
        values = [value for value in
                  (util.dict_get_path(d, self.key_path) for d in input)
                  if value]
        if self.uniquify:
            values = list(set(values))
        if self.sort:
            values.sort()
        return values
class LambdaProcessor(base.InputOutputProcessor):
    """
    Given a path to a value in the baton, apply the provided lambda
    function.
    :param lambda:
        A string that defines a lambda function when `eval()`-ed. Note
        that the `lambda`-keyword should not be provided in the
        string.
    :param dependencies:
        A dict of local dependency names to their resource configurations.
        Providers that are strings are converted to tuples as required by
        the dependency manager.
        See found.processing.dependency.DependencyManager.create_dependency_map
    :param namespace:
        A dict that defines the namespace the lambda runs in. Values in
        this dict will be passed to reflect.namedAny before being made
        available to the lambda function.
    """
    interface.classProvides(processing.IProcessor)
    name = 'eval-lambda'
    def __init__(self, namespace=None, dependencies=None, **kw):
        # 'lambda' is a reserved word, so it cannot be declared as a regular
        # keyword argument and has to be fished out of **kw instead.
        if not 'lambda' in kw:
            raise exceptions.ConfigurationError('no lambda definition provided')
        self.lambda_definition = kw.pop('lambda')
        self.namespace = namespace or dict()
        super(LambdaProcessor, self).__init__(**kw)
        self.dependency_map = dependencies or dict()
    def configure(self, runtime_environment):
        for name, dependency_configuration in self.dependency_map.items():
            # if the configuration is a string, assume the string is a provider
            if isinstance(dependency_configuration, basestring):
                self.dependency_map[name] = dict(provider=dependency_configuration)
        self.runtime_environment = runtime_environment
        self.dependencies = runtime_environment.create_dependency_map(self, **self.dependency_map)
        # Compiled here (not in __init__) so "self" -- and thereby the
        # resolved dependencies -- can be referenced from the lambda body.
        self.lambda_ = util.create_lambda_function(self.lambda_definition, self=self, **self.namespace)
    def process_input(self, input, baton):
        try:
            return self.lambda_(input)
        except:
            # The bare except is deliberate: the exception is always
            # re-raised; this clause only logs which definition failed.
            logger.error('Failed "%s"' % self.lambda_definition)
            raise
    def __unicode__(self):
        return u'LambdaProcessor(%s ; %s -> %s)' % (self.lambda_definition, self.input_path, self.output_path)
class ExecProcessor(base.InputOutputProcessor):
"""
Given a path to a value in the baton, execute the provided code.
:param code:
A string that defines the code to run. The code will we wrapped in
a function with the following signature: ``def compiled_process(self, input, baton):``.
:param inline_callbacks:
Whether to wrap ``compiled_process`` in :meth:`twisted.internet.defer.inlineCallbacks`
:param use_file:
If True, write the code contents to a temporary file so that the code is shown
in any tracebacks.
:param dependencies:
A dict of local dependency names to their resource configurations.
Providers that are strings are converted to tuples as required by
the dependency manager.
See :meth:`found.processing.dependency.DependencyManager.create_dependency_map`
:param namespace:
A dict that defines the namespace the code runs in. Values in
this dict will be passed to reflect.namedAny before being made
available to the code as part of the globals.
"""
interface.classProvides(processing.IProcessor)
name = 'exec-code'
    def __init__(self, code, inline_callbacks=False, use_file=True, namespace=None, dependencies=None, **kw):
        super(ExecProcessor, self).__init__(**kw)
        # Wrap the configured code in a function definition so it can be
        # compiled once at configure-time and called for every baton.
        self.code = 'def compiled_process(self, input, baton):\n'
        self.code += self._reindent(self._trim(code))
        if inline_callbacks:
            # Rebind the generated function through inlineCallbacks so the
            # configured code may use "yield" on Deferreds.
            self.code += '\n'
            self.code += 'compiled_process = defer.inlineCallbacks(compiled_process)'
        self.use_file = use_file
        self.namespace = namespace or dict()
        self.dependency_map = dependencies or dict()
def configure(self, runtime_environment):
self.runtime_environment = runtime_environment
# request dependencies
if self.dependency_map:
for dependency_key, dependency_configuration in self.dependency_map.items():
# if the configuration is a string, assume the string is a provider
if isinstance(dependency_configuration, basestring):
dependency_configuration = dict(provider=dependency_configuration)
self.dependency_map[dependency_key] = dependency_configuration
self.dependencies = runtime_environment.create_dependency_map(self, **self.dependency_map)
# configure the locals and globals the code should be executed with
compiled_locals = dict()
compiled_globals = dict(globals())
for key, value in self.namespace.items():
if isinstance(value, basestring):
value = reflect.namedAny(value)
compiled_globals[key] = value
name = self.node_name or self.name
if self.use_file:
# if we're asked to use a file, we write the contents to a temporary
# file and make compile() reference that file, which makes the lines
# available in stack traces.
self.tempfile = tempfile.NamedTemporaryFile(suffix='-'+name+'.py')
self.tempfile.file.write(self.code)
self.tempfile.file.flush()
name = self.tempfile.name
self.compiled_code = compile(self.code, filename=name, mode='exec')
# compile the code and extract the compiled process function
exec self.compiled_code in compiled_globals, compiled_locals
self.compiled_process = compiled_locals['compiled_process']
@defer.inlineCallbacks
def process_input(self, input, baton):
output = yield self.compiled_process(self, input, baton)
defer.returnValue(output)
# This is the example docstring processing code from http://www.python.org/dev/peps/pep-0257/
def _trim(self, docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def _reindent(self, string, spaces=4):
lines = [spaces * ' ' + line for line in string.splitlines()]
return '\n'.join(lines)
class Passthrough(base.Processor):
    """ A no-op processor: every baton is forwarded unchanged. """
    interface.classProvides(processing.IProcessor)
    name = 'passthrough'

    def process(self, baton):
        # Nothing to do; hand the baton straight on.
        return baton
class LambdaConditional(base.Processor):
    """ Routes batons to one or more consumers selected by a configured lambda.

    The lambda receives the value found at *input_path* in the baton and
    returns either a single consumer index or an iterable of indexes.
    """
    interface.classProvides(processing.IProcessor)
    name = 'lambda-decider'

    def __init__(self, input_path='', namespace=None, **kw):
        """
        :param input_path: path to the lambda's input value within the baton.
        :param namespace: extra names made available to the lambda.
        :raises exceptions.ConfigurationError: if no ``lambda`` key is provided.
        """
        # idiomatic membership test ("x not in y" instead of "not x in y")
        if 'lambda' not in kw:
            raise exceptions.ConfigurationError('no lambda definition provided')
        self.lambda_definition = kw.pop('lambda')
        self.namespace = namespace or dict()
        super(LambdaConditional, self).__init__(**kw)
        self.input_path = input_path

    def configure(self, runtime_environment):
        self.lambda_ = util.create_lambda_function(self.lambda_definition, self=self, **self.namespace)

    def process(self, baton):
        # The baton itself is untouched; only the routing decision changes.
        return baton

    def get_consumers(self, baton):
        """ Return the consumer(s) whose index(es) the lambda selects. """
        input = util.dict_get_path(baton, self.input_path)
        index_or_indexes = self.lambda_(input)
        if isinstance(index_or_indexes, int):
            return [self.consumers[index_or_indexes]]
        return [self.consumers[index] for index in index_or_indexes]
class Stopper(base.Processor):
    """ Stops processing when the configured *lambda* returns true.

    An optional *input_path* can be specified, as well as a
    *namespace*. These are explained in detail in TODO: Some reference.
    """
    interface.classProvides(processing.IProcessor)
    name = 'stop'

    def __init__(self, input_path='', decider='input: bool(input)', namespace=None, **kw):
        self.input_path = input_path
        self.namespace = namespace if namespace is not None else dict()
        self.decider = util.create_lambda_function(decider, self=self, **self.namespace)
        super(Stopper, self).__init__(**kw)

    def process(self, baton):
        # The baton is passed through untouched; stopping is expressed via
        # get_consumers().
        return baton

    def get_consumers(self, baton):
        """ Return no consumers (halting the baton) when the decider fires. """
        input = util.dict_get_path(baton, self.input_path)
        return [] if self.decider(input) else self.consumers
class Waiter(base.Processor):
    """ Delays each baton by *delay* seconds before passing it on. """
    name = 'wait'
    interface.classProvides(processing.IProcessor)

    def __init__(self, delay, **kw):
        super(Waiter, self).__init__(**kw)
        self.delay = delay

    def process(self, baton):
        # Fire the deferred with the baton once the delay has elapsed.
        deferred = defer.Deferred()
        reactor.callLater(self.delay, deferred.callback, baton)
        return deferred
class NthPrinter(base.Processor):
    """ Prints every *n*-th processed baton, formatted by a configurable lambda.

    :param n: print on every n-th baton.
    :param prefix: string printed before the formatted value.
    :param formatter: lambda definition receiving ``(i, baton)``.
    :param namespace: extra names made available to the formatter.
    """
    name = 'print-nth'
    interface.classProvides(processing.IProcessor)

    def __init__(self, n, prefix='', formatter='i, baton: i', namespace=None, **kw):
        super(NthPrinter, self).__init__(**kw)
        self.n = n
        self.prefix = prefix
        # Running count of batons processed so far.
        self.i = 0
        self.formatter_definition = formatter
        self.namespace = namespace or dict()

    def configure(self, runtime_environment):
        # Compile the formatter definition into a callable once.
        self.formatter = util.create_lambda_function(self.formatter_definition, self=self, **self.namespace)

    def process(self, baton):
        self.i += 1
        # Only print when the counter reaches a multiple of n.
        if not self.i % self.n:
            print '%s%r' % (self.prefix, self.formatter(self.i, baton))
        return baton
class DictGrouper(base.Processor):
    """ Groups a list of items by the value found at *key_path* in each item.

    The resulting ``defaultdict(list)`` of groups is stored at
    *output_path* (defaults to *input_path*). Items missing the key are
    grouped under *fallback*.
    """
    interface.classProvides(processing.IProcessor)
    name = 'group-by-value'

    def __init__(self, key_path, input_path, output_path=None, fallback=None, **kw):
        super(DictGrouper, self).__init__(**kw)
        self.key_path = key_path
        self.input_path = input_path
        self.output_path = input_path if output_path is None else output_path
        self.fallback = fallback

    def process(self, baton):
        grouped = collections.defaultdict(list)
        for element in util.dict_get_path(baton, self.input_path, list()):
            group_key = util.dict_get_path(element, self.key_path, self.fallback)
            grouped[group_key].append(element)
        util.dict_set_path(baton, self.output_path, grouped)
        return baton
class NestedListFlattener(base.Processor):
    """ Flatten nested lists into a single list.

    For example:

        >>> baton = dict(data=['One', ['Two', 'Three'], ['Four']])
        >>> NestedListFlattener(paths=['data']).process(baton)
        {'data': ['One', 'Two', 'Three', 'Four']}
    """
    interface.classProvides(processing.IProcessor)
    name = 'flatten-nested-lists'

    def __init__(self, paths, path_prefix='', **kw):
        super(NestedListFlattener, self).__init__(**kw)
        # Resolve the full path of every target up front.
        self.paths = [path_prefix + p for p in paths]

    def process(self, baton):
        for target_path in self.paths:
            current = util.dict_get_path(baton, target_path, None)
            # Only lists are flattened; anything else is left untouched.
            if isinstance(current, list):
                util.dict_set_path(baton, target_path, util.flatten(current))
        return baton
class StringEncoder(base.InputOutputProcessor):
    """ Encodes the input string with the configured *encoding*. """
    name = 'encode-string'
    interface.classProvides(processing.IProcessor)

    def __init__(self, encoding, **kw):
        super(StringEncoder, self).__init__(**kw)
        self.encoding = encoding

    def process_input(self, input, baton):
        # Delegate to the string's own codec machinery.
        return input.encode(self.encoding)
class StringDecoder(base.InputOutputProcessor):
    """ Decodes the input bytes with the configured *encoding*. """
    name = 'decode-string'
    interface.classProvides(processing.IProcessor)

    def __init__(self, encoding, **kw):
        super(StringDecoder, self).__init__(**kw)
        self.encoding = encoding

    def process_input(self, input, baton):
        # Delegate to the string's own codec machinery.
        return input.decode(self.encoding)
class StringFormatter(base.InputOutputProcessor):
    """ Formats a string.

    See the `format string syntax <http://docs.python.org/library/string.html#formatstrings>`_
    in the Python documentation.
    """
    name = 'format-string'
    interface.classProvides(processing.IProcessor)

    def __init__(self, format=None, format_path=None, unpack=True, **kw):
        """
        :param format: An inline format string.
        :param format_path: Path to a format string within the baton.
        :param unpack: Whether to unpack input lists, tuples and dicts when formatting. This
            enables using a variable number of arguments and keyword arguments to
            :meth:`str.format` depending on the input value.
        """
        super(StringFormatter, self).__init__(**kw)
        self.format_string = format
        self.format_string_path = format_path
        self.unpack = unpack
        self._fail_if_configuration_is_invalid()

    def _fail_if_configuration_is_invalid(self):
        """ Exactly one of format/format_path must be configured. """
        if self.format_string is not None and self.format_string_path is not None:
            e_msg = 'Cannot specify both a format and a format path.'
            detail = 'Using both a format and a format path is ambiguous.'
            raise exceptions.ConfigurationError(e_msg, detail)
        if self.format_string is None and self.format_string_path is None:
            e_msg = "Must specify either 'format' or a 'format_path'"
            raise exceptions.ConfigurationError(e_msg)

    def _get_format_string(self, baton):
        # An inline format string wins; otherwise look it up in the baton.
        if self.format_string is not None:
            return self.format_string
        return util.dict_get_path(baton, self.format_string_path)

    def process_input(self, input, baton):
        format_string = self._get_format_string(baton)
        if self.unpack and isinstance(input, (list, tuple)):
            return format_string.format(*input)
        if self.unpack and isinstance(input, dict):
            return format_string.format(**input)
        return format_string.format(input)
class StringPrefixer(StringFormatter):
    """ Prefixes the string at *input_path* with the *prefix*. """
    interface.classProvides(processing.IProcessor)
    name = 'prefix-string'

    def __init__(self, prefix, **kw):
        # Reuse StringFormatter by expressing the prefix as a format string.
        # NOTE(review): literal braces in *prefix* would be interpreted by
        # str.format -- presumably prefixes are brace-free; verify callers.
        kw['format'] = unicode(prefix) + '{0}'
        super(StringPrefixer, self).__init__(**kw)
class PDBTraceSetter(base.Processor):
    """ Calls pdb.trace() whenever a baton is processed. """
    name = 'set-pdb-trace'
    interface.classProvides(processing.IProcessor)

    def process(self, baton):
        # Drop into the interactive debugger for every baton, then forward
        # the baton unchanged.
        pdb.set_trace()
        return baton
class RaiseException(base.Processor):
    """ Raise an exception of the specified *type*.

    The exception is instantiated with the optional *args* and
    *kwargs*."""
    interface.classProvides(processing.IProcessor)
    name = 'raise-exception'

    def __init__(self, type='exceptions.Exception', args=None, kwargs=None, **kw):
        super(RaiseException, self).__init__(**kw)
        # Resolve the dotted exception name to the actual class.
        self.type = reflect.namedAny(type)
        args = args or list()
        # A single non-list argument is wrapped in a list.
        self.args = args if isinstance(args, list) else [args]
        self.kwargs = kwargs or dict()

    def process(self, baton):
        raise self.type(*self.args, **self.kwargs)
class MappingSetter(base.Processor):
    """ Takes a path-to-value-mapping and sets values at the specified
    paths.

    A *path_prefix* can be specified, if all the paths in the mapping
    share a common prefix.
    """
    interface.classProvides(processing.IProcessor)
    name = 'set-values'

    def __init__(self, mapping, path_prefix='', **kw):
        super(MappingSetter, self).__init__(**kw)
        self.mapping = mapping
        self.path_prefix = path_prefix

    def process(self, baton):
        # Write every configured value into the baton, prefix applied.
        for suffix, value in self.mapping.items():
            util.dict_set_path(baton, self.path_prefix + suffix, value)
        return baton
class ValueSetter(base.Processor):
    """ Sets the *value* at *path*. """
    interface.classProvides(processing.IProcessor)
    name = 'set-value'

    def __init__(self, path, value, **kw):
        super(ValueSetter, self).__init__(**kw)
        self.path = path
        self.value = value

    def process(self, baton):
        # Store the configured constant at the configured location.
        util.dict_set_path(baton, self.path, self.value)
        return baton
class CounterIncrementer(base.Processor):
    """ Increases the counter found at *counter_path* with *increment*. """
    interface.classProvides(processing.IProcessor)
    name = 'increment-counter'

    def __init__(self, counter_path, increment=1, **kw):
        super(CounterIncrementer, self).__init__(**kw)
        self.counter_path = counter_path
        self.increment = increment

    def process(self, baton):
        # NOTE(review): assumes a numeric value already exists at
        # counter_path -- a missing path yields None and would raise here.
        current = util.dict_get_path(baton, self.counter_path)
        util.dict_set_path(baton, self.counter_path, current + self.increment)
        return baton
class RateReporter(base.Processor):
    """ Logs the rate (counter value divided by a delta from the baton). """
    interface.classProvides(processing.IProcessor)
    name = 'report-rate'

    def __init__(self, counter_path, delta_path='delta', format='rate: %(rate).02f', report_zero=False, **kw):
        super(RateReporter, self).__init__(**kw)
        self.counter_path = counter_path
        self.delta_path = delta_path
        self.format = format
        self.report_zero = report_zero

    def process(self, baton):
        value = util.dict_get_path(baton, self.counter_path)
        delta = util.dict_get_path(baton, self.delta_path)
        # NOTE(review): Python 2 integer division if both operands are
        # ints -- confirm callers provide a float delta when fractional
        # rates are expected.
        rate = value / delta
        if rate != 0 or self.report_zero:
            logger.info(self.format % dict(rate=rate, delta=delta, value=value))
        return baton
class Logger(base.Processor):
    """ Logs a message with the configured log-level.

    The message is either configured at *message*, or looked up in the
    baton at *message_path*.

    The message is logged with the configured *level*.

    .. seealso:: :mod:`piped.log`
    """
    interface.classProvides(processing.IProcessor)
    name = 'log'

    _VALID_LEVELS = ('critical', 'debug', 'error', 'failure', 'info', 'warn')

    def __init__(self, message=None, message_path=None, logger_name=None, level='info', **kw):
        super(Logger, self).__init__(**kw)
        # Exactly one of message/message_path must be configured.
        if (message is None) == (message_path is None):
            raise exceptions.ConfigurationError('specify either message or message_path')

        processor_logger = logging.getLogger(logger_name) if logger_name else logging.root

        level = level.lower()
        if level not in self._VALID_LEVELS:
            raise ValueError('Invalid log-level: "%s"' % level)
        # NOTE(review): 'failure' is not a method on stdlib loggers; this
        # getattr would raise AttributeError for that level -- confirm the
        # configured logger provides it.
        self.logger = getattr(processor_logger, level)

        self.message = message
        self.message_path = message_path

    def process(self, baton):
        message = util.dict_get_path(baton, self.message_path) if self.message_path else self.message
        if message:
            self.logger(message)
        return baton
class DependencyCaller(base.Processor):
    """ Calls a method on a dependency.

    This processor may be useful if you want to call a method on a provided dependency.
    """
    interface.classProvides(processing.IProcessor)
    name = 'call-dependency'

    # Sentinel meaning "no arguments configured"; always compared with "is".
    NO_ARGUMENTS = object()

    def __init__(self, dependency, method='__call__', arguments=NO_ARGUMENTS,
                 unpack_arguments=False, output_path=None, *a, **kw):
        """
        :param dependency: The dependency to use.
        :param method: The name of the method to call.
        :param arguments: The arguments to call the method with. Defaults to no
            arguments.
        :param unpack_arguments: Whether to unpack arguments
        :param output_path: Where to store the output in the baton.
        """
        super(DependencyCaller, self).__init__(*a, **kw)

        if isinstance(dependency, basestring):
            dependency = dict(provider=dependency)
        self.dependency_config = dependency
        self.method_name = method

        self.arguments = arguments
        self.unpack_arguments = unpack_arguments

        self.output_path = output_path

    def configure(self, runtime_environment):
        dm = runtime_environment.dependency_manager
        self.dependency = dm.add_dependency(self, self.dependency_config)

    @defer.inlineCallbacks
    def process(self, baton):
        dependency = yield self.dependency.wait_for_resource()

        method_name = self.get_input(baton, self.method_name)
        method = getattr(dependency, method_name)

        # Identity check against the sentinel: the original compared with
        # "==", which an arguments object with a permissive __eq__ could
        # defeat.
        if self.arguments is self.NO_ARGUMENTS:
            result = yield method()
        else:
            # Only resolve arguments against the baton when some were
            # actually configured, instead of passing the sentinel through
            # get_input and discarding the result.
            arguments = self.get_input(baton, self.arguments)
            if self.unpack_arguments:
                if isinstance(arguments, dict):
                    result = yield method(**arguments)
                else:
                    result = yield method(*arguments)
            else:
                result = yield method(arguments)

        baton = self.get_resulting_baton(baton, self.output_path, result)
        defer.returnValue(baton)
|
|
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SwiftS3 middleware emulates AWS S3 REST api on top of Swift.
The following operations are currently supported:
* GET Service
* DELETE Bucket (Delete bucket; abort running MPUs)
* GET Bucket (List Objects; List in-progress multipart uploads)
* PUT Bucket
* DELETE Object
* GET Object
* HEAD Object
* PUT Object
* PUT Object (Copy)
To add this middleware to your configuration, add the swifts3 middleware
before the auth middleware and before any other middleware that
waits for swift requests (like rate limiting).
To set up your client, the access key will be the concatenation of the
account and user strings that should look like test:tester, and the
secret access key is the account password. The host should also point
to the swift storage hostname and it should use the old style
calling format, not the hostname based container format.
An example client using the python boto library might look like the
following for a SAIO setup:
connection = boto.s3.Connection(
aws_access_key_id='test:tester',
aws_secret_access_key='testing',
port=8080,
host='127.0.0.1',
is_secure=False,
calling_format=boto.s3.connection.OrdinaryCallingFormat())
Note that all the operations with multipart upload buckets are denied
to user, as well as multipart buckets are not listed in all buckets list.
In case of GET/DELETE - NoSuchBucket error is returned;
In case of PUT/POST - InvalidBucketName error is returned.
"""
from urllib import unquote, quote
import base64
import binascii
import errno
import hmac
import rfc822
import urlparse
import uuid
from xml.sax.saxutils import escape as xml_escape

from webob import Request as WebObRequest, Response
from webob.exc import HTTPNotFound
from webob.multidict import MultiDict
import simplejson as json

from swift.common.utils import split_path, get_logger
#XXX: In webob-1.9b a copied environment still contained a reference to the
#     original request's TrackableMultiDict, so changes made through the
#     copy were reflected in the original request.
class Request(WebObRequest):
    """ WebOb Request that drops the cached query-string parse when copied,
    so a copy does not share parsed GET variables with the original. """

    def _remove_query_vars(self):
        # Discard the cached parse; the copy will re-parse its own environ.
        self.environ.pop('webob._parsed_query_vars', None)

    def copy(self):
        duplicate = super(Request, self).copy()
        duplicate._remove_query_vars()
        return duplicate

    def copy_get(self):
        duplicate = super(Request, self).copy_get()
        duplicate._remove_query_vars()
        return duplicate
# Maximum number of object entries returned by a single GET Bucket request.
MAX_BUCKET_LISTING = 1000
# Maximum number of in-progress multipart uploads returned per listing.
MAX_UPLOADS_LISTING = 1000
# Internal Swift containers backing multipart uploads carry this prefix;
# they are hidden from users.
MULTIPART_UPLOAD_PREFIX = 'MPU.'

# List of Query String Arguments of Interest
# (sub-resources/overrides that must be included in the canonical string).
qsa_of_interest = ['acl', 'defaultObjectAcl', 'location', 'logging',
                   'partNumber', 'policy', 'requestPayment', 'torrent',
                   'versioning', 'versionId', 'versions', 'website',
                   'uploads', 'uploadId', 'response-content-type',
                   'response-content-language', 'response-expires',
                   'response-cache-control', 'response-content-disposition',
                   'response-content-encoding', 'delete', 'lifecycle']
def get_err_response(code):
    """
    Creates a properly formatted xml error response by a
    given HTTP response code,

    :param code: error code
    :returns: webob.response object
    """
    # Maps the S3 error code to (HTTP status, human readable message).
    error_table = {
        'AccessDenied':
            (403, 'Access denied'),
        'BucketAlreadyExists':
            (409, 'The requested bucket name is not available'),
        'BucketNotEmpty':
            (409, 'The bucket you tried to delete is not empty'),
        'InvalidArgument':
            (400, 'Invalid Argument'),
        'InvalidBucketName':
            (400, 'The specified bucket is not valid'),
        'InvalidURI':
            (400, 'Could not parse the specified URI'),
        'NoSuchBucket':
            (404, 'The specified bucket does not exist'),
        'SignatureDoesNotMatch':
            (403, 'The calculated request signature does not match '\
             'your provided one'),
        'NoSuchKey':
            (404, 'The resource you requested does not exist'),
        'NoSuchUpload':
            (404, 'The specified multipart upload does not exist.'),
    }

    status, message = error_table[code]
    resp = Response(content_type='text/xml')
    resp.status = status
    # The original assigned resp.body twice; the first (plain-text)
    # assignment was immediately overwritten and has been removed.
    resp.body = '<?xml version="1.0" encoding="UTF-8"?>\r\n<Error>\r\n ' \
                '<Code>%s</Code>\r\n <Message>%s</Message>\r\n</Error>\r\n' \
                % (code, message)
    return resp
def get_acl(account_name):
    """ Return a canned FULL_CONTROL ACL document naming *account_name*
    as both owner and grantee. """
    acl_template = ('<AccessControlPolicy>'
                    '<Owner>'
                    '<ID>%s</ID>'
                    '</Owner>'
                    '<AccessControlList>'
                    '<Grant>'
                    '<Grantee xmlns:xsi="http://www.w3.org/2001/'\
                        'XMLSchema-instance" xsi:type="CanonicalUser">'
                    '<ID>%s</ID>'
                    '</Grantee>'
                    '<Permission>FULL_CONTROL</Permission>'
                    '</Grant>'
                    '</AccessControlList>'
                    '</AccessControlPolicy>')
    body = acl_template % (account_name, account_name)
    return Response(body=body, content_type="text/plain")
def canonical_string(req):
    """
    Canonicalize a request to a token that can be signed.

    Builds the AWS S3 string-to-sign: method, Content-MD5, Content-Type,
    date, canonicalized x-amz-* headers and the canonicalized resource.
    """
    def unquote_v(nv):
        # Unquote only the value of a name=value pair; bare names pass through.
        if len(nv) == 1:
            return nv
        else:
            return (nv[0], unquote(nv[1]))

    amz_headers = {}

    buf = "%s\n%s\n%s\n" % (req.method, req.headers.get('Content-MD5', ''),
                            req.headers.get('Content-Type') or '')

    # Collect the x-amz-* headers, lower-cased, for canonicalization.
    for amz_header in sorted((key.lower() for key in req.headers
                              if key.lower().startswith('x-amz-'))):
        amz_headers[amz_header] = req.headers[amz_header]

    # x-amz-date takes precedence over the Date header.
    if 'x-amz-date' in amz_headers:
        buf += "\n"
    elif 'Date' in req.headers:
        buf += "%s\n" % req.headers['Date']

    for k in sorted(key.lower() for key in amz_headers):
        buf += "%s:%s\n" % (k, amz_headers[k])

    # don't include anything after the first ? in the resource...
    # unless it is one of the QSA of interest, defined above
    parts = req.path_qs.split('?')
    buf += parts[0]

    if len(parts) > 1:
        qsa = parts[1].split('&')
        qsa = [a.split('=', 1) for a in qsa]
        qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest]
        if qsa:
            # sort(key=...) replaces the Python-2-only cmp= comparator;
            # the resulting order (stable sort on the parameter name) is
            # identical.
            qsa.sort(key=lambda pair: pair[0])
            qsa = ['='.join(a) for a in qsa]
            buf += '?'
            buf += '&'.join(qsa)

    return buf
def check_container_name_no_such_bucket_error(container_name):
    """Checks that user do not tries to operate with MPU container"""
    # Internal multipart containers are invisible to users: report them
    # as non-existent. Returns None for ordinary container names.
    if not container_name.startswith(MULTIPART_UPLOAD_PREFIX):
        return None
    return get_err_response('NoSuchBucket')
def check_container_name_invalid_bucket_name_error(container_name):
    """Checks that user do not tries to operate with MPU container"""
    # Internal multipart containers may not be created/updated by users:
    # report the name as invalid. Returns None for ordinary names.
    if not container_name.startswith(MULTIPART_UPLOAD_PREFIX):
        return None
    return get_err_response('InvalidBucketName')
def meta_request_head(req, meta_path, app):
    """
    HEAD request to check that meta file presents and
    multipart upload is in progress.
    """
    head_req = req.copy()
    head_req.method = 'HEAD'
    head_req.body = ''
    head_req.upath_info = meta_path
    # No query string is relevant for the existence check.
    head_req.GET.clear()
    return head_req.get_response(app)
class ServiceController(object):
    """
    Handles account level requests.
    """
    def __init__(self, env, app, account_name, token, **kwargs):
        self.app = app
        # Rewrite the environment to address the Swift account directly.
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s' % account_name

    def GET(self, req):
        """
        Handle GET Service request
        """
        req.GET.clear()
        req.GET['format'] = 'json'
        resp = req.get_response(self.app)

        if resp.status_int != 200:
            if resp.status_int == 401:
                return get_err_response('AccessDenied')
            return get_err_response('InvalidURI')

        containers = json.loads(resp.body)
        # We don't keep the creation time of a bucket (s3cmd doesn't work
        # without one), so a fixed bogus date is reported. Internal
        # multipart-upload buckets must not be listed.
        bucket_entries = "".join(
            ['<Bucket><Name>%s</Name><CreationDate>' \
             '2009-02-03T16:45:09.000Z</CreationDate></Bucket>' %
             xml_escape(container['name']) for container in containers if \
             not container['name'].startswith(MULTIPART_UPLOAD_PREFIX)])
        body = '<?xml version="1.0" encoding="UTF-8"?>' \
               '<ListAllMyBucketsResult ' \
               'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' \
               '<Buckets>%s</Buckets>' \
               '</ListAllMyBucketsResult>' \
               % bucket_entries
        return Response(status=200, content_type='application/xml', body=body)
class BucketController(object):
    """
    Handles bucket requests.
    """
    def __init__(self, env, app, account_name, token, container_name,
                 **kwargs):
        """
        :param env: WSGI environment; rewritten to address the Swift container.
        :param app: the wrapped WSGI application (Swift proxy).
        :param account_name: Swift account owning the bucket.
        :param token: auth token forwarded via X-Auth-Token.
        :param container_name: Swift container backing the S3 bucket.
        """
        self.app = app
        self.container_name = unquote(container_name)
        self.account_name = unquote(account_name)
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s/%s' % (account_name, container_name)

    def get_uploads(self, req):
        """Handles listing of in-progress multipart uploads"""
        acl = req.GET.get('acl')
        params = MultiDict([('format', 'json')])
        max_uploads = req.GET.get('max-uploads')
        if (max_uploads is not None and max_uploads.isdigit()):
            max_uploads = min(int(max_uploads), MAX_UPLOADS_LISTING)
        else:
            max_uploads = MAX_UPLOADS_LISTING
        # Request one extra entry so truncation can be detected.
        params['limit'] = str(max_uploads + 1)
        for param_name in ('key-marker', 'prefix', 'delimiter',
                           'upload-id-marker'):
            if param_name in req.GET:
                params[param_name] = req.GET[param_name]

        # Listings come from the hidden MPU container of this bucket.
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        req.upath_info = cont_path
        req.GET.clear()
        req.GET.update(params)

        resp = req.get_response(self.app)
        status = resp.status_int

        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')

        if acl is not None:
            return get_acl(self.account_name)

        objects = json.loads(resp.body)
        # Each in-progress upload is represented by a '<key>/<upload-id>/meta'
        # object; other entries in the container are uploaded parts.
        # BUGFIX: the original removed non-meta entries from `objects` while
        # iterating over it (skipping elements) and accumulated the XML with
        # str.join (corrupting the result once more than one upload was in
        # progress); fixed by filtering into a new list and concatenating.
        meta_objects = [obj for obj in objects
                        if obj['name'].endswith('/meta')]
        uploads = ''
        splited_name = ''
        for obj in meta_objects:
            splited_name = obj['name'].split('/')
            uploads += (
                "<Upload>"
                "<Key>%s</Key>"
                "<UploadId>%s</UploadId>"
                "<Initiator>"
                "<ID>%s</ID>"
                "<DisplayName>%s</DisplayName>"
                "</Initiator>"
                "<Owner>"
                "<ID>%s</ID>"
                "<DisplayName>%s</DisplayName>"
                "</Owner>"
                "<StorageClass>STANDARD</StorageClass>"
                "<Initiated>%sZ</Initiated>"
                "</Upload>" % (
                    splited_name[0],
                    splited_name[1],
                    self.account_name,
                    self.account_name,
                    self.account_name,
                    self.account_name,
                    obj['last_modified'][:-3]))

        #TODO: Currently there are less then max_uploads results
        # in a response; Amount of uploads == amount of meta files
        # received in a request for a list of objects in a bucket.
        if len(meta_objects) == (max_uploads + 1):
            is_truncated = 'true'
            next_key_marker = splited_name[0]
            next_uploadId_marker = splited_name[1]
        else:
            is_truncated = 'false'
            next_key_marker = next_uploadId_marker = ''
        body = ('<?xml version="1.0" encoding="UTF-8"?>'
                '<ListMultipartUploadsResult '
                'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                '<Bucket>%s</Bucket>'
                '<KeyMarker>%s</KeyMarker>'
                '<UploadIdMarker>%s</UploadIdMarker>'
                '<NextKeyMarker>%s</NextKeyMarker>'
                '<NextUploadIdMarker>%s</NextUploadIdMarker>'
                '<MaxUploads>%s</MaxUploads>'
                '<IsTruncated>%s</IsTruncated>'
                '%s'
                '</ListMultipartUploadsResult>' %
                (
                    xml_escape(self.container_name),
                    xml_escape(params.get('key-marker', '')),
                    xml_escape(params.get('upload-id-marker', '')),
                    next_key_marker,
                    next_uploadId_marker,
                    max_uploads,
                    is_truncated,
                    uploads
                )
                )
        return Response(body=body, content_type='application/xml')

    def GET(self, req):
        """
        Handles listing of in-progress multipart uploads,
        handles list objects request.
        """
        # any operations with multipart buckets are not allowed to user
        # BUGFIX: the original discarded the helper's return value, so the
        # error response was never actually returned to the client.
        error = check_container_name_no_such_bucket_error(self.container_name)
        if error:
            return error

        if 'uploads' in req.GET:
            return self.get_uploads(req)
        else:
            acl = req.GET.get('acl')
            params = MultiDict([('format', 'json')])
            max_keys = req.GET.get('max-keys')
            if (max_keys is not None and max_keys.isdigit()):
                max_keys = min(int(max_keys), MAX_BUCKET_LISTING)
            else:
                max_keys = MAX_BUCKET_LISTING
            # Request one extra entry so truncation can be detected.
            params['limit'] = str(max_keys + 1)
            for param_name in ('marker', 'prefix', 'delimiter'):
                if param_name in req.GET:
                    params[param_name] = req.GET[param_name]

            req.GET.clear()
            req.GET.update(params)
            resp = req.get_response(self.app)
            status = resp.status_int

            if status != 200:
                if status == 401:
                    return get_err_response('AccessDenied')
                elif status == 404:
                    return get_err_response('InvalidBucketName')
                else:
                    return get_err_response('InvalidURI')

            if acl is not None:
                return get_acl(self.account_name)

            objects = json.loads(resp.body)
            body = ('<?xml version="1.0" encoding="UTF-8"?>'
                    '<ListBucketResult '
                    'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                    '<Prefix>%s</Prefix>'
                    '<Marker>%s</Marker>'
                    '<Delimiter>%s</Delimiter>'
                    '<IsTruncated>%s</IsTruncated>'
                    '<MaxKeys>%s</MaxKeys>'
                    '<Name>%s</Name>'
                    '%s'
                    '%s'
                    '</ListBucketResult>' %
                    (
                        xml_escape(params.get('prefix', '')),
                        xml_escape(params.get('marker', '')),
                        xml_escape(params.get('delimiter', '')),
                        'true' if len(objects) == (max_keys + 1) else 'false',
                        max_keys,
                        xml_escape(self.container_name),
                        "".join(['<Contents><Key>%s</Key><LastModified>%sZ</Last'\
                                 'Modified><ETag>%s</ETag><Size>%s</Size><Storage'\
                                 'Class>STANDARD</StorageClass></Contents>' %
                                 (xml_escape(i['name']), i['last_modified'][:-3],
                                  i['hash'], i['bytes'])
                                 for i in objects[:max_keys] if 'subdir' not in i]),
                        "".join(['<CommonPrefixes><Prefix>%s</Prefix></Common'\
                                 'Prefixes>' % xml_escape(i['subdir'])
                                 for i in objects[:max_keys] if 'subdir' in i])))
            return Response(body=body, content_type='application/xml')

    def PUT(self, req):
        """
        Handles PUT Bucket request.
        """
        # any operations with multipart buckets are not allowed to user
        # BUGFIX: return the helper's error response instead of dropping it.
        error = check_container_name_invalid_bucket_name_error(
            self.container_name)
        if error:
            return error

        resp = req.get_response(self.app)
        status = resp.status_int

        if status != 201:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 202:
                return get_err_response('BucketAlreadyExists')
            else:
                return get_err_response('InvalidURI')

        resp = Response()
        resp.headers.add('Location', self.container_name)
        resp.status = 200
        return resp

    def mpu_bucket_deletion_list_request(self, req, cont_path):
        """This method returns listing of MPU bucket for deletion"""
        list_req = req.copy()
        list_req.method = 'GET'
        list_req.upath_info = cont_path
        list_req.GET.clear()
        list_req.GET['format'] = 'json'
        return list_req.get_response(self.app)

    def mpu_bucket_deletion(self, req):
        """
        This method checks if MPU bucket exists and
        if there are any active MPUs are in it.
        MPUs are aborted, uploaded parts are deleted.
        """
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        list_resp = self.mpu_bucket_deletion_list_request(req, cont_path)
        status = list_resp.status_int

        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                # there is no MPU bucket, it's OK, there is only regular bucket
                pass
            else:
                return get_err_response('InvalidURI')
        else:
            # aborting multipart uploads by deleting meta and other files
            objects = json.loads(list_resp.body)
            for obj in objects:
                if obj['name'].endswith('/meta'):
                    # Delete the meta file and every part sharing its
                    # '<key>/<upload-id>/' prefix.
                    for mpu_obj in objects:
                        if mpu_obj['name'].startswith(obj['name'][:-5]):
                            obj_req = req.copy()
                            obj_req.upath_info = "%s%s" % (cont_path,
                                                           mpu_obj['name'])
                            obj_req.GET.clear()

                            obj_resp = obj_req.get_response(self.app)
                            status = obj_resp.status_int
                            #TODO: Add some logs here
                            if status not in (200, 204):
                                if status == 401:
                                    return get_err_response('AccessDenied')
                                elif status == 404:
                                    return get_err_response('NoSuchKey')
                                else:
                                    return get_err_response('InvalidURI')
            # deleting multipart bucket
            del_mpu_req = req.copy()
            del_mpu_req.upath_info = cont_path
            del_mpu_req.GET.clear()
            del_mpu_resp = del_mpu_req.get_response(self.app)
            status = del_mpu_resp.status_int
            if status != 204:
                if status == 401:
                    return get_err_response('AccessDenied')
                elif status == 404:
                    return get_err_response('InvalidBucketName')
                elif status == 409:
                    return get_err_response('BucketNotEmpty')
                else:
                    return get_err_response('InvalidURI')
        return Response(status=204)

    def DELETE(self, req):
        """
        Handles DELETE Bucket request.
        Also deletes multipart bucket if it exists.
        Aborts all multipart uploads initiated for this bucket.
        """
        # any operations with multipart buckets are not allowed to user
        # BUGFIX: return the helper's error response instead of dropping it.
        error = check_container_name_no_such_bucket_error(self.container_name)
        if error:
            return error

        # deleting regular bucket,
        # request is copied to save valid authorization
        del_req = req.copy()
        resp = del_req.get_response(self.app)
        status = resp.status_int

        if status != 204:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            elif status == 409:
                return get_err_response('BucketNotEmpty')
            else:
                return get_err_response('InvalidURI')
        # check if there is a multipart bucket and
        # return 204 when everything is deleted
        return self.mpu_bucket_deletion(req)
class NormalObjectController(object):
"""
Handles requests on objects.
"""
    def __init__(self, env, app, account_name, token, container_name,
                 object_name, **kwargs):
        """ Rewrite the WSGI environment to address the Swift object and
        forward the auth token.

        :param env: WSGI environment (mutated in place).
        :param app: the wrapped WSGI application (Swift proxy).
        :param account_name: Swift account (URL-quoted).
        :param token: auth token forwarded via X-Auth-Token.
        :param container_name: Swift container (URL-quoted).
        :param object_name: Swift object (URL-quoted).
        """
        self.app = app
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)
        self.object_name = unquote(object_name)
        env['HTTP_X_AUTH_TOKEN'] = token
        # PATH_INFO keeps the original (still-quoted) names.
        env['PATH_INFO'] = '/v1/%s/%s/%s' % (account_name, container_name,
                                             object_name)
def GETorHEAD(self, req):
resp = req.get_response(self.app)
status = resp.status_int
headers = resp.headers
app_iter = resp.app_iter
if 200 <= status < 300:
if 'acl' in req.GET:
return get_acl(self.account_name)
new_hdrs = {}
for key, val in headers.iteritems():
_key = key.lower()
if _key.startswith('x-object-meta-'):
new_hdrs['x-amz-meta-' + key[14:]] = val
elif _key in ('content-length', 'content-type',
'content-encoding', 'etag', 'last-modified'):
new_hdrs[key] = val
return Response(status=status, headers=new_hdrs, app_iter=app_iter)
elif status == 401:
return get_err_response('AccessDenied')
elif status == 404:
return get_err_response('NoSuchKey')
else:
return get_err_response('InvalidURI')
def HEAD(self, req):
"""
Handles HEAD Object request.
"""
return self.GETorHEAD(req)
def GET(self, req):
"""
Handles GET Object request.
"""
return self.GETorHEAD(req)
def PUT(self, req):
"""
Handles PUT Object and PUT Object (Copy) request.
"""
environ = req.environ
for key, value in environ.items():
if key.startswith('HTTP_X_AMZ_META_'):
del environ[key]
environ['HTTP_X_OBJECT_META_' + key[16:]] = value
elif key == 'HTTP_CONTENT_MD5':
environ['HTTP_ETAG'] = value.decode('base64').encode('hex')
elif key == 'HTTP_X_AMZ_COPY_SOURCE':
environ['HTTP_X_COPY_FROM'] = value
resp = req.get_response(self.app)
status = resp.status_int
headers = resp.headers
if status != 201:
if status == 401:
return get_err_response('AccessDenied')
elif status == 404:
return get_err_response('InvalidBucketName')
else:
return get_err_response('InvalidURI')
if 'HTTP_X_COPY_FROM' in environ:
body = '<CopyObjectResult>' \
'<ETag>"%s"</ETag>' \
'</CopyObjectResult>' % headers['ETag']
return Response(status=200, body=body)
return Response(status=200, etag=headers['ETag'])
def DELETE(self, req):
"""
Handles DELETE Object request.
"""
resp = req.get_response(self.app)
status = resp.status_int
if status not in (200, 204):
if status == 401:
return get_err_response('AccessDenied')
elif status == 404:
return get_err_response('NoSuchKey')
else:
return get_err_response('InvalidURI')
return Response(status=204)
class MultiPartObjectController(object):
    """Handles S3 multipart-upload (MPU) requests on objects.

    Upload state lives in a shadow container named
    ``MULTIPART_UPLOAD_PREFIX + <bucket>``:

      * ``<object>/<upload_id>/meta``     -- marker for an in-progress upload
      * ``<object>/<upload_id>/part/<n>`` -- the uploaded parts

    Completing the upload writes an ``X-Object-Manifest`` object into the
    regular bucket pointing at the part prefix.
    """

    def __init__(self, env, app, account_name, token, container_name,
                 object_name, **kwargs):
        self.app = app
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)
        self.object_name = unquote(object_name)
        # Preserved so CompleteMultipartUpload can echo the original S3
        # path back in the <Location> element.
        self.orig_path_info = env['PATH_INFO']
        env['HTTP_X_AUTH_TOKEN'] = token
        env['PATH_INFO'] = '/v1/%s/%s/%s' % (account_name, container_name,
                                             object_name)

    def GET(self, req):
        """
        Lists multipart uploads by uploadId (S3 List Parts).
        """
        # any operations with multipart buckets are not allowed to user
        check_container_name_no_such_bucket_error(self.container_name)
        upload_id = req.GET.get('uploadId')
        max_parts = req.GET.get('max-parts', '1000')
        part_number_marker = req.GET.get('part-number-marker', '')
        try:
            # upload_id is a uuid4 hex string; reject anything else early.
            int(upload_id, 16)
            max_parts = int(max_parts)
            if part_number_marker:
                part_number_marker = int(part_number_marker)
        except (TypeError, ValueError):
            return get_err_response('InvalidURI')
        object_name_prefix_len = len(self.object_name) + 1
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        meta_path = "%s%s/%s/meta" % (cont_path,
                                      self.object_name,
                                      upload_id)
        # The meta object is the flag for an in-progress upload.
        meta_resp = meta_request_head(req, meta_path, self.app)
        status = meta_resp.status_int
        if status != 200:
            return get_err_response('NoSuchUpload')
        list_req = req.copy()
        list_req.upath_info = cont_path
        list_req.GET.clear()
        list_req.GET['format'] = 'json'
        # NOTE(review): this prefix embeds cont_name while DELETE and the
        # marker below use "<object>/<upload_id>/..." -- confirm which form
        # matches the stored part object names.
        list_req.GET['prefix'] = "%s/%s/%s/part/" % (cont_name,
                                                     self.object_name,
                                                     upload_id)
        # Ask for one extra object so truncation can be detected.
        list_req.GET['limit'] = str(max_parts + 1)
        if part_number_marker:
            list_req.GET['marker'] = "%s/%s/part/%s" % (self.object_name,
                                                        upload_id,
                                                        part_number_marker)
        resp = list_req.get_response(self.app)
        status = resp.status_int
        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')
        objects = json.loads(resp.body)
        if len(objects) > max_parts:
            # BUGFIX: was `objects = objects.pop(-1)`, which rebound the
            # list to its last element and broke the indexing below.
            # Drop the extra sentinel entry; the (now) last listed part
            # becomes the next marker.
            objects.pop(-1)
            next_marker = objects[-1]['name'][object_name_prefix_len:]
            is_truncated = 'true'
        else:
            next_marker = ''
            is_truncated = 'false'
        if next_marker:
            # BUGFIX: format specifier was "%" (missing "s").
            next_marker = "<NextPartNumberMarker>%s</NextPartNumberMarker>" % \
                next_marker
        if part_number_marker:
            # BUGFIX: format specifier was "%" (missing "s").
            part_number_marker = "<PartNumberMarker>%s</PartNumberMarker>" % \
                part_number_marker
        # NOTE(review): the slice below assumes part names look like
        # "<object>/<part-number>"; with the prefix used above the slice
        # yields "<upload_id>/part/<n>" -- verify against stored names.
        parts = ''.join(("<Part>"
                         "<PartNumber>%s</PartNumber>"
                         "<LastModified>%sZ</LastModified>"
                         "<ETag>\"%s\"</ETag>"
                         "<Size>%s</Size>"
                         "</Part>" % (
                             obj['name'][object_name_prefix_len:],
                             obj['last_modified'][:-3],
                             obj['hash'],
                             obj['bytes']) for obj in objects))
        body = (
            "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
            "<ListPartsResult xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">"
            "<Bucket>%s</Bucket>"
            "<Key>%s</Key>"
            "<UploadId>%s</UploadId>"
            "<Initiator>"
            "<ID>%s</ID>"
            "<DisplayName>%s</DisplayName>"
            "</Initiator>"
            "<Owner>"
            "<ID>%s</ID>"
            "<DisplayName>%s</DisplayName>"
            "</Owner>"
            "<StorageClass>STANDARD</StorageClass>"
            "%s%s"
            "<MaxParts>%s</MaxParts>"
            "<IsTruncated>%s</IsTruncated>"
            "%s"
            "</ListPartsResult>" % (
                self.container_name,
                self.object_name,
                upload_id,
                self.account_name,
                self.account_name,
                self.account_name,
                self.account_name,
                part_number_marker,
                next_marker,
                max_parts,
                is_truncated,
                parts,
            ))
        return Response(status=200,
                        body=body,
                        content_type='application/xml')

    def post_uploads_container_request(self, req, cont_path):
        """Method used to create a container for MPU."""
        cont_req = req.copy()
        cont_req.method = 'PUT'
        cont_req.upath_info = cont_path
        cont_req.GET.clear()
        return cont_req.get_response(self.app)

    def post_uploads_put_meta_req(self, req, cont_path, upload_id):
        """Method to create a MPU metafile."""
        meta_req = req.copy()
        meta_req.method = 'PUT'
        meta_req.upath_info = "%s%s/%s/meta" % (cont_path,
                                                self.object_name,
                                                upload_id)
        # Stash S3 user metadata on the metafile so it can be copied onto
        # the manifest at completion time ('x-amz-meta-' is 11 chars).
        for header, value in meta_req.headers.items():
            if header.lower().startswith('x-amz-meta-'):
                meta_req.headers['X-Object-Meta-Amz-' + header[11:]] = \
                    value
        return meta_req.get_response(self.app)

    def post_uploads(self, req):
        """
        Called if POST with 'uploads' query string was received.
        Creates metafile which is used as a flag on uncompleted MPU.
        Initiates multipart upload.
        """
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        cont_req = req.copy()
        cont_req.method = 'HEAD'
        cont_req.upath_info = cont_path
        cont_req.GET.clear()
        cont_resp = cont_req.get_response(self.app)
        status = cont_resp.status_int
        if status == 404:
            # creating container for MPU
            cont_resp = self.post_uploads_container_request(req, cont_path)
            status = cont_resp.status_int
        # 201 = container just created, 204 = HEAD on existing container.
        if status not in (201, 204):
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')
        upload_id = uuid.uuid4().hex
        meta_resp = self.post_uploads_put_meta_req(req, cont_path, upload_id)
        status = meta_resp.status_int
        if status != 201:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')
        body = ('<?xml version="1.0" encoding="UTF-8"?>'
                '<InitiateMultipartUploadResult '\
                'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                '<Bucket>%s</Bucket>'
                '<Key>%s</Key>'
                '<UploadId>%s</UploadId>'
                '</InitiateMultipartUploadResult>' %
                (self.container_name, self.object_name, upload_id))
        return Response(status=200,
                        body=body,
                        content_type='application/xml')

    def post_uploadId(self, req):
        """
        Called if POST with 'uploadId' query string was received.
        Deletes metafile after completion of MPU.
        Completes multipart upload.
        """
        upload_id = req.GET.get('uploadId')
        try:
            int(upload_id, 16)
        except (TypeError, ValueError):
            return get_err_response('InvalidURI')
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        meta_path = "%s%s/%s/meta" % (cont_path,
                                      self.object_name,
                                      upload_id)
        meta_resp = meta_request_head(req, meta_path, self.app)
        status = meta_resp.status_int
        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('NoSuchUpload')
            else:
                return get_err_response('InvalidURI')
        # TODO: Validate uploaded parts.
        manifest_path = MULTIPART_UPLOAD_PREFIX + \
            "%s/%s/%s/part/" % (self.container_name,
                                self.object_name,
                                upload_id)
        manifest_req = req.copy()
        manifest_req.method = 'PUT'
        manifest_req.GET.clear()
        manifest_req.headers['X-Object-Manifest'] = manifest_path
        # Copy the user metadata stashed on the metafile onto the manifest
        # ('x-object-meta-amz-' is 18 characters).
        for header, value in meta_resp.headers.iteritems():
            if header.lower().startswith('x-object-meta-amz-'):
                manifest_req.headers['x-amz-meta-' + header[18:]] = value
        manifest_resp = manifest_req.get_response(self.app)
        status = manifest_resp.status_int
        if status == 201:
            # Upload complete: remove the in-progress metafile.
            finish_req = req.copy()
            finish_req.method = 'DELETE'
            finish_req.upath_info = meta_path
            finish_req.body = ''
            finish_req.GET.clear()
            finish_resp = finish_req.get_response(self.app)
            status = finish_resp.status_int
            if status not in (201, 204):
                if status == 401:
                    return get_err_response('AccessDenied')
                elif status == 404:
                    return get_err_response('InvalidBucketName')
                else:
                    return get_err_response('InvalidURI')
            body = ('<?xml version="1.0" encoding="UTF-8"?>'
                    '<CompleteMultipartUploadResult '\
                    'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                    '<Location>%s</Location>'
                    '<Bucket>%s</Bucket>'
                    '<Key>%s</Key>'
                    '<ETag>%s</ETag>'
                    '</CompleteMultipartUploadResult>' %
                    (self.orig_path_info,
                     self.container_name,
                     self.object_name,
                     manifest_resp.headers['ETag']))
            return Response(status=200,
                            body=body,
                            content_type='application/xml')
        # BUGFIX: a failed manifest PUT previously fell off the end of the
        # method and returned None; map it to an explicit S3 error.
        if status == 401:
            return get_err_response('AccessDenied')
        elif status == 404:
            return get_err_response('InvalidBucketName')
        return get_err_response('InvalidURI')

    def POST(self, req):
        """
        Initiate and complete multipart upload.
        """
        # any operations with multipart buckets are not allowed to user
        check_container_name_invalid_bucket_name_error(self.container_name)
        if 'uploads' in req.GET:
            return self.post_uploads(req)
        elif 'uploadId' in req.GET:
            return self.post_uploadId(req)
        return get_err_response('InvalidURI')

    def PUT(self, req):
        """
        Upload part of a multipart upload.
        """
        upload_id = req.GET.get('uploadId')
        part_number = req.GET.get('partNumber', '')
        try:
            int(upload_id, 16)
        except (TypeError, ValueError):
            return get_err_response('InvalidURI')
        if not part_number.isdigit():
            return get_err_response('InvalidURI')
        # any operations with multipart buckets are not allowed to user
        check_container_name_invalid_bucket_name_error(self.container_name)
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        meta_path = "%s%s/%s/meta" % (cont_path, self.object_name, upload_id)
        meta_resp = meta_request_head(req, meta_path, self.app)
        status = meta_resp.status_int
        if status != 200:
            return get_err_response('NoSuchUpload')
        # Redirect the part body into the shadow container.
        req = req.copy()
        req.upath_info = "%s%s/%s/part/%s" % (cont_path, self.object_name,
                                              upload_id, part_number)
        req.GET.clear()
        resp = req.get_response(self.app)
        status = resp.status_int
        headers = resp.headers
        if status != 201:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')
        if 'HTTP_X_COPY_FROM' in req.environ:
            # Upload Part (Copy) returns an XML body instead of a bare ETag.
            body = '<CopyObjectResult>' \
                   '<ETag>"%s"</ETag>' \
                   '</CopyObjectResult>' % resp.headers['ETag']
            return Response(status=200, body=body)
        return Response(status=200, etag=resp.headers['ETag'])

    def DELETE(self, req):
        """
        Aborts multipart upload by uploadId.
        """
        upload_id = req.GET.get('uploadId')
        try:
            int(upload_id, 16)
        except (TypeError, ValueError):
            return get_err_response('InvalidURI')
        # any operations with multipart buckets are not allowed to user
        check_container_name_no_such_bucket_error(self.container_name)
        cont_name = MULTIPART_UPLOAD_PREFIX + self.container_name
        cont_path = "/v1/%s/%s/" % (self.account_name, cont_name)
        prefix = "%s/%s/" % (self.object_name, upload_id)
        # List everything belonging to this upload (meta + parts).
        list_req = req.copy_get()
        list_req.upath_info = cont_path
        list_req.GET.clear()
        list_req.GET['format'] = 'json'
        list_req.GET['prefix'] = prefix
        list_resp = list_req.get_response(self.app)
        status = list_resp.status_int
        if status != 200:
            if status == 401:
                return get_err_response('AccessDenied')
            elif status == 404:
                return get_err_response('InvalidBucketName')
            else:
                return get_err_response('InvalidURI')
        objects = json.loads(list_resp.body)
        for obj in objects:
            obj_req = req.copy()
            obj_req.method = 'DELETE'
            obj_req.upath_info = "%s%s" % (cont_path, obj['name'])
            obj_req.GET.clear()
            obj_resp = obj_req.get_response(self.app)
            status = obj_resp.status_int
            if status not in (200, 204):
                if status == 401:
                    return get_err_response('AccessDenied')
                elif status == 404:
                    return get_err_response('NoSuchKey')
                else:
                    return get_err_response('InvalidURI')
        return Response(status=204)
class ObjectController(NormalObjectController, MultiPartObjectController):
    """Dispatches object requests to the normal or multipart controller."""

    def __init__(self, *args, **kwargs):
        # MultiPartObjectController.__init__ also sets the common
        # attributes used by NormalObjectController.
        MultiPartObjectController.__init__(self, *args, **kwargs)

    def GET(self, req):
        handler = (MultiPartObjectController if 'uploadId' in req.GET
                   else NormalObjectController)
        return handler.GET(self, req)

    def PUT(self, req):
        handler = (MultiPartObjectController if 'uploadId' in req.GET
                   else NormalObjectController)
        return handler.PUT(self, req)

    def POST(self, req):
        if 'uploadId' in req.GET or 'uploads' in req.GET:
            return MultiPartObjectController.POST(self, req)
        return NormalObjectController.POST(self, req)

    def DELETE(self, req):
        if 'uploadId' in req.GET:
            return MultiPartObjectController.DELETE(self, req)
        # A completed MPU object is a manifest; clean up its parts too.
        head_req = req.copy_get()
        head_req.method = 'HEAD'
        head_req.GET.clear()
        head_resp = head_req.get_response(self.app)
        if (head_resp.status_int == 200
                and 'X-Object-Manifest' in head_resp.headers):
            manifest = head_resp.headers['X-Object-Manifest']
            # Manifest looks like "<mpu-cont>/<object>/<upload_id>/part/";
            # NOTE(review): index 2 assumes the object name has no '/'.
            mpu_upload_id = manifest.split('/')[2]
            part_del_req = req.copy()
            part_del_req.GET['uploadId'] = mpu_upload_id
            # Best effort: failures here are ignored, as before.
            MultiPartObjectController.DELETE(self, part_del_req)
        return NormalObjectController.DELETE(self, req)
class Swift3Middleware(object):
    """Swift3 S3 compatibility middleware."""

    def __init__(self, app, conf, *args, **kwargs):
        self.app = app

    def get_controller(self, path, params):
        """Map a request path to a (controller class, kwargs) pair."""
        container, obj = split_path(path, 0, 2, True)
        ctl_kwargs = dict(container_name=container, object_name=obj)
        if container and obj:
            return ObjectController, ctl_kwargs
        if container:
            return BucketController, ctl_kwargs
        return ServiceController, ctl_kwargs

    def __call__(self, env, start_response):
        req = Request(env)
        # Pre-signed URL: fold the query parameters back into the headers
        # an ordinary signed request would carry.
        if 'AWSAccessKeyId' in req.GET:
            try:
                req.headers['Date'] = req.GET['Expires']
                req.headers['Authorization'] = \
                    'AWS %(AWSAccessKeyId)s:%(Signature)s' % req.GET
            except KeyError:
                return get_err_response('InvalidArgument')(env, start_response)
        if 'Authorization' not in req.headers:
            # Not an S3 request: pass it down the pipeline untouched.
            return self.app(env, start_response)
        try:
            account, signature = \
                req.headers['Authorization'].split(' ')[-1].rsplit(':', 1)
        except Exception:
            return get_err_response('InvalidArgument')(env, start_response)
        try:
            controller_cls, path_parts = self.get_controller(req.path, req.GET)
        except ValueError:
            return get_err_response('InvalidURI')(env, start_response)
        # The canonical string doubles as the auth token for Swift.
        token = base64.urlsafe_b64encode(canonical_string(req))
        controller = controller_cls(req.environ, self.app, account, token,
                                    **path_parts)
        handler = getattr(controller, req.method, None)
        if handler is None:
            return get_err_response('InvalidURI')(env, start_response)
        return handler(req)(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Standard filter factory to use the middleware with paste.deploy"""
    # Merge local settings over the global ones without mutating either.
    conf = dict(global_conf)
    conf.update(local_conf)

    def swifts3_filter(app):
        return Swift3Middleware(app, conf)

    return swifts3_filter
|
|
#
# This is a script to install yotta. Eventually I would like to add all OS's to this script
# but for now it just works on windows.
#
# There are some janky hacks in place holding it together, such as opening
# IE to get certificates that Windows doesn't have. Currently the script
# just downloads the dependencies and has the user run through the
# click-throughs. Eventually I would like to hack together some silent
# installers, which would involve some exe brute forcing and some registry hacking.
#
# copyright ARMmbed 2014
#
# Author: Austin Blackstone
# Date: December 17,2014
import math
import sys
import pip
import os
import subprocess
#
# Downloads to download
#
# Note that on windows the '.exe' extension is necessary to run with a subprocess
# Maps a local download filename to its source URL, keyed by architecture.
# "all" applies to both 32- and 64-bit Windows; the ".exe" extension is
# required so the files can later be launched via subprocess.
downloads = {
    "all":{
        "cmake.exe":"http://www.cmake.org/files/v3.2/cmake-3.2.1-win32-x86.exe",
        "ninja.zip":"https://github.com/martine/ninja/releases/download/v1.5.3/ninja-win.zip",
        "gcc.exe":"https://launchpad.net/gcc-arm-embedded/4.8/4.8-2014-q3-update/+download/gcc-arm-none-eabi-4_8-2014q3-20140805-win32.exe"
    },
    "64bit":{
        # no 64-bit-only downloads at present
    },
    "32bit":{
        # no 32-bit-only downloads at present
    }
}
#
# Prompt to re-install / download packages
#
def shouldInstall(binName):
    """Decide whether binName should be (re)installed.

    If the tool is already on PATH, ask the user; only an explicit 'y'
    answer triggers a reinstall. Returns True to install, False to skip.
    """
    import shutil
    import shutilwhich  # backport of shutil.which for Python 2
    question = "\n\t"+binName+" already exists on your PATH, would you like to reinstall it? (y/n): "
    choice = ""
    if shutil.which(binName):
        sys.stdout.write(question)
        choice = raw_input().lower()  # check for a y for yes, all others are no
        # BUGFIX: removed a stray no-op `dir(choice)` call left over from
        # debugging.
        if choice == 'y':
            return True  # re-install bin
        print("\n\tSkipping installation of "+binName)
        return False  # skip installation
    return True  # bin does not exist, install it
#
# Cygwin Install Script - TODO
#
def cygwin():
    """Placeholder: Cygwin installs are not automated yet."""
    print("Cygwin is not currently supported. Please install for the windows command line. See http://docs.yottabuild.org/#installing-on-windows for details.")
#
# Linux Install Script - TODO
#
def linux():
    """Placeholder: Linux installs are not automated yet."""
    print("For Linux install instructions please see http://docs.yottabuild.org/#installing-on-linux")
#
# OSX Install Script - TODO
#
def osx():
    """Placeholder: OS X installs are not automated yet."""
    print("For OSX install instructions please see http://docs.yottabuild.org/#installing-on-osx")
#
# Windows Install Script
#
def windows():
    """Download and interactively install the yotta toolchain on Windows.

    Downloads the tools listed in `downloads`, then launches each
    installer, leaving the user to click through the wizards.
    """
    import wget
    import shutil
    import shutilwhich  # backport of shutil.which for Python 2
    print("\nOpening an Internet Explorer window to launchpad.net to grab security certificate to download GCC.");
    w = subprocess.Popen(r'"C:\Program Files\Internet Explorer\iexplore.exe" https://launchpad.net/' ); #hack to get the security certificate in place so we can dowload the file.
    print("\nDownloading dependencies...");
    # Downloads for both 64bit / 32bit
    for key in downloads['all']:
        if os.path.isfile(key):
            print("\n\t" +key +" already exists in this folder. [Skipped]");
        else:
            print("\n\tDownloading " +key);
            wget.download(downloads['all'][key],key);
    w.kill(); #close the internet explorer window hack
    # 64bit Downloads
    # NOTE(review): sys.maxsize reflects the Python build's bitness, not
    # necessarily the OS bitness -- confirm this is the intended check.
    if sys.maxsize > math.pow(2,32):
        print("\nWindows 64bit detected");
        for key in downloads['64bit']:
            if os.path.isfile(key):
                print("\n\t" +key +" already exists in this folder.[Skipped]");
            else:
                print("\n\tDownloading " +key );
                wget.download(downloads['64bit'][key],key);
    # 32bit Downloads
    elif sys.maxsize <= math.pow(2,32):
        print("\nWindows 32bit detected");
        for key in downloads['32bit']:
            if os.path.isfile(key):
                print("\n\t" +key +" already exists in this folder. [Skipped]");
            else:
                print("\n\tDownloading " +key);
                wget.download(downloads['32bit'][key],key);
    # Install the Packages
    print("\nInstalling packages: Please Follow the Click Throughs ");
    #Yotta
    if shouldInstall("yotta"):
        print("\n\tInstalling Yotta from pip ...");
        x = subprocess.call(['pip','install','-qU','yotta']);
        if x!= 0:
            print("\t[**ERROR**]: Yotta install failed. Please run 'pip install yotta -U' from the command line");
        else:
            print("\t[Installed]");
    #cmake
    if shouldInstall("cmake"):
        print("\n\tInstalling Cmake: Please allow admin permissions and check 'Add CMake to system PATH for all users' option");
        x = subprocess.call(['cmake.exe'], shell=True);
        if x!=0:
            print("\t[**ERROR**]: Cmake install failed, Please re-run installer and give admin rights to installer");
        else:
            print("\t[Installed]");
    #gcc-arm-none-eabi
    if shouldInstall("arm-none-eabi-gcc"):
        print("\n\tInstalling gcc-none-eabi-gcc : Please allow admin permissions and check 'Add path to enviroment variable' box");
        x = subprocess.call(['gcc.exe'], shell=True);
        if x!=0:
            print("\t[**ERROR**]: gcc-none-eabi-gcc install failed, Please re-run installer and give admin rights to installer");
        else:
            print("\t[Installed]");
    #ninja
    if shouldInstall("ninja"):
        import zipfile
        import shutil
        print("\n\tInstalling Ninja...");
        # Ninja ships as a bare exe; unpack it and copy it to c:/ninja,
        # which the user must add to PATH manually.
        zipfile.ZipFile('ninja.zip').extract('ninja.exe');
        if not os.path.exists('c:/ninja'):
            os.makedirs('c:/ninja');
        shutil.copy2('ninja.exe','c:/ninja/ninja.exe')
        print("\t**REQUIRED:** Add c:/ninja/ to your PATH to complete ninja install")
#
# install extra packages for python
#
def _ensure_package(name, missing_msg, error_msg):
    """Import `name`, quietly pip-installing it first if it is missing."""
    try:
        __import__(name)
    except ImportError:
        print(missing_msg)
        x = subprocess.call(['pip', 'install', '-q', name])
        if x != 0:
            print(error_msg)
            sys.exit()
        else:
            print("[Installed]")


def bootstrap():
    """Ensure pip and the helper packages used by the installers exist.

    Checks that pip is importable, then installs `wget` (for downloads)
    and `shutilwhich` (shutil.which backport; Python 3 has it built in,
    Python 2.7 does not). Exits the process on any failure.
    """
    # check for Pip
    try:
        import pip
    except ImportError:
        # BUGFIX: removed an unreachable `return` that followed sys.exit().
        print("\n****ERROR: Pip is not installed on this system. Please update your python install and / or install Pip, then retry***");
        sys.exit();
    # install wget if it doesnt already exist
    _ensure_package('wget',
                    "\nWget package missing, installing now...",
                    "\t**ERROR** wget did not install correctly!")
    # install shutil.which if it doesnt already exist
    _ensure_package('shutilwhich',
                    "\nshutilwhich package missing, installing now...",
                    "\t**ERROR** shutilwhich did not install correctly!")
#
# The main function figures out what OS is running and calls appropriate handler
#
def main():
    """Figure out which OS is running and call the matching installer."""
    handlers = {
        "win32" : windows, # Windows32 and 64bit
        "cygwin": cygwin,  # cygwin on windows
        "darwin": osx,     # Mac OSX
        "linux" : linux    # Linux
    }
    handler = handlers.get(sys.platform)
    if handler is None:
        print("Your OS is not supported!")
        return
    bootstrap()
    handler()


if __name__ == "__main__":
    main()
|
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training on various tasks using a generalized form of FedAvg.
Specifically, we create (according to flags) an iterative processes that allows
for client and server learning rate schedules, as well as various client and
server optimization methods. For more details on the learning rate scheduling
and optimization methods, see `shared/optimizer_utils.py`. For details on the
iterative process, see `utils/fed_avg_schedule.py`.
"""
import collections
from typing import Callable
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from generalization.tasks import cifar100_image
from generalization.tasks import emnist_character
from generalization.tasks import shakespeare_character
from generalization.tasks import stackoverflow_word
from generalization.tasks import training_specs
from generalization.utils import fed_avg_schedule
from generalization.utils import federated_training_loop
from generalization.utils import metric_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
# Collect optimizer-related flags under a single hparam recorder so they
# can later be looked up (and pruned of unused entries) as a group.
with utils_impl.record_hparam_flags() as optimizer_flags:
  # Defining optimizer flags
  optimizer_utils.define_optimizer_flags('client')
  optimizer_utils.define_optimizer_flags('server')
  optimizer_utils.define_lr_schedule_flags('client')
  optimizer_utils.define_lr_schedule_flags('server')
# Flags shared by every task; recorded as a group for hparam logging.
# This update only fixes defects in the user-facing help strings
# (missing spaces at string-concatenation boundaries and typos);
# no flag names, defaults, or bounds are changed.
with utils_impl.record_hparam_flags() as shared_flags:
  # Federated training hyperparameters
  flags.DEFINE_integer('client_epochs_per_round', 1,
                       'Number of epochs in the client to take per round.')
  flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
  flags.DEFINE_integer(
      'train_clients_per_round', 10,
      'How many training clients to sample per round during training.')

  # Training loop configuration
  flags.DEFINE_string(
      'experiment_name', None, 'The name of this experiment. Will be appended '
      'to --root_output_dir to separate experiment results.')
  flags.mark_flag_as_required('experiment_name')
  flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
                      'Root directory for writing experiment output.')
  flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
  flags.DEFINE_integer(
      'rounds_per_eval', 1,
      'How often to evaluate the global model on the validation dataset.')
  flags.DEFINE_integer('rounds_per_checkpoint', 50,
                       'How often to checkpoint the global model.')
  flags.DEFINE_string(
      'sql_database', None,
      'An optional str indicating the data source. If set to None, the TFF '
      'original data source will be used. Otherwise the program will load '
      'SQL-based ClientData from `sql_database`.')
  flags.DEFINE_float(
      'unpart_clients_proportion',
      None,
      'An optional floating number in (0.0, 1.0) representing the proportion '
      'of un-participating clients among the total clients. '
      'If sql_database is not None, or if sql_database is None but the TFF '
      'original federated dataset source does *not* provide a vertical split, '
      'then `unpart_clients_proportion` must not be None. In this case, a '
      'random set of clients will be drawn from the total sets of clients. '
      'If sql_database is None, and the TFF original federated dataset source '
      'provides a vertical split, then `unpart_clients_proportion` must be '
      'None, and the original vertical split will be used.',
      lower_bound=0.0,
      upper_bound=1.0)
  flags.DEFINE_integer(
      'train_val_ratio_intra_client',
      None,
      'An optional integer representing the ratio of train-validation '
      'split for each client. '
      'If sql_database is not None, or if sql_database is None but the TFF '
      'original federated dataset does *not* provide a horizontal split, '
      'then `train_val_ratio_intra_client` must not be None. In this case, for '
      'each client, the validation dataset contains 1/(train_val_ratio+1) of '
      'total samples, round up if fractional. The training dataset contains '
      'the rest of samples. '
      'If sql_database is None, and the TFF original federated dataset '
      'provides a horizontal split, then `train_val_ratio_intra_client` '
      'must be None, and the TFF original horizontal split will be used.',
      lower_bound=1)
  flags.DEFINE_float(
      'part_clients_subsampling_rate', 1.0,
      'A floating number in (0.0, 1.0] representing the actual proportion of '
      'candidate participating clients. If < 1.0, a random subset of clients '
      'will be drawn from the "candidate participating clients" that become '
      'the actual participating clients. This attribute is mostly intended for '
      'the ablation study on the effect of participation rate.')
  flags.DEFINE_boolean(
      'include_unpart_train_for_val', False,
      'Whether to include the training dataset of unparticipated clients for '
      'validation. Please refer to training_specs.py for the detailed doc.')
  flags.DEFINE_integer(
      'max_elements_per_client',
      None,
      'An optional integer controlling the maximum number of examples to take '
      'per client. If none, keep all elements for each client. This is intended '
      'primarily to contend with the small set of clients with tens of '
      'thousands of examples. This truncation is applied after all the previous '
      'splits, and effective for all the three-way split.',
      lower_bound=1)

  # Evaluation configuration
  flags.DEFINE_integer(
      'part_clients_per_eval', None,
      'An optional integer representing the number of clients taken from '
      'training dataset for evaluation per evaluation round, used for both '
      'training and validation. '
      'If `None`, all training clients will be used.')
  flags.DEFINE_integer(
      'unpart_clients_per_eval', None,
      'An optional integer representing the number of clients taken from '
      'validation dataset. If `None`, all validation clients will be used.')
  flags.DEFINE_integer(
      'test_clients_for_eval', None,
      'An optional integer representing the number of clients taken from '
      'test dataset. If `None`, all validation clients will be used.')
  flags.DEFINE_boolean(
      'resample_eval_clients', False,
      'Whether to resample validation clients every evaluation round.')
  flags.DEFINE_integer(
      'eval_client_batch_size', 16,
      'An integer representing the batch size used on validation and test clients.'
  )
  flags.DEFINE_integer(
      'shared_random_seed', 1,
      'An optional integer used to seed the pseudo-random number generator. '
      'The seeds are shared across the following functions: '
      '1) Sampling training client for each training round. '
      '2) Sampling training, validation and test clients for evaluation rounds. '
      'If `None`, no seed is used. '
      'Note that specifying `shared_random_seed` does not result in the same '
      'clients being sampled every round in a given experiment.')
# Task specific flags are defined in the corresponding task definition module.
# Maps a task name to the flag-group recorder exported by its module.
TASK_FLAGS = collections.OrderedDict(
    cifar100_image=cifar100_image.cifar100_image_flags,
    emnist_character=emnist_character.emnist_character_flags,
    stackoverflow_word=stackoverflow_word.stackoverflow_word_flags,
    shakespeare_character=shakespeare_character.shakespeare_character_flags,
)

# Valid values for the --task flag.
_SUPPORTED_TASKS = list(TASK_FLAGS.keys())
# Record the task selector flag in its own hparam group.
with utils_impl.record_hparam_flags() as task_flags:
  flags.DEFINE_enum('task', None, _SUPPORTED_TASKS,
                    'Which task to perform training on.')

# Handle to all flags defined above (and by absl itself).
FLAGS = flags.FLAGS
def _get_task_kwargs_from_flags():
  """Get task-specific kwargs from FLAGS."""
  # Length of the per-task flag-name prefix that must be stripped off.
  prefix_lengths = collections.OrderedDict(
      cifar100_image=len(cifar100_image.FLAG_PREFIX),
      emnist_character=len(emnist_character.FLAG_PREFIX),
      shakespeare_character=len(shakespeare_character.FLAG_PREFIX),
      stackoverflow_word=len(stackoverflow_word.FLAG_PREFIX),
  )
  chosen_task = FLAGS.task
  strip = prefix_lengths[chosen_task]
  flag_values = utils_impl.lookup_flag_values(TASK_FLAGS[chosen_task])
  return {name[strip:]: value for name, value in flag_values.items()}
def _get_hparam_dict_from_flags():
  """Creates an ordered dictionary of hyperparameter flags."""
  hparams = utils_impl.lookup_flag_values(shared_flags)
  # Keep only the optimizer flags relevant to the chosen optimizers.
  optimizer_values = utils_impl.lookup_flag_values(optimizer_flags)
  for placement in ('client', 'server'):
    optimizer_values = optimizer_utils.remove_unused_flags(
        placement, optimizer_values)
  hparams.update(optimizer_values)
  chosen_task = FLAGS.task
  if chosen_task in TASK_FLAGS:
    hparams.update(utils_impl.lookup_flag_values(TASK_FLAGS[chosen_task]))
  return hparams
def main(argv):
  """Run federated training as configured entirely by absl flags."""
  # All configuration comes from flags; positional CLI args are rejected.
  if len(argv) > 1:
    raise app.UsageError('Expected no command-line arguments, '
                         'got: {}'.format(argv))
  # Optimizer constructors and learning-rate schedules are built from flags.
  client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
  server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
  client_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('client')
  server_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('server')

  def iterative_process_builder(
      model_fn: Callable[[],
                         tff.learning.Model]) -> tff.templates.IterativeProcess:
    """Creates an iterative process using a given TFF `model_fn`.

    Args:
      model_fn: A no-arg function returning a `tff.learning.Model`.

    Returns:
      A `tff.templates.IterativeProcess`.
    """
    if FLAGS.task == 'stackoverflow_word':

      # Weight client updates by token count instead of example count.
      def client_weight_fn(local_outputs):
        return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
    else:
      client_weight_fn = None

    return fed_avg_schedule.build_fed_avg_process(
        model_fn=model_fn,
        client_optimizer_fn=client_optimizer_fn,
        client_lr=client_lr_schedule,
        server_optimizer_fn=server_optimizer_fn,
        server_lr=server_lr_schedule,
        client_weight_fn=client_weight_fn)

  task_spec = training_specs.TaskSpecFederated(
      iterative_process_builder=iterative_process_builder,
      client_epochs_per_round=FLAGS.client_epochs_per_round,
      client_batch_size=FLAGS.client_batch_size,
      train_clients_per_round=FLAGS.train_clients_per_round,
      rounds_per_eval=FLAGS.rounds_per_eval,
      # The remaining attributes are for base class TaskSpec.
      sql_database=FLAGS.sql_database,
      unpart_clients_proportion=FLAGS.unpart_clients_proportion,
      train_val_ratio_intra_client=FLAGS.train_val_ratio_intra_client,
      part_clients_subsampling_rate=FLAGS.part_clients_subsampling_rate,
      include_unpart_train_for_val=FLAGS.include_unpart_train_for_val,
      max_elements_per_client=FLAGS.max_elements_per_client,
      part_clients_per_eval=FLAGS.part_clients_per_eval,
      unpart_clients_per_eval=FLAGS.unpart_clients_per_eval,
      test_clients_for_eval=FLAGS.test_clients_for_eval,
      resample_eval_clients=FLAGS.resample_eval_clients,
      eval_client_batch_size=FLAGS.eval_client_batch_size,
      shared_random_seed=FLAGS.shared_random_seed)
  # Dispatch to the per-task configuration function chosen by --task.
  task_config_fn_dict = collections.OrderedDict(
      cifar100_image=cifar100_image.configure_training_federated,
      emnist_character=emnist_character.configure_training_federated,
      shakespeare_character=shakespeare_character.configure_training_federated,
      stackoverflow_word=stackoverflow_word.configure_training_federated,
  )
  config_fn = task_config_fn_dict[FLAGS.task]
  task_kwargs = _get_task_kwargs_from_flags()
  logging.info('Starting configuring task.')
  runner_spec = config_fn(task_spec, **task_kwargs)
  logging.info('Finished configuring task.')
  # Record hyperparameters, then set up checkpointing and metrics output.
  metric_utils.write_hparams(_get_hparam_dict_from_flags(),
                             FLAGS.root_output_dir, FLAGS.experiment_name)
  checkpoint_manager, metrics_managers = metric_utils.configure_default_managers(
      FLAGS.root_output_dir, FLAGS.experiment_name)
  logging.info('Starting `federated_training_loop.run_simulation`.')
  federated_training_loop.run_simulation(
      process=runner_spec.iterative_process,
      client_selection_fn=runner_spec.client_datasets_fn,
      total_rounds=FLAGS.total_rounds,
      part_train_eval_fn=runner_spec.part_train_eval_fn,
      part_val_fn=runner_spec.part_val_fn,
      unpart_fn=runner_spec.unpart_fn,
      test_fn=runner_spec.test_fn,
      program_state_manager=checkpoint_manager,
      rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
      metrics_managers=metrics_managers,
  )
# Script entry point: absl parses the flags and then invokes main(argv).
if __name__ == '__main__':
  app.run(main)
|
|
#!/usr/bin/env python
"""SeqAn documentation raw object representation.
This is the direct representation as it can be determined from the embedded
Doxygen-style comments without the interpretation of commands within clauses
and cross-linking.
"""
import textwrap
import dox_tokens
import raw_doc
class DoxFormatter(object):
    """Renders Doxygen-style commands and paragraphs wrapped to a fixed width."""

    def __init__(self, width=77):
        # Target column width for all wrapped output.
        self.width = width

    def formatCommand(self, name, text, leading=None):
        """RawReturn string with a formatted command.

        The general format is "@$name $leading $text" where the text is wrapped
        to the end of leading.
        """
        if leading:
            prefix_parts = ['@', name, ' ', leading, ' ']
        else:
            prefix_parts = ['@', name, ' ']
        prefix_len = len(''.join(prefix_parts))
        continuation_indent = ' ' * prefix_len
        lines = textwrap.wrap(text, self.width - prefix_len)
        out = list(prefix_parts)
        if lines:
            out.append(lines[0])
        # Continuation lines are indented to align after the command prefix.
        for line in lines[1:]:
            out += ['\n', continuation_indent, line]
        return ''.join(out) + '\n'

    def formatParagraph(self, text):
        """Return the paragraph wrapped to self.width, newline-terminated."""
        return '\n'.join(textwrap.wrap(text, self.width)) + '\n'
class RawText(object):
    """List of tokens with easy concatenation into a string.

    This type is used for collecting lists of tokens.

    @ivar tokens: The list of token objects.
    """

    def __init__(self, tokens=()):
        # Fixed: default was the mutable literal []; an immutable tuple avoids
        # the shared-mutable-default pitfall.  The argument is copied either way.
        self.tokens = list(tokens)

    def append(self, token):
        """Append the token to the list of tokens.

        @param token: The lexer.Token object to add.
        @return: Nothing
        """
        self.tokens.append(token)

    @property
    def empty(self):
        """RawReturns whether the token set is empty.

        @return: Whether or not the token list is empty.
        """
        return not bool(self.tokens)

    @property
    def text(self):
        """RawReturns the concatenated tokens' text.

        @return: The concatenated tokens' text.
        """
        return ''.join([x.val for x in self.tokens])

    def __eq__(self, other):
        # Foreign objects compare unequal instead of raising.
        if not hasattr(other, 'tokens'):
            return False
        return self.tokens == other.tokens
class RawDoc(object):
    """The documentation: a collection of top-level documentation objects.

    @ivar entries List of RawEntry objects.
    """

    def __init__(self):
        self.entries = []

    def merge(self, other_doc):
        """Append all entries of other_doc to this document."""
        for e in other_doc.entries:
            self.addEntry(e)

    def addEntry(self, entry):
        self.entries.append(entry)

    def getFormatted(self, width=77):
        """Get formatted and normalized in dox format."""
        formatter = DoxFormatter(width)
        # Fixed: removed the dead 'first' flag, which was assigned in the loop
        # but never read.
        res = [entry.getFormatted(formatter) for entry in self.entries]
        return '\n\n'.join(res)
class RawEntry(object):
    """One top-level entry of the documentation.

    @ivar first_token The first token for this entry.
    @ivar name The identifier of the entry.
    @ivar title The title of the entry.
    @ivar briefs A list of objects with brief summaries of the entry.
    @ivar body A RawBody object with the entry's documentation.
    @ivar sees A list of RawSee objects.
    @ivar command The name of the command starting the entry type.
    """

    def __init__(self, first_token, briefs=[], command='<entry>'):
        # NOTE(review): mutable default argument; harmless here only because
        # it is copied via list() below.
        self.first_token = first_token
        self.name = RawText()
        self.title = RawText()
        self.briefs = list(briefs)
        self.body = RawBody()
        self.sees = []
        self.command = command

    def addBrief(self, b):
        """Append a @brief clause object."""
        self.briefs.append(b)

    def addSee(self, see):
        """Append a @see clause, dropping its trailing whitespace tokens."""
        while see.text.tokens and see.text.tokens[-1].type in dox_tokens.WHITESPACE:
            see.text.tokens.pop()
        self.sees.append(see)

    @classmethod
    def entryTypes(cls):
        """RawReturns iterable with all entry types."""
        res = ('concept', 'class', 'function', 'metafunction', 'page', 'enum', 'var',
               'tag', 'defgroup', 'macro', 'enum_value')
        return res

    def addParagraph(self, p):
        """Append a paragraph to the entry's body."""
        self.body.addParagraph(p)

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        # "@command title name" when a title is present, else "@command name".
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        # Each section is preceded by a blank line only when non-empty.
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res += ['\n', self.body.getFormatted(formatter)]
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawCodeEntry(RawEntry):
    """RawDoc for one code entry concept having a signature.

    @ivar signatures A list of RawSignature objects.
    @ivar headerfiles A list of @headerfile clause objects.
    @ivar deprecation_msgs A list of @deprecated clause objects.
    @ivar notes A list of @note clause objects.
    @ivar warnings A list of @warning clause objects.
    @ivar akas A list of @aka clause objects.
    @ivar internals A list of @internal clause objects.
    """

    def __init__(self, first_token, briefs=[], command='<code entry>'):
        RawEntry.__init__(self, first_token, briefs=briefs, command=command)
        self.signatures = []
        self.headerfiles = []
        self.deprecation_msgs = []
        self.notes = []
        self.warnings = []
        self.akas = []
        self.internals = []

    def addSignature(self, s):
        self.signatures.append(s)

    def addHeaderfile(self, h):
        self.headerfiles.append(h)

    def addDeprecationMsg(self, d):
        self.deprecation_msgs.append(d)

    def addNote(self, n):
        self.notes.append(n)

    def addWarning(self, w):
        self.warnings.append(w)

    def addAka(self, a):
        self.akas.append(a)

    def addInternal(self, i):
        self.internals.append(i)

    def getType(self):
        return 'code'

    def __str__(self):
        # NOTE(review): RawEntry defines no __str__, so this prefix is the
        # default object representation — confirm that is intended.
        res = RawEntry.__str__(self)
        return res + '\n' + '\n'.join([' @signature %s' % x for x in self.signatures])

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        # Fixed: collapsed the duplicated "if self.headerfiles:" check into one.
        if self.headerfiles:
            res.append('\n')
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawVariable(RawCodeEntry):
    """RawDoc for one variable constant.

    @ivar type: The type of the variable as a RawText or None.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='var')
        self.type = None

    def getType(self):
        # Member variables are written as "Class::member".
        if '::' in self.name.text:
            return 'member_variable'
        else:
            return 'variable'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        # A typed variable is emitted as "@var <type> <name>;".
        if self.type:
            res.append(formatter.formatCommand(self.command, self.name.text + ';', self.type.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        if self.headerfiles:
            res.append('\n')
        if self.headerfiles:
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawEnumValue(RawVariable):
    """RawDoc for one enum value.

    @ivar type: The type of the variable as a RawText or None.
    """

    def __init__(self, first_token, briefs=[]):
        # NOTE(review): deliberately calls RawCodeEntry.__init__ (bypassing
        # RawVariable.__init__) so the command is 'val' rather than 'var'; the
        # attributes set are the same either way.
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='val')
        self.type = None

    def getType(self):
        return 'enum_value'
class RawTag(RawCodeEntry):
    """RawDoc for one tag.

    @ivar tparams A list of template parameter (RawTParam) objects.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='tag')
        self.tparams = []

    def addTParam(self, p):
        self.tparams.append(p)

    def getType(self):
        # Tags grouped under an owner are written as "Owner#Tag".
        if '#' in self.name.text:
            return 'grouped_tag'
        else:
            return 'tag'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        if self.headerfiles:
            res.append('\n')
        if self.headerfiles:
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        # NOTE(review): tparams are emitted without their own leading blank
        # line, directly after the signatures.
        for x in self.tparams:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawConcept(RawCodeEntry):
    """RawDoc for one concept.

    @ivar extends A list of @extends clause objects.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='concept')
        self.extends = []

    def addExtends(self, c):
        self.extends.append(c)

    def getType(self):
        return 'concept'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        if self.headerfiles:
            res.append('\n')
        if self.headerfiles:
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        # Extends clauses come directly after the header files.
        if self.extends:
            res.append('\n')
        for x in self.extends:
            res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawEnum(RawCodeEntry):
    """RawDoc for one enum."""

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='enum')

    def getType(self):
        return 'enum'
class RawTypedef(RawCodeEntry):
    """RawDoc for one typedef."""

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='typedef')

    def getType(self):
        # '#' marks a typedef grouped under an owner, '::' a member typedef;
        # the '#' check deliberately has priority.
        markers = (('#', 'grouped_typedef'), ('::', 'member_typedef'))
        for marker, kind in markers:
            if marker in self.name.text:
                return kind
        return 'global_typedef'
class RawAdaption(RawCodeEntry):
    """RawDoc for one adaption."""

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='adaption')

    def getType(self):
        return 'adaption'
class RawClass(RawCodeEntry):
    """RawDoc for one class.

    @ivar tparams List of RawParameter objects.
    @ivar extends List of @extends clause objects.
    @ivar implements List of @implements clause objects.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='class')
        self.extends = []
        self.implements = []
        self.tparams = []

    def addTParam(self, p):
        self.tparams.append(p)

    def addExtends(self, p):
        self.extends.append(p)

    def addImplements(self, p):
        self.implements.append(p)

    def getType(self):
        return 'class'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        # Order: implements, extends, headerfiles, briefs, messages,
        # signatures, tparams, body, sees.
        if self.implements:
            res.append('\n')
        for x in self.implements:
            res.append(x.getFormatted(formatter))
        if self.extends:
            res.append('\n')
        for x in self.extends:
            res.append(x.getFormatted(formatter))
        # Fixed: collapsed the duplicated "if self.headerfiles:" check.
        if self.headerfiles:
            res.append('\n')
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if self.tparams:
            res.append('\n')
        for x in self.tparams:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)

    def __str__(self):
        res = RawCodeEntry.__str__(self)
        s = res + '\n'
        if self.tparams:
            # Fixed: the comprehension variable was named 's', shadowing the
            # accumulator above; renamed to 't' for clarity.
            s += '\n'.join([' @tparam %s' % t for t in self.tparams]) + '\n'
        return s
class RawFunction(RawCodeEntry):
    """RawDoc for one function.

    @ivar tparams List of RawParameter objects.
    @ivar params List of RawParameter objects.
    @ivar returns List of RawReturn objects.
    @ivar throws List of RawThrow objects.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='fn')
        self.tparams = []
        self.params = []
        self.returns = []
        self.throws = []

    def addTParam(self, p):
        self.tparams.append(p)

    def addParam(self, p):
        self.params.append(p)

    def addReturn(self, p):
        self.returns.append(p)

    def addThrow(self, t):
        self.throws.append(t)

    def getType(self):
        # "Owner#fn" is an interface function, "Class::fn" a member function.
        if '#' in self.name.text:
            return 'interface_function'
        elif '::' in self.name.text:
            return 'member_function'
        else:
            return 'global_function'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        # Functions are always formatted by name; the title is not used here.
        res.append(formatter.formatCommand(self.command, self.name.text))
        if self.headerfiles:
            res.append('\n')
        if self.headerfiles:
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if self.tparams:
            res.append('\n')
        for x in self.tparams:
            res.append(x.getFormatted(formatter))
        if self.params:
            res.append('\n')
        for x in self.params:
            res.append(x.getFormatted(formatter))
        if self.returns:
            res.append('\n')
        for x in self.returns:
            res.append(x.getFormatted(formatter))
        if self.throws:
            res.append('\n')
        for x in self.throws:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)

    def __str__(self):
        res = RawCodeEntry.__str__(self)
        res += '\n' + '\n'.join([' @return %s ' % x for x in self.returns])
        res += '\n' + '\n'.join([' @tparam %s ' % x for x in self.tparams])
        res += '\n' + '\n'.join([' @param %s ' % x for x in self.params])
        res += '\n'
        return res
class RawMacro(RawCodeEntry):
    """RawDoc for one macro.

    @ivar params List of RawParameter objects.
    @ivar returns List of RawReturn objects.
    @ivar throws List of RawThrow objects.
    """

    def __init__(self, first_token, briefs=[]):
        RawCodeEntry.__init__(self, first_token, briefs=briefs, command='macro')
        self.params = []
        self.returns = []
        self.throws = []

    def addParam(self, p):
        self.params.append(p)

    def addReturn(self, p):
        self.returns.append(p)

    def addThrow(self, t):
        self.throws.append(t)

    def getType(self):
        # "Owner#MACRO" is a macro grouped under an owner.
        if '#' in self.name.text:
            return 'grouped_macro'
        else:
            return 'macro'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        res.append(formatter.formatCommand(self.command, self.name.text))
        # Fixed: collapsed the duplicated "if self.headerfiles:" check.
        if self.headerfiles:
            res.append('\n')
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if self.params:
            res.append('\n')
        for x in self.params:
            res.append(x.getFormatted(formatter))
        if self.returns:
            res.append('\n')
        for x in self.returns:
            res.append(x.getFormatted(formatter))
        if self.throws:
            res.append('\n')
        for x in self.throws:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)

    def __str__(self):
        # Fixed: the previous version iterated self.tparams, an attribute this
        # class never defines, so __str__ always raised AttributeError.
        res = RawCodeEntry.__str__(self)
        res += '\n' + '\n'.join([' @return %s ' % x for x in self.returns])
        res += '\n' + '\n'.join([' @param %s ' % x for x in self.params])
        res += '\n'
        return res
class RawMetafunction(RawCodeEntry):
    """RawDoc for one metafunction.

    @ivar tparams List of RawParameter objects.
    @ivar returns List of RawReturn objects.
    """

    def __init__(self, first_token, briefs=[]):
        # NOTE(review): unlike the sibling classes, the command is assigned
        # after __init__ rather than passed as command='mfn'; the result is
        # the same.
        RawCodeEntry.__init__(self, first_token, briefs=briefs)
        self.tparams = []
        self.returns = []
        self.command = 'mfn'

    def addTParam(self, p):
        self.tparams.append(p)

    def addReturn(self, p):
        self.returns.append(p)

    def getType(self):
        # "Owner#Metafunction" is an interface metafunction.
        if '#' in self.name.text:
            return 'interface_metafunction'
        else:
            return 'global_metafunction'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        res.append(formatter.formatCommand(self.command, self.name.text))
        if self.headerfiles:
            res.append('\n')
        if self.headerfiles:
            for x in self.headerfiles:
                res.append(x.getFormatted(formatter))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if self.deprecation_msgs or self.warnings or self.notes:
            res.append('\n')
        for x in self.deprecation_msgs:
            res.append(x.getFormatted(formatter))
        for x in self.warnings:
            res.append(x.getFormatted(formatter))
        for x in self.notes:
            res.append(x.getFormatted(formatter))
        if self.signatures:
            res.append('\n')
        for x in self.signatures:
            res.append(x.getFormatted(formatter))
        if self.tparams:
            res.append('\n')
        for x in self.tparams:
            res.append(x.getFormatted(formatter))
        if self.returns:
            res.append('\n')
        for x in self.returns:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawPage(RawEntry):
    """A page in the documentation."""

    def __init__(self, first_token, briefs=[]):
        RawEntry.__init__(self, first_token, briefs=briefs)
        self.command = 'page'

    def getType(self):
        return 'page'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        # "@page name title" when a title is present, else "@page name".
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawMainPage(RawPage):
    """The main page in the documentation."""

    def __init__(self, first_token, briefs=[]):
        RawPage.__init__(self, first_token, briefs=briefs)
        self.command = 'mainpage'

    def getType(self):
        return 'page'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        # The main page has no name; only its title is emitted.
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text))
        else:
            res.append(formatter.formatCommand(self.command, 'NO TITLE'))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawGroup(RawEntry):
    """A group in the documentation."""

    def __init__(self, first_token, briefs=[]):
        RawEntry.__init__(self, first_token, briefs=briefs)
        self.command = 'defgroup'

    def getType(self):
        return 'defgroup'

    def getFormatted(self, formatter):
        """Get formatted and normalized in dox format."""
        res = []
        if self.title.text:
            res.append(formatter.formatCommand(self.command, self.title.text,
                                               self.name.text))
        else:
            res.append(formatter.formatCommand(self.command, self.name.text))
        if self.briefs:
            res.append('\n')
        for x in self.briefs:
            res.append(x.getFormatted(formatter))
        if not self.body.empty:
            res.append('\n')
        res += self.body.getFormatted(formatter)
        if self.sees:
            res.append('\n')
        for x in self.sees:
            res.append(x.getFormatted(formatter))
        res.append('\n')
        return ''.join(res)
class RawBody(object):
    """A documentation body: a sequence of RawParagraph/RawSection/RawInclude
    style objects.

    @ivar paragraphs A list of paragraph-like objects.
    """

    def __init__(self):
        self.first_token = None
        self.paragraphs = []

    def addParagraph(self, p):
        self.paragraphs.append(p)

    def getFormatted(self, formatter):
        """Format all paragraphs, separated by blank lines."""
        res = []
        for p in self.paragraphs:
            res.append(p.getFormatted(formatter))
        return '\n'.join(res)

    @property
    def empty(self):
        return not bool(self.paragraphs)

    def __eq__(self, other):
        # Fixed: guard with hasattr (as RawText.__eq__ does) so comparing
        # against an unrelated object returns False instead of raising
        # AttributeError.
        if not hasattr(other, 'paragraphs'):
            return False
        return self.paragraphs == other.paragraphs
class RawSection(object):
    """Represents one section or subsection.

    @ivar level An int with the indentation level, starts at 0.
    @ivar heading The heading as a RawText.
    """

    def getType(self):
        return 'section'

    def __init__(self, first_token, heading=None, level=0):
        # Fixed: the default heading used to be a single RawText() created at
        # class-definition time and shared by all RawSection instances; create
        # a fresh one per instance instead.
        self.first_token = first_token
        self.heading = heading if heading is not None else RawText()
        self.level = level

    def __str__(self):
        if self.level == 0:
            return 'Section(%s)' % (repr(self.heading.text))
        else:
            return 'Sub%ssection(%s)' % (''.join(['sub'] * (self.level - 1)), repr(self.heading.text))

    def getCommand(self):
        """Return the dox command name for this nesting level."""
        if self.level == 0:
            return 'section'
        else:
            return 'sub%ssection' % ''.join(['sub'] * (self.level - 1))

    def getFormatted(self, formatter):
        res = [formatter.formatCommand(self.getCommand(), self.heading.text.strip())]
        return ''.join(res)
class RawInclude(object):
    """Representation of an @include statement.

    @ivar path A RawText object with the path to the included file.
    @ivar text Alias of path.
    @ivar tokens List of tokens for the include statement.
    """

    def __init__(self, first_token, tokens):
        self.first_token = first_token
        self.tokens = list(tokens)
        self.path = RawText(tokens)
        # Alias so error handling can uniformly look at .text.
        self.text = self.path

    def getType(self):
        return 'include'

    def getFormatted(self, formatter):
        parts = ['@include ', self.path.text.strip(), '\n']
        return ''.join(parts)

    def __str__(self):
        return 'RawInclude(%s)' % (repr(self.path.text),)
class RawSnippet(object):
    """Representation of a @snippet statement.

    @ivar tokens: A list of Token objects.
    @ivar path: A RawText object with the path to the included file.
    @ivar name: The name of the snippet, a RawText.
    @ivar text: Alias to path, such that the begin token can be retrieved by
                looking at text in exception handling.
    """

    def __init__(self, first_token, path_tokens, name_tokens):
        self.first_token = first_token
        self.tokens = path_tokens + name_tokens
        self.path = raw_doc.RawText(path_tokens)
        self.name = raw_doc.RawText(name_tokens)
        # Alias so error handling can uniformly look at .text.
        self.text = self.path

    def getType(self):
        return 'snippet'

    def getFormatted(self, formatter):
        parts = ['@snippet ', self.path.text.strip(), ' ', self.name.text.strip(), '\n']
        return ''.join(parts)

    def __str__(self):
        return 'RawSnippet(%s, %s)' % (repr(self.path.text),
                                       repr(self.name.text))
class RawParagraph(object):
    """A paragraph in the RawBody of a RawEntry object's documentation.

    @ivar text A RawText with the paragraph's text.
    """

    def __init__(self, first_token, text=None):
        # Fixed: the default text used to be one RawText() created at
        # class-definition time and shared by all instances; create a fresh
        # one per instance instead.
        self.first_token = first_token
        self.text = text if text is not None else RawText()

    def getType(self):
        return 'paragraph'

    def __str__(self):
        return 'RawParagraph(%s)' % (repr(self.text.text))

    def getFormatted(self, formatter):
        return formatter.formatParagraph(self.text.text)
class RawCode(RawParagraph):
    """A special paragraph that is rendered as code.

    @ivar extension The extension identifying the language.
    """

    # NOTE(review): the default RawText() is created once at class-definition
    # time and shared across calls; safe only while callers never mutate it.
    def __init__(self, first_token, text=RawText(), extension='.txt'):
        RawParagraph.__init__(self, first_token, text)
        self.extension = extension

    def getType(self):
        return 'code'

    def __str__(self):
        return 'RawCode(%s)' % repr(self.text)

    def getFormatted(self, formatter):
        # Emitted verbatim, without wrapping.
        return '@code%s@endcode' % self.text.text
class RawHtmlOnly(RawParagraph):
    """A special paragraph whose content is emitted verbatim into HTML."""

    def __init__(self, first_token, text=RawText()):
        RawParagraph.__init__(self, first_token, text)

    def getType(self):
        return 'htmlonly'

    def __str__(self):
        return 'RawHtmlOnly(%s)' % repr(self.text)

    def getFormatted(self, formatter):
        # Fixed: the opening marker was '@endhtmlonly'; a htmlonly block must
        # open with '@htmlonly' and close with '@endhtmlonly'.
        return '@htmlonly%s@endhtmlonly' % self.text.text
class RawBrief(object):
    """Representation of one @brief clause.

    @ivar text The @brief clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'brief'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('brief', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawBrief(%s)' % repr(self.text)
class RawExtends(object):
    """Representation of one @extends clause.

    @ivar text The @extends clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'extends'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('extends', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawExtends(%s)' % repr(self.text)
class RawImplements(object):
    """Representation of one @implements clause.

    @ivar text The @implements clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'implements'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('implements', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawImplements(%s)' % repr(self.text)
class RawHeaderfile(object):
    """Representation of one @headerfile clause.

    @ivar text The @headerfile clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'headerfile'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('headerfile', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawHeaderfile(%s)' % repr(self.text)
class RawDeprecated(object):
    """Representation of one @deprecated clause.

    @ivar text The @deprecated clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'deprecated'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('deprecated', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawDeprecated(%s)' % repr(self.text)
class RawNote(object):
    """Representation of one @note clause.

    @ivar text The @note clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'note'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('note', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawNote(%s)' % repr(self.text)
class RawWarning(object):
    """Representation of one @warning clause.

    @ivar text The @warning clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'warning'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('warning', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawWarning(%s)' % repr(self.text)
class RawAka(object):
    """Representation of one @aka clause.

    @ivar text The @aka clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'aka'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('aka', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawAka(%s)' % repr(self.text)
class RawInternal(object):
    """Representation of one @internal clause.

    @ivar text The @internal clauses's text.
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        return 'internal'

    def getFormatted(self, formatter):
        stripped = self.text.text.strip()
        return formatter.formatCommand('internal', stripped)

    def __eq__(self, other):
        return self.text == other.text

    def __str__(self):
        return 'RawInternal(%s)' % repr(self.text)
class RawSee(object):
    """A representation of a @see entry.

    @ivar text The @see clause's parameter (a RawText-like object).
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        """Return the entry kind identifier."""
        return 'see'

    def getFormatted(self, formatter):
        """Render this entry through *formatter* as a '@see' command."""
        return formatter.formatCommand('see', self.text.text)

    def __str__(self):
        # Added for parity with the sibling Raw* entry classes, which all
        # provide a debug representation.
        return 'RawSee(%s)' % repr(self.text)
class RawParam(object):
    """RawDoc for one parameter.

    @ivar name  Name of the parameter (RawText-like, has a .text attribute).
    @ivar inout Token whose .val is in {'', 'in', 'out', 'in,out'}
                describing mutability; may be None.
    @ivar text  RawParagraph entry with the documentation of the parameter.
    """

    def getType(self):
        """Return the entry kind identifier."""
        return 'param'

    def __init__(self, first_token, name=None, text=None, inout=None):
        # None sentinels replace the original mutable defaults
        # ``name=RawText(), text=RawText()``: a default argument is
        # evaluated once at function definition, so the original
        # signature shared a single RawText instance across every
        # RawParam constructed without explicit arguments.
        self.first_token = first_token
        self.name = RawText() if name is None else name
        self.inout = inout
        self.text = RawText() if text is None else text

    def __str__(self):
        inout = ''
        if self.inout:
            inout = self.inout.val
        return ' @param%s %s %s' % (inout, self.name.text, self.text.text)

    def getFormatted(self, formatter):
        """Render this entry through *formatter* as a '@param[inout]' command."""
        inout = ''
        if self.inout:
            inout = self.inout.val
        return formatter.formatCommand('param%s' % inout, self.text.text, self.name.text)
class RawTParam(RawParam):
    """RawDoc for one template parameter.

    @ivar name Name of the parameter.
    @ivar text RawParagraph entry with the documentation of the parameter.
    """

    def getType(self):
        """Return the entry kind identifier."""
        return 'tparam'

    def __init__(self, first_token, name=None, text=None, in_out=None):
        # None sentinels replace the original shared RawText() defaults
        # (evaluated once per definition, not per call).  ``in_out`` is
        # accepted for signature compatibility but ignored, matching the
        # original behaviour.
        if name is None:
            name = RawText()
        if text is None:
            text = RawText()
        RawParam.__init__(self, first_token, name, text)

    def __str__(self):
        return 'RawTParam(%s, %s)' % (repr(self.name.text), repr(self.text.text))

    def getFormatted(self, formatter):
        """Render this entry through *formatter* as a '@tparam' command."""
        return formatter.formatCommand('tparam', self.text.text, self.name.text)
class RawReturn(RawParam):
    """RawDoc for one return description.

    @ivar name RawText with the return type (stored in the inherited
               ``name`` slot).
    @ivar text RawParagraph entry with the documentation of the return value.
    """

    def __init__(self, first_token, name=None, text=None, in_out=None):
        # None sentinels replace the original shared RawText() defaults
        # (evaluated once per definition, not per call).  ``in_out`` is
        # accepted for signature compatibility but ignored.
        if name is None:
            name = RawText()
        if text is None:
            text = RawText()
        RawParam.__init__(self, first_token, name, text)

    def getType(self):
        """Return the entry kind identifier."""
        return 'return'

    def getFormatted(self, formatter):
        """Render this entry through *formatter* as a '@return' command."""
        return formatter.formatCommand('return', self.text.text, self.name.text)
class RawThrow(RawParam):
    """RawDoc for one throw description.

    @ivar name RawText with the thrown type (stored in the inherited
               ``name`` slot).
    @ivar text RawParagraph entry with the documentation of the exception.
    """

    def __init__(self, first_token, name=None, text=None, in_out=None):
        # None sentinels replace the original shared RawText() defaults
        # (evaluated once per definition, not per call).  ``in_out`` is
        # accepted for signature compatibility but ignored.
        if name is None:
            name = RawText()
        if text is None:
            text = RawText()
        RawParam.__init__(self, first_token, name, text)

    def getType(self):
        """Return the entry kind identifier."""
        return 'throw'

    def getFormatted(self, formatter):
        """Render this entry through *formatter* as a '@throw' command."""
        return formatter.formatCommand('throw', self.text.text, self.name.text)
class RawSignature(object):
    """A representation of a @signature entry.

    @ivar text The @signature clause's parameter (a RawText).
    """

    def __init__(self, first_token, text):
        self.first_token = first_token
        self.text = text

    def getType(self):
        # Entry kind identifier, used for dispatching on raw doc entries.
        return 'signature'

    def __str__(self):
        return 'RawSignature(%s)' % repr(self.text.text)

    def getFormatted(self, formatter):
        # Render through the given formatter as a '@signature' command.
        return formatter.formatCommand('signature', self.text.text.strip())
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import numpy as np
import tensorflow as tf
from niftynet.io.image_reader import ImageReader
from niftynet.io.image_sets_partitioner import ImageSetsPartitioner
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.utilities.util_common import ParserNamespace
from tests.reader_modular_test import generate_2d_images, SEG_THRESHOLD
from tests.niftynet_testcase import NiftyNetTestCase
# Create the on-disk 2-D image fixtures before any reader is constructed.
generate_2d_images()

# test multiple modalities
MULTI_MOD_DATA = {
    'T1': ParserNamespace(
        csv_file=os.path.join('testing_data', 'T1reader.csv'),
        path_to_search='testing_data',
        filename_contains=('_o_T1_time',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'FLAIR': ParserNamespace(
        csv_file=os.path.join('testing_data', 'FLAIRreader.csv'),
        path_to_search='testing_data',
        filename_contains=('FLAIR_',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
MULTI_MOD_TASK = ParserNamespace(image=('T1', 'FLAIR'))

# test single modalities
SINGLE_MOD_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        path_to_search='testing_data',
        filename_contains=('Lesion',),
        filename_not_contains=('Parcellation',),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
SINGLE_MOD_TASK = ParserNamespace(image=('lesion',))

# Same modality as SINGLE_MOD_DATA, but resolved only from an existing
# csv file (no path_to_search / filename filters).
EXISTING_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        interp_order=3,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}

# test labels
LABEL_DATA = {
    'parcellation': ParserNamespace(
        csv_file=os.path.join('testing_data', 'labels.csv'),
        path_to_search='testing_data',
        filename_contains=('Parcellation',),
        filename_not_contains=('Lesion',),
        interp_order=0,
        pixdim=(3, 3.9, 3),
        axcodes=None,
        loader=None
    )
}
LABEL_TASK = ParserNamespace(label=('parcellation',))

# Deliberately malformed section, used by ImageReaderTest.test_errors.
BAD_DATA = {
    'lesion': ParserNamespace(
        csv_file=os.path.join('testing_data', 'lesion.csv'),
        path_to_search='testing_data',
        filename_contains=('Lesion',),
        filename_not_contains=('Parcellation',),
        pixdim=None,
        axcodes=None,
        loader=None
        # missing interp_order
    )
}
BAD_TASK = ParserNamespace(image=('test',))

# 2-D fixtures written by generate_2d_images() above.
IMAGE_2D_DATA = {
    'color_images': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_u.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_u.png',),
        interp_order=1,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'gray_images': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_g.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_g.png',),
        interp_order=1,
        pixdim=None,
        axcodes=None,
        loader=None
    ),
    'seg_masks': ParserNamespace(
        csv_file=os.path.join('testing_data', 'images_2d_m.csv'),
        path_to_search=os.path.join('testing_data', 'images2d'),
        filename_contains=('_m.png',),
        interp_order=0,
        pixdim=None,
        axcodes=None,
        loader=None
    )
}
IMAGE_2D_TASK_COLOR = ParserNamespace(image=('color_images',))
IMAGE_2D_TASK_GRAY = ParserNamespace(image=('gray_images',))
IMAGE_2D_TASK_MASK = ParserNamespace(image=('seg_masks',))

# default data_partitioner
data_partitioner = ImageSetsPartitioner()
multi_mod_list = data_partitioner.initialise(MULTI_MOD_DATA).get_file_list()
single_mod_list = data_partitioner.initialise(SINGLE_MOD_DATA).get_file_list()
existing_list = data_partitioner.initialise(EXISTING_DATA).get_file_list()
label_list = data_partitioner.initialise(LABEL_DATA).get_file_list()
bad_data_list = data_partitioner.initialise(BAD_DATA).get_file_list()
image2d_data_list = data_partitioner.initialise(IMAGE_2D_DATA).get_file_list()
class ImageReaderTest(NiftyNetTestCase):
    """Unit tests for niftynet.io.image_reader.ImageReader.

    Uses the module-level fixture sections (MULTI_MOD_DATA, SINGLE_MOD_DATA,
    ...) and the pre-computed partitioner file lists.
    """

    def test_initialisation(self):
        # An output name absent from the data sections must be rejected.
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['test'])
            reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        reader = ImageReader(None)
        # reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        reader = ImageReader(['image'])
        reader.initialise(MULTI_MOD_DATA, MULTI_MOD_TASK, multi_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        reader = ImageReader(['image'])
        # An empty file list is also an initialisation error.
        with self.assertRaisesRegexp(ValueError, ''):
            reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, [])

    def test_properties(self):
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        self.assertEqual(len(reader.output_list), 4)
        self.assertDictEqual(reader.spatial_ranks, {'image': 3})
        self.assertDictEqual(reader.shapes,
                             {'image': (256, 168, 256, 1, 1)})
        self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
        self.assertEqual(reader.names, ('image',))
        self.assertDictEqual(reader.input_sources,
                             {'image': ('lesion',)})
        # Subject ids in the fixture csv start with 'Fin_'.
        self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
        self.assertTrue(isinstance(reader.get_subject(1), dict))

    def test_existing_csv(self):
        # First reader run writes the csv that EXISTING_DATA then reuses.
        reader_for_csv = ImageReader(['image'])
        reader_for_csv.initialise(
            SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        reader = ImageReader(['image'])
        reader.initialise(EXISTING_DATA, SINGLE_MOD_TASK, existing_list)
        self.assertEqual(len(reader.output_list), 4)
        self.assertDictEqual(reader.spatial_ranks, {'image': 3})
        self.assertDictEqual(reader.shapes,
                             {'image': (256, 168, 256, 1, 1)})
        self.assertDictEqual(reader.tf_dtypes, {'image': tf.float32})
        self.assertEqual(reader.names, ('image',))
        self.assertDictEqual(reader.input_sources,
                             {'image': ('lesion',)})
        self.assertEqual(reader.get_subject_id(1)[:4], 'Fin_')
        self.assertTrue(isinstance(reader.get_subject(1), dict))

    def test_operations(self):
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        # Calling the reader yields (subject index, data dict, interp orders).
        idx, data, interp_order = reader()
        self.assertEqual(
            SINGLE_MOD_DATA['lesion'].interp_order, interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))

    def test_preprocessing(self):
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                         interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
        # Padding by (10, 5, 5) grows each spatial dim by twice the border.
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['image'], border=(10, 5, 5))])
        idx, data, interp_order = reader(idx=2)
        self.assertEqual(idx, 2)
        self.assertAllClose(data['image'].shape, (276, 178, 266, 1, 1))

    def test_preprocessing_zero_padding(self):
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        idx, data, interp_order = reader()
        self.assertEqual(SINGLE_MOD_DATA['lesion'].interp_order,
                         interp_order['image'][0])
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))
        # A zero border must leave the volume shape untouched.
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['image'], border=(0, 0, 0))])
        idx, data, interp_order = reader(idx=2)
        self.assertEqual(idx, 2)
        self.assertAllClose(data['image'].shape, (256, 168, 256, 1, 1))

    def test_trainable_preprocessing(self):
        # Start from a clean label-mapping model file.
        label_file = os.path.join('testing_data', 'label_reader.txt')
        if os.path.exists(label_file):
            os.remove(label_file)
        label_normaliser = DiscreteLabelNormalisationLayer(
            image_name='label',
            modalities=vars(LABEL_TASK).get('label'),
            model_filename=os.path.join('testing_data', 'label_reader.txt'))
        reader = ImageReader(['label'])
        # Adding layers before initialise() is an error.
        with self.assertRaisesRegexp(AssertionError, ''):
            reader.add_preprocessing_layers(label_normaliser)
        reader.initialise(LABEL_DATA, LABEL_TASK, label_list)
        reader.add_preprocessing_layers(label_normaliser)
        reader.add_preprocessing_layers(
            [PadLayer(image_name=['label'], border=(10, 5, 5))])
        idx, data, interp_order = reader(idx=0)
        unique_data = np.unique(data['label'])
        # Two acceptable mappings: expected_v2 is identical except that
        # label 19 is absent (depends on the generated fixture data).
        expected_v1 = np.array(
            [0., 1., 2., 3., 4., 5., 6., 7., 8.,
             9., 10., 11., 12., 13., 14., 15., 16., 17.,
             18., 19., 20., 21., 22., 23., 24., 25., 26., 27.,
             28., 29., 30., 31., 32., 33., 34., 35., 36.,
             37., 38., 39., 40., 41., 42., 43., 44., 45.,
             46., 47., 48., 49., 50., 51., 52., 53., 54.,
             55., 56., 57., 58., 59., 60., 61., 62., 63.,
             64., 65., 66., 67., 68., 69., 70., 71., 72.,
             73., 74., 75., 76., 77., 78., 79., 80., 81.,
             82., 83., 84., 85., 86., 87., 88., 89., 90.,
             91., 92., 93., 94., 95., 96., 97., 98., 99.,
             100., 101., 102., 103., 104., 105., 106., 107., 108.,
             109., 110., 111., 112., 113., 114., 115., 116., 117.,
             118., 119., 120., 121., 122., 123., 124., 125., 126.,
             127., 128., 129., 130., 131., 132., 133., 134., 135.,
             136., 137., 138., 139., 140., 141., 142., 143., 144.,
             145., 146., 147., 148., 149., 150., 151., 152., 153.,
             154., 155., 156., 157.], dtype=np.float32)
        expected_v2 = np.array(
            [0., 1., 2., 3., 4., 5., 6., 7., 8.,
             9., 10., 11., 12., 13., 14., 15., 16., 17.,
             18., 20., 21., 22., 23., 24., 25., 26., 27.,
             28., 29., 30., 31., 32., 33., 34., 35., 36.,
             37., 38., 39., 40., 41., 42., 43., 44., 45.,
             46., 47., 48., 49., 50., 51., 52., 53., 54.,
             55., 56., 57., 58., 59., 60., 61., 62., 63.,
             64., 65., 66., 67., 68., 69., 70., 71., 72.,
             73., 74., 75., 76., 77., 78., 79., 80., 81.,
             82., 83., 84., 85., 86., 87., 88., 89., 90.,
             91., 92., 93., 94., 95., 96., 97., 98., 99.,
             100., 101., 102., 103., 104., 105., 106., 107., 108.,
             109., 110., 111., 112., 113., 114., 115., 116., 117.,
             118., 119., 120., 121., 122., 123., 124., 125., 126.,
             127., 128., 129., 130., 131., 132., 133., 134., 135.,
             136., 137., 138., 139., 140., 141., 142., 143., 144.,
             145., 146., 147., 148., 149., 150., 151., 152., 153.,
             154., 155., 156., 157.], dtype=np.float32)
        compatible_assert = \
            np.all(unique_data == expected_v1) or \
            np.all(unique_data == expected_v2)
        self.assertTrue(compatible_assert)
        self.assertAllClose(data['label'].shape, (103, 74, 93, 1, 1))

    def test_errors(self):
        # BAD_DATA (missing interp_order) is tolerated at initialise time.
        reader = ImageReader(['image'])
        reader.initialise(BAD_DATA, SINGLE_MOD_TASK, bad_data_list)
        # BAD_TASK references a name not present in the data sections.
        with self.assertRaisesRegexp(ValueError, ''):
            reader = ImageReader(['image'])
            reader.initialise(SINGLE_MOD_DATA, BAD_TASK, single_mod_list)
        reader = ImageReader(['image'])
        reader.initialise(SINGLE_MOD_DATA, SINGLE_MOD_TASK, single_mod_list)
        # An out-of-range index yields (-1, None, ...) rather than raising.
        idx, data, interp_order = reader(idx=100)
        self.assertEqual(idx, -1)
        self.assertEqual(data, None)
        idx, data, interp_order = reader(shuffle=True)
        self.assertEqual(data['image'].shape, (256, 168, 256, 1, 1))

    def test_images2d(self):
        reader = ImageReader(['image'])
        # COLOR IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_COLOR,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 3))
        self.assertEqual(interp_order['image'], (1,))
        # GRAY IMAGES
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_GRAY,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        # Check shape
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        self.assertEqual(interp_order['image'], (1,))
        # Keep subject 5's gray image for the mask comparison below.
        gray_idx, gray_data, gray_order = reader(idx=5)
        # SEGMENTATION MASKS
        reader.initialise(IMAGE_2D_DATA, IMAGE_2D_TASK_MASK,
                          image2d_data_list)
        idx, data, interp_order = reader()
        image = data['image']
        # Check index
        self.assertGreaterEqual(idx, 0)
        self.assertLess(idx, 10)
        # Check data type
        self.assertGreaterEqual(image.min(), 0)
        self.assertLessEqual(image.max(), 255)
        self.assertEqual(image.dtype, np.float32)
        self.assertEqual(np.unique(image).size, 2)
        # Check shape
        self.assertEqual(image.ndim, 5)
        self.assertAllEqual(image.shape, (100, 100, 1, 1, 1))
        self.assertEqual(interp_order['image'], (0,))
        # Compare segmentation masks to thresholding original image
        mask_idx, mask_data, mask_order = reader(idx=5)
        gray_data = gray_data['image']
        mask_data = mask_data['image']
        self.assertEqual(gray_idx, mask_idx)
        self.assertEqual(gray_order['image'], (1,))
        self.assertEqual(mask_order['image'], (0,))
        self.assertAllEqual((gray_data > SEG_THRESHOLD) * 255, mask_data)
if __name__ == "__main__":
    # Run the test suite via TensorFlow's runner when executed directly.
    tf.test.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, [email protected], 2016-2017
# - Tobias Wegner, [email protected], 2017
import os
import random
import shutil
import tempfile
import unittest
import uuid
from pilot.api import data
def check_env():
    """
    Decide whether the Harvester unit tests should run.

    The tests need a working cvmfs/rucio environment and are currently
    broken, so this unconditionally returns False; the
    ``@unittest.skipIf(not check_env(), ...)`` decorators on the test
    classes therefore skip every test.

    :returns: False, unconditionally, until the tests are fixed.
    """
    return False
@unittest.skipIf(not check_env(), "This unit test is broken")
class TestHarvesterStageIn(unittest.TestCase):
    """
    Automatic stage-in tests for Harvester.

    from pilot.api import data
    data_client = data.StageInClient(site)
    result = data_client.transfer(files=[{scope, name, destination}, ...])

    Notabene:
    The following datasets with their constituent files are replicated
    on every DATADISK and should thus be generally available:
    user.mlassnig:user.mlassnig.pilot.test.single.hits
    mc15_13TeV:HITS.06828093._000096.pool.root.1
    user.mlassnig:user.mlassnig.pilot.test.multi.hits
    mc15_14TeV:HITS.10075481._000432.pool.root.1
    mc15_14TeV:HITS.10075481._000433.pool.root.1
    mc15_14TeV:HITS.10075481._000434.pool.root.1
    mc15_14TeV:HITS.10075481._000435.pool.root.1
    mc15_14TeV:HITS.10075481._000444.pool.root.1
    mc15_14TeV:HITS.10075481._000445.pool.root.1
    mc15_14TeV:HITS.10075481._000451.pool.root.1
    mc15_14TeV:HITS.10075481._000454.pool.root.1
    mc15_14TeV:HITS.10075481._000455.pool.root.1
    """

    def setUp(self):
        # skip tests if running through Travis -- github does not have working rucio
        self.travis = os.environ.get('TRAVIS') == 'true'
        # setup pilot data client
        # 1st example: using StageIn client with infosys
        # initialize StageInClient using infosys component to resolve allowed input sources
        #from pilot.info import infosys
        #infosys.init('ANALY_CERN')
        #self.data_client = data.StageInClient(infosys)
        # 2nd example: avoid using infosys instance but it requires to pass explicitly copytools and allowed input storages in order to resolve replicas
        #self.data_client = data.StageInClient(acopytools={'pr':'rucio'})
        self.data_client = data.StageInClient(acopytools='rucio')  ## use rucio everywhere

    def test_stagein_sync_fail_nodirectory(self):
        '''
        Test error message propagation.
        '''
        if self.travis:
            return True
        # errno 1 is expected: destination directories do not exist.
        result = self.data_client.transfer(files=[{'scope': 'does_not_matter',
                                                   'name': 'does_not_matter',
                                                   'destination': '/i_do_not_exist'},
                                                  {'scope': 'does_not_matter_too',
                                                   'name': 'does_not_matter_too',
                                                   'destination': '/neither_do_i'}])
        self.assertIsNotNone(result)
        for _file in result:
            self.assertEqual(_file['errno'], 1)
            self.assertEqual(_file['status'], 'failed')
            #self.assertIn(_file['errmsg'], ['Destination directory does not exist: /i_do_not_exist',
            #                                'Destination directory does not exist: /neither_do_i'])

    def test_stagein_sync_fail_noexist(self):
        '''
        Test error message propagation.
        '''
        if self.travis:
            return True
        # errno 3 is expected: the data identifiers do not exist.
        result = self.data_client.transfer(files=[{'scope': 'no_scope1',
                                                   'name': 'no_name1',
                                                   'destination': '/tmp'},
                                                  {'scope': 'no_scope2',
                                                   'name': 'no_name2',
                                                   'destination': '/tmp'}])
        self.assertIsNotNone(result)
        for _file in result:
            self.assertEqual(_file['errno'], 3)
            self.assertEqual(_file['status'], 'failed')
            #self.assertIn(_file['errmsg'], ['Data identifier \'no_scope1:no_name1\' not found',
            #                                'Data identifier \'no_scope2:no_name2\' not found'])

    def test_stagein_sync_fail_mix(self):
        '''
        Test error message propagation
        '''
        if self.travis:
            return True
        ## if infosys was not passed to StageInClient in constructor
        ## then it's mandatory to specify allowed `inputddms` that can be used as source for replica lookup
        tmp_dir1, tmp_dir2 = tempfile.mkdtemp(), tempfile.mkdtemp()
        # Mix of non-existent identifiers and one real file staged twice.
        result = self.data_client.transfer(files=[{'scope': 'no_scope1',
                                                   'name': 'no_name1',
                                                   'destination': '/tmp'},
                                                  {'scope': 'mc15_13TeV',
                                                   'name': 'HITS.06828093._000096.pool.root.1',
                                                   'destination': tmp_dir1},
                                                  {'scope': 'mc15_13TeV',
                                                   'name': 'HITS.06828093._000096.pool.root.1',
                                                   'destination': tmp_dir2},
                                                  {'scope': 'no_scope2',
                                                   'name': 'no_name2',
                                                   'destination': '/tmp'}])
        ls_tmp_dir1 = os.listdir(tmp_dir1)
        ls_tmp_dir2 = os.listdir(tmp_dir2)
        shutil.rmtree(tmp_dir1)
        shutil.rmtree(tmp_dir2)
        self.assertIn('HITS.06828093._000096.pool.root.1', ls_tmp_dir1)
        self.assertIn('HITS.06828093._000096.pool.root.1', ls_tmp_dir2)
        self.assertIsNotNone(result)
        for _file in result:
            if _file['name'] in ['no_name1', 'no_name2']:
                self.assertEqual(_file['errno'], 3)
                self.assertEqual(_file['status'], 'failed')
                #self.assertIn(_file['errmsg'], ['Data identifier \'no_scope1:no_name1\' not found',
                #                                'Data identifier \'no_scope2:no_name2\' not found'])
            else:
                self.assertEqual(_file['errno'], 0)
                self.assertEqual(_file['status'], 'done')

    def test_stagein_sync_simple(self):
        '''
        Single file going to a destination directory.
        '''
        if self.travis:
            return True
        result = self.data_client.transfer(files=[{'scope': 'mc15_13TeV',
                                                   'name': 'HITS.06828093._000096.pool.root.1',
                                                   'destination': '/tmp'}])
        os.remove('/tmp/HITS.06828093._000096.pool.root.1')
        self.assertIsNotNone(result)
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stagein_sync_merged_same(self):
        '''
        Multiple files going to the same destination directory.
        '''
        if self.travis:
            return True
        result = self.data_client.transfer(files=[{'scope': 'mc15_14TeV',
                                                   'name': 'HITS.10075481._000432.pool.root.1',
                                                   'destination': '/tmp'},
                                                  {'scope': 'mc15_14TeV',
                                                   'name': 'HITS.10075481._000433.pool.root.1',
                                                   'destination': '/tmp'}])
        os.remove('/tmp/HITS.10075481._000432.pool.root.1')
        os.remove('/tmp/HITS.10075481._000433.pool.root.1')
        self.assertIsNotNone(result)
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stagein_sync_merged_diff(self):
        '''
        Multiple files going to different destination directories.
        '''
        if self.travis:
            return True
        tmp_dir1, tmp_dir2 = tempfile.mkdtemp(), tempfile.mkdtemp()
        result = self.data_client.transfer(files=[{'scope': 'mc15_14TeV',
                                                   'name': 'HITS.10075481._000432.pool.root.1',
                                                   'destination': tmp_dir1},
                                                  {'scope': 'mc15_14TeV',
                                                   'name': 'HITS.10075481._000433.pool.root.1',
                                                   'destination': tmp_dir2}])
        ls_tmp_dir1 = os.listdir(tmp_dir1)
        ls_tmp_dir2 = os.listdir(tmp_dir2)
        shutil.rmtree(tmp_dir1)
        shutil.rmtree(tmp_dir2)
        self.assertIsNotNone(result)
        for _file in result:
            self.assertEqual(_file['errno'], 0)
        self.assertIn('HITS.10075481._000432.pool.root.1', ls_tmp_dir1)
        self.assertIn('HITS.10075481._000433.pool.root.1', ls_tmp_dir2)
@unittest.skipIf(not check_env(), "This unit test is broken")
class TestHarvesterStageOut(unittest.TestCase):
    '''
    Automatic stage-out tests for Harvester.

    from pilot.api import data
    data_client = data.StageOutClient(site)
    result = data_client.transfer(files=[{scope, name, ...}, ...])
    '''

    def setUp(self):
        # skip tests if running through Travis -- github does not have working rucio
        self.travis = os.environ.get('TRAVIS') == 'true'
        # setup pilot data client
        self.data_client = data.StageOutClient(acopytools=['rucio'])

    @staticmethod
    def _create_random_file(dir=None):
        """Create a temporary file filled with random digits; return its path.

        The payload is encoded to bytes before writing: the descriptor is
        opened in binary mode ('wb'), and writing a str there raises
        TypeError on Python 3 (the original code wrote str directly).
        """
        tmp_fd, tmp_path = tempfile.mkstemp(dir=dir)
        with os.fdopen(tmp_fd, 'wb') as tmp_fdo:
            tmp_fdo.write(str(random.randint(1, 2**2048)).encode('ascii'))
        return tmp_path

    def test_stageout_fail_notfound(self):
        '''
        Test error message propagation.
        '''
        if self.travis:
            return True
        # errno 1 is expected: the local files do not exist.
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': 'i_do_not_exist',
                                                   'rse': 'CERN-PROD_SCRATCHDISK'},
                                                  {'scope': 'tests',
                                                   'file': 'neither_do_i',
                                                   'rse': 'CERN-PROD_SCRATCHDISK'}])
        for _file in result:
            self.assertEqual(_file['errno'], 1)

    def test_stageout_file(self):
        '''
        Single file upload with various combinations of parameters.
        '''
        if self.travis:
            return True
        tmp_file1 = self._create_random_file()
        tmp_file2 = self._create_random_file()
        tmp_file3 = self._create_random_file()
        tmp_file4 = self._create_random_file()
        # Exercise the optional lifetime / summary / guid parameters.
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_file1,
                                                   'rse': 'CERN-PROD_SCRATCHDISK'},
                                                  {'scope': 'tests',
                                                   'file': tmp_file2,
                                                   'lifetime': 600,
                                                   'rse': 'CERN-PROD_SCRATCHDISK'},
                                                  {'scope': 'tests',
                                                   'file': tmp_file3,
                                                   'lifetime': 600,
                                                   'summary': True,
                                                   'rse': 'CERN-PROD_SCRATCHDISK'},
                                                  {'scope': 'tests',
                                                   'file': tmp_file4,
                                                   'guid': str(uuid.uuid4()),
                                                   'rse': 'CERN-PROD_SCRATCHDISK'}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stageout_file_and_attach(self):
        '''
        Single file upload and attach to dataset.
        '''
        if self.travis:
            return True
        tmp_file1 = self._create_random_file()
        tmp_file2 = self._create_random_file()
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_file1,
                                                   'lifetime': 600,
                                                   'rse': 'CERN-PROD_SCRATCHDISK',
                                                   'attach': {'scope': 'tests',
                                                              'name': 'pilot2.tests.test_harvester'}},
                                                  {'scope': 'tests',
                                                   'file': tmp_file2,
                                                   'rse': 'CERN-PROD_SCRATCHDISK',
                                                   'attach': {'scope': 'tests',
                                                              'name': 'pilot2.tests.test_harvester'}}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stageout_file_noregister(self):
        '''
        Single file upload without registering.
        '''
        if self.travis:
            return True
        tmp_file1 = self._create_random_file()
        tmp_file2 = self._create_random_file()
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_file1,
                                                   'rse': 'CERN-PROD_SCRATCHDISK',
                                                   'no_register': True},
                                                  {'scope': 'tests',
                                                   'file': tmp_file2,
                                                   'rse': 'CERN-PROD_SCRATCHDISK',
                                                   'no_register': True}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stageout_dir(self):
        '''
        Directory upload (all files in the directory).
        '''
        if self.travis:
            return True
        tmp_dir = tempfile.mkdtemp()
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_dir,
                                                   'rse': 'CERN-PROD_SCRATCHDISK'}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stageout_dir_and_attach(self):
        '''
        Directory upload and attach to dataset.
        '''
        if self.travis:
            return True
        tmp_dir = tempfile.mkdtemp()
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_dir,
                                                   'lifetime': 600,
                                                   'rse': 'CERN-PROD_SCRATCHDISK',
                                                   'attach': {'scope': 'tests',
                                                              'name': 'pilot2.tests.test_harvester'}}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)

    def test_stageout_dir_noregister(self):
        '''
        Directory upload without registering.
        '''
        if self.travis:
            return True
        tmp_dir = tempfile.mkdtemp()
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        self._create_random_file(dir=tmp_dir)
        result = self.data_client.transfer(files=[{'scope': 'tests',
                                                   'file': tmp_dir,
                                                   'no_register': True,
                                                   'rse': 'CERN-PROD_SCRATCHDISK'}])
        for _file in result:
            self.assertEqual(_file['errno'], 0)
|
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess, os.path, tempfile
from .. import mlog
from .. import coredata
from . import compilers
from ..mesonlib import (
EnvironmentException, version_compare, Popen_safe, listify,
for_windows, for_darwin, for_cygwin, for_haiku,
)
from .compilers import (
GCC_MINGW,
get_largefile_args,
gnu_winlibs,
msvc_buildtype_args,
msvc_buildtype_linker_args,
msvc_winlibs,
vs32_instruction_set_args,
vs64_instruction_set_args,
ClangCompiler,
Compiler,
CompilerArgs,
CrossNoRunException,
GnuCompiler,
IntelCompiler,
RunResult,
)
class CCompiler(Compiler):
    def __init__(self, exelist, version, is_cross, exe_wrapper=None):
        # If a child ObjC or CPP class has already set it, don't set it ourselves
        if not hasattr(self, 'language'):
            self.language = 'c'
        super().__init__(exelist, version)
        self.id = 'unknown'
        self.is_cross = is_cross
        self.can_compile_suffixes.add('h')
        # Normalise the exe wrapper to a command list (or None).
        if isinstance(exe_wrapper, str):
            self.exe_wrapper = [exe_wrapper]
        else:
            self.exe_wrapper = exe_wrapper

    def needs_static_linker(self):
        return True  # When compiling static libraries, so yes.

    def get_always_args(self):
        '''
        Args that are always-on for all C compilers other than MSVC
        '''
        return ['-pipe'] + get_largefile_args(self)

    def get_linker_debug_crt_args(self):
        """
        Arguments needed to select a debug crt for the linker
        This is only needed for MSVC
        """
        return []

    def get_no_stdinc_args(self):
        # Disable the default system include search path.
        return ['-nostdinc']

    def get_no_stdlib_link_args(self):
        # Do not link the standard C library.
        return ['-nostdlib']

    def get_warn_args(self, level):
        # warn_args is populated by the concrete subclass.
        return self.warn_args[level]

    def get_no_warn_args(self):
        # Almost every compiler uses this for disabling warnings
        return ['-w']

    def get_soname_args(self, prefix, shlib_name, suffix, path, soversion, is_shared_module):
        # No soname support by default; overridden where applicable.
        return []

    def split_shlib_to_parts(self, fname):
        return None, fname

    # The default behavior is this, override in MSVC
    def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
        # Clang targeting OSX needs the Darwin-style rpath arguments.
        if self.id == 'clang' and self.clang_type == compilers.CLANG_OSX:
            return self.build_osx_rpath_args(build_dir, rpath_paths, build_rpath)
        return self.build_unix_rpath_args(build_dir, from_dir, rpath_paths, build_rpath, install_rpath)

    def get_dependency_gen_args(self, outtarget, outfile):
        # GCC-style dependency-file generation flags.
        return ['-MMD', '-MQ', outtarget, '-MF', outfile]

    def depfile_for_object(self, objfile):
        return objfile + '.' + self.get_depfile_suffix()

    def get_depfile_suffix(self):
        return 'd'

    def get_exelist(self):
        # Return a copy so callers cannot mutate our command list.
        return self.exelist[:]

    def get_linker_exelist(self):
        return self.exelist[:]

    def get_preprocess_only_args(self):
        return ['-E', '-P']

    def get_compile_only_args(self):
        return ['-c']

    def get_no_optimization_args(self):
        return ['-O0']

    def get_compiler_check_args(self):
        '''
        Get arguments useful for compiler checks such as being permissive in
        the code quality and not doing any optimization.
        '''
        return self.get_no_optimization_args()

    def get_output_args(self, target):
        return ['-o', target]

    def get_linker_output_args(self, outputname):
        return ['-o', outputname]

    def get_coverage_args(self):
        return ['--coverage']

    def get_coverage_link_args(self):
        return ['--coverage']

    def get_werror_args(self):
        return ['-Werror']

    def get_std_exe_link_args(self):
        return []

    def get_include_args(self, path, is_system):
        # An empty path means the current directory.
        if path == '':
            path = '.'
        if is_system:
            return ['-isystem', path]
        return ['-I' + path]

    def get_std_shared_lib_link_args(self):
        return ['-shared']

    def get_library_dirs(self):
        # Parse the compiler's own library search dirs; force the C locale
        # so the 'libraries:' prefix is stable across localisations.
        env = os.environ.copy()
        env['LC_ALL'] = 'C'
        stdo = Popen_safe(self.exelist + ['--print-search-dirs'], env=env)[1]
        for line in stdo.split('\n'):
            if line.startswith('libraries:'):
                libstr = line.split('=', 1)[1]
                return libstr.split(':')
        return []

    def get_pic_args(self):
        return ['-fPIC']

    def name_string(self):
        return ' '.join(self.exelist)

    def get_pch_use_args(self, pch_dir, header):
        return ['-include', os.path.split(header)[-1]]

    def get_pch_name(self, header_name):
        return os.path.split(header_name)[-1] + '.' + self.get_pch_suffix()

    def get_linker_search_args(self, dirname):
        return ['-L' + dirname]

    def get_default_include_dirs(self):
        return []

    def gen_import_library_args(self, implibname):
        """
        The name of the outputted import library
        This implementation is used only on Windows by compilers that use GNU ld
        """
        return ['-Wl,--out-implib=' + implibname]
def sanity_check_impl(self, work_dir, environment, sname, code):
    # Compile (and, when possible, run) a minimal program to prove the
    # compiler can produce working binaries. Raises EnvironmentException
    # on failure.
    mlog.debug('Sanity testing ' + self.get_display_language() + ' compiler:', ' '.join(self.exelist))
    mlog.debug('Is cross compiler: %s.' % str(self.is_cross))
    extra_flags = []
    source_name = os.path.join(work_dir, sname)
    binname = sname.rsplit('.', 1)[0]
    if self.is_cross:
        binname += '_cross'
        if self.exe_wrapper is None:
            # Linking cross built apps is painful. You can't really
            # tell if you should use -nostdlib or not and for example
            # on OSX the compiler binary is the same but you need
            # a ton of compiler flags to differentiate between
            # arm and x86_64. So just compile.
            extra_flags += self.get_cross_extra_flags(environment, link=False)
            extra_flags += self.get_compile_only_args()
        else:
            extra_flags += self.get_cross_extra_flags(environment, link=True)
    # Is a valid executable output for all toolchains and platforms
    binname += '.exe'
    # Write binary check source
    binary_name = os.path.join(work_dir, binname)
    with open(source_name, 'w') as ofile:
        ofile.write(code)
    # Compile sanity check
    cmdlist = self.exelist + extra_flags + [source_name] + self.get_output_args(binary_name)
    pc, stdo, stde = Popen_safe(cmdlist, cwd=work_dir)
    mlog.debug('Sanity check compiler command line:', ' '.join(cmdlist))
    mlog.debug('Sanity check compile stdout:')
    mlog.debug(stdo)
    mlog.debug('-----\nSanity check compile stderr:')
    mlog.debug(stde)
    mlog.debug('-----')
    if pc.returncode != 0:
        raise EnvironmentException('Compiler {0} can not compile programs.'.format(self.name_string()))
    # Run sanity check
    if self.is_cross:
        if self.exe_wrapper is None:
            # Can't check if the binaries run so we have to assume they do
            return
        cmdlist = self.exe_wrapper + [binary_name]
    else:
        cmdlist = [binary_name]
    mlog.debug('Running test binary command: ' + ' '.join(cmdlist))
    pe = subprocess.Popen(cmdlist)
    pe.wait()
    if pe.returncode != 0:
        raise EnvironmentException('Executables created by {0} compiler {1} are not runnable.'.format(self.language, self.name_string()))

def sanity_check(self, work_dir, environment):
    # 'class' is a valid C identifier but a C++ keyword, so this source
    # also catches a compiler silently treating the file as C++.
    code = 'int main(int argc, char **argv) { int class=0; return class; }\n'
    return self.sanity_check_impl(work_dir, environment, 'sanitycheckc.c', code)
def has_header(self, hname, prefix, env, extra_args=None, dependencies=None):
    # Prefer the __has_include extension when the preprocessor supports it;
    # otherwise fall back to actually including the header.
    fargs = {'prefix': prefix, 'header': hname}
    code = '''{prefix}
    #ifdef __has_include
    #if !__has_include("{header}")
    #error "Header '{header}' could not be found"
    #endif
    #else
    #include <{header}>
    #endif'''
    # Only preprocessing is needed; no code generation.
    return self.compiles(code.format(**fargs), env, extra_args,
                         dependencies, 'preprocess')

def has_header_symbol(self, hname, symbol, prefix, env, extra_args=None, dependencies=None):
    # True when <header> defines `symbol` either as a macro or a declaration.
    fargs = {'prefix': prefix, 'header': hname, 'symbol': symbol}
    t = '''{prefix}
    #include <{header}>
    int main () {{
        /* If it's not defined as a macro, try to use as a symbol */
        #ifndef {symbol}
            {symbol};
        #endif
    }}'''
    return self.compiles(t.format(**fargs), env, extra_args, dependencies)
def _get_compiler_check_args(self, env, extra_args, dependencies, mode='compile'):
    # Assemble the full argument list used by compiles()/links()/run()
    # checks: dependency flags, cross flags, env flags, check flags, and
    # finally the caller's extra_args (which must win).
    if extra_args is None:
        extra_args = []
    elif isinstance(extra_args, str):
        extra_args = [extra_args]
    if dependencies is None:
        dependencies = []
    elif not isinstance(dependencies, list):
        dependencies = [dependencies]
    # Collect compiler arguments
    args = CompilerArgs(self)
    for d in dependencies:
        # Add compile flags needed by dependencies
        args += d.get_compile_args()
        if d.need_threads():
            args += self.thread_flags(env)
        if mode == 'link':
            # Add link flags needed to find dependencies
            args += d.get_link_args()
            if d.need_threads():
                args += self.thread_link_flags(env)
    # Select a CRT if needed since we're linking
    if mode == 'link':
        args += self.get_linker_debug_crt_args()
    # Read c_args/cpp_args/etc from the cross-info file (if needed)
    args += self.get_cross_extra_flags(env, link=(mode == 'link'))
    if not self.is_cross:
        if mode == 'preprocess':
            # Add CPPFLAGS from the env.
            args += env.coredata.external_preprocess_args[self.language]
        elif mode == 'compile':
            # Add CFLAGS/CXXFLAGS/OBJCFLAGS/OBJCXXFLAGS from the env
            args += env.coredata.external_args[self.language]
        elif mode == 'link':
            # Add LDFLAGS from the env
            args += env.coredata.external_link_args[self.language]
    args += self.get_compiler_check_args()
    # extra_args must override all other arguments, so we add them last
    args += extra_args
    return args
def compiles(self, code, env, extra_args=None, dependencies=None, mode='compile'):
    # True when `code` compiles successfully (no link step).
    args = self._get_compiler_check_args(env, extra_args, dependencies, mode)
    # We only want to compile; not link
    with self.compile(code, args.to_native(), mode) as p:
        return p.returncode == 0

def _links_wrapper(self, code, env, extra_args, dependencies):
    "Shares common code between self.links and self.run"
    args = self._get_compiler_check_args(env, extra_args, dependencies, mode='link')
    return self.compile(code, args)

def links(self, code, env, extra_args=None, dependencies=None):
    # True when `code` both compiles and links successfully.
    with self._links_wrapper(code, env, extra_args, dependencies) as p:
        return p.returncode == 0
def run(self, code, env, extra_args=None, dependencies=None):
    # Build `code`, execute the result (through exe_wrapper when cross
    # compiling), and return a RunResult with exit code and output.
    if self.is_cross and self.exe_wrapper is None:
        raise CrossNoRunException('Can not run test applications in this cross environment.')
    with self._links_wrapper(code, env, extra_args, dependencies) as p:
        if p.returncode != 0:
            mlog.debug('Could not compile test file %s: %d\n' % (
                p.input_name,
                p.returncode))
            return RunResult(False)
        if self.is_cross:
            cmdlist = self.exe_wrapper + [p.output_name]
        else:
            cmdlist = p.output_name
        try:
            pe, so, se = Popen_safe(cmdlist)
        except Exception as e:
            # Best-effort: an unrunnable binary is reported, not raised.
            mlog.debug('Could not run: %s (error: %s)\n' % (cmdlist, e))
            return RunResult(False)
    mlog.debug('Program stdout:\n')
    mlog.debug(so)
    mlog.debug('Program stderr:\n')
    mlog.debug(se)
    return RunResult(True, pe.returncode, so, se)
def _compile_int(self, expression, prefix, env, extra_args, dependencies):
    # A negative array size is a compile error, so compilation succeeds
    # exactly when `expression` is true at compile time.
    fargs = {'prefix': prefix, 'expression': expression}
    t = '''#include <stdio.h>
    {prefix}
    int main() {{ static int a[1-2*!({expression})]; a[0]=0; return 0; }}'''
    return self.compiles(t.format(**fargs), env, extra_args, dependencies)

def cross_compute_int(self, expression, low, high, guess, prefix, env, extra_args, dependencies):
    # Binary-search the integer value of `expression` using compile-time
    # checks only, since cross-built binaries cannot be executed here.
    if isinstance(guess, int):
        # Try the caller's guess first; a hit skips the whole search.
        if self._compile_int('%s == %d' % (expression, guess), prefix, env, extra_args, dependencies):
            return guess
    cur = low
    while low < high:
        cur = int((low + high) / 2)
        if cur == low:
            break
        if self._compile_int('%s >= %d' % (expression, cur), prefix, env, extra_args, dependencies):
            low = cur
        else:
            high = cur
    # Confirm the candidate; failure means the value was outside [low, high].
    if self._compile_int('%s == %d' % (expression, cur), prefix, env, extra_args, dependencies):
        return cur
    raise EnvironmentException('Cross-compile check overflowed')
def compute_int(self, expression, low, high, guess, prefix, env, extra_args=None, dependencies=None):
    # Evaluate an integral C expression by running a test binary, or fall
    # back to the compile-time search when cross compiling.
    # Returns -1 when the probe source does not compile.
    if extra_args is None:
        extra_args = []
    if self.is_cross:
        return self.cross_compute_int(expression, low, high, guess, prefix, env, extra_args, dependencies)
    fargs = {'prefix': prefix, 'expression': expression}
    t = '''#include<stdio.h>
    {prefix}
    int main(int argc, char **argv) {{
        printf("%ld\\n", (long)({expression}));
        return 0;
    }};'''
    res = self.run(t.format(**fargs), env, extra_args, dependencies)
    if not res.compiled:
        return -1
    if res.returncode != 0:
        raise EnvironmentException('Could not run compute_int test binary.')
    return int(res.stdout)
def cross_sizeof(self, typename, prefix, env, extra_args=None, dependencies=None):
    # sizeof via the compile-time search; -1 when the type itself does
    # not compile (e.g. unknown type).
    if extra_args is None:
        extra_args = []
    fargs = {'prefix': prefix, 'type': typename}
    t = '''#include <stdio.h>
    {prefix}
    int main(int argc, char **argv) {{
        {type} something;
    }}'''
    if not self.compiles(t.format(**fargs), env, extra_args, dependencies):
        return -1
    return self.cross_compute_int('sizeof(%s)' % typename, 1, 128, None, prefix, env, extra_args, dependencies)

def sizeof(self, typename, prefix, env, extra_args=None, dependencies=None):
    # sizeof by running a test binary; delegates to cross_sizeof when
    # cross compiling. Returns -1 when the probe does not compile.
    if extra_args is None:
        extra_args = []
    fargs = {'prefix': prefix, 'type': typename}
    if self.is_cross:
        return self.cross_sizeof(typename, prefix, env, extra_args, dependencies)
    t = '''#include<stdio.h>
    {prefix}
    int main(int argc, char **argv) {{
        printf("%ld\\n", (long)(sizeof({type})));
        return 0;
    }};'''
    res = self.run(t.format(**fargs), env, extra_args, dependencies)
    if not res.compiled:
        return -1
    if res.returncode != 0:
        raise EnvironmentException('Could not run sizeof test binary.')
    return int(res.stdout)
def cross_alignment(self, typename, prefix, env, extra_args=None, dependencies=None):
    # Alignment of `typename` determined via offsetof in a struct
    # {char; type;}, computed with compile-time checks only.
    if extra_args is None:
        extra_args = []
    fargs = {'prefix': prefix, 'type': typename}
    t = '''#include <stdio.h>
    {prefix}
    int main(int argc, char **argv) {{
        {type} something;
    }}'''
    # First make sure the type compiles at all.
    if not self.compiles(t.format(**fargs), env, extra_args, dependencies):
        return -1
    t = '''#include <stddef.h>
    {prefix}
    struct tmp {{
        char c;
        {type} target;
    }};'''
    return self.cross_compute_int('offsetof(struct tmp, target)', 1, 1024, None, t.format(**fargs), env, extra_args, dependencies)

def alignment(self, typename, prefix, env, extra_args=None, dependencies=None):
    # The padding inserted after a leading char equals the type's alignment.
    if extra_args is None:
        extra_args = []
    if self.is_cross:
        return self.cross_alignment(typename, prefix, env, extra_args, dependencies)
    fargs = {'prefix': prefix, 'type': typename}
    t = '''#include <stdio.h>
    #include <stddef.h>
    {prefix}
    struct tmp {{
        char c;
        {type} target;
    }};
    int main(int argc, char **argv) {{
        printf("%d", (int)offsetof(struct tmp, target));
        return 0;
    }}'''
    res = self.run(t.format(**fargs), env, extra_args, dependencies)
    if not res.compiled:
        raise EnvironmentException('Could not compile alignment test.')
    if res.returncode != 0:
        raise EnvironmentException('Could not run alignment test binary.')
    align = int(res.stdout)
    if align == 0:
        raise EnvironmentException('Could not determine alignment of %s. Sorry. You might want to file a bug.' % typename)
    return align
def get_define(self, dname, prefix, env, extra_args, dependencies):
    # Preprocess a file that ends with the macro's expansion; everything
    # after the delimiter line in the output is the define's value.
    delim = '"MESON_GET_DEFINE_DELIMITER"'
    fargs = {'prefix': prefix, 'define': dname, 'delim': delim}
    code = '''
    {prefix}
    #ifndef {define}
    # define {define}
    #endif
    {delim}\n{define}'''
    args = self._get_compiler_check_args(env, extra_args, dependencies,
                                         mode='preprocess').to_native()
    with self.compile(code.format(**fargs), args, 'preprocess') as p:
        if p.returncode != 0:
            raise EnvironmentException('Could not get define {!r}'.format(dname))
        # Get the preprocessed value after the delimiter,
        # minus the extra newline at the end
        return p.stdo.split(delim + '\n')[-1][:-1]
def get_return_value(self, fname, rtype, prefix, env, extra_args, dependencies):
    """Compile and run a probe calling fname() and return its result.

    rtype selects the interpretation: 'string' returns stdout verbatim,
    'int' parses stdout as an integer. Raises EnvironmentException when
    the probe can't be built or the output can't be parsed.
    """
    if rtype == 'string':
        fmt = '%s'
        cast = '(char*)'
    elif rtype == 'int':
        fmt = '%lli'
        cast = '(long long int)'
    else:
        raise AssertionError('BUG: Unknown return type {!r}'.format(rtype))
    fargs = {'prefix': prefix, 'f': fname, 'cast': cast, 'fmt': fmt}
    code = '''{prefix}
    #include <stdio.h>
    int main(int argc, char *argv[]) {{
        printf ("{fmt}", {cast} {f}());
    }}'''.format(**fargs)
    res = self.run(code, env, extra_args, dependencies)
    if not res.compiled:
        m = 'Could not get return value of {}()'
        raise EnvironmentException(m.format(fname))
    if rtype == 'string':
        return res.stdout
    elif rtype == 'int':
        try:
            return int(res.stdout.strip())
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; only a parse failure should reach this handler.
        except ValueError:
            m = 'Return value of {}() is not an int'
            raise EnvironmentException(m.format(fname))
@staticmethod
def _no_prototype_templ():
    """
    Try to find the function without a prototype from a header by defining
    our own dummy prototype and trying to link with the C library (and
    whatever else the compiler links in by default). This is very similar
    to the check performed by Autoconf for AC_CHECK_FUNCS.
    """
    # Define the symbol to something else since it is defined by the
    # includes or defines listed by the user or by the compiler. This may
    # include, for instance _GNU_SOURCE which must be defined before
    # limits.h, which includes features.h
    # Then, undef the symbol to get rid of it completely.
    head = '''
    #define {func} meson_disable_define_of_{func}
    {prefix}
    #include <limits.h>
    #undef {func}
    '''
    # Override any GCC internal prototype and declare our own definition for
    # the symbol. Use char because that's unlikely to be an actual return
    # value for a function which ensures that we override the definition.
    head += '''
    #ifdef __cplusplus
    extern "C"
    #endif
    char {func} ();
    '''
    # The actual function call
    main = '''
    int main () {{
      return {func} ();
    }}'''
    return head, main

@staticmethod
def _have_prototype_templ():
    """
    Returns a header and main() call that uses the headers listed by the
    user for the function prototype while checking if a function exists.
    """
    # Add the 'prefix', aka defines, includes, etc that the user provides
    # This may include, for instance _GNU_SOURCE which must be defined
    # before limits.h, which includes features.h
    head = '{prefix}\n#include <limits.h>\n'
    # We don't know what the function takes or returns, so return it as an int.
    # Just taking the address or comparing it to void is not enough because
    # compilers are smart enough to optimize it away. The resulting binary
    # is not run so we don't care what the return value is.
    main = '''\nint main() {{
        void *a = (void*) &{func};
        long b = (long) a;
        return (int) b;
    }}'''
    return head, main
def has_function(self, funcname, prefix, env, extra_args=None, dependencies=None):
    """
    First, this function looks for the symbol in the default libraries
    provided by the compiler (stdlib + a few others usually). If that
    fails, it checks if any of the headers specified in the prefix provide
    an implementation of the function, and if that fails, it checks if it's
    implemented as a compiler-builtin.
    """
    if extra_args is None:
        extra_args = []
    # Short-circuit if the check is already provided by the cross-info file
    varname = 'has function ' + funcname
    varname = varname.replace(' ', '_')
    if self.is_cross:
        val = env.cross_info.config['properties'].get(varname, None)
        if val is not None:
            if isinstance(val, bool):
                return val
            raise EnvironmentException('Cross variable {0} is not a boolean.'.format(varname))
    fargs = {'prefix': prefix, 'func': funcname}
    # glibc defines functions that are not available on Linux as stubs that
    # fail with ENOSYS (such as e.g. lchmod). In this case we want to fail
    # instead of detecting the stub as a valid symbol.
    # We already included limits.h earlier to ensure that these are defined
    # for stub functions.
    stubs_fail = '''
    #if defined __stub_{func} || defined __stub___{func}
    fail fail fail this function is not going to work
    #endif
    '''
    # If we have any includes in the prefix supplied by the user, assume
    # that the user wants us to use the symbol prototype defined in those
    # includes. If not, then try to do the Autoconf-style check with
    # a dummy prototype definition of our own.
    # This is needed when the linker determines symbol availability from an
    # SDK based on the prototype in the header provided by the SDK.
    # Ignoring this prototype would result in the symbol always being
    # marked as available.
    if '#include' in prefix:
        head, main = self._have_prototype_templ()
    else:
        head, main = self._no_prototype_templ()
    templ = head + stubs_fail + main
    if self.links(templ.format(**fargs), env, extra_args, dependencies):
        return True
    # MSVC does not have compiler __builtin_-s.
    if self.get_id() == 'msvc':
        return False
    # Detect function as a built-in
    #
    # Some functions like alloca() are defined as compiler built-ins which
    # are inlined by the compiler and you can't take their address, so we
    # need to look for them differently. On nice compilers like clang, we
    # can just directly use the __has_builtin() macro.
    fargs['no_includes'] = '#include' not in prefix
    t = '''{prefix}
    int main() {{
    #ifdef __has_builtin
        #if !__has_builtin(__builtin_{func})
            #error "__builtin_{func} not found"
        #endif
    #elif ! defined({func})
        /* Check for __builtin_{func} only if no includes were added to the
         * prefix above, which means no definition of {func} can be found.
         * We would always check for this, but we get false positives on
         * MSYS2 if we do. Their toolchain is broken, but we can at least
         * give them a workaround. */
        #if {no_includes:d}
            __builtin_{func};
        #else
            #error "No definition for __builtin_{func} found in the prefix"
        #endif
    #endif
    }}'''
    return self.links(t.format(**fargs), env, extra_args, dependencies)
def has_members(self, typename, membernames, prefix, env, extra_args=None, dependencies=None):
    # True when `typename` has every member listed in `membernames`:
    # generated code accesses each one, so a missing member fails to compile.
    if extra_args is None:
        extra_args = []
    fargs = {'prefix': prefix, 'type': typename, 'name': 'foo'}
    # Create code that accesses all members
    members = ''
    for member in membernames:
        members += '{}.{};\n'.format(fargs['name'], member)
    fargs['members'] = members
    t = '''{prefix}
    void bar() {{
        {type} {name};
        {members}
    }};'''
    return self.compiles(t.format(**fargs), env, extra_args, dependencies)

def has_type(self, typename, prefix, env, extra_args, dependencies=None):
    # sizeof() of an unknown type is a compile error, so compilation
    # succeeding proves the type exists.
    fargs = {'prefix': prefix, 'type': typename}
    t = '''{prefix}
    void bar() {{
        sizeof({type});
    }};'''
    return self.compiles(t.format(**fargs), env, extra_args, dependencies)
def symbols_have_underscore_prefix(self, env):
    '''
    Check if the compiler prefixes an underscore to global C symbols
    '''
    symbol_name = b'meson_uscore_prefix'
    code = '''#ifdef __cplusplus
    extern "C" {
    #endif
    void ''' + symbol_name.decode() + ''' () {}
    #ifdef __cplusplus
    }
    #endif
    '''
    args = self.get_cross_extra_flags(env, link=False)
    args += self.get_compiler_check_args()
    n = 'symbols_have_underscore_prefix'
    with self.compile(code, args, 'compile') as p:
        if p.returncode != 0:
            m = 'BUG: Unable to compile {!r} check: {}'
            raise RuntimeError(m.format(n, p.stdo))
        if not os.path.isfile(p.output_name):
            m = 'BUG: Can\'t find compiled test code for {!r} check'
            raise RuntimeError(m.format(n))
        # Scan the raw object file bytes for the symbol name rather than
        # invoking a platform-specific symbol lister.
        with open(p.output_name, 'rb') as o:
            for line in o:
                # Check if the underscore form of the symbol is somewhere
                # in the output file.
                if b'_' + symbol_name in line:
                    return True
                # Else, check if the non-underscored form is present
                elif symbol_name in line:
                    return False
    raise RuntimeError('BUG: {!r} check failed unexpectedly'.format(n))
def get_library_naming(self, env, libtype):
    '''
    Get library prefixes and suffixes for the target platform ordered by
    priority
    '''
    stlibext = ['a']
    # We've always allowed libname to be both `foo` and `libfoo`,
    # and now people depend on it
    prefixes = ['lib', '']
    # Library suffixes and prefixes
    if for_darwin(env.is_cross_build(), env):
        shlibext = ['dylib']
    elif for_windows(env.is_cross_build(), env):
        # FIXME: .lib files can be import or static so we should read the
        # file, figure out which one it is, and reject the wrong kind.
        if self.id == 'msvc':
            shlibext = ['lib']
        else:
            shlibext = ['dll.a', 'lib', 'dll']
        # Yep, static libraries can also be foo.lib
        stlibext += ['lib']
    elif for_cygwin(env.is_cross_build(), env):
        shlibext = ['dll', 'dll.a']
        prefixes = ['cyg'] + prefixes
    else:
        # Linux/BSDs
        shlibext = ['so']
    # Search priority
    if libtype in ('default', 'shared-static'):
        suffixes = shlibext + stlibext
    elif libtype == 'static-shared':
        suffixes = stlibext + shlibext
    elif libtype == 'shared':
        suffixes = shlibext
    elif libtype == 'static':
        suffixes = stlibext
    else:
        raise AssertionError('BUG: unknown libtype {!r}'.format(libtype))
    return prefixes, suffixes
def find_library(self, libname, env, extra_dirs, libtype='default'):
    """Locate `libname`, either as a -l flag or as a concrete file path.

    Returns a list of link arguments, or None when the library cannot
    be found.
    """
    # These libraries are either built-in or invalid
    if libname in self.ignore_libs:
        return []
    # First try if we can just add the library as -l.
    code = 'int main(int argc, char **argv) { return 0; }'
    if extra_dirs and isinstance(extra_dirs, str):
        extra_dirs = [extra_dirs]
    # Gcc + co seem to prefer builtin lib dirs to -L dirs.
    # Only try to find std libs if no extra dirs specified.
    if not extra_dirs and libtype == 'default':
        args = ['-l' + libname]
        if self.links(code, env, extra_args=args):
            return args
    # Not found or we want to use a specific libtype? Try to find the
    # library file itself.
    # Build a new list here: the original `extra_dirs += ...` mutated the
    # caller's list in place, growing it on every call.
    extra_dirs = extra_dirs + self.get_library_dirs()
    prefixes, suffixes = self.get_library_naming(env, libtype)
    # Triply-nested loop!
    for d in extra_dirs:
        for suffix in suffixes:
            for prefix in prefixes:
                trial = os.path.join(d, prefix + libname + '.' + suffix)
                if os.path.isfile(trial):
                    return [trial]
    return None
def thread_flags(self, env):
    # Haiku needs no explicit thread flag; everyone else gets -pthread.
    if for_haiku(self.is_cross, env):
        return []
    return ['-pthread']

def thread_link_flags(self, env):
    if for_haiku(self.is_cross, env):
        return []
    return ['-pthread']

def has_multi_arguments(self, args, env):
    # Compile a trivial TU; success depends only on whether the compiler
    # accepts `args`.
    return self.compiles('int i;\n', env, extra_args=args)
class ClangCCompiler(ClangCompiler, CCompiler):
    """C compiler driver for Clang."""

    def __init__(self, exelist, version, clang_type, is_cross, exe_wrapper=None):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        ClangCompiler.__init__(self, clang_type)
        default_warn_args = ['-Wall', '-Winvalid-pch']
        # Warning levels '1'-'3' map to increasingly strict flag sets.
        self.warn_args = {'1': default_warn_args,
                          '2': default_warn_args + ['-Wextra'],
                          '3': default_warn_args + ['-Wextra', '-Wpedantic']}

    def get_options(self):
        # User-visible build options supported by this compiler.
        return {'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
                                                  ['none', 'c89', 'c99', 'c11',
                                                   'gnu89', 'gnu99', 'gnu11'],
                                                  'none')}

    def get_option_compile_args(self, options):
        args = []
        std = options['c_std']
        if std.value != 'none':
            args.append('-std=' + std.value)
        return args

    def get_option_link_args(self, options):
        return []

    def get_linker_always_args(self):
        basic = super().get_linker_always_args()
        if self.clang_type == compilers.CLANG_OSX:
            # Pad the Mach-O header so install names can be rewritten later.
            return basic + ['-Wl,-headerpad_max_install_names']
        return basic
class GnuCCompiler(GnuCompiler, CCompiler):
    """C compiler driver for GCC and GCC-compatible toolchains."""

    def __init__(self, exelist, version, gcc_type, is_cross, exe_wrapper=None, defines=None):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        GnuCompiler.__init__(self, gcc_type, defines)
        default_warn_args = ['-Wall', '-Winvalid-pch']
        # Warning levels '1'-'3' map to increasingly strict flag sets.
        self.warn_args = {'1': default_warn_args,
                          '2': default_warn_args + ['-Wextra'],
                          '3': default_warn_args + ['-Wextra', '-Wpedantic']}

    def get_options(self):
        opts = {'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
                                                  ['none', 'c89', 'c99', 'c11',
                                                   'gnu89', 'gnu99', 'gnu11'],
                                                  'none')}
        if self.gcc_type == GCC_MINGW:
            # MinGW additionally lets the user pick the Windows system
            # libraries to link against.
            opts.update({
                'c_winlibs': coredata.UserArrayOption('c_winlibs', 'Standard Win libraries to link against',
                                                      gnu_winlibs), })
        return opts

    def get_option_compile_args(self, options):
        args = []
        std = options['c_std']
        if std.value != 'none':
            args.append('-std=' + std.value)
        return args

    def get_option_link_args(self, options):
        if self.gcc_type == GCC_MINGW:
            return options['c_winlibs'].value[:]
        return []

    def get_std_shared_lib_link_args(self):
        return ['-shared']

    def get_pch_use_args(self, pch_dir, header):
        # Force-include the PCH header in addition to -fpch-preprocess.
        return ['-fpch-preprocess', '-include', os.path.split(header)[-1]]
class IntelCCompiler(IntelCompiler, CCompiler):
    """C compiler driver for the Intel compiler (ICC)."""

    def __init__(self, exelist, version, icc_type, is_cross, exe_wrapper=None):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrapper)
        IntelCompiler.__init__(self, icc_type)
        self.lang_header = 'c-header'
        default_warn_args = ['-Wall', '-w3', '-diag-disable:remark', '-Wpch-messages']
        self.warn_args = {'1': default_warn_args,
                          '2': default_warn_args + ['-Wextra'],
                          '3': default_warn_args + ['-Wextra', '-Wpedantic']}

    def get_options(self):
        c_stds = ['c89', 'c99']
        g_stds = ['gnu89', 'gnu99']
        # c11 is only offered on ICC >= 16.0.
        if version_compare(self.version, '>=16.0.0'):
            c_stds += ['c11']
        opts = {'c_std': coredata.UserComboOption('c_std', 'C language standard to use',
                                                  ['none'] + c_stds + g_stds,
                                                  'none')}
        return opts

    def get_option_compile_args(self, options):
        args = []
        std = options['c_std']
        if std.value != 'none':
            args.append('-std=' + std.value)
        return args

    def get_std_shared_lib_link_args(self):
        return ['-shared']

    def has_multi_arguments(self, args, env):
        # -diag-error 10006 turns "ignoring unknown option" into a hard
        # error so unsupported flags are actually detected.
        return super().has_multi_arguments(args + ['-diag-error', '10006'], env)
class VisualStudioCCompiler(CCompiler):
    """C compiler abstraction for Microsoft Visual C++ (cl.exe).

    Translates the GCC-style argument interface used by the rest of the
    code base into MSVC's slash-style flags.
    """
    std_warn_args = ['/W3']
    std_opt_args = ['/O2']
    # Provided by (or meaningless with) the MS C runtime; -l flags for
    # these are dropped during argument translation.
    ignore_libs = ('m', 'c', 'pthread')

    def __init__(self, exelist, version, is_cross, exe_wrap, is_64):
        CCompiler.__init__(self, exelist, version, is_cross, exe_wrap)
        self.id = 'msvc'
        # /showIncludes is needed for build dependency tracking in Ninja
        # See: https://ninja-build.org/manual.html#_deps
        self.always_args = ['/nologo', '/showIncludes']
        self.warn_args = {'1': ['/W2'],
                          '2': ['/W3'],
                          '3': ['/W4']}
        self.base_options = ['b_pch']  # FIXME add lto, pgo and the like
        self.is_64 = is_64

    # Override CCompiler.get_always_args
    def get_always_args(self):
        return self.always_args

    def get_linker_debug_crt_args(self):
        """
        Arguments needed to select a debug crt for the linker

        Sometimes we need to manually select the CRT (C runtime) to use with
        MSVC. One example is when trying to link with static libraries since
        MSVC won't auto-select a CRT for us in that case and will error out
        asking us to select one.
        """
        return ['/MDd']

    def get_buildtype_args(self, buildtype):
        return msvc_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return msvc_buildtype_linker_args[buildtype]

    def get_pch_suffix(self):
        return 'pch'

    def get_pch_name(self, header):
        # Replace the header's extension with the PCH suffix.
        chopped = os.path.split(header)[-1].split('.')[:-1]
        chopped.append(self.get_pch_suffix())
        pchname = '.'.join(chopped)
        return pchname

    def get_pch_use_args(self, pch_dir, header):
        base = os.path.split(header)[-1]
        pchname = self.get_pch_name(header)
        return ['/FI' + base, '/Yu' + base, '/Fp' + os.path.join(pch_dir, pchname)]

    def get_preprocess_only_args(self):
        return ['/EP']

    def get_compile_only_args(self):
        return ['/c']

    def get_no_optimization_args(self):
        return ['/Od']

    def get_output_args(self, target):
        if target.endswith('.exe'):
            return ['/Fe' + target]
        return ['/Fo' + target]

    def get_dependency_gen_args(self, outtarget, outfile):
        # No depfiles: dependencies are tracked via /showIncludes instead.
        return []

    def get_linker_exelist(self):
        return ['link']  # FIXME, should have same path as compiler.

    def get_linker_always_args(self):
        return ['/nologo']

    def get_linker_output_args(self, outputname):
        return ['/OUT:' + outputname]

    def get_linker_search_args(self, dirname):
        return ['/LIBPATH:' + dirname]

    def get_gui_app_args(self):
        return ['/SUBSYSTEM:WINDOWS']

    def get_pic_args(self):
        return []  # PIC is handled by the loader on Windows

    def get_std_shared_lib_link_args(self):
        return ['/DLL']

    def gen_vs_module_defs_args(self, defsfile):
        if not isinstance(defsfile, str):
            raise RuntimeError('Module definitions file should be str')
        # With MSVC, DLLs only export symbols that are explicitly exported,
        # so if a module defs file is specified, we use that to export symbols
        return ['/DEF:' + defsfile]

    def gen_pch_args(self, header, source, pchname):
        objname = os.path.splitext(pchname)[0] + '.obj'
        return objname, ['/Yc' + header, '/Fp' + pchname, '/Fo' + objname]

    def gen_import_library_args(self, implibname):
        "The name of the outputted import library"
        return ['/IMPLIB:' + implibname]

    def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
        # Windows has no rpath concept.
        return []

    # FIXME, no idea what these should be.
    def thread_flags(self, env):
        return []

    def thread_link_flags(self, env):
        return []

    def get_options(self):
        return {'c_winlibs': coredata.UserArrayOption('c_winlibs',
                                                      'Windows libs to link against.',
                                                      msvc_winlibs)
                }

    def get_option_link_args(self, options):
        return options['c_winlibs'].value[:]

    @classmethod
    def unix_args_to_native(cls, args):
        """Translate Unix-style (GCC) arguments to MSVC equivalents."""
        result = []
        for i in args:
            # -mms-bitfields is specific to MinGW-GCC
            # -pthread is only valid for GCC
            if i in ('-mms-bitfields', '-pthread'):
                continue
            if i.startswith('-L'):
                i = '/LIBPATH:' + i[2:]
            # Translate GNU-style -lfoo library name to the import library
            elif i.startswith('-l'):
                name = i[2:]
                if name in cls.ignore_libs:
                    # With MSVC, these are provided by the C runtime which is
                    # linked in by default
                    continue
                else:
                    i = name + '.lib'
            # -pthread in link flags is only used on Linux
            elif i == '-pthread':
                continue
            result.append(i)
        return result

    def get_werror_args(self):
        return ['/WX']

    def get_include_args(self, path, is_system):
        if path == '':
            path = '.'
        # msvc does not have a concept of system header dirs.
        return ['-I' + path]

    # Visual Studio is special. It ignores some arguments it does not
    # understand and you can't tell it to error out on those.
    # http://stackoverflow.com/questions/15259720/how-can-i-make-the-microsoft-c-compiler-treat-unknown-flags-as-errors-rather-t
    def has_multi_arguments(self, args, env):
        warning_text = '9002'
        code = 'int i;\n'
        (fd, srcname) = tempfile.mkstemp(suffix='.' + self.default_suffix)
        os.close(fd)
        try:
            with open(srcname, 'w') as ofile:
                ofile.write(code)
            # Read c_args/cpp_args/etc from the cross-info file (if needed)
            extra_args = self.get_cross_extra_flags(env, link=False)
            extra_args += self.get_compile_only_args()
            commands = self.exelist + args + extra_args + [srcname]
            mlog.debug('Running VS compile:')
            mlog.debug('Command line: ', ' '.join(commands))
            mlog.debug('Code:\n', code)
            p, stdo, stde = Popen_safe(commands, cwd=os.path.split(srcname)[0])
        finally:
            # The temp source used to be leaked; remove it once compiled.
            os.unlink(srcname)
        if p.returncode != 0:
            return False
        # cl emits warning D9002 for arguments it does not understand
        # instead of failing, so treat that warning as "not supported".
        return not (warning_text in stde or warning_text in stdo)

    def get_compile_debugfile_args(self, rel_obj, pch=False):
        pdbarr = rel_obj.split('.')[:-1]
        pdbarr += ['pdb']
        args = ['/Fd' + '.'.join(pdbarr)]
        # When generating a PDB file with PCH, all compile commands write
        # to the same PDB file. Hence, we need to serialize the PDB
        # writes using /FS since we do parallel builds. This slows down the
        # build obviously, which is why we only do this when PCH is on.
        # This was added in Visual Studio 2013 (MSVC 18.0). Before that it was
        # always on: https://msdn.microsoft.com/en-us/library/dn502518.aspx
        if pch and version_compare(self.version, '>=18.0'):
            args = ['/FS'] + args
        return args

    def get_link_debugfile_args(self, targetfile):
        pdbarr = targetfile.split('.')[:-1]
        pdbarr += ['pdb']
        return ['/DEBUG', '/PDB:' + '.'.join(pdbarr)]

    def get_link_whole_for(self, args):
        # Only since VS2015
        args = listify(args)
        return ['/WHOLEARCHIVE:' + x for x in args]

    def get_instruction_set_args(self, instruction_set):
        if self.is_64:
            return vs64_instruction_set_args.get(instruction_set, None)
        if self.version.split('.')[0] == '16' and instruction_set == 'avx':
            # VS documentation says that this exists and should work, but
            # it does not. The headers do not contain AVX intrinsics
            # and the can not be called.
            return None
        return vs32_instruction_set_args.get(instruction_set, None)

    def get_toolset_version(self):
        # See boost/config/compiler/visualc.cpp for up to date mapping
        try:
            version = int(''.join(self.version.split('.')[0:2]))
        # Was a bare `except:`; only a malformed version string (int()
        # raising ValueError) should be treated as "unknown toolset".
        except ValueError:
            return None
        if version < 1310:
            return '7.0'
        elif version < 1400:
            return '7.1'  # (Visual Studio 2003)
        elif version < 1500:
            return '8.0'  # (Visual Studio 2005)
        elif version < 1600:
            return '9.0'  # (Visual Studio 2008)
        elif version < 1700:
            return '10.0'  # (Visual Studio 2010)
        elif version < 1800:
            return '11.0'  # (Visual Studio 2012)
        elif version < 1900:
            return '12.0'  # (Visual Studio 2013)
        elif version < 1910:
            return '14.0'  # (Visual Studio 2015)
        elif version < 1920:
            return '14.1'  # (Visual Studio 2017)
        return None

    def get_default_include_dirs(self):
        if 'INCLUDE' not in os.environ:
            return []
        return os.environ['INCLUDE'].split(os.pathsep)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Script assumes that database connection information is stored in
# ~/mysql/db_config. Change if needed. Variables are initialized
# this way for compatibility with prior Bash script.
#
# Determination of 3rd next appointment is similar to that used in Oscar EMR code in
# src/main/webapp/appointment/appointmentsearch.jsp and
# src/main/java/org/oscarehr/appointment/web/NextAppointmentSearchHelper.java
#
import sys
__author__ = 'rrusk'
import os
import csv
# import datetime
from datetime import datetime, date
# from datetime import date
# import collections
# if import MySQLdb fails (for Ubuntu 14.04.1) run 'sudo apt-get install python-mysqldb'
import MySQLdb as Mdb
# if import dateutil.relativedelta fails run 'sudo apt-get install python-dateutil'
from dateutil.relativedelta import relativedelta
con = None
f = None
max_days_to_search = 180
# Build a dict keyed on the first element of each tuple; the value is the
# remainder of the tuple.  Only the first row seen for a key is kept;
# later rows that disagree with it trigger a warning.
def create_dict(tlist):
    tdict = {}
    for row in tlist:
        key, rest = row[0], row[1:]
        if key not in tdict:
            tdict[key] = rest
            continue
        # duplicate key: keep the first value, warn only when the
        # duplicates actually differ
        if tdict[key] != rest:
            print("WARNING: key (" + str(key)),
            print("\thas multiple values ")
            print('\t' + str(tdict[key]))
            print('\t' + str(rest))
    return tdict
# Total number of elements across all (list-valued) entries of a dict.
def sum_dict_values(the_dict):
    return sum(len(v) for v in the_dict.values())
# Build {provider_no: (first_name, last_name)} for providers with status '1'.
def get_active_providers_dict(cursor):
    sql = """SELECT p.provider_no, p.first_name, p.last_name from provider p where status='1' order by p.last_name"""
    cursor.execute(sql)
    return create_dict(cursor.fetchall())
# Build {code: (duration, description, id)} from scheduletemplatecode.
def get_schedule_template_code_dict(cursor):
    sql = """select code, duration, description, id from scheduletemplatecode order by code"""
    cursor.execute(sql)
    return create_dict(cursor.fetchall())
# Build {(name, provider_no): (summary, timecode)} from scheduletemplate.
# Only the first row per key is kept; conflicting duplicates are reported.
def get_schedule_template_dict(cursor):
    cursor.execute("""select name, provider_no, summary, timecode from scheduletemplate""")
    st_dict = {}
    for row in cursor.fetchall():
        key = (row[0], row[1])
        rest = row[2:]
        if key not in st_dict:
            st_dict[key] = rest
            continue
        # warn about values that are actually different
        if st_dict[key][0] != row[2] or st_dict[key][1] != row[3]:
            print("WARNING: key (" + str(row[0]) + ',' + str(row[1]) + ')'),
            print("\thas multiple values ")
            print('\t' + str(st_dict[key]))
            print('\t' + str(rest))
    return st_dict
# Validate every timecode string in schedule_template_dict: the per-slot
# durations must add up to exactly 24 hours.  Codes missing from
# schedule_template_code_dict are assumed to occupy one slot and only
# produce a warning.  Returns True when all strings are valid.
def validate_all_timecode_strings(schedule_template_dict, schedule_template_code_dict):
    result = True
    defaultv = None
    cnt_valid = 0
    cnt_invalid = 0
    cnt_missing_codes = 0
    minutes_per_day = 24. * 60
    for st_item in schedule_template_dict:
        total_min = 0
        timecode_str = schedule_template_dict[st_item][1]
        slotduration = None
        warning = False
        # BUG FIX: this condition used "or", so a None timecode_str fell
        # through to len(None) and raised TypeError instead of being
        # reported as an error.
        if timecode_str is not None and len(timecode_str) != 0:
            slotduration = minutes_per_day / len(timecode_str)
            for char in timecode_str:
                if char == '_':
                    total_min += slotduration
                else:
                    value = schedule_template_code_dict.get(char, defaultv)
                    if value and value[0] != '':
                        total_min += slotduration  # int(value[0])
                    else:
                        # assume unrecognized or absent codes occupy one time slot
                        total_min += slotduration
                        warning = True
        else:
            print("ERROR: timecode string is empty")
            result = False
        if total_min != minutes_per_day:
            sys.stdout.write("INVALID TIMECODE STRING [" + str(st_item)),
            # message typo fix: "rather then" -> "rather than"
            print("]: Totals " + str(total_min) + " rather than " + str(minutes_per_day))
            print(str(timecode_str))
            cnt_invalid += 1
            result = False
        elif warning:
            sys.stdout.write("WARNING: UNKNOWN CODES IN TIMECODE STRING [" + str(st_item)),
            print("]: (will assume unknown codes have " + str(slotduration) + " min durations)")
            print(str(timecode_str))
            cnt_missing_codes += 1
        else:
            cnt_valid += 1
    print("scheduletemplate entries:")
    print(" Valid: " + str(cnt_valid) + " Invalid: " + str(cnt_invalid)),
    print(" Valid with unknown codes assumed one timeslot in duration: " + str(cnt_missing_codes))
    return result
# Validate a single scheduletemplate timecode string: the per-slot
# durations must total exactly 24 hours.  Unknown codes are assumed to
# occupy one slot and only generate a warning.  Returns False for a
# missing/empty string or a string that does not total 24 hours.
def is_valid_timecode_string(timecode_str, schedule_template_code_dict):
    result = True
    defaultv = None
    total_min = 0
    warning = False
    # empty or missing strings are trivially invalid
    if timecode_str is None or len(timecode_str) == 0:
        return False
    minutes_per_day = 24. * 60
    slotduration = minutes_per_day / len(timecode_str)
    for char in timecode_str:
        if char == '_':
            total_min += slotduration
        else:
            value = schedule_template_code_dict.get(char, defaultv)
            if value and value[0] != '':
                total_min += slotduration  # int(value[0])
            else:
                # assume unrecognized or absent codes occupy one time slot
                total_min += slotduration
                warning = True
    if total_min != minutes_per_day:
        sys.stdout.write("INVALID TIMECODE STRING [" + str(timecode_str)),
        # message typo fix: "rather then" -> "rather than"
        print("]: Totals " + str(total_min) + " rather than " + str(minutes_per_day))
        print(str(timecode_str))
        result = False
    elif warning:
        # message typo fix: "UNKNOW" -> "UNKNOWN"
        sys.stdout.write("WARNING: UNKNOWN CODES IN TIMECODE STRING [" + str(timecode_str)),
        print("]: (will assume unknown codes have " + str(slotduration) + " min durations)")
        print(str(timecode_str))
    return result
# Build a "Start time / Stop time" summary from a timecode string: the
# first and last non-'_' slots mark the schedule boundaries.  Returns
# None when every slot is '_' (no schedule).
def get_timecode_start_stop(timecode):
    minutes_per_day = 24. * 60
    slot = minutes_per_day / len(timecode)
    result = None
    elapsed = -slot
    for ch in timecode:
        elapsed += slot
        if ch != '_':
            start_time = str(int(elapsed) / 60) + ':' + str(int(elapsed) % 60)
            result = "Start time: " + str(start_time)
            break
    elapsed = minutes_per_day + slot
    for ch in reversed(timecode):
        elapsed -= slot
        if ch != '_':
            stop_time = str(int(elapsed) / 60) + ':' + str(int(elapsed) % 60)
            result += "\nStop time: " + str(stop_time)
            break
    return result
# Debug helper: print each code in the timecode next to its slot start time.
def show_timecode_start_stop(timecode):
    slot = (24. * 60) / len(timecode)
    elapsed = -slot
    for ch in timecode:
        elapsed += slot
        print(ch + " " + str(int(elapsed) / 60) + ':' + str(int(elapsed) % 60))
# Build {(sdate, provider_no): (hour, available, id)} for active ('A')
# scheduledate rows.  The table contains rows duplicated except for the
# primary key, so the dict can be shorter than the row list; only the
# first row per key is kept (matching Oscar's limit-1 queries).
def get_scheduledate_dict(cursor):
    cursor.execute("""
    select sdate, provider_no, hour, available, id from scheduledate
    where status='A' order by sdate, provider_no;
    """)
    s_dict = {}
    for row in cursor.fetchall():
        key = (row[0], row[1])
        if key not in s_dict:
            s_dict[key] = row[2:]
            continue
        # warn about values that are actually different aside from their id
        if s_dict[key][0] != row[2] or s_dict[key][1] != row[3]:
            print("WARNING: key (" + str(row[0]) + ',' + str(row[1]) + ')'),
            print("\thas multiple values ")
            print('\t' + str(s_dict[key]))
            print('\t' + str(row[2:]))
    return s_dict
# Build {(provider_no, appointment_date): [(start_time, end_time,
# appointment_no), ...]} for appointments that are not cancelled.
# Note, Oscar's code in src/main/webapp/appointment/appointmentsearch.jsp and
# src/main/java/org/oscarehr/appointment/web/NextAppointmentSearchHelper.java
# uses a similar query but with "status!='N' and status!='C'".
def get_appointment_dict(cursor):
    cursor.execute("""
    select provider_no, appointment_date, start_time, end_time, appointment_no from appointment
    where status!='C' order by appointment_date, start_time;
    """)
    app_dict = {}
    for row in cursor.fetchall():
        app_dict.setdefault((row[0], row[1]), []).append(row[2:])
    return app_dict
# check appointment dictionary for existing booking at specified datetime and duration
def check_availability(app_dict, provider_no, ref_datetime, duration):
    """Return True when provider_no is free for `duration` minutes starting
    at ref_datetime, i.e. no existing appointment overlaps that window.

    app_dict maps (provider_no, date) to a list of
    (start_time, end_time, appointment_no) tuples whose start/end are
    timedeltas since midnight (see get_appointment_dict).
    """
    available_default = None
    the_date = ref_datetime.date()
    next_app_list = app_dict.get((provider_no, the_date), available_default)
    if next_app_list is None:
        print("NO APPOINTMENTS FOR provider_no=" + str(provider_no) + " on " + str(the_date))
        return True  # true because provider_no has templatecode set for that schedule date but no appointments yet
    start_time = ref_datetime
    # the requested slot is a closed interval: [start, start + duration - 1 minute]
    end_time = start_time + relativedelta(minutes=+(duration - 1))
    ref_date = datetime.combine(ref_datetime, datetime.min.time())  # 0:00AM on date checked
    booked = False
    for app in next_app_list:
        # convert the midnight-relative timedeltas into absolute datetimes
        # on the day being checked
        seconds_start = app[0].total_seconds()
        seconds_end = app[1].total_seconds()
        app_start = ref_date + relativedelta(seconds=+seconds_start)
        app_end = ref_date + relativedelta(seconds=+seconds_end)
        #
        # TODO inefficient; since appointment is ordered by start_time this can be optimized further
        if end_time < app_start or start_time > app_end:
            continue  # disjoint from this appointment; keep scanning
        booked = True
        break
    if booked:
        return False
    return True
# Find up to num_appointments open slots of `duration` minutes for
# provider_no on the date of ref_datetime, driven by the provider's
# schedule template.  Returns a list of (start, end) datetime pairs, or
# None when the provider has no schedule or template for that day.
def find_next_available_appointments(sd_dict, st_dict, stc_dict, app_dict, ref_datetime, provider_no, duration=15,
                                     num_appointments=3):
    next_app_list = []
    sd_default = None
    sd = sd_dict.get((ref_datetime.date(), provider_no), sd_default)
    if sd is None:
        # no schedule for provider_no on this date
        return None
    st_name = sd[0]  # hour field of scheduledate corresponds to name of scheduletemplate
    # BUG FIX: these two lookups used the name `default`, which is not
    # defined in this function (it only happened to exist as a global when
    # the module was run as a script); use the local sd_default sentinel.
    if st_name.startswith('P:'):
        template = st_dict.get((st_name, "Public"), sd_default)
    else:
        template = st_dict.get((st_name, provider_no), sd_default)
    if not template:
        sys.stdout.write("ERROR: Missing template [" + str(st_name) + "] for " + str(
            provider_no) + " in find_next_available_appointments\n")
        return None
    timecodestr = template[1]
    if not is_valid_timecode_string(timecodestr, stc_dict):
        return None
    # check which slots are available between the start and end times
    ref_date = datetime.combine(ref_datetime, datetime.min.time())
    slotduration = (24. * 60) / len(timecodestr)
    total_min = -slotduration
    for char in timecodestr:
        if char == '_':
            total_min += slotduration
        else:
            value = stc_dict.get(char, sd_default)
            if value and value[0] != '':
                total_min += slotduration  # int(value[0])
            else:
                total_min += slotduration  # assume unrecognized or absent codes occupy one time slot
            start_app = ref_date + relativedelta(minutes=+total_min)
            end_app = ref_date + relativedelta(minutes=+(total_min + duration - 1))
            if start_app < ref_datetime:
                # slot already in the past relative to the reference time
                print("skipping at " + str(start_app))
                continue
            if check_availability(app_dict, provider_no, start_app, duration):
                next_app_list.append((start_app, end_app))
                if len(next_app_list) >= num_appointments:
                    break
    return next_app_list
# reads csv file containing study providers listed row by row using first_name|last_name
def get_study_provider_list(csv_file):
    """Read ~/mysql/db_config/<csv_file> ('|'-delimited) and return a list
    of (first_name, last_name) tuples, whitespace-stripped."""
    provider_list = []
    home = os.path.expanduser("~")
    # NOTE(review): 'rb' is the correct csv mode on Python 2; on Python 3
    # csv.reader requires a text-mode file object -- confirm the target
    # interpreter before changing this.
    with open(os.path.join(home, "mysql", "db_config", csv_file), 'rb') as cf:
        reader = csv.reader(cf, delimiter='|')
        for cf_row in reader:
            provider_list.append((cf_row[0].strip(), cf_row[1].strip()))
    return provider_list
# Resolve (first_name, last_name) pairs of study practitioners to their
# provider_no keys using the active-provider dict
# {provider_no: (first_name, last_name)}.
def get_provider_nums(provider_list, study_provider_list):
    pnums_list = []
    for s in study_provider_list:
        first = s[0].strip()
        last = s[1].strip()
        for pno in provider_list:
            names = provider_list[pno]
            if names[0].strip() == first and names[1].strip() == last:
                pnums_list.append(pno.strip())
    return pnums_list
# Build a comma-separated, single-quoted list of provider numbers,
# for example "('101', '102', '999998')" -> "'101','102','999998'".
def build_provider_no_str(provider_no_list):
    return ','.join("'" + str(provider_no) + "'" for provider_no in provider_no_list)
# patterned after ThirdApptTimeReport in Oscar's
# src/main/java/oscar/oscarReport/reportByTemplate/ThirdApptTimeReporter.java
def third_appt_time_reporter(cursor, provider_no_list, date_from, sched_symbols_list, appt_length):
    """Scan provider schedules from date_from forward and report (by
    printing) how many days until the third open gap of appt_length
    minutes.

    Returns False on bad arguments; otherwise returns None — the computed
    num_days is only printed.  NOTE(review): `i` is never incremented, so
    only res[0] is ever examined and the outer loop can only exit via
    num_appts — confirm whether an `i += 1` is missing.
    """
    num_days = -1
    if date_from is None or provider_no_list is None or sched_symbols_list is None:
        print("ERROR: date_from and provider_no_list must be set and at least one schedule symbol must be set")
        return False
    provider_nums_str = build_provider_no_str(provider_no_list)
    date_str = str(date_from.date())  # expect datetime object from which the date is extracted
    # join each active scheduledate to its template (provider-specific or 'Public')
    schedule_sql = "select scheduledate.provider_no, scheduletemplate.timecode, scheduledate.sdate" \
                   " from scheduletemplate, scheduledate" \
                   " where scheduletemplate.name=scheduledate.hour and scheduledate.sdate >= '" + date_str + \
                   "' and scheduledate.provider_no in (" + provider_nums_str + ") and scheduledate.status = 'A' and " \
                   " (scheduletemplate.provider_no=scheduledate.provider_no or scheduletemplate.provider_no='Public')" \
                   " order by scheduledate.sdate"
    res = get_query_results(cursor, schedule_sql)
    day_mins = 24. * 60.
    i = 0
    num_appts = 0
    third = 3
    sched_date = None
    while i < len(res) and num_appts < third:
        provider_no = res[i][0]
        print("provider_no=" + str(provider_no))
        timecodes = res[i][1]
        print("templatecode=" + str(timecodes))
        sched_date = res[i][2]
        print("scheduledate=" + str(sched_date))
        duration = day_mins / len(timecodes)  # minutes represented by one template slot
        # NOTE(review): this queries appointments for date_str (the report
        # start date), not for the sched_date currently being scanned --
        # confirm against the Java original.
        appt_sql = "select start_time, end_time from appointment where appointment_date = '" + date_str + \
                   "' and provider_no = '" + str(provider_no) + "' and status not like '%C%' " + \
                   " order by start_time asc"
        print('appt_sql: ' + appt_sql)
        appts = get_query_results(cursor, appt_sql)
        codepos = 0
        latest_appt_hour = 0
        latest_appt_min = 0
        unbooked = 0
        itotalmin = 0
        # walk the day slot by slot, tracking the latest booked end time
        while itotalmin < day_mins:
            code = timecodes[codepos]
            codepos += 1
            print("iTotalMin: " + str(itotalmin) + " codepos: " + str(codepos))
            ihours = int(itotalmin / 60)
            imins = int(itotalmin % 60)
            appt_index = 0
            while appt_index < len(appts):
                print("appt_index: " + str(appt_index))
                # start_time is a timedelta since midnight
                appt_time = appts[appt_index][0].total_seconds()
                appt_hour_s = int(appt_time) / 3600
                # NOTE(review): `% 60` on total seconds yields the
                # seconds-within-minute, not minutes; minutes would be
                # (seconds % 3600) // 60 -- confirm intended behavior.
                appt_min_s = int(appt_time) % 60
                print('hour=' + str(appt_hour_s) + ' min=' + str(appt_min_s))
                print('ihour=' + str(ihours) + ' imins=' + str(imins))
                if ihours == appt_hour_s and imins == appt_min_s:
                    # slot coincides with an appointment start: extend the
                    # latest booked time to that appointment's end
                    appt_time = appts[appt_index][1].total_seconds()
                    appt_hour_e = int(appt_time) / 3600
                    appt_min_e = int(appt_time) % 60
                    print('appt_hour_e=' + str(appt_hour_e) + ' min=' + str(appt_min_e))
                    if appt_hour_e > latest_appt_hour or \
                            (appt_hour_e == latest_appt_hour and appt_min_e > latest_appt_min):
                        latest_appt_hour = appt_hour_e
                        latest_appt_min = appt_min_e
                else:
                    appt_index -= 1
                    break
                appt_index += 1
            code_match = False
            sched_idx = 0
            # accumulate unbooked time across slots whose code is of interest
            while sched_idx < len(sched_symbols_list):
                if code == sched_symbols_list[sched_idx]:
                    code_match = True
                    if ihours > latest_appt_hour or (ihours == latest_appt_hour and imins > latest_appt_min):
                        unbooked += duration
                        if unbooked >= appt_length:
                            # found a gap big enough for one appointment
                            unbooked = 0
                            num_appts += 1
                            if num_appts == third:
                                break
                sched_idx += 1
            if num_appts == third:
                break
            if not code_match:
                unbooked = 0  # a non-matching slot breaks the open gap
            itotalmin += duration
    if sched_date is not None:
        num_days = (sched_date - date_from.date()).days
    print("num_days: " + str(num_days) + " date_from: " + str(date_from) + " sched_date: " + str(sched_date)),
    print(" num_appts: " + str(num_appts))
# used to tune script to specific database configuration settings
def read_config(filename):
    """Return the first line of ~/mysql/db_config/<filename>, stripped of
    trailing whitespace.

    NOTE(review): the file is opened in binary mode, so on Python 3 this
    returns bytes rather than str -- confirm the target interpreter.
    """
    home = os.path.expanduser("~")
    with open(os.path.join(home, "mysql", "db_config", filename), "rb") as fh:
        return fh.readline().rstrip()
# general query helper used for test purposes
def get_query_results(cursor, query):
    """Execute *query* on *cursor* and return all result rows."""
    cursor.execute(query)
    rows = cursor.fetchall()
    return rows
if __name__ == '__main__':
    # Entry point: connect to the Oscar EMR database, validate schedule
    # templates, then report days-to-third-next-appointment per provider.
    try:
        # configure database connection (credentials live in ~/mysql/db_config)
        db_user = read_config("db_user")
        db_passwd = read_config("db_passwd")
        db_name = read_config("db_name")
        db_port = int(read_config("db_port"))
        study_providers = get_study_provider_list("providers.csv")
        print("provider_list: " + str(study_providers))
        end = date.fromordinal(date.today().toordinal() - 1)  # yesterday
        # connect to database
        con = Mdb.connect(host='127.0.0.1', port=db_port, user=db_user, passwd=db_passwd, db=db_name)
        cur = con.cursor()
        err_cnt = 0

        # test helper: print a mismatch and return the incremented error count
        def error_message(method_name, expected, actual, error_cnt):
            print("ERROR in " + str(method_name) + "!!!")
            print("Expected " + str(expected) + " but got " + str(actual))
            return error_cnt + 1
        # get provider numbers for the study practitioners
        providers = get_active_providers_dict(cur)
        provider_nos = get_provider_nums(providers, study_providers)
        print("provider_nums: " + str(provider_nos))
        # get STOPP DSS users
        stopp_user_query = """
        select distinct p.provider_no, p.first_name, p.last_name, g.dateStart from provider p
        inner join dsGuidelineProviderMap as gpm on p.provider_no=gpm.provider_no
        inner join dsGuidelines g on g.uuid=gpm.guideline_uuid
        and g.author='stopp' and g.status='A';
        """
        stopp_users = get_query_results(cur, stopp_user_query)
        if len(stopp_users) == 0:
            print("STOPP DSS users: None")
        else:
            print("STOPP DSS Users:")
            print("pno\tfname\tlname\t\tsince")
            print("---\t-----\t-----\t\t-----")
            for su in stopp_users:
                print(str(su[0]) + '\t' + str(su[1]) + '\t' + str(su[2]) + '\t\t' + str(su[3]))
        # get scheduletemplatecode
        stc = get_schedule_template_code_dict(cur)
        # get schedule template dictionary and validate its timecode strings
        stdict = get_schedule_template_dict(cur)
        validate_all_timecode_strings(stdict, stc)
        # from scheduleDate get name of scheduletemplate
        scheduledate_dict = get_scheduledate_dict(cur)
        print("length of schedule dict: " + str(len(scheduledate_dict)))
        # check for missing scheduletemplates ('P:' names belong to 'Public')
        cnt_missing_template = 0
        default = None
        missing_template_dict = {}
        for item in scheduledate_dict:
            templatename = scheduledate_dict[item][0]
            clinic_provider_no = item[1]
            if templatename.startswith('P:'):
                tresult = stdict.get((templatename, "Public"), default)
                if not tresult:
                    missing_template_dict[(templatename, "Public")] = "missing"
            else:
                tresult = stdict.get((templatename, clinic_provider_no), default)
                if not tresult:
                    missing_template_dict[(templatename, clinic_provider_no)] = "missing"
            if not tresult:
                cnt_missing_template += 1
        print(str(cnt_missing_template) + " scheduledate rows with missing templates")
        if cnt_missing_template > 0:
            print("The following template name, provider number combinations are missing:")
            for tname in missing_template_dict:
                print('\t' + str(tname))
        # load appointment dictionary
        appointment_dict = get_appointment_dict(cur)
        print("length of appointment dict: " + str(len(appointment_dict)))
        print("items in appointment dict: " + str(sum_dict_values(appointment_dict)))
        # smoke-check availability for one provider/date
        the_datetime = datetime(2014, 4, 1, 0, 0, 0)
        print("the_datetime: " + str(the_datetime))
        availability_result = check_availability(appointment_dict, '110', the_datetime, 15)
        print("availability: " + str(availability_result))
        # days-to-3rd-next-appointment for each study provider, scanning
        # forward day by day up to max_days_to_search
        for provider_num in provider_nos:
            the_datetime = datetime(2015, 6, 9, 0, 0, 0)
            num_apps = 0
            days = 0
            apps_list = []
            while days < max_days_to_search:
                app_list = find_next_available_appointments(scheduledate_dict, stdict, stc, appointment_dict,
                                                            the_datetime, provider_num)
                if app_list is not None and len(app_list) > 0:
                    apps_list += app_list
                    num_apps += len(app_list)
                if num_apps >= 3:
                    break
                days += 1
                the_datetime = the_datetime + relativedelta(days=+1)
            print("apps_list:")
            for item in apps_list:
                print(str(item))
            print("Days to 3rd next appointment = " + str(days) + " for " + str(provider_num))
        # third_appt_time_reporter(cur, provider_nos, the_datetime, ['1'], 15)
    except Mdb.Error as e:
        print("Error %d: %s" % (e.args[0], e.args[1]))
        sys.exit(1)
    finally:
        # con is initialized to None at module level, so this is safe even
        # when the connection was never opened
        if con:
            con.close()
|
|
# ===============================================================================
# Copyright 2019 Jan Hendrickx and Gabriel Parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
import pandas as pd
from datetime import datetime as dt
from datetime import date, timedelta
from dateutil import relativedelta
import yaml
import numpy as np
from matplotlib import pyplot as plt
# ============= standard library imports ========================
from utils.TAW_optimization_subroutine.non_normalized_hist_analysis import geospatial_array_extract
from utils.TAW_optimization_subroutine.chisquare_timeseries_analyst import x_y_extract, raster_extract
"""This script will plot EC, PRISM, JPL and ETRM data for a point for a time series."""
def get_jpl_results(jpl_path):
    """Walk *jpl_path* for rasters named YYYY.MM.DD.tif.

    :param jpl_path: root directory of the JPL ET rasters
    :return: {'dates': sorted date objects, 'etas': file paths sorted by date}
    """
    all_data_dict = {}
    for path, dirs, files in os.walk(jpl_path, topdown=False):
        dates_found = []
        files_found = []
        for fname in files:
            if not fname.endswith('.tif'):
                continue
            parts = fname.split('.')
            if len(parts[0]) > 0:
                f_date = date(int(parts[0]), int(parts[1]), int(parts[2]))
                dates_found.append(f_date)
                files_found.append(os.path.join(path, fname))
        # order the file paths by their dates
        all_data_dict['etas'] = [fp for _, fp in sorted(zip(dates_found, files_found))]
        all_data_dict['dates'] = sorted(dates_found)
    return all_data_dict
def get_prism_results(prism_path):
    """Walk *prism_path* for files named like 'precip_20000101.tif'.

    :param prism_path: root directory of the PRISM precip rasters
    :return: {'dates': sorted date objects, 'precips': file paths sorted by date}
    """
    all_data_dict = {}
    for path, dirs, files in os.walk(prism_path, topdown=False):
        dates_found = []
        files_found = []
        for fname in files:
            # the stem is e.g. 'precip_20000101'; the date is the last '_' field
            stamp = fname.split('.')[0].split('_')[-1]
            parsed = dt.strptime(stamp, '%Y%m%d')
            dates_found.append(date(parsed.year, parsed.month, parsed.day))
            files_found.append(os.path.join(path, fname))
        # order the file paths by their dates
        all_data_dict['precips'] = [fp for _, fp in sorted(zip(dates_found, files_found))]
        all_data_dict['dates'] = sorted(dates_found)
    return all_data_dict
def get_monthly_etrm_outputs(output_path, output_type):
"""
For getting the paths and time series for monthly data outputs from ETRM
:param output_path:
:param output_type:
:return:
"""
all_data_dict = {}
# for path, dirs, files in os.walk(output_path, topdown=False):
# if path.endswith('numpy_arrays') and len(files) > 0:
# # print 'path', path
# # print 'dirs', dirs
# # print 'files', files
#
# example_file = files[0]
#
# taw = example_file.split('_')[4]
# print 'ex taw: ', taw
for path, dirs, files in os.walk(output_path, topdown=False):
if path.endswith('monthly_rasters') and len(files) > 0:
print 'path', path
# get the TAW value from the numpy arrays
results_path = os.path.split(path)[0]
numpy_path = os.path.join(results_path, 'numpy_arrays')
example_file = os.listdir(numpy_path)[0]
print example_file
taw = example_file.split('_')[4]
print 'ex taw: ', taw
print 'the taw of the monthly {}'.format(taw)
# if output_type == 'eta':
# NOW, get the files and timeseries for the monthlies from monthly_rasters
timeseries = []
fileseries = []
for f in files:
fname = f.split('.')[0]
flist = fname.split('_')
# to get the kind of monthly output you want i.e 'eta', or 'rzsm'
if flist[0] == output_type:
yr = int(flist[-2])
mnth = int(flist[-1])
# set day to the first of the month automatically for monthly datasets so they can be put together with
# daily timeseries
dy = 1
first_of_the_month = date(yr, mnth, dy)
first_of_next = first_of_the_month + relativedelta(months=+1)
last_of_month = first_of_next - timedelta(days=1)
timeseries.append(last_of_month)
filepath = os.path.join(path, f)
fileseries.append(filepath)
# do a nifty sort of file paths based on the dates
sorted_files = [f for _, f in sorted(zip(timeseries, fileseries))]
sorted_dates = sorted(timeseries)
print 'len sorted files {}, len sorted dates {}, taw {}'.format(len(sorted_files), len(sorted_dates), taw)
all_data_dict[taw] = (sorted_files, sorted_dates)
return all_data_dict
# etrm_path = '/Volumes/Seagate_Blue/ameriflux_aoi_etrm_results_ceff_06'
# etrm_taw = '425'
#
# geo_info_path = '/Volumes/Seagate_Blue/taw_optimization_work_folder/geo_info_ameriflux.yml'
# with open(geo_info_path, mode='r') as geofile:
# geo_dict = yaml.load(geofile)
#
# shape_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/Mpj_point_extract.shp'
#
# # get the x and y from the shapefile in order to extract
# # ... from rasters raster_extract() and geospatial arrays geospatial_array_extract()
# feature_dictionary = x_y_extract(shape_path)
# # Use the feature dictionary to extract data from the rasters.
# for feature, tup in feature_dictionary.iteritems():
# # Get the X and Y coords from the dictionary and unpack them
# x, y = tup
# print x, y
#
#
# monthly_etrm_outputs = get_monthly_etrm_outputs(etrm_path, output_type='eta')
# print 'outputs \n', monthly_etrm_outputs[etrm_taw]
def get_etrm_results(etrm_results_path, rzsm=False, observation_dates=None):
    """
    Collect daily ETRM numpy outputs grouped by TAW value.

    :param etrm_results_path: root of the ETRM results tree
    :param rzsm: when True gather 'rzsm' files, otherwise 'eta' files
    :param observation_dates: optional collection of dates; when given,
        only files whose date is in it are kept
    :return: dict of taw -> (file paths sorted by date, sorted dates)
    """
    all_data_dict = {}
    for path, dirs, files in os.walk(etrm_results_path, topdown=False):
        if path.endswith('numpy_arrays') and len(files) > 0:
            example_file = files[0]
            # NOTE(review): the TAW value is assumed to sit at '_'-field
            # index 4 of the file name -- confirm the naming scheme.
            taw = example_file.split('_')[4]
            print 'ex taw: ', taw
            them_dates = []
            them_files = []
            # collect date objects for each file. Then use the dates to order the files - then tack on the path
            for f in files:
                fname = f.split('.')[0]
                flist = fname.split('_')
                # date is encoded in the last three '_' fields: ..._yyyy_mm_dd
                yr = int(flist[-3])
                mnth = int(flist[-2])
                dy = int(flist[-1])
                if rzsm:
                    # field 2 distinguishes the output kind ('rzsm' vs 'eta')
                    if flist[2] == 'rzsm':
                        if observation_dates != None:
                            # keep only dates with a matching observation
                            file_date = date(yr, mnth, dy)
                            if file_date in observation_dates:
                                them_dates.append(file_date)
                                f_path = os.path.join(path, f)
                                them_files.append(f_path)
                        else:
                            file_date = date(yr, mnth, dy)
                            them_dates.append(file_date)
                            f_path = os.path.join(path, f)
                            them_files.append(f_path)
                else:
                    if flist[2] == 'eta':
                        if observation_dates != None:
                            # keep only dates with a matching observation
                            file_date = date(yr, mnth, dy)
                            if file_date in observation_dates:
                                them_dates.append(file_date)
                                f_path = os.path.join(path, f)
                                them_files.append(f_path)
                        else:
                            file_date = date(yr, mnth, dy)
                            them_dates.append(file_date)
                            f_path = os.path.join(path, f)
                            them_files.append(f_path)
            # do a nifty sort of file paths based on the dates
            sorted_files = [f for _, f in sorted(zip(them_dates, them_files))]
            sorted_dates = sorted(them_dates)
            print 'len sorted files {}, len sorted dates {}'.format(len(sorted_files), len(sorted_dates))
            all_data_dict[taw] = (sorted_files, sorted_dates)
    return all_data_dict
def daily_time_parse(timeseries):
    """Collapse a (sub-daily) timestamp series to its unique calendar days.

    :param timeseries: anything pandas.to_datetime accepts
    :return: sorted list of datetime.date objects, one per day present
    """
    timeseries = pd.to_datetime(timeseries)
    daily_time_list = []
    for i in timeseries:
        year = i.year
        month = i.month
        day = i.day
        daily_time = date(year, month, day)
        daily_time_list.append(daily_time)
    # get rid of duplicates (sub-daily stamps map onto the same day)
    daily_time_set = set(daily_time_list)
    print 'len mtime_set', len(daily_time_set)
    print 'mtime set \n', daily_time_set
    # change back to a list and sort
    daily_time = sorted(list(daily_time_set))
    print 'mtime sorted\n ', daily_time
    return daily_time
# TODO - Timestamp_start vs TIMESTAMP_END
def ec_data_processor_precip(path, x='TIMESTAMP_END', y='LE', daily=True):
"""
Version of ec_data processor that returns a separate dataframe of precip values.
NANs totally cancel out precip for some reason if you try to use one dataframe.
:param path: path to a csv containing Ameriflux Dataset
:param x: default is the header string for the timestamps
:param y: default is Latent Heat LE
:param daily: if true, we convert the Eddy Covariance to a daily total
:param cumulative_days: an interger number of days to be accumulated based on the daily total
:return: a timeseries of
"""
# Get the data from the path and turn the path into a data frame
# ec_dataset = pd.read_csv(path, header=2)
ec_dataset = pd.read_csv(path, header=2, engine='python')
# print ec_dataset.head()
print ec_dataset['LE'].head()
print ec_dataset[ec_dataset[y] != -9999].head()
# === get rid of no data values in any category of the energy balance ===
precip_dataset = ec_dataset[ec_dataset['P'] != -9999]
ec_dataset = ec_dataset[ec_dataset[y] != -9999]
ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]
ec_dataset = ec_dataset[ec_dataset['H'] != -9999]
ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]
# # You probably won't need these because Marcy Doesn't think they are valid for her towers
# ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]
# ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]
if x.startswith("TIMESTAMP"):
a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))
aa = precip_dataset[x].apply(lambda d: dt.strptime(str(d), '%Y%m%d%H%M'))
# # TODO - if converting PRISM to MTN time.
# # Convert to PRISM time (Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs
# a = [i + timedelta(hours=19) for i in a]
# aa = [i + timedelta(hours=19) for i in aa]
else:
a = ec_dataset[x]
# ===== Time Series Processing =====
timeseries = a
p_timeseries = aa
# print 'timeseries\n', timeseries
Rn = ec_dataset['NETRAD'].values
H = ec_dataset['H'].values
LE = ec_dataset['LE'].values
P = precip_dataset['P']
print 'P \n', P
# indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))
# # testing
# plt.plot(timeseries, P, color='black')
# plt.show()
# recreate a dataframe of the variables you want to time average on a monthly timestep
halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H}) # took out precip. no good vals? 'P': P
halfhour_precip = pd.DataFrame({'timeseries': p_timeseries, 'P': P})
# set the timeseries column to the index so groupby function can group by year and month of the index.
halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))
halfhour_precip = halfhour_precip.set_index(pd.DatetimeIndex(halfhour_precip['timeseries']))
# convert latent heat to mmH2O by dividing by latent heat of vaporization.
halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4
if daily:
daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
daily_cum_precip = halfhour_precip.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# get each day in the timeseries. there are duplicates from the groupby function, so use set() to get rid of
# duplicates
daily_cum_time = daily_time_parse(timeseries)
daily_cum_precip_time = daily_time_parse(p_timeseries)
# # testing
# daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')
# format daily_cum_data to have datetimes
daily_cum_data['date'] = daily_cum_time
daily_cum_precip['date'] = daily_cum_precip_time
return daily_cum_data, daily_cum_precip
# new version of ec_data_processor
def ec_data_processor(path, x='TIMESTAMP_END', y='LE', daily=True):
"""
:param path: path to a csv containing Ameriflux Dataset
:param x: default is the header string for the timestamps
:param y: default is Latent Heat LE
:param daily: if true, we convert the Eddy Covariance to a daily total
:param cumulative_days: an interger number of days to be accumulated based on the daily total
:return: a timeseries of
"""
# Get the data from the path and turn the path into a data frame
# ec_dataset = pd.read_csv(path, header=2)
ec_dataset = pd.read_csv(path, header=2, engine='python')
# print ec_dataset.head()
print ec_dataset['LE'].head()
print ec_dataset[ec_dataset[y] != -9999].head()
# === get rid of no data values in any category of the energy balance ===
ec_dataset = ec_dataset[ec_dataset[y] != -9999]
ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]
ec_dataset = ec_dataset[ec_dataset['H'] != -9999]
ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]
precip_dataset = ec_dataset[ec_dataset['P'] != -9999]
# # You probably won't need these because Marcy Doesn't think they are valid for her towers
# ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]
# ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]
if x.startswith("TIMESTAMP"):
a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))
# # TODO - if you are adjusting PRISM to mountain Time
# # Convert to PRISM time (Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs
# a = [i + timedelta(hours=19) for i in a]
else:
a = ec_dataset[x]
# ===== Time Series Processing =====
timeseries = a
# print 'timeseries\n', timeseries
Rn = ec_dataset['NETRAD'].values
H = ec_dataset['H'].values
LE = ec_dataset['LE'].values
P = precip_dataset['P'].values
print 'P \n', P
# indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))
plt.plot(timeseries, P)
plt.show()
# recreate a dataframe of the variables you want to time average on a monthly timestep
halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H, 'P': P}) # took out precip. no good vals? 'P': P
# set the timeseries column to the index so groupby function can group by year and month of the index.
halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))
# convert latent heat to mmH2O by dividing by latent heat of vaporization.
halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4
if daily:
# # === an example of # === accumulate mmh20 to make comparable to SSEBop ===
# timeseries = ec_data.timeseries.tolist()
# monthly_cumulative = ec_data.groupby([lambda x: x.year, lambda x: x.month]).sum()
# # the time series that matches the monthly cumulative dataframe
# monthly_list = monthly_time_parse(timeseries)arse(timeseries)
daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# get each day in the timeseries. there are duplicates from the groupby function, so use set() to get rid of
# duplicates
daily_cum_time = daily_time_parse(timeseries)
# # testing
# daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')
# format daily_cum_data to have datetimes
daily_cum_data['date'] = daily_cum_time
# # testing
# daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')
return daily_cum_data
else:
return halfhour_data
def dataset_plot(shape_path, ameriflux_df, raster_datasets, geo_info, taw, sitename=None):
"""
:param shape_path: path to a shapefile where the values will be read from
:param ameriflux_df: ameriflux tower corresponding to the shapefile or nearby to the shapefile ideally
:param raster_datasets: a dictionary of the raster datasets containing date objects and filepaths for timeseries
:param geo_info: dictionary containing info for georefferencing .npy files
:return:
"""
# get the x and y from the shapefile in order to extract
# ... from rasters raster_extract() and geospatial arrays geospatial_array_extract()
feature_dictionary = x_y_extract(shape_path)
print "feature dictionary", feature_dictionary
# Use the feature dictionary to extract data from the rasters.
for feature, tup in feature_dictionary.iteritems():
# Get the X and Y coords from the dictionary and unpack them
x, y = tup
print x, y
# ====== Unpack the different raster datasets
etrm_dict = raster_datasets['etrm']
jpl_dict = raster_datasets['jpl']
prism_dict = raster_datasets['prism']
print 'processing ETRM'
# ====== select etrm dataset based on TAW =======
etrm_eta_tup = etrm_dict[taw]
etrm_eta = etrm_eta_tup[0]
etrm_dates = etrm_eta_tup[1]
# GET THE ETRM VALUES from the numpy array
etrm_values = []
for etrm_rast in etrm_eta:
etrm_arr = np.load(etrm_rast)
etrm_val = geospatial_array_extract(geo_dict, etrm_arr, (x, y))
etrm_values.append(etrm_val)
print 'processing jpl'
# ====== select jpl from dict =====
jpl_eta = jpl_dict['etas']
jpl_dates = jpl_dict['dates']
# GET the JPL VALUES from the .tif
jpl_values = []
for jpl_rast in jpl_eta:
if jpl_rast.endswith('.tif'):
jpl_val = raster_extract(jpl_rast, x, y)
jpl_values.append(jpl_val)
print 'processing prism'
# ====== select precip from prism ====
prism_precip = prism_dict['precips']
prism_dates = prism_dict['dates']
# GET the PRISM VALUES from the .tif
prism_values = []
for prism_rast in prism_precip:
prism_val = raster_extract(prism_rast, x, y)
prism_values.append(prism_val)
# ====== GET the timeseries from the AMERIFLUX DATAFRAME ========
ameriflux_values = ameriflux_df.mmh20
###ameriflux_precip_values = ameriflux_df.P
ameriflux_dates = ameriflux_df.date
print 'plotting'
fig, ax = plt.subplots()
ax.plot(ameriflux_dates, ameriflux_values, color='green')
ax.plot_date(ameriflux_dates, ameriflux_values, fillstyle='none', color='green')
ax.plot(prism_dates, prism_values, color='blue')
ax.plot_date(prism_dates, prism_values, fillstyle='none', color='blue')
ax.plot(jpl_dates, jpl_values, color='red')
ax.plot_date(jpl_dates, jpl_values, fillstyle='none', color='red')
ax.plot(etrm_dates, etrm_values, color='black')
ax.plot_date(etrm_dates, etrm_values, fillstyle='none', color='black')
ax.set_title('Comprehensive ETa and Precip Site:{} TAW:{}'.format(sitename, taw))
ax.set_ylabel('ETa or Precip in mm H20')
ax.set_xlabel('Date')
plt.grid(True)
plt.show()
# todo - removed the precip values. appear to be dysfunctional.
# ax.plot(ameriflux_dates, ameriflux_precip_values, color='purple')
# ax.plot_date(ameriflux_dates, ameriflux_precip_values, fillstyle='none', color='purple')
if __name__ == '__main__':
    # input parameters
    # ===== Point Info - UTM Shapefile) =====
    # shapefile (uncomment the site you want to process)
    # shape_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/Vcp_point_extract.shp'
    # shape_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/Ss_point_extract.shp'
    # shape_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/Mjs_point_extract.shp'
    shape_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/Sg_point_extract.shp'
    # ===== Precip Time Series =====
    # PRISM - format = 'precip_YYYYjjj.tif' where jjj is three digit day of year
    prism_path = '/Volumes/Seagate_Blue/ameriflux_aoi/PRISM/precip/800m_std_all'
    # dict with keys 'dates' for date objs and 'precips' for filepaths
    prism_dict = get_prism_results(prism_path)
    # TODO - ADD in a RefET time series here
    # ===== GADGET RefET Time Series =====
    # TODO - ADD in FluxTower daily RefET timeseries here
    # ===== FluxTower daily PM RefET ======
    # ===== Observational ETa Time Series =====
    # JPL - format = 'YYYY.mm.dd.PTJPL.ET_daily_kg.MODISsin1km_etrm.tif' [using full ETRM dataset so you can plot
    # against Ameriflux that is outside of Study area]
    jpl_path = '/Users/dcadol/Desktop/academic_docs_II/JPL_Data/JPL_calibration_approach/jpl_etrm_warp_PT'
    # dict with keys 'dates' for date objs and 'etas' for filepaths
    jpl_data_dict = get_jpl_results(jpl_path)
    # Ameriflux half-hourly tower CSV (must match the site chosen above)
    # ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Vcm_BASE_HH_9-5.csv'
    # ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Mpj_BASE_HH_8-5.csv'
    ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Seg_BASE_HH_10-5.csv'
    # ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Ses_BASE_HH_8-5.csv'
    # ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Vcp_BASE_HH_6-5.csv'
    # ameriflux_path = '/Users/dcadol/Desktop/academic_docs_II/Ameriflux_data/AMF_US-Wjs_BASE_HH_7-5.csv'
    # get a dataframe of daily cumulative ETa values in mm/day for the ameriflux path
    daily_cum_ameriflux = ec_data_processor(ameriflux_path)
    # ETRM - get it from the original output files
    # etrm_path = '/Volumes/Seagate_Blue/ameriflux_aoi_etrm_results'
    etrm_path = '/Volumes/Seagate_Blue/ameriflux_aoi_etrm_results_ceff_06'
    # returns a dictionary where key = 'taw'. Key returns a tuple (date_objs, files) in chronological order.
    etrm_dict = get_etrm_results(etrm_path)
    # get geo-info path to handle importing numpy files
    # geo_info_path = '/Volumes/Seagate_Expansion_Drive/taw_optimization_work_folder/geo_info_espanola.yml'
    geo_info_path = '/Volumes/Seagate_Blue/taw_optimization_work_folder/geo_info_ameriflux.yml'
    with open(geo_info_path, mode='r') as geofile:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # can execute arbitrary tags; prefer yaml.safe_load for this plain config.
        geo_dict = yaml.load(geofile)
    raster_datasets = {'prism': prism_dict, 'jpl': jpl_data_dict, 'etrm': etrm_dict}
    dataset_plot(shape_path, ameriflux_df=daily_cum_ameriflux, raster_datasets=raster_datasets, geo_info=geo_dict, taw='225', sitename='Seg')
|
|
# pylint: disable-msg=W0511,W0212,E1111
#
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module handles WebDav server requests.
"""
import types
from webdav import Constants
import qp_xml
from tempfile import TemporaryFile
from davlib import XML_DOC_HEADER
from webdav.NameCheck import validatePropertyName
__version__ = "$LastChangedRevision: 2 $"
## TODO: create a property list class
class XmlNameSpaceMangler(object):
    '''
    Maps property namespaces to short XML prefixes (ns0, ns1, ...) and renders
    property names/values as elements for WebDav request bodies.
    '''

    # restrict instance variables
    __slots__ = ('shortcuts', 'defaultNameSpace')

    def __init__(self, nameList, defaultNameSpace = None):
        '''
        @param nameList: list/tuple of property names; each item is either a
            plain name string or a (namespace, name) tuple
        @param defaultNameSpace: namespace assumed for plain string names
        '''
        # isinstance with the builtin types works on both Python 2 and 3;
        # types.ListType/TupleType and dict.has_key are Python-2-only.
        assert isinstance(nameList, (list, tuple)), \
               "1. argument has wrong type %s" % type(nameList)
        self.shortcuts = {}
        self.defaultNameSpace = defaultNameSpace
        for name in nameList:
            if not isinstance(name, tuple):
                name = (defaultNameSpace, name)
            assert isinstance(name, tuple) and len(name) == 2, \
                   "Name is not a namespace, name tuple: %s" % type(name)
            validatePropertyName(name[1])
            # assign the next 'nsN' shortcut the first time a namespace is seen
            if name[0] and name[0] not in self.shortcuts:
                self.shortcuts[name[0]] = 'ns%d' % len(self.shortcuts)

    def getNameSpaces(self):
        '''
        Returns the xmlns attribute string declaring all registered prefixes.
        '''
        result = ""
        for namespace, short in self.shortcuts.items():
            result += ' xmlns:%s="%s"' % (short, namespace)
        return result

    def getUpdateElements(self, valueMap):
        '''
        @param valueMap: maps names (string or (namespace, name) tuple) to values
        @return: concatenated <tag>value</tag> elements
        '''
        elements = ""
        for name in valueMap.keys():
            fullname = name
            if isinstance(name, str):
                fullname = (self.defaultNameSpace, name)
            if not fullname[0]:
                tag = fullname[1]
            else:
                tag = self.shortcuts[fullname[0]] + ':' + fullname[1]
            value = valueMap[name]
            if value:
                if isinstance(value, qp_xml._element):
                    # dump writes into the file (its return value is None, so
                    # the old 'value = qp_xml.dump(...)' assignment was dead);
                    # re-read it, skipping the XML declaration on line one.
                    tmpFile = TemporaryFile('w+')
                    qp_xml.dump(tmpFile, value)
                    tmpFile.flush()
                    tmpFile.seek(0)
                    tmpFile.readline()
                    value = tmpFile.read()
                else:
                    value = "<![CDATA[%s]]>" % value
            else:
                value = ""
            elements += "<%s>%s</%s>" % (tag, value, tag)
        return elements

    def getNameElements(self, nameList):
        '''
        @param nameList: names (string or (namespace, name) tuple)
        @return: concatenated empty <tag /> elements
        '''
        elements = ""
        for name in nameList:
            if isinstance(name, str):
                name = (self.defaultNameSpace, name)
            if not name[0]:
                tag = name[1]
            else:
                tag = self.shortcuts[name[0]] + ':' + name[1]
            elements += "<%s />" % tag
        return elements
def createUpdateBody(propertyDict, defaultNameSpace = None):
    '''
    Build a PROPPATCH request body that sets the given properties.

    @param propertyDict: maps property names to their new values
    @param defaultNameSpace: namespace assumed for plain string names
    '''
    mangler = XmlNameSpaceMangler(propertyDict.keys(), defaultNameSpace)
    updateTag = 'D:' + Constants.TAG_PROPERTY_UPDATE
    setTag = 'D:' + Constants.TAG_PROPERTY_SET
    propTag = 'D:' + Constants.TAG_PROP
    opening = '<%s xmlns:D="DAV:"><%s><%s %s>' % (updateTag, setTag, propTag,
                                                  mangler.getNameSpaces())
    closing = '</%s></%s></%s>' % (propTag, setTag, updateTag)
    return XML_DOC_HEADER + opening + mangler.getUpdateElements(propertyDict) + closing
def createDeleteBody(nameList, defaultNameSpace = None):
    '''
    Build a PROPPATCH request body that removes the given properties.

    @param nameList: property names to remove
    @param defaultNameSpace: namespace assumed for plain string names
    '''
    mangler = XmlNameSpaceMangler(nameList, defaultNameSpace)
    updateTag = 'D:' + Constants.TAG_PROPERTY_UPDATE
    removeTag = 'D:' + Constants.TAG_PROPERTY_REMOVE
    propTag = 'D:' + Constants.TAG_PROP
    opening = '<%s xmlns:D="DAV:"><%s><%s %s>' % (updateTag, removeTag, propTag,
                                                  mangler.getNameSpaces())
    closing = '</%s></%s></%s>' % (propTag, removeTag, updateTag)
    return XML_DOC_HEADER + opening + mangler.getNameElements(nameList) + closing
def createFindBody(nameList, defaultNameSpace = None):
    '''
    Build a PROPFIND request body asking for the given properties.

    @param nameList: property names to retrieve
    @param defaultNameSpace: namespace assumed for plain string names
    '''
    mangler = XmlNameSpaceMangler(nameList, defaultNameSpace)
    findTag = 'D:' + Constants.TAG_PROPERTY_FIND
    propTag = 'D:' + Constants.TAG_PROP
    opening = '<%s xmlns:D="DAV:"><%s %s>' % (findTag, propTag,
                                              mangler.getNameSpaces())
    closing = '</%s></%s>' % (propTag, findTag)
    return XML_DOC_HEADER + opening + mangler.getNameElements(nameList) + closing
def createSearchBody(selects, path, conditions, defaultNameSpace = None):
    '''
    Creates DASL XML body.
    @param selects: list of property names to retrieve for the found resources
    @param path: href of the collection to search (the scope of the query)
    @param conditions: tree of ConditionTerm instances representing a logical search term
    @param defaultNameSpace: default namespace
    '''
    searchTag = 'D:' + Constants.TAG_SEARCH_REQUEST
    basicTag = 'D:' + Constants.TAG_SEARCH_BASIC
    selectTag = 'D:' + Constants.TAG_SEARCH_SELECT
    fromTag = 'D:' + Constants.TAG_SEARCH_FROM
    scopeTag = 'D:' + Constants.TAG_SEARCH_SCOPE
    whereTag = 'D:' + Constants.TAG_SEARCH_WHERE
    propTag = 'D:' + Constants.TAG_PROP
    hrefTag = 'D:' + Constants.TAG_HREF
    depthTag = 'D:' + Constants.TAG_LOCK_DEPTH
    # search the whole subtree below 'path'
    depthValue = Constants.HTTP_HEADER_DEPTH_INFINITY
    mangler = XmlNameSpaceMangler(selects, defaultNameSpace)
    # assemble <searchrequest><basic> with select / from+scope / where sections
    return XML_DOC_HEADER + \
           '<%s xmlns:D="DAV:"><%s>' % (searchTag, basicTag) + \
           '<%s><%s %s>%s</%s></%s>' % (selectTag, propTag, mangler.getNameSpaces(),
                                        mangler.getNameElements(selects), propTag, selectTag) + \
           '<%s><%s><%s>%s</%s><%s>%s</%s></%s></%s>' % (fromTag, scopeTag, hrefTag, path, hrefTag,
                                                         depthTag, depthValue, depthTag, scopeTag, fromTag) + \
           '<%s>%s</%s>' % (whereTag, conditions.toXML(),whereTag) + \
           '</%s></%s>' % (basicTag, searchTag)
|
|
# Copyright 2013 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova import block_device
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
_BLOCK_DEVICE_OPTIONAL_JOINED_FIELD = ['instance']
BLOCK_DEVICE_OPTIONAL_ATTRS = _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD
def _expected_cols(expected_attrs):
    # Keep only the attrs that correspond to joined DB columns.
    return [col for col in expected_attrs
            if col in _BLOCK_DEVICE_OPTIONAL_JOINED_FIELD]
# TODO(berrange): Remove NovaObjectDictCompat
class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
                         base.NovaObjectDictCompat):
    """Versioned object wrapping one block-device-mapping DB record."""
    # Version 1.0: Initial version
    # Version 1.1: Add instance_uuid to get_by_volume_id method
    # Version 1.2: Instance version 1.14
    # Version 1.3: Instance version 1.15
    # Version 1.4: Instance version 1.16
    # Version 1.5: Instance version 1.17
    # Version 1.6: Instance version 1.18
    # Version 1.7: Add update_or_create method
    # Version 1.8: Instance version 1.19
    # Version 1.9: Instance version 1.20
    VERSION = '1.9'

    fields = {
        'id': fields.IntegerField(),
        'instance_uuid': fields.UUIDField(),
        # lazy-loadable; see obj_load_attr
        'instance': fields.ObjectField('Instance', nullable=True),
        'source_type': fields.StringField(nullable=True),
        'destination_type': fields.StringField(nullable=True),
        'guest_format': fields.StringField(nullable=True),
        'device_type': fields.StringField(nullable=True),
        'disk_bus': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'device_name': fields.StringField(nullable=True),
        'delete_on_termination': fields.BooleanField(default=False),
        'snapshot_id': fields.StringField(nullable=True),
        'volume_id': fields.StringField(nullable=True),
        'volume_size': fields.IntegerField(nullable=True),
        'image_id': fields.StringField(nullable=True),
        'no_device': fields.BooleanField(default=False),
        'connection_info': fields.StringField(nullable=True),
    }

    # maps this object's versions to the Instance versions they can carry
    obj_relationships = {
        'instance': [('1.0', '1.13'), ('1.2', '1.14'), ('1.3', '1.15'),
                     ('1.4', '1.16'), ('1.5', '1.17'), ('1.6', '1.18'),
                     ('1.8', '1.19'), ('1.9', '1.20')],
    }

    @staticmethod
    def _from_db_object(context, block_device_obj,
                        db_block_device, expected_attrs=None):
        """Populate a BlockDeviceMapping object from a DB row.

        Optional joined attrs (currently only 'instance') are copied only
        when requested via expected_attrs.
        """
        if expected_attrs is None:
            expected_attrs = []
        for key in block_device_obj.fields:
            if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
                continue
            block_device_obj[key] = db_block_device[key]
        if 'instance' in expected_attrs:
            my_inst = objects.Instance(context)
            my_inst._from_db_object(context, my_inst,
                                    db_block_device['instance'])
            block_device_obj.instance = my_inst

        block_device_obj._context = context
        block_device_obj.obj_reset_changes()
        return block_device_obj

    def _create(self, context, update_or_create=False):
        """Create the block device record in the database.

        In case the id field is set on the object, and if the instance is set
        raise an ObjectActionError. Resets all the changes on the object.

        Returns None

        :param context: security context used for database calls
        :param update_or_create: consider existing block devices for the
                instance based on the device name and swap, and only update
                the ones that match. Normally only used when creating the
                instance for the first time.
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api':
            raise exception.ObjectActionError(
                    action='create',
                    reason='BlockDeviceMapping cannot be '
                           'created in the API cell.')

        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'instance' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='instance assigned')

        # pass None rather than False so the cells RPC call omits the flag
        cells_create = update_or_create or None
        if update_or_create:
            db_bdm = db.block_device_mapping_update_or_create(
                    context, updates, legacy=False)
        else:
            db_bdm = db.block_device_mapping_create(
                    context, updates, legacy=False)

        self._from_db_object(context, self, db_bdm)
        if cell_type == 'compute':
            # mirror the change up to the API cell
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(
                    context, self, create=cells_create)

    @base.remotable
    def create(self):
        self._create(self._context)

    @base.remotable
    def update_or_create(self):
        self._create(self._context, update_or_create=True)

    @base.remotable
    def destroy(self):
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        db.block_device_mapping_destroy(self._context, self.id)
        # drop the id so a second destroy() is detected as 'already destroyed'
        delattr(self, base.get_attrname('id'))

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_destroy_at_top(self._context, self.instance_uuid,
                                         device_name=self.device_name,
                                         volume_id=self.volume_id)

    @base.remotable
    def save(self):
        updates = self.obj_get_changes()
        if 'instance' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='instance changed')
        updates.pop('id', None)
        updated = db.block_device_mapping_update(self._context, self.id,
                                                 updates, legacy=False)
        self._from_db_object(self._context, self, updated)
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.bdm_update_or_create_at_top(self._context, self)

    @base.remotable_classmethod
    def get_by_volume_id(cls, context, volume_id,
                         instance_uuid=None, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        db_bdm = db.block_device_mapping_get_by_volume_id(
                context, volume_id, _expected_cols(expected_attrs))
        if not db_bdm:
            raise exception.VolumeBDMNotFound(volume_id=volume_id)
        # NOTE (ndipanov): Move this to the db layer into a
        # get_by_instance_and_volume_id method
        if instance_uuid and instance_uuid != db_bdm['instance_uuid']:
            raise exception.InvalidVolume(
                    reason=_("Volume does not belong to the "
                             "requested instance."))
        return cls._from_db_object(context, cls(), db_bdm,
                                   expected_attrs=expected_attrs)

    @property
    def is_root(self):
        return self.boot_index == 0

    @property
    def is_volume(self):
        return self.destination_type == 'volume'

    @property
    def is_image(self):
        return self.source_type == 'image'

    def get_image_mapping(self):
        return block_device.BlockDeviceDict(self).get_image_mapping()

    def obj_load_attr(self, attrname):
        """Lazy-load the 'instance' attribute from the database."""
        if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   # BUG FIX: this object declares no 'uuid' field at version
                   # 1.9, so the previous 'self.uuid' raised AttributeError
                   # whenever the lazy-load path was hit; log instance_uuid.
                   'uuid': self.instance_uuid,
                   })
        self.instance = objects.Instance.get_by_uuid(self._context,
                                                     self.instance_uuid)
        self.obj_reset_changes(fields=['instance'])
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
    # Version 1.0: Initial version
    # Version 1.1: BlockDeviceMapping <= version 1.1
    # Version 1.2: Added use_slave to get_by_instance_uuid
    # Version 1.3: BlockDeviceMapping <= version 1.2
    # Version 1.4: BlockDeviceMapping <= version 1.3
    # Version 1.5: BlockDeviceMapping <= version 1.4
    # Version 1.6: BlockDeviceMapping <= version 1.5
    # Version 1.7: BlockDeviceMapping <= version 1.6
    # Version 1.8: BlockDeviceMapping <= version 1.7
    # Version 1.9: BlockDeviceMapping <= version 1.8
    # Version 1.10: BlockDeviceMapping <= version 1.9
    VERSION = '1.10'

    fields = {
        'objects': fields.ListOfObjectsField('BlockDeviceMapping'),
    }
    # maps each list version to the highest child BlockDeviceMapping
    # version it may contain (RPC backlevel compatibility table)
    child_versions = {
        '1.0': '1.0',
        '1.1': '1.1',
        '1.2': '1.1',
        '1.3': '1.2',
        '1.4': '1.3',
        '1.5': '1.4',
        '1.6': '1.5',
        '1.7': '1.6',
        '1.8': '1.7',
        '1.9': '1.8',
        '1.10': '1.9',
    }

    @base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
        # Fetch all BDM rows for the instance and wrap them in objects.
        db_bdms = db.block_device_mapping_get_all_by_instance(
                context, instance_uuid, use_slave=use_slave)
        return base.obj_make_list(
                context, cls(), objects.BlockDeviceMapping, db_bdms or [])

    def root_bdm(self):
        """Return the boot (boot_index == 0) BDM, or None if there is none."""
        try:
            return next(bdm_obj for bdm_obj in self if bdm_obj.is_root)
        except StopIteration:
            return

    def root_metadata(self, context, image_api, volume_api):
        """Return image metadata/properties for the root BDM, or {}.

        Raises InvalidBDMVolume / InvalidBDMImage when the backing
        volume/image cannot be looked up.
        """
        root_bdm = self.root_bdm()
        if not root_bdm:
            return {}

        if root_bdm.is_volume:
            try:
                volume = volume_api.get(context, root_bdm.volume_id)
                return volume.get('volume_image_metadata', {})
            except Exception:
                raise exception.InvalidBDMVolume(id=root_bdm.id)
        elif root_bdm.is_image:
            try:
                image_meta = image_api.show(context, root_bdm.image_id)
                return image_meta.get('properties', {})
            except Exception:
                raise exception.InvalidBDMImage(id=root_bdm.id)
        else:
            return {}
def block_device_make_list(context, db_list, **extra_args):
    """Build a BlockDeviceMappingList from a list of DB rows."""
    bdm_list = objects.BlockDeviceMappingList(context)
    return base.obj_make_list(context, bdm_list,
                              objects.BlockDeviceMapping,
                              db_list, **extra_args)
def block_device_make_list_from_dicts(context, bdm_dicts_list):
    """Build a BlockDeviceMappingList from plain BDM dicts."""
    bdm_objects = []
    for bdm in bdm_dicts_list:
        bdm_objects.append(objects.BlockDeviceMapping(context=context, **bdm))
    return BlockDeviceMappingList(objects=bdm_objects)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class ActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Defines the action to take on rule match.
    """
    # str-valued members; values must match the service wire format exactly.

    ALLOW = "Allow"
    BLOCK = "Block"
    LOG = "Log"
    REDIRECT = "Redirect"
class AggregationInterval(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The aggregation interval of the Timeseries
    """
    # Service wire values; member lookup is case-insensitive via the metaclass.

    HOURLY = "Hourly"
    DAILY = "Daily"
class Availability(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Indicates whether the name is available.
    """
    # Service wire values for the name-availability check result.

    AVAILABLE = "Available"
    UNAVAILABLE = "Unavailable"
class BackendEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Whether to enable use of this backend. Permitted values are 'Enabled' or 'Disabled'
    """
    # Service wire values; do not change the string casing.

    ENABLED = "Enabled"
    DISABLED = "Disabled"
class CustomHttpsProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning status of Custom Https of the frontendEndpoint.
    """
    # Service wire values reflecting the custom-HTTPS lifecycle.

    ENABLING = "Enabling"
    ENABLED = "Enabled"
    DISABLING = "Disabling"
    DISABLED = "Disabled"
    FAILED = "Failed"
class CustomHttpsProvisioningSubstate(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by
    step.
    """
    # NOTE: the "REquest" spelling below mirrors a typo in the service's wire
    # format (auto-generated); it must not be corrected here.

    SUBMITTING_DOMAIN_CONTROL_VALIDATION_REQUEST = "SubmittingDomainControlValidationRequest"
    PENDING_DOMAIN_CONTROL_VALIDATION_R_EQUEST_APPROVAL = "PendingDomainControlValidationREquestApproval"
    DOMAIN_CONTROL_VALIDATION_REQUEST_APPROVED = "DomainControlValidationRequestApproved"
    DOMAIN_CONTROL_VALIDATION_REQUEST_REJECTED = "DomainControlValidationRequestRejected"
    DOMAIN_CONTROL_VALIDATION_REQUEST_TIMED_OUT = "DomainControlValidationRequestTimedOut"
    ISSUING_CERTIFICATE = "IssuingCertificate"
    DEPLOYING_CERTIFICATE = "DeployingCertificate"
    CERTIFICATE_DEPLOYED = "CertificateDeployed"
    DELETING_CERTIFICATE = "DeletingCertificate"
    CERTIFICATE_DELETED = "CertificateDeleted"
class CustomRuleEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes if the custom rule is in enabled or disabled state. Defaults to Enabled if not
    specified.
    """
    # Service wire values.

    DISABLED = "Disabled"
    ENABLED = "Enabled"
class DynamicCompressionEnabled(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Whether to use dynamic compression for cached content
    """
    # Service wire values.

    ENABLED = "Enabled"
    DISABLED = "Disabled"
class EndpointType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of endpoint
    """
    # Service wire values identifying the network-experiment endpoint kind.

    AFD = "AFD"
    AZURE_REGION = "AzureRegion"
    CDN = "CDN"
    ATM = "ATM"
class EnforceCertificateNameCheckEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on
    non-HTTPS requests.
    """
    # Service wire values.

    ENABLED = "Enabled"
    DISABLED = "Disabled"
class FrontDoorCertificateSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Defines the source of the SSL certificate
    """
    # Service wire values.

    AZURE_KEY_VAULT = "AzureKeyVault"
    FRONT_DOOR = "FrontDoor"
class FrontDoorCertificateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Defines the type of the certificate used for secure connections to a frontendEndpoint
    """
    # Only one certificate type is currently defined by the service.

    DEDICATED = "Dedicated"
class FrontDoorEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Operational status of the Front Door load balancer. Permitted values are 'Enabled' or
    'Disabled'
    """
    # Service wire values.

    ENABLED = "Enabled"
    DISABLED = "Disabled"
class FrontDoorForwardingProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Protocol this rule will use when forwarding traffic to backends.
    """
    # MatchRequest forwards using the same scheme the client used.

    HTTP_ONLY = "HttpOnly"
    HTTPS_ONLY = "HttpsOnly"
    MATCH_REQUEST = "MatchRequest"
class FrontDoorHealthProbeMethod(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Configures which HTTP method to use to probe the backends defined under backendPools.
    """
    # HTTP method names are upper-case on the wire.

    GET = "GET"
    HEAD = "HEAD"
class FrontDoorProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Accepted protocol schemes.
    """
    # Service wire values.

    HTTP = "Http"
    HTTPS = "Https"
class FrontDoorQuery(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Treatment of URL query terms when forming the cache key.
    """
    # Service wire values.

    STRIP_NONE = "StripNone"
    STRIP_ALL = "StripAll"
    STRIP_ONLY = "StripOnly"
    STRIP_ALL_EXCEPT = "StripAllExcept"
class FrontDoorRedirectProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The protocol of the destination to where the traffic is redirected
    """
    # MatchRequest redirects using the same scheme the client used.

    HTTP_ONLY = "HttpOnly"
    HTTPS_ONLY = "HttpsOnly"
    MATCH_REQUEST = "MatchRequest"
class FrontDoorRedirectType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The redirect type the rule will use when redirecting traffic.
    """
    # Service wire values (HTTP 301/302/307/308 semantics).

    MOVED = "Moved"
    FOUND = "Found"
    TEMPORARY_REDIRECT = "TemporaryRedirect"
    PERMANENT_REDIRECT = "PermanentRedirect"
class FrontDoorResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Resource status of the Front Door or Front Door SubResource.
    """
    # Service wire values reflecting the resource lifecycle.

    CREATING = "Creating"
    ENABLING = "Enabling"
    ENABLED = "Enabled"
    DISABLING = "Disabling"
    DISABLED = "Disabled"
    DELETING = "Deleting"
class FrontDoorTlsProtocolType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Defines the TLS extension protocol that is used for secure delivery
    """
    # Only SNI is currently defined by the service.

    SERVER_NAME_INDICATION = "ServerNameIndication"
class HeaderActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Which type of manipulation to apply to the header.
    """
    # Service wire values.

    APPEND = "Append"
    DELETE = "Delete"
    OVERWRITE = "Overwrite"
class HealthProbeEnabled(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Whether to enable health probes to be made against backends defined under backendPools. Health
    probes can only be disabled if there is a single enabled backend in single enabled backend
    pool.
    """
    # Service wire values.

    ENABLED = "Enabled"
    DISABLED = "Disabled"
class LatencyScorecardAggregationInterval(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The aggregation interval of a latency scorecard.
    """

    DAILY = "Daily"
    WEEKLY = "Weekly"
    MONTHLY = "Monthly"
class ManagedRuleEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Describes if the managed rule is in enabled or disabled state.
    """
    # Service wire values.

    DISABLED = "Disabled"
    ENABLED = "Enabled"
class ManagedRuleExclusionMatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The variable type to be excluded.
    """
    # Service wire values.

    REQUEST_HEADER_NAMES = "RequestHeaderNames"
    REQUEST_COOKIE_NAMES = "RequestCookieNames"
    QUERY_STRING_ARG_NAMES = "QueryStringArgNames"
    REQUEST_BODY_POST_ARG_NAMES = "RequestBodyPostArgNames"
    REQUEST_BODY_JSON_ARG_NAMES = "RequestBodyJsonArgNames"
class ManagedRuleExclusionSelectorMatchOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Comparison operator to apply to the selector when specifying which elements in the collection
this exclusion applies to.
"""
EQUALS = "Equals"
CONTAINS = "Contains"
STARTS_WITH = "StartsWith"
ENDS_WITH = "EndsWith"
EQUALS_ANY = "EqualsAny"
class ManagedRuleSetActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Defines the action to take when a managed rule set score threshold is met.
"""
BLOCK = "Block"
LOG = "Log"
REDIRECT = "Redirect"
class MatchProcessingBehavior(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""If this rule is a match should the rules engine continue running the remaining rules or stop.
If not present, defaults to Continue.
"""
CONTINUE_ENUM = "Continue"
STOP = "Stop"
class MatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Request variable to compare with.
"""
REMOTE_ADDR = "RemoteAddr"
REQUEST_METHOD = "RequestMethod"
QUERY_STRING = "QueryString"
POST_ARGS = "PostArgs"
REQUEST_URI = "RequestUri"
REQUEST_HEADER = "RequestHeader"
REQUEST_BODY = "RequestBody"
COOKIES = "Cookies"
SOCKET_ADDR = "SocketAddr"
class MinimumTLSVersion(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The minimum TLS version required from the clients to establish an SSL handshake with Front
Door.
"""
ONE0 = "1.0"
ONE2 = "1.2"
class NetworkExperimentResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Defines the server side resource status
"""
CREATING = "Creating"
ENABLING = "Enabling"
ENABLED = "Enabled"
DISABLING = "Disabling"
DISABLED = "Disabled"
DELETING = "Deleting"
class NetworkOperationStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Status of the Azure async operation. Possible values are: 'InProgress', 'Succeeded', and
'Failed'.
"""
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class Operator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Comparison type to use for matching with the variable value.
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
EQUAL = "Equal"
CONTAINS = "Contains"
LESS_THAN = "LessThan"
GREATER_THAN = "GreaterThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
REG_EX = "RegEx"
class PolicyEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes if the policy is in enabled or disabled state. Defaults to Enabled if not specified.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PolicyMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes if it is in detection mode or prevention mode at policy level.
"""
PREVENTION = "Prevention"
DETECTION = "Detection"
class PolicyRequestBodyCheck(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes if policy managed rules will inspect the request body content.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PolicyResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Resource status of the policy.
"""
CREATING = "Creating"
ENABLING = "Enabling"
ENABLED = "Enabled"
DISABLING = "Disabling"
DISABLED = "Disabled"
DELETING = "Deleting"
class PrivateEndpointStatus(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The Approval status for the connection to the Private Link
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
TIMEOUT = "Timeout"
class ResourceType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Type of Front Door resource used in CheckNameAvailability.
"""
MICROSOFT_NETWORK_FRONT_DOORS = "Microsoft.Network/frontDoors"
MICROSOFT_NETWORK_FRONT_DOORS_FRONTEND_ENDPOINTS = "Microsoft.Network/frontDoors/frontendEndpoints"
class RoutingRuleEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class RulesEngineMatchVariable(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Match Variable
"""
IS_MOBILE = "IsMobile"
REMOTE_ADDR = "RemoteAddr"
REQUEST_METHOD = "RequestMethod"
QUERY_STRING = "QueryString"
POST_ARGS = "PostArgs"
REQUEST_URI = "RequestUri"
REQUEST_PATH = "RequestPath"
REQUEST_FILENAME = "RequestFilename"
REQUEST_FILENAME_EXTENSION = "RequestFilenameExtension"
REQUEST_HEADER = "RequestHeader"
REQUEST_BODY = "RequestBody"
REQUEST_SCHEME = "RequestScheme"
class RulesEngineOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes operator to apply to the match condition.
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
EQUAL = "Equal"
CONTAINS = "Contains"
LESS_THAN = "LessThan"
GREATER_THAN = "GreaterThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
class RuleType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes type of rule.
"""
MATCH_RULE = "MatchRule"
RATE_LIMIT_RULE = "RateLimitRule"
class SessionAffinityEnabledState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled'
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class SkuName(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Name of the pricing tier.
"""
CLASSIC_AZURE_FRONT_DOOR = "Classic_AzureFrontDoor"
STANDARD_AZURE_FRONT_DOOR = "Standard_AzureFrontDoor"
PREMIUM_AZURE_FRONT_DOOR = "Premium_AzureFrontDoor"
class State(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The state of the Experiment
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class TimeseriesAggregationInterval(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The aggregation interval of the Timeseries.
    """

    HOURLY = "Hourly"
    DAILY = "Daily"
class TimeseriesType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""The type of Timeseries
"""
MEASUREMENT_COUNTS = "MeasurementCounts"
LATENCY_P50 = "LatencyP50"
LATENCY_P75 = "LatencyP75"
LATENCY_P95 = "LatencyP95"
class Transform(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes what transforms are applied before matching
"""
LOWERCASE = "Lowercase"
UPPERCASE = "Uppercase"
TRIM = "Trim"
URL_DECODE = "UrlDecode"
URL_ENCODE = "UrlEncode"
REMOVE_NULLS = "RemoveNulls"
class TransformType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
"""Describes what transforms applied before matching.
"""
LOWERCASE = "Lowercase"
UPPERCASE = "Uppercase"
TRIM = "Trim"
URL_DECODE = "UrlDecode"
URL_ENCODE = "UrlEncode"
REMOVE_NULLS = "RemoveNulls"
|
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import glob
import logging
import os
import unittest.mock
from pants.binaries.binary_util import (
BinaryRequest,
BinaryToolFetcher,
BinaryToolUrlGenerator,
BinaryUtil,
select,
)
from pants.net.http.fetcher import Fetcher
from pants.testutil.test_base import TestBase
from pants.util.collections import assert_single_element
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import is_readable_dir, safe_file_dump, safe_open
logger = logging.getLogger(__name__)
class ExternalUrlGenerator(BinaryToolUrlGenerator):
    """Test double that always yields the same two fixed pantsbuild.org URLs."""

    def generate_urls(self, version, host_platform):
        # Both arguments are ignored: every request maps to the same fixed URLs.
        return ["https://www.pantsbuild.org/some-binary", "https://www.pantsbuild.org/same-binary"]

    # Make the __str__ deterministic, for testing exception messages.
    def __repr__(self):
        return "ExternalUrlGenerator(<example __str__()>)"
# TODO: test requests with an archiver!
class BinaryUtilTest(TestBase):
"""Tests binary_util's binaries_baseurls handling."""
class MapFetcher:
    """A stand-in for pants.net.http.Fetcher backed by a plain dict of canned payloads."""

    def __init__(self, read_map):
        # read_map: dict mapping URL -> bytes payload served for that URL.
        self._canned = read_map

    def download(self, url, path_or_fd=None, **kwargs):
        """Write the canned payload for *url* into *path_or_fd* and return it.

        Unknown URLs raise IOError, mimicking a 404 response.
        """
        if url not in self._canned:
            raise IOError(f"404: Virtual URL '{url}' does not exist.")
        if not path_or_fd:
            raise AssertionError("Expected path_or_fd to be set")
        path_or_fd.write(self._canned[url])
        return path_or_fd

    def keys(self):
        """All URLs this fake fetcher can serve."""
        return list(self._canned)

    def values(self):
        """All payloads this fake fetcher can serve."""
        return list(self._canned.values())

    def __getitem__(self, key):
        # Plain dict access, mirroring a mapping interface for tests.
        return self._canned[key]
@classmethod
def _fake_base(cls, name):
    """Build a fake base-url string for *name*."""
    return "fake-url-{}".format(name)
@classmethod
def _fake_url(cls, binaries, base, binary_key):
    """Return the download URL for *binary_key* under base url *base*.

    *binaries* maps binary_key -> (supportdir, version, name) tuples.
    """
    binary_util = cls._gen_binary_util()
    supportdir, version, name = binaries[binary_key]
    binary_request = binary_util._make_deprecated_binary_request(supportdir, version, name)
    binary_path = binary_request.get_download_path(binary_util.host_platform())
    return f"{base}/{binary_path}"
@classmethod
def _gen_binary_tool_fetcher(
    cls, bootstrap_dir="/tmp", timeout_secs=30, fetcher=None, ignore_cached_download=True
):
    """Create a BinaryToolFetcher with test-friendly defaults.

    ignore_cached_download=True forces a re-fetch even when a previous test
    run left a cached copy under *bootstrap_dir*.
    """
    return BinaryToolFetcher(
        bootstrap_dir=bootstrap_dir,
        timeout_secs=timeout_secs,
        fetcher=fetcher,
        ignore_cached_download=ignore_cached_download,
    )
@classmethod
def _gen_binary_util(
    cls,
    baseurls=None,
    path_by_id=None,
    allow_external_binary_tool_downloads=True,
    uname_func=None,
    **kwargs,
):
    """Create a BinaryUtil under test.

    Extra keyword args are forwarded to _gen_binary_tool_fetcher.
    """
    # Fix: the original used a mutable default argument (`baseurls=[]`);
    # None is the conventional sentinel and keeps callers unchanged.
    if baseurls is None:
        baseurls = []
    return BinaryUtil(
        baseurls=baseurls,
        binary_tool_fetcher=cls._gen_binary_tool_fetcher(**kwargs),
        path_by_id=path_by_id,
        allow_external_binary_tool_downloads=allow_external_binary_tool_downloads,
        uname_func=uname_func,
    )
@classmethod
def _read_file(cls, file_path):
    """Return the raw bytes stored at *file_path*."""
    with open(file_path, "rb") as fh:
        contents = fh.read()
    return contents
def test_timeout(self):
    """The configured timeout_secs must be forwarded to the fetcher's download call."""
    fetcher = unittest.mock.create_autospec(Fetcher, spec_set=True)
    timeout_value = 42
    binary_util = self._gen_binary_util(
        baseurls=["http://binaries.example.com"], timeout_secs=timeout_value, fetcher=fetcher
    )
    # Sanity check: nothing fetched until we actually select a script.
    self.assertFalse(fetcher.download.called)
    fetch_path = binary_util.select_script(
        supportdir="a-binary", version="v1.2", name="a-binary"
    )
    logger.debug(f"fetch_path: {fetch_path}")
    fetcher.download.assert_called_once_with(
        "http://binaries.example.com/a-binary/v1.2/a-binary",
        listener=unittest.mock.ANY,
        path_or_fd=unittest.mock.ANY,
        timeout_secs=timeout_value,
    )
def test_no_base_urls_error(self):
"""Tests exception handling if build support urls are improperly specified."""
binary_util = self._gen_binary_util()
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=False, external_url_generator=None, archiver=None): "
"--binaries-baseurls is empty."
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_support_url_multi(self):
"""Tests to make sure existing base urls function as expected."""
bootstrap_dir = "/tmp"
with temporary_dir() as invalid_local_files, temporary_dir() as valid_local_files:
binary_util = self._gen_binary_util(
baseurls=[
"BLATANTLY INVALID URL",
"https://dl.bintray.com/pantsbuild/bin/reasonably-invalid-url",
invalid_local_files,
valid_local_files,
"https://dl.bintray.com/pantsbuild/bin/another-invalid-url",
],
bootstrap_dir=bootstrap_dir,
)
binary_request = binary_util._make_deprecated_binary_request(
supportdir="bin/protobuf", version="2.4.1", name="protoc"
)
binary_path = binary_request.get_download_path(binary_util.host_platform())
contents = b"proof"
with safe_open(os.path.join(valid_local_files, binary_path), "wb") as fp:
fp.write(contents)
binary_path_abs = os.path.join(bootstrap_dir, binary_path)
self.assertEqual(
os.path.realpath(binary_path_abs),
os.path.realpath(binary_util.select(binary_request)),
)
self.assertEqual(contents, self._read_file(binary_path_abs))
def test_support_url_fallback(self):
    """Tests fallback behavior with multiple support baseurls.

    Mocks up some dummy baseurls and then swaps out the URL reader to make sure urls are
    accessed and others are not.
    """
    fake_base, fake_url = self._fake_base, self._fake_url
    bases = [fake_base("apple"), fake_base("orange"), fake_base("banana")]
    # Map binary name -> (supportdir, version, name).
    binaries = {
        t[2]: t
        for t in (
            ("bin/protobuf", "2.4.1", "protoc"),
            ("bin/ivy", "4.3.7", "ivy"),
            ("bin/bash", "4.4.3", "bash"),
        )
    }
    # Payloads prefixed b"SEEN" live at the earliest base url carrying each
    # binary; the b"UNSEEN" copies at later bases must never be fetched.
    fetcher = self.MapFetcher(
        {
            fake_url(binaries, bases[0], "protoc"): b"SEEN PROTOC",
            fake_url(binaries, bases[0], "ivy"): b"SEEN IVY",
            fake_url(binaries, bases[1], "bash"): b"SEEN BASH",
            fake_url(binaries, bases[1], "protoc"): b"UNSEEN PROTOC 1",
            fake_url(binaries, bases[2], "protoc"): b"UNSEEN PROTOC 2",
            fake_url(binaries, bases[2], "ivy"): b"UNSEEN IVY 2",
        }
    )
    binary_util = self._gen_binary_util(baseurls=bases, fetcher=fetcher)
    # Start with every SEEN payload; each successful fetch removes one below.
    unseen = [item for item in fetcher.values() if item.startswith(b"SEEN ")]
    for supportdir, version, name in binaries.values():
        binary_path_abs = binary_util.select_binary(
            supportdir=supportdir, version=version, name=name
        )
        expected_content = f"SEEN {name.upper()}".encode()
        self.assertEqual(expected_content, self._read_file(binary_path_abs))
        unseen.remove(expected_content)
    self.assertEqual(0, len(unseen))  # Make sure we've seen all the SEENs.
def test_select_binary_base_path_linux(self):
    """A linux/amd64 uname maps to the linux/x86_64 download path."""
    def uname_func():
        return "linux", "dontcare1", "dontcare2", "dontcare3", "amd64"

    binary_util = self._gen_binary_util(uname_func=uname_func)
    binary_request = binary_util._make_deprecated_binary_request(
        "supportdir", "version", "name"
    )
    self.assertEqual(
        "supportdir/linux/x86_64/version/name", binary_util._get_download_path(binary_request)
    )
def test_select_binary_base_path_darwin(self):
    """A darwin release-14.x uname maps to the mac/10.10 download path."""
    def uname_func():
        return (
            "darwin",
            "dontcare1",
            "14.9",
            "dontcare2",
            "dontcare3",
        )

    binary_util = self._gen_binary_util(uname_func=uname_func)
    binary_request = binary_util._make_deprecated_binary_request(
        "supportdir", "version", "name"
    )
    self.assertEqual(
        "supportdir/mac/10.10/version/name", binary_util._get_download_path(binary_request)
    )
def test_select_binary_base_path_missing_os(self):
def uname_func():
return "vms", "dontcare1", "999.9", "dontcare2", "VAX9"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("supportdir", "version", "name")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=True, external_url_generator=None, archiver=None): "
"Pants could not resolve binaries for the current host: platform 'vms' was not recognized. "
"Recognized platforms are: [darwin, linux]."
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_missing_arch(self):
def uname_func():
return "linux", "dontcare1", "don'tcare2", "dontcare3", "quantum_computer"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_binary("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='mysupportdir', version='myversion', "
"name='myname', platform_dependent=True, external_url_generator=None, archiver=None): "
"Pants could not resolve binaries for the current host. Update --binaries-path-by-id to "
"find binaries for the current host platform ('linux', "
"'quantum_computer').\\n--binaries-path-by-id was:"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_script_missing_arch(self):
def uname_func():
return "linux", "dontcare1", "dontcare2", "dontcare3", "quantum_computer"
binary_util = self._gen_binary_util(uname_func=uname_func)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select_script("mysupportdir", "myversion", "myname")
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryUtil.MissingMachineInfo.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='mysupportdir', version='myversion', "
# platform_dependent=False when doing select_script()
"name='myname', platform_dependent=False, external_url_generator=None, archiver=None): Pants "
"could not resolve binaries for the current host. Update --binaries-path-by-id to find "
"binaries for the current host platform ('linux', "
"'quantum_computer').\\n--binaries-path-by-id was:"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_select_binary_base_path_override(self):
def uname_func():
return "darwin", "dontcare1", "100.99", "dontcare2", "t1000"
binary_util = self._gen_binary_util(
uname_func=uname_func, path_by_id={("darwin", "100"): ["skynet", "42"]}
)
binary_request = binary_util._make_deprecated_binary_request(
"supportdir", "version", "name"
)
self.assertEqual(
"supportdir/skynet/42/version/name", binary_util._get_download_path(binary_request)
)
def test_external_url_generator(self):
binary_util = self._gen_binary_util(baseurls=[])
binary_request = BinaryRequest(
supportdir="supportdir",
version="version",
name="name",
platform_dependent=False,
external_url_generator=ExternalUrlGenerator(),
# TODO: test archiver!
archiver=None,
)
with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
binary_util.select(binary_request)
the_raised_exception_message = str(cm.exception)
self.assertIn(BinaryToolFetcher.BinaryNotFound.__name__, the_raised_exception_message)
expected_msg = (
"Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
"name='name', platform_dependent=False, "
"external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
"Failed to fetch name binary from any source: (Failed to fetch binary from "
"https://www.pantsbuild.org/some-binary: Fetch of https://www.pantsbuild.org/some-binary failed with "
"status code 404, Failed to fetch binary from https://www.pantsbuild.org/same-binary: Fetch of "
"https://www.pantsbuild.org/same-binary failed with status code 404)"
)
self.assertIn(expected_msg, the_raised_exception_message)
def test_disallowing_external_urls(self):
    """An external url generator must be rejected when external downloads are disallowed."""
    binary_util = self._gen_binary_util(baseurls=[], allow_external_binary_tool_downloads=False)
    # Fix: the original read `binary_request = binary_request = BinaryRequest(...)`,
    # a doubled assignment left over from editing.
    binary_request = BinaryRequest(
        supportdir="supportdir",
        version="version",
        name="name",
        platform_dependent=False,
        external_url_generator=ExternalUrlGenerator(),
        # TODO: test archiver!
        archiver=None,
    )
    with self.assertRaises(BinaryUtil.BinaryResolutionError) as cm:
        binary_util.select(binary_request)
    the_raised_exception_message = str(cm.exception)
    self.assertIn(BinaryUtil.NoBaseUrlsError.__name__, the_raised_exception_message)
    expected_msg = (
        "Error resolving binary request BinaryRequest(supportdir='supportdir', version='version', "
        "name='name', platform_dependent=False, "
        "external_url_generator=ExternalUrlGenerator(<example __str__()>), archiver=None): "
        "--binaries-baseurls is empty."
    )
    self.assertIn(expected_msg, the_raised_exception_message)
def test_select_argv(self):
"""Test invoking binary_util.py as a standalone script."""
with temporary_dir() as tmp_dir:
config_file_loc = os.path.join(tmp_dir, "pants.toml")
safe_file_dump(
config_file_loc,
payload=f"""\
[GLOBAL]
allow_external_binary_tool_downloads = true
pants_bootstrapdir = "{tmp_dir}"
""",
)
expected_output_glob = os.path.join(tmp_dir, "bin", "cmake", "*", "*", "3.9.5", "cmake")
with environment_as(PANTS_CONFIG_FILES=f"[{config_file_loc!r}]"):
# Ignore the first argument, as per sys.argv.
output_file = select(["_", "cmake", "3.9.5", "cmake.tar.gz"])
self.assertTrue(is_readable_dir(output_file))
realized_glob = assert_single_element(glob.glob(expected_output_glob))
self.assertEqual(os.path.realpath(output_file), os.path.realpath(realized_glob))
|
|
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.fsm import FSM
from direct.distributed import DistributedObject
from direct.showutil import Rope
from direct.showbase import PythonUtil
from direct.task import Task
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from direct.actor import Actor
from toontown.suit import Suit
from toontown.suit import SuitDNA
import random
from toontown.battle import BattleProps
from toontown.toon import NPCToons
class DistributedLawbotChair(DistributedObject.DistributedObject, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotChair')
chairCushionSurface = Point3(0, -0.75, 2.25)
landingPt = Point3(0, -1.5, 0)
courtroomCeiling = 30
def __init__(self, cr):
    """cr: the client repository this distributed object lives in."""
    DistributedObject.DistributedObject.__init__(self, cr)
    FSM.FSM.__init__(self, 'DistributedLawbotBossChair')
    self.boss = None  # boss object, looked up by doId in setBossCogId
    self.index = None  # seat index within the jury box, set via setIndex
    self.avId = 0
    self.modelPath = 'phase_11/models/lawbotHQ/JuryBoxChair'
    self.modelFindString = None
    self.nodePath = None  # root Actor node, created in makeNodePath
    self.ival = None  # looping hpr interval used by the 'On' state
    self.origHpr = Point3(0, 0, 0)
    self.downTime = 0.5  # seconds for the chair to tilt down
    self.upTime = 5  # seconds for the chair to tilt back up
    self.cogJuror = None
    self.propInSound = None
    self.propOutSound = None
    self.propTrack = None  # propeller spin animation for the flying cog juror
    self.cogJurorTrack = None  # flight (position + sound) interval for the cog juror
    self.cogJurorSound = None
    self.toonJurorIndex = -1
    self.toonJuror = None
    return
def announceGenerate(self):
    """Build the chair's model, sounds and cog juror once the object is generated."""
    self.notify.debug('announceGenerate: %s' % self.doId)
    DistributedObject.DistributedObject.announceGenerate(self)
    self.name = 'Chair-%s' % self.doId
    self.loadModel(self.modelPath, self.modelFindString)
    # Seed from our doId so every client derives the same random cog juror.
    self.randomGenerator = random.Random()
    self.randomGenerator.seed(self.doId)
    self.loadSounds()
    self.loadCogJuror()
    self.cogJuror.stash()
    origPos = self.computePos()
    self.nodePath.setPos(origPos)
    self.nodePath.setHpr(-90, 0, 0)
    chairParent = self.boss.getChairParent()
    self.nodePath.wrtReparentTo(chairParent)
    # Register with the boss so it can address chairs by seat index.
    self.boss.chairs[self.index] = self
def delete(self):
    """Release the model, sounds and scene-graph node on object deletion."""
    DistributedObject.DistributedObject.delete(self)
    loader.unloadModel(self.modelPath)
    self.unloadSounds()
    self.nodePath.removeNode()
def loadModel(self, modelPath, modelFindString=None):
    """Load (or reload) the chair model and instance it under self.chair.

    modelPath: model file to load.
    modelFindString: optional node name; when given, only that subtree of
        the loaded model is instanced.
    """
    if self.nodePath is None:  # fix: was `== None`
        self.makeNodePath()
    else:
        # Reloading: drop the previous model instance(s) first.
        self.chair.getChildren().detach()
    model = loader.loadModel(modelPath)
    if modelFindString is not None:  # fix: was `!= None`
        model = model.find('**/' + modelFindString)
    model.instanceTo(self.chair)
    # Stash the designer-supplied collision geometry; attachColSphere
    # installs our own collision sphere instead.
    trigger_chair = self.chair.find('**/trigger_chair')
    if not trigger_chair.isEmpty():
        trigger_chair.stash()
    collision_chair = self.chair.find('**/collision_chair')
    if not collision_chair.isEmpty():
        collision_chair.stash()
    # Removed a dead `shadow = self.chair.find('**/shadow')` lookup whose
    # branch body was `pass`; NodePath.find has no side effects.
    self.scale = 0.5
    self.chair.setScale(self.scale)
    self.attachColSphere()
def loadSounds(self):
    """Lazily load the propeller and cog-jury sound effects (idempotent)."""
    # Fix: identity comparison with None (`is None`) instead of `== None`.
    if self.propInSound is None:
        self.propInSound = base.loadSfx('phase_5/audio/sfx/ENC_propeller_in.ogg')
    if self.propOutSound is None:
        self.propOutSound = base.loadSfx('phase_5/audio/sfx/ENC_propeller_out.ogg')
    if self.cogJurorSound is None:
        self.cogJurorSound = base.loadSfx('phase_11/audio/sfx/LB_cog_jury.ogg')
def unloadSounds(self):
    """Release every sound effect loaded by loadSounds."""
    for attrName in ('propInSound', 'propOutSound', 'cogJurorSound'):
        if getattr(self, attrName):
            # Dropping our reference lets the sound be collected; reset to
            # None so loadSounds can reload on demand.
            setattr(self, attrName, None)
def loadCogJuror(self):
    """Create a fresh lawbot cog juror suit parented to this chair.

    The suit's random look is drawn from self.randomGenerator (seeded from
    self.doId in announceGenerate), so every client builds the same juror.
    """
    self.cleanupCogJuror()
    self.cogJuror = Suit.Suit()
    level = self.randomGenerator.randrange(len(SuitDNA.suitsPerLevel))
    self.cogJuror.dna = SuitDNA.SuitDNA()
    self.cogJuror.dna.newSuitRandom(level=level, dept='l')
    self.cogJuror.setDNA(self.cogJuror.dna)
    self.cogJuror.pose('landing', 0)
    self.cogJuror.reparentTo(self.nodePath)
    # Fix: the original set `prop = None` and immediately tested it with an
    # always-true `if prop == None:`; the propeller is created unconditionally.
    self.cogJuror.prop = BattleProps.globalPropPool.getProp('propeller')
    head = self.cogJuror.find('**/joint_head')
    self.cogJuror.prop.reparentTo(head)
    self.propTrack = Sequence(ActorInterval(self.cogJuror.prop, 'propeller', startFrame=8, endFrame=25))
def attachColSphere(self):
    """Attach a tangible collision sphere roughly around the chair's top."""
    # Fix: removed unused `chairTop`/`chairHandle` find() locals; NodePath.find
    # is side-effect free, so dropping them changes nothing.
    collNode = CollisionNode(self.uniqueName('headSphere'))
    topBounds = self.chair.getBounds()
    center = topBounds.getCenter()
    radius = topBounds.getRadius()
    # Shrink and raise the sphere so it hugs the seat rather than the whole
    # model's bounding volume.
    radius *= 0.65
    adjustedZ = center[2]
    adjustedZ += 0.6
    sphere1 = CollisionSphere(center[0], center[1], adjustedZ, radius)
    sphere1.setTangible(1)
    collNode.addSolid(sphere1)
    collNode.setName('Chair-%s' % self.index)
    self.collNodePath = self.nodePath.attachNewNode(collNode)
def makeNodePath(self):
    """Create the root Actor node and the 'myChair' child the model instances under."""
    self.nodePath = Actor.Actor()
    self.chair = self.nodePath.attachNewNode('myChair')
def disable(self):
    """Tear down intervals, jurors and our entry in the boss's chair table."""
    DistributedObject.DistributedObject.disable(self)
    self.nodePath.detachNode()
    if self.ival:
        self.ival.finish()
        self.ival = None
    self.ignoreAll()
    # Remove ourselves from the boss registry (added in announceGenerate)
    # before cleanup() drops the boss reference.
    del self.boss.chairs[self.index]
    self.cleanup()
    if self.propTrack:
        self.propTrack.finish()
        self.propTrack = None
    if self.cogJurorTrack:
        self.cogJurorTrack.finish()
        self.cogJurorTrack = None
    self.cleanupCogJuror()
    self.cleanupToonJuror()
    return
def stopCogsFlying(self):
    """Finish and clear every interval animating this chair's cogs."""
    # Same order as before: main interval, propeller spin, flight track.
    for attrName in ('ival', 'propTrack', 'cogJurorTrack'):
        track = getattr(self, attrName)
        if track:
            track.finish()
            setattr(self, attrName, None)
    return
def cleanupCogJuror(self):
    """Detach and destroy the current cog juror actor, if any."""
    if self.cogJuror:
        self.cogJuror.detachNode()
        self.cogJuror.delete()
        del self.cogJuror
        self.cogJuror = None
    return
def cleanupToonJuror(self):
    """Detach and destroy the current toon juror actor, if any."""
    if self.toonJuror:
        self.toonJuror.detachNode()
        self.toonJuror.delete()
        del self.toonJuror
        self.toonJuror = None
    return
def cleanup(self):
    # Drop the back-reference to the boss, breaking the reference cycle.
    self.boss = None
    return
def startCogJuror(self, duration, y):
    """Fly a fresh cog juror down from the ceiling toward this chair.

    duration: flight time in seconds.
    y: starting y offset (relative to the chair) for the flight.
    """
    if self.cogJurorTrack:
        self.cogJurorTrack.finish()
    self.loadCogJuror()
    self.cogJuror.stash()
    x = 0
    curPos = self.nodePath.getPos(render)
    # Start high enough that the juror appears at the courtroom ceiling.
    z = self.courtroomCeiling - curPos[2]
    self.notify.debug('curPos =%s\nz=%f' % (curPos, z))
    # Flight: place at the start point, show, spin the propeller, descend to
    # landingPt, then stop the propeller and stash the juror again.
    cogTrack = Sequence(Func(self.cogJuror.setPos, x, y, z), Func(self.cogJuror.unstash), Func(self.propTrack.loop), self.cogJuror.posInterval(duration, self.landingPt, Point3(x, y, z)), Func(self.propTrack.finish), Func(self.stashCogJuror))
    audioTrack = SoundInterval(self.propInSound, duration=duration, node=self.cogJuror, loop=1)
    self.cogJurorTrack = Parallel(audioTrack, cogTrack)
    self.cogJurorTrack.start()
def stashCogJuror(self):
    """Hide the cog juror (used as the final step of its flight track)."""
    if self.cogJuror and not self.cogJuror.isEmpty():
        self.cogJuror.stash()
def putCogJurorOnSeat(self):
    """Seat the cog juror on the chair cushion, hiding any toon juror."""
    self.stopCogsFlying()
    if self.cogJuror and not self.cogJuror.isEmpty():
        base.playSfx(self.cogJurorSound, node=self.chair)
        self.cogJuror.unstash()
        self.cogJuror.prop.stash()
        self.cogJuror.pose('landing', 47)  # presumably the final landing frame — TODO confirm
        self.cogJuror.setH(180)
        self.cogJuror.setPos(0, -1.25, 0.95)
        if self.toonJuror:
            self.toonJuror.hide()
    else:
        self.notify.warning('putCogJurorOnSeat invalid cogJuror')
def putToonJurorOnSeat(self):
    """Show the NPC toon juror sitting on this chair."""
    if self.toonJuror and not self.toonJuror.isEmpty():
        self.toonJuror.show()
        self.toonJuror.reparentTo(self.nodePath)
        self.toonJuror.setH(180)
        self.toonJuror.setPos(0, -2.5, 0.95)
        self.toonJuror.animFSM.request('Sit')
    else:
        self.notify.warning('putToonJurorOnSeat invalid toonJuror')
def showCogJurorFlying(self):
    """Kick off the cog juror flight using the globally tuned time and distance."""
    self.notify.debug('showCogJurorFlying')
    self.startCogJuror(ToontownGlobals.LawbotBossCogJurorFlightTime, -ToontownGlobals.LawbotBossCogJurorDistance)
def setBossCogId(self, bossCogId):
    """Distributed update: bind this chair to its boss object by doId."""
    self.bossCogId = bossCogId
    self.boss = base.cr.doId2do[bossCogId]
def setIndex(self, index):
    """Distributed update: which jury-box seat this chair occupies."""
    self.index = index
def setState(self, state):
    """Distributed update: map a one-letter state code from the AI to an FSM state."""
    avId = 0
    if state == 'C':
        self.demand('Controlled', avId)
    elif state == 'F':
        self.demand('Free')
    elif state == 'N':
        self.demand('On')
    elif state == 'T':
        self.demand('ToonJuror')
    elif state == 'S':
        self.demand('SuitJuror')
    elif state == 'E':
        self.demand('EmptyJuror')
    # NOTE(review): this second 'E' branch duplicates the one above and is
    # unreachable, so 'StopCogs' can never be demanded from here — the AI
    # presumably sends a different code; verify against the AI-side sender.
    elif state == 'E':
        self.demand('StopCogs')
    else:
        self.notify.error('Invalid state from AI: %s' % state)
def __touchedChair(self, entry):
    """Collision callback: forward chair-body touches to the boss."""
    self.notify.debug('__touchedChair')
    self.notify.debug('self=%s entry=%s' % (self, entry))
    self.boss.touchedChair(self, entry)
def __touchedChairHandle(self, entry):
    """Collision callback: forward chair-handle touches to the boss."""
    self.notify.debug('__touchedChairHandle')
    self.boss.touchedChairHandle(self, entry)
def enterToonJuror(self):
    """FSM state: a toon juror occupies this chair (tinted blue)."""
    self.chair.setColorScale(0.2, 0.2, 1.0, 1.0)
    self.boss.countToonJurors()
    # Only hide the cog if no flight track owns it right now.
    if not self.cogJurorTrack:
        self.cogJuror.stash()
    self.putToonJurorOnSeat()
def enterSuitJuror(self):
    """FSM state: a cog juror occupies this chair (tinted grey)."""
    self.chair.setColorScale(0.5, 0.5, 0.5, 1.0)
    self.boss.countToonJurors()
    if self.toonJuror:
        self.toonJuror.hide()
    self.putCogJurorOnSeat()
def enterEmptyJuror(self):
    """FSM state: the chair is empty; restore the neutral colour."""
    self.chair.setColorScale(1.0, 1.0, 1.0, 1.0)
def enterStopCogs(self):
    """FSM state: halt cog activity on this chair."""
    # NOTE(review): stopCogs is not defined in this class — possibly
    # stopCogsFlying was intended, or it is provided by a base class; verify.
    self.stopCogs()
def exitStopCogs(self):
    """FSM state exit: nothing to undo."""
    pass
def enterOn(self):
    """FSM state: loop the tilt-down/swing-up motion through this chair's headings."""
    self.notify.debug('enterOn for chair %d' % self.index)
    myHeadings = ToontownGlobals.LawbotBossChairHeadings[self.index]
    seqName = 'LawbotBossChair-%s' % self.doId
    self.ival = Sequence(name=seqName)
    downAngle = -80
    # xrange: this module is Python 2 code.
    for index in xrange(len(myHeadings)):
        nextIndex = index + 1
        if nextIndex == len(myHeadings):
            nextIndex = 0
        # Tilt down at the current heading, pause, then swing up to the next.
        goingDown = self.nodePath.hprInterval(self.downTime, Point3(myHeadings[index] + self.origHpr[0], downAngle, self.origHpr[2]), startHpr=Point3(myHeadings[index] + self.origHpr[0], 0, self.origHpr[2]))
        self.ival.append(goingDown)
        # NOTE(review): self.stayDownTime is not assigned in __init__ —
        # confirm it is set elsewhere before this state can be entered.
        self.ival.append(Wait(self.stayDownTime))
        goingUp = self.nodePath.hprInterval(self.upTime, Point3(myHeadings[nextIndex] + self.origHpr[0], 0, self.origHpr[2]), startHpr=Point3(myHeadings[index] + self.origHpr[0], downAngle, self.origHpr[2]))
        self.ival.append(goingUp)
    self.ival.loop()
    self.accept('enterChairZap', self.__touchedChair)
    self.accept('enterChairHandleZap', self.__touchedChairHandle)
def computePos(self):
    """Return this chair's position, derived from its seat index.

    Indices below 6 sit in row 1, the rest in row 2; each row interpolates
    linearly between its A and B endpoints, then the jury-box relative end
    position is subtracted as a final offset.
    """
    seatInRow = self.index % 6
    if self.index < 6:
        rowStart = Point3(*ToontownGlobals.LawbotBossChairRow1PosA)
        rowEnd = Point3(*ToontownGlobals.LawbotBossChairRow1PosB)
    else:
        rowStart = Point3(*ToontownGlobals.LawbotBossChairRow2PosA)
        rowEnd = Point3(*ToontownGlobals.LawbotBossChairRow2PosB)
    # One step is 1/5 of the row span (6 seats -> 5 gaps).
    step = (rowEnd - rowStart) / (6 - 1)
    newPos = step * seatInRow
    self.notify.debug('curDisplacement = %s' % newPos)
    newPos += rowStart
    self.notify.debug('newPos before offset = %s' % newPos)
    newPos -= Point3(*ToontownGlobals.LawbotBossJuryBoxRelativeEndPos)
    self.notify.debug('newPos = %s' % newPos)
    return newPos
def loadToonJuror(self):
    """Create the (initially hidden) NPC toon juror for the current toonJurorIndex."""
    self.cleanupToonJuror()
    self.toonJuror = NPCToons.createLocalNPC(ToontownGlobals.LawbotBossBaseJurorNpcId + self.toonJurorIndex)
    self.toonJuror.hide()
def setToonJurorIndex(self, newVal):
    """Distributed update: change which NPC toon juror sits here, reloading on change."""
    # Fix: idiomatic `!=` instead of `not ... == ...`.
    if self.toonJurorIndex != newVal:
        self.toonJurorIndex = newVal
        self.loadToonJuror()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Functions that combine modular subfunctions, creating
# a task to complete, such as reading from file, extracting concepts
# and saving to disk again.
from config import settings
from utilities import time_log
from data_loader import load_file, load_file_batches, load_mongo, load_mongo_batches, \
parse_remove_edges, parse_text, get_collection_count
from data_extractor import extract_semrep, extract_semrep_parallel, extract_metamap, \
get_concepts_from_edges, get_concepts_from_edges_parallel
from data_saver import save_csv, save_neo4j, save_json, save_json2, create_neo4j_results, \
create_neo4j_csv, update_neo4j, update_mongo_sentences, save_mongo, update_neo4j_parallel
from tqdm import tqdm
import ijson.backends.yajl2_cffi as ijson2
class Parser(object):
    """
    Parser class for reading input. According to which pipeline
    task it is called upon, it parses the appropriate file.
    Filepaths and details according to settings.yaml.
    """

    def __init__(self, source, key, name=None):
        """
        Initialization of the class.
        Attributes:
            - source: str, value denoting where we will read from (e.g 'mongo')
            - key: str, value denoting what we will read (e.g. text, edges)
            - name: str, The name is only for pretty-printing purposes.
        Raises:
            NotImplementedError for unsupported source/key values.
        """
        self.source = source
        self.key = key
        # Batch loaders serve both the parallel and the streaming modes.
        parallel_flag = str(settings['pipeline']['in']['parallel']) == 'True'
        stream_flag = str(settings['pipeline']['in']['stream']) == 'True'
        if self.source == 'mongo':
            if parallel_flag or stream_flag:
                self.load = load_mongo_batches
            else:
                self.load = load_mongo
        elif self.source == 'file':
            if parallel_flag or stream_flag:
                self.load = load_file_batches
            else:
                self.load = load_file
        elif self.source == 'delete':
            self.load = parse_remove_edges
        else:
            time_log('Source to read was %s. Please change settings' % self.source)
            raise NotImplementedError
        if self.key == 'text':
            self.parse = parse_text
        elif self.key == 'med_red':
            # These inputs need no extra parsing step after loading.
            self.parse = None
        elif self.key == 'edges':
            self.parse = None
        else:
            time_log('Type to read was %s. Please change settings' % self.key)
            raise NotImplementedError
        if name:
            self.name = name
        else:
            # BUG FIX: key is the type being read and source is where it
            # comes from; the two values were previously swapped here.
            self.name = 'Type: %s From : %s' % (self.key, self.source)

    def read(self, N=None, ind_=0):
        """
        Run the corresponding parsing function and return:
        Input:
            - N: int, total number of items (batch/streaming mode only)
            - ind_: int, the starting point to read from
        Output:
            1) In case of the batch or streaming processing:
            - json_: dict, the corresponding read batch
            - ind_: int, the index where the next iteration of readings
            should start from
            2) In case of loading the whole collection:
            - json_: dict, the corresponding collection
        """
        parallel_flag = str(settings['pipeline']['in']['parallel']) == 'True'
        stream_flag = str(settings['pipeline']['in']['stream']) == 'True'
        if parallel_flag or stream_flag:
            json_, ind_ = self.load(self.key, N, ind_)
            if json_:
                if self.parse:
                    json_ = self.parse(json_)
                time_log('Completed Parsing. Read: %d documents!' % len(json_[settings['out']['json']['itemfield']]))
            return json_, ind_
        else:
            json_ = self.load(self.key)
            if self.parse:
                json_ = self.parse(json_)
            time_log('Completed Parsing. Read: %d documents!' % len(json_[settings['out']['json']['itemfield']]))
            return json_
class Extractor(object):
    """
    Class for extracting concepts/entities and relations from medical text.
    Expects to work with json files generated from the corresponding Parser
    objects. Currently ['semrep'] implemented.
    Filepaths and details according to settings.yaml.
    """

    def __init__(self, key, parser_key, name=None):
        """
        Initialization of the class.
        Input:
            - key: str,
            string denoting what extraction task is to take place
            - parser_key: str,
            string denoting what type of input to expect
            - name: str,
            optional string for the task to be printed
        """
        self.key = key
        self.parser_key = parser_key
        # NOTE(review): an unrecognized key silently leaves self.func
        # unset; run() would then raise AttributeError.
        if self.key == 'semrep':
            if str(settings['pipeline']['in']['parallel']) == 'True':
                self.func = extract_semrep_parallel
                time_log('Will use multiprocessing for the semrep extraction!')
            else:
                self.func = extract_semrep
        elif self.key == 'metamap':
            self.func = extract_metamap
        elif self.key == 'reverb':
            raise NotImplementedError
        elif self.key == 'get_concepts_from_edges':
            if str(settings['pipeline']['in']['parallel']) == 'True':
                self.func = get_concepts_from_edges_parallel
            else:
                self.func = get_concepts_from_edges
        if name:
            self.name = name
        else:
            self.name = self.key

    def run(self, json):
        """
        Run the corresponding extracting function and return the resulting
        dictionary; returns {} when given anything other than a dict.
        """
        # isinstance instead of a type() comparison: idiomatic and also
        # accepts dict subclasses.
        if isinstance(json, dict):
            json_ = self.func(json, self.parser_key)
            time_log('Completed extracting using %s!' % self.name)
        else:
            time_log('Unsupported type of json to work on!')
            time_log('Task : %s --- Type of json: %s' % (self.name, type(json)))
            time_log(json)
            json_ = {}
        return json_
class Dumper(object):
    """
    Class for saving the extracted results. Expects to work with json files
    generated from the previous extraction phases. Currently implemented
    dumping methods for keys:
        -json : for the enriched medical documents
        -csv : for nodes, relations before importing into neo4j
        -neo4j: for nodes, relations updating neo4j db directly
    Filepaths and details according to settings.yaml.
    Params:
        - key: str,
        one of json, csv, neo4j, mongo, mongo_sentences
        - inp_key: str,
        the Parser key for this pipeline
        - name: str,
        Name of the Dumper. For printing purposes only
    """

    def __init__(self, key, inp_key='text', name=None):
        self.key = key
        if self.key == 'json':
            self.transform = None
            self.func = save_json
        elif self.key == 'csv':
            self.transform = create_neo4j_results
            self.func = create_neo4j_csv
        elif self.key == 'neo4j':
            self.transform = create_neo4j_results
            # Parallel runs use the parallel neo4j updater instead.
            parallel_flag = str(settings['pipeline']['in']['parallel']) == 'True'
            self.func = update_neo4j
            if parallel_flag:
                self.func = update_neo4j_parallel
        elif self.key == 'mongo_sentences':
            self.transform = None
            self.func = update_mongo_sentences
        elif self.key == 'mongo':
            self.transform = None
            self.func = save_mongo
        # NOTE(review): an inp_key other than 'text'/'edges' leaves
        # self.type_ unset; save() then fails for transform-based dumpers.
        if inp_key == 'text':
            self.type_ = 'harvester'
        elif inp_key == 'edges':
            self.type_ = 'edges'
        if name:
            self.name = name
        else:
            self.name = self.key

    def save(self, json_):
        """
        Transform (when a transform is configured) and persist a result
        batch; returns the saving function's result, or {} on bad input.
        """
        if isinstance(json_, dict):
            if self.transform:
                results = self.transform(json_, self.type_)
            else:
                results = json_
            json_ = self.func(results)
            if self.key == 'mongo_sentences':
                out_p = '/'.join([settings[self.key]['uri'],settings[self.key]['db'],settings[self.key]['collection']])
                time_log('Completed saving data. Results saved in:\n %s' % out_p)
            else:
                time_log('Completed saving data. Results saved in:\n %s' % settings['out'][self.key]['out_path'])
        else:
            time_log('Unsupported type of json to work on!')
            # BUG FIX: this error path previously logged the undefined
            # name 'json' (never imported in this module), raising
            # NameError instead of reporting the bad input.
            time_log('Task : %s --- Type of json: %s' % (self.name, type(json_)))
            time_log(json_)
            json_ = {}
        return json_
class taskCoordinator(object):
    """
    Orchestrator class for the different pipeline phases.

    Reads the pipeline layout from settings.yaml, keeps only the enabled
    options of each phase and wires Parser -> Extractor(s) -> Dumper(s).
    """

    def __init__(self):
        self.pipeline = {}
        self.phases = ['in', 'trans', 'out']
        for phase, dic_ in sorted(settings['pipeline'].iteritems()):
            self.pipeline[phase] = {}
            for key, value in dic_.iteritems():
                # Keep only the enabled (truthy) options of each phase.
                if value:
                    self.pipeline[phase][key] = value

    def _run_phases(self, json_, parser):
        """
        Apply the 'trans' (extraction) and 'out' (saving) phases to an
        already-read batch and return the transformed result.
        """
        for phase in self.phases:
            dic = self.pipeline[phase]
            if phase == 'trans':
                for key, value in dic.iteritems():
                    if value:
                        extractor = Extractor(key, parser.key)
                        json_ = extractor.run(json_)
            if phase == 'out':
                # Sorted for a deterministic dumping order.
                for key, value in sorted(dic.iteritems()):
                    if value:
                        dumper = Dumper(key, parser.key)
                        dumper.save(json_)
        return json_

    def run(self):
        """
        Execute the configured pipeline: batch-wise when parallel or
        streaming input is enabled, otherwise on the whole collection.
        """
        parallel_flag = 'parallel' in self.pipeline['in']
        stream_flag = 'stream' in self.pipeline['in']
        parser = Parser(self.pipeline['in']['source'], self.pipeline['in']['type'])
        if parallel_flag or stream_flag:
            ind_ = 0
            N = get_collection_count(parser.source, parser.key)
            while ind_ < N:
                old_ind = ind_
                json_all, ind_ = parser.read(N=N, ind_=ind_)
                if json_all:
                    self._run_phases(json_all, parser)
                if ind_:
                    time_log('Processed %d documents in parallel. We are at index %d!' % (ind_ - old_ind, ind_))
                    proc = int(ind_ / float(N) * 100)
                    if proc % 10 == 0 and proc > 0:
                        time_log('~' * 50)
                        time_log('We are at %d/%d documents processed -- %0.2f %%' % (ind_, N, proc))
                        time_log('~' * 50)
        else:
            json_ = parser.read()
            self._run_phases(json_, parser)

    def print_pipeline(self):
        """
        Pretty-print the pipeline schedule: input source, transformation
        utilities and output targets.
        """
        print('#' * 30 + ' Pipeline Schedule' + '#' * 30)
        for phase in self.phases:
            dic = self.pipeline[phase]
            if phase == 'in':
                if dic['source'] == 'delete':
                    print("Will delete all %s resource associated edges!" % settings['neo4j']['resource'])
                    break
                if dic['source'] == 'file':
                    source = settings['load']['path']['file_path']
                elif dic['source'] == 'mongo':
                    # NOTE(review): reads 'file_path' for the mongo source
                    # too -- looks copy-pasted; verify against settings.yaml.
                    source = settings['load']['mongo']['file_path']
                print('Will read from: %s' % source)
            if phase == 'trans':
                print('Will use the following transformation utilities:')
                for key, value in dic.iteritems():
                    print('- %s' % key)
            if phase == 'out':
                print('Will save the outcome as follows:')
                for key, value in dic.iteritems():
                    if key == 'mongo_sentences':
                        out_p = '/'.join([settings[key]['uri'], settings[key]['db'], settings[key]['collection']])
                        print('%s : %s' % (key, out_p))
                    else:
                        print('%s : %s' % (key, settings['out'][key]['out_path']))
        print('#' * 30 + ' Pipeline Schedule ' + '#' * 30)
|
|
#
# Copyright 2015 Hewlett Packard
# (c) Copyright 2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from futurist import periodics
import os
import threading
import time
from oslo_log import log
import ceilometer
from ceilometer import monasca_client as mon_client
from ceilometer import publisher
from ceilometer.publisher.monasca_data_filter import MonascaDataFilter
from monascaclient import exc
import traceback
# Have to use constants rather than conf to satisfy @periodicals
# (the periodics decorator needs its interval at class-definition time).
BATCH_POLLING_INTERVAL = 5  # seconds between flush_batch invocations
BATCH_RETRY_INTERVAL = 60  # seconds between retry_batch invocations
LOG = log.getLogger(__name__)
class MonascaPublisher(publisher.ConfigPublisherBase):
    """Publisher to publish samples to monasca using monasca-client.

    Example URL to place in pipeline.yaml:

        - monasca://http://192.168.10.4:8070/v2.0
    """

    def __init__(self, conf, parsed_url):
        super(MonascaPublisher, self).__init__(conf, parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(self.conf, parsed_url)
        self.mon_filter = MonascaDataFilter(self.conf)

        # add flush_batch function to periodic callables
        periodic_callables = [
            # The function to run + any automatically provided
            # positional and keyword arguments to provide to it
            # everytime it is activated.
            (self.flush_batch, (), {}),
        ]

        if self.conf.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue;
            # entry i is the attempt count for retry_queue[i]
            self.retry_counter = []
            # add retry_batch function to periodic callables
            periodic_callables.append((self.retry_batch, (), {}))

        if self.conf.monasca.archive_on_failure:
            archive_path = self.conf.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = self.conf.find_file(archive_path)

            self.archive_handler = publisher.get_publisher(
                self.conf,
                'file://' + str(archive_path),
                'ceilometer.sample.publisher')

        # start periodic worker in a daemon thread so it never blocks
        # interpreter shutdown
        self.periodic_worker = periodics.PeriodicWorker(periodic_callables)
        self.periodic_thread = threading.Thread(
            target=self.periodic_worker.start)
        self.periodic_thread.daemon = True
        self.periodic_thread.start()

    def _publish_handler(self, func, metrics, batch=False):
        """Handles publishing and exceptions that arise.

        :param func: monasca client callable to invoke
        :param metrics: list of metric dicts; in non-batch mode only the
            first element is published
        :param batch: when True, send the whole list as one request body
        """
        try:
            metric_count = len(metrics)
            if batch:
                func(**{'jsonbody': metrics})
            else:
                func(**metrics[0])
            LOG.info('Successfully published %d metric(s)' % metric_count)
        except mon_client.MonascaServiceException:
            # Assuming atomicity of create or failure - meaning
            # either all succeed or all fail in a batch
            LOG.error('Metric create failed for %(count)d metric(s) with'
                      ' name(s) %(names)s ' %
                      ({'count': len(metrics),
                        'names': ','.join([metric['name']
                                           for metric in metrics])}))
            if self.conf.monasca.retry_on_failure:
                # retry payload in case of internal server error(500),
                # service unavailable error(503),bad gateway (502) or
                # Communication Error

                # append failed metrics to retry_queue
                LOG.debug('Adding metrics to retry queue.')
                self.retry_queue.extend(metrics)
                # one zeroed retry-attempt slot per failed metric
                # (the old '[0 * i for i in range(...)]' produced the same
                # zeros, just less clearly)
                self.retry_counter.extend([0] * metric_count)
            else:
                if hasattr(self, 'archive_handler'):
                    self.archive_handler.publish_samples(None, metrics)
        except Exception:
            LOG.info(traceback.format_exc())
            if hasattr(self, 'archive_handler'):
                self.archive_handler.publish_samples(None, metrics)

    def publish_samples(self, samples):
        """Main method called to publish samples."""

        for sample in samples:
            metric = self.mon_filter.process_sample_for_monasca(sample)

            # In batch mode, push metric to queue,
            # else publish the metric
            if self.conf.monasca.batch_mode:
                LOG.debug('Adding metric to queue.')
                self.metric_queue.append(metric)
            else:
                LOG.info('Publishing metric with name %(name)s and'
                         ' timestamp %(ts)s to endpoint.' %
                         ({'name': metric['name'],
                           'ts': metric['timestamp']}))

                self._publish_handler(self.mon_client.metrics_create, [metric])

    def is_batch_ready(self):
        """Method to check if batch is ready to trigger.

        Ready when the batch timeout has elapsed with metrics queued,
        or when the queue has reached the configured batch size.
        """

        previous_time = self.time_of_last_batch_run
        current_time = time.time()
        elapsed_time = current_time - previous_time

        if elapsed_time >= self.conf.monasca.batch_timeout and len(
                self.metric_queue) > 0:
            LOG.info('Batch timeout exceeded, triggering batch publish.')
            return True
        else:
            if len(self.metric_queue) >= self.conf.monasca.batch_count:
                LOG.info('Batch queue full, triggering batch publish.')
                return True
            else:
                return False

    @periodics.periodic(BATCH_POLLING_INTERVAL)
    def flush_batch(self):
        """Method to flush the queued metrics."""
        if self.is_batch_ready():
            # publish all metrics in queue at this point
            batch_count = len(self.metric_queue)
            LOG.info("batch is ready: batch_count %s" % str(batch_count))
            self._publish_handler(self.mon_client.metrics_create,
                                  self.metric_queue[:batch_count],
                                  batch=True)

            self.time_of_last_batch_run = time.time()
            # slice queue to remove metrics that
            # published with success or failed and got queued on
            # retry queue
            self.metric_queue = self.metric_queue[batch_count:]

    def is_retry_ready(self):
        """Method to check if retry batch is ready to trigger."""

        if len(self.retry_queue) > 0:
            LOG.info('Retry queue has items, triggering retry.')
            return True
        else:
            return False

    @periodics.periodic(BATCH_RETRY_INTERVAL)
    def retry_batch(self):
        """Method to retry the failed metrics."""
        if self.is_retry_ready():
            retry_count = len(self.retry_queue)

            # Drop metrics that have maxed out their retry attempts.
            # BUG FIX: iterate backwards -- the old forward loop deleted
            # by index while iterating, which skipped the element after
            # each deletion and could raise IndexError once the lists
            # had shrunk below the original retry_count.
            for ctr in reversed(range(retry_count)):
                if self.retry_counter[ctr] > self.conf.\
                        monasca.batch_max_retries:
                    if hasattr(self, 'archive_handler'):
                        self.archive_handler.publish_samples(
                            None,
                            [self.retry_queue[ctr]])
                    LOG.info('Removing metric %s from retry queue.'
                             ' Metric retry maxed out retry attempts' %
                             self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]

            # Iterate over the retry_queue to retry the
            # publish for each metric.
            # If an exception occurs, the retry count for
            # the failed metric is incremented.
            # If the retry succeeds, remove the metric and
            # the retry count from the retry_queue and retry_counter resp.
            ctr = 0
            while ctr < len(self.retry_queue):
                try:
                    LOG.info('Retrying metric publish from retry queue.')
                    self.mon_client.metrics_create(**self.retry_queue[ctr])
                    # remove from retry queue if publish was success
                    LOG.info('Retrying metric %s successful,'
                             ' removing metric from retry queue.' %
                             self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]
                except exc.ClientException:
                    LOG.error('Exception encountered in retry. '
                              'Batch will be retried in next attempt.')
                    # if retry failed, increment the retry counter
                    self.retry_counter[ctr] += 1
                    ctr += 1

    def flush_to_file(self):
        # TODO(persist maxed-out metrics to file)
        pass

    def publish_events(self, events):
        """Send an event message for publishing

        :param events: events from pipeline after transformation
        """
        raise ceilometer.NotImplementedError
|
|
#!/usr/bin/env python
from fabric.api import execute, local, require, settings, task
from fabric.state import env
from termcolor import colored
import app_config
# Other fabfiles
import analysis
import assets
import data
import flat
import issues
import render
import text
import utils
if app_config.DEPLOY_TO_SERVERS:
import servers
if app_config.DEPLOY_CRONTAB:
import cron_jobs
# Bootstrap can only be run once, then it's disabled
if app_config.PROJECT_SLUG == '$NEW_PROJECT_SLUG':
import bootstrap
"""
Base configuration
"""
# Remote tasks run as the configured server user; agent forwarding lets
# git operations on the remote reuse local SSH credentials.
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []  # populated by the production()/staging() tasks below
env.settings = None  # deployment target name; set by the environment tasks
"""
Environments
Changing environment requires a full-stack test.
An environment points to both a server and an S3
bucket.
"""
@task
def production():
    """
    Run as though on production.

    Points env.hosts and the app_config targets at production so that
    subsequent tasks in the same invocation act on that environment.
    """
    env.settings = 'production'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
@task
def staging():
    """
    Run as though on staging.

    Points env.hosts and the app_config targets at staging so that
    subsequent tasks in the same invocation act on that environment.
    """
    env.settings = 'staging'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
"""
Branches
Changing branches requires deploying that branch to a host.
"""
@task
def stable():
    """
    Work on stable branch.
    """
    # Only records the branch name; deploy() reads env.branch later.
    env.branch = 'stable'
@task
def master():
    """
    Work on development branch.
    """
    # Only records the branch name; deploy() reads env.branch later.
    env.branch = 'master'
@task
def branch(branch_name):
    """
    Work on any specified branch.
    """
    # Only records the branch name; deploy() reads env.branch later.
    env.branch = branch_name
"""
Running the app
"""
@task
def app(port='8000'):
    """
    Serve app.py.

    Runs gunicorn on the given port; when a deployment target is active
    the command is wrapped so DEPLOYMENT_TARGET is exported for it.
    """
    command = 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app' % port
    if env.settings:
        local("DEPLOYMENT_TARGET=%s bash -c '%s'" % (env.settings, command))
    else:
        local(command)
@task
def public_app(port='8001'):
    """
    Serve public_app.py.

    Runs gunicorn on the given port; when a deployment target is active
    the command is wrapped so DEPLOYMENT_TARGET is exported for it.
    """
    command = 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload public_app:wsgi_app' % port
    if env.settings:
        local("DEPLOYMENT_TARGET=%s bash -c '%s'" % (env.settings, command))
    else:
        local(command)
@task
def tests():
    """
    Run Python unit tests.
    """
    # Delegates entirely to nosetests discovery in the project root.
    local('nosetests')
"""
Deployment
Changes to deployment requires a full-stack test. Deployment
has two primary functions: Pushing flat files to S3 and deploying
code to a remote server if required.
"""
@task
def update():
    """
    Update data by running all scrapers
    """
    execute('text.update')
    execute('cron_jobs.scrape_facebook')
    execute('cron_jobs.scrape_homepage')
    execute('cron_jobs.scrape_seamus')
    execute('cron_jobs.scrape_spreadsheet')
    # NOTE(review): the other calls use 'module.task' paths; bare
    # 'analysis' names the imported module, not a task -- presumably
    # 'analysis.analyse' (as used in deploy()) was intended. Confirm
    # before changing.
    execute('analysis')
@task
def deploy(analyse=True, remote='origin'):
    """
    Deploy the latest app to S3 and, if configured, to our servers.
    """
    require('settings', provided_by=[production, staging])
    if app_config.DEPLOY_TO_SERVERS:
        require('branch', provided_by=[stable, master, branch])
        # Guard against pushing a non-stable branch to production.
        if (app_config.DEPLOYMENT_TARGET == 'production' and env.branch != 'stable'):
            utils.confirm(
                colored("You are trying to deploy the '%s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env.branch, "red")
            )
        servers.checkout_latest(remote)
        servers.fabcast('text.update')
        servers.fabcast('assets.sync')
        if app_config.DEPLOY_CRONTAB:
            servers.install_crontab()
        if app_config.DEPLOY_SERVICES:
            servers.deploy_confs()
    # NOTE(review): Fabric passes CLI task arguments as strings, so
    # 'deploy:analyse=True' yields the string 'True' here and this
    # equality check would skip the analysis step; only the Python-level
    # default True triggers it. Confirm this is the intended behavior.
    if analyse == True:
        execute('analysis.analyse')
    execute('render.render_all')
    # Push rendered pages with the default cache lifetime...
    flat.deploy_folder(
        app_config.S3_BUCKET['bucket_name'],
        'www',
        app_config.PROJECT_SLUG,
        headers={
            'Cache-Control': 'max-age=%i' % app_config.DEFAULT_MAX_AGE
        },
        ignore=['www/assets/*']
    )
    # ...and assets separately with their own (typically longer) lifetime.
    flat.deploy_folder(
        app_config.S3_BUCKET['bucket_name'],
        'www/assets',
        '%s/assets' % app_config.PROJECT_SLUG,
        headers={
            'Cache-Control': 'max-age=%i' % app_config.ASSETS_MAX_AGE
        })
"""
Destruction
Changes to destruction require setup/deploy to a test host in order to test.
Destruction should remove all files related to the project from both a remote
host and S3.
"""
@task
def shiva_the_destroyer():
    """
    Deletes the app from s3 and, if configured, from the servers.

    Asks for interactive confirmation first; destructive and
    irreversible once confirmed.
    """
    require('settings', provided_by=[production, staging])
    utils.confirm(
        # BUG FIX: the message previously ended with a stray "')" typo.
        colored("You are about to destroy everything deployed to %s for this project.\nDo you know what you're doing?" % app_config.DEPLOYMENT_TARGET, "red")
    )
    with settings(warn_only=True):
        flat.delete_folder(app_config.S3_BUCKET['bucket_name'], app_config.PROJECT_SLUG)
        if app_config.DEPLOY_TO_SERVERS:
            execute('servers.delete_project')
            if app_config.DEPLOY_CRONTAB:
                execute('servers.uninstall_crontab')
            if app_config.DEPLOY_SERVICES:
                execute('servers.nuke_confs')
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
from botocore.exceptions import ClientError
from .common import BaseTest
from c7n.resources.ebs import (
CopyInstanceTags, EncryptInstanceVolumes, CopySnapshot, Delete)
from c7n.executor import MainThreadExecutor
logging.basicConfig(level=logging.DEBUG)
class SnapshotAccessTest(BaseTest):

    def test_snapshot_access(self):
        # pre conditions, 2 snapshots one shared to a separate account, and one
        # shared publicly. 2 non matching volumes, one not shared, one shared
        # explicitly to its own account.
        self.patch(CopySnapshot, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_ebs_cross_account')
        policy = self.load_policy({
            'name': 'snap-copy',
            'resource': 'ebs-snapshot',
            'filters': ['cross-account'],
        }, session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 2)
        violations = {
            r['SnapshotId']: r['c7n:CrossAccountViolations'] for r in matched}
        self.assertEqual(
            violations,
            {'snap-7f9496cf': ['619193117841'],
             'snap-af0eb71b': ['all']})
class SnapshotCopyTest(BaseTest):

    def test_snapshot_copy(self):
        # Run the action on the main thread so replayed flight data is
        # consumed deterministically.
        self.patch(CopySnapshot, 'executor_factory', MainThreadExecutor)
        self.change_environment(AWS_DEFAULT_REGION='us-west-2')
        session_factory = self.replay_flight_data('test_ebs_snapshot_copy')
        policy = self.load_policy({
            'name': 'snap-copy',
            'resource': 'ebs-snapshot',
            'filters': [
                {'tag:ASV': 'RoadKill'}],
            'actions': [
                {'type': 'copy',
                 'target_region': 'us-east-1',
                 'target_key': '82645407-2faa-4d93-be71-7d6a8d59a5fc'}]
        }, session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        # The copied snapshot in the target region must carry the tags.
        ec2 = session_factory(region="us-east-1").client('ec2')
        tag_list = ec2.describe_tags(
            Filters=[{'Name': 'resource-id',
                      'Values': [matched[0]['c7n:CopiedSnapshot']]}])['Tags']
        copied_tags = {t['Key']: t['Value'] for t in tag_list}
        self.assertEqual(copied_tags['ASV'], 'RoadKill')
class SnapshotAmiSnapshotTest(BaseTest):

    def test_snapshot_ami_snapshot_filter(self):
        self.patch(CopySnapshot, 'executor_factory', MainThreadExecutor)
        # DEFAULT_REGION needs to be set to west for recording
        session_factory = self.replay_flight_data('test_ebs_ami_snapshot_filter')
        # value False keeps only the AMI-backed snapshots
        ami_policy = self.load_policy({
            'name': 'ami-snap-filter',
            'resource': 'ebs-snapshot',
            'filters': [
                {'type': 'skip-ami-snapshots',
                 'value': False}],
        }, session_factory=session_factory)
        self.assertEqual(len(ami_policy.run()), 3)
        # value True skips AMI snapshots, returning only the rest
        non_ami_policy = self.load_policy({
            'name': 'non-ami-snap-filter',
            'resource': 'ebs-snapshot',
            'filters': [
                {'type': 'skip-ami-snapshots',
                 'value': True}],
        }, session_factory=session_factory)
        self.assertEqual(len(non_ami_policy.run()), 2)
class SnapshotTrimTest(BaseTest):

    def test_snapshot_trim(self):
        session_factory = self.replay_flight_data('test_ebs_snapshot_delete')
        # Delete every snapshot that carries an InstanceId tag.
        trim_policy = self.load_policy({
            'name': 'snapshot-trim',
            'resource': 'ebs-snapshot',
            'filters': [
                {'tag:InstanceId': 'not-null'}],
            'actions': ['delete']},
            session_factory=session_factory)
        self.assertEqual(len(trim_policy.run()), 1)
class AttachedInstanceTest(BaseTest):

    def test_ebs_instance_filter(self):
        session_factory = self.replay_flight_data('test_ebs_instance_filter')
        # Match volumes by a tag on their attached instance.
        policy = self.load_policy({
            'name': 'attached-instance-test',
            'resource': 'ebs',
            'filters': [
                {'type': 'instance',
                 'key': 'tag:Name',
                 'value': 'CompiledLambda'}]},
            session_factory=session_factory)
        self.assertEqual(len(policy.run()), 1)
class ResizeTest(BaseTest):
    # Exercises the ebs 'modifyable' filter and the 'modify' action.

    def test_resize_action(self):
        factory = self.replay_flight_data('test_ebs_modifyable_action')
        client = factory().client('ec2')
        # Change a volume from 32 gb gp2 and 100 iops (sized based) to
        # 64gb and 500 iops.
        vol_id = 'vol-0073dcd216489ea1b'
        p = self.load_policy({
            'name': 'resizable',
            'resource': 'ebs',
            'filters': [
                'modifyable', {'VolumeId': vol_id}],
            'actions': [{
                'type': 'modify',
                'volume-type': 'io1',
                # percentages are relative to the current size/iops
                'size-percent': 200,
                'iops-percent': 500
            }]},
            session_factory=factory)
        resources = p.run()
        # Policy results reflect the pre-modification state...
        self.assertEqual(resources[0]['Iops'], 100)
        self.assertEqual(resources[0]['Size'], 32)
        # ...while the API shows the modified volume afterwards.
        vol = client.describe_volumes(VolumeIds=[vol_id])['Volumes'][0]
        self.assertEqual(vol['Iops'], 500)
        self.assertEqual(vol['Size'], 64)

    def test_resize_filter(self):
        # precondition, 6 volumes, 4 not modifyable.
        factory = self.replay_flight_data('test_ebs_modifyable_filter')
        output = self.capture_logging('custodian.filters', level=logging.DEBUG)
        p = self.load_policy({
            'name': 'resizable',
            'resource': 'ebs',
            'filters': ['modifyable']},
            session_factory=factory)
        resources = p.run()
        self.assertEqual(
            {r['VolumeId'] for r in resources},
            set(('vol-0073dcd216489ea1b', 'vol-0e4cba7adc4764f79')))
        # normalizing on str/unicode repr output between versions.. punt
        if sys.version_info[0] > 2:
            return
        # Python 2 only: the debug log lists why volumes were filtered.
        self.assertEqual(
            output.getvalue().strip(),
            ("filtered 4 of 6 volumes due to [(u'instance-type', 2), "
             "(u'vol-mutation', 1), (u'vol-type', 1)]"))
class CopyInstanceTagsTest(BaseTest):

    def test_copy_instance_tags(self):
        # More a functional/coverage test then a unit test.
        self.patch(
            CopyInstanceTags, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_ebs_copy_instance_tags')
        volume_id = 'vol-2b047792'

        def volume_tags():
            found = session_factory().client('ec2').describe_tags(
                Filters=[{'Name': 'resource-id',
                          'Values': [volume_id]}])['Tags']
            return {t['Key']: t['Value'] for t in found}

        # The volume starts out untagged.
        self.assertEqual(volume_tags(), {})
        policy = self.load_policy({
            'name': 'test-copy-instance-tags',
            'resource': 'ebs',
            'actions': [{
                'type': 'copy-instance-tags',
                'tags': ['Name']}]},
            config={'region': 'us-west-2'},
            session_factory=session_factory)
        policy.run()
        # After the action, the instance's Name tag is on the volume.
        self.assertEqual(volume_tags()['Name'], 'CompileLambda')
class VolumeSnapshotTest(BaseTest):

    def test_volume_snapshot(self):
        session_factory = self.replay_flight_data('test_ebs_snapshot')
        policy = self.load_policy(
            {
                'name': 'test-ebs-snapshot',
                'resource': 'ebs',
                'filters': [{'VolumeId': 'vol-01adbb6a4f175941d'}],
                'actions': ['snapshot'],
            },
            session_factory=session_factory,
        )
        policy.run()
        # The snapshot action should have produced exactly one snapshot
        # for the targeted volume.
        snapshots = session_factory().client('ec2').describe_snapshots(
            Filters=[
                {
                    'Name': 'volume-id',
                    'Values': ['vol-01adbb6a4f175941d'],
                },
            ]
        )['Snapshots']
        self.assertEqual(len(snapshots), 1)
class VolumeDeleteTest(BaseTest):

    def test_volume_delete_force(self):
        self.patch(Delete, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_ebs_force_delete')
        policy = self.load_policy({
            'name': 'test-ebs',
            'resource': 'ebs',
            'filters': [{'VolumeId': 'vol-d0790258'}],
            'actions': [
                {'type': 'delete', 'force': True}]},
            session_factory=session_factory)
        matched = policy.run()
        # Describing the deleted volume must now fail with NotFound;
        # anything else means the volume still exists.
        with self.assertRaises(ClientError) as ctx:
            session_factory().client('ec2').describe_volumes(
                VolumeIds=[matched[0]['VolumeId']])
        self.assertEqual(
            ctx.exception.response['Error']['Code'],
            'InvalidVolume.NotFound')
class EncryptExtantVolumesTest(BaseTest):
    # Exercises the encrypt-instance-volumes remediation action.

    def test_encrypt_volumes(self):
        self.patch(
            EncryptInstanceVolumes, 'executor_factory', MainThreadExecutor)
        session_factory = self.replay_flight_data('test_encrypt_volumes')
        policy = self.load_policy({
            'name': 'ebs-remediate-attached',
            'resource': 'ebs',
            'filters': [
                {'Encrypted': False},
                {'VolumeId': 'vol-0f53c81b92b4ecfce'}],
            'actions': [
                {'type': 'encrypt-instance-volumes',
                 # short delay keeps the replayed test fast
                 'delay': 0.001,
                 'key': 'alias/encryptebs'}]},
            session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        for r in resources:
            # Inspect every volume now attached to the remediated instance.
            volumes = session_factory().client('ec2').describe_volumes(
                Filters=[{
                    'Name':'attachment.instance-id',
                    'Values': [
                        r['Attachments'][0]['InstanceId']
                    ]
                }]
            )
            for v in volumes['Volumes']:
                # Replacement volumes must be encrypted and clean up with
                # their instance.
                self.assertTrue(v['Attachments'][0]['DeleteOnTermination'])
                self.assertTrue(v['Encrypted'])
                if 'Tags' in v:
                    # The action's temporary bookkeeping tags must be gone.
                    self.assertNotIn('maid-crypt-remediation', [i['Key'] for i in v['Tags']])
                    self.assertNotIn('maid-origin-volume', [i['Key'] for i in v['Tags']])
                    self.assertNotIn('maid-instance-device', [i['Key'] for i in v['Tags']])
class TestKmsAlias(BaseTest):

    def test_ebs_kms_alias(self):
        """kms-alias filter matches volumes using AWS-managed KMS keys."""
        session_factory = self.replay_flight_data('test_ebs_aws_managed_kms_keys')
        policy = self.load_policy(
            {'name': 'ebs-aws-managed-kms-keys-filters',
             'resource': 'ebs',
             'filters': [{'type': 'kms-alias', 'key': 'AliasName',
                          'value': '^(alias/aws/)', 'op': 'regex'}]},
            config={'region': 'us-west-2'},
            session_factory=session_factory)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['VolumeId'], 'vol-14a3cd9d')
class EbsFaultToleranceTest(BaseTest):

    def test_ebs_fault_tolerant(self):
        """Default fault-tolerant filter matches only tolerant volumes."""
        session = self.replay_flight_data('test_ebs_fault_tolerant')
        policy = self.load_policy(
            {'name': 'ebs-fault-tolerant',
             'resource': 'ebs',
             'filters': ['fault-tolerant']},
            session_factory=session)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['VolumeId'], 'vol-c5eaa459')

    def test_ebs_non_fault_tolerant(self):
        """tolerant: False inverts the filter to non-tolerant volumes."""
        session = self.replay_flight_data('test_ebs_non_fault_tolerant')
        policy = self.load_policy(
            {'name': 'ebs-non-fault-tolerant',
             'resource': 'ebs',
             'filters': [{'type': 'fault-tolerant',
                          'tolerant': False}]},
            session_factory=session)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['VolumeId'], 'vol-abdb8d37')
class PiopsMetricsFilterTest(BaseTest):

    def test_ebs_metrics_percent_filter(self):
        """metrics filter with percent-attr matches volumes whose peak
        consumed IOPS stayed under 50% of provisioned Iops."""
        session = self.replay_flight_data('test_ebs_metrics_percent_filter')
        policy = self.load_policy(
            {'name': 'ebs-unused-piops',
             'resource': 'ebs',
             'filters': [{'type': 'metrics',
                          'name': 'VolumeConsumedReadWriteOps',
                          'op': 'lt',
                          'value': 50,
                          'statistics': 'Maximum',
                          'days': 1,
                          'percent-attr': 'Iops'}]},
            session_factory=session)
        matched = policy.run()
        self.assertEqual(len(matched), 1)
class HealthEventsFilterTest(BaseTest):

    def test_ebs_health_events_filter(self):
        """health-event filter annotates matched volumes with event details."""
        session_factory = self.replay_flight_data(
            'test_ebs_health_events_filter')
        policy = self.load_policy({
            'name': 'ebs-health-events-filter',
            'resource': 'ebs',
            'filters': [{
                'type': 'health-event',
                'types': ['AWS_EBS_VOLUME_LOST']}]
        }, session_factory=session_factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        for r in resources:
            # BUG FIX: the original used
            #   assertTrue(cond and ('Description' in e for e in ...))
            # -- a generator expression is always truthy, so nothing was
            # actually verified.  Assert each event explicitly instead.
            self.assertIn('c7n:HealthEvent', r)
            for event in r['c7n:HealthEvent']:
                self.assertIn('Description', event)
|
|
"""
This module contains data types used by Scrapy which are not included in the
Python Standard Library.
This module must not depend on any module outside the Standard Library.
"""
import copy
import six
from collections import OrderedDict
class MultiValueDictKeyError(KeyError):
    """Raised by MultiValueDict.__getitem__ when a key is missing."""
    pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
dict.__init__(self, key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, dict.__repr__(self))
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = dict.__getitem__(self, key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
dict.__setitem__(self, key, [value])
def __copy__(self):
return self.__class__(dict.items(self))
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def get(self, key, default=None):
"Returns the default value if the requested data doesn't exist"
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"Returns an empty list if the requested data doesn't exist"
try:
return dict.__getitem__(self, key)
except KeyError:
return []
def setlist(self, key, list_):
dict.__setitem__(self, key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def setlistdefault(self, key, default_list=()):
if key not in self:
self.setlist(key, default_list)
return self.getlist(key)
def appendlist(self, key, value):
"Appends an item to the internal list associated with key"
self.setlistdefault(key, [])
dict.__setitem__(self, key, self.getlist(key) + [value])
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def lists(self):
"Returns a list of (key, list) pairs."
return dict.items(self)
def values(self):
"Returns a list of the last value on every key list."
return [self[key] for key in self.keys()]
def copy(self):
"Returns a copy of this object."
return self.__deepcopy__()
def update(self, *args, **kwargs):
"update() extends rather than replaces existing key lists. Also accepts keyword args."
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key, []).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key, []).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in six.iteritems(kwargs):
self.setlistdefault(key, []).append(value)
class SiteNode(object):
    """A node in a site tree (a page, image, or any other file)."""

    def __init__(self, url):
        self.url = url
        # Names of items scraped at this URL, children, and a back-reference
        # to the parent node (None for the root).
        self.itemnames = []
        self.children = []
        self.parent = None

    def add_child(self, node):
        """Attach *node* as a child and set its parent back-reference."""
        self.children.append(node)
        node.parent = self

    def to_string(self, level=0):
        """Render this node and its subtree as an indented text outline."""
        parts = ["%s%s\n" % (' '*level, self.url)]
        for itemname in self.itemnames:
            parts.append("%sScraped: %s\n" % (' '*(level+1), itemname))
        for child in self.children:
            parts.append(child.to_string(level+1))
        return "".join(parts)
class CaselessDict(dict):
    """A dict that normalises every key (lower-cases it by default) on access.

    Subclasses may override ``normkey``/``normvalue`` to change how keys and
    values are normalised before storage or lookup.
    """

    __slots__ = ()

    def __init__(self, seq=None):
        super(CaselessDict, self).__init__()
        if seq:
            self.update(seq)

    def __getitem__(self, key):
        return super(CaselessDict, self).__getitem__(self.normkey(key))

    def __setitem__(self, key, value):
        super(CaselessDict, self).__setitem__(self.normkey(key), self.normvalue(value))

    def __delitem__(self, key):
        super(CaselessDict, self).__delitem__(self.normkey(key))

    def __contains__(self, key):
        return super(CaselessDict, self).__contains__(self.normkey(key))
    has_key = __contains__

    def __copy__(self):
        return self.__class__(self)
    copy = __copy__

    def normkey(self, key):
        """Normalise a dictionary key; lower-case by default."""
        return key.lower()

    def normvalue(self, value):
        """Normalise a value before it is stored; identity by default."""
        return value

    def get(self, key, def_val=None):
        return super(CaselessDict, self).get(self.normkey(key), self.normvalue(def_val))

    def setdefault(self, key, def_val=None):
        return super(CaselessDict, self).setdefault(self.normkey(key), self.normvalue(def_val))

    def update(self, seq):
        pairs = seq.items() if isinstance(seq, dict) else seq
        normalised = ((self.normkey(k), self.normvalue(v)) for k, v in pairs)
        super(CaselessDict, self).update(normalised)

    @classmethod
    def fromkeys(cls, keys, value=None):
        return cls((k, value) for k in keys)

    def pop(self, key, *args):
        return super(CaselessDict, self).pop(self.normkey(key), *args)
class MergeDict(object):
    """
    A simple class for creating new "virtual" dictionaries that actually look
    up values in more than one dictionary, passed in the constructor.

    If a key appears in more than one of the given dictionaries, only the
    first occurrence will be used.
    """

    def __init__(self, *dicts):
        self.dicts = dicts

    def __getitem__(self, key):
        """Return the value from the first dict containing ``key``."""
        for dict_ in self.dicts:
            try:
                return dict_[key]
            except KeyError:
                pass
        # BUG FIX: the original raised a bare KeyError with no arguments,
        # hiding which key was missing; include it for a useful message.
        raise KeyError(key)

    def __copy__(self):
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        """Return the first value found for ``key``, or ``default``."""
        try:
            return self[key]
        except KeyError:
            return default

    def getlist(self, key):
        """Return the first dict's ``getlist(key)`` result, or [] when no
        dict contains the key (assumes the dicts support getlist)."""
        for dict_ in self.dicts:
            # Idiom fix: `key in dict_` instead of `key in dict_.keys()`.
            if key in dict_:
                return dict_.getlist(key)
        return []

    def items(self):
        """Concatenate the items of all dicts (duplicates included)."""
        item_list = []
        for dict_ in self.dicts:
            item_list.extend(dict_.items())
        return item_list

    def has_key(self, key):
        """Return True if any of the underlying dicts contains ``key``."""
        return any(key in dict_ for dict_ in self.dicts)
    __contains__ = has_key

    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()
class LocalCache(OrderedDict):
    """Dictionary with a finite number of keys.

    Older items expire first (FIFO eviction via ``popitem(last=False)``).
    A ``limit`` of None (the default) means unbounded.
    """

    def __init__(self, limit=None):
        super(LocalCache, self).__init__()
        self.limit = limit

    def __setitem__(self, key, value):
        # BUG FIX: with the default limit=None, `len(self) >= None` raises
        # TypeError on Python 3, so every insert crashed.  Only evict when a
        # positive limit is configured.
        if self.limit:
            while len(self) >= self.limit:
                self.popitem(last=False)
        super(LocalCache, self).__setitem__(key, value)
|
|
"""
Helper functions/classes for making HTML report and scaling summary output.
"""
from __future__ import annotations
import json
import logging
from jinja2 import ChoiceLoader, Environment, PackageLoader
from orderedset import OrderedSet
from cctbx import uctbx
from dxtbx import flumpy
from scitbx.array_family import flex
from dials.algorithms.scaling.error_model.error_model import (
calc_deltahl,
calc_sigmaprime,
)
from dials.algorithms.scaling.error_model.error_model_target import (
calculate_regression_x_y,
)
from dials.algorithms.scaling.model.model import (
make_combined_plots,
plot_scaling_models,
)
from dials.algorithms.scaling.plots import (
error_model_variance_plot,
error_regression_plot,
normal_probability_plot,
plot_outliers,
)
from dials.algorithms.scaling.scale_and_filter import make_scaling_filtering_plots
from dials.algorithms.scaling.scaling_library import (
DialsMergingStatisticsError,
merging_stats_from_scaled_array,
)
from dials.report.analysis import (
make_merging_statistics_summary,
reflection_tables_to_batch_dependent_properties,
table_1_summary,
)
from dials.report.plots import (
AnomalousPlotter,
IntensityStatisticsPlots,
ResolutionPlotsAndStats,
i_over_sig_i_vs_batch_plot,
i_over_sig_i_vs_i_plot,
make_image_range_table,
scale_rmerge_vs_batch_plot,
)
from dials.util import tabulate
from dials.util.batch_handling import batch_manager, get_image_ranges
from dials.util.exclude_images import get_valid_image_ranges
from dials.util.resolution_analysis import resolution_cc_half
logger = logging.getLogger("dials")
def assert_is_json_serialisable(thing, name, path=None):
    """Recursively verify *thing* can be JSON-serialised.

    Lists and dicts are walked element by element; any leaf that
    json.dumps rejects raises TypeError naming the offending location
    (e.g. ``name[key][0]``).
    """
    path = [] if path is None else path
    if isinstance(thing, list):
        for idx, element in enumerate(thing):
            assert_is_json_serialisable(element, name, path + [idx])
    elif isinstance(thing, dict):
        for key, value in thing.items():
            assert_is_json_serialisable(value, name, path + [repr(key)])
    else:
        try:
            json.dumps(thing)
        except TypeError as e:
            location = "".join("[%s]" % step for step in path)
            raise TypeError(
                "JSON serialisation error '%s' for value '%s' type %s in %s%s"
                % (e, str(thing), type(thing), name, location)
            )
class ScalingSummaryContextManager:
    """Context manager that logs the scaling summary on exit."""

    def __init__(self, script):
        self.script = script

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Runs even when an exception is propagating out of the block.
        print_scaling_summary(self.script)
def print_scaling_summary(script):
    """Log summary information after scaling.

    Logs, in order: the scaling-model error summary, any excluded image
    ranges, a partiality breakdown of the reflections, and (when merging
    statistics are available) a statistics summary plus a suggested
    resolution limit from a CC-half fit.

    Args:
        script: the scaling script object; reads .experiments, .reflections,
            .params, .merging_statistics_result and related attributes.
    """
    logger.info(print_scaling_model_error_summary(script.experiments))
    valid_ranges = get_valid_image_ranges(script.experiments)
    image_ranges = get_image_ranges(script.experiments)
    msg = []
    # Report experiments whose usable image range was restricted relative to
    # the full recorded range (either split, or trimmed at either end).
    for (img, valid, refl) in zip(image_ranges, valid_ranges, script.reflections):
        if valid:
            if len(valid) > 1 or valid[0][0] != img[0] or valid[-1][1] != img[1]:
                msg.append(
                    "Excluded images for experiment id: %s, image range: %s, limited range: %s"
                    % (
                        refl.experiment_identifiers().keys()[0],
                        list(img),
                        list(valid),
                    )
                )
    if msg:
        msg = ["Summary of image ranges removed:"] + msg
        logger.info("\n".join(msg))
    # report on partiality of dataset
    partials = flex.double()
    for r in script.reflections:
        if "partiality" in r:
            partials.extend(r["partiality"])
    not_full_sel = partials < 0.99
    not_zero_sel = partials > 0.01
    gt_half = partials > 0.5
    lt_half = partials < 0.5
    partial_gt_half_sel = not_full_sel & gt_half
    partial_lt_half_sel = not_zero_sel & lt_half
    logger.info("Summary of dataset partialities")
    header = ["Partiality (p)", "n_refl"]
    # count(False) on the "< 0.99" selection counts reflections >= 0.99, and
    # likewise count(False) on "> 0.01" counts those <= 0.01.
    rows = [
        ["all reflections", str(partials.size())],
        ["p > 0.99", str(not_full_sel.count(False))],
        ["0.5 < p < 0.99", str(partial_gt_half_sel.count(True))],
        ["0.01 < p < 0.5", str(partial_lt_half_sel.count(True))],
        ["p < 0.01", str(not_zero_sel.count(False))],
    ]
    logger.info(tabulate(rows, header))
    logger.info(
        """
Reflections below a partiality_cutoff of %s are not considered for any
part of the scaling analysis or for the reporting of merging statistics.
Additionally, if applicable, only reflections with a min_partiality > %s
were considered for use when refining the scaling model.
""",
        script.params.cut_data.partiality_cutoff,
        script.params.reflection_selection.min_partiality,
    )
    stats = script.merging_statistics_result
    if stats:
        anom_stats, cut_stats, cut_anom_stats = (None, None, None)
        # Anomalous statistics are only meaningful for non-centric groups.
        if not script.scaled_miller_array.space_group().is_centric():
            anom_stats = script.anom_merging_statistics_result
        logger.info(make_merging_statistics_summary(stats))
        try:
            d_min = resolution_cc_half(stats, limit=0.3).d_min
        except RuntimeError as e:
            logger.debug(f"Resolution fit failed: {e}")
        else:
            max_current_res = stats.bins[-1].d_min
            # Only suggest a cut if it is meaningfully above the current limit.
            if d_min and d_min - max_current_res > 0.005:
                logger.info(
                    "Resolution limit suggested from CC"
                    + "\u00BD"
                    + " fit (limit CC"
                    + "\u00BD"
                    + "=0.3): %.2f",
                    d_min,
                )
                try:
                    cut_stats, cut_anom_stats = merging_stats_from_scaled_array(
                        script.scaled_miller_array.resolution_filter(d_min=d_min),
                        script.params.output.merging.nbins,
                        script.params.output.use_internal_variance,
                    )
                except DialsMergingStatisticsError:
                    # Best-effort: fall back to uncut statistics only.
                    pass
                else:
                    if script.scaled_miller_array.space_group().is_centric():
                        cut_anom_stats = None
        logger.info(table_1_summary(stats, anom_stats, cut_stats, cut_anom_stats))
class ScalingHTMLContextManager:
    """Context manager that writes the scaling HTML/JSON report on exit."""

    def __init__(self, script):
        self.script = script

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Runs even when an exception is propagating out of the block.
        _make_scaling_html(self.script)
def _make_scaling_html(scaling_script):
    """Collect data from the individual observers and write the html.

    Gathers plot data (scaling model, outliers, error model, merging stats,
    filtering), then renders the Jinja2 HTML template and/or dumps the raw
    data as JSON, depending on the configured output paths.  Does nothing
    when neither output is requested.
    """
    html_file = scaling_script.params.output.html
    json_file = scaling_script.params.output.json
    if not (html_file or json_file):
        return
    data = {}
    data.update(
        make_scaling_model_plots(scaling_script.experiments, scaling_script.reflections)
    )
    data.update(
        make_outlier_plots(scaling_script.reflections, scaling_script.experiments)
    )
    data.update(
        make_error_model_plots(scaling_script.params, scaling_script.experiments)
    )
    data.update(make_merging_stats_plots(scaling_script))
    data.update(make_filtering_plots(scaling_script))
    if html_file:
        logger.info("Writing html report to %s", html_file)
        loader = ChoiceLoader(
            [
                PackageLoader("dials", "templates"),
                PackageLoader("dials", "static", encoding="utf-8"),
            ]
        )
        env = Environment(loader=loader)
        template = env.get_template("scaling_report.html")
        # Fail early with a helpful message if any plot payload is not
        # JSON-serialisable (the template embeds it as JSON).
        assert_is_json_serialisable(data, "self.data")
        html = template.render(
            page_title="DIALS scaling report",
            scaling_model_graphs=data["scaling_model"],
            scaling_tables=data["scaling_tables"],
            error_model_summary=data["error_model_summary"],
            resolution_plots=data["resolution_plots"],
            scaling_outlier_graphs=data["outlier_plots"],
            error_model_plots=data["error_model_plots"],
            anom_plots=data["anom_plots"],
            batch_plots=data["batch_plots"],
            image_range_tables=data["image_range_tables"],
            misc_plots=data["misc_plots"],
            filter_plots=data["filter_plots"],
        )
        with open(html_file, "wb") as f:
            f.write(html.encode("utf-8", "xmlcharrefreplace"))
    if json_file:
        logger.info("Writing html report data to %s", json_file)
        with open(json_file, "w") as outfile:
            json.dump(data, outfile)
def make_scaling_model_plots(experiments, reflection_tables):
    """Collect scaling model plots for html report."""
    models = {i: expt.scaling_model for i, expt in enumerate(experiments)}
    plots = {}
    combined = make_combined_plots(models)
    if combined:
        plots.update(combined)
    # Per-dataset plots, suffixed with the dataset index to keep keys unique.
    for idx in sorted(models):
        per_model = plot_scaling_models(models[idx], reflection_tables[idx])
        for plot in per_model.values():
            plot["layout"]["title"] += f" (dataset {idx})"
        for plot_name, plot in per_model.items():
            plots[plot_name + "_" + str(idx)] = plot
    return {"scaling_model": plots}
def print_scaling_model_error_summary(experiments):
    """Get a summary of the error distribution of the models.

    For each model component with estimated standard deviations, computes
    |parameter - null_value| / sigma and reports what fraction of parameters
    fall within a factor of ~2 of their null value (i.e. have relatively
    large uncertainty).  Returns an empty string when the first model carries
    no est_standard_devs.
    """
    models = [e.scaling_model.to_dict() for e in experiments]
    first_model = models[0]
    # Presence of est_standard_devs on the first correction of the first
    # model is used as a proxy for all models having been minimised.
    component = first_model["configuration_parameters"]["corrections"][0]
    msg = ""
    if "est_standard_devs" in first_model[component]:
        p_sigmas = flex.double()
        for model in models:
            for component in model["configuration_parameters"]["corrections"]:
                if "est_standard_devs" in model[component]:
                    params = flex.double(model[component]["parameters"])
                    sigmas = flex.double(model[component]["est_standard_devs"])
                    null_value = flex.double(
                        len(params), model[component]["null_parameter_value"]
                    )
                    p_sigmas.extend(flex.abs(params - null_value) / sigmas)
        log_p_sigmas = flex.log(p_sigmas)
        # 0.69315 ~= ln(2): log(|p - null|/sigma) < ln 2 means the parameter
        # lies within ~2 sigma of its null value, i.e. poorly determined.
        # NOTE(review): if no component has est_standard_devs, len() is 0 and
        # this divides by zero -- presumably unreachable given the guard
        # above; confirm.
        frac_high_uncertainty = (log_p_sigmas < 0.69315).count(True) / len(log_p_sigmas)
        if frac_high_uncertainty > 0.5:
            msg = (
                "Warning: Over half ({:.2f}%) of model parameters have significant\n"
                "uncertainty (sigma/abs(parameter) > 0.5), which could indicate a\n"
                "poorly-determined scaling problem or overparameterisation.\n"
            ).format(frac_high_uncertainty * 100)
        else:
            msg = (
                "{:.2f}% of model parameters have significant uncertainty\n"
                "(sigma/abs(parameter) > 0.5)\n"
            ).format(frac_high_uncertainty * 100)
    return msg
def make_outlier_plots(reflection_tables, experiments):
    """Make outlier plots for the HTML report.

    For each dataset, collects the detector x/y/z positions of reflections
    flagged as scaling outliers, then generates per-dataset xy-position and
    outliers-vs-z plots keyed by dataset index.
    """
    data = {}
    for j, (table, expt) in enumerate(zip(reflection_tables, experiments)):
        outliers = table.get_flags(table.flags.outlier_in_scaling)
        x, y, z = table["xyzobs.px.value"].select(outliers).parts()
        if expt.scan:
            # Convert the scan's oscillation range (degrees) to image numbers
            # by dividing by the per-image oscillation width.
            zrange = [
                i / expt.scan.get_oscillation()[1]
                for i in expt.scan.get_oscillation_range()
            ]
        else:
            # Still experiments have no meaningful z range.
            zrange = [0, 0]
        data[j] = {
            "x": list(x),
            "y": list(y),
            "z": list(z),
            "image_size": expt.detector[0].get_image_size(),
            "z_range": zrange,
        }
    d = {}
    for key in sorted(data):
        outlier_plots = plot_outliers(data[key])
        for plot in outlier_plots.values():
            if plot:  # may be null if no outliers
                plot["layout"]["title"] += f" (dataset {key})"
        d["outlier_plot_" + str(key)] = outlier_plots["outlier_xy_positions"]
        d["outlier_plot_z" + str(key)] = outlier_plots["outliers_vs_z"]
    graphs = {"outlier_plots": d}
    return graphs
def make_error_model_plots(params, experiments):
    """Generate normal probability plot data.

    Builds the error-model section of the report: a text summary of the
    (unique) error models and, for each model with a filtered Ih table,
    normal-probability, I/sigI-vs-I, variance and (optionally) regression
    plots.  Returns a dict with "error_model_plots" and
    "error_model_summary" keys.
    """
    d = {"error_model_plots": {}, "error_model_summary": "No error model applied"}
    error_model_data = []  # a list of dicts of error model data
    if experiments[0].scaling_model.error_model:
        error_models = [e.scaling_model.error_model for e in experiments]
        # OrderedSet preserves first-seen order while de-duplicating shared
        # model instances (identity-based, see the `e is x` test below).
        unique_error_models = OrderedSet(error_models)
        if len(unique_error_models) == 1:
            d["error_model_summary"] = str(error_models[0])
        else:
            d["error_model_summary"] = ""
            for i, e in enumerate(unique_error_models):
                indices = [str(j + 1) for j, x in enumerate(error_models) if e is x]
                d["error_model_summary"] += (
                    f"\nError model {i+1}, applied to sweeps {', '.join(indices)}:"
                    + str(e)
                )
        for em in unique_error_models:
            if em.filtered_Ih_table:
                data_i = {}
                table = em.filtered_Ih_table
                data_i["intensity"] = table.intensities
                sigmaprime = calc_sigmaprime(em.parameters, table)
                data_i["delta_hl"] = calc_deltahl(table, table.calc_nh(), sigmaprime)
                data_i["inv_scale"] = table.inverse_scale_factors
                data_i["sigma"] = sigmaprime * data_i["inv_scale"]
                data_i["binning_info"] = em.binner.binning_info
                # Free the (large) Ih table once the plot data is extracted.
                em.clear_Ih_table()
                # NOTE(review): em.filtered_Ih_table is accessed below AFTER
                # clear_Ih_table() was called above -- looks suspicious;
                # confirm clear_Ih_table does not clear filtered_Ih_table.
                if params.weighting.error_model.basic.minimisation == "regression":
                    x, y = calculate_regression_x_y(em.filtered_Ih_table)
                    data_i["regression_x"] = x
                    data_i["regression_y"] = y
                    data_i["model_a"] = em.parameters[0]
                    data_i["model_b"] = em.parameters[1]
                error_model_data.append(data_i)
    if error_model_data:
        for i, emd in enumerate(error_model_data):
            d["error_model_plots"].update(normal_probability_plot(emd, label=i + 1))
            d["error_model_plots"].update(
                i_over_sig_i_vs_i_plot(
                    flumpy.from_numpy(emd["intensity"]),
                    flumpy.from_numpy(emd["sigma"]),
                    label=i + 1,
                )
            )
            d["error_model_plots"].update(error_model_variance_plot(emd, label=i + 1))
            if "regression_x" in emd:
                d["error_model_plots"].update(error_regression_plot(emd, label=i + 1))
    return d
def make_filtering_plots(script):
    """Make filtering plots for HTML report"""
    results = script.filtering_results
    # No delta-CC-half filtering was run: return an empty plot section.
    if not results:
        return {"filter_plots": {}}
    data = {
        "merging_stats": results.get_merging_stats(),
        "initial_expids_and_image_ranges": results.initial_expids_and_image_ranges,
        "cycle_results": results.get_cycle_results(),
        "expids_and_image_ranges": results.expids_and_image_ranges,
        "mode": script.params.filtering.deltacchalf.mode,
    }
    return {"filter_plots": make_scaling_filtering_plots(data)}
def make_merging_stats_plots(script):
    """Make merging stats plots for HTML report.

    Builds resolution-dependent plots and statistics tables, per-batch plots,
    miscellaneous intensity plots and anomalous plots from the script's
    merging statistics, plus an image-range table.  Sections stay empty when
    no merging statistics are available.
    """
    d = {
        "scaling_tables": ([], []),
        "resolution_plots": {},
        "batch_plots": {},
        "misc_plots": {},
        "anom_plots": {},
        "image_range_tables": [],
    }
    (
        batches,
        rvb,
        isigivb,
        svb,
        batch_data,
    ) = reflection_tables_to_batch_dependent_properties(  # pylint: disable=unbalanced-tuple-unpacking
        script.reflections,
        script.experiments,
        script.scaled_miller_array,
    )
    bm = batch_manager(batches, batch_data)
    image_range_tables = make_image_range_table(script.experiments, bm)
    if script.merging_statistics_result:
        stats = script.merging_statistics_result
        anom_stats = script.anom_merging_statistics_result
        is_centric = script.scaled_miller_array.space_group().is_centric()
        # Now calculate batch data
        plotter = ResolutionPlotsAndStats(stats, anom_stats, is_centric)
        d["resolution_plots"].update(plotter.make_all_plots())
        d["scaling_tables"] = plotter.statistics_tables()
        d["batch_plots"].update(scale_rmerge_vs_batch_plot(bm, rvb, svb))
        d["batch_plots"].update(i_over_sig_i_vs_batch_plot(bm, isigivb))
        plotter = IntensityStatisticsPlots(
            script.scaled_miller_array, run_xtriage_analysis=False
        )
        d["resolution_plots"].update(plotter.generate_resolution_dependent_plots())
        if d["resolution_plots"]["cc_one_half"]["data"][2]:
            # Use the highest-resolution contiguous run of bins where CC-anom
            # exceeds its significance level to set the anomalous cutoff.
            cc_anom = d["resolution_plots"]["cc_one_half"]["data"][2]["y"]
            significance = d["resolution_plots"]["cc_one_half"]["data"][3]["y"]
            sig = flex.double(cc_anom) > flex.double(significance)
            max_anom = 0
            for i, v in enumerate(sig):
                if v:
                    max_anom = i
                else:
                    # Stop at the first insignificant bin.
                    break
            d_min = uctbx.d_star_sq_as_d(plotter.binner.limits())[max_anom + 1]
        else:
            # d_min = 0.0 disables the strong cutoff in AnomalousPlotter.
            d_min = 0.0
        d["misc_plots"].update(plotter.generate_miscellanous_plots())
        intensities_anom = script.scaled_miller_array.as_anomalous_array()
        intensities_anom = intensities_anom.map_to_asu().customized_copy(
            info=script.scaled_miller_array.info()
        )
        anom_plotter = AnomalousPlotter(intensities_anom, strong_cutoff=d_min)
        d["anom_plots"].update(anom_plotter.make_plots())
    d["image_range_tables"] = [image_range_tables]
    return d
|
|
#!/usr/bin/env python
from __future__ import print_function
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vmodl, vim
from datetime import timedelta, datetime
import argparse
import atexit
import getpass
# FIX SSL ISSUES WITH PYVMOMI AND PYTHON 2.7.9
import ssl
import requests
# Placeholder; replaced with the unverified SSL context built below.
context = None
# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()
# Disabling SSL certificate verification
# NOTE(review): TLSv1 is deprecated and certificate verification is disabled
# here, leaving connections open to interception -- acceptable only for lab
# use against self-signed vCenter certs; confirm before wider use.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
# FIX SSL ISSUES WITH PYVMOMI AND PYTHON 2.7.9
def GetArgs():
    """
    Parse and return the command-line arguments listed below.

    Required: -s/--host, -u/--user, -n/--name.  -p/--password is prompted
    for later when omitted; -o/--port defaults to 443 and -i/--interval
    (minutes of stats to average) defaults to 15.
    """
    parser = argparse.ArgumentParser(description='Process args for retrieving all the Virtual Machines')
    parser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to')
    parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on')
    parser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host')
    parser.add_argument('-p', '--password', required=False, action='store',
                        help='Password to use when connecting to host')
    # Typo fix: "On eor more" -> "One or more".
    parser.add_argument('-n', '--name', required=True, action='store', help='One or more Virtual Machines to report on')
    parser.add_argument('-c', '--cert_check_skip', required=False, action='store_true', help='skip ssl certificate check')
    parser.add_argument('-i', '--interval', type=int, default=15, action='store',
                        help='Interval to average the vSphere stats over')
    args = parser.parse_args()
    return args
def BuildQuery(content, vchtime, counterId, instance, vm, interval):
    """Query vCenter performance data for one counter on one entity.

    Queries the window from (vchtime - interval - 1 min) to (vchtime - 1 min)
    at a 20-second sample interval and returns the raw QueryPerf result
    list.  On an empty result, prints troubleshooting info and exits the
    interpreter (note: terminates the whole script).
    """
    perfManager = content.perfManager
    metricId = vim.PerformanceManager.MetricId(counterId=counterId, instance=instance)
    # Skip the most recent minute, which may not have been rolled up yet.
    startTime = vchtime - timedelta(minutes=(interval + 1))
    endTime = vchtime - timedelta(minutes=1)
    query = vim.PerformanceManager.QuerySpec(intervalId=20, entity=vm, metricId=[metricId], startTime=startTime,
                                             endTime=endTime)
    perfResults = perfManager.QueryPerf(querySpec=[query])
    if perfResults:
        return perfResults
    else:
        print('ERROR: Performance results empty. TIP: Check time drift on source and vCenter server')
        print('Troubleshooting info:')
        print('vCenter/host date and time: {}'.format(vchtime))
        print('Start perf counter time : {}'.format(startTime))
        print('End perf counter time : {}'.format(endTime))
        print(query)
        exit()
def PrintVmInfo(vm, content, vchtime, interval, perf_dict, ):
    """Print configuration and averaged performance statistics for one VM.

    Args:
        vm: vim.VirtualMachine managed object to report on.
        content: ServiceInstance content (provides perfManager).
        vchtime: vCenter server time used as the query baseline.
        interval: number of minutes to average the statistics over.
        perf_dict: mapping of counter full-name -> counter id (see StatCheck).
    """
    statInt = interval * 3  # There are 3 20s samples in each minute
    summary = vm.summary
    disk_list = []
    network_list = []
    # Convert limit and reservation values from -1 to None
    # NOTE: these four strings are computed but not currently printed below.
    if vm.resourceConfig.cpuAllocation.limit == -1:
        vmcpulimit = "None"
    else:
        vmcpulimit = "{} Mhz".format(vm.resourceConfig.cpuAllocation.limit)
    if vm.resourceConfig.memoryAllocation.limit == -1:
        vmmemlimit = "None"
    else:
        # BUG FIX: previously formatted cpuAllocation.limit here (copy-paste
        # error); the memory limit must come from memoryAllocation.
        vmmemlimit = "{} MB".format(vm.resourceConfig.memoryAllocation.limit)
    if vm.resourceConfig.cpuAllocation.reservation == 0:
        vmcpures = "None"
    else:
        vmcpures = "{} Mhz".format(vm.resourceConfig.cpuAllocation.reservation)
    if vm.resourceConfig.memoryAllocation.reservation == 0:
        vmmemres = "None"
    else:
        vmmemres = "{} MB".format(vm.resourceConfig.memoryAllocation.reservation)
    vm_hardware = vm.config.hardware
    # Device keys 2000-2999 are virtual disks, 4000-4999 are virtual NICs.
    for each_vm_hardware in vm_hardware.device:
        if (each_vm_hardware.key >= 2000) and (each_vm_hardware.key < 3000):
            disk_list.append('{} | {:.1f}GB | Thin: {} | {}'.format(each_vm_hardware.deviceInfo.label,
                                                         each_vm_hardware.capacityInKB/1024/1024,
                                                         each_vm_hardware.backing.thinProvisioned,
                                                         each_vm_hardware.backing.fileName))
        elif (each_vm_hardware.key >= 4000) and (each_vm_hardware.key < 5000):
            network_list.append('{} | {} | {}'.format(each_vm_hardware.deviceInfo.label,
                                                         each_vm_hardware.deviceInfo.summary,
                                                         each_vm_hardware.macAddress))
    #CPU Ready Average
    statCpuReady = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'cpu.ready.summation')), "", vm, interval)
    cpuReady = (float(sum(statCpuReady[0].value[0].value)) / statInt)
    #CPU Usage Average % - NOTE: values are type LONG so needs divided by 100 for percentage
    statCpuUsage = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'cpu.usage.average')), "", vm, interval)
    cpuUsage = ((float(sum(statCpuUsage[0].value[0].value)) / statInt) / 100)
    #Memory Active Average MB
    statMemoryActive = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.active.average')), "", vm, interval)
    memoryActive = (float(sum(statMemoryActive[0].value[0].value) / 1024) / statInt)
    #Memory Shared
    statMemoryShared = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.shared.average')), "", vm, interval)
    memoryShared = (float(sum(statMemoryShared[0].value[0].value) / 1024) / statInt)
    #Memory Balloon
    statMemoryBalloon = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.vmmemctl.average')), "", vm, interval)
    memoryBalloon = (float(sum(statMemoryBalloon[0].value[0].value) / 1024) / statInt)
    #Memory Swapped
    statMemorySwapped = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.swapped.average')), "", vm, interval)
    memorySwapped = (float(sum(statMemorySwapped[0].value[0].value) / 1024) / statInt)
    #Datastore Average IO
    statDatastoreIoRead = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.numberReadAveraged.average')),
                                     "*", vm, interval)
    DatastoreIoRead = (float(sum(statDatastoreIoRead[0].value[0].value)) / statInt)
    statDatastoreIoWrite = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.numberWriteAveraged.average')),
                                      "*", vm, interval)
    DatastoreIoWrite = (float(sum(statDatastoreIoWrite[0].value[0].value)) / statInt)
    #Datastore Average Latency
    statDatastoreLatRead = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.totalReadLatency.average')),
                                      "*", vm, interval)
    DatastoreLatRead = (float(sum(statDatastoreLatRead[0].value[0].value)) / statInt)
    statDatastoreLatWrite = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.totalWriteLatency.average')),
                                       "*", vm, interval)
    DatastoreLatWrite = (float(sum(statDatastoreLatWrite[0].value[0].value)) / statInt)
    #Network usage (Tx/Rx) - KBps values converted to Mbps (*8/1024)
    statNetworkTx = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'net.transmitted.average')), "", vm, interval)
    networkTx = (float(sum(statNetworkTx[0].value[0].value) * 8 / 1024) / statInt)
    statNetworkRx = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'net.received.average')), "", vm, interval)
    networkRx = (float(sum(statNetworkRx[0].value[0].value) * 8 / 1024) / statInt)
    print('\nNOTE: Any VM statistics are averages of the last {} minutes\n'.format(statInt / 3))
    print('Server Name                    :', summary.config.name)
    print('Description                    :', summary.config.annotation)
    print('Guest                          :', summary.config.guestFullName)
    if vm.rootSnapshot:
        print('Snapshot Status                : Snapshots present')
    else:
        print('Snapshot Status                : No Snapshots')
    print('VM .vmx Path                   :', summary.config.vmPathName)
    # First disk/NIC printed on the label line, the rest indented below it.
    try:
        print('Virtual Disks                  :', disk_list[0])
        if len(disk_list) > 1:
            disk_list.pop(0)
            for each_disk in disk_list:
                print('                                ', each_disk)
    except IndexError:
        pass
    print('Virtual NIC(s)                 :', network_list[0])
    if len(network_list) > 1:
        network_list.pop(0)
        for each_vnic in network_list:
            print('                                ', each_vnic)
    print('Number of vCPUs                :', summary.config.numCpu)
    # cpu.ready.summation is ms of ready time per 20 s sample -> % of 20000 ms.
    print('CPU Ready                      : Average {:.1f} %, Maximum {:.1f} %'.format((cpuReady / 20000 * 100),
                                                                                       ((float(max(
                                                                                           statCpuReady[0].value[
                                                                                               0].value)) / 20000 * 100))))
    print('CPU (%)                        : {:.0f} %'.format(cpuUsage))
    print('Memory                         : {} MB ({:.1f} GB)'.format(summary.config.memorySizeMB, (float(summary.config.memorySizeMB) / 1024)))
    print('Memory Shared                  : {:.0f} %, {:.0f} MB'.format(
        ((memoryShared / summary.config.memorySizeMB) * 100), memoryShared))
    print('Memory Balloon                 : {:.0f} %, {:.0f} MB'.format(
        ((memoryBalloon / summary.config.memorySizeMB) * 100), memoryBalloon))
    print('Memory Swapped                 : {:.0f} %, {:.0f} MB'.format(
        ((memorySwapped / summary.config.memorySizeMB) * 100), memorySwapped))
    print('Memory Active                  : {:.0f} %, {:.0f} MB'.format(
        ((memoryActive / summary.config.memorySizeMB) * 100), memoryActive))
    print('Datastore Average IO           : Read: {:.0f} IOPS, Write: {:.0f} IOPS'.format(DatastoreIoRead,
                                                                                          DatastoreIoWrite))
    print('Datastore Average Latency      : Read: {:.0f} ms, Write: {:.0f} ms'.format(DatastoreLatRead,
                                                                                      DatastoreLatWrite))
    print('Overall Network Usage          : Transmitted {:.3f} Mbps, Received {:.3f} Mbps'.format(networkTx, networkRx))
def StatCheck(perf_dict, counter_name):
    """Look up and return the numeric performance-counter key for
    *counter_name* in the *perf_dict* mapping."""
    return perf_dict[counter_name]
def GetProperties(content, viewType, props, specType):
    """Build a container view and collect *props* for every managed object
    of *specType*, returning one dict per object (the managed object
    reference is stored under the key 'moref')."""
    container = content.viewManager.CreateContainerView(content.rootFolder, viewType, True)
    traversal = vim.PropertyCollector.TraversalSpec(name='tSpecName', path='view', skip=False, type=vim.view.ContainerView)
    prop_spec = vim.PropertyCollector.PropertySpec(all=False, pathSet=props, type=specType)
    obj_spec = vim.PropertyCollector.ObjectSpec(obj=container, selectSet=[traversal], skip=False)
    filter_spec = vim.PropertyCollector.FilterSpec(objectSet=[obj_spec], propSet=[prop_spec], reportMissingObjectsInResults=False)
    options = vim.PropertyCollector.RetrieveOptions()
    collected = []
    page = content.propertyCollector.RetrievePropertiesEx(specSet=[filter_spec], options=options)
    collected.extend(page.objects)
    # Page through any remaining result sets.
    while page.token:
        page = content.propertyCollector.ContinueRetrievePropertiesEx(token=page.token)
        collected.extend(page.objects)
    container.Destroy()
    # Flatten each object's propSet into a plain dictionary of values.
    output = []
    for item in collected:
        entry = dict((prop.name, prop.val) for prop in item.propSet)
        entry['moref'] = item.obj
        output.append(entry)
    return output
def main():
    """Connect to the vSphere host, resolve the VMs named on the command
    line and print their performance statistics.

    Returns 0 on success, -1 on connection failure or a caught exception.
    """
    args = GetArgs()
    try:
        vmnames = args.name
        si = None
        if args.password:
            password = args.password
        else:
            # Prompt interactively when no password was supplied.
            password = getpass.getpass(prompt="Enter password for host {} and user {}: ".format(args.host, args.user))
        try:
            # NOTE(review): ``context`` (an SSL context) must be defined
            # earlier at module level -- it is not visible in this block;
            # confirm before refactoring.
            si = SmartConnect(host=args.host, user=args.user, pwd=password, port=int(args.port),sslContext=context)
        except IOError as e:
            # Connection errors fall through to the ``if not si`` check below.
            pass
        if not si:
            print('Could not connect to the specified host using specified username and password')
            return -1
        # Ensure a clean logout at interpreter exit.
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()
        # Get vCenter date and time for use as baseline when querying for counters
        vchtime = si.CurrentTime()
        # Build a name -> key lookup for all available performance counters.
        perf_dict = {}
        perfList = content.perfManager.perfCounter
        for counter in perfList:
            counter_full = "{}.{}.{}".format(counter.groupInfo.key, counter.nameInfo.key, counter.rollupType)
            perf_dict[counter_full] = counter.key
        retProps = GetProperties(content, [vim.VirtualMachine], ['name', 'runtime.powerState'], vim.VirtualMachine)
        # Find VMs supplied as args and use the Managed Object Reference (moref) for PrintVmInfo.
        for vm in retProps:
            if (vm['name'] in vmnames) and (vm['runtime.powerState'] == "poweredOn"):
                PrintVmInfo(vm['moref'], content, vchtime, args.interval, perf_dict)
            elif vm['name'] in vmnames:
                # Named VM found but not powered on -- stats are unavailable.
                print('ERROR: Problem connecting to Virtual Machine. {} is likely powered off or suspended'.format(vm['name']))
    except vmodl.MethodFault as e:
        print('Caught vmodl fault : ' + e.msg)
        return -1
    except Exception as e:
        print('Caught exception : ' + str(e))
        return -1
    return 0
# Start program
if __name__ == "__main__":
    # NOTE: main()'s return code is discarded here; wrap in sys.exit() at the
    # call site if a process exit status is required.
    main()
|
|
# python imports
import locale
import math
# django imports
from django.conf import settings
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils import simplejson
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
# lfs imports
import lfs.catalog.utils
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.cart.views import add_to_cart
from lfs.catalog.models import Category, Property
from lfs.catalog.models import File
from lfs.catalog.models import Product
from lfs.catalog.models import ProductPropertyValue
from lfs.catalog.models import PropertyOption
from lfs.catalog.settings import CONTENT_PRODUCTS
from lfs.catalog.settings import PROPERTY_VALUE_TYPE_DEFAULT
from lfs.catalog.settings import SELECT
from lfs.core.utils import LazyEncoder, lfs_pagination
from lfs.core.templatetags import lfs_tags
from lfs.utils import misc as lfs_utils
def file(request, language=None, id=None):
    """Delivers files to the browser.

    Looks up the ``File`` with primary key *id* and streams its content back
    as a binary attachment named after the file's title.

    NOTE: the view's name shadows the builtin ``file`` but cannot be renamed
    without touching the URL configuration; the local variable, however, no
    longer shadows it.
    """
    file_obj = lfs_get_object_or_404(File, pk=id)
    response = HttpResponse(file_obj.file, mimetype='application/binary')
    response['Content-Disposition'] = 'attachment; filename=%s' % file_obj.title
    return response
def select_variant(request):
    """Ajax endpoint: re-render the product inline after a variant has been
    selected and return it as JSON together with a confirmation message."""
    variant = Product.objects.get(pk=request.POST.get("variant_id"))
    payload = {
        "product": product_inline(request, variant),
        "message": _(u"The product has been changed according to your selection."),
    }
    return HttpResponse(simplejson.dumps(payload, cls=LazyEncoder))
def calculate_packing(request, id, quantity=None, with_properties=False, as_string=False, template_name="lfs/catalog/packing_result.html"):
    """Calculates the actual amount of pieces to buy on base on packing
    information.

    Renders the packing result template and returns it either as a raw HTML
    string (``as_string=True``) or wrapped in a JSON ``HttpResponse``.
    """
    product = Product.objects.get(pk=id)
    if quantity is None:
        try:
            quantity = request.POST.get("quantity")
            if isinstance(quantity, unicode):
                # atof() on unicode string fails in some environments, like Czech
                quantity = quantity.encode("utf-8")
            quantity = locale.atof(quantity)
        except (AttributeError, TypeError, ValueError):
            # Missing or unparsable quantity falls back to a single piece.
            quantity = 1
    packing_amount, packing_unit = product.get_packing_info()
    try:
        # Round up to whole packs, then price the rounded-up real quantity.
        packs = math.ceil(quantity / packing_amount)
        real_quantity = packs * packing_amount
        price = product.get_price_gross(request, with_properties=with_properties)
        price += _calculate_property_price(request)
        price *= real_quantity
    except TypeError:
        # No usable packing information (e.g. packing_amount is None).
        packs = 0.0
        real_quantity = 0.0
        price = 0.0
    html = render_to_string(template_name, RequestContext(request, {
        "price": price,
        "product": product,
        "packs": int(packs),
        "real_quantity": real_quantity,
        "unit": packing_unit,
    }))
    if as_string:
        return html
    result = simplejson.dumps({
        "html": html,
    }, cls=LazyEncoder)
    return HttpResponse(result)
def calculate_price(request, id):
    """Calculates the price of the product on base of choosen properties after
    a customer has selected a property on product view.

    Returns a JSON response containing the formatted price, the for-sale
    prices, the re-rendered packing result and a confirmation message.
    """
    product = Product.objects.get(pk=id)
    property_price = _calculate_property_price(request)
    if product.for_sale:
        for_sale_standard_price = product.get_standard_price(request, with_properties=False)
        for_sale_standard_price += property_price
        for_sale_price = product.get_for_sale_price(request, with_properties=False)
        for_sale_price += property_price
    else:
        for_sale_standard_price = 0
        for_sale_price = 0
    price = product.get_price(request, with_properties=False)
    price += property_price
    result = simplejson.dumps({
        "price": lfs_tags.currency(price, request),
        # Consistency fix: pass ``request`` to every currency() call; the two
        # calls below previously omitted it, unlike the call above.
        "for-sale-standard-price": lfs_tags.currency(for_sale_standard_price, request),
        "for-sale-price": lfs_tags.currency(for_sale_price, request),
        "packing-result": calculate_packing(request, id, as_string=True),
        "message": _("Price has been changed according to your selection."),
    }, cls=LazyEncoder)
    return HttpResponse(result)
def select_variant_from_properties(request):
    """
    Ajax endpoint: switch the displayed product to the variant matching the
    currently selected combination of properties.
    """
    try:
        variant = Product.objects.get(pk=request.POST.get("product_id"))
    except Product.DoesNotExist:
        return HttpResponse("")

    parent = variant.parent
    selected_options = lfs_utils.parse_properties(request)
    variant = parent.get_variant(selected_options)
    if variant is None:
        # No deliverable variant for this combination -- fall back.
        message = _(u"The choosen combination of properties is not deliverable.")
        variant = parent.get_default_variant()
    else:
        message = _(u"The product has been changed according to your selection.")
    payload = simplejson.dumps({
        "product": product_inline(request, variant),
        "message": message,
    }, cls=LazyEncoder)
    return HttpResponse(payload)
def set_filter(request, category_slug, property_id, value=None, min=None, max=None):
    """Saves the given filter to session. Redirects to the category with given
    slug.
    """
    filters = request.session.get("product-filter", {})
    # A discrete value wins; otherwise store the (min, max) range.
    filters[property_id] = value if value is not None else (min, max)
    request.session["product-filter"] = filters
    return HttpResponseRedirect(reverse("lfs_category", kwargs={"slug": category_slug}))
def set_price_filter(request, category_slug):
    """Saves the given price filter to session. Redirects to the category with
    given slug.

    Both bounds are validated as floats; an unparsable bound falls back to
    its default ("0" for the minimum, "99999" for the maximum).
    """
    min_price = request.REQUEST.get("min", "0")
    max_price = request.REQUEST.get("max", "99999")
    try:
        float(min_price)
    except (TypeError, ValueError):
        min_price = "0"
    try:
        float(max_price)
    except (TypeError, ValueError):
        # BUG FIX: previously fell back to "0", which made an invalid maximum
        # exclude every product; the maximum's default is "99999".
        max_price = "99999"
    request.session["price-filter"] = {"min": min_price, "max": max_price}
    url = reverse("lfs_category", kwargs={"slug": category_slug})
    return HttpResponseRedirect(url)
def reset_price_filter(request, category_slug):
    """Removes any stored price filter from the session and redirects to the
    category with the given slug."""
    if "price-filter" in request.session:
        del request.session["price-filter"]
    return HttpResponseRedirect(reverse("lfs_category", kwargs={"slug": category_slug}))
def reset_filter(request, category_slug, property_id):
    """Drops the product filter for *property_id* from the session and
    redirects to the category with the given slug."""
    session = request.session
    if "product-filter" in session:
        filters = session["product-filter"]
        if property_id in filters:
            del filters[property_id]
            # Reassign so the session backend notices the nested mutation.
            session["product-filter"] = filters
    return HttpResponseRedirect(reverse("lfs_category", kwargs={"slug": category_slug}))
def reset_all_filter(request, category_slug):
    """Removes both the product filter and the price filter from the session
    and redirects to the category with the given slug."""
    for key in ("product-filter", "price-filter"):
        if key in request.session:
            del request.session[key]
    return HttpResponseRedirect(reverse("lfs_category", kwargs={"slug": category_slug}))
@csrf_exempt
def set_sorting(request):
    """Stores the posted sorting in the session; an empty value removes any
    previously stored sorting."""
    sorting = request.POST.get("sorting", "")
    # Same truth table as the original: an empty value only deletes when a
    # sorting is already stored; otherwise the value (even "") is saved.
    if sorting or "sorting" not in request.session:
        request.session["sorting"] = sorting
    else:
        del request.session["sorting"]
    # lfs_sorting_changed.send(category_id)
    return HttpResponseRedirect(request.META.get("HTTP_REFERER"))
def category_view(request, slug, template_name="lfs/catalog/category_base.html"):
    """Renders the category page for *slug*, delegating the inline part to
    either the products view or the sub-categories view depending on the
    category's content type."""
    start = request.REQUEST.get("start", 1)
    category = lfs_get_object_or_404(Category, slug=slug)
    inline = (category_products(request, slug, start)
              if category.get_content() == CONTENT_PRODUCTS
              else category_categories(request, slug))
    # Set last visited category for later use, e.g. display breadcrumbs,
    # selected menu points, etc.
    request.session["last_category"] = category
    # TODO: Factor top_category out to an inclusion tag, so that people can
    # omit it if they don't need it.
    context = RequestContext(request, {
        "category": category,
        "category_inline": inline,
        "top_category": lfs.catalog.utils.get_current_top_category(request, category),
    })
    return render_to_response(template_name, context)
def category_categories(request, slug, start=0, template_name="lfs/catalog/categories/category/default.html"):
    """Displays the child categories of the category with passed slug.

    This view is called if the user chooses a template that is situated in
    settings.CATEGORY_PATH. The rendered result is cached per category slug.
    """
    cache_key = "%s-category-categories-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, slug)
    result = cache.get(cache_key)
    if result is not None:
        return result
    category = lfs_get_object_or_404(Category, slug=slug)
    format_info = category.get_format_info()
    amount_of_cols = format_info["category_cols"]
    # Chunk the children into rows of ``amount_of_cols`` for the template.
    categories = []
    row = []
    for i, child in enumerate(category.get_children()):
        row.append(child)
        if (i + 1) % amount_of_cols == 0:
            categories.append(row)
            row = []
    if row:
        categories.append(row)
    render_template = category.get_template_name()
    if render_template is not None:  # idiom fix: was ``!= None``
        template_name = render_template
    result = render_to_string(template_name, RequestContext(request, {
        "category": category,
        "categories": categories,
    }))
    cache.set(cache_key, result)
    return result
def category_products(request, slug, start=1, template_name="lfs/catalog/categories/product/default.html"):
    """Displays the products of the category with passed slug.

    This view is called if the user chooses a template that is situated in
    settings.PRODUCT_PATH. Rendered pages are cached per category under a
    sub key built from the page number, sorting and the active filters.
    """
    # Resets the product filters if the user navigates to another category.
    # TODO: Is this what a customer would expect?
    last_category = request.session.get("last_category")
    if (last_category is None) or (last_category.slug != slug):
        if "product-filter" in request.session:
            del request.session["product-filter"]
        if "price-filter" in request.session:
            del request.session["price-filter"]
    try:
        default_sorting = settings.LFS_PRODUCTS_SORTING
    except AttributeError:
        default_sorting = "price"
    sorting = request.session.get("sorting", default_sorting)
    product_filter = request.session.get("product-filter", {})
    product_filter = product_filter.items()
    # Outer key identifies the category; the sub key encodes page, sorting
    # and the active product/price filters.
    cache_key = "%s-category-products-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, slug)
    sub_cache_key = "%s-start-%s-sorting-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, start, sorting)
    filter_key = ["%s-%s" % (i[0], i[1]) for i in product_filter]
    if filter_key:
        sub_cache_key += "-%s" % "-".join(filter_key)
    price_filter = request.session.get("price-filter")
    if price_filter:
        sub_cache_key += "-%s-%s" % (price_filter["min"], price_filter["max"])
    temp = cache.get(cache_key)
    if temp is not None:
        try:
            return temp[sub_cache_key]
        except KeyError:
            pass
    else:
        temp = dict()
    category = lfs_get_object_or_404(Category, slug=slug)
    # Calculates parameters for display.
    try:
        start = int(start)
    except (ValueError, TypeError):
        start = 1
    format_info = category.get_format_info()
    amount_of_rows = format_info["product_rows"]
    amount_of_cols = format_info["product_cols"]
    amount = amount_of_rows * amount_of_cols
    all_products = lfs.catalog.utils.get_filtered_products_for_category(
        category, product_filter, price_filter, sorting)
    # prepare paginator
    paginator = Paginator(all_products, amount)
    try:
        current_page = paginator.page(start)
    except (EmptyPage, InvalidPage):
        # Out-of-range page numbers clamp to the last page.
        current_page = paginator.page(paginator.num_pages)
    # Calculate products
    row = []
    products = []
    for i, product in enumerate(current_page.object_list):
        # For products with variants display the category default variant.
        if product.is_product_with_variants():
            default_variant = product.get_variant_for_category(request)
            if default_variant:
                product = default_variant
        image = None
        product_image = product.get_image()
        if product_image:
            image = product_image.image
        row.append({
            "obj": product,
            "slug": product.slug,
            "name": product.get_name(),
            "image": image,
            "price_unit": product.price_unit,
            "price_includes_tax": product.price_includes_tax(request),
        })
        # Start a new row after every ``amount_of_cols`` products.
        if (i + 1) % amount_of_cols == 0:
            products.append(row)
            row = []
    if len(row) > 0:
        products.append(row)
    amount_of_products = all_products.count()
    # Calculate urls
    pagination_data = lfs_pagination(request, current_page, url=category.get_absolute_url())
    render_template = category.get_template_name()
    if render_template is not None:
        template_name = render_template
    template_data = {
        "category": category,
        "products": products,
        "amount_of_products": amount_of_products,
        "pagination": pagination_data
    }
    result = render_to_string(template_name, RequestContext(request, template_data))
    temp[sub_cache_key] = result
    cache.set(cache_key, temp)
    return result
def product_view(request, slug, template_name="lfs/catalog/product_base.html"):
    """Main view to display a product.

    Inactive products are visible to superusers only; everyone else gets a
    404. The viewed product's slug is also pushed to the front of the
    session's list of recently seen products.
    """
    product = lfs_get_object_or_404(Product, slug=slug)
    # Idiom fix: was ``(...) == False``.
    if not (request.user.is_superuser or product.is_active()):
        raise Http404()
    # Store recent products for later use
    recent = request.session.get("RECENT_PRODUCTS", [])
    if slug in recent:
        recent.remove(slug)
    recent.insert(0, slug)
    if len(recent) > settings.LFS_RECENT_PRODUCTS_LIMIT:
        # NOTE(review): keeps LIMIT + 1 entries (the current product plus
        # LIMIT others) -- presumably intentional; confirm before changing.
        recent = recent[:settings.LFS_RECENT_PRODUCTS_LIMIT + 1]
    request.session["RECENT_PRODUCTS"] = recent
    result = render_to_response(template_name, RequestContext(request, {
        "product_inline": product_inline(request, product),
        "product": product,
    }))
    return result
def product_inline(request, product, template_name="lfs/catalog/products/product_inline.html"):
    """
    Part of the product view, which displays the actual data of the product.

    This is factored out so it can be cached better and might in the future
    be updated via ajax requests.
    """
    # Cache per product and per superuser-flag (superusers may see inactive data).
    cache_key = "%s-product-inline-%s-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, request.user.is_superuser, product.id)
    result = cache.get(cache_key)
    if result is not None:
        return result
    # Switching to default variant
    if product.is_product_with_variants():
        temp = product.get_default_variant()
        product = temp if temp else product
    properties = []
    variants = []
    display_variants_list = True
    if product.is_variant():
        parent = product.parent
        if parent.variants_display_type == SELECT:
            # Variants are chosen via select boxes, not shown as a list.
            display_variants_list = False
            # Get all properties (sorted). We need to traverse through all
            # property/options to select the options of the current variant.
            for property in parent.get_property_select_fields():
                options = []
                for property_option in property.options.all():
                    if product.has_option(property, property_option):
                        selected = True
                    else:
                        selected = False
                    options.append({
                        "id": property_option.id,
                        "name": property_option.name,
                        "selected": selected,
                    })
                properties.append({
                    "id": property.id,
                    "name": property.name,
                    "title": property.title,
                    "unit": property.unit,
                    "options": options,
                })
        else:
            properties = parent.get_property_select_fields()
            variants = parent.get_variants()
    elif product.is_configurable_product():
        for property in product.get_configurable_properties():
            options = []
            try:
                # Preselect the stored default value for this property.
                ppv = ProductPropertyValue.objects.get(product=product, property=property, type=PROPERTY_VALUE_TYPE_DEFAULT)
                ppv_value = ppv.value
            except ProductPropertyValue.DoesNotExist:
                ppv = None
                ppv_value = ""
            for property_option in property.options.all():
                if ppv_value == str(property_option.id):
                    selected = True
                else:
                    selected = False
                options.append({
                    "id": property_option.id,
                    "name": property_option.name,
                    "price": property_option.price,
                    "selected": selected,
                })
            properties.append({
                "obj": property,
                "id": property.id,
                "name": property.name,
                "title": property.title,
                "unit": property.unit,
                "display_price": property.display_price,
                "options": options,
                "value": ppv_value,
            })
    if product.get_template_name() != None:
        template_name = product.get_template_name()
    if product.get_active_packing_unit():
        packing_result = calculate_packing(request, product.id, 1, True, True)
    else:
        packing_result = ""
    # attachments
    attachments = product.get_attachments()
    result = render_to_string(template_name, RequestContext(request, {
        "product": product,
        "variants": variants,
        "product_accessories": product.get_accessories(),
        "properties": properties,
        "packing_result": packing_result,
        "attachments": attachments,
        "quantity": product.get_clean_quantity(1),
        "price_includes_tax": product.price_includes_tax(request),
        "price_unit": product.get_price_unit(),
        "unit": product.get_unit(),
        "display_variants_list": display_variants_list,
        "for_sale": product.get_for_sale(),
    }))
    cache.set(cache_key, result)
    return result
def product_form_dispatcher(request):
    """Dispatches to the added-to-cart view or to the selected variant.

    The product form can carry several submit buttons: the add-to-cart
    button and the switch-to-variant button (only when the variants of the
    product are displayed as a select box; this may change once the switch
    is made with an ajax request).
    """
    if request.REQUEST.get("add-to-cart") is not None:
        return add_to_cart(request)

    product = lfs_get_object_or_404(Product, pk=request.POST.get("product_id"))
    variant = product.get_variant(lfs_utils.parse_properties(request))
    if variant is None:
        # Combination not deliverable: fall back to the default variant and
        # attach an explanatory message cookie.
        fallback = product.get_default_variant()
        return lfs.core.utils.set_message_cookie(
            fallback.get_absolute_url(),
            msg=_(u"The choosen combination of properties is not deliverable.")
        )
    return HttpResponseRedirect(variant.get_absolute_url())
def _calculate_property_price(request):
    """
    Sums up the extra prices of all select-field property options that are
    currently chosen in the request's POST data.
    """
    total = 0
    for key, option_id in request.POST.items():
        if not key.startswith("property"):
            continue
        try:
            prop = Property.objects.get(pk=int(key.split('-')[1]))
            if prop.is_select_field:
                option = PropertyOption.objects.get(property=prop, pk=option_id)
                if prop.add_price:
                    total += float(option.price)
        except (IndexError, ValueError, TypeError, PropertyOption.DoesNotExist, Property.DoesNotExist):
            # Malformed keys or missing objects simply don't contribute.
            pass
    return total
|
|
"""
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiate a HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
fp = file('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.2"
"""
Change History
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version in 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version in 0.8.0
* Define Template_mixin class for customization.
* Workaround a IE 6 bug that it does not treat <script> block as CDATA.
Version in 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
#import StringIO
import io as StringIO
import sys
import time
import unittest
from xml.sax import saxutils
from browser import document as doc
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr are automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture those
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
    """Wrapper that forwards stdout/stderr writes to a replaceable stream.

    The target stream is held in the public ``fp`` attribute, which the test
    runner repoints at a per-test buffer.
    """

    def __init__(self, fp):
        self.fp = fp

    def write(self, text):
        self.fp.write(text)

    def writelines(self, line_seq):
        self.fp.writelines(line_seq)

    def flush(self):
        self.fp.flush()
# Module-level redirectors: the test runner repoints their ``fp`` attribute
# at a per-test buffer to capture output (see _TestResult.startTest below).
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
    """
    Define a HTML template for report customization and generation.

    Overall structure of an HTML report:

    HTML
    +------------------------+
    |<html>                  |
    |  <head>                |
    |                        |
    |   STYLESHEET           |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </head>               |
    |                        |
    |  <body>                |
    |                        |
    |   HEADING              |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   REPORT               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |   ENDING               |
    |   +----------------+   |
    |   |                |   |
    |   +----------------+   |
    |                        |
    |  </body>               |
    |</html>                 |
    +------------------------+
    """

    # Result code -> human readable status label used in the report rows.
    STATUS = {
        0: 'pass',
        1: 'fail',
        2: 'error',
    }

    DEFAULT_TITLE = 'Unit Test Report'
    DEFAULT_DESCRIPTION = ''

    # ------------------------------------------------------------------------
    # HTML Template
    HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
    <title>%(title)s</title>
    <meta name="generator" content="%(generator)s"/>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
    %(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript"><!--
output_list = Array();

/* level - 0:Summary; 1:Failed; 2:All */
function showCase(level) {
    trs = document.getElementsByTagName("tr");
    for (var i = 0; i < trs.length; i++) {
        tr = trs[i];
        id = tr.id;
        if (id.substr(0,2) == 'ft') {
            if (level < 1) {
                tr.className = 'hiddenRow';
            }
            else {
                tr.className = '';
            }
        }
        if (id.substr(0,2) == 'pt') {
            if (level > 1) {
                tr.className = '';
            }
            else {
                tr.className = 'hiddenRow';
            }
        }
    }
}

function showClassDetail(cid, count) {
    var id_list = Array(count);
    var toHide = 1;
    for (var i = 0; i < count; i++) {
        tid0 = 't' + cid.substr(1) + '.' + (i+1);
        tid = 'f' + tid0;
        tr = document.getElementById(tid);
        if (!tr) {
            tid = 'p' + tid0;
            console.log(tid)
            tr = document.getElementById(tid);
        }
        id_list[i] = tid;
        if (tr.className) {
            toHide = 0;
        }
    }
    for (var i = 0; i < count; i++) {
        tid = id_list[i];
        if (toHide) {
            if (tid.substr(0,1) == 'f') {
                document.getElementById('div_'+tid).style.display = 'none'
            }
            document.getElementById(tid).className = 'hiddenRow';
        }
        else {
            document.getElementById(tid).className = '';
        }
    }
}

function showTestDetail(div_id){
    var details_div = document.getElementById(div_id)
    var displayState = details_div.style.display
    // alert(displayState)
    if (displayState != 'block' ) {
        displayState = 'block'
        details_div.style.display = 'block'
    }
    else {
        details_div.style.display = 'none'
    }
}

function html_escape(s) {
    s = s.replace(/&/g,'&');
    s = s.replace(/</g,'<');
    s = s.replace(/>/g,'>');
    return s;
}

/* obsoleted by detail in <div>
function showOutput(id, name) {
    var w = window.open("", //url
                    name,
                    "resizable,scrollbars,status,width=800,height=450");
    d = w.document;
    d.write("<pre>");
    d.write(html_escape(output_list[id]));
    d.write("\n");
    d.write("<a href='javascript:window.close()'>close</a>\n");
    d.write("</pre>\n");
    d.close();
}
*/
--></script>

%(heading)s
%(report)s
%(ending)s

</body>
</html>
"""
    # variables: (title, generator, stylesheet, heading, report, ending)

    # ------------------------------------------------------------------------
    # Stylesheet
    #
    # alternatively use a <link> for external style sheet, e.g.
    #   <link rel="stylesheet" href="$url" type="text/css">
    STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre { }

/* -- heading ---------------------------------------------------------------------- */
h1 {
    font-size: 16pt;
    color: gray;
}
.heading {
    margin-top: 0ex;
    margin-bottom: 1ex;
}
.heading .attribute {
    margin-top: 1ex;
    margin-bottom: 0;
}
.heading .description {
    margin-top: 4ex;
    margin-bottom: 6ex;
}

/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
    color: red;
}
.popup_window {
    display: none;
    position: relative;
    left: 0px;
    top: 0px;
    /*border: solid #627173 1px; */
    padding: 10px;
    background-color: #E6E6D6;
    font-family: "Lucida Console", "Courier New", Courier, monospace;
    text-align: left;
    font-size: 8pt;
    width: 500px;
}
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
    margin-top: 3ex;
    margin-bottom: 1ex;
}
#result_table {
    width: 80%;
    border-collapse: collapse;
    border: 1px solid #777;
}
#header_row {
    font-weight: bold;
    color: white;
    background-color: #777;
}
#result_table td {
    border: 1px solid #777;
    padding: 2px;
}
#total_row { font-weight: bold; }
.passClass { background-color: #6c6; }
.failClass { background-color: #c60; }
.errorClass { background-color: #c00; color:#FFF}
.passCase { color: #6c6; }
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }

/* -- ending ---------------------------------------------------------------------- */
#ending {
}

</style>
"""

    # ------------------------------------------------------------------------
    # Heading
    #
    HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>

"""  # variables: (title, parameters, description)

    HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
"""  # variables: (name, value)

    # ------------------------------------------------------------------------
    # Report
    #
    REPORT_TMPL = """
<p id='show_detail_line'>Show
<a href='javascript:showCase(0)'>Summary</a>
<a href='javascript:showCase(1)'>Failed</a>
<a href='javascript:showCase(2)'>All</a>
</p>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
    <td>Test Group/Test case</td>
    <td>Count</td>
    <td>Pass</td>
    <td>Fail</td>
    <td>Error</td>
    <td>View</td>
</tr>
%(test_list)s
<tr id='total_row'>
    <td>Total</td>
    <td>%(count)s</td>
    <td>%(Pass)s</td>
    <td>%(fail)s</td>
    <td>%(error)s</td>
    <td> </td>
</tr>
</table>
"""  # variables: (test_list, count, Pass, fail, error)

    REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
    <td>%(desc)s</td>
    <td>%(count)s</td>
    <td>%(Pass)s</td>
    <td>%(fail)s</td>
    <td>%(error)s</td>
    <td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">Detail</a></td>
</tr>
"""  # variables: (style, desc, count, Pass, fail, error, cid)

    REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='5' align='center'>

    <!--css div popup start-->
    <a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
        %(status)s</a>

    <div id='div_%(tid)s' class="popup_window">
        <div style='text-align: right; color:red;cursor:pointer'>
        <a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
           [x]</a>
        </div>
        <pre>
        %(script)s
        </pre>
    </div>
    <!--css div popup end-->

    </td>
</tr>
"""  # variables: (tid, Class, style, desc, status)

    REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
    <td class='%(style)s'><div class='testcase'>%(desc)s</div></td>
    <td colspan='5' align='center'>%(status)s</td>
</tr>
"""  # variables: (tid, Class, style, desc, status)

    REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
"""  # variables: (id, output)

    # ------------------------------------------------------------------------
    # ENDING
    #
    ENDING_TMPL = """<div id='ending'> </div>"""

# -------------------- The end of the Template class -------------------
# Module-level alias so the class below can be subclassed/reassigned easily.
TestResult = unittest.TestResult

class _TestResult(TestResult):
    # note: _TestResult is a pure representation of results.
    # It lacks the output and reporting ability compared to unittest._TextTestResult.

    def __init__(self, verbosity=1):
        """Create the collector.

        :param verbosity: >1 prints one status line per test to stderr,
            otherwise a single progress character per test.
        """
        TestResult.__init__(self)
        self.stdout0 = None
        self.stderr0 = None
        self.success_count = 0
        self.failure_count = 0
        self.error_count = 0
        self.verbosity = verbosity
        # result is a list of result in 4 tuple
        # (
        #   result code (0: success; 1: fail; 2: error),
        #   TestCase object,
        #   Test output (byte string),
        #   stack trace,
        # )
        self.result = []

    def startTest(self, test):
        """Begin capturing stdout/stderr for this test."""
        TestResult.startTest(self, test)
        # just one buffer for both stdout and stderr
        self.outputBuffer = StringIO.StringIO()
        stdout_redirector.fp = self.outputBuffer
        stderr_redirector.fp = self.outputBuffer
        # stdout0/stderr0 hold the real streams so they can be restored;
        # they double as the "redirection is active" flag.
        self.stdout0 = sys.stdout
        self.stderr0 = sys.stderr
        sys.stdout = stdout_redirector
        sys.stderr = stderr_redirector

    def complete_output(self):
        """
        Disconnect output redirection and return buffer.
        Safe to call multiple times.
        """
        if self.stdout0:
            sys.stdout = self.stdout0
            sys.stderr = self.stderr0
            self.stdout0 = None
            self.stderr0 = None
        return self.outputBuffer.getvalue()

    def stopTest(self, test):
        # Usually one of addSuccess, addError or addFailure would have been called.
        # But there are some path in unittest that would bypass this.
        # We must disconnect stdout in stopTest(), which is guaranteed to be called.
        self.complete_output()

    def addSuccess(self, test):
        """Record a passing test (result code 0) and its captured output."""
        self.success_count += 1
        TestResult.addSuccess(self, test)
        output = self.complete_output()
        self.result.append((0, test, output, ''))
        if self.verbosity > 1:
            sys.stderr.write('ok ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('.')

    def addError(self, test, err):
        """Record an erroring test (result code 2) with its traceback text."""
        self.error_count += 1
        TestResult.addError(self, test, err)
        # base class has already formatted the traceback into self.errors
        _, _exc_str = self.errors[-1]
        output = self.complete_output()
        self.result.append((2, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('E  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('E')

    def addFailure(self, test, err):
        """Record a failing test (result code 1) with its traceback text."""
        self.failure_count += 1
        TestResult.addFailure(self, test, err)
        _, _exc_str = self.failures[-1]
        output = self.complete_output()
        self.result.append((1, test, output, _exc_str))
        if self.verbosity > 1:
            sys.stderr.write('F  ')
            sys.stderr.write(str(test))
            sys.stderr.write('\n')
        else:
            sys.stderr.write('F')
class HTMLTestRunner(Template_mixin):
    """Test runner that renders results into a single HTML document.

    The HTML fragments come from the ``Template_mixin`` attributes; ``run()``
    is the entry point, mirroring ``unittest.TextTestRunner.run``.
    """

    def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None):
        """
        :param stream: file-like object the final HTML document is written to.
        :param verbosity: forwarded to ``_TestResult``.
        :param title: report title; falls back to ``DEFAULT_TITLE``.
        :param description: report description; falls back to
            ``DEFAULT_DESCRIPTION``.
        """
        self.stream = stream
        self.verbosity = verbosity
        if title is None:
            self.title = self.DEFAULT_TITLE
        else:
            self.title = title
        if description is None:
            self.description = self.DEFAULT_DESCRIPTION
        else:
            self.description = description
        # taken at construction time, so any setup between construction and
        # run() is included in the reported duration
        self.startTime = datetime.datetime.now()

    def run(self, test):
        "Run the given test case or test suite."
        result = _TestResult(self.verbosity)
        test(result)
        self.stopTime = datetime.datetime.now()
        self.generateReport(test, result)
        #print('\nTime Elapsed: %s' % (self.stopTime-self.startTime), file=sys.stderr)
        #return result

    def sortResult(self, result_list):
        """Group the flat result list by test class, keeping first-seen
        class order. Returns ``[(cls, [(n, t, o, e), ...]), ...]``."""
        # unittest does not seems to run in any particular order.
        # Here at least we want to group them together by class.
        rmap = {}
        classes = []
        for n,t,o,e in result_list:
            cls = t.__class__
            if cls not in rmap:
                rmap[cls] = []
                classes.append(cls)
            rmap[cls].append((n,t,o,e))
        r = [(cls, rmap[cls]) for cls in classes]
        return r

    def getReportAttributes(self, result):
        """
        Return report attributes as a list of (name, value).
        Override this to add custom attributes.
        """
        startTime = str(self.startTime)[:19]
        duration = str(self.stopTime - self.startTime)
        status = []
        if result.success_count: status.append('Pass %s'    % result.success_count)
        if result.failure_count: status.append('Failure %s' % result.failure_count)
        if result.error_count:   status.append('Error %s'   % result.error_count  )
        if status:
            status = ' '.join(status)
        else:
            status = 'none'
        return [
            ('Start Time', startTime),
            ('Duration', duration),
            ('Status', status),
        ]

    def generateReport(self, test, result):
        """Assemble the full HTML document and write it to ``self.stream``."""
        report_attrs = self.getReportAttributes(result)
        generator = 'HTMLTestRunner %s' % __version__
        stylesheet = self._generate_stylesheet()
        heading = self._generate_heading(report_attrs)
        report = self._generate_report(result)
        ending = self._generate_ending()
        output = self.HTML_TMPL % dict(
            title = saxutils.escape(self.title),
            generator = generator,
            stylesheet = stylesheet,
            heading = heading,
            report = report,
            ending = ending,
        )
        #self.stream.write(output.encode('utf8'))
        self.stream.write(output)

    def _generate_stylesheet(self):
        # kept as a method so subclasses can inject their own CSS
        return self.STYLESHEET_TMPL

    def _generate_heading(self, report_attrs):
        """Render the title/description block with one line per attribute."""
        a_lines = []
        for name, value in report_attrs:
            line = self.HEADING_ATTRIBUTE_TMPL % dict(
                name = saxutils.escape(name),
                value = saxutils.escape(value),
            )
            a_lines.append(line)
        heading = self.HEADING_TMPL % dict(
            title = saxutils.escape(self.title),
            parameters = ''.join(a_lines),
            description = saxutils.escape(self.description),
        )
        return heading

    def _generate_report(self, result):
        """Render the result table: one summary row per class followed by
        its per-test rows, then the grand-total row."""
        rows = []
        sortedResult = self.sortResult(result.result)
        for cid, (cls, cls_results) in enumerate(sortedResult):
            # subtotal for a class
            np = nf = ne = 0
            for n,t,o,e in cls_results:
                if n == 0: np += 1
                elif n == 1: nf += 1
                else: ne += 1
            # format class description
            if cls.__module__ == "__main__":
                name = cls.__name__
            else:
                name = "%s.%s" % (cls.__module__, cls.__name__)
            doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
            desc = doc and '%s: %s' % (name, doc) or name
            row = self.REPORT_CLASS_TMPL % dict(
                # worst outcome in the class decides the row's CSS class
                style = ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
                desc = desc,
                count = np+nf+ne,
                Pass = np,
                fail = nf,
                error = ne,
                cid = 'c%s' % (cid+1),
            )
            rows.append(row)
            for tid, (n,t,o,e) in enumerate(cls_results):
                self._generate_report_test(rows, cid, tid, n, t, o, e)
        report = self.REPORT_TMPL % dict(
            test_list = ''.join(rows),
            count = str(result.success_count+result.failure_count+result.error_count),
            Pass = str(result.success_count),
            fail = str(result.failure_count),
            error = str(result.error_count),
        )
        return report

    def _generate_report_test(self, rows, cid, tid, n, t, o, e):
        """Append one rendered row for a single test to ``rows``.

        :param n: result code (0 pass, 1 fail, 2 error).
        :param t: the TestCase instance.
        :param o: captured output; :param e: traceback text.
        """
        # e.g. 'pt1.1', 'ft1.1', etc
        has_output = bool(o or e)
        tid = (n == 0 and 'p' or 'f') + 't%s.%s' % (cid+1,tid+1)
        name = t.id().split('.')[-1]
        doc = t.shortDescription() or ""
        desc = doc and ('%s: %s' % (name, doc)) or name
        tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
        # o and e should be byte string because they are collected from stdout and stderr?
        #if isinstance(o,str):
        if isinstance(o,bytes):
            # TODO: some problem with 'string_escape': it escape \n and mess up formating
            # uo = unicode(o.encode('string_escape'))
            uo = o.decode('latin-1')
        else:
            uo = o
        #if isinstance(e,str):
        if isinstance(e,bytes):
            # TODO: some problem with 'string_escape': it escape \n and mess up formating
            # ue = unicode(e.encode('string_escape'))
            ue = e.decode('latin-1')
        else:
            ue = e
        script = self.REPORT_TEST_OUTPUT_TMPL % dict(
            id = tid,
            output = saxutils.escape(uo+ue),
        )
        row = tmpl % dict(
            tid = tid,
            # passing tests start hidden; shown via the class Detail link
            Class = (n == 0 and 'hiddenRow' or 'none'),
            style = n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'none'),
            desc = desc,
            script = script,
            status = self.STATUS[n],
        )
        rows.append(row)
        # NOTE(review): this trailing guard is a no-op — the function ends
        # here either way; kept for fidelity with upstream HTMLTestRunner.
        if not has_output:
            return

    def _generate_ending(self):
        return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
    """Drop-in variation of ``unittest.TestProgram`` producing HTML reports.

    Please refer to the base class for command line parameters.
    """

    def runTests(self):
        """Run collected tests, defaulting the runner to HTMLTestRunner.

        The base class's ``testRunner`` constructor parameter is not useful
        here, because an ``HTMLTestRunner`` cannot be instantiated before
        ``self.verbosity`` is known — so the default is injected lazily.
        """
        if self.testRunner is None:
            self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
        super(TestProgram, self).runTests()
# ``main`` mirrors the ``unittest.main`` naming convention.
main = TestProgram

##############################################################################
# Executing this module from the command line
##############################################################################

if __name__ == "__main__":
    # module=None makes TestProgram discover tests from sys.argv instead of
    # from this module.
    main(module=None)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.generate
---------------------
Functions for generating a project from a project template.
"""
from __future__ import unicode_literals
from collections import OrderedDict
import fnmatch
import io
import json
import logging
import os
import shutil
from jinja2 import FileSystemLoader, Template
from jinja2.environment import Environment
from jinja2.exceptions import TemplateSyntaxError
from binaryornot.check import is_binary
from .exceptions import NonTemplatedInputDirException, ContextDecodingException
from .find import find_template
from .utils import make_sure_path_exists, work_in
from .hooks import run_hook
def copy_without_render(path, context):
    """Check whether *path* is excluded from template rendering.

    A path is excluded when it matches at least one glob pattern listed
    under the ``_copy_without_render`` key of the cookiecutter context.

    :param path: A file-system path referring to a file or dir that
        should be rendered or just copied.
    :param context: cookiecutter context.
    :returns: True if the path must be copied verbatim, False otherwise.
    """
    try:
        patterns = context['cookiecutter']['_copy_without_render']
    except KeyError:
        # the setting is optional; absent means "render everything"
        return False
    return any(fnmatch.fnmatch(path, pattern) for pattern in patterns)
def generate_context(context_file='cookiecutter.json', default_context=None,
                     extra_context=None):
    """
    Generates the context for a Cookiecutter project template.
    Loads the JSON file as a Python object, with key being the JSON filename.

    :param context_file: JSON file containing key/value pairs for populating
        the cookiecutter's variables.
    :param default_context: Dictionary containing config to take into account.
    :param extra_context: Dictionary containing configuration overrides
    :raises ContextDecodingException: if ``context_file`` is not valid JSON.
    :returns: dict mapping the file stem (e.g. ``'cookiecutter'``) to the
        decoded (ordered) JSON object.
    """
    context = {}

    # BUGFIX: the original opened the file without ever closing it, leaking
    # the handle on both the success and the error path. A context manager
    # guarantees it is closed even when json.load raises.
    try:
        with open(context_file) as file_handle:
            obj = json.load(file_handle, object_pairs_hook=OrderedDict)
    except ValueError as e:
        # JSON decoding error. Let's throw a new exception that is more
        # friendly for the developer or user.
        full_fpath = os.path.abspath(context_file)
        json_exc_message = str(e)
        our_exc_message = (
            'JSON decoding error while loading "{0}". Decoding'
            ' error details: "{1}"'.format(full_fpath, json_exc_message))
        raise ContextDecodingException(our_exc_message)

    # Add the Python object to the context dictionary
    file_name = os.path.split(context_file)[1]
    file_stem = file_name.split('.')[0]
    context[file_stem] = obj

    # Overwrite context variable defaults with the default context from the
    # user's global config, if available
    if default_context:
        obj.update(default_context)
    if extra_context:
        obj.update(extra_context)

    logging.debug('Context generated is {0}'.format(context))
    return context
def generate_file(project_dir, infile, context, env):
    """
    1. Render the filename of infile as the name of outfile.
    2. Deal with infile appropriately:

        a. If infile is a binary file, copy it over without rendering.
        b. If infile is a text file, render its contents and write the
           rendered infile to outfile.

    Precondition:

        When calling `generate_file()`, the root template dir must be the
        current working directory. Using `utils.work_in()` is the recommended
        way to perform this directory change.

    :param project_dir: Absolute path to the resulting generated project.
    :param infile: Input file to generate the file from. Relative to the root
        template dir.
    :param context: Dict for populating the cookiecutter's variables.
    :param env: Jinja2 template execution environment.
    """
    logging.debug('Generating file {0}'.format(infile))

    # Render the path to the output file (not including the root project dir)
    outfile_tmpl = Template(infile)
    outfile = os.path.join(project_dir, outfile_tmpl.render(**context))
    logging.debug('outfile is {0}'.format(outfile))

    # Just copy over binary files. Don't render.
    logging.debug("Check {0} to see if it's a binary".format(infile))
    if is_binary(infile):
        logging.debug('Copying binary {0} to {1} without rendering'
                      .format(infile, outfile))
        shutil.copyfile(infile, outfile)
    else:
        # Force fwd slashes on Windows for get_template
        # This is a by-design Jinja issue
        infile_fwd_slashes = infile.replace(os.path.sep, '/')

        # Render the file
        try:
            tmpl = env.get_template(infile_fwd_slashes)
        except TemplateSyntaxError as exception:
            # Disable translated so that printed exception contains verbose
            # information about syntax error location
            exception.translated = False
            raise
        rendered_file = tmpl.render(**context)

        logging.debug('Writing {0}'.format(outfile))

        with io.open(outfile, 'w', encoding='utf-8') as fh:
            fh.write(rendered_file)

    # Apply file permissions to output file (both branches above create
    # outfile with default permissions)
    shutil.copymode(infile, outfile)
def render_and_create_dir(dirname, context, output_dir):
    """
    Renders the name of a directory, creates the directory, and
    returns its path.
    """
    rendered_dirname = Template(dirname).render(**context)
    logging.debug('Rendered dir {0} must exist in output_dir {1}'.format(
        rendered_dirname,
        output_dir
    ))
    dir_to_create = os.path.normpath(os.path.join(output_dir, rendered_dirname))
    make_sure_path_exists(dir_to_create)
    return dir_to_create
def ensure_dir_is_templated(dirname):
    """
    Ensures that dirname is a templated directory name.

    :returns: True when the name contains Jinja2 ``{{ ... }}`` markers.
    :raises NonTemplatedInputDirException: when it does not.
    """
    if '{{' not in dirname or '}}' not in dirname:
        raise NonTemplatedInputDirException
    return True
def generate_files(repo_dir, context=None, output_dir='.'):
    """
    Renders the templates and saves them to files.

    :param repo_dir: Project template input directory.
    :param context: Dict for populating the template's variables.
    :param output_dir: Where to output the generated project dir into.
    """
    template_dir = find_template(repo_dir)
    logging.debug('Generating project from {0}...'.format(template_dir))
    context = context or {}

    unrendered_dir = os.path.split(template_dir)[1]
    ensure_dir_is_templated(unrendered_dir)
    project_dir = render_and_create_dir(unrendered_dir, context, output_dir)

    # We want the Jinja path and the OS paths to match. Consequently, we'll:
    # + CD to the template folder
    # + Set Jinja's path to '.'
    #
    # In order to build our files to the correct folder(s), we'll use an
    # absolute path for the target folder (project_dir)
    project_dir = os.path.abspath(project_dir)
    logging.debug('project_dir is {0}'.format(project_dir))

    # run pre-gen hook from repo_dir
    with work_in(repo_dir):
        run_hook('pre_gen_project', project_dir, context)

    with work_in(template_dir):
        env = Environment(keep_trailing_newline=True)
        env.loader = FileSystemLoader('.')

        for root, dirs, files in os.walk('.'):
            # We must separate the two types of dirs into different lists.
            # The reason is that we don't want ``os.walk`` to go through the
            # unrendered directories, since they will just be copied.
            copy_dirs = []
            render_dirs = []

            for d in dirs:
                d_ = os.path.normpath(os.path.join(root, d))
                # We check the full path, because that's how it can be
                # specified in the ``_copy_without_render`` setting, but
                # we store just the dir name
                if copy_without_render(d_, context):
                    copy_dirs.append(d)
                else:
                    render_dirs.append(d)

            for copy_dir in copy_dirs:
                indir = os.path.normpath(os.path.join(root, copy_dir))
                outdir = os.path.normpath(os.path.join(project_dir, indir))
                logging.debug(
                    'Copying dir {0} to {1} without rendering'
                    ''.format(indir, outdir)
                )
                shutil.copytree(indir, outdir)

            # We mutate ``dirs``, because we only want to go through these dirs
            # recursively
            dirs[:] = render_dirs
            for d in dirs:
                unrendered_dir = os.path.join(project_dir, root, d)
                render_and_create_dir(unrendered_dir, context, output_dir)

            for f in files:
                infile = os.path.normpath(os.path.join(root, f))
                # files matching _copy_without_render get their *names*
                # rendered but their contents copied verbatim
                if copy_without_render(infile, context):
                    outfile_tmpl = Template(infile)
                    outfile_rendered = outfile_tmpl.render(**context)
                    outfile = os.path.join(project_dir, outfile_rendered)
                    logging.debug(
                        'Copying file {0} to {1} without rendering'
                        ''.format(infile, outfile)
                    )
                    shutil.copyfile(infile, outfile)
                    shutil.copymode(infile, outfile)
                    continue
                logging.debug('f is {0}'.format(f))
                generate_file(project_dir, infile, context, env)

    # run post-gen hook from repo_dir
    with work_in(repo_dir):
        run_hook('post_gen_project', project_dir, context)
|
|
"""
This is fast version of DecisionTreeRegressor for only one target function.
(This is the most simple case, but even multi-class boosting doesn't need more complicated things)
I need numpy implementation mostly for further experiments, rather than for real speedup.
This tree shouldn't be used by itself, only in boosting techniques
"""
from __future__ import division, print_function, absolute_import
from collections import OrderedDict
import numpy
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.linear_model import LogisticRegression, SGDClassifier, LinearRegression
from sklearn.utils.validation import check_random_state
__author__ = 'Alex Rogozhnikov'
# Criterion is minimized in tree
class MseCriterion(object):
    """Mean-squared-error splitting criterion (cost is minimized)."""

    @staticmethod
    def compute_best_splits(data, y, sample_weight):
        # sort each feature column independently; orders: [n_samples, n_features]
        orders = numpy.argsort(data, axis=0)
        answers = y[orders]
        weights = sample_weight[orders]
        left_sum, right_sum = _compute_cumulative_sums(answers * weights)
        # tiny epsilon keeps the divisions below well-defined for empty sides
        left_weights, right_weights = _compute_cumulative_sums(weights + 1e-20)
        # mse = left_sq + right_sq - left_sum ** 2 / left_weights - right_sum ** 2 / right_weights
        # one can see that left_sq + right_sq is constant, and can be omitted, so we have:
        costs = - (left_sum ** 2 / left_weights + right_sum ** 2 / right_weights)
        return _compute_cuts_costs_positions(costs, data=data, orders=orders)
class FriedmanMseCriterion(object):
    """Friedman's improved MSE criterion (as in sklearn's 'friedman_mse')."""

    @staticmethod
    def compute_best_splits(data, y, sample_weight):
        orders = numpy.argsort(data, axis=0)
        answers = y[orders]
        weights = sample_weight[orders]
        left_sum, right_sum = _compute_cumulative_sums(answers * weights)
        left_weights, right_weights = _compute_cumulative_sums(weights + 1e-50)
        # difference of weighted means between the two sides of each split
        diff = left_sum / left_weights - right_sum / right_weights
        # improvement = n_left * n_right * diff ^ 2 / (n_left + n_right)
        costs = - left_weights * right_weights * (diff ** 2)
        return _compute_cuts_costs_positions(costs, data=data, orders=orders)
class PValueCriterion(object):
    """Rank-based (p-value-like) criterion, robust to target outliers."""

    @staticmethod
    def compute_best_splits(data, y, sample_weight):
        # replace y by its rank among all targets
        y_order = numpy.argsort(numpy.argsort(y))
        # converting to [-1, 1]
        y_order = numpy.linspace(-1, 1, len(y_order))[y_order]

        orders = numpy.argsort(data, axis=0)
        # answers = y[orders]
        pred_orders = y_order[orders]
        weights = sample_weight[orders]
        left_sum, right_sum = _compute_cumulative_sums(pred_orders * weights)
        left_weights, right_weights = _compute_cumulative_sums(weights + 1e-50)

        # regularization discourages splits with a nearly-empty side
        regularization = 0.01 * numpy.sum(sample_weight)
        # mean = 0, var = left_weights * right_weights
        costs = - numpy.abs(left_sum) / len(y)
        costs /= numpy.sqrt((left_weights + regularization) * (right_weights + regularization))
        return _compute_cuts_costs_positions(costs, data=data, orders=orders)
class AbstractClassificationCriterion(object):
    """Base class for classification splitting criteria.

    Subclasses implement :meth:`compute_costs`. Events with positive ``y``
    contribute to the signal sums, negative ``y`` to the background sums.
    """

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        # s_* / b_* are cumulative signal/background weighted sums on each
        # side of every candidate split, shape [n_splits, n_features]
        raise NotImplementedError('Should be overloaded')

    @classmethod
    def compute_best_splits(cls, data, y, sample_weight):
        orders = numpy.argsort(data, axis=0)
        answers = y[orders]
        weights = sample_weight[orders]
        # split y by sign: positive part is "signal", negative is "background"
        pos_answers = answers * (answers > 0)
        neg_answers = - answers * (answers < 0)
        left_pos_sum, right_pos_sum = _compute_cumulative_sums(pos_answers * weights)
        left_neg_sum, right_neg_sum = _compute_cumulative_sums(neg_answers * weights)
        # using passed function to compute criterion
        costs = cls.compute_costs(left_pos_sum, right_pos_sum, left_neg_sum, right_neg_sum)
        return _compute_cuts_costs_positions(costs, data=data, orders=orders)
class GiniCriterion(AbstractClassificationCriterion):
    """Gini-like impurity: sum over both partitions of s * b / (s + b)."""

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        cost_left = s_left * b_left / (s_left + b_left)
        cost_right = s_right * b_right / (s_right + b_right)
        return cost_left + cost_right
class EntropyCriterion(AbstractClassificationCriterion):
    """Entropy-based cost with additive regularization inside the logs."""

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        regularization = 10
        n_left = s_left + b_left
        n_right = s_right + b_right
        # negated information gain (totals' entropy minus partitions')
        return -(
            s_left * numpy.log(s_left + regularization)
            + s_right * numpy.log(s_right + regularization)
            + b_left * numpy.log(b_left + regularization)
            + b_right * numpy.log(b_right + regularization)
            - n_left * numpy.log(n_left + regularization)
            - n_right * numpy.log(n_right + regularization)
        )
class SignificanceCriterion(AbstractClassificationCriterion):
    """Negative sum of regularized signal significances s**2 / (b + 5)."""

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        return - (s_left ** 2 / (b_left + 5) + s_right ** 2 / (b_right + 5))
class SymmetricSignificanceCriterion(AbstractClassificationCriterion):
    """Like SignificanceCriterion, but symmetric in signal and background."""

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        regularization = 10
        significance = s_left ** 2 / (b_left + regularization) + s_right ** 2 / (b_right + regularization)
        significance += b_left ** 2 / (s_left + regularization) + b_right ** 2 / (s_right + regularization)
        return -significance
class PoissonSignificanceCriterion(AbstractClassificationCriterion):
    """Poisson likelihood-ratio-flavoured cost, regularized by +20."""

    @staticmethod
    def compute_costs(s_left, s_right, b_left, b_right):
        regularization = 20
        left_term = (s_left - b_left) * numpy.log((s_left + regularization) / (b_left + regularization))
        right_term = (s_right - b_right) * numpy.log((s_right + regularization) / (b_right + regularization))
        return -(left_term + right_term)
def _compute_cumulative_sums(values):
# for each feature and for each split computes cumulative sums
left = numpy.cumsum(values, axis=0)
right = left[[-1], :] - left
return left[:-1, :], right[:-1, :]
def _compute_cuts_costs_positions(costs, data, orders):
"""
Supplementary function to compute things that criterion should return.
:param costs: [n_samples, n_features]
:param data: [n_samples, n_features] original dataset
:param orders: [n_samples, n_features] ordering of values for each feature
"""
optimal_costs = numpy.min(costs, axis=0)
optimal_sorted_positions = numpy.argmin(costs, axis=0)
features_index = numpy.arange(costs.shape[1])
assert (optimal_costs == costs[optimal_sorted_positions, features_index]).all()
optimal_orders = orders[optimal_sorted_positions, features_index]
_next_optimal_orders = orders[optimal_sorted_positions + 1, features_index]
optimal_cuts = (data[optimal_orders, features_index] + data[_next_optimal_orders, features_index]) / 2.
return optimal_cuts, optimal_costs, optimal_sorted_positions
# Registry mapping the string accepted by the ``criterion`` constructor
# parameter of the trees below to the class implementing it.
criterions = {'mse': MseCriterion,
              'fmse': FriedmanMseCriterion,
              'friedman-mse': FriedmanMseCriterion,
              'pvalue': PValueCriterion,
              'significance': SignificanceCriterion,
              'significance2': SymmetricSignificanceCriterion,
              'gini': GiniCriterion,
              'entropy': EntropyCriterion,
              'poisson': PoissonSignificanceCriterion
              }
class FastTreeRegressor(BaseEstimator, RegressorMixin):
    """Simple numpy-only regression tree for a single target.

    Intended as a weak learner inside boosting, not for standalone use
    (see module docstring).
    """

    def __init__(self,
                 max_depth=5,
                 max_features=None,
                 min_samples_split=40,
                 max_events_used=1000,
                 criterion='mse',
                 random_state=None):
        """
        :param max_depth: maximal depth of the tree.
        :param max_features: number of features tried at each split
            (None = all features).
        :param min_samples_split: a node with this many events or fewer
            becomes a leaf.
        :param max_events_used: at most this many (bootstrap-sampled) events
            are used to estimate the best split of a node.
        :param criterion: name of the splitting criterion, a key of the
            module-level ``criterions`` dict.
        :param random_state: seed / RandomState used for subsampling.
        """
        self.max_depth = max_depth
        self.max_features = max_features
        self.min_samples_split = min_samples_split
        self.max_events_used = max_events_used
        self.criterion = criterion
        self.random_state = random_state
        # keeps the indices of features and the values at which we split them.
        # dict{node_index -> (feature_index, split_value) or (leaf_value)}
        # Node index is defined as:
        # left: `0`bit, right: `1`bit.
        # Indices of some nodes: root - 1b, left child of root: 10b, it's right child: 101b.
        self.nodes_data = dict()

    def print_tree_stats(self):
        """Print node and leaf counts of the fitted tree."""
        print(len(self.nodes_data), ' nodes in tree')
        # leaves are stored as 1-tuples (leaf_value, ), inner nodes as pairs
        leaves = [k for k, v in self.nodes_data.items() if len(v) == 1]
        print(len(leaves), ' leaf nodes in tree')

    def print_tree(self, node_index=1, prefix=''):
        """Recursively pretty-print the subtree rooted at ``node_index``."""
        data = self.nodes_data[node_index]
        print(prefix, data)
        if len(data) > 1:
            left, right = self._children(node_index)
            self.print_tree(left, " " + prefix)
            self.print_tree(right, " " + prefix)

    @staticmethod
    def _children(node_index):
        # heap-style numbering: children of node i are 2*i and 2*i + 1
        left_node = 2 * node_index
        right_node = 2 * node_index + 1
        return left_node, right_node

    def _fit_tree_node(self, X, y, w, node_index, depth, passed_indices):
        """Recursive function to fit tree, rather simple implementation"""
        if len(passed_indices) <= self.min_samples_split or depth >= self.max_depth:
            # make a leaf: store the weighted mean of targets in this node
            self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )
            return

        selected_events = passed_indices
        if len(passed_indices) > self.max_events_used:
            # estimate the split on a bootstrap subsample for speed
            selected_events = self.random_state.choice(passed_indices, size=self.max_events_used, replace=True)

        selected_features = self.random_state.choice(self.n_features, size=self._n_used_features, replace=False)
        cuts, costs, _ = self._criterion.compute_best_splits(
            X[numpy.ix_(selected_events, selected_features)], y[selected_events], sample_weight=w[selected_events])

        # feature that showed best pre-estimated cost
        best_feature_index = numpy.argmin(costs)
        feature_index = selected_features[best_feature_index]
        split = cuts[best_feature_index]
        # computing information for (possible) children
        passed_left_subtree = passed_indices[X[passed_indices, feature_index] <= split]
        passed_right_subtree = passed_indices[X[passed_indices, feature_index] > split]
        left, right = self._children(node_index)
        if len(passed_left_subtree) == 0 or len(passed_right_subtree) == 0:
            # this will be leaf
            self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )
        else:
            # non-leaf, recurrent calls
            self.nodes_data[node_index] = (feature_index, split)
            self._fit_tree_node(X, y, w, left, depth + 1, passed_left_subtree)
            self._fit_tree_node(X, y, w, right, depth + 1, passed_right_subtree)

    def _apply_node(self, X, leaf_indices, predictions, node_index, passed_indices):
        """Recursive function to compute the index """
        node_data = self.nodes_data[node_index]
        if len(node_data) == 1:
            # leaf node: record node index and its stored prediction
            leaf_indices[passed_indices] = node_index
            predictions[passed_indices] = node_data[0]
        else:
            # non-leaf: route each event to the side of the split it falls on
            feature_index, split = node_data
            to_right = X[passed_indices, feature_index] > split
            passed_left_subtree = passed_indices[~to_right]
            passed_right_subtree = passed_indices[to_right]
            left, right = self._children(node_index)
            self._apply_node(X, leaf_indices, predictions, left, passed_left_subtree)
            self._apply_node(X, leaf_indices, predictions, right, passed_right_subtree)

    def apply(self, X):
        """For each event returns the index of leaf that event belongs to"""
        assert isinstance(X, numpy.ndarray), 'X should be numpy.array'
        leaf_indices = numpy.zeros(len(X), dtype=int)
        predictions = numpy.zeros(len(X), dtype=float)
        # this function fills leaf_indices array
        self._apply_node(X, leaf_indices, predictions, node_index=1, passed_indices=numpy.arange(len(X)))
        return leaf_indices, predictions

    def fast_apply(self, X):
        """Same as previous function, but uses special structures and vectorization """
        assert isinstance(X, numpy.ndarray), 'X should be numpy.array'
        # vertices in tree are enumerated (this time in dense way without binary notation), root is 0.
        ordered_nodes = OrderedDict(self.nodes_data)
        n_nodes = len(ordered_nodes)
        old2new_index = dict(zip(ordered_nodes.keys(), range(n_nodes)))
        child_indices = numpy.zeros(2 * n_nodes, dtype=int)
        features = numpy.zeros(n_nodes, dtype=int)
        splits = numpy.zeros(n_nodes, dtype=float)
        leaf_values = numpy.zeros(n_nodes, dtype=float)
        for i, (node_index, node_data) in enumerate(ordered_nodes.items()):
            if len(node_data) == 1:
                # leaf: point both children at itself so descent becomes a no-op
                leaf_values[i] = node_data[0]
                child_indices[2 * i] = i
                child_indices[2 * i + 1] = i
            else:
                feature_index, split = node_data
                features[i] = feature_index
                splits[i] = split
                left, right = self._children(node_index)
                child_indices[2 * i] = old2new_index[left]
                child_indices[2 * i + 1] = old2new_index[right]

        rows = numpy.arange(len(X))
        leaf_indices = numpy.zeros(len(X), dtype=int)
        # descend one level per iteration, for all events simultaneously
        for _ in range(self.max_depth + 1):
            passed = X[rows, features[leaf_indices]] > splits[leaf_indices]
            leaf_indices = child_indices[2 * leaf_indices + passed]
        return leaf_indices, leaf_values[leaf_indices]

    def fit(self, X, y, sample_weight, check_input=True):
        """Fit the tree.

        :param X: numpy.array [n_samples, n_features]
        :param y: numpy.array [n_samples] regression targets
        :param sample_weight: numpy.array [n_samples]
        :param check_input: validate array types and matching sizes first.
        :return: self
        """
        if check_input:
            assert isinstance(X, numpy.ndarray), "X should be numpy.array"
            assert isinstance(y, numpy.ndarray), "y should be numpy.array"
            assert isinstance(sample_weight, numpy.ndarray), "sample_weight should be numpy.array"
            assert len(X) == len(y) == len(sample_weight), 'Size of arrays is different'
        self.n_features = X.shape[1]
        if self.max_features is None:
            self._n_used_features = self.n_features
        else:
            self._n_used_features = min(self.n_features, self.max_features)
        self._criterion = criterions[self.criterion]
        self.random_state = check_random_state(self.random_state)
        self.nodes_data = dict()  # clearing previous fitting
        root_node_index = 1
        self._fit_tree_node(X=X, y=y, w=sample_weight, node_index=root_node_index, depth=0,
                            passed_indices=numpy.arange(len(X)))
        return self

    def predict(self, X):
        """Return the leaf prediction for each row of X."""
        leaf_indices, predictions = self.apply(X)
        return predictions
class FastNeuroTreeRegressor(FastTreeRegressor):
def __init__(self,
max_depth=5,
max_features=None,
min_samples_split=40,
min_samples_leaf=10,
n_lincomb=2,
n_events_form_lincomb=50,
max_events_used=1000,
criterion='mse',
random_state=None):
self.min_samples_leaf = min_samples_leaf
self.n_lincomb = n_lincomb
self.n_events_form_lincomb = n_events_form_lincomb
FastTreeRegressor.__init__(self,
max_depth=max_depth,
max_features=max_features,
min_samples_split=min_samples_split,
max_events_used=max_events_used,
criterion=criterion,
random_state=random_state)
def _fit_tree_node(self, X, y, w, node_index, depth, passed_indices):
"""Recursive function to fit tree, rather simple implementation"""
if len(passed_indices) <= self.min_samples_split or depth >= self.max_depth:
self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )
return
selected_events = passed_indices
if len(passed_indices) > self.max_events_used:
selected_events = self.random_state.choice(passed_indices, size=self.max_events_used, replace=True)
candidate_features = self.random_state.choice(self.n_features, replace=True,
size=[self._n_used_features, self.n_lincomb])
formed_data = numpy.zeros([len(selected_events), self._n_used_features])
candidate_lincomb_coefficients = numpy.zeros_like(candidate_features, dtype=float)
for i, lincomb_features in enumerate(candidate_features):
pre_events_used = selected_events[:self.n_events_form_lincomb]
data = X[numpy.ix_(pre_events_used, lincomb_features)]
w_used = w[pre_events_used]
lr = LinearRegression().fit(data * w_used[:, numpy.newaxis], y[pre_events_used] * w_used)
# normalizing coeffs
coeffs = lr.coef_
coeffs /= numpy.abs(coeffs).sum() + 0.01
candidate_lincomb_coefficients[i, :] = coeffs
formed_data[:, i] = self._compute_lincomb(X, indices=selected_events,
lincomb_features=lincomb_features,
lincomb_coefficients=coeffs)
cuts, costs, _ = self._criterion.compute_best_splits(
formed_data, y[selected_events], sample_weight=w[selected_events])
# feature that showed best pre-estimated cost
combination_index = numpy.argmin(costs)
split = cuts[combination_index]
lincomb_features = candidate_features[combination_index, :]
lincomb_coefficients = candidate_lincomb_coefficients[combination_index, :]
# computing information for (possible) children
lincomb_values = self._compute_lincomb(X, indices=passed_indices,
lincomb_features=lincomb_features,
lincomb_coefficients=lincomb_coefficients)
passed_left_subtree = passed_indices[lincomb_values <= split]
passed_right_subtree = passed_indices[lincomb_values > split]
left, right = self._children(node_index)
if len(passed_left_subtree) < self.min_samples_leaf or len(passed_right_subtree) < self.min_samples_leaf:
# this will be leaf
self.nodes_data[node_index] = (numpy.average(y[passed_indices], weights=w[passed_indices]), )
else:
# non-leaf, recurrent calls
self.nodes_data[node_index] = (lincomb_features, lincomb_coefficients, split)
self._fit_tree_node(X, y, w, left, depth + 1, passed_left_subtree)
self._fit_tree_node(X, y, w, right, depth + 1, passed_right_subtree)
def _compute_lincomb(self, X, indices, lincomb_features, lincomb_coefficients):
result = numpy.zeros(len(indices))
for feature, coeff in zip(lincomb_features, lincomb_coefficients):
result += X[indices, feature] * coeff
return result
    def _apply_node(self, X, leaf_indices, predictions, node_index, passed_indices):
        """Recursively route the rows `passed_indices` through node `node_index`,
        filling `leaf_indices` (id of the leaf each row reaches) and `predictions`
        (that leaf's stored value) in place.
        """
        node_data = self.nodes_data[node_index]
        if len(node_data) == 1:
            # leaf node: the single stored element is the leaf's prediction value
            leaf_indices[passed_indices] = node_index
            predictions[passed_indices] = node_data[0]
        else:
            # non-leaf: node stores (features, coefficients, threshold) of a linear split
            lincomb_features, lincomb_coefficients, split = node_data
            lincomb_values = self._compute_lincomb(X, indices=passed_indices,
                                                   lincomb_features=lincomb_features,
                                                   lincomb_coefficients=lincomb_coefficients)
            # rows whose combination value is <= threshold go left, the rest go right
            passed_left_subtree = passed_indices[lincomb_values <= split]
            passed_right_subtree = passed_indices[lincomb_values > split]
            left, right = self._children(node_index)
            self._apply_node(X, leaf_indices, predictions, left, passed_left_subtree)
            self._apply_node(X, leaf_indices, predictions, right, passed_right_subtree)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import threading
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import shared_variable_creator
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import coordinator
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# TODO(josh11b): Replace asserts in this file with if ...: raise ...
@contextlib.contextmanager
def _enter_graph(g, eager, creator_stack=None):
  """Context manager that makes `g` the default graph, optionally in eager mode.

  If `creator_stack` is given, it is installed as the graph's variable-creator
  stack inside the entered context.
  """
  def _install_creator_stack():
    # Carry the caller's variable-creator stack into the entered graph context.
    if creator_stack is not None:
      g._variable_creator_stack = creator_stack  # pylint: disable=protected-access
  if eager:
    with g.as_default(), context.eager_mode():
      _install_creator_stack()
      yield
  else:
    with g.as_default():
      _install_creator_stack()
      yield
def _cpu_device(device):
  """Return the device string for CPU:0 on the same job/replica/task as `device`."""
  spec = tf_device.DeviceSpec.from_string(device)
  # Overwrite only the device component; job/replica/task are kept intact.
  spec.merge_from(tf_device.DeviceSpec(device_type="CPU", device_index=0))
  return spec.to_string()
class _RequestedStop(Exception): # pylint: disable=g-bad-exception-name
  """Raised inside replica threads to unwind them when the coordinator stops."""
  pass
# _call_for_each_replica is not a member of MirroredStrategy so that it is
# not allowed to use anything specific to MirroredStrategy and thus
# can be shared with other distribution strategies.
# TODO(yuefengz): maybe create a common class for those who need to call this
# _call_for_each_replica.
def _call_for_each_replica(distribution, device_map, fn, args, kwargs):
  """Run `fn` in separate threads, once per replica/worker device.
  Args:
    distribution: the DistributionStrategy object.
    device_map: the DeviceMap with the devices to run `fn` on.
    fn: function to run (will be run once per replica, each in its own thread).
    args: positional arguments for `fn`
    kwargs: keyword arguments for `fn`.
  Returns:
    Merged return value of `fn` across all replicas.
  Raises:
    RuntimeError: If fn() calls get_replica_context().merge_call() a different
    number of times from the available devices.
  """
  # TODO(josh11b): Add this option once we add synchronization to variable
  # creation. Until then, this is pretty unsafe to use.
  run_concurrently = False
  if not context.executing_eagerly():
    # Needed for per-thread device, etc. contexts in graph mode.
    ops.get_default_graph().switch_to_thread_local()
  # _RequestedStop is a "clean" stop: it must not be re-raised by the coord.
  coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))
  shared_variable_store = {}
  # TODO(isaprykin): Create these threads once instead of during every call.
  threads = []
  for index in range(device_map.num_replicas_in_graph):
    variable_creator_fn = shared_variable_creator.make_fn(
        shared_variable_store, index)
    # select_replica picks this replica's slice out of any PerReplica values.
    t = _MirroredReplicaThread(
        distribution, coord, index, device_map, variable_creator_fn, fn,
        values.select_replica(index, args),
        values.select_replica(index, kwargs))
    threads.append(t)
  for t in threads:
    t.start()
  # When `fn` starts `should_run` event is set on _MirroredReplicaThread
  # (`MRT`) threads. The execution waits until
  # `MRT.has_paused` is set, which indicates that either `fn` is
  # complete or a `get_replica_context().merge_call()` is called. If `fn` is
  # complete, then `MRT.done` is set to True. Otherwise, arguments
  # of `get_replica_context().merge_call` from all paused threads are grouped
  # and the `merge_fn` is performed. Results of the
  # `get_replica_context().merge_call` are then set to `MRT.merge_result`.
  # Each such `get_replica_context().merge_call` call returns the
  # `MRT.merge_result` for that thread when `MRT.should_run` event
  # is reset again. Execution of `fn` resumes.
  try:
    with coord.stop_on_exception():
      all_done = False
      while not all_done and not coord.should_stop():
        done = []
        if run_concurrently:
          for t in threads:
            t.should_run.set()
          for t in threads:
            t.has_paused.wait()
            t.has_paused.clear()
            if coord.should_stop():
              return None
            done.append(t.done)
        else:
          # Sequential mode: resume each thread and wait for it to pause
          # before resuming the next one.
          for t in threads:
            t.should_run.set()
            t.has_paused.wait()
            t.has_paused.clear()
            if coord.should_stop():
              return None
            done.append(t.done)
        if coord.should_stop():
          return None
        all_done = all(done)
        if not all_done:
          # Threads must all pause at the same merge_call; a mix of finished
          # and paused threads means their merge_call counts diverged.
          if any(done):
            raise RuntimeError("Some replicas made a different number of "
                               "replica_context().merge_call() calls.")
          # get_replica_context().merge_call() case
          merge_args = values.regroup(
              device_map, tuple(t.merge_args for t in threads))
          merge_kwargs = values.regroup(
              device_map, tuple(t.merge_kwargs for t in threads))
          # We capture the name_scope of the MRT when we call merge_fn
          # to ensure that if we have opened a name scope in the MRT,
          # it will be respected when executing the merge function. We only
          # capture the name_scope from the first MRT and assume it is
          # the same for all other MRTs.
          mtt_captured_name_scope = threads[0].captured_name_scope
          # Capture and merge the control dependencies from all the threads.
          mtt_captured_control_deps = set()
          for t in threads:
            mtt_captured_control_deps.update(t.captured_control_deps)
          with ops.name_scope(mtt_captured_name_scope),\
              ops.control_dependencies(mtt_captured_control_deps):
            merge_result = threads[0].merge_fn(distribution, *merge_args,
                                               **merge_kwargs)
          for r, t in enumerate(threads):
            t.merge_result = values.select_replica(r, merge_result)
  finally:
    # Unblock all threads so they can observe the stop request and exit.
    for t in threads:
      t.should_run.set()
    coord.join(threads)
  return values.regroup(device_map, tuple(t.main_result for t in threads))
def _create_mirrored_variable(strategy, device_map, logical_device, # pylint: disable=missing-docstring
                              real_mirrored_creator, *args, **kwargs):
  # Creates one variable per device via `real_mirrored_creator` and wraps them
  # in a MirroredVariable (or ReplicaLocalVariable for ON_READ sync).
  # Figure out what collections this variable should be added to.
  # We'll add the MirroredVariable to those collections instead.
  collections = kwargs.pop("collections", None)
  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  kwargs["collections"] = []
  # Get synchronization value
  synchronization = kwargs.get("synchronization",
                               variable_scope.VariableSynchronization.ON_WRITE)
  if synchronization == variable_scope.VariableSynchronization.NONE:
    raise ValueError("`NONE` variable synchronization mode is not "
                     "supported with `Mirrored` distribution strategy. Please"
                     " change the `synchronization` for variable: " +
                     kwargs["name"])
  elif synchronization == variable_scope.VariableSynchronization.ON_READ:
    # Variables that are to be synced on read are replica local.
    is_replica_local = True
    kwargs["trainable"] = False
  elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or
        synchronization == variable_scope.VariableSynchronization.AUTO):
    # `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`.
    is_replica_local = False
  else:
    raise ValueError("Invalid variable synchronization mode: " +
                     synchronization + " for variable: " + kwargs["name"])
  # Get aggregation value
  aggregation = kwargs.pop("aggregation",
                           variable_scope.VariableAggregation.NONE)
  if aggregation not in (
      variable_scope.VariableAggregation.NONE,
      variable_scope.VariableAggregation.SUM,
      variable_scope.VariableAggregation.MEAN,
      variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
  ):
    raise ValueError("Invalid variable aggregation mode: " + aggregation +
                     " for variable: " + kwargs["name"])
  # Ignore user-specified caching device, not needed for mirrored variables.
  kwargs.pop("caching_device", None)
  # TODO(josh11b,apassos): It would be better if variable initialization
  # was never recorded on the tape instead of having to do this manually
  # here.
  with tape.stop_recording():
    devices = device_map.logical_to_actual_devices(logical_device)
    value_list = real_mirrored_creator(devices, *args, **kwargs)
    if is_replica_local:
      result = values.ReplicaLocalVariable(
          strategy, device_map, value_list, aggregation,
          logical_device=logical_device)
    else:
      result = values.MirroredVariable(
          strategy, device_map, value_list, aggregation,
          logical_device=logical_device)
  # Add the wrapped variable to the requested collections.
  # The handling of eager mode and the global step matches
  # ResourceVariable._init_from_args().
  if not context.executing_eagerly():
    g = ops.get_default_graph()
    # If "trainable" is True, next_creator() will add the member variables
    # to the TRAINABLE_VARIABLES collection, so we manually remove
    # them and replace with the MirroredVariable. We can't set
    # "trainable" to False for next_creator() since that causes functions
    # like implicit_gradients to skip those variables.
    if kwargs.get("trainable", True):
      collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
      l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
      for v in value_list:
        if v in l:
          l.remove(v)
    g.add_to_collections(collections, result)
  elif ops.GraphKeys.GLOBAL_STEP in collections:
    ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)
  return result
def _is_device_list_local(devices):
  """Checks whether the devices list is for local or multi-worker.
  Args:
    devices: a list of device strings, either local or remote devices.
  Returns:
    a boolean indicating whether these device strings are for local or for
    remote.
  Raises:
    ValueError: if device strings are not consistent (a mix of local and
      remote, or a remote device without a task index).
  """
  all_local = None
  for d in devices:
    d_spec = tf_device.DeviceSpec().parse_from_string(d)
    # A device is "local" when its job is unset or explicitly "localhost".
    is_local = d_spec.job in (None, "localhost")
    if all_local is None: # Determine all_local from first device.
      all_local = is_local
    if all_local:
      if not is_local:
        raise ValueError("Local device string cannot have job specified other "
                         "than 'localhost'")
    else:
      if is_local:
        raise ValueError("Remote device string must have job specified.")
      if d_spec.task is None:
        raise ValueError("Remote device string must have task specified.")
  return all_local
def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker):
  """Returns a device list given a cluster spec.
  Args:
    cluster_spec: a cluster spec (dict, `ClusterDef` or `ClusterSpec`)
      describing the "chief" and "worker" tasks.
    num_gpus_per_worker: number of GPUs on each worker; 0 means CPU-only.
  Returns:
    a list of device strings, one per task when `num_gpus_per_worker` is 0,
    otherwise one per GPU of each task.
  """
  cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
  devices = []
  for task_type in ("chief", "worker"):
    for task_id in range(len(cluster_spec.as_dict().get(task_type, []))):
      # Fix: use `==` instead of `is` — identity comparison with an int
      # literal is implementation-defined (SyntaxWarning on Python 3.8+).
      if num_gpus_per_worker == 0:
        devices.append("/job:%s/task:%d" % (task_type, task_id))
      else:
        devices.extend([
            "/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id)
            for gpu_id in range(num_gpus_per_worker)
        ])
  return devices
def _group_device_list(devices):
  """Groups the devices list by task_type and task_id.
  Args:
    devices: a list of device strings for remote devices.
  Returns:
    a dict of list of device strings mapping from task_type to a list of devices
    for the task_type in the ascending order of task_id.
  """
  assert not _is_device_list_local(devices)
  device_dict = {}
  for d in devices:
    d_spec = tf_device.DeviceSpec().parse_from_string(d)
    # Create an entry for the task_type.
    if d_spec.job not in device_dict:
      device_dict[d_spec.job] = []
    # Fill the device list for task_type until it covers the task_id.
    # (Tasks may appear out of order, so pad with empty lists as needed.)
    while len(device_dict[d_spec.job]) <= d_spec.task:
      device_dict[d_spec.job].append([])
    device_dict[d_spec.job][d_spec.task].append(d)
  return device_dict
def _is_gpu_device(device):
  """Return True iff `device` is a GPU device string."""
  parsed = tf_device.DeviceSpec().parse_from_string(device)
  return parsed.device_type == "GPU"
def _infer_num_gpus_per_worker(devices):
  """Infers the number of GPUs on each worker.
  Currently to make multi-worker cross device ops work, we need all workers to
  have the same number of GPUs.
  Args:
    devices: a list of device strings, can be either local devices or remote
      devices.
  Returns:
    number of GPUs per worker.
  Raises:
    ValueError if workers have different number of GPUs or GPU indices are not
    consecutive and starting from 0.
  """
  if _is_device_list_local(devices):
    # Local case: simply count the GPU entries.
    return sum(1 for d in devices if _is_gpu_device(d))
  else:
    device_dict = _group_device_list(devices)
    num_gpus = None
    for _, devices_in_task in device_dict.items():
      for device_in_task in devices_in_task:
        if num_gpus is None:
          # First task seen defines the expected GPU count.
          num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d))
        # Verify other workers have the same number of GPUs.
        elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)):
          raise ValueError("All workers should have the same number of GPUs.")
        for d in device_in_task:
          d_spec = tf_device.DeviceSpec().parse_from_string(d)
          if (d_spec.device_type == "GPU" and
              d_spec.device_index >= num_gpus):
            raise ValueError("GPU `device_index` on a worker should be "
                             "consecutive and start from 0.")
    return num_gpus
def all_local_devices(num_gpus=None):
  """Return device strings for `num_gpus` local GPUs, or the CPU if none.

  When `num_gpus` is None, the GPU count is queried from the eager context.
  """
  if num_gpus is None:
    num_gpus = context.num_gpus()
  gpu_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
  return gpu_devices if gpu_devices else ("/device:CPU:0",)
@tf_export("distribute.MirroredStrategy")
class MirroredStrategy(distribute_lib.DistributionStrategy):
  """Mirrors vars to distribute across multiple devices and machines.
  This strategy uses one replica per device and sync replication for its
  multi-GPU version.
  The multi-worker version will be added in the future.
  Args:
    devices: a list of device strings.
    cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is
      not set, nccl will be used by default.
  """
  def __init__(self, devices=None, cross_device_ops=None):
    # All real work is delegated to MirroredExtended; this class is the thin
    # public-API wrapper.
    extended = MirroredExtended(
        self, devices=devices, cross_device_ops=cross_device_ops)
    super(MirroredStrategy, self).__init__(extended)
class MirroredExtended(distribute_lib.DistributionStrategyExtended):
  """Implementation of MirroredStrategy."""
  def __init__(self, container_strategy, devices=None, cross_device_ops=None):
    super(MirroredExtended, self).__init__(container_strategy)
    if devices is None:
      devices = all_local_devices()
    if not devices:
      raise ValueError("Got an empty `devices` list. Please make sure the "
                       "`devices` you pass in is not empty.")
    self._cross_device_ops = cross_device_ops
    self._initialize_strategy(devices)
  def _initialize_strategy(self, devices):
    # The _initialize_strategy method is intended to be used by distribute
    # coordinator as well.
    if _is_device_list_local(devices):
      self._initialize_local(devices)
    else:
      self._initialize_multi_worker(devices)
  def _initialize_local(self, devices):
    """Initializes the object for local training."""
    self._local_mode = True
    assert devices, "Must specify at least one device."
    devices = tuple(device_util.resolve(d) for d in devices)
    assert len(set(devices)) == len(devices), (
        "No duplicates allowed in `devices` argument: %s" % (devices,))
    # TODO(josh11b): Require at least 2 devices?
    self._device_map = values.ReplicaDeviceMap(devices)
    self._input_workers = input_lib.InputWorkers(self._device_map)
    self._inferred_cross_device_ops = cross_device_ops_lib.choose_the_best(
        devices)
    self._host_input_device = numpy_dataset.SingleDevice("/cpu:0")
  def _initialize_multi_worker(self, devices):
    """Initializes the object for multi-worker training."""
    self._local_mode = False
    assert devices, "Must specify at least one device."
    devices = tuple(device_util.resolve(d) for d in devices)
    assert len(set(devices)) == len(devices), (
        "No duplicates allowed in `devices` argument: %s" % devices)
    # TODO(josh11b): Require at least 2 devices?
    device_dict = _group_device_list(devices)
    workers = []
    worker_devices = []
    for job in ("chief", "worker"):
      for task in range(len(device_dict.get(job, []))):
        worker = "/job:%s/task:%d" % (job, task)
        workers.append(worker)
        worker_devices.append((worker, device_dict[job][task]))
    # Setting `_default_device` will add a device scope in the
    # distribution.scope. We set the default device to the first worker. When
    # users specify device under distribution.scope by
    # with tf.device("/cpu:0"):
    # ...
    # their ops will end up on the cpu device of its first worker, e.g.
    # "/job:worker/task:0/device:CPU:0". Note this is not used in replica mode.
    self._default_device = workers[0]
    self._host_input_device = numpy_dataset.SingleDevice(workers[0])
    self._device_map = values.ReplicaDeviceMap(devices)
    self._input_workers = input_lib.InputWorkers(
        self._device_map, worker_devices)
    self._inferred_cross_device_ops = cross_device_ops_lib.MultiWorkerAllReduce(
        workers, _infer_num_gpus_per_worker(devices))
  def _create_variable(self, next_creator, *args, **kwargs):
    """Create a mirrored variable. See `DistributionStrategy.scope`."""
    colocate_with = kwargs.pop("colocate_with", None)
    if colocate_with is None:
      device_map = self._device_map
      logical_device = 0 # TODO(josh11b): Get logical device from scope here.
    elif isinstance(colocate_with, numpy_dataset.SingleDevice):
      # Colocating with a single concrete device: create a plain variable there.
      with ops.device(colocate_with.device):
        return next_creator(*args, **kwargs)
    else:
      device_map = colocate_with.device_map
      logical_device = colocate_with.logical_device
    def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
      value_list = []
      for i, d in enumerate(devices):
        with ops.init_scope(), ops.device(d):
          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = value_list[0].name.split(":")[0]
            # We append a / to variable names created on replicas with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
            # Initialize replicas with the same value:
            def initial_value_fn(device=d):
              if context.executing_eagerly():
                init_value = value_list[0].value()
                return array_ops.identity(init_value)
              else:
                with ops.device(device):
                  init_value = value_list[0].initial_value
                  return array_ops.identity(init_value)
            kwargs["initial_value"] = initial_value_fn
          with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
            # Don't record operations (e.g. other variable reads) during
            # variable creation.
            with tape.stop_recording():
              v = next_creator(*args, **kwargs)
          assert not isinstance(v, values.DistributedVariable)
          value_list.append(v)
      return value_list
    return _create_mirrored_variable(
        self._container_strategy(), device_map, logical_device,
        _real_mirrored_creator, *args, **kwargs)
  def _validate_colocate_with_variable(self, colocate_with_variable):
    values.validate_colocate_distributed_variable(colocate_with_variable, self)
  def _make_dataset_iterator(self, dataset):
    return input_lib.DatasetIterator(
        dataset, self._input_workers, self._num_replicas_in_sync)
  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    # One InputContext per input worker so each pipeline knows its shard.
    input_contexts = []
    num_workers = self._input_workers.num_workers
    for i in range(num_workers):
      input_contexts.append(distribute_lib.InputContext(
          num_input_pipelines=num_workers,
          input_pipeline_id=i,
          num_replicas_in_sync=self._num_replicas_in_sync))
    return input_lib.InputFunctionIterator(
        input_fn, self._input_workers, input_contexts)
  def _experimental_make_numpy_dataset(self, numpy_input, session):
    return numpy_dataset.one_host_numpy_dataset(
        numpy_input, self._host_input_device, session)
  # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
  def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
                                          initial_loop_values=None):
    if initial_loop_values is None:
      initial_loop_values = {}
    initial_loop_values = nest.flatten(initial_loop_values)
    ctx = input_lib.MultiStepContext()
    def body(i, *args):
      """A wrapper around `fn` to create the while loop body."""
      del args
      fn_result = fn(ctx, iterator.get_next())
      for (name, output) in ctx.last_step_outputs.items():
        # Convert all outputs to tensors, potentially from `DistributedValues`.
        ctx.last_step_outputs[name] = self._unwrap(output)
      flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
      with ops.control_dependencies([fn_result]):
        return [i + 1] + flat_last_step_outputs
    # We capture the control_flow_context at this point, before we run `fn`
    # inside a while_loop. This is useful in cases where we might need to exit
    # these contexts and get back to the outer context to do some things, for
    # e.g. create an op which should be evaluated only once at the end of the
    # loop on the host. One such usage is in creating metrics' value op.
    self._outer_control_flow_context = (
        ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
    cond = lambda i, *args: i < iterations
    i = constant_op.constant(0)
    loop_result = control_flow_ops.while_loop(
        cond, body, [i] + initial_loop_values, name="",
        parallel_iterations=1, back_prop=False, swap_memory=False,
        return_same_structure=True)
    del self._outer_control_flow_context
    ctx.run_op = control_flow_ops.group(loop_result)
    # Convert the last_step_outputs from a list to the original dict structure
    # of last_step_outputs.
    last_step_tensor_outputs = loop_result[1:]
    last_step_tensor_outputs_dict = nest.pack_sequence_as(
        ctx.last_step_outputs, last_step_tensor_outputs)
    for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access
      output = last_step_tensor_outputs_dict[name]
      # For outputs that have already been reduced, wrap them in a Mirrored
      # container, else in a PerReplica container.
      if reduce_op is None:
        last_step_tensor_outputs_dict[name] = values.regroup(self._device_map,
                                                             output)
      else:
        assert len(output) == 1
        last_step_tensor_outputs_dict[name] = output[0]
    ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
    return ctx
  def _broadcast_to(self, tensor, destinations):
    # This is both a fast path for Python constants, and a way to delay
    # converting Python values to a tensor until we know what type it
    # should be converted to. Otherwise we have trouble with:
    # global_step.assign_add(1)
    # since the `1` gets broadcast as an int32 but global_step is int64.
    if isinstance(tensor, (float, int)):
      return tensor
    # TODO(josh11b): In eager mode, use one thread per device, or async mode.
    if not destinations:
      # TODO(josh11b): Use current logical device instead of 0 here.
      destinations = values.LogicalDeviceSpec(
          device_map=self._device_map, logical_device=0)
    return self._get_cross_device_ops().broadcast(tensor, destinations)
  def _call_for_each_replica(self, fn, args, kwargs):
    return _call_for_each_replica(self._container_strategy(), self._device_map,
                                  fn, args, kwargs)
  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    del task_type, task_id
    if session_config:
      session_config.CopyFrom(self._update_config_proto(session_config))
    if cluster_spec:
      # TODO(yuefengz): remove the following code once cluster_resolver is
      # added.
      num_gpus_per_worker = _infer_num_gpus_per_worker(
          self._device_map.all_devices)
      multi_worker_devices = _cluster_spec_to_device_list(
          cluster_spec, num_gpus_per_worker)
      self._initialize_multi_worker(multi_worker_devices)
  def _update_config_proto(self, config_proto):
    # Work on a copy so the caller's proto is not mutated.
    updated_config = copy.deepcopy(config_proto)
    updated_config.isolate_session_state = True
    return updated_config
  def _get_cross_device_ops(self):
    # A user-supplied cross-device-ops object takes precedence over the one
    # inferred at initialization time.
    return self._cross_device_ops or self._inferred_cross_device_ops
  def _reduce_to(self, reduce_op, value, destinations):
    if (isinstance(value, values.Mirrored) and
        reduce_op == reduce_util.ReduceOp.MEAN):
      return value
    assert not isinstance(value, values.Mirrored)
    if not isinstance(value, values.DistributedValues):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, self._device_map, value, destinations)
    return self._get_cross_device_ops().reduce(
        reduce_op, value, destinations=destinations)
  def _batch_reduce_to(self, reduce_op, value_destination_pairs):
    return self._get_cross_device_ops().batch_reduce(
        reduce_op, value_destination_pairs)
  def _update(self, var, fn, args, kwargs, group):
    # TODO(josh11b): In eager mode, use one thread per device.
    assert isinstance(var, values.DistributedVariable)
    updates = []
    for i, (d, v) in enumerate(zip(var.devices, var.values)):
      name = "update_%d" % i
      with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
        # If args and kwargs are not mirrored, the value is returned as is.
        updates.append(fn(v,
                          *values.select_device_mirrored(d, args),
                          **values.select_device_mirrored(d, kwargs)))
    return values.update_regroup(self, self._device_map, updates, group)
  def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
    assert isinstance(colocate_with, tuple)
    # TODO(josh11b): In eager mode, use one thread per device.
    updates = []
    for i, d in enumerate(colocate_with):
      name = "update_%d" % i
      with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
        updates.append(fn(*values.select_device_mirrored(d, args),
                          **values.select_device_mirrored(d, kwargs)))
    return values.update_regroup(self, self._device_map, updates, group)
  def read_var(self, replica_local_var):
    """Read the aggregate value of a replica-local variable."""
    if isinstance(replica_local_var, values.ReplicaLocalVariable):
      return replica_local_var._get_cross_replica() # pylint: disable=protected-access
    assert isinstance(replica_local_var, values.Mirrored)
    return array_ops.identity(replica_local_var.get())
  def _unwrap(self, val):
    # Returns the per-replica values as a tuple; a plain value becomes a
    # 1-tuple.
    if isinstance(val, values.DistributedValues):
      return val.values
    return (val,)
  def value_container(self, val):
    return values.value_container(val)
  @property
  def _num_replicas_in_sync(self):
    return self._device_map.num_replicas_in_graph
  @property
  def worker_devices(self):
    return self._device_map.all_devices
  @property
  def worker_devices_by_replica(self):
    return self._device_map.devices_by_replica
  @property
  def parameter_devices(self):
    # Mirrored variables live on every worker device.
    return self._device_map.all_devices
  @property
  def experimental_between_graph(self):
    return False
  @property
  def experimental_should_init(self):
    return True
  @property
  def should_checkpoint(self):
    return True
  @property
  def should_save_summary(self):
    return True
  def non_slot_devices(self, var_list):
    del var_list
    # TODO(josh11b): Should this be the last logical device instead?
    return self._device_map.logical_to_actual_devices(0)
  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    """`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
    `make_input_fn_iterator` assumes per-replica batching.
    Returns:
      Boolean.
    """
    return True
class _MirroredReplicaThread(threading.Thread):
  """A thread that runs() a function on a device."""
  def __init__(self, dist, coord, replica_id, device_map, variable_creator_fn,
               fn, args, kwargs):
    super(_MirroredReplicaThread, self).__init__()
    self.coord = coord
    self.distribution = dist
    self.device_map = device_map
    self.replica_id = replica_id
    self.variable_creator_fn = variable_creator_fn
    # State needed to run and return the results of `fn`.
    self.main_fn = fn
    self.main_args = args
    self.main_kwargs = kwargs
    self.main_result = None
    self.done = False
    # State needed to run the next merge_call() (if any) requested via
    # ReplicaContext.
    self.merge_fn = None
    self.merge_args = None
    self.merge_kwargs = None
    self.merge_result = None
    self.captured_name_scope = None
    # We use a thread.Event for the main thread to signal when this
    # thread should start running (`should_run`), and another for
    # this thread to transfer control back to the main thread
    # (`has_paused`, either when it gets to a
    # `get_replica_context().merge_call` or when `fn` returns). In
    # either case the event starts cleared, is signaled by calling
    # set(). The receiving thread waits for the signal by calling
    # wait() and then immediately clearing the event using clear().
    self.should_run = threading.Event()
    self.has_paused = threading.Event()
    # These fields have to do with inheriting various contexts from the
    # parent thread:
    ctx = context.context()
    self.in_eager = ctx.executing_eagerly()
    # pylint: disable=protected-access
    if not ctx._context_handle:
      ctx._initialize_handle_and_devices()
    self.context_device_policy = (
        pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(
            ctx._context_handle))
    self.graph = ops.get_default_graph()
    with ops.init_scope():
      self._init_in_eager = context.executing_eagerly()
      self._init_graph = ops.get_default_graph()
    # Snapshot (copy) the creator stack so later caller-side changes don't leak.
    self._variable_creator_stack = self.graph._variable_creator_stack[:]
    self._captured_var_scope = variable_scope.get_variable_scope()
    # Adding a "/" at end lets us re-enter this scope later.
    self._name_scope = self.graph.get_name_scope()
    if self._name_scope:
      self._name_scope += "/"
    if self.replica_id > 0:
      if not self._name_scope:
        self._name_scope = ""
      self._name_scope += "replica_%d/" % self.replica_id
  def run(self):
    # Wait for the coordinator (main thread) to tell us to start.
    self.should_run.wait()
    self.should_run.clear()
    try:
      if self.coord.should_stop():
        return
      # TODO(josh11b): Use current logical device instead of 0 here.
      # Re-enter all the contexts captured from the parent thread, then run fn.
      with self.coord.stop_on_exception(), \
          _enter_graph(self._init_graph, self._init_in_eager), \
          _enter_graph(self.graph, self.in_eager,
                       self._variable_creator_stack), \
          context.context().device_policy(self.context_device_policy), \
          MirroredReplicaContext(self.distribution, constant_op.constant(
              self.replica_id, dtypes.int32)), \
          ops.device(self.device_map.logical_to_actual_devices(0)[
              self.replica_id]), \
          ops.name_scope(self._name_scope), \
          variable_scope.variable_scope(
              self._captured_var_scope, reuse=self.replica_id > 0), \
          variable_scope.variable_creator_scope(self.variable_creator_fn):
        self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
        self.done = True
    finally:
      # Always hand control back to the main thread, even on error.
      self.has_paused.set()
class MirroredReplicaContext(distribute_lib.ReplicaContext):
  """ReplicaContext used in MirroredStrategy.extended.call_for_each_replica().
  Opened in `_MirroredReplicaThread`, to allow the user to invoke
  `MirroredStrategy`'s specific implementation of `merge_call()`,
  which works by delegating the function and its arguments to
  the main thread (the one that invoked
  `MirroredStrategy.extended.call_for_each_replica()`).
  """
  def _merge_call(self, fn, args, kwargs):
    """Delegate to the main thread to actually perform merge_call()."""
    t = threading.current_thread() # a _MirroredReplicaThread
    # Stash the merge request on the thread object for the main thread to read.
    t.merge_fn = fn
    t.merge_args = args
    t.merge_kwargs = kwargs
    t.captured_name_scope = t.graph.get_name_scope()
    # Adding a "/" at end lets us re-enter this scope later.
    if t.captured_name_scope:
      t.captured_name_scope += "/"
    t.captured_control_deps = t.graph._current_control_dependencies() # pylint: disable=protected-access
    # Pause this replica thread and wait until the main thread has run the
    # merge function and resumes us.
    t.has_paused.set()
    t.should_run.wait()
    t.should_run.clear()
    if t.coord.should_stop():
      raise _RequestedStop()
    return t.merge_result
  @property
  def devices(self):
    distribute_lib.require_replica_context(self)
    replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)
    return [self._strategy.extended.worker_devices_by_replica[replica_id]]
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after an RPC failure before retrying.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per getwork (retuned per scan).
MAX_NONCE = 1000000L

# Global configuration, populated from the key=value config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class CoinaaaRPC:
	"""Minimal JSON-RPC 1.1 client for the coin daemon's HTTP interface."""

	# JSON-RPC request id counter.
	# NOTE(review): `self.OBJID += 1` creates an instance attribute that
	# shadows this class attribute, so each connection numbers its own
	# requests independently starting at 2 — confirm that is intended.
	OBJID = 1

	def __init__(self, host, port, username, password):
		# Pre-compute the HTTP Basic auth header sent with every request.
		authpair = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(authpair))
		# httplib positional args: strict=False, timeout=30 seconds.
		self.conn = httplib.HTTPConnection(host, port, False, 30)

	def rpc(self, method, params=None):
		"""POST one JSON-RPC call; return its 'result', the 'error' object,
		or None on transport/decode failure."""
		self.OBJID += 1
		obj = { 'version' : '1.1',
			'method' : method,
			'id' : self.OBJID }
		if params is None:
			obj['params'] = []
		else:
			obj['params'] = params
		self.conn.request('POST', '/', json.dumps(obj),
			{ 'Authorization' : self.authhdr,
			  'Content-type' : 'application/json' })

		resp = self.conn.getresponse()
		if resp is None:
			print "JSON-RPC: no response"
			return None

		body = resp.read()
		resp_obj = json.loads(body)
		if resp_obj is None:
			print "JSON-RPC: cannot JSON-decode body"
			return None
		# A non-null 'error' field is returned to the caller as-is.
		if 'error' in resp_obj and resp_obj['error'] != None:
			return resp_obj['error']
		if 'result' not in resp_obj:
			print "JSON-RPC: no result in object"
			return None

		return resp_obj['result']

	def getblockcount(self):
		# Convenience wrapper for the 'getblockcount' RPC.
		return self.rpc('getblockcount')

	def getwork(self, data=None):
		# 'getwork' with no data fetches work; with data submits a solution.
		return self.rpc('getwork', data)
def uint32(x):
	# Truncate x to an unsigned 32-bit value.
	return x & 0xffffffffL
def bytereverse(x):
	# Reverse the byte order of a 32-bit word (endianness swap).
	return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
			(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
	# Byte-swap every aligned 32-bit word of a binary string, keeping the
	# word order. Assumes len(in_buf) is a multiple of 4 — TODO confirm,
	# struct.unpack raises otherwise.
	out_words = []
	for i in range(0, len(in_buf), 4):
		word = struct.unpack('@I', in_buf[i:i+4])[0]
		out_words.append(struct.pack('@I', bytereverse(word)))
	return ''.join(out_words)
def wordreverse(in_buf):
	# Return the buffer with its aligned 4-byte words in reverse order
	# (a trailing partial word, if any, is kept as-is and moved first).
	words = [in_buf[pos:pos + 4] for pos in range(0, len(in_buf), 4)]
	return ''.join(reversed(words))
class Miner:
	"""Scans getwork nonce ranges and submits winning proofs of work."""

	def __init__(self, id):
		# Numeric worker id, used only for hashmeter output.
		self.id = id
		# Nonces to try per scan; retuned in iterate() to hit 'scantime'.
		self.max_nonce = MAX_NONCE

	def work(self, datastr, targetstr):
		"""Scan nonces against one getwork unit.

		Returns (hashes_done, nonce_bin) where nonce_bin is the winning
		little-endian nonce bytes, or None if no solution was found.
		"""
		# decode work data hex string to binary
		static_data = datastr.decode('hex')
		static_data = bufreverse(static_data)

		# the first 76b of 80b do not change
		blk_hdr = static_data[:76]

		# decode 256-bit target value
		targetbin = targetstr.decode('hex')
		targetbin = targetbin[::-1]	# byte-swap and dword-swap
		targetbin_str = targetbin.encode('hex')
		target = long(targetbin_str, 16)

		# pre-hash first 76b of block header, reused for every nonce
		static_hash = hashlib.sha256()
		static_hash.update(blk_hdr)

		for nonce in xrange(self.max_nonce):

			# encode 32-bit nonce value
			nonce_bin = struct.pack("<I", nonce)

			# hash final 4b, the nonce value
			hash1_o = static_hash.copy()
			hash1_o.update(nonce_bin)
			hash1 = hash1_o.digest()

			# sha256 hash of sha256 hash
			hash_o = hashlib.sha256()
			hash_o.update(hash1)
			hash = hash_o.digest()

			# quick test for winning solution: high 32 bits zero?
			if hash[-4:] != '\0\0\0\0':
				continue

			# convert binary hash to 256-bit Python long
			hash = bufreverse(hash)
			hash = wordreverse(hash)
			hash_str = hash.encode('hex')
			l = long(hash_str, 16)

			# proof-of-work test: hash < target
			if l < target:
				print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
				return (nonce + 1, nonce_bin)
			else:
				print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

		return (nonce + 1, None)

	def submit_work(self, rpc, original_data, nonce_bin):
		"""Splice the winning nonce into the original hex data and submit."""
		nonce_bin = bufreverse(nonce_bin)
		nonce = nonce_bin.encode('hex')
		# Nonce occupies hex chars [152:160) of the 256-char data field.
		solution = original_data[:152] + nonce + original_data[160:256]
		param_arr = [ solution ]
		result = rpc.getwork(param_arr)
		print time.asctime(), "--> Upstream RPC result:", result

	def iterate(self, rpc):
		"""One fetch-scan-submit cycle; sleeps and returns on RPC trouble."""
		work = rpc.getwork()
		if work is None:
			time.sleep(ERR_SLEEP)
			return
		if 'data' not in work or 'target' not in work:
			time.sleep(ERR_SLEEP)
			return

		time_start = time.time()

		(hashes_done, nonce_bin) = self.work(work['data'],
						     work['target'])

		time_end = time.time()
		time_diff = time_end - time_start

		# Retune max_nonce so the next scan takes ~'scantime' seconds,
		# capped just below 2^32.
		self.max_nonce = long(
			(hashes_done * settings['scantime']) / time_diff)
		if self.max_nonce > 0xfffffffaL:
			self.max_nonce = 0xfffffffaL

		if settings['hashmeter']:
			print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
			      self.id, hashes_done,
			      (hashes_done / 1000.0) / time_diff)

		if nonce_bin is not None:
			self.submit_work(rpc, work['data'], nonce_bin)

	def loop(self):
		"""Mine forever against the configured RPC endpoint."""
		rpc = CoinaaaRPC(settings['host'], settings['port'],
				 settings['rpcuser'], settings['rpcpass'])
		# NOTE(review): a constructor call never returns None in Python,
		# so this guard is dead code.
		if rpc is None:
			return

		while True:
			self.iterate(rpc)
def miner_thread(id):
	# Worker-process entry point: run one Miner's fetch/scan/submit loop
	# forever (only terminates with the process).
	Miner(id).loop()
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print "Usage: pyminer.py CONFIG-FILE"
		sys.exit(1)

	# Parse the simple key=value config file, skipping '#' comment lines.
	f = open(sys.argv[1])
	for line in f:
		# skip comment lines
		m = re.search('^\s*#', line)
		if m:
			continue

		# parse key=value lines
		m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
		if m is None:
			continue
		settings[m.group(1)] = m.group(2)
	f.close()

	# Defaults for optional settings.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 12443
	if 'threads' not in settings:
		settings['threads'] = 1
	if 'hashmeter' not in settings:
		settings['hashmeter'] = 0
	if 'scantime' not in settings:
		settings['scantime'] = 30L
	# RPC credentials are mandatory.
	if 'rpcuser' not in settings or 'rpcpass' not in settings:
		print "Missing username and/or password in cfg file"
		sys.exit(1)

	# Config values arrive as strings; normalize the numeric ones.
	settings['port'] = int(settings['port'])
	settings['threads'] = int(settings['threads'])
	settings['hashmeter'] = int(settings['hashmeter'])
	settings['scantime'] = long(settings['scantime'])

	# Launch one miner process per configured thread.
	thr_list = []
	for thr_id in range(settings['threads']):
		p = Process(target=miner_thread, args=(thr_id,))
		p.start()
		thr_list.append(p)
		time.sleep(1)			# stagger threads

	print settings['threads'], "mining threads started"

	print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
	try:
		for thr_proc in thr_list:
			thr_proc.join()
	except KeyboardInterrupt:
		pass
	print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase
class ListTests(TranspileTestCase):
    """Transpiler conformance tests for the built-in list type.

    Each snippet is executed by the transpiler and by CPython; the two
    outputs (including any exception output) must match.
    """

    def test_setattr(self):
        self.assertCodeExecution("""
            x = [1, 2, 3]
            x.attr = 42
            print('Done.')
            """)

    def test_getattr(self):
        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.attr)
            print('Done.')
            """)

    def test_creation(self):
        # Empty list
        self.assertCodeExecution("""
            x = []
            print(x)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x)
            """)

    def test_getitem(self):
        # Simple positive index
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[2])
            """)

        # Simple negative index
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[-2])
            """)

        # Positive index out of range
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[10])
            """)

        # Negative index out of range
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[-10])
            """)

    def test_setitem(self):
        self.assertCodeExecution("""
            x = [1]
            x[0] = 5
            print(x[0])
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            x[1] = "hello"
            x[2] = "there"
            print(x)
            """)

        # Out of bounds
        self.assertCodeExecution("""
            x = []
            x[0] = 5
            """)

        # Out of bounds (negative)
        self.assertCodeExecution("""
            x = [1]
            x[-2] = 5
            """)

    def test_append(self):
        # New list
        self.assertCodeExecution("""
            x = []
            x.append("hello")
            x.append(5)
            print(x[0], x[1])
            """)

        # Existing list
        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            x.append(5)
            x.append("hello")
            print(x[4], x[5])
            """)

    def test_remove(self):
        # Remove integer
        self.assertCodeExecution("""
            x = [1, 2, 3]
            x.remove(1)
            print(x)
            """)

        # Remove only first duplicate
        self.assertCodeExecution("""
            x = [1, 2, 2, 3, 2]
            x.remove(2)
            print(x)
            """)

        # Remove boolean (1 == True, so True is matched and removed)
        self.assertCodeExecution("""
            x = [True, False, True, False]
            x.remove(1)
            print(x)
            """)

        # Not in list
        self.assertCodeExecution("""
            x = [1, 2]
            x.remove(3)
            """)

    def test_slice(self):
        # Full slice
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[:])
            """)

        # Left bound slice
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[1:])
            """)

        # Right bound slice
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[:4])
            """)

        # Slice bound in both directions
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[1:4])
            """)

        # Slice bound in both directions with end out of bounds
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[1:6])
            """)

        # Slice bound in both directions with start out of bounds
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x[6:7])
            """)

    def test_count(self):
        # Normal Count
        self.assertCodeExecution("""
            x = [1, 1, 1, 4, 5]
            print(x.count(1))
            """)

        # Bool Count (True counts as 1)
        self.assertCodeExecution("""
            x = [1, 1, False, 1, 4, True, 5, True]
            print(x.count(1))
            """)

        # Element doesn't exist count
        self.assertCodeExecution("""
            x = [1, False, 1, 1, True, 4, 5, True]
            print(x.count(2))
            """)

        self.assertCodeExecution("""
            x = [1, 1, 1, 4, 5, True]
            print(x.count(1))
            """)

    def test_contains(self):
        # NOTE(review): CPython lists have no `contains` method, so these
        # snippets exercise the AttributeError path; the third case calls
        # `count` rather than `contains` — confirm which was intended.

        # Normal Contains
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x.contains(1))
            """)

        # Element doesn't exist
        self.assertCodeExecution("""
            x = [1, 2, 3, 4, 5]
            print(x.contains(0))
            """)

        # Checking for boolean
        self.assertCodeExecution("""
            x = [True, False]
            print(x.count(1))
            """)

    def test_sort(self):
        self.assertCodeExecution("""
            fixtures = [
                [9, 4, 7],
                ['beta', 'theta', 'alpha'],
            ]
            for x in fixtures:
                x.sort()
                print(x)
            """)

        self.assertCodeExecution("""
            fixtures = [
                [9, 4, 7],
                ['beta', 'theta', 'alpha'],
            ]
            for x in fixtures:
                x.sort(reverse=True)
                print(x)
            """)

        self.assertCodeExecution("""
            def second(s):
                return s[1]

            x = ['abc', 'bza', 'cda', 'daa']
            x.sort(key=second)
            print(x)
            """)

        self.assertCodeExecution("""
            def second(s):
                return s[1]

            x = ['abc', 'bza', 'cda', 'daa']
            x.sort(key=second, reverse=True)
            print(x)
            """)

    def test_pop(self):
        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.pop())
            print(x)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.pop(0))
            print(x)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.pop(-2))
            print(x)
            """)

    def test_pop_exceptions(self):
        # pop from empty list / index out of range must raise IndexError
        self.assertCodeExecution("""
            x = []
            print(x.pop())
            print(x)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.pop(3))
            print(x)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.pop(-4))
            print(x)
            """)

    def test_copy(self):
        self.assertCodeExecution("""
            x = [1, 2, 3]
            y = x.copy()
            print(y)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            y = x.copy()
            print(x == y)
            """)

        # copy() returns a distinct object...
        self.assertCodeExecution("""
            x = [1, 2, 3]
            y = x.copy()
            print(x is not y)
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3]
            y = x.copy()
            y.append(4)
            print(x == y)
            """)

        # ...but it is a shallow copy: nested objects are shared.
        self.assertCodeExecution("""
            x = [[1], 2, 3]
            y = x.copy()
            print(x[0] is y[0])
            """)

    def test_index(self):
        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.index(1))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 1]
            print(x.index(1, 1))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(4, 0, len(x)))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(2, 1, 2))
            """)

        # stop beyond the end is clamped
        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(2, 0, 10))
            """)

        # negative start/stop are allowed
        self.assertCodeExecution("""
            x = [1, 2, 1]
            print(x.index(1, 0, -2))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 1]
            print(x.index(1, -3, -2))
            """)

        # cases for 'ValueError: not in list'
        self.assertCodeExecution("""
            x = [1, 2, 3]
            print(x.index(4))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 1]
            print(x.index(2, 0, 1))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(4, 0, 3))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 1]
            print(x.index(3, 0, 10))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(2, 10, 20))
            """)

        self.assertCodeExecution("""
            x = [1, 2, 3, 4]
            print(x.index(2, 10, 0))
            """)

        self.assertCodeExecution("""
            x = []
            print(x.index(1, 0, 10))
            """)
class UnaryListOperationTests(UnaryOperationTestCase, TranspileTestCase):
    # Run the shared unary-operator test matrix against list operands.
    data_type = 'list'
class BinaryListOperationTests(BinaryOperationTestCase, TranspileTestCase):
    # Run the shared binary-operator test matrix against list operands.
    data_type = 'list'

    # Generated test names expected to fail; remove entries as the
    # transpiler implements the corresponding operations.
    not_implemented = [
        'test_add_class',
        'test_add_frozenset',

        'test_and_class',
        'test_and_frozenset',

        'test_eq_class',
        'test_eq_frozenset',

        'test_floor_divide_class',
        'test_floor_divide_complex',
        'test_floor_divide_frozenset',

        'test_ge_class',
        'test_ge_frozenset',
        'test_ge_list',

        'test_gt_class',
        'test_gt_frozenset',
        'test_gt_list',

        'test_le_class',
        'test_le_frozenset',
        'test_le_list',

        'test_lshift_class',
        'test_lshift_frozenset',

        'test_lt_class',
        'test_lt_frozenset',
        'test_lt_list',

        'test_modulo_class',
        'test_modulo_complex',
        'test_modulo_frozenset',

        'test_multiply_class',
        'test_multiply_frozenset',

        'test_ne_class',
        'test_ne_frozenset',

        'test_or_class',
        'test_or_frozenset',

        'test_power_class',
        'test_power_frozenset',

        'test_rshift_class',
        'test_rshift_frozenset',

        'test_subscr_bool',
        'test_subscr_class',
        'test_subscr_frozenset',
        'test_subscr_slice',

        'test_subtract_class',
        'test_subtract_frozenset',

        'test_true_divide_class',
        'test_true_divide_frozenset',

        'test_xor_class',
        'test_xor_frozenset',
    ]
class InplaceListOperationTests(InplaceOperationTestCase, TranspileTestCase):
    # Run the shared in-place operator (+=, *=, ...) test matrix against
    # list operands.
    data_type = 'list'

    # Generated test names expected to fail; remove entries as the
    # transpiler implements the corresponding operations.
    not_implemented = [
        'test_add_bytearray',
        'test_add_bytes',
        'test_add_class',
        'test_add_dict',
        'test_add_frozenset',
        'test_add_range',
        'test_add_set',
        'test_add_str',

        'test_and_class',
        'test_and_frozenset',

        'test_floor_divide_class',
        'test_floor_divide_complex',
        'test_floor_divide_frozenset',

        'test_lshift_class',
        'test_lshift_frozenset',

        'test_modulo_class',
        'test_modulo_complex',
        'test_modulo_frozenset',

        'test_multiply_class',
        'test_multiply_frozenset',

        'test_or_class',
        'test_or_frozenset',

        'test_power_class',
        'test_power_frozenset',

        'test_rshift_class',
        'test_rshift_frozenset',

        'test_subtract_class',
        'test_subtract_frozenset',

        'test_true_divide_class',
        'test_true_divide_frozenset',

        'test_xor_class',
        'test_xor_frozenset',
    ]
|
|
"""
Scikit-learn utilities for input validation.
"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from .. import six
from ...utils.exceptions import NotFittedError, NonBLASDotWarning, \
DataConversionWarning
try:
from inspect import signature
except ImportError:
from mlens.externals.funcsigs import signature
# Floating dtypes accepted as already-float input (no conversion needed).
FLOAT_DTYPES = (np.float64, np.float32, np.float16)

# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
    """Throw a ValueError if X contains NaN or infinity.

    Parameters
    ----------
    X : array or sparse matrix
    """
    # For sparse input only the stored entries need checking.
    data = X.data if sp.issparse(X) else X
    _assert_all_finite(data)
def as_float_array(X, copy=True, force_all_finite=True):
    """Converts an array-like to an array of floats.

    The new dtype will be np.float32 or np.float64, depending on the
    original type. Depending on ``copy``, the input may be returned
    unchanged, copied, or converted.

    Parameters
    ----------
    X : {array-like, sparse matrix}

    copy : bool, optional
        If True, a copy of X will be created. If False, a copy may still be
        returned if X's dtype is not a floating point type.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    XT : {array, sparse matrix}
        An array of type np.float
    """
    # Anything that is not already a proper ndarray or sparse matrix
    # (lists, np.matrix, ...) goes through full validation/conversion.
    not_array = not isinstance(X, np.ndarray) and not sp.issparse(X)
    if isinstance(X, np.matrix) or not_array:
        return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
                           copy=copy, force_all_finite=force_all_finite,
                           ensure_2d=False)

    if X.dtype in [np.float32, np.float64]:
        # Already floating: at most a copy is needed.
        if sp.issparse(X):
            return X.copy() if copy else X
        if copy:
            # Preserve the existing memory layout when copying.
            return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C')
        return X

    # Non-float dtype: small ints/uints/bools fit losslessly in float32;
    # everything else is promoted to float64.
    small_int = X.dtype.kind in 'uib' and X.dtype.itemsize <= 4
    return X.astype(np.float32 if small_int else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
    """Check that all arrays have consistent first dimensions.

    Checks whether all objects in arrays have the same shape or length;
    ``None`` entries are ignored.

    Parameters
    ----------
    *arrays : list or tuple of input objects.
        Objects that will be checked for consistent length.
    """
    lengths = [_num_samples(sample) for sample in arrays
               if sample is not None]
    if len(np.unique(lengths)) > 1:
        raise ValueError("Found input variables with inconsistent numbers of"
                         " samples: %r" % [int(l) for l in lengths])
def indexable(*iterables):
    """Make arrays indexable for cross-validation.

    Checks consistent length, passes through None, and ensures that
    everything can be indexed by converting sparse matrices to csr and
    converting non-indexable objects to arrays.

    Parameters
    ----------
    *iterables : lists, dataframes, arrays, sparse matrices
        List of objects to ensure sliceability.
    """
    result = []
    for item in iterables:
        if sp.issparse(item):
            # CSR supports row indexing.
            result.append(item.tocsr())
        elif item is None or hasattr(item, "__getitem__") \
                or hasattr(item, "iloc"):
            # Already indexable (or a placeholder None) — pass through.
            result.append(item)
        else:
            result.append(np.array(item))
    check_consistent_length(*result)
    return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
                          force_all_finite):
    """Convert a sparse matrix to a given format.

    Checks the sparse format of spmatrix and converts if necessary.

    Parameters
    ----------
    spmatrix : scipy sparse matrix
        Input to validate and convert.

    accept_sparse : string, boolean or list/tuple of strings
        String[s] representing allowed sparse matrix formats ('csc',
        'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). If the input is sparse but
        not in the allowed format, it will be converted to the first listed
        format. True allows the input to be any format. False means
        that a sparse matrix input will raise an error.

    dtype : string, type or None
        Data type of result. If None, the dtype of the input is preserved.

    copy : boolean
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean
        Whether to raise an error on np.inf and np.nan in X.

    Returns
    -------
    spmatrix_converted : scipy sparse matrix.
        Matrix that is ensured to have an allowed type.
    """
    if dtype is None:
        dtype = spmatrix.dtype

    changed_format = False

    # Normalize a single format string to a one-element list.
    if isinstance(accept_sparse, six.string_types):
        accept_sparse = [accept_sparse]

    if accept_sparse is False:
        raise TypeError('A sparse matrix was passed, but dense '
                        'data is required. Use X.toarray() to '
                        'convert to a dense numpy array.')
    elif isinstance(accept_sparse, (list, tuple)):
        if len(accept_sparse) == 0:
            raise ValueError("When providing 'accept_sparse' "
                             "as a tuple or list, it must contain at "
                             "least one string value.")
        # ensure correct sparse format
        if spmatrix.format not in accept_sparse:
            # create new with correct sparse
            spmatrix = spmatrix.asformat(accept_sparse[0])
            changed_format = True
    elif accept_sparse is not True:
        # any other type
        raise ValueError("Parameter 'accept_sparse' should be a string, "
                         "boolean or list of strings. You provided "
                         "'accept_sparse={}'.".format(accept_sparse))

    if dtype != spmatrix.dtype:
        # convert dtype
        spmatrix = spmatrix.astype(dtype)
    elif copy and not changed_format:
        # force copy — skipped when astype/asformat above already produced
        # a new object.
        spmatrix = spmatrix.copy()

    if force_all_finite:
        if not hasattr(spmatrix, "data"):
            # e.g. DOK matrices expose no flat .data array to scan.
            warnings.warn("Can't check %s sparse matrix for nan or inf."
                          % spmatrix.format)
        else:
            _assert_all_finite(spmatrix.data)
    return spmatrix
def check_array(array, accept_sparse=False, dtype="numeric", order=None,
                copy=False, force_all_finite=True, ensure_2d=True,
                allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
                warn_on_dtype=False, estimator=None):
    """Input validation on an array, list, sparse matrix or similar.

    By default, the input is converted to an at least 2D numpy array.
    If the dtype of the array is object, attempt converting to float,
    raising on failure.

    Parameters
    ----------
    array : object
        Input object to check / convert.

    accept_sparse : string, boolean or list/tuple of strings (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.
        When order is None (default), then if copy=False, nothing is ensured
        about the memory layout of the output array; otherwise (copy=True)
        the memory layout of the returned array is kept as close as possible
        to the original array.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X.

    ensure_2d : boolean (default=True)
        Whether to raise a value error if X is not 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    ensure_min_samples : int (default=1)
        Make sure that the array has a minimum number of samples in its first
        axis (rows for a 2D array). Setting to 0 disables this check.

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when the input data has effectively 2
        dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
        disables this check.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.
    """
    # accept_sparse 'None' deprecation check
    if accept_sparse is None:
        warnings.warn(
            "Passing 'None' to parameter 'accept_sparse' in methods "
            "check_array and check_X_y is deprecated in version 0.19 "
            "and will be removed in 0.21. Use 'accept_sparse=False' "
            " instead.", DeprecationWarning)
        accept_sparse = False

    # store whether originally we wanted numeric dtype
    dtype_numeric = isinstance(dtype, six.string_types) and dtype == "numeric"

    dtype_orig = getattr(array, "dtype", None)
    if not hasattr(dtype_orig, 'kind'):
        # not a data type (e.g. a column named dtype in a pandas DataFrame)
        dtype_orig = None

    if dtype_numeric:
        if dtype_orig is not None and dtype_orig.kind == "O":
            # if input is object, convert to float.
            dtype = np.float64
        else:
            # any other input dtype is preserved as-is
            dtype = None

    if isinstance(dtype, (list, tuple)):
        if dtype_orig is not None and dtype_orig in dtype:
            # no dtype conversion required
            dtype = None
        else:
            # dtype conversion required. Let's select the first element of the
            # list of accepted types.
            dtype = dtype[0]

    # Resolve the name used in error/warning messages.
    if estimator is not None:
        if isinstance(estimator, six.string_types):
            estimator_name = estimator
        else:
            estimator_name = estimator.__class__.__name__
    else:
        estimator_name = "Estimator"
    context = " by %s" % estimator_name if estimator is not None else ""

    if sp.issparse(array):
        array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
                                      force_all_finite)
    else:
        array = np.array(array, dtype=dtype, order=order, copy=copy)

        if ensure_2d:
            if array.ndim == 1:
                raise ValueError(
                    "Expected 2D array, got 1D array instead:\narray={}.\n"
                    "Reshape your data either using array.reshape(-1, 1) if "
                    "your data has a single feature or array.reshape(1, -1) "
                    "if it contains a single sample.".format(array))
            array = np.atleast_2d(array)
            # To ensure that array flags are maintained
            array = np.array(array, dtype=dtype, order=order, copy=copy)

        # make sure we actually converted to numeric:
        if dtype_numeric and array.dtype.kind == "O":
            array = array.astype(np.float64)
        if not allow_nd and array.ndim >= 3:
            raise ValueError("Found array with dim %d. %s expected <= 2."
                             % (array.ndim, estimator_name))
        if force_all_finite:
            _assert_all_finite(array)

    shape_repr = _shape_repr(array.shape)
    if ensure_min_samples > 0:
        n_samples = _num_samples(array)
        if n_samples < ensure_min_samples:
            raise ValueError("Found array with %d sample(s) (shape=%s) while a"
                             " minimum of %d is required%s."
                             % (n_samples, shape_repr, ensure_min_samples,
                                context))

    if ensure_min_features > 0 and array.ndim == 2:
        n_features = array.shape[1]
        if n_features < ensure_min_features:
            raise ValueError("Found array with %d feature(s) (shape=%s) while"
                             " a minimum of %d is required%s."
                             % (n_features, shape_repr, ensure_min_features,
                                context))

    if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
        msg = ("Data with input dtype %s was converted to %s%s."
               % (dtype_orig, array.dtype, context))
        warnings.warn(msg, DataConversionWarning)
    return array
def check_X_y(X, y, accept_sparse=False, dtype="numeric", order=None,
              copy=False, force_all_finite=True, ensure_2d=True,
              allow_nd=False, multi_output=False, ensure_min_samples=1,
              ensure_min_features=1, y_numeric=False,
              warn_on_dtype=False, estimator=None):
    """Input validation for standard estimators.

    Checks X and y for consistent length, enforces X 2d and y 1d.
    Standard input checks are only applied to y, such as checking that y
    does not have np.nan or np.inf targets. For multi-label y, set
    multi_output=True to allow 2d and sparse y. If the dtype of X is
    object, attempt converting to float, raising on failure.

    Parameters
    ----------
    X : nd-array, list or sparse matrix
        Input data.

    y : nd-array, list or sparse matrix
        Labels.

    accept_sparse : string, boolean or list of string (default=False)
        String[s] representing allowed sparse matrix formats, such as 'csc',
        'csr', etc. If the input is sparse but not in the allowed format,
        it will be converted to the first listed format. True allows the input
        to be any format. False means that a sparse matrix input will
        raise an error.

        .. deprecated:: 0.19
           Passing 'None' to parameter ``accept_sparse`` in methods is
           deprecated in version 0.19 and will be removed in 0.21. Use
           ``accept_sparse=False`` instead.

    dtype : string, type, list of types or None (default="numeric")
        Data type of result. If None, the dtype of the input is preserved.
        If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion on the first type is only
        performed if the dtype of the input is not in the list.

    order : 'F', 'C' or None (default=None)
        Whether an array will be forced to be fortran or c-style.

    copy : boolean (default=False)
        Whether a forced copy will be triggered. If copy=False, a copy might
        be triggered by a conversion.

    force_all_finite : boolean (default=True)
        Whether to raise an error on np.inf and np.nan in X. This parameter
        does not influence whether y can have np.inf or np.nan values.

    ensure_2d : boolean (default=True)
        Whether to make X at least 2d.

    allow_nd : boolean (default=False)
        Whether to allow X.ndim > 2.

    multi_output : boolean (default=False)
        Whether to allow 2-d y (array or sparse matrix). If false, y will be
        validated as a vector. y cannot have np.nan or np.inf values if
        multi_output=True.

    ensure_min_samples : int (default=1)
        Make sure that X has a minimum number of samples in its first
        axis (rows for a 2D array).

    ensure_min_features : int (default=1)
        Make sure that the 2D array has some minimum number of features
        (columns). The default value of 1 rejects empty datasets.
        This check is only enforced when X has effectively 2 dimensions or
        is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
        this check.

    y_numeric : boolean (default=False)
        Whether to ensure that y has a numeric type. If dtype of y is object,
        it is converted to float64. Should only be used for regression
        algorithms.

    warn_on_dtype : boolean (default=False)
        Raise DataConversionWarning if the dtype of the input data structure
        does not match the requested dtype, causing a memory copy.

    estimator : str or estimator instance (default=None)
        If passed, include the name of the estimator in warning messages.

    Returns
    -------
    X_converted : object
        The converted and validated X.

    y_converted : object
        The converted and validated y.
    """
    # Full validation pipeline for X.
    X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
                    ensure_2d, allow_nd, ensure_min_samples,
                    ensure_min_features, warn_on_dtype, estimator)
    if multi_output:
        # 2-d / sparse targets allowed; finiteness is still enforced.
        y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
                        dtype=None)
    else:
        # Targets must be a 1-d vector (warn when a column vector is passed).
        y = column_or_1d(y, warn=True)
        _assert_all_finite(y)
    if y_numeric and y.dtype.kind == 'O':
        y = y.astype(np.float64)

    check_consistent_length(X, y)

    return X, y
def column_or_1d(y, warn=False):
    """Flatten a 1d array or a single-column 2d array into a 1d array.

    Parameters
    ----------
    y : array-like
        Input to be flattened.
    warn : boolean, default False
        To control display of warnings when a column vector is raveled.

    Returns
    -------
    y : array
        The input raveled to one dimension.

    Raises
    ------
    ValueError
        If ``y`` is neither 1d nor a single-column 2d array.
    """
    dims = np.shape(y)
    n_dims = len(dims)
    if n_dims == 1:
        return np.ravel(y)
    if n_dims == 2 and dims[1] == 1:
        # A (n_samples, 1) column vector: accept it, but optionally nudge
        # the caller toward passing a proper 1d array.
        if warn:
            warnings.warn("A column-vector y was passed when a 1d array was"
                          " expected. Please change the shape of y to "
                          "(n_samples, ), for example using ravel().",
                          DataConversionWarning, stacklevel=2)
        return np.ravel(y)
    raise ValueError("bad input shape {0}".format(dims))
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance.

    Parameters
    ----------
    seed : None | int | instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    # None (or the np.random module itself, a common caller slip) means
    # "use the shared global generator".
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    # An existing generator is passed through untouched.
    if isinstance(seed, np.random.RandomState):
        return seed
    # Plain Python ints and numpy integer scalars both seed a fresh state.
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def has_fit_parameter(estimator, parameter):
    """Check whether the estimator's fit method accepts the given parameter.

    Parameters
    ----------
    estimator : object
        An estimator to inspect.
    parameter: str
        The searched parameter.

    Returns
    -------
    is_parameter: bool
        Whether the parameter was found to be a named parameter of the
        estimator's fit method.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> has_fit_parameter(SVC(), "sample_weight")
    True
    """
    fit_params = signature(estimator.fit).parameters
    return parameter in fit_params
def check_symmetric(array, tol=1E-10, raise_warning=True,
                    raise_exception=False):
    """Make sure that array is 2D, square and symmetric.

    If the array is not symmetric, then a symmetrized version is returned.
    Optionally, a warning or exception is raised if the matrix is not
    symmetric.

    Parameters
    ----------
    array : nd-array or sparse matrix
        Input object to check / convert. Must be two-dimensional and square,
        otherwise a ValueError will be raised.
    tol : float
        Absolute tolerance for equivalence of arrays. Default = 1E-10.
    raise_warning : boolean (default=True)
        If True then raise a warning if conversion is required.
    raise_exception : boolean (default=False)
        If True then raise an exception if array is not symmetric.

    Returns
    -------
    array_sym : ndarray or sparse matrix
        Symmetrized version of the input array, i.e. the average of array
        and array.transpose(). If sparse, then duplicate entries are first
        summed and zeros are eliminated.
    """
    if array.ndim != 2 or array.shape[0] != array.shape[1]:
        raise ValueError("array must be 2-dimensional and square. "
                         "shape = {0}".format(array.shape))
    if sp.issparse(array):
        diff = array - array.T
        # only csr, csc, and coo have `data` attribute
        if diff.format not in ('csr', 'csc', 'coo'):
            diff = diff.tocsr()
        is_symmetric = np.all(abs(diff.data) < tol)
    else:
        is_symmetric = np.allclose(array, array.T, atol=tol)
    if not is_symmetric:
        if raise_exception:
            raise ValueError("Array must be symmetric")
        if raise_warning:
            warnings.warn("Array is not symmetric, and will be converted "
                          "to symmetric by average with its transpose.")
        averaged = 0.5 * (array + array.T)
        if sp.issparse(array):
            # Sparse addition may change the storage format; convert the
            # symmetrized result back to the input's original format.
            array = getattr(averaged, 'to' + array.format)()
        else:
            array = averaged
    return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
    """Perform is_fitted validation for estimator.

    Checks if the estimator is fitted by verifying the presence of
    "all_or_any" of the passed attributes and raises a NotFittedError with the
    given message.

    Parameters
    ----------
    estimator : estimator instance.
        estimator instance for which the check is performed.
    attributes : attribute name(s) given as string or a list/tuple of strings
        Eg.:
            ``["coef_", "estimator_", ...], "coef_"``
    msg : string
        The default error message is, "This %(name)s instance is not fitted
        yet. Call 'fit' with appropriate arguments before using this method."
        For custom messages if "%(name)s" is present in the message string,
        it is substituted for the estimator name.
        Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
    all_or_any : callable, {all, any}, default all
        Specify whether all or any of the given attributes must exist.

    Returns
    -------
    None

    Raises
    ------
    NotFittedError
        If the attributes are not found.
    """
    if msg is None:
        msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
               "appropriate arguments before using this method.")
    # Anything without a fit method is not an estimator at all.
    if not hasattr(estimator, 'fit'):
        raise TypeError("%s is not an estimator instance." % (estimator))
    # Normalize a single attribute name into a one-element list.
    if not isinstance(attributes, (list, tuple)):
        attributes = [attributes]
    found = [hasattr(estimator, attr) for attr in attributes]
    if not all_or_any(found):
        raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
    """Raise if any value in an array is negative.

    Parameters
    ----------
    X : array-like or sparse matrix
        Input data.
    whom : string
        Who passed X to this function; used in the error message.

    Raises
    ------
    ValueError
        If X contains at least one negative entry.
    """
    # For sparse input only the stored entries need checking (implicit
    # zeros are non-negative by definition).
    values = X.data if sp.issparse(X) else X
    if (values < 0).any():
        raise ValueError("Negative values in data passed to %s" % whom)
|
|
# Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import versionutils
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
CONF = cfg.CONF
@base.CinderObjectRegistry.register
class Snapshot(base.CinderPersistentObject, base.CinderObject,
               base.CinderObjectDictCompat):
    """Versioned object wrapping a volume snapshot database record.

    The ``metadata`` dict is mutated in place by callers, so this class
    keeps its own baseline copy (``_orig_metadata``) and augments the
    standard versioned-object change tracking in ``obj_what_changed``.
    """
    # Version 1.0: Initial version
    # Version 1.1: Changed 'status' field to use SnapshotStatusField
    VERSION = '1.1'
    # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
    # are typically the relationship in the sqlalchemy object.
    OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot')
    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'volume_id': fields.UUIDField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'status': c_fields.SnapshotStatusField(nullable=True),
        'progress': fields.StringField(nullable=True),
        'volume_size': fields.IntegerField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'provider_auth': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=True),
        'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True),
    }
    @classmethod
    def _get_expected_attrs(cls, context):
        # Only 'metadata' is loaded eagerly by default; 'volume' and
        # 'cgsnapshot' stay lazy and are fetched via obj_load_attr.
        return 'metadata',
    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'volume_name']
    @property
    def name(self):
        # Display name rendered from the configured template and the id.
        return CONF.snapshot_name_template % self.id
    @property
    def volume_name(self):
        # Delegates to the parent volume (lazy-loaded on first access).
        return self.volume.name
    def __init__(self, *args, **kwargs):
        super(Snapshot, self).__init__(*args, **kwargs)
        # Baseline copy of metadata as of the last reset; compared against
        # the live dict in obj_what_changed() to detect in-place edits.
        self._orig_metadata = {}
        self._reset_metadata_tracking()
    def obj_reset_changes(self, fields=None):
        """Reset change tracking, re-baselining metadata when relevant."""
        super(Snapshot, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)
    def _reset_metadata_tracking(self, fields=None):
        # Re-snapshot the metadata dict when resetting everything (fields
        # is None) or when 'metadata' is explicitly listed.
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if self.obj_attr_is_set('metadata') else {})
    def obj_what_changed(self):
        """Return changed fields, including in-place metadata mutations."""
        changes = super(Snapshot, self).obj_what_changed()
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')
        return changes
    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Snapshot, self).obj_make_compatible(primitive, target_version)
        # NOTE(review): the converted tuple is computed but never used --
        # presumably a placeholder for downgrading the 1.1 status field to
        # pre-1.1 consumers; confirm before removing.
        target_version = versionutils.convert_version_to_tuple(target_version)
    @staticmethod
    def _from_db_object(context, snapshot, db_snapshot, expected_attrs=None):
        """Populate a Snapshot object from a raw DB row.

        Relationship attributes named in ``expected_attrs`` ('volume',
        'cgsnapshot', 'metadata') are converted too; the other optional
        fields are skipped so they remain lazy-loadable.
        """
        if expected_attrs is None:
            expected_attrs = []
        for name, field in snapshot.fields.items():
            if name in Snapshot.OPTIONAL_FIELDS:
                continue
            value = db_snapshot.get(name)
            if isinstance(field, fields.IntegerField):
                # Normalize NULL integer columns (e.g. volume_size) to 0.
                value = value if value is not None else 0
            setattr(snapshot, name, value)
        if 'volume' in expected_attrs:
            volume = objects.Volume(context)
            volume._from_db_object(context, volume, db_snapshot['volume'])
            snapshot.volume = volume
        if 'cgsnapshot' in expected_attrs:
            cgsnapshot = objects.CGSnapshot(context)
            cgsnapshot._from_db_object(context, cgsnapshot,
                                       db_snapshot['cgsnapshot'])
            snapshot.cgsnapshot = cgsnapshot
        if 'metadata' in expected_attrs:
            metadata = db_snapshot.get('snapshot_metadata')
            if metadata is None:
                # Metadata was requested but the row has none attached.
                raise exception.MetadataAbsent()
            snapshot.metadata = {item['key']: item['value']
                                 for item in metadata}
        snapshot._context = context
        # A freshly hydrated object starts with a clean change set.
        snapshot.obj_reset_changes()
        return snapshot
    def create(self):
        """Persist a new snapshot row; relationship fields must be unset."""
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if 'volume' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volume assigned'))
        if 'cgsnapshot' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cgsnapshot assigned'))
        db_snapshot = db.snapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_snapshot)
    def save(self):
        """Write changed fields back to the database."""
        updates = self.cinder_obj_get_changes()
        if updates:
            # Relationship objects cannot be updated through save().
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))
            if 'cgsnapshot' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cgsnapshot changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.snapshot_metadata_update(self._context,
                                                            self.id, metadata,
                                                            True)
            db.snapshot_update(self._context, self.id, updates)
        self.obj_reset_changes()
    def destroy(self):
        """Delete this snapshot's row from the database."""
        db.snapshot_destroy(self._context, self.id)
    def obj_load_attr(self, attrname):
        """Lazy-load an optional relationship attribute on first access."""
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'volume':
            self.volume = objects.Volume.get_by_id(self._context,
                                                   self.volume_id)
        if attrname == 'cgsnapshot':
            self.cgsnapshot = objects.CGSnapshot.get_by_id(self._context,
                                                           self.cgsnapshot_id)
        # Loading an attribute must not leave it marked as dirty.
        self.obj_reset_changes(fields=[attrname])
    def delete_metadata_key(self, context, key):
        """Delete one metadata item in the DB and on this object.

        Preserves the prior dirty state: if metadata was clean before this
        call, it is reset to clean afterwards (the object and the DB are
        already in sync for the removed key).
        """
        db.snapshot_metadata_delete(context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
    @classmethod
    def snapshot_data_get_for_project(cls, context, project_id,
                                      volume_type_id=None):
        # Thin pass-through to the DB API; presumably returns per-project
        # snapshot usage data -- confirm semantics in cinder.db.api.
        return db.snapshot_data_get_for_project(context, project_id,
                                                volume_type_id)
@base.CinderObjectRegistry.register
class SnapshotList(base.ObjectListBase, base.CinderObject):
    """Versioned-object list of Snapshot objects.

    Every getter follows the same two-step pattern: fetch raw rows
    through the matching ``db`` API call, then wrap them via
    ``_to_object_list``.
    """
    VERSION = '1.0'
    fields = {
        'objects': fields.ListOfObjectsField('Snapshot'),
    }
    @classmethod
    def _to_object_list(cls, context, db_snapshots):
        # Shared DB-row -> SnapshotList conversion used by all getters.
        expected_attrs = Snapshot._get_expected_attrs(context)
        return base.obj_make_list(context, cls(context), objects.Snapshot,
                                  db_snapshots,
                                  expected_attrs=expected_attrs)
    @classmethod
    def get_all(cls, context, search_opts, marker=None, limit=None,
                sort_keys=None, sort_dirs=None, offset=None):
        """List snapshots matching search_opts, with paging and sorting."""
        rows = db.snapshot_get_all(context, search_opts, marker, limit,
                                   sort_keys, sort_dirs, offset)
        return cls._to_object_list(context, rows)
    @classmethod
    def get_by_host(cls, context, host, filters=None):
        """List snapshots for a host (delegates to db.snapshot_get_by_host)."""
        rows = db.snapshot_get_by_host(context, host, filters)
        return cls._to_object_list(context, rows)
    @classmethod
    def get_all_by_project(cls, context, project_id, search_opts, marker=None,
                           limit=None, sort_keys=None, sort_dirs=None,
                           offset=None):
        """List a project's snapshots matching search_opts, with paging."""
        rows = db.snapshot_get_all_by_project(
            context, project_id, search_opts, marker, limit, sort_keys,
            sort_dirs, offset)
        return cls._to_object_list(context, rows)
    @classmethod
    def get_all_for_volume(cls, context, volume_id):
        """List all snapshots taken of the given volume."""
        rows = db.snapshot_get_all_for_volume(context, volume_id)
        return cls._to_object_list(context, rows)
    @classmethod
    def get_active_by_window(cls, context, begin, end):
        """List snapshots active within the given time window."""
        rows = db.snapshot_get_active_by_window(context, begin, end)
        return cls._to_object_list(context, rows)
    @classmethod
    def get_all_for_cgsnapshot(cls, context, cgsnapshot_id):
        """List all snapshots belonging to the given cgsnapshot."""
        rows = db.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id)
        return cls._to_object_list(context, rows)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.