#---------------------------------------------------------------------------
# Copyright 2013 PwC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
## @class SCActions
## Scheduling Actions
'''
Scheduler Actions class. Extends Actions.
Created on Jun 14, 2012
@author: pbradley, bcaine
@copyright PwC
@license http://www.apache.org/licenses/LICENSE-2.0
'''
from Actions import Actions
import TestHelper
import datetime
import time
class SCActions (Actions):
'''
This class extends the Actions class with methods specific to actions performed
through the Roll and Scroll interface for the Scheduling package.
'''
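# A minimal usage sketch (hypothetical values; assumes `vista` is a connected
# VistA test connection created via TestHelper, and that the clinic and patient
# below already exist in the test database):
#
#   sc = SCActions(vista, scheduling=True)
#   sc.signon()        # DUZ=1 signon via ^XUP, then opens SDAM APPT MGT
#   sc.makeapp(clinic='CLINICX', patient='333224444', datetime='t+1@10am')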
def __init__(self, VistAconn, scheduling=None, user=None, code=None):
Actions.__init__(self, VistAconn, scheduling, user, code)
def signon (self):
''' This provides a signon via ^XUP or ^ZU depending on the value of acode'''
if self.acode is None:
self.VistA.write('S DUZ=1,DUZ(0)="@" D ^XUP')
if self.sched is not None:
self.VistA.wait('OPTION NAME:')
self.VistA.write('SDAM APPT MGT')
else:
self.VistA.write('D ^ZU')
self.VistA.wait('ACCESS CODE:');
self.VistA.write(self.acode)
self.VistA.wait('VERIFY CODE:');
self.VistA.write(self.vcode)
self.VistA.wait('//');
self.VistA.write('')
self.VistA.wait('Core Applications')
self.VistA.write('Scheduling')
def schtime(self, plushour=1):
'''Calculates a time `plushour` hours from now (defaults to the next hour)'''
ttime = datetime.datetime.now() + datetime.timedelta(hours=plushour)
return ttime.strftime("%I%p").lstrip('0')
def getclinic(self):
'''Determines which clinic to use based on the time of day'''
now = datetime.datetime.now()
hour = now.hour
if hour >= 23 or hour <= 6:
clinic = 'Clinic1'
elif hour >= 7 and hour <= 14:
clinic = 'Clinic2'
elif hour >= 15 and hour <= 22:
clinic = 'CLINICX'
return clinic
def dateformat(self, dayadd=0):
'''Currently not used. Needs to handle the case where the added days push
the total past the month's last day (e.g. change 8/35/12 to 9/4/12).
The idea is to use this for date verification.'''
now = datetime.datetime.now()
month = now.month
day = now.day + dayadd
year = now.year % 100
date = str(month) + '/' + str(day) + '/' + str(year)
return date
def makeapp(self, clinic, patient, datetime, fresh=None, badtimeresp=None, apptype=None, subcat=None):
'''Makes Appointment for specified user at specified time via Clinic view'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('PATIENT NAME:')
self.VistA.write('??')
self.VistA.multiwait(['TO STOP:','to exit'])
self.VistA.write('^')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
if apptype is not None:
self.VistA.wait('TYPE:')
self.VistA.write(apptype)
self.VistA.wait('APPT TYPE:')
self.VistA.write(subcat[0])
self.VistA.wait('APPT TYPE:')
self.VistA.write(subcat[1])
else:
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
if fresh is not None:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('REQUEST')
self.VistA.write('Yes')
self.VistA.wait('DATE/TIME')
self.VistA.write('t+5')
self.VistA.wait('DATE/TIME')
self.VistA.write(datetime)
if badtimeresp == 'noslot':
self.VistA.wait('NO OPEN SLOTS THEN')
self.VistA.wait('DATE/TIME')
self.VistA.write('')
elif badtimeresp == 'overbook':
self.VistA.wait('OVERBOOK')
self.VistA.write('yes')
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
else:
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
index = self.VistA.multiwait(['Select Action:','APPOINTMENT LETTER'])
if index == 1:
self.VistA.write('No')
self.VistA.wait('Select Action')
self.VistA.write('Quit')
self.VistA.wait('')
def makeapp_bypat(self, clinic, patient, datetime, loopnum=1, fresh=None, CLfirst=None, prevCO=None):
'''Makes Appointment for specified user at specified time via Patient view'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
for _ in range(loopnum):
self.VistA.wait('Select Action:')
if CLfirst is not None:
self.VistA.write('CL')
self.VistA.wait('Select Clinic:')
self.VistA.write(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
else:
self.VistA.write('MA')
self.VistA.wait('Select CLINIC:')
self.VistA.write(clinic)
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
if fresh is not None:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
elif _ >= 1:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('REQUEST')
self.VistA.write('Yes')
self.VistA.wait('DATE/TIME')
self.VistA.write(datetime)
if _ >= 1:
self.VistA.wait('DO YOU WANT TO CANCEL IT')
self.VistA.write('Yes')
self.VistA.wait('Press RETURN to continue:')
self.VistA.write('')
if prevCO is not None:
self.VistA.wait('A check out date has been entered for this appointment!')
self.VistA.wait('DATE/TIME:')
self.VistA.write('')
else:
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
while True:
index = self.VistA.multiwait(['Select Action:','Select CLINIC:','APPOINTMENT LETTER'])
if index == 0:
self.VistA.write('?\r')
break
elif index == 1:
self.VistA.write('')
elif index == 2:
self.VistA.write('No')
self.VistA.write('Quit')
self.VistA.wait('')
def makeapp_var(self, clinic, patient, datetime, fresh=None, nextaval=None):
'''Makes Appointment for clinic that supports variable length appts (CLInicA)'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
self.VistA.write('CL')
self.VistA.wait('Select Clinic:')
self.VistA.write(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
if fresh is not None:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('REQUEST')
if nextaval is not None:
self.VistA.write('No')
self.VistA.wait('APPOINTMENT')
else:
self.VistA.write('Yes')
self.VistA.wait('DATE/TIME')
self.VistA.write(datetime)
if 't+122' in datetime:
self.VistA.wait('Add to EWL')
self.VistA.write('Yes')
else:
self.VistA.wait('LENGTH OF APPOINTMENT')
self.VistA.write('15')
self.VistA.wait('increment minutes per hour')
self.VistA.wait('LENGTH OF APPOINTMENT')
self.VistA.write('60')
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('continue')
self.VistA.write('')
index = self.VistA.multiwait(['Select Action:','APPOINTMENT LETTER'])
if index == 1:
self.VistA.write('No')
self.VistA.wait('Select Action')
self.VistA.write('Quit')
self.VistA.wait('')
def set_mademographics(self, clinic, patient, datetime, dgrph, CLfirst=None):
''' Sets demographics via the MA action. Not used; kept for reference only. This test crashes on SAVE under GT.M.'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
if CLfirst is not None:
self.VistA.write('CL')
self.VistA.wait('Select Clinic:')
self.VistA.write(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
else:
self.VistA.write('MA')
self.VistA.wait('Select CLINIC:')
self.VistA.write(clinic)
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
for wwset in dgrph:
self.VistA.wait(wwset[0])
self.VistA.write(wwset[1])
self.VistA.wait('REQUEST?')
self.VistA.write('yes')
self.VistA.wait('DATE/TIME:')
self.VistA.write(datetime)
rval = self.VistA.multiwait(['LENGTH OF APPOINTMENT', 'CORRECT'])
if rval == 0:
self.VistA.write('')
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
elif rval == 1:
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
if CLfirst is not None:
self.VistA.wait('Select Action:')
else:
self.VistA.wait('Select CLINIC:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
def fix_demographics(self, clinic, patient, dgrph):
''' This test sets demographics via PD action. This is an alternate implementation of set_mademographics()'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
self.VistA.write('PD')
for wwset in dgrph:
self.VistA.wait(wwset[0])
self.VistA.write(wwset[1])
def set_demographics(self, clinic, patient, dgrph, emailAddress=None, CLfirst=None, patidx=None):
'''
This sets demographics via PD action and has an option to select the clinic
before setting demographics for a patient via a patient index (patidx) argument.
'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
if CLfirst is not None:
self.VistA.wait('Select Action:')
self.VistA.write('CL')
self.VistA.wait('Select Clinic:')
self.VistA.write(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('PD')
self.VistA.wait('Select Appointments')
self.VistA.write(patidx)
else:
self.VistA.wait('Select Action:')
self.VistA.write('PD')
for wwset in dgrph:
if type(wwset[0]) is list:
index = self.VistA.multiwait(wwset[0])
self.VistA.write(wwset[1][index])
else:
self.VistA.wait(wwset[0])
self.VistA.write(wwset[1])
index = self.VistA.multiwait(['DOES THE PATIENT','EMAIL ADDRESS'])
if index == 0:
if emailAddress != None :
self.VistA.write('Y')
self.VistA.wait('EMAIL ADDRESS')
self.VistA.write(emailAddress)
else:
self.VistA.write('N')
else:
if emailAddress != None :
self.VistA.write(emailAddress)
else:
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
def get_demographics(self, patient, vlist, emailAddress=None):
'''This gets the patient demographics via the PD action.'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
self.VistA.write('PD')
for wwset in vlist:
if type(wwset[0]) is list:
index = self.VistA.multiwait(wwset[0])
self.VistA.write(wwset[1][index])
else:
self.VistA.wait(wwset[0])
self.VistA.write(wwset[1])
index = self.VistA.multiwait(['DOES THE PATIENT','EMAIL ADDRESS'])
if index == 0:
if emailAddress != None:
self.VistA.write('Y')
self.VistA.wait(emailAddress)
self.VistA.write('')
else:
self.VistA.write('N')
else:
self.VistA.write('')
self.VistA.wait('Select Action')
self.VistA.write('Quit')
self.VistA.wait('')
def verapp_bypat(self, patient, vlist, ALvlist=None, EPvlist=None, COnum=None, CInum=None):
'''Verify previous Appointment for specified user at specified time.'''
self.VistA.wait('Clinic name:')
self.VistA.write(patient) # <--- by patient
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
for vitem in vlist:
self.VistA.wait(vitem)
if ALvlist is not None:
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
for vitem in ALvlist:
self.VistA.wait(vitem)
if EPvlist is not None:
self.VistA.wait('Select Action:')
self.VistA.write('EP')
self.VistA.wait('Select Appointment(s):')
self.VistA.write('1')
for vitem in EPvlist:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('^')
if COnum is not None:
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('FU')
self.VistA.wait('Select Action:')
self.VistA.write('CO')
if COnum[0] != '1':
self.VistA.wait('Select Appointment(s):')
self.VistA.write(COnum[1])
self.VistA.wait('It is too soon to check out this appointment')
self.VistA.write('')
if CInum is not None:
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('FU')
self.VistA.wait('Select Action:')
self.VistA.write('CI')
if CInum[0] != '1':
self.VistA.wait('Select Appointment(s):')
self.VistA.write(CInum[1])
self.VistA.wait('It is too soon to check in this appointment')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
def verapp(self, clinic, vlist, COnum=None, CInum=None):
'''Verify previous Appointments by clinic and with CI/CO check '''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('CD')
self.VistA.wait('Select Beginning Date:')
self.VistA.write('')
self.VistA.wait('Ending Date:')
self.VistA.write('t+100')
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
for vitem in vlist:
self.VistA.wait(vitem)
if COnum is not None:
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('FU')
self.VistA.wait('Select Action:')
self.VistA.write('CO')
if COnum[0] != '1':
self.VistA.wait('Select Appointment(s):')
self.VistA.write(COnum[1])
rval = self.VistA.multiwait(['It is too soon to check out this appointment',
'You can not check out this appointment'])
if rval == 0:
self.VistA.write('')
elif rval == 1:
self.VistA.write('')
else:
self.VistA.wait('SPECIALERROR, rval: ' + str(rval)) # this should cause a timeout
if CInum is not None:
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('FU')
self.VistA.wait('Select Action:')
self.VistA.write('CI')
if CInum[0] != '1':
self.VistA.wait('Select Appointment(s):')
self.VistA.write(CInum[1])
self.VistA.wait('It is too soon to check in this appointment')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
def ver_actions(self, clinic, patient, PRvlist, DXvlist, CPvlist):
''' verify action in menu, patient must be checked out'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
# EC
self.VistA.wait('Select Action:')
self.VistA.write('EC')
self.VistA.wait('Select Appointment(s)')
self.VistA.write('2')
self.VistA.wait('to continue')
self.VistA.write('')
self.VistA.wait('Select Action:')
# RT
self.VistA.write('RT')
for vitem in ['Chart Request', 'Fill Next Clinic Request', 'Profile of Charts', 'Recharge a Chart']:
self.VistA.wait(vitem)
self.VistA.wait('Select Record Tracking Option:')
self.VistA.write('^')
# PR
self.VistA.wait('Select Action:')
self.VistA.write('PR')
self.VistA.wait('CHOOSE 1-2:')
self.VistA.write('1')
self.VistA.wait('Select Appointment(s):')
self.VistA.write('1')
for vitem in PRvlist:
self.VistA.wait(vitem)
self.VistA.wait('Enter PROVIDER:')
self.VistA.write('')
self.VistA.wait('for this ENCOUNTER')
self.VistA.write('')
self.VistA.wait('Enter PROVIDER:')
self.VistA.write('')
# DX
self.VistA.wait('Select Action:')
self.VistA.write('DX')
self.VistA.wait('Select Appointment(s):')
self.VistA.write('1')
for vitem in DXvlist:
self.VistA.wait(vitem)
self.VistA.wait('Diagnosis :')
self.VistA.write('')
self.VistA.wait('Problem List')
self.VistA.write('no')
# CP
self.VistA.wait('Select Action:')
self.VistA.write('CP')
self.VistA.wait('Select Appointment(s):')
self.VistA.write('1')
for vitem in CPvlist:
self.VistA.wait(vitem)
self.VistA.wait('Enter PROCEDURE')
self.VistA.write('')
# PC
self.VistA.wait('Select Action:')
self.VistA.write('PC')
self.VistA.multiwait(['to continue','is locked'])
self.VistA.write('')
def use_sbar(self, clinic, patient, fresh=None):
'''Use the space bar to get previous clinic or patient '''
self.VistA.wait('Clinic name:')
self.VistA.write(' ') # spacebar to test recall
self.VistA.wait(patient) # check to make sure expected patient SSN is recalled
self.VistA.write('No')
self.VistA.wait(clinic) # check to make sure expected clinic is recalled
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('Select PATIENT NAME:')
self.VistA.write(' ') # spacebar to test recall
self.VistA.wait(patient) # check to make sure expected patient SSN is recalled
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
if fresh is not None:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('REQUEST')
self.VistA.write('Yes')
self.VistA.wait('DATE/TIME')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
def canapp(self, clinic, mult=None, future=None, rebook=None):
'''Cancel an Appointment; if there are multiple appts on the schedule, pass the appointment number string via the "mult" parameter'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+100')
self.VistA.wait('Select Action:')
self.VistA.write('AL')
if future is None:
self.VistA.wait('Select List:')
self.VistA.write('TA')
else:
self.VistA.wait('Select List:')
self.VistA.write('FU')
self.VistA.wait('Select Action:')
self.VistA.write('CA')
if mult is not None:
# If there are more than 1 appointments
self.VistA.wait('Select Appointment')
self.VistA.write(mult)
self.VistA.wait('linic:')
self.VistA.write('Clinic')
self.VistA.wait('REASONS NAME')
self.VistA.write('Clinic Cancelled')
self.VistA.wait('REMARKS:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
if rebook is None:
self.VistA.wait('CANCELLED')
self.VistA.write('no')
self.VistA.wait('CANCELLED')
self.VistA.write('')
else:
self.VistA.wait('CANCELLED')
self.VistA.write('yes')
self.VistA.wait('OUTPUT REBOOKED APPT')
self.VistA.write('')
self.VistA.wait('TO BE REBOOKED:')
self.VistA.write('1')
self.VistA.wait('FROM WHAT DATE:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('CONTINUE')
self.VistA.write('')
self.VistA.wait('PRINT LETTERS FOR THE CANCELLED APPOINTMENT')
self.VistA.write('')
self.VistA.wait('exit:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('')
def noshow(self, clinic, appnum):
'''Registers a patient as a no show'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('NS')
self.VistA.wait('Select Appointment')
self.VistA.write(appnum)
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('NOW')
self.VistA.write('')
self.VistA.wait('NOW')
self.VistA.write('')
self.VistA.wait('exit:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('')
def checkin(self, clinic, vlist, mult=None):
'''Checks a patient in'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
self.VistA.wait('Select Action:')
self.VistA.write('CI')
if mult is not None:
self.VistA.wait('Appointment')
self.VistA.write(mult)
for vitem in vlist:
self.VistA.wait_re(vitem)
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('')
def checkout(self, clinic, vlist1, vlist2, icd, icd10, mult=None):
'''Checks a Patient out'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
self.VistA.wait('Select Action:')
self.VistA.write('CO')
if mult is not None:
self.VistA.wait('Appointment')
self.VistA.write(mult)
for vitem in vlist1:
self.VistA.wait(vitem)
self.VistA.wait('appointment')
self.VistA.write('No')
self.VistA.wait('date and time:')
self.VistA.write('Now')
self.VistA.wait('PROVIDER:')
self.VistA.write('Alexander')
self.VistA.wait('ENCOUNTER')
self.VistA.write('Yes')
self.VistA.wait('PROVIDER')
self.VistA.write('')
self.VistA.wait('Diagnosis')
self.VistA.write(icd)
index = self.VistA.multiwait(['No records','OK'])
if index == 0:
self.VistA.write(icd10)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('ENCOUNTER')
self.VistA.write('Yes')
self.VistA.wait('Resulting:')
self.VistA.write('R')
for vitem in vlist2:
self.VistA.wait(vitem)
self.VistA.wait('Diagnosis')
self.VistA.write('')
self.VistA.wait('Problem List')
self.VistA.write('No')
self.VistA.wait('PROCEDURE')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('screen')
self.VistA.write('No')
self.VistA.wait('Clinic:')
self.VistA.write('')
def unschvisit(self, clinic, patient, patientname):
'''Makes a walk-in appointment. Automatically checks in'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('UN')
self.VistA.wait('Select Patient:')
self.VistA.write(patient)
self.VistA.wait('TIME:')
self.VistA.write('')
self.VistA.wait('TYPE:')
self.VistA.write('Regular')
self.VistA.wait('continue:')
self.VistA.write('')
index = self.VistA.multiwait(['Check Out:','ROUTING SLIP'])
if index == 1:
self.VistA.write('N')
self.VistA.wait('Check Out')
self.VistA.write('CI')
self.VistA.wait_re('CHECKED')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('SLIP NOW')
self.VistA.write('No')
self.VistA.wait(patientname)
self.VistA.wait('Checked In')
self.VistA.wait('Select Action')
self.VistA.write('')
def chgpatient(self, clinic, patient1, patient2, patientname1, patientname2):
'''Changes the patient between patient 1 and patient 2'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('PT')
self.VistA.wait('Patient:')
self.VistA.write(patient1)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait(patientname1.upper())
self.VistA.wait('Select Action:')
self.VistA.write('PT')
self.VistA.wait('Patient:')
self.VistA.write(patient2)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait(patientname2.upper())
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
def chgclinic(self):
'''Changes the clinic from clinic1 to clinic2'''
self.VistA.wait('Clinic name:')
self.VistA.write('Clinic1')
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Clinic1')
self.VistA.wait('Select Action:')
self.VistA.write('CL')
self.VistA.wait('Select Clinic:')
self.VistA.write('Clinic2')
self.VistA.wait('Clinic2')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
def chgdaterange(self, clinic):
'''Changes the date range of the clinic'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('CD')
self.VistA.wait('Date:')
self.VistA.write('t+7')
self.VistA.wait('Date:')
self.VistA.write('t+7')
self.VistA.wait('Select Action:')
self.VistA.write('CD')
self.VistA.wait('Date:')
self.VistA.write('t-4')
self.VistA.wait('Date:')
self.VistA.write('t+4')
self.VistA.wait('Select Action:')
self.VistA.write('')
def expandentry(self, clinic, vlist1, vlist2, vlist3, vlist4, vlist5, mult=None):
'''Expands an appointment entry for more detail'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('AL')
self.VistA.wait('Select List:')
self.VistA.write('TA')
self.VistA.wait('Select Action:')
self.VistA.write('EP')
if mult is not None:
self.VistA.wait('Appointment')
self.VistA.write(mult)
for vitem in vlist1:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('')
for vitem in vlist2:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('')
for vitem in vlist3:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('')
for vitem in vlist4:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('')
for vitem in vlist5:
self.VistA.wait(vitem)
self.VistA.wait('Select Action:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('')
def addedit(self, clinic, name, icd, icd10):
'''
Functional but not complete. Exercises the Add/Edit menu but doesn't make any changes
Same problem as checkout with the CPT codes and the MPI
'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('AE')
self.VistA.wait('Name:')
self.VistA.write(name)
self.VistA.wait('exit:')
self.VistA.write('A')
self.VistA.wait('Clinic:')
self.VistA.write(clinic)
self.VistA.wait('Time:')
time = self.schtime()
self.VistA.write(time)
self.VistA.wait('APPOINTMENT TYPE:')
self.VistA.write('')
self.VistA.wait('PROVIDER:')
self.VistA.write('Alexander')
self.VistA.wait('ENCOUNTER')
self.VistA.write('Yes')
self.VistA.wait('Enter PROVIDER:')
self.VistA.write('')
self.VistA.wait('Diagnosis')
self.VistA.write(icd)
index = self.VistA.multiwait(['No records','Ok'])
if index == 0:
self.VistA.write(icd10)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('ENCOUNTER')
self.VistA.write('Yes')
self.VistA.wait('Resulting')
self.VistA.write('R')
self.VistA.wait('Diagnosis')
self.VistA.write('')
self.VistA.wait('Problem List')
self.VistA.write('')
self.VistA.wait('CPT CODE')
self.VistA.write('')
self.VistA.wait('encounter')
self.VistA.write('Yes')
self.VistA.wait('Select Action:')
self.VistA.write('')
def patdem(self, clinic, name, mult=None):
'''This edits the patient's demographic information'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait(clinic)
self.VistA.wait('Select Action:')
self.VistA.write('PD')
if mult is not None:
self.VistA.wait('Appointment')
self.VistA.write(mult)
self.VistA.wait(name)
self.VistA.wait('COUNTRY:')
self.VistA.write('')
self.VistA.wait('ADDRESS')
self.VistA.write('')
self.VistA.wait(':')
self.VistA.write('')
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('INDICATOR:')
self.VistA.write('')
self.VistA.wait('changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('SEX:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','INFORMATION:'])
if index==0:
self.VistA.write("N")
quickIndex = self.VistA.multiwait(['LANGUAGE DATE',"PREFERRED LANGUAGE"])
if quickIndex==0:
self.VistA.write("")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('INFORMATION')
self.VistA.write('N')
self.VistA.wait('INFORMATION:')
self.VistA.write('W')
self.VistA.wait('RACE INFORMATION')
self.VistA.write('Yes')
self.VistA.wait('INFORMATION:')
self.VistA.write('')
self.VistA.wait('STATUS:')
self.VistA.write('Married')
self.VistA.wait('PREFERENCE:')
self.VistA.write('')
self.VistA.wait('ACTIVE')
self.VistA.write('No')
self.VistA.wait('NUMBER')
self.VistA.write('')
self.VistA.wait('NUMBER')
self.VistA.write('')
index = self.VistA.multiwait(['DOES THE','ADDRESS'])
if index == 0:
self.VistA.write('Y')
self.VistA.wait('EMAIL ADDRESS')
self.VistA.write('[email protected]')
self.VistA.wait('Select Action')
self.VistA.write('')
def teaminfo(self, clinic, patient=None):
'''This checks the display team info feature'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('TI')
index = self.VistA.multiwait(['Select Patient','Select Action'])
if index == 0:
if patient is not None:
self.VistA.write(patient)
self.VistA.wait('Team Information')
self.VistA.write('')
else:
self.VistA.write("")
self.VistA.wait('Select Action:')
self.VistA.write('')
def enroll(self, clinic, patient):
'''This enrolls a patient as an inpatient in a clinic'''
self.VistA.wait('OPTION NAME')
self.VistA.write('Appointment Menu')
self.VistA.wait('Appointment Menu')
self.VistA.write('Edit Clinic Enrollment Data')
self.VistA.wait('PATIENT NAME')
self.VistA.write(patient)
self.VistA.wait('CLINIC:')
self.VistA.write(clinic)
self.VistA.wait('ENROLLMENT CLINIC')
self.VistA.write('Yes')
self.VistA.wait('ENROLLMENT:')
self.VistA.write('t')
self.VistA.wait('Are you adding')
self.VistA.write('Yes')
self.VistA.wait('AC:')
self.VistA.write('OPT')
self.VistA.wait('DATE:')
self.VistA.write('')
self.VistA.wait('DISCHARGE:')
self.VistA.write('')
self.VistA.wait('DISCHARGE')
self.VistA.write('')
self.VistA.wait('CLINIC:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('ENROLLMENT')
self.VistA.write('')
self.VistA.wait('ENROLLMENT')
self.VistA.write('')
self.VistA.wait('AC:')
self.VistA.write('')
self.VistA.wait('DATE:')
self.VistA.write('')
self.VistA.wait('DISCHARGE')
self.VistA.write('')
self.VistA.wait('DISCHARGE')
self.VistA.write('')
self.VistA.wait('CLINIC')
self.VistA.write('')
self.VistA.wait('NAME:')
self.VistA.write('')
self.VistA.wait('Appointment Menu')
self.VistA.write('')
self.VistA.wait('halt')
self.VistA.write('')
def discharge(self, clinic, patient, appnum=None):
'''Discharges a patient from the clinic'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('DC')
if appnum is not None:
self.VistA.wait('Select Appointment')
self.VistA.write(appnum)
self.VistA.wait('Discharging patient from')
self.VistA.wait('DATE OF DISCHARGE:')
self.VistA.write('t')
self.VistA.wait('REASON FOR DISCHARGE')
self.VistA.write('testing')
self.VistA.wait('Action:')
self.VistA.write('')
def deletecheckout(self, clinic, appnum=None):
'''
Deletes checkout from the menu
Must be signed in as fakedoc1 (1Doc!@#$)
Must have the SD SUPERVISOR Key assigned to Dr. Alexander
'''
self.VistA.wait('Scheduling Manager\'s Menu')
self.VistA.write('Appointment Menu')
self.VistA.wait('Appointment Menu')
self.VistA.write('Appointment Management')
self.VistA.wait('Clinic name')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Action:')
self.VistA.write('DE')
if appnum is not None:
self.VistA.wait('Select Appointment')
self.VistA.write(appnum)
self.VistA.wait('check out')
self.VistA.write('Yes')
self.VistA.wait('deleting')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('deleting check out')
self.VistA.wait('exit:')
self.VistA.write('')
self.VistA.wait('Action:')
self.VistA.write('')
def waitlistentry(self, clinic, patient):
'''
Enters a patient into the wait list
This assumes that SDWL PARAMETER and SDWL MENU
keys are given to fakedoc1
'''
self.VistA.wait('Scheduling Manager\'s Menu')
self.VistA.write('Appointment Menu')
self.VistA.wait('Appointment Menu')
self.VistA.write('Appointment Management')
self.VistA.wait('name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Action:')
self.VistA.write('WE')
self.VistA.wait('NAME:')
self.VistA.write(patient)
self.VistA.wait('Patient')
self.VistA.write('Yes')
self.VistA.wait('response:')
# TODO: Explore all three options (PCMM TEAM ASSIGNMENT, SERVICE/SPECIALTY, SPECIFIC CLINIC)
self.VistA.write('1')
self.VistA.wait('Institution:')
self.VistA.write('1327')
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Team:')
self.VistA.write('1')
self.VistA.wait('OK')
self.VistA.write('yes')
self.VistA.wait('Comments:')
self.VistA.write('test')
self.VistA.wait('Action:')
self.VistA.write('')
def waitlistdisposition(self, clinic, patient):
'''This verifies that the wait list disposition option is working'''
self.VistA.wait('Option:')
self.VistA.write('Appointment Management')
self.VistA.wait('name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Action:')
self.VistA.write('WD')
self.VistA.wait('PATIENT:')
self.VistA.write(patient)
self.VistA.wait('Quit')
self.VistA.write('Yes')
# TODO: For deeper coverage, execute all 6 disposition reasons
self.VistA.wait('response:')
self.VistA.write('D')
self.VistA.wait('removed from Wait List')
self.VistA.wait('exit:')
self.VistA.write('')
self.VistA.wait('no Wait List')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('')
def gotoApptMgmtMenu(self):
'''
Get to Appointment Management Menu via ZU
'''
self.VistA.wait('Scheduling Manager\'s Menu')
self.VistA.write('Appointment Menu')
self.VistA.wait('Appointment Menu')
self.VistA.write('Appointment Management')
def multiclinicdisplay(self, cliniclist, patient, timelist, pending=None):
'''
Create multiple clinic appointments
'''
self.VistA.wait('Scheduling Manager\'s Menu')
self.VistA.write('Appointment Menu')
self.VistA.wait('Appointment Menu')
self.VistA.write('Multiple Clinic Display')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
if pending:
self.VistA.wait('DISPLAY PENDING APPOINTMENTS')
self.VistA.write('')
self.VistA.wait('DISPLAY PENDING APPOINTMENTS')
self.VistA.write('')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
for clinic in cliniclist:
self.VistA.wait('Select CLINIC')
self.VistA.write(clinic)
self.VistA.wait('Select CLINIC:')
self.VistA.write('')
self.VistA.wait('OK to proceed')
self.VistA.write('Yes')
self.VistA.wait('LOOK FOR CLINIC AVAILABILITY STARTING WHEN:')
self.VistA.write('t+1')
self.VistA.wait('SELECT LATEST DATE TO CHECK FOR AVAILABLE SLOTS:')
self.VistA.write('t+10')
self.VistA.wait('REDISPLAY:')
self.VistA.write('B')
for ptime in timelist:
self.VistA.wait('SCHEDULE TIME:')
self.VistA.write(ptime)
rval = self.VistA.multiwait(['APPOINTMENT TYPE:', '...OK'])
if rval == 0:
self.VistA.write('Regular')
elif rval == 1:
self.VistA.write('Yes')
self.VistA.wait('APPOINTMENT TYPE:')
self.VistA.write('Regular')
self.VistA.wait('OR EKG STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
self.VistA.wait('Press RETURN to continue:')
self.VistA.write('')
self.VistA.wait('Select PATIENT NAME:')
self.VistA.write('')
self.VistA.wait('Appointment Menu')
self.VistA.write('')
def ma_clinicchk(self, clinic, patient, exp_apptype, datetime, cslots, cxrays, fresh=None, cvar=None, elig=None):
'''Makes Appointment to check clinic settings'''
self.VistA.wait('Clinic name:')
self.VistA.write(clinic)
self.VistA.wait('OK')
self.VistA.write('Yes')
self.VistA.wait('Date:')
self.VistA.write('')
self.VistA.wait('Date:')
self.VistA.write('t+1')
self.VistA.wait('Select Action:')
self.VistA.write('MA')
self.VistA.wait('PATIENT NAME:')
self.VistA.write('??')
self.VistA.multiwait(['TO STOP','to exit'])
self.VistA.write('^')
self.VistA.wait('PATIENT NAME:')
self.VistA.write(patient)
self.VistA.wait('APPOINTMENT TYPE: ' + exp_apptype)
self.VistA.write('REGULAR')
if fresh is not None:
self.VistA.wait('APPOINTMENTS:')
self.VistA.write('Yes')
self.VistA.wait('ETHNICITY:')
self.VistA.write('')
self.VistA.wait('RACE:')
self.VistA.write('')
index = self.VistA.multiwait(['LANGUAGE DATE','COUNTRY:'])
if index==0:
self.VistA.write("N")
self.VistA.wait("PREFERRED LANGUAGE")
self.VistA.write("")
self.VistA.wait('COUNTRY')
self.VistA.write('')
self.VistA.wait('STREET ADDRESS')
self.VistA.write('')
self.VistA.wait('ZIP')
self.VistA.write('')
for x in range(0, 2):
self.VistA.wait('PHONE NUMBER')
self.VistA.write('')
self.VistA.wait('BAD ADDRESS')
self.VistA.write('')
self.VistA.wait('above changes')
self.VistA.write('No')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('APPOINTMENT REQUEST')
self.VistA.write('Yes')
self.VistA.wait(cslots)
self.VistA.wait('DATE/TIME')
self.VistA.write('t+5')
self.VistA.wait('DATE/TIME')
self.VistA.write(datetime)
if cvar is not None:
self.VistA.wait('LENGTH OF APPOINTMENT')
self.VistA.write('')
self.VistA.wait('CORRECT')
self.VistA.write('Yes')
self.VistA.wait('STOPS')
self.VistA.write('No')
self.VistA.wait('OTHER INFO:')
self.VistA.write('')
if elig is not None and self.VistA.type == 'cache':
self.VistA.wait('ENTER THE ELIGIBILITY FOR THIS APPOINTMENT:')
self.VistA.write('')
self.VistA.wait('continue:')
self.VistA.write('')
self.VistA.wait('Select Action:')
self.VistA.write('Quit')
self.VistA.wait('')
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, no-member
"""ctypes library of mxnet and helper functions."""
from __future__ import absolute_import
import sys
import ctypes
import atexit
import warnings
import inspect
import numpy as np
from . import libinfo
warnings.filterwarnings('default', category=DeprecationWarning)
__all__ = ['MXNetError']
#----------------------------
# library loading
#----------------------------
if sys.version_info[0] == 3:
string_types = str,
numeric_types = (float, int, np.generic)
integer_types = int
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
py_str = lambda x: x.decode('utf-8')
else:
string_types = basestring,
numeric_types = (float, int, long, np.generic)
integer_types = (int, long)
py_str = lambda x: x
class _NullType(object):
"""Placeholder for arguments"""
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
"""Error that will be throwed by all mxnet functions."""
pass
class NotImplementedForSymbol(MXNetError):
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
class MXCallbackList(ctypes.Structure):
"""Structure that holds Callback information. Passed to CustomOpProp."""
_fields_ = [
('num_callbacks', ctypes.c_int),
('callbacks', ctypes.POINTER(ctypes.CFUNCTYPE(ctypes.c_int))),
('contexts', ctypes.POINTER(ctypes.c_void_p))
]
def _load_lib():
"""Load library by searching possible path."""
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# declare return type for the error-message accessor
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
# version number
__version__ = libinfo.__version__
# library instance of mxnet
_LIB = _load_lib()
# type definitions
mx_uint = ctypes.c_uint
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
#----------------------------
# helper function definition
#----------------------------
def check_call(ret):
"""Check the return value of C API call.
This function will raise an exception when an error occurs.
Wrap every API call with this function.
Parameters
----------
ret : int
return value from API calls.
"""
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
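# Example of the wrapping described above (this mirrors how the module itself
# uses check_call, e.g. in _notify_shutdown further down):
#   check_call(_LIB.MXNotifyShutdown())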
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string)
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print len(x)
3
>>> x[1]
2.0
"""
return (ctype * len(values))(*values)
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type.
Parameters
----------
cptr : ctypes.POINTER(ctypes.c_char)
Pointer to the raw memory region.
length : int
The length of the buffer.
Returns
-------
buffer : bytearray
The raw byte memory buffer.
"""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
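# A small sketch of ctypes2buffer (values are illustrative only):
#   raw = ctypes.create_string_buffer(b'abc', 3)
#   cptr = ctypes.cast(raw, ctypes.POINTER(ctypes.c_char))
#   ctypes2buffer(cptr, 3)        # -> bytearray(b'abc')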
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
"""
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape)
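# A small sketch of ctypes2numpy_shared (values are illustrative); the result
# is a float32 view that shares memory with the underlying ctypes array:
#   buf = (mx_float * 6)(0, 1, 2, 3, 4, 5)
#   arr = ctypes2numpy_shared(ctypes.cast(buf, mx_float_p), (2, 3))
#   arr.shape                     # -> (2, 3)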
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether to remove duplicates or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
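# A small sketch of the docstring fragment build_param_doc produces
# (argument values are illustrative only):
#   print(build_param_doc(['data', 'axis'],
#                         ['NDArray', 'int, optional'],
#                         ['Input data.', 'Axis along which to operate.']))
#   # Parameters
#   # ----------
#   # data : NDArray
#   #     Input data.
#   # axis : int, optional
#   #     Axis along which to operate.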
def _notify_shutdown():
"""Notify MXNet about a shutdown."""
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following codes at the end of a file
add_fileline_to_docstring(__name__)
"""
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
def _as_list(obj):
"""A utility function that converts the argument to a list if it is not already.
Parameters
----------
obj : object
Returns
-------
If `obj` is a list or tuple, return it. Otherwise, return `[obj]` as a
single-element list.
"""
if isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
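# Quick illustration of _as_list's behaviour:
#   _as_list(1) -> [1];  _as_list((1, 2)) -> (1, 2);  _as_list([1]) -> [1]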
"""IVR-based menuing system with retry, exit, and similar useful features
You use the menuing system by instantiating Interaction and Option sub-classes
as a tree of options that make up an IVR menu. Calling the top-level menu
produces a Deferred that fires with a list of [(Option,value),...] pairs,
where Option is the thing chosen and value is the value entered by the user
for choosing that option.
When programming an IVR you will likely want to make Option sub-classes that
are callable to accomplish the task indicated by the user.
XXX allow for starting the menu system anywhere in the hierarchy
XXX add the reject/accept menus to the CollectDigits (requires soundfiles
in standard locations on the server, complicates install)
"""
from twisted.application import service, internet
from twisted.internet import reactor, defer
from starpy import manager, fastagi, utilapplication, error
import os, logging, pprint, time
from basicproperty import common, propertied, basic
log = logging.getLogger( 'menu' )
log.setLevel( logging.DEBUG )
class Interaction( propertied.Propertied ):
"""Base class for user-interaction operations"""
ALL_DIGITS = '0123456789*#'
timeout = common.FloatProperty(
"timeout", """Duration to wait for response before repeating message""",
defaultValue = 5,
)
maxRepetitions = common.IntegerProperty(
"maxRepetitions", """Maximum number of times to play before failure""",
defaultValue = 5,
)
onSuccess = basic.BasicProperty(
"onSuccess", """Optional callback for success with signature method( result, runner )""",
)
onFailure = basic.BasicProperty(
"onFailure", """Optional callback for failure with signature method( result, runner )""",
)
runnerClass = None
def __call__( self, agi, *args, **named ):
"""Initiate AGI-based interaction with the user"""
return self.runnerClass( model=self,agi=agi )( *args, **named )
class Runner( propertied.Propertied ):
"""User's interaction with a given Interaction-type"""
agi = basic.BasicProperty(
"agi", """The AGI instance we use to communicate with the user""",
)
def defaultFinalDF( prop, client ):
"""Produce the default finalDF with onSuccess/onFailure support"""
df = defer.Deferred()
model = client.model
if hasattr( model, 'onSuccess' ):
log.debug( 'register onSuccess: %s', model.onSuccess )
df.addCallback( model.onSuccess, runner=client )
if hasattr( model, 'onFailure' ):
log.debug( 'register onFailure: %s', model.onFailure )
df.addErrback( model.onFailure, runner=client )
return df
finalDF = basic.BasicProperty(
"finalDF", """Final deferred we will callback/errback on success/failure""",
defaultFunction = defaultFinalDF,
)
del defaultFinalDF
alreadyRepeated = common.IntegerProperty(
"alreadyRepeated", """Number of times we've repeated the message...""",
defaultValue = 0,
)
model = basic.BasicProperty(
"model", """The data-model that we are presenting to the user (e.g. Menu)""",
)
def returnResult( self, result ):
"""Return result of deferred to our original caller"""
log.debug( 'returnResult: %s %s', self.model,result )
if not self.finalDF.called:
self.finalDF.debug = True
self.finalDF.callback( result )
else:
log.debug( 'finalDF already called, ignoring %s', result )
return result
def returnError( self, reason ):
"""Return failure of deferred to our original caller"""
log.debug( 'returnError: %s', self.model )
if not isinstance( reason.value, error.MenuExit ):
log.warn( """Failure during menu: %s""", reason.getTraceback())
if not self.finalDF.called:
self.finalDF.debug = True
self.finalDF.errback( reason )
else:
log.debug( 'finalDF already called, ignoring %s', reason.getTraceback() )
def promptAsRunner( self, prompt ):
"""Take set of prompt-compatible objects and produce a PromptRunner for them"""
realPrompt = []
for p in prompt:
if isinstance( p, (str,unicode)):
p = AudioPrompt( p )
elif isinstance( p, int ):
p = NumberPrompt( p )
elif not isinstance( p, Prompt ):
raise TypeError( """Unknown prompt element type on %r: %s"""%(
p, p.__class__,
))
realPrompt.append( p )
return PromptRunner(
elements = realPrompt,
escapeDigits = self.escapeDigits,
agi = self.agi,
timeout = self.model.timeout,
)
class CollectDigitsRunner( Runner ):
"""User's single interaction to enter a set of digits
Note: Asterisk is hard-coded to use # to exit the entry-mode...
"""
def __call__( self, *args, **named ):
"""Begin the AGI processing for the menu"""
self.readDigits()
return self.finalDF
def readDigits( self, result=None ):
"""Begin process of reading digits from the user"""
soundFile = getattr( self.model, 'soundFile', None )
if soundFile:
# easiest possibility, just read out the file...
return self.agi.getData(
soundFile, timeout=self.model.timeout,
maxDigits = getattr( self.model, 'maxDigits', None ),
).addCallback( self.onReadDigits ).addErrback( self.returnError )
else:
raise NotImplementedError( """Haven't got non-soundfile menus working yet""" )
# unreachable placeholder kept for reference:
# self.agi.getData( self.menu.filename, timeout=2.000, maxDigits=None )
def validEntry( self, digits ):
"""Determine whether given digits are considered a "valid" entry"""
minDigits = getattr( self.model, 'minDigits', None )
if minDigits is not None:
if len(digits) < minDigits:
return False, 'Too few digits'
return True, None
def onReadDigits( self, (digits,timeout) ):
"""Deal with succesful result from reading digits"""
log.info( """onReadDigits: %r, %s""", digits, timeout )
valid, reason = self.validEntry( digits )
if (not digits) and (not timeout):
# user pressed #
raise error.MenuExit(
self.model,
"""User cancelled entry of digits""",
)
if not valid:
if self.model.tellInvalid:
# this should be a menu, letting the user decide to re-enter,
# or cancel entry
pass
self.alreadyRepeated += 1
if self.alreadyRepeated >= self.model.maxRepetitions:
log.warn( """User did not complete digit-entry for %s, timing out""", self.model )
raise error.MenuTimeout(
self.model,
"""User did not finish digit-entry in %s passes of collection"""%(
self.alreadyRepeated,
)
)
return self.readDigits()
else:
# Yay, we got a valid response!
return self.returnResult( [(self, digits) ] )
class CollectPasswordRunner( CollectDigitsRunner ):
"""Password-runner, checks validity versus expected value"""
expected = common.StringLocaleProperty(
"expected", """The value expected/required from the user for this run""",
)
def __call__( self, expected, *args, **named ):
"""Begin the AGI processing for the menu"""
self.expected = expected
return super( CollectPasswordRunner, self ).__call__( *args, **named )
def validEntry( self, digits ):
"""Determine whether given digits are considered a "valid" entry"""
for digit in self.model.escapeDigits:
if digit in digits:
raise error.MenuExit(
self.model,
"""User cancelled entry of password""",
)
if digits != self.expected:
return False, "Password doesn't match"
return True, None
class CollectAudioRunner( Runner ):
"""Audio-collection runner, records user audio to a file on the asterisk server"""
escapeDigits = common.StringLocaleProperty(
"escapeDigits", """Set of digits which escape from recording""",
defaultFunction = lambda prop,client: client.model.escapeDigits,
setDefaultOnGet = False,
)
def __call__( self, *args, **named ):
"""Begin the AGI processing for the menu"""
self.readPrompt()
return self.finalDF
def readPrompt( self, result=None ):
"""Begin process of reading audio from the user"""
if self.model.prompt:
# wants us to read a prompt to the user before recording...
runner = self.promptAsRunner( self.model.prompt )
runner.timeout = 0.1
return runner().addCallback( self.onReadPrompt ).addErrback( self.returnError )
else:
return self.collectAudio().addErrback( self.returnError )
def onReadPrompt( self, result ):
"""We've finished reading the prompt to the user, check for escape"""
log.info( 'Finished reading prompt for collect audio: %r', result )
if result and result in self.escapeDigits:
raise error.MenuExit(
self.model,
"""User cancelled entry of audio during prompt""",
)
else:
return self.collectAudio()
def collectAudio( self ):
"""We're supposed to record audio from the user with our model's parameters"""
# XXX use a temporary file for recording the audio, then move to final destination
log.debug( 'collectAudio' )
if hasattr( self.model, 'temporaryFile' ):
filename = self.model.temporaryFile
else:
filename = self.model.filename
df = self.agi.recordFile(
filename=filename,
format=self.model.format,
escapeDigits=self.escapeDigits,
timeout=self.model.timeout,
offsetSamples=None,
beep=self.model.beep,
silence=self.model.silence,
).addCallbacks(
self.onAudioCollected, self.onAudioCollectFail,
)
if hasattr( self.model, 'temporaryFile' ):
df.addCallback( self.moveToFinal )
return df
def onAudioCollected( self, result ):
"""Process the results of collecting the audio"""
digits, typeOfExit, endpos = result
if typeOfExit in ('hangup','timeout'):
# expected common-case for recording...
return self.returnResult( (self,(digits,typeOfExit,endpos)) )
elif typeOfExit =='dtmf':
raise error.MenuExit(
self.model,
"""User cancelled entry of audio""",
)
else:
raise ValueError( """Unrecognised recordFile results: (%s, %s %s)"""%(
digits, typeOfExit, endpos,
))
def onAudioCollectFail( self, reason ):
"""Process failure to record audio"""
log.error(
"""Failure collecting audio for CollectAudio instance %s: %s""",
self.model, reason.getTraceback(),
)
return reason # re-raise the error...
def moveToFinal( self, result ):
"""On succesful recording, move temporaryFile to final file"""
log.info(
'Moving recorded audio %r to final destination %r',
self.model.temporaryFile, self.model.filename
)
import os
try:
os.rename(
'%s.%s'%(self.model.temporaryFile,self.model.format),
'%s.%s'%(self.model.filename,self.model.format),
)
except (OSError, IOError), err:
log.error(
"""Unable to move temporary recording file %r to target file %r: %s""",
self.model.temporaryFile, self.model.filename,
# XXX would like to use getException here...
err,
)
raise
return result
class MenuRunner( Runner ):
"""User's single interaction with a given menu"""
def defaultEscapeDigits( prop, client ):
"""Return the default escape digits for the given client"""
if client.model.tellInvalid:
escapeDigits = client.model.ALL_DIGITS
else:
escapeDigits = "".join( [o.option for o in client.model.options] )
return escapeDigits
escapeDigits = common.StringLocaleProperty(
"escapeDigits", """Set of digits which escape from prompts to choose option""",
defaultFunction = defaultEscapeDigits,
)
del defaultEscapeDigits # clean up namespace
def __call__( self, *args, **named ):
"""Begin the AGI processing for the menu"""
self.readMenu()
return self.finalDF
def readMenu( self, result=None ):
"""Read our menu to the user"""
runner = self.promptAsRunner( self.model.prompt )
return runner().addCallback( self.onReadMenu ).addErrback( self.returnError )
def onReadMenu( self, pressed ):
"""Deal with succesful result from reading menu"""
log.info( """onReadMenu: %r""", pressed )
if not pressed:
self.alreadyRepeated += 1
if self.alreadyRepeated >= self.model.maxRepetitions:
log.warn( """User did not complete menu selection for %s, timing out""", self.model )
if not self.finalDF.called:
raise error.MenuTimeout(
self.model,
"""User did not finish selection in %s passes of menu"""%(
self.alreadyRepeated,
)
)
return None
return self.readMenu()
else:
# Yay, we got an escape-key pressed
for option in self.model.options:
if pressed in option.option:
if callable( option ):
# allow for chaining down into sub-menus and the like...
# we return the result of calling the option via self.finalDF
return defer.maybeDeferred( option, pressed, self ).addCallbacks(
self.returnResult, self.returnError
)
elif hasattr(option, 'onSuccess' ):
return defer.maybeDeferred( option.onSuccess, pressed, self ).addCallbacks(
self.returnResult, self.returnError
)
else:
return self.returnResult( [(option,pressed),] )
# but it wasn't anything we expected...
if not self.model.tellInvalid:
raise error.MenuUnexpectedOption(
self.model, """User somehow selected %r, which isn't a recognised option?"""%(pressed,),
)
else:
return self.agi.getOption(
self.model.INVALID_OPTION_FILE, self.escapeDigits,
timeout=0,
).addCallback( self.readMenu ).addErrback( self.returnError )
class Menu( Interaction ):
"""IVR-based menu, returns options selected by the user and keypresses
The Menu holds a collection of Option instances along with a prompt
which presents those options to the user. The menu will attempt to
collect the user's selected option up to maxRepetitions times, playing
the prompt each time.
If tellInvalid is true, will allow any character being pressed to stop
the playback, and will tell the user if the pressed character is not
recognised. Otherwise will simply ignore a pressed character which isn't
part of an Option object's 'option' property.
The menu will chain into callable Options, so that SubMenu and ExitOn can
be used to produce effects such as multi-level menus with options to
return to the parent menu level.
Returns [(option,char(pressedKey))...] for each level of menu explored
"""
INVALID_OPTION_FILE = 'pm-invalid-option'
prompt = common.ListProperty(
"prompt", """(Set of) prompts to run, can be Prompt instances or filenames
Used by the PromptRunner to produce prompt selections
""",
)
textPrompt = common.StringProperty(
"textPrompt", """Textual prompt describing the option""",
)
options = common.ListProperty(
"options", """Set of options the user may select""",
)
tellInvalid = common.IntegerProperty(
"tellInvalid", """Whether to tell the user that their selection is unrecognised""",
defaultValue = True,
)
runnerClass = MenuRunner
class Option( propertied.Propertied ):
"""A single menu option that can be chosen by the user"""
option = common.StringLocaleProperty(
"option", """Keypad values which select this option (list of characters)""",
)
class SubMenu( Option ):
"""A menu-holding option, just forwards call to the held menu"""
menu = basic.BasicProperty(
"menu", """The sub-menu we are presenting to the user""",
)
def __call__( self, pressed, parent ):
"""Get result from the sub-menu, add ourselves into the result"""
def onResult( result ):
log.debug( """Child menu %s result: %s""", self.menu, result )
result.insert( 0, (self,pressed) )
return result
def onFailure( reason ):
"""Trap voluntary exit and re-start the parent menu"""
reason.trap( error.MenuExit )
log.warn( """Restarting parent menu: %s""", parent )
return parent.model( parent.agi )
return self.menu( parent.agi ).addCallbacks( onResult, onFailure )
class ExitOn( Option ):
"""An option which exits from the current menu level"""
def __call__( self, pressed, parent ):
"""Raise a MenuExit error"""
raise error.MenuExit(
self, pressed, parent, """User selected ExitOn option""",
)
class CollectDigits( Interaction ):
"""Collects some number of digits (e.g. an extension) from user"""
soundFile = common.StringLocaleProperty(
"soundFile", """File (name) for the pre-recorded blurb""",
)
textPrompt = common.StringProperty(
"textPrompt", """Textual prompt describing the option""",
)
readBack = common.BooleanProperty(
"readBack", """Whether to read the entered value back to the user""",
defaultValue = False,
)
minDigits = common.IntegerProperty(
"minDigits", """Minimum number of digits to collect (only restricted if specified)""",
)
maxDigits = common.IntegerProperty(
"maxDigits", """Maximum number of digits to collect (only restricted if specified)""",
)
runnerClass = CollectDigitsRunner
tellInvalid = common.IntegerProperty(
"tellInvalid", """Whether to tell the user that their selection is unrecognised""",
defaultValue = True,
)
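# --- Hedged usage sketch (not part of the original module) ---
# A minimal CollectDigits interaction for reading an extension.  The sound
# file name is hypothetical, and ``timeout``/``maxRepetitions`` are assumed
# to be properties of the Interaction base class (the runners above read
# them from ``self.model``).
def _exampleCollectExtension():
    return CollectDigits(
        soundFile='vm-extension',   # hypothetical prompt recording
        minDigits=2,
        maxDigits=5,
        timeout=5,
        maxRepetitions=3,
    )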
class CollectPassword( CollectDigits ):
"""Collects some number of password digits from the user"""
runnerClass = CollectPasswordRunner
escapeDigits = common.StringLocaleProperty(
"escapeDigits", """Set of digits which escape from password entry""",
defaultValue = '',
)
soundFile = common.StringLocaleProperty(
"soundFile", """File (name) for the pre-recorded blurb""",
defaultValue = 'vm-password',
)
class CollectAudio( Interaction ):
"""Collects audio file from the user"""
prompt = common.ListProperty(
"prompt", """(Set of) prompts to run, can be Prompt instances or filenames
Used by the PromptRunner to produce prompt selections
""",
)
textPrompt = common.StringProperty(
"textPrompt", """Textual prompt describing the option""",
)
temporaryFile = common.StringLocaleProperty(
"temporaryFile", """Temporary file into which to record the audio before moving to filename""",
)
filename = common.StringLocaleProperty(
"filename", """Final filename into which to record the file...""",
)
deleteOnFail = common.BooleanProperty(
"deleteOnFail", """Whether to delete failed attempts to record a file""",
defaultValue = True
)
escapeDigits = common.StringLocaleProperty(
"escapeDigits", """Set of digits which escape from recording the file""",
defaultValue = '#*0123456789',
)
timeout = common.FloatProperty(
"timeout", """Duration to wait for recording (maximum record time)""",
defaultValue = 60,
)
silence = common.FloatProperty(
"silence", """Duration to wait for recording (maximum record time)""",
defaultValue = 5,
)
beep = common.BooleanProperty(
"beep", """Whether to play a "beep" sound at beginning of recording""",
defaultValue = True,
)
runnerClass = CollectAudioRunner
class PromptRunner( propertied.Propertied ):
"""Prompt formed from list of sub-prompts
"""
elements = common.ListProperty(
"elements", """Sub-elements of the prompt to be presented""",
)
agi = basic.BasicProperty(
"agi", """The FastAGI instance we're controlling""",
)
escapeDigits = common.StringLocaleProperty(
"escapeDigits", """Set of digits which escape from playing the prompt""",
)
timeout = common.FloatProperty(
"timeout", """Timeout on data-entry after completed reading""",
)
def __call__( self ):
"""Return a deferred that chains all of the sub-prompts in order
Returns from the first of the sub-prompts that receives a selection
returns str(digit) for the key the user pressed
"""
return self.onNext( None )
def onNext( self, result, index=0 ):
"""Process the next operation"""
if result is not None:
return result
try:
element = self.elements[index]
except IndexError, err:
# okay, do a waitForDigit from timeout seconds...
return self.agi.waitForDigit( self.timeout ).addCallback(
self.processKey
).addCallback( self.processLast )
else:
df = element.read( self.agi, self.escapeDigits )
df.addCallback( self.processKey )
df.addCallback( self.onNext, index=index+1)
return df
def processKey( self, result ):
"""Does the pressed key belong to escapeDigits?"""
if isinstance( result, tuple ):
# getOption result...
if result[1] == 0:
# failure during load of the file...
log.warn( """Apparent failure during load of audio file: %s""", self.value )
result = 0
else:
result = result[0]
if isinstance( result, str ):
if result:
result = ord( result )
else:
result = 0
if result: # non-zero means a key was pressed (None or 0 means none)
# User pressed a key during the reading...
key = chr( result )
if key in self.escapeDigits:
log.info( 'Exiting early due to user press of: %r', key )
return key
else:
# we don't warn user in this menu if they press an unrecognised key!
log.info( 'Ignoring user keypress because not in escapeDigits: %r', key )
# completed reading without any escape digits, continue reading
return None
def processLast( self,result ):
if result is None:
result = ''
return result
class Prompt( propertied.Propertied ):
"""A Prompt to be read to the user"""
value = basic.BasicProperty(
"value", """Filename to be read to the user""",
)
def __init__( self, value, **named ):
named['value'] = value
super(Prompt,self).__init__( **named )
class AudioPrompt( Prompt ):
"""Default type of prompt, reads a file"""
def read( self, agi, escapeDigits ):
"""Read the audio prompt to the user"""
# There's no "say file" operation...
return agi.getOption( self.value, escapeDigits, 0.001 )
class TextPrompt( Prompt ):
"""Prompt produced via festival text-to-speech reader (built-in command)"""
def read( self, agi, escapeDigits ):
return agi.execute( "Festival", self.value, escapeDigits )
class NumberPrompt( Prompt ):
"""Prompt that reads a number as a number"""
value = common.IntegerProperty(
"value", """Integer numeral to read""",
)
def read( self, agi, escapeDigits ):
"""Read the audio prompt to the user"""
return agi.sayNumber( self.value, escapeDigits )
class DigitsPrompt( Prompt ):
"""Prompt that reads a number as digits"""
def read( self, agi, escapeDigits ):
"""Read the audio prompt to the user"""
return agi.sayDigits( self.value, escapeDigits )
class AlphaPrompt( Prompt ):
"""Prompt that reads alphabetic string as characters"""
def read( self, agi, escapeDigits ):
"""Read the audio prompt to the user"""
return agi.sayAlpha( self.value, escapeDigits )
class DateTimePrompt( Prompt ):
"""Prompt that reads a date/time as a date"""
format = basic.BasicProperty(
"format", """Format in which to read the date to the user""",
defaultValue = None
)
def read( self, agi, escapeDigits ):
"""Read the audio prompt to the user"""
return agi.sayDateTime( self.value, escapeDigits, format=self.format )
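# --- Hedged usage sketch (not part of the original module) ---
# Wiring a Menu into a FastAGI handler.  The prompt file names and option
# digits below are hypothetical; ``agi.finish()`` is assumed to exist on the
# FastAGI protocol object, and calling an interaction with an AGI instance
# is assumed to return a deferred (as SubMenu.__call__ above already relies on).
def _exampleMainMenu():
    return Menu(
        prompt=['main-menu'],               # hypothetical sound file
        options=[
            Option(option='1'),
            SubMenu(
                option='2',
                menu=Menu(prompt=['sub-menu'], options=[ExitOn(option='#')]),
            ),
            ExitOn(option='0'),
        ],
    )

def _exampleOnConnect(agi):
    """Run the example menu and log whatever the caller selected"""
    def onResult(result):
        log.info('Menu returned: %s', result)
        return agi.finish()
    return _exampleMainMenu()(agi).addCallbacks(onResult, log.error)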
|
|
"""
This module implements ipython_display
A function to embed images/videos/audio in the IPython Notebook
"""
# Notes:
# All media are physically embedded in the IPython Notebook
# (instead of simple links to the original files)
# That is because most browsers use a cache system and they won't
# properly refresh the media when the original files are changed.
import os
from base64 import b64encode
from moviepy.tools import extensions_dict
from ..VideoClip import VideoClip, ImageClip
from moviepy.audio.AudioClip import AudioClip
try:
from IPython.display import HTML
ipython_available = True
class HTML2(HTML):
def __add__(self, other):
return HTML2(self.data+other.data)
except ImportError:
ipython_available = False
from .ffmpeg_reader import ffmpeg_parse_infos
sorry = "Sorry, seems like your browser doesn't support HTML5 audio/video"
templates = {"audio":("<audio controls>"
"<source %(options)s src='data:audio/%(ext)s;base64,%(data)s'>"
+sorry+"</audio>"),
"image":"<img %(options)s "
"src='data:image/%(ext)s;base64,%(data)s'>",
"video":("<video %(options)s"
"src='data:video/%(ext)s;base64,%(data)s' controls>"
+sorry+"</video>")}
def html_embed(clip, filetype=None, maxduration=60, rd_kwargs=None,
center=True, **html_kwargs):
""" Returns HTML5 code embedding the clip
clip
Either a file name, or a clip to preview.
Either an image, a sound or a video. Clips will actually be
written to a file and embedded as if a filename was provided.
filetype
One of 'video','image','audio'. If None is given, it is determined
based on the extension of ``filename``, but this detection can fail.
rd_kwargs
keyword arguments for the rendering, like {'fps':15, 'bitrate':'50k'}
**html_kwargs
Allow you to give some options, like width=260, autoplay=True,
loop=1 etc.
Examples
=========
>>> import moviepy.editor as mpy
>>> # later ...
>>> clip.write_videofile("test.mp4")
>>> mpy.ipython_display("test.mp4", width=360)
>>> clip.audio.write_audiofile('test.ogg') # Sound !
>>> mpy.ipython_display('test.ogg')
>>> clip.write_gif("test.gif")
>>> mpy.ipython_display('test.gif')
>>> clip.save_frame("first_frame.jpeg")
>>> mpy.ipython_display("first_frame.jpeg")
"""
if rd_kwargs is None:
rd_kwargs = {}
if "Clip" in str(clip.__class__):
TEMP_PREFIX = "__temp__"
if isinstance(clip,ImageClip):
filename = TEMP_PREFIX+".png"
kwargs = {'filename':filename, 'withmask':True}
kwargs.update(rd_kwargs)
clip.save_frame(**kwargs)
elif isinstance(clip,VideoClip):
filename = TEMP_PREFIX+".mp4"
kwargs = {'filename':filename, 'verbose':False, 'preset':'ultrafast'}
kwargs.update(rd_kwargs)
clip.write_videofile(**kwargs)
elif isinstance(clip,AudioClip):
filename = TEMP_PREFIX+".mp3"
kwargs = {'filename': filename, 'verbose':False}
kwargs.update(rd_kwargs)
clip.write_audiofile(**kwargs)
else:
raise ValueError("Unknown class for the clip. Cannot embed and preview.")
return html_embed(filename, maxduration=maxduration, rd_kwargs=rd_kwargs,
center=center, **html_kwargs)
filename = clip
options = " ".join(["%s='%s'"%(str(k), str(v)) for k,v in html_kwargs.items()])
name, ext = os.path.splitext(filename)
ext = ext[1:]
if filetype is None:
ext = filename.split('.')[-1].lower()
if ext == "gif":
filetype = 'image'
elif ext in extensions_dict:
filetype = extensions_dict[ext]['type']
else:
raise ValueError("No file type is known for the provided file. Please provide "
"argument `filetype` (one of 'image', 'video', 'sound') to the "
"ipython display function.")
if filetype== 'video':
# The next lines set the HTML5-compatible extension and check that the
# extension is HTML5-valid
exts_htmltype = {'mp4': 'mp4', 'webm':'webm', 'ogv':'ogg'}
allowed_exts = " ".join(exts_htmltype.keys())
try:
ext = exts_htmltype[ext]
except KeyError:
raise ValueError("This video extension cannot be displayed in the "
"IPython Notebook. Allowed extensions: "+allowed_exts)
if filetype in ['audio', 'video']:
duration = ffmpeg_parse_infos(filename)['duration']
if duration > maxduration:
raise ValueError("The duration of video %s (%.1f) exceeds the 'maxduration' "%(filename, duration)+
"attribute. You can increase 'maxduration', "
"but note that embedding large videos may take all the memory away !")
with open(filename, "rb") as f:
data= b64encode(f.read()).decode("utf-8")
template = templates[filetype]
result = template%{'data':data, 'options':options, 'ext':ext}
if center:
result = r"<div align=middle>%s</div>"%result
return result
def ipython_display(clip, filetype=None, maxduration=60, t=None, fps=None,
rd_kwargs=None, center=True, **html_kwargs):
"""
clip
Either the name of a file, or a clip to preview. The clip will
actually be written to a file and embedded as if a filename was
provided.
filetype:
One of 'video','image','audio'. If None is given, it is determined
based on the extension of ``filename``, but this detection can fail.
maxduration
An error will be raised if the clip's duration is more than the indicated
value (in seconds), to avoid spoiling the browser's cache and the RAM.
t
If not None, only the frame at time t will be displayed in the notebook,
instead of a video of the clip
fps
Allows specifying an fps, as required for clips whose fps is unknown.
**html_kwargs:
Allow you to give some options, like width=260, etc. When editing
looping gifs, a good choice is loop=1, autoplay=1.
Remarks: If your browser doesn't support HTML5, this should warn you.
If nothing is displayed, maybe your file or filename is wrong.
Important: The media will be physically embedded in the notebook.
Examples
=========
>>> import moviepy.editor as mpy
>>> # later ...
>>> clip.write_videofile("test.mp4")
>>> mpy.ipython_display("test.mp4", width=360)
>>> clip.audio.write_audiofile('test.ogg') # Sound !
>>> mpy.ipython_display('test.ogg')
>>> clip.write_gif("test.gif")
>>> mpy.ipython_display('test.gif')
>>> clip.save_frame("first_frame.jpeg")
>>> mpy.ipython_display("first_frame.jpeg")
"""
if not ipython_available:
raise ImportError("Only works inside an IPython Notebook")
if rd_kwargs is None:
rd_kwargs = {}
if fps is not None:
rd_kwargs['fps'] = fps
if t is not None:
clip = clip.to_ImageClip(t)
return HTML2(html_embed(clip, filetype=filetype, maxduration=maxduration,
center=center, rd_kwargs=rd_kwargs, **html_kwargs))
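# --- Hedged usage sketch (not part of the original module) ---
# Typical notebook usage; assumes "test.mp4" exists, is shorter than the
# default 60 second ``maxduration``, and that moviepy.editor re-exports
# ``ipython_display`` as usual.
#
# from moviepy.editor import VideoFileClip, ipython_display
# clip = VideoFileClip("test.mp4").subclip(0, 5)
# ipython_display(clip, fps=15, rd_kwargs={'bitrate': '300k'}, width=360, autoplay=1)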
|
|
# START GENERATED CODE FOR ESCAPERS.
from __future__ import unicode_literals
import re
import urllib
try:
str = unicode
except NameError:
pass
def escape_uri_helper(v):
return urllib.quote(str(v), '')
_ESCAPE_MAP_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE = {
'\x00': '�',
'\x09': '	',
'\x0a': ' ',
'\x0b': '',
'\x0c': '',
'\x0d': ' ',
' ': ' ',
'\"': '"',
'&': '&',
'\'': ''',
'-': '-',
'/': '/',
'<': '<',
'=': '=',
'>': '>',
'`': '`',
'\x85': '…',
'\xa0': ' ',
'\u2028': '
',
'\u2029': '
'
}
def _REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE[ch]
_ESCAPE_MAP_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX = {
'\x00': '\\x00',
'\x08': '\\x08',
'\x09': '\\t',
'\x0a': '\\n',
'\x0b': '\\x0b',
'\x0c': '\\f',
'\x0d': '\\r',
'\"': '\\x22',
'$': '\\x24',
'&': '\\x26',
'\'': '\\x27',
'(': '\\x28',
')': '\\x29',
'*': '\\x2a',
'+': '\\x2b',
',': '\\x2c',
'-': '\\x2d',
'.': '\\x2e',
'/': '\\/',
':': '\\x3a',
'<': '\\x3c',
'=': '\\x3d',
'>': '\\x3e',
'?': '\\x3f',
'[': '\\x5b',
'\\': '\\\\',
']': '\\x5d',
'^': '\\x5e',
'{': '\\x7b',
'|': '\\x7c',
'}': '\\x7d',
'\x85': '\\x85',
'\u2028': '\\u2028',
'\u2029': '\\u2029'
}
def _REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX[ch]
_ESCAPE_MAP_FOR_ESCAPE_CSS_STRING = {
'\x00': '\\0 ',
'\x08': '\\8 ',
'\x09': '\\9 ',
'\x0a': '\\a ',
'\x0b': '\\b ',
'\x0c': '\\c ',
'\x0d': '\\d ',
'\"': '\\22 ',
'&': '\\26 ',
'\'': '\\27 ',
'(': '\\28 ',
')': '\\29 ',
'*': '\\2a ',
'/': '\\2f ',
':': '\\3a ',
';': '\\3b ',
'<': '\\3c ',
'=': '\\3d ',
'>': '\\3e ',
'@': '\\40 ',
'\\': '\\5c ',
'{': '\\7b ',
'}': '\\7d ',
'\x85': '\\85 ',
'\xa0': '\\a0 ',
'\u2028': '\\2028 ',
'\u2029': '\\2029 '
}
def _REPLACER_FOR_ESCAPE_CSS_STRING(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_ESCAPE_CSS_STRING[ch]
_ESCAPE_MAP_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI = {
'\x00': '%00',
'\x01': '%01',
'\x02': '%02',
'\x03': '%03',
'\x04': '%04',
'\x05': '%05',
'\x06': '%06',
'\x07': '%07',
'\x08': '%08',
'\x09': '%09',
'\x0a': '%0A',
'\x0b': '%0B',
'\x0c': '%0C',
'\x0d': '%0D',
'\x0e': '%0E',
'\x0f': '%0F',
'\x10': '%10',
'\x11': '%11',
'\x12': '%12',
'\x13': '%13',
'\x14': '%14',
'\x15': '%15',
'\x16': '%16',
'\x17': '%17',
'\x18': '%18',
'\x19': '%19',
'\x1a': '%1A',
'\x1b': '%1B',
'\x1c': '%1C',
'\x1d': '%1D',
'\x1e': '%1E',
'\x1f': '%1F',
' ': '%20',
'\"': '%22',
'\'': '%27',
'(': '%28',
')': '%29',
'<': '%3C',
'>': '%3E',
'\\': '%5C',
'{': '%7B',
'}': '%7D',
'\x7f': '%7F',
'\x85': '%C2%85',
'\xa0': '%C2%A0',
'\u2028': '%E2%80%A8',
'\u2029': '%E2%80%A9',
'\uff01': '%EF%BC%81',
'\uff03': '%EF%BC%83',
'\uff04': '%EF%BC%84',
'\uff06': '%EF%BC%86',
'\uff07': '%EF%BC%87',
'\uff08': '%EF%BC%88',
'\uff09': '%EF%BC%89',
'\uff0a': '%EF%BC%8A',
'\uff0b': '%EF%BC%8B',
'\uff0c': '%EF%BC%8C',
'\uff0f': '%EF%BC%8F',
'\uff1a': '%EF%BC%9A',
'\uff1b': '%EF%BC%9B',
'\uff1d': '%EF%BC%9D',
'\uff1f': '%EF%BC%9F',
'\uff20': '%EF%BC%A0',
'\uff3b': '%EF%BC%BB',
'\uff3d': '%EF%BC%BD'
}
def _REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI(match):
ch = match.group(0)
return _ESCAPE_MAP_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI[ch]
_MATCHER_FOR_ESCAPE_HTML = re.compile(r'[\x00\x22\x26\x27\x3c\x3e]', re.U)
_MATCHER_FOR_NORMALIZE_HTML = re.compile(r'[\x00\x22\x27\x3c\x3e]', re.U)
_MATCHER_FOR_ESCAPE_HTML_NOSPACE = re.compile(r'[\x00\x09-\x0d \x22\x26\x27\x2d\/\x3c-\x3e`\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_NORMALIZE_HTML_NOSPACE = re.compile(r'[\x00\x09-\x0d \x22\x27\x2d\/\x3c-\x3e`\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_JS_STRING = re.compile(r'[\x00\x08-\x0d\x22\x26\x27\/\x3c-\x3e\x5b-\x5d\x7b\x7d\x85\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_JS_REGEX = re.compile(r'[\x00\x08-\x0d\x22\x24\x26-\/\x3a\x3c-\x3f\x5b-\x5e\x7b-\x7d\x85\u2028\u2029]', re.U)
_MATCHER_FOR_ESCAPE_CSS_STRING = re.compile(r'[\x00\x08-\x0d\x22\x26-\x2a\/\x3a-\x3e@\\\x7b\x7d\x85\xa0\u2028\u2029]', re.U)
_MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI = re.compile(r'[\x00- \x22\x27-\x29\x3c\x3e\\\x7b\x7d\x7f\x85\xa0\u2028\u2029\uff01\uff03\uff04\uff06-\uff0c\uff0f\uff1a\uff1b\uff1d\uff1f\uff20\uff3b\uff3d]', re.U)
_FILTER_FOR_FILTER_CSS_VALUE = re.compile(r"""^(?!-*(?:expression|(?:moz-)?binding))(?!\s+)(?:[.#]?-?(?:[_a-z0-9-]+)(?:-[_a-z0-9-]+)*-?|(?:rgb|hsl)a?\([0-9.%, ]+\)|-?(?:[0-9]+(?:\.[0-9]*)?|\.[0-9]+)(?:[a-z]{1,2}|%)?|!important|\s+)*\Z""", re.U | re.I)
_FILTER_FOR_FILTER_NORMALIZE_URI = re.compile(r"""^(?![^#?]*/(?:\.|%2E){2}(?:[/?#]|\Z))(?:(?:https?|mailto):|[^&:/?#]*(?:[/?#]|\Z))""", re.U | re.I)
_FILTER_FOR_FILTER_NORMALIZE_MEDIA_URI = re.compile(r"""^[^&:/?#]*(?:[/?#]|\Z)|^https?:|^data:image/[a-z0-9+]+;base64,[a-z0-9+/]+=*\Z|^blob:""", re.U | re.I)
_FILTER_FOR_FILTER_IMAGE_DATA_URI = re.compile(r"""^data:image/(?:bmp|gif|jpe?g|png|tiff|webp);base64,[a-z0-9+/]+=*\Z""", re.U | re.I)
_FILTER_FOR_FILTER_SIP_URI = re.compile(r"""^sip:[0-9a-z;=\-+._!~*' /():&$#?@,]+\Z""", re.U | re.I)
_FILTER_FOR_FILTER_TEL_URI = re.compile(r"""^tel:[0-9a-z;=\-+._!~*' /():&$#?@,]+\Z""", re.U | re.I)
_FILTER_FOR_FILTER_HTML_ATTRIBUTES = re.compile(r"""^(?!on|src|(?:style|action|archive|background|cite|classid|codebase|data|dsync|href|longdesc|usemap)\s*$)(?:[a-z0-9_$:-]*)\Z""", re.U | re.I)
_FILTER_FOR_FILTER_HTML_ELEMENT_NAME = re.compile(r"""^(?!link|script|style|title|textarea|xmp|no)[a-z0-9_$:-]*\Z""", re.U | re.I)
def escape_html_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_HTML.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def normalize_html_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_HTML.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def escape_html_nospace_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_HTML_NOSPACE.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def normalize_html_nospace_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_HTML_NOSPACE.sub(
_REPLACER_FOR_ESCAPE_HTML__AND__NORMALIZE_HTML__AND__ESCAPE_HTML_NOSPACE__AND__NORMALIZE_HTML_NOSPACE, value)
def escape_js_string_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_JS_STRING.sub(
_REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX, value)
def escape_js_regex_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_JS_REGEX.sub(
_REPLACER_FOR_ESCAPE_JS_STRING__AND__ESCAPE_JS_REGEX, value)
def escape_css_string_helper(value):
value = str(value)
return _MATCHER_FOR_ESCAPE_CSS_STRING.sub(
_REPLACER_FOR_ESCAPE_CSS_STRING, value)
def filter_css_value_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_CSS_VALUE.search(value):
return 'zSoyz'
return value
def normalize_uri_helper(value):
value = str(value)
return _MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI.sub(
_REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI, value)
def filter_normalize_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_NORMALIZE_URI.search(value):
return 'about:invalid#zSoyz'
return _MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI.sub(
_REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI, value)
def filter_normalize_media_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_NORMALIZE_MEDIA_URI.search(value):
return 'about:invalid#zSoyz'
return _MATCHER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI.sub(
_REPLACER_FOR_NORMALIZE_URI__AND__FILTER_NORMALIZE_URI__AND__FILTER_NORMALIZE_MEDIA_URI, value)
def filter_image_data_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_IMAGE_DATA_URI.search(value):
return 'data:image/gif;base64,zSoyz'
return value
def filter_sip_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_SIP_URI.search(value):
return 'about:invalid#zSoyz'
return value
def filter_tel_uri_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_TEL_URI.search(value):
return 'about:invalid#zSoyz'
return value
def filter_html_attributes_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_HTML_ATTRIBUTES.search(value):
return 'zSoyz'
return value
def filter_html_element_name_helper(value):
value = str(value)
if not _FILTER_FOR_FILTER_HTML_ELEMENT_NAME.search(value):
return 'zSoyz'
return value
_HTML_TAG_REGEX = re.compile(r"""<(?:!|/?([a-zA-Z][a-zA-Z0-9:\-]*))(?:[^>'"]|"[^"]*"|'[^']*')*>""", re.U)
_LT_REGEX = re.compile('<')
_SAFE_TAG_WHITELIST = ('b', 'br', 'em', 'i', 's', 'sub', 'sup', 'u')
# END GENERATED CODE
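# --- Hedged examples (not part of the generated code above) ---
# A few sanity checks whose expected values follow directly from the escape
# maps and filter patterns defined above.
def _escaper_examples():
    assert escape_html_helper('<a href="x">') == '<a href="x">'
    assert filter_normalize_uri_helper('javascript:alert(1)') == 'about:invalid#zSoyz'
    assert filter_html_element_name_helper('script') == 'zSoyz'
    assert escape_uri_helper('a b/c') == 'a%20b%2Fc'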
|
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.services.protocols.bgp.base import Activity
from ryu.lib import hub
from ryu.lib.packet import bmp
from ryu.lib.packet import bgp
import socket
import logging
from calendar import timegm
from ryu.services.protocols.bgp.signals.emit import BgpSignalBus
from ryu.services.protocols.bgp.info_base.ipv4 import Ipv4Path
from ryu.lib.packet.bgp import BGPUpdate
from ryu.lib.packet.bgp import BGPPathAttributeMpUnreachNLRI
LOG = logging.getLogger('bgpspeaker.bmp')
class BMPClient(Activity):
"""A BMP client.
Tries to establish a BMP session with the configured BMP server.
If a BMP session is established, transfers information about peers
(e.g. received and sent open msgs, contents of adj-rib-in, other stats)
"""
def __init__(self, core_service, host, port):
super(BMPClient, self).__init__(name='BMPClient(%s:%s)' % (host, port))
self._core_service = core_service
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_RIB_IN_CHANGED,
lambda _, data: self.on_adj_rib_in_changed(data)
)
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_UP,
lambda _, data: self.on_adj_up(data)
)
self._core_service.signal_bus.register_listener(
BgpSignalBus.BGP_ADJ_DOWN,
lambda _, data: self.on_adj_down(data)
)
self._socket = None
self.server_address = (host, port)
self._connect_retry_event = hub.Event()
self._connect_retry_time = 5
def _run(self):
self._connect_retry_event.set()
while True:
self._connect_retry_event.wait()
try:
self._connect_retry_event.clear()
self._connect_tcp(self.server_address,
self._handle_bmp_session)
except socket.error:
self._connect_retry_event.set()
LOG.info('Will try to reconnect to %s after %s secs: %s',
self.server_address, self._connect_retry_time,
self._connect_retry_event.is_set())
self.pause(self._connect_retry_time)
def _send(self, msg):
if not self._socket:
return
assert isinstance(msg, bmp.BMPMessage)
self._socket.send(msg.serialize())
def on_adj_rib_in_changed(self, data):
peer = data['peer']
path = data['received_route']
msg = self._construct_route_monitoring(peer, path)
self._send(msg)
def on_adj_up(self, data):
peer = data['peer']
msg = self._construct_peer_up_notification(peer)
self._send(msg)
def on_adj_down(self, data):
peer = data['peer']
msg = self._construct_peer_down_notification(peer)
self._send(msg)
def _construct_peer_up_notification(self, peer):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_distinguisher = 0
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = peer.protocol.recv_open_msg.bgp_identifier
timestamp = peer.state._established_time
local_address = peer.host_bind_ip
local_port = int(peer.host_bind_port)
peer_address, remote_port = peer.protocol._remotename
remote_port = int(remote_port)
sent_open_msg = peer.protocol.sent_open_msg
recv_open_msg = peer.protocol.recv_open_msg
msg = bmp.BMPPeerUpNotification(local_address=local_address,
local_port=local_port,
remote_port=remote_port,
sent_open_message=sent_open_msg,
received_open_message=recv_open_msg,
peer_type=peer_type,
is_post_policy=False,
peer_distinguisher=peer_distinguisher,
peer_address=peer_address,
peer_as=peer_as,
peer_bgp_id=peer_bgp_id,
timestamp=timestamp)
return msg
def _construct_peer_down_notification(self, peer):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = peer.protocol.recv_open_msg.bgp_identifier
peer_address, _ = peer.protocol._remotename
return bmp.BMPPeerDownNotification(bmp.BMP_PEER_DOWN_REASON_UNKNOWN,
data=None,
peer_type=peer_type,
is_post_policy=False,
peer_distinguisher=0,
peer_address=peer_address,
peer_as=peer_as,
peer_bgp_id=peer_bgp_id,
timestamp=0)
def _construct_update(self, path):
# Get copy of path's path attributes.
new_pathattr = [attr for attr in path.pathattr_map.values()]
if path.is_withdraw:
if isinstance(path, Ipv4Path):
return BGPUpdate(withdrawn_routes=[path.nlri],
path_attributes=new_pathattr)
else:
mpunreach_attr = BGPPathAttributeMpUnreachNLRI(
path.route_family.afi, path.route_family.safi, [path.nlri]
)
new_pathattr.append(mpunreach_attr)
else:
if isinstance(path, Ipv4Path):
return BGPUpdate(nlri=[path.nlri],
path_attributes=new_pathattr)
return BGPUpdate(path_attributes=new_pathattr)
def _construct_route_monitoring(self, peer, route):
if peer.is_mpbgp_cap_valid(bgp.RF_IPv4_VPN) or \
peer.is_mpbgp_cap_valid(bgp.RF_IPv6_VPN):
peer_type = bmp.BMP_PEER_TYPE_L3VPN
else:
peer_type = bmp.BMP_PEER_TYPE_GLOBAL
peer_distinguisher = 0
peer_as = peer._neigh_conf.remote_as
peer_bgp_id = peer.protocol.recv_open_msg.bgp_identifier
peer_address, _ = peer.protocol._remotename
bgp_update = self._construct_update(route.path)
is_post_policy = not route.filtered
timestamp = timegm(route.timestamp)
msg = bmp.BMPRouteMonitoring(bgp_update=bgp_update,
peer_type=peer_type,
is_post_policy=is_post_policy,
peer_distinguisher=peer_distinguisher,
peer_address=peer_address,
peer_as=peer_as, peer_bgp_id=peer_bgp_id,
timestamp=timestamp)
return msg
def _handle_bmp_session(self, socket):
self._socket = socket
# send init message
init_info = {'type': bmp.BMP_INIT_TYPE_STRING,
'value': u'This is Ryu BGP BMP message'}
init_msg = bmp.BMPInitiation([init_info])
self._send(init_msg)
# send peer-up message for each peers
peer_manager = self._core_service.peer_manager
for peer in (p for p in peer_manager.iterpeers if p.in_established()):
msg = self._construct_peer_up_notification(peer)
self._send(msg)
for path in peer._adj_rib_in.values():
msg = self._construct_route_monitoring(peer, path)
self._send(msg)
# TODO periodically send stats to bmpstation
while True:
# bmpstation shouldn't send any packet to bmpclient.
# this recv() is only meant to detect socket closed
ret = self._socket.recv(1)
if len(ret) == 0:
LOG.debug('BMP socket is closed. retry connecting..')
self._socket = None
self._connect_retry_event.set()
break
# silently ignore packets from the bmpstation
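# --- Hedged usage sketch (not part of the original module) ---
# How a BMP client is typically created and started by the BGP speaker core.
# ``core_service`` is the speaker's core service instance, and ``start()`` is
# assumed to come from the Activity base class (it spawns ``_run`` above).
def _example_start_bmp_client(core_service):
    client = BMPClient(core_service, host='127.0.0.1', port=11019)
    client.start()  # keeps retrying the TCP connection until it succeeds
    return client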
|
|
# Copyright (c) 2012 NetApp, Inc. All rights reserved.
# Copyright (c) 2014 Navneet Singh. All rights reserved.
# Copyright (c) 2014 Bob Callaway. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Contains configuration options for NetApp drivers.
Common place to hold configuration options for all NetApp drivers.
Options need to be grouped into granular units so that they can be reused
by different modules and classes. This does not prevent declaring options in
individual modules: if options are not reusable, they can be declared there.
It is recommended to keep options in a single place to ensure reusability
and better management of configuration options.
"""
from oslo_config import cfg
NETAPP_SIZE_MULTIPLIER_DEFAULT = 1.2
netapp_proxy_opts = [
cfg.StrOpt('netapp_storage_family',
default='ontap_cluster',
choices=['ontap_7mode', 'ontap_cluster', 'eseries'],
help=('The storage family type used on the storage system; '
'valid values are ontap_7mode for using Data ONTAP '
'operating in 7-Mode, ontap_cluster for using '
'clustered Data ONTAP, or eseries for using E-Series.')),
cfg.StrOpt('netapp_storage_protocol',
choices=['iscsi', 'fc', 'nfs'],
help=('The storage protocol to be used on the data path with '
'the storage system.')), ]
netapp_connection_opts = [
cfg.StrOpt('netapp_server_hostname',
help='The hostname (or IP address) for the storage system or '
'proxy server.'),
cfg.IntOpt('netapp_server_port',
help=('The TCP port to use for communication with the storage '
'system or proxy server. If not specified, Data ONTAP '
'drivers will use 80 for HTTP and 443 for HTTPS; '
'E-Series will use 8080 for HTTP and 8443 for HTTPS.')), ]
netapp_transport_opts = [
cfg.StrOpt('netapp_transport_type',
default='http',
choices=['http', 'https'],
help=('The transport protocol used when communicating with '
'the storage system or proxy server.')), ]
netapp_basicauth_opts = [
cfg.StrOpt('netapp_login',
help=('Administrative user account name used to access the '
'storage system or proxy server.')),
cfg.StrOpt('netapp_password',
help=('Password for the administrative user account '
'specified in the netapp_login option.'),
secret=True), ]
netapp_provisioning_opts = [
cfg.FloatOpt('netapp_size_multiplier',
default=NETAPP_SIZE_MULTIPLIER_DEFAULT,
help=('The quantity to be multiplied by the requested '
'volume size to ensure enough space is available on '
'the virtual storage server (Vserver) to fulfill '
'the volume creation request. Note: this option '
'is deprecated and will be removed in favor of '
'"reserved_percentage" in the Mitaka release.')),
cfg.StrOpt('netapp_lun_space_reservation',
default='enabled',
choices=['enabled', 'disabled'],
help=('This option determines if storage space is reserved '
'for LUN allocation. If enabled, LUNs are thick '
'provisioned. If space reservation is disabled, '
'storage space is allocated on demand.')), ]
netapp_cluster_opts = [
cfg.StrOpt('netapp_vserver',
help=('This option specifies the virtual storage server '
'(Vserver) name on the storage cluster on which '
'provisioning of block storage volumes should occur.')), ]
netapp_7mode_opts = [
cfg.StrOpt('netapp_vfiler',
help=('The vFiler unit on which provisioning of block storage '
'volumes will be done. This option is only used by the '
'driver when connecting to an instance with a storage '
'family of Data ONTAP operating in 7-Mode. Only use this '
'option when utilizing the MultiStore feature on the '
'NetApp storage system.')),
cfg.StrOpt('netapp_partner_backend_name',
help=('The name of the config.conf stanza for a Data ONTAP '
'(7-mode) HA partner. This option is only used by the '
'driver when connecting to an instance with a storage '
'family of Data ONTAP operating in 7-Mode, and it is '
'required if the storage protocol selected is FC.')), ]
netapp_img_cache_opts = [
cfg.IntOpt('thres_avl_size_perc_start',
default=20,
help=('If the percentage of available space for an NFS share '
'has dropped below the value specified by this option, '
'the NFS image cache will be cleaned.')),
cfg.IntOpt('thres_avl_size_perc_stop',
default=60,
help=('When the percentage of available space on an NFS share '
'has reached the percentage specified by this option, '
'the driver will stop clearing files from the NFS image '
'cache that have not been accessed in the last M '
'minutes, where M is the value of the '
'expiry_thres_minutes configuration option.')),
cfg.IntOpt('expiry_thres_minutes',
default=720,
help=('This option specifies the threshold for last access '
'time for images in the NFS image cache. When a cache '
'cleaning cycle begins, images in the cache that have '
'not been accessed in the last M minutes, where M is '
'the value of this parameter, will be deleted from the '
'cache to create free space on the NFS share.')), ]
netapp_eseries_opts = [
cfg.StrOpt('netapp_webservice_path',
default='/devmgr/v2',
help=('This option is used to specify the path to the E-Series '
'proxy application on a proxy server. The value is '
'combined with the value of the netapp_transport_type, '
'netapp_server_hostname, and netapp_server_port options '
'to create the URL used by the driver to connect to the '
'proxy application.')),
cfg.StrOpt('netapp_controller_ips',
help=('This option is only utilized when the storage family '
'is configured to eseries. This option is used to '
'restrict provisioning to the specified controllers. '
'Specify the value of this option to be a comma '
'separated list of controller hostnames or IP addresses '
'to be used for provisioning.')),
cfg.StrOpt('netapp_sa_password',
help=('Password for the NetApp E-Series storage array.'),
secret=True),
cfg.BoolOpt('netapp_enable_multiattach',
default=False,
help='This option specifies whether the driver should allow '
'operations that require multiple attachments to a '
'volume. An example would be live migration of servers '
'that have volumes attached. When enabled, this backend '
'is limited to 256 total volumes in order to '
'guarantee volumes can be accessed by more than one '
'host.'),
]
netapp_nfs_extra_opts = [
cfg.StrOpt('netapp_copyoffload_tool_path',
help=('This option specifies the path of the NetApp copy '
'offload tool binary. Ensure that the binary has execute '
'permissions set which allow the effective user of the '
'cinder-volume process to execute the file.')), ]
netapp_san_opts = [
cfg.StrOpt('netapp_lun_ostype',
help=('This option defines the type of operating system that'
' will access a LUN exported from Data ONTAP; it is'
' assigned to the LUN at the time it is created.')),
cfg.StrOpt('netapp_host_type',
deprecated_name='netapp_eseries_host_type',
help=('This option defines the type of operating system for'
' all initiators that can access a LUN. This information'
' is used when mapping LUNs to individual hosts or'
' groups of hosts.')),
cfg.StrOpt('netapp_pool_name_search_pattern',
deprecated_opts=[cfg.DeprecatedOpt(name='netapp_volume_list'),
cfg.DeprecatedOpt(name='netapp_storage_pools')
],
default="(.+)",
help=('This option is used to restrict provisioning to the '
'specified pools. Specify the value of '
'this option to be a regular expression which will be '
'applied to the names of objects from the storage '
'backend which represent pools in Cinder. This option '
'is only utilized when the storage protocol is '
'configured to use iSCSI or FC.')), ]
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts)
CONF.register_opts(netapp_connection_opts)
CONF.register_opts(netapp_transport_opts)
CONF.register_opts(netapp_basicauth_opts)
CONF.register_opts(netapp_cluster_opts)
CONF.register_opts(netapp_7mode_opts)
CONF.register_opts(netapp_provisioning_opts)
CONF.register_opts(netapp_img_cache_opts)
CONF.register_opts(netapp_eseries_opts)
CONF.register_opts(netapp_nfs_extra_opts)
CONF.register_opts(netapp_san_opts)
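# --- Hedged usage sketch (not part of the original module) ---
# NetApp drivers typically pull these option groups into their per-backend
# configuration object; ``configuration.append_config_values`` is the
# standard Cinder driver pattern and is assumed here for illustration.
def _example_setup_backend_config(configuration):
    configuration.append_config_values(netapp_proxy_opts)
    configuration.append_config_values(netapp_connection_opts)
    configuration.append_config_values(netapp_transport_opts)
    configuration.append_config_values(netapp_basicauth_opts)
    # With no overrides in cinder.conf, the defaults apply, e.g.:
    #   configuration.netapp_storage_family == 'ontap_cluster'
    #   configuration.netapp_transport_type == 'http'
    return configuration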
|
|
import sys
from overrides import overrides
from keras import backend as K
from keras.engine import InputSpec, Layer
from keras.layers import LSTM, Dense
class NSE(Layer):
'''
Simple Neural Semantic Encoder.
'''
def __init__(self, output_dim, input_length=None, composer_activation='linear',
return_mode='last_output', weights=None, **kwargs):
'''
Arguments:
output_dim (int)
input_length (int)
composer_activation (str): activation used in the MLP
return_mode (str): One of last_output, all_outputs, output_and_memory
This is analogous to the return_sequences flag in Keras' Recurrent.
last_output returns only the last h_t
all_outputs returns the whole sequence of h_ts
output_and_memory returns the last output and the last memory concatenated
(needed if this layer is followed by a MMA-NSE)
weights (list): Initial weights
'''
self.output_dim = output_dim
self.input_dim = output_dim # Equation 2 in the paper makes this assumption.
self.initial_weights = weights
self.input_spec = [InputSpec(ndim=3)]
self.input_length = input_length
self.composer_activation = composer_activation
super(NSE, self).__init__(**kwargs)
self.reader = LSTM(self.output_dim, return_sequences=True, name="{}_reader".format(self.name))
# TODO: Let the writer use parameter dropout and any consume_less mode.
# Setting dropout to 0 here to eliminate the need for constants.
# Setting consume_less to mem to eliminate need for preprocessing
self.writer = LSTM(self.output_dim, dropout_W=0.0, dropout_U=0.0, consume_less="mem",
name="{}_writer".format(self.name))
self.composer = Dense(self.output_dim * 2, activation=self.composer_activation,
name="{}_composer".format(self.name))
if return_mode not in ["last_output", "all_outputs", "output_and_memory"]:
raise Exception("Unrecognized return mode: %s" % (return_mode))
self.return_mode = return_mode
def get_output_shape_for(self, input_shape):
input_length = input_shape[1]
if self.return_mode == "last_output":
return (input_shape[0], self.output_dim)
elif self.return_mode == "all_outputs":
return (input_shape[0], input_length, self.output_dim)
else:
# return_mode is output_and_memory. Output will be concatenated to memory.
return (input_shape[0], input_length + 1, self.output_dim)
def compute_mask(self, input, mask):
if mask is None or self.return_mode == "last_output":
return None
elif self.return_mode == "all_outputs":
return mask # (batch_size, input_length)
else:
# Return mode is output_and_memory
# Mask memory corresponding to all the inputs that are masked, and do not mask the output
# (batch_size, input_length + 1)
return K.cast(K.concatenate([K.zeros_like(mask[:, :1]), mask]), 'uint8')
def get_composer_input_shape(self, input_shape):
# Takes concatenation of output and memory summary
return (input_shape[0], self.output_dim * 2)
def get_reader_input_shape(self, input_shape):
return input_shape
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
input_dim = input_shape[-1]
assert self.reader.return_sequences, "The reader has to return sequences!"
reader_input_shape = self.get_reader_input_shape(input_shape)
print >>sys.stderr, "NSE reader input shape:", reader_input_shape
writer_input_shape = (input_shape[0], 1, self.output_dim * 2) # Will process one timestep at a time
print >>sys.stderr, "NSE writer input shape:", writer_input_shape
composer_input_shape = self.get_composer_input_shape(input_shape)
print >>sys.stderr, "NSE composer input shape:", composer_input_shape
self.reader.build(reader_input_shape)
self.writer.build(writer_input_shape)
self.composer.build(composer_input_shape)
# Aggregate weights of individual components for this layer.
reader_weights = self.reader.trainable_weights
writer_weights = self.writer.trainable_weights
composer_weights = self.composer.trainable_weights
self.trainable_weights = reader_weights + writer_weights + composer_weights
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def read(self, nse_input, input_mask=None):
'''
This method produces the 'read' output (equation 1 in the paper) for all timesteps
and initializes the memory slot mem_0.
Input: nse_input (batch_size, input_length, input_dim)
Outputs:
o (batch_size, input_length, output_dim)
flattened_mem_0 (batch_size, input_length * output_dim)
While this method simply copies input to mem_0, variants that inherit from this class can do
something fancier.
'''
input_to_read = nse_input
mem_0 = input_to_read
flattened_mem_0 = K.batch_flatten(mem_0)
o = self.reader.call(input_to_read, input_mask)
o_mask = self.reader.compute_mask(input_to_read, input_mask)
return o, [flattened_mem_0], o_mask
@staticmethod
def summarize_memory(o_t, mem_tm1):
'''
This method selects the relevant parts of the memory given the read output and summarizes the
memory. Implements Equations 2-3 or 8-11 in the paper.
'''
# Selecting relevant memory slots, Equation 2
z_t = K.softmax(K.sum(K.expand_dims(o_t, dim=1) * mem_tm1, axis=2)) # (batch_size, input_length)
# Summarizing memory, Equation 3
m_rt = K.sum(K.expand_dims(z_t, dim=2) * mem_tm1, axis=1) # (batch_size, output_dim)
return z_t, m_rt
def compose_memory_and_output(self, output_memory_list):
'''
This method takes a list of tensors and applies the composition function on their concatenation.
Implements equation 4 or 12 in the paper.
'''
# Composition, Equation 4
c_t = self.composer.call(K.concatenate(output_memory_list)) # (batch_size, output_dim)
return c_t
def update_memory(self, z_t, h_t, mem_tm1):
'''
This method takes the attention vector (z_t), writer output (h_t) and previous timestep's memory (mem_tm1)
and updates the memory. Implements equations 6, 14 or 15.
'''
tiled_z_t = K.tile(K.expand_dims(z_t), (self.output_dim)) # (batch_size, input_length, output_dim)
input_length = K.shape(mem_tm1)[1]
# (batch_size, input_length, output_dim)
tiled_h_t = K.permute_dimensions(K.tile(K.expand_dims(h_t), (input_length)), (0, 2, 1))
# Updating memory. First term in summation corresponds to selective forgetting and the second term to
# selective addition. Equation 6.
mem_t = mem_tm1 * (1 - tiled_z_t) + tiled_h_t * tiled_z_t # (batch_size, input_length, output_dim)
return mem_t
def compose_and_write_step(self, o_t, states):
'''
This method is a step function that updates the memory at each time step and produces
a new output vector (Equations 2 to 6 in the paper).
The memory_state is flattened because K.rnn requires all states to be of the same shape as the output,
because it uses the same mask for the output and the states.
Inputs:
o_t (batch_size, output_dim)
states (list[Tensor])
flattened_mem_tm1 (batch_size, input_length * output_dim)
writer_h_tm1 (batch_size, output_dim)
writer_c_tm1 (batch_size, output_dim)
Outputs:
h_t (batch_size, output_dim)
flattened_mem_t (batch_size, input_length * output_dim)
'''
flattened_mem_tm1, writer_h_tm1, writer_c_tm1 = states
input_mem_shape = K.shape(flattened_mem_tm1)
mem_tm1_shape = (input_mem_shape[0], input_mem_shape[1]/self.output_dim, self.output_dim)
mem_tm1 = K.reshape(flattened_mem_tm1, mem_tm1_shape) # (batch_size, input_length, output_dim)
z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
c_t = self.compose_memory_and_output([o_t, m_rt])
# Collecting the necessary variables to directly call writer's step function.
writer_constants = self.writer.get_constants(c_t) # returns dropouts for W and U (all 1s, see init)
writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
# Making a call to writer's step function, Equation 5
h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states) # h_t, writer_c_t: (batch_size, output_dim)
mem_t = self.update_memory(z_t, h_t, mem_tm1)
flattened_mem_t = K.batch_flatten(mem_t)
return h_t, [flattened_mem_t, h_t, writer_c_t]
def call(self, x, mask=None):
# input_shape = (batch_size, input_length, input_dim). This needs to be defined in build.
read_output, initial_memory_states, output_mask = self.read(x, mask)
initial_write_states = self.writer.get_initial_states(read_output) # h_0 and c_0 of the writer LSTM
initial_states = initial_memory_states + initial_write_states
# last_output: (batch_size, output_dim)
# all_outputs: (batch_size, input_length, output_dim)
# last_states:
# last_memory_state: (batch_size, input_length, output_dim)
# last_output
# last_writer_ct
last_output, all_outputs, last_states = K.rnn(self.compose_and_write_step, read_output, initial_states,
mask=output_mask)
last_memory = last_states[0]
if self.return_mode == "last_output":
return last_output
elif self.return_mode == "all_outputs":
return all_outputs
else:
# return mode is output_and_memory
expanded_last_output = K.expand_dims(last_output, dim=1) # (batch_size, 1, output_dim)
# (batch_size, 1+input_length, output_dim)
return K.concatenate([expanded_last_output, last_memory], axis=1)
def get_config(self):
config = {'output_dim': self.output_dim,
'input_length': self.input_length,
'composer_activation': self.composer_activation,
'return_mode': self.return_mode}
base_config = super(NSE, self).get_config()
config.update(base_config)
return config
class MultipleMemoryAccessNSE(NSE):
'''
MultipleMemoryAccessNSE is very similar to the simple NSE. The difference is that along with the sentence
memory, it has access to one (or multiple) additional memory. The operations on the additional memory are
exactly the same as the original memory. The additional memory is initialized from the final timestep of
a different NSE, and the composer will take as input the concatenation of the reader output and summaries
of both the memories.
'''
#TODO: This is currently assuming we need access to one additional memory. Change it to an arbitrary number.
@overrides
def get_output_shape_for(self, input_shape):
# This class has twice the input length as an NSE due to the concatenated input. Pass the right size
# to NSE's method to get the right output shape.
nse_input_shape = (input_shape[0], input_shape[1]/2, input_shape[2])
return super(MultipleMemoryAccessNSE, self).get_output_shape_for(nse_input_shape)
def get_reader_input_shape(self, input_shape):
return (input_shape[0], input_shape[1]/2, self.output_dim)
def get_composer_input_shape(self, input_shape):
return (input_shape[0], self.output_dim * 3)
@overrides
def read(self, nse_input, input_mask=None):
'''
Read input in MMA-NSE will be of shape (batch_size, read_input_length*2, input_dim), a concatenation of
the actual input to this NSE and the output from a different NSE. The latter will be used to initialize
the shared memory. The former will be passed to the read LSTM and also used to initialize the current
memory.
'''
input_length = K.shape(nse_input)[1]
read_input_length = input_length/2
input_to_read = nse_input[:, :read_input_length, :]
initial_shared_memory = K.batch_flatten(nse_input[:, read_input_length:, :])
mem_0 = K.batch_flatten(input_to_read)
o = self.reader.call(input_to_read, input_mask)
o_mask = self.reader.compute_mask(input_to_read, input_mask)
return o, [mem_0, initial_shared_memory], o_mask
@overrides
def compose_and_write_step(self, o_t, states):
flattened_mem_tm1, flattened_shared_mem_tm1, writer_h_tm1, writer_c_tm1 = states
input_mem_shape = K.shape(flattened_mem_tm1)
mem_shape = (input_mem_shape[0], input_mem_shape[1]/self.output_dim, self.output_dim)
mem_tm1 = K.reshape(flattened_mem_tm1, mem_shape)
shared_mem_tm1 = K.reshape(flattened_shared_mem_tm1, mem_shape)
z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
shared_z_t, shared_m_rt = self.summarize_memory(o_t, shared_mem_tm1)
c_t = self.compose_memory_and_output([o_t, m_rt, shared_m_rt])
# Collecting the necessary variables to directly call writer's step function.
writer_constants = self.writer.get_constants(c_t) # returns dropouts for W and U (all 1s, see init)
writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
# Making a call to writer's step function, Equation 5
h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states) # h_t, writer_c_t: (batch_size, output_dim)
mem_t = self.update_memory(z_t, h_t, mem_tm1)
shared_mem_t = self.update_memory(shared_z_t, h_t, shared_mem_tm1)
return h_t, [K.batch_flatten(mem_t), K.batch_flatten(shared_mem_t), h_t, writer_c_t]
class InputMemoryMerger(Layer):
'''
This layer takes as input the memory part of the output of an NSE layer and the embedded input to a MMANSE
layer, and prepares a single input tensor for MMANSE that is a concatenation of the first sentence's memory
and the second sentence's embedding.
This is a concrete layer instead of a lambda function because we want to support masking.
'''
def __init__(self, **kwargs):
self.supports_masking = True
super(InputMemoryMerger, self).__init__(**kwargs)
def get_output_shape_for(self, input_shapes):
return (input_shapes[1][0], input_shapes[1][1]*2, input_shapes[1][2])
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if mask is None:
return None
elif mask == [None, None]:
return None
else:
memory_mask, mmanse_embed_mask = mask
return K.concatenate([mmanse_embed_mask, memory_mask], axis=1) # (batch_size, nse_input_length * 2)
def call(self, inputs, mask=None):
shared_memory = inputs[0]
mmanse_embed_input = inputs[1] # (batch_size, nse_input_length, output_dim)
return K.concatenate([mmanse_embed_input, shared_memory], axis=1)
class OutputSplitter(Layer):
'''
This layer takes the concatenation of output and memory from NSE and returns either the output or the
memory.
'''
def __init__(self, return_mode, **kwargs):
self.supports_masking = True
if return_mode not in ["output", "memory"]:
raise Exception("Invalid return mode: %s" % return_mode)
self.return_mode = return_mode
super(OutputSplitter, self).__init__(**kwargs)
def get_output_shape_for(self, input_shape):
if self.return_mode == "output":
return (input_shape[0], input_shape[2])
else:
# Return mode is memory.
# input contains output and memory concatenated along the second dimension.
return (input_shape[0], input_shape[1] - 1, input_shape[2])
def compute_mask(self, inputs, mask=None):
# pylint: disable=unused-argument
if self.return_mode == "output" or mask is None:
return None
else:
# Return mode is memory and mask is not None
return mask[:, 1:] # (batch_size, nse_input_length)
def call(self, inputs, mask=None):
if self.return_mode == "output":
return inputs[:, 0, :] # (batch_size, output_dim)
else:
return inputs[:, 1:, :] # (batch_size, nse_input_length, output_dim)
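# A hedged usage sketch, not part of the original module: how these layers are
# presumably wired together for a premise/hypothesis pair. The constructor
# keywords, the return-mode names ("output_and_memory", "last_output"), and the
# helper-function name are assumptions inferred from the classes above.
def _example_mmanse_pipeline(premise_embedding, hypothesis_embedding,
                             output_dim, input_length):
    # Encode the premise with a plain NSE, returning [output; memory] so the
    # memory half can seed the MMA-NSE.
    premise_encoding = NSE(output_dim, input_length=input_length,
                           return_mode="output_and_memory")(premise_embedding)
    premise_memory = OutputSplitter("memory")(premise_encoding)
    # Concatenate the hypothesis embedding and the premise memory along the
    # time axis, the input format MultipleMemoryAccessNSE.read() expects.
    mmanse_input = InputMemoryMerger()([premise_memory, hypothesis_embedding])
    # The MMA-NSE reads the hypothesis while attending over both memories.
    return MultipleMemoryAccessNSE(output_dim, input_length=input_length,
                                   return_mode="last_output")(mmanse_input)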
|
|
"""
Kolibri Webpack hooks
---------------------
To manage assets, we use the webpack format. In order to have assets bundled in,
you should put them in ``yourapp/assets/src``.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import io
import json
import logging
import os
import time
from functools import partial
from django.conf import settings as django_settings
from django.contrib.staticfiles.finders import find as find_staticfiles
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.cache import caches
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.six.moves.urllib.request import url2pathname
from django.utils.translation import get_language
from django.utils.translation import get_language_info
from django.utils.translation import to_locale
from pkg_resources import resource_filename
from six import text_type
import kolibri
from . import settings
from kolibri.plugins import hooks
from kolibri.utils import conf
# Use the cache specifically for built files
cache = caches["built_files"]
class BundleNotFound(Exception):
pass
class WebpackError(EnvironmentError):
pass
logger = logging.getLogger(__name__)
def filter_by_bidi(bidi, chunk):
if chunk["name"].split(".")[-1] != "css":
return True
if bidi:
return chunk["name"].split(".")[-2] == "rtl"
else:
return chunk["name"].split(".")[-2] != "rtl"
class WebpackBundleHook(hooks.KolibriHook):
"""
This is the abstract hook class that all plugins that wish to load any
assets into the front end must implement, in order for them to be part of
the webpack asset loading pipeline.
"""
# : You should set a unique human readable name
unique_slug = ""
# : Relative path to js source file for webpack to use as entry point
# : For instance: "kolibri/core/assets/src/kolibri_core_app.js"
src_file = ""
# : Kolibri version for build hashes
version = kolibri.__version__
# : When being included for synchronous loading, should the source files
# : for this be inlined?
inline = False
def __init__(self, *args, **kwargs):
super(WebpackBundleHook, self).__init__(*args, **kwargs)
# Verify the uniqueness of the slug
# It can be '0' in the parent class constructor
assert (
len([x for x in self.registered_hooks if x.unique_slug == self.unique_slug])
<= 1
), "Non-unique slug found: '{}'".format(self.unique_slug)
if not self._meta.abstract:
assert self.src_file, "No source JS defined"
@hooks.abstract_method
def get_by_slug(self, slug):
"""
Fetch a registered hook instance by its unique slug
"""
for hook in self.registered_hooks:
if hook.unique_slug == slug:
return hook
raise BundleNotFound("No bundle with that name is loaded: {}".format(slug))
@cached_property
@hooks.registered_method
def _stats_file_content(self):
"""
:returns: A dict of the data contained in the JSON files which are
written by Webpack.
"""
cache_key = "json_stats_file_cache_{slug}".format(slug=self.unique_slug)
try:
stats_file_content = cache.get(cache_key)
if not stats_file_content or getattr(
django_settings, "DEVELOPER_MODE", False
):
with io.open(self._stats_file, mode="r", encoding="utf-8") as f:
stats = json.load(f)
if getattr(django_settings, "DEVELOPER_MODE", False):
timeout = 0
while stats["status"] == "compiling":
time.sleep(0.1)
timeout += 0.1
with io.open(self._stats_file, mode="r", encoding="utf-8") as f:
stats = json.load(f)
if timeout >= 5:
raise WebpackError("Webpack compilation still in progress")
if stats["status"] == "error":
raise WebpackError("Webpack compilation has errored")
stats_file_content = {
"files": stats.get("chunks", {}).get(self.unique_slug, []),
"hasMessages": stats.get("messages", False),
}
# Don't invalidate during runtime.
# Might need to change this if we move to a different cache backend.
cache.set(cache_key, stats_file_content, None)
return stats_file_content
except IOError as e:
if hasattr(e, "filename"):
problem = "Problems loading: {file}".format(file=e.filename)
else:
problem = "Not file-related."
raise WebpackError(
"Webpack build file missing, front-end assets cannot be loaded. {problem}".format(
problem=problem
)
)
@property
@hooks.registered_method
def bundle(self):
"""
:returns: a generator yielding dict objects with properties of the built
asset, most notably its URL.
"""
for f in self._stats_file_content["files"]:
filename = f["name"]
if not getattr(django_settings, "DEVELOPER_MODE", False):
if any(
list(regex.match(filename) for regex in settings.IGNORE_PATTERNS)
):
continue
relpath = "{0}/{1}".format(self.unique_slug, filename)
if getattr(django_settings, "DEVELOPER_MODE", False):
try:
f["url"] = f["publicPath"]
except KeyError:
f["url"] = staticfiles_storage.url(relpath)
else:
f["url"] = staticfiles_storage.url(relpath)
yield f
@property
@hooks.registered_method
def webpack_bundle_data(self):
"""
This is the main interface to the NPM Webpack building util. It is
used by the webpack_json management command. Inheritors may wish to
customize this.
:returns: A dict with information expected by webpack parsing process,
or None if the src_file does not exist.
"""
if os.path.exists(
os.path.join(os.path.dirname(self._build_path), self.src_file)
):
return {
"name": self.unique_slug,
"src_file": self.src_file,
"static_dir": self._static_dir,
"plugin_path": self._module_file_path,
"stats_file": self._stats_file,
"locale_data_folder": self.locale_data_folder,
"version": self.version,
}
else:
logger.warning(
"{src_file} not found for plugin {name}.".format(
src_file=self.src_file, name=self.unique_slug
)
)
@property
def locale_data_folder(self):
if self._module_path.startswith("kolibri."):
return os.path.join(
getattr(django_settings, "LOCALE_PATHS")[0], "en", "LC_MESSAGES"
)
# Is an external plugin, do otherwise!
else:
return os.path.join(
os.path.dirname(self._build_path),
getattr(self, "locale_path", "locale"),
"en",
"LC_MESSAGES",
)
@property
def _module_path(self):
return ".".join(self.__module__.split(".")[:-1])
@property
def _build_path(self):
"""
An auto-generated path to where the build-time files are stored,
containing information about the built bundles.
"""
return resource_filename(self._module_path, "build")
@property
def _static_dir(self):
return resource_filename(self._module_path, "static")
@property
def _stats_file(self):
"""
An auto-generated path to where the build-time files are stored,
containing information about the built bundles.
"""
return os.path.join(
self._build_path, "{plugin}_stats.json".format(plugin=self.unique_slug)
)
@property
def _module_file_path(self):
"""
Returns the path of the package containing the class that inherits this hook.
"""
return os.path.dirname(self._build_path)
def frontend_message_file(self, lang_code):
message_file_name = "{name}-messages.json".format(name=self.unique_slug)
for path in getattr(django_settings, "LOCALE_PATHS", []):
file_path = os.path.join(
path, to_locale(lang_code), "LC_MESSAGES", message_file_name
)
if os.path.exists(file_path):
return file_path
def frontend_messages(self):
lang_code = get_language()
cache_key = "json_stats_file_cache_{slug}_{lang}".format(
slug=self.unique_slug, lang=lang_code
)
message_file_content = cache.get(cache_key)
if not message_file_content or getattr(
django_settings, "DEVELOPER_MODE", False
):
frontend_message_file = self.frontend_message_file(lang_code)
if frontend_message_file:
with io.open(frontend_message_file, mode="r", encoding="utf-8") as f:
# Load JSON file, then immediately convert it to a string in minified form.
message_file_content = json.dumps(
json.load(f), separators=(",", ":")
)
cache.set(cache_key, message_file_content, None)
return message_file_content
def sorted_chunks(self):
bidi = get_language_info(get_language())["bidi"]
return sorted(
filter(partial(filter_by_bidi, bidi), self.bundle),
key=lambda x: x["name"].split(".")[-1],
)
def js_and_css_tags(self):
js_tag = '<script type="text/javascript" src="{url}"></script>'
css_tag = '<link type="text/css" href="{url}" rel="stylesheet"/>'
inline_js_tag = '<script type="text/javascript">{src}</script>'
inline_css_tag = "<style>{src}</style>"
# Sorted to load css before js
for chunk in self.sorted_chunks():
src = None
if chunk["name"].endswith(".js"):
if self.inline:
# During development, we do not write built files to disk
# Because of this, this call might return None
src = self.get_filecontent(chunk["url"])
if src is not None:
# If it is not None, then we can inline it
yield inline_js_tag.format(src=src)
else:
# If src is None, either this is not something we should be inlining
# or we are in development mode and need to fetch the file from the
# development server, not the disk
yield js_tag.format(url=chunk["url"])
elif chunk["name"].endswith(".css"):
if self.inline:
# During development, we do not write built files to disk
# Because of this, this call might return None
src = self.get_filecontent(chunk["url"])
if src is not None:
# If it is not None, then we can inline it
yield inline_css_tag.format(src=src)
else:
# If src is None, either this is not something we should be inlining
# or we are in development mode and need to fetch the file from the
# development server, not the disk
yield css_tag.format(url=chunk["url"])
def frontend_message_tag(self):
if self.frontend_messages():
return [
'<script>{kolibri_name}.registerLanguageAssets("{bundle}", "{lang_code}", {messages});</script>'.format(
kolibri_name=conf.KOLIBRI_CORE_JS_NAME,
bundle=self.unique_slug,
lang_code=get_language(),
messages=self.frontend_messages(),
)
]
else:
return []
def get_basename(self, url):
"""
Takes full path to a static file (eg. "/static/css/style.css") and
returns path with storage's base url removed (eg. "css/style.css").
"""
base_url = staticfiles_storage.base_url
# Cast ``base_url`` to a string to allow it to be
# a string-alike object to e.g. add ``SCRIPT_NAME``
# WSGI param as a *path prefix* to the output URL.
# See https://code.djangoproject.com/ticket/25598.
base_url = text_type(base_url)
if not url.startswith(base_url):
return None
basename = url.replace(base_url, "", 1)
# drop the querystring, which is used for non-compressed cache-busting.
return basename.split("?", 1)[0]
def get_filename(self, basename):
"""
Returns full path to a file, for example:
get_filename('css/one.css') -> '/full/path/to/static/css/one.css'
"""
filename = None
# First try finding the file using the storage class.
# This is skipped in DEVELOPER_MODE mode as files might be outdated
# Or may not even be on disk.
if not getattr(django_settings, "DEVELOPER_MODE", False):
filename = staticfiles_storage.path(basename)
if not staticfiles_storage.exists(basename):
filename = None
# secondly try to find it with staticfiles
if not filename:
filename = find_staticfiles(url2pathname(basename))
return filename
def get_filecontent(self, url):
"""
Reads the contents of the static file at the given URL and returns them as text.
"""
cache_key = "inline_static_file_content_{url}".format(url=url)
content = cache.get(cache_key)
if content is None:
# Removes Byte Order Mark
charset = "utf-8-sig"
basename = self.get_basename(url)
if basename is None:
return None
filename = self.get_filename(basename)
if filename is None:
return None
with codecs.open(filename, "r", charset) as fd:
content = fd.read()
# Cache this forever, as URLs will update for new files
cache.set(cache_key, content, None)
return content
def render_to_page_load_sync_html(self):
"""
Generates the appropriate script tags for the bundle, be they JS or CSS
files.
:param bundle_data: The data returned from
:return: HTML of script tags for insertion into a page.
"""
tags = self.frontend_message_tag() + list(self.js_and_css_tags())
return mark_safe("\n".join(tags))
def render_to_page_load_async_html(self):
"""
Generates script tag containing Javascript to register an
asynchronously loading Javascript FrontEnd plugin against the core
front-end Kolibri app. It passes in the events that would trigger
loading the plugin, both multi-time firing events (events) and one time
firing events (once).
It also passes in information about the methods that the events should
be delegated to once the plugin has loaded.
TODO: What do we do with the extension parameter here?
:returns: HTML of a script tag to insert into a page.
"""
urls = [chunk["url"] for chunk in self.sorted_chunks()]
tags = self.frontend_message_tag() + [
'<script>{kolibri_name}.registerKolibriModuleAsync("{bundle}", ["{urls}"]);</script>'.format(
kolibri_name=conf.KOLIBRI_CORE_JS_NAME,
bundle=self.unique_slug,
urls='","'.join(urls),
)
]
return mark_safe("\n".join(tags))
class Meta:
abstract = True
class WebpackInclusionHook(hooks.KolibriHook):
"""
To define an asset target for inclusion in some HTML template, you must
define an inheritor of ``WebpackBundleHook`` for the asset files themselves
and then a ``WebpackInclusionHook`` to define where the inclusion takes
place.
This abstract hook does nothing by itself; it is just the universal inclusion
hook, and no template is expected to include ALL assets at once.
"""
#: Should be set to a ``WebpackBundleHook`` subclass, possibly an abstract one
bundle_class = None
def __init__(self, *args, **kwargs):
super(WebpackInclusionHook, self).__init__(*args, **kwargs)
if not self._meta.abstract:
assert (
self.bundle_class is not None
), "Must specify bundle_class property, this one did not: {} ({})".format(
type(self), type(self.bundle_class)
)
def render_to_page_load_sync_html(self):
html = ""
bundle = self.bundle_class()
if not bundle._meta.abstract:
html = bundle.render_to_page_load_sync_html()
else:
for hook in bundle.registered_hooks:
html += hook.render_to_page_load_sync_html()
return mark_safe(html)
def render_to_page_load_async_html(self):
html = ""
bundle = self.bundle_class()
if not bundle._meta.abstract:
html = bundle.render_to_page_load_async_html()
else:
for hook in bundle.registered_hooks:
html += hook.render_to_page_load_async_html()
return mark_safe(html)
class Meta:
abstract = True
class FrontEndCoreAssetHook(WebpackBundleHook):
def render_to_page_load_sync_html(self):
"""
Generates the appropriate script tags for the core bundle, be they JS or CSS
files.
:return: HTML of script tags for insertion into a page.
"""
tags = []
if self.frontend_messages():
tags = [
"<script>var coreLanguageMessages = {messages};</script>".format(
messages=self.frontend_messages()
)
]
tags += list(self.js_and_css_tags())
return mark_safe("\n".join(tags))
class Meta:
abstract = True
class FrontEndCoreHook(WebpackInclusionHook):
"""
A hook that asserts its only applied once, namely to load the core. This
should only be inherited once which is also an enforced property for now.
This is loaded before everything else.
"""
bundle = FrontEndCoreAssetHook
def __init__(self, *args, **kwargs):
super(FrontEndCoreHook, self).__init__(*args, **kwargs)
assert len(list(self.registered_hooks)) <= 1, "Only one core asset allowed"
assert isinstance(
self.bundle, FrontEndCoreAssetHook
), "Only allows a FrontEndCoreAssetHook instance as bundle"
class Meta:
abstract = True
class FrontEndBaseSyncHook(WebpackInclusionHook):
"""
Inherit a hook defining assets to be loaded in kolibri/base.html, that means
ALL pages. Use with care.
"""
class Meta:
abstract = True
class FrontEndBaseASyncHook(WebpackInclusionHook):
"""
Inherit a hook defining assets to be loaded in kolibri/base.html, that means
ALL pages. Use with care.
"""
class Meta:
abstract = True
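# Hedged sketch, illustrative only and not part of this module: what a plugin
# would typically declare to hook its assets into the pipeline above. The slug,
# source path, and class names are hypothetical; in a real plugin these
# definitions live in the plugin's own hooks module.
class ExamplePluginAssetHook(WebpackBundleHook):
    # Unique slug that the webpack_json command and the stats files key off.
    unique_slug = "example_plugin_module"
    # Webpack entry point, relative to the plugin package.
    src_file = "assets/src/example_plugin_module.js"
class ExamplePluginBaseSyncHook(FrontEndBaseSyncHook):
    # Include the bundle above synchronously on every page (kolibri/base.html).
    bundle_class = ExamplePluginAssetHook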
|
|
import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
from matplotlib import cm
from numpy.random import randn
# table path
path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)
"""
There are 15 columns in the table:
1. 'APOGEEID' -- The name of the star
2. 'VISIT' -- The name of the visit file
3. BJD -- Barycentric JD
Inferred labels are from the Cannon. The spectra we use are the first of the two combined spectra
(there are two combined spectra for each star, obtained by two different methods):
(1) global weighting, where each visit spectrum is weighted by its (S/N)^2, and
(2) pixel-by-pixel weighting, where each pixel is weighted by its (S/N)^2.
4. TEFF
5. LOGG
6. FEH
The abc parameters for each visit:
7. A -- parameter a
8. B -- parameter b
9. C -- parameter c
10. CHIINF -- chi-squared for the inferred flux from the cannon (a=0,b=1,c=0)
11. CHIMIX -- chi-squared for the mixed flux from the abc fit.
12. VBARY -- The barycentric Velocity(km/s) from the APOGEE team.
13. VSHIFT -- The velocity shift from the abc fit(km/s)
14. FIBER -- Fiber ID
15. SNR -- SNR of the visit
####
The covariance matrix of the abc fit is in HDU0 data, which is
a 3*3*N 3-d matrix. N is the number of visits.
###
"""
# read the covariance matrix of the abc fit for the first visit:
un_cov = star[0].data[:,:,0]
#print(un_cov)
# read the velocity shift from the abc fit
v_shift = table["VSHIFT"]
#print(v_shift.shape)
########################
#Read table and plot to check.
class plot():
def read_table(self):
path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)
print("reading data")
# read it:
un_cov = star[0].data
self.un_cov = un_cov
a = table["A"]
b = table["B"]
c = table["C"]
self.a = a
self.b = b
self.c = c
mask = 2*b>a+c
self.mask = mask
name = table["APOGEEID"]
self.name = name
SHIFT = table["VSHIFT"]
self.shift = SHIFT
VBARY = table["VBARY"]
self.VBARY = VBARY
teff = table["TEFF"]
self.teff = teff
logg = table["LOGG"]
self.logg = logg
feh = table["FEH"]
self.feh = feh
self.chi_inf = table["CHIINF"]
self.chi_mix = table["CHIMIX"]
self.BJD = table["BJD"]
self.fiber = table["FIBER"]
self.SNR =table["SNR"]
def plot_teff_logg(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
# shift is in km/s
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
mask2 = (shift>-1)&(shift<1)
logg = logg[mask2]
teff = teff[mask2]
shift = shift[mask2]
ax1.set_ylabel("$Teff \quad (K)$", fontsize=20)
ax1.set_xlabel("$logg \quad (dex)$", fontsize=20)
f.subplots_adjust(right=0.8)
pl = ax1.scatter(logg,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap="RdBu")
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$RV shifts \quad (Km/s)$", fontsize=20)
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{Teff vs Logg for the red clump stars in DR13}', fontsize=30,weight="bold")
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_feh(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
mask2 = (shift > -1) & (shift < 1)
feh = feh[mask2]
teff = teff[mask2]
shift = shift[mask2]
#ax1
ax1.set_ylabel("$Teff \quad (K)$", fontsize=20,weight="bold")
ax1.set_xlabel("$FeH \quad (dex)$", fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(feh,teff, marker='x', c=shift,
vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap="RdBu")
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$RV \quad shifts \quad (Km/s)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{Teff vs FeH for red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_logg_bac(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
low = -1
up = 3
mask2 = ((bac>-1) & (bac<3))
logg = logg[mask2]
teff = teff[mask2]
bac = bac[mask2]
# plt.cm.viridis
ax1.set_ylabel('$Teff \quad (K)$', fontsize=20,weight="bold")
ax1.set_xlabel('$Logg \quad (dex)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(logg,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap="viridis")
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$2b-a-c$ ", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{Teff vs Logg for the red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc_2bac" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_teff_feh_bac(self):
# only show visits with 2b>a+c
mask = self.mask
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
shift = self.shift[mask]
a = self.a
b = self.b
c = self.c
bac = (2*b-a-c)[mask]
low = -1
up = 3
mask2 = ((bac > -1) & (bac < 3))
feh = feh[mask2]
teff = teff[mask2]
bac = bac[mask2]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.set_ylabel('$Teff \quad (K)$', fontsize=20,weight="bold")
ax1.set_xlabel('$FeH \quad (dex)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(feh,teff, marker='x', c=bac,
vmin=low, vmax=up, alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$2b-a-c$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{Teff vs FeH for the red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc_2bac" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_shift_bjd(self):
mask = self.mask
shift =self.shift[mask]
BJD = self.BJD[mask]
feh = self.feh[mask]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.set_xlabel('$BJD$', fontsize=20,weight="bold")
ax1.set_ylabel('$RV shift \quad (Km/s)$ ', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(BJD,shift, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$Fe/H \quad (dex)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{RV shift vs BJD for the red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_BJD_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_rv_fiber(self):
mask = self.mask
a = self.a[mask]
b = self.b[mask]
c = self.c[mask]
fiber = self.fiber[mask]
SNR = self.SNR[mask]
portion = (c+a)/(a+b+c)
RV = (c - a) / (a + b + c) * 4.14468
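# 4.14468 km/s is presumably the velocity width of one pixel on the APOGEE
# log-lambda wavelength grid, so the sub-pixel centroid shift (c-a)/(a+b+c)
# is converted to a velocity here (an assumption; the constant is not derived
# in this script).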
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
mask2 = ((SNR>30) & (SNR<300))
SNR = SNR[mask2]
fiber = fiber[mask2]
RV = RV[mask2]
ax1.set_xlabel('$FiberID$', fontsize=20,weight="bold")
ax1.set_ylabel('$RV shift \quad (Km/s)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(fiber,RV, marker='x', c=SNR,
vmin=30, vmax=300, alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$SNR$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{RV shifts vs FiberID for the red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_Fiber_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_ac_fiber(self):
mask = self.mask
a = self.a[mask]
b = self.b[mask]
c = self.c[mask]
fiber = self.fiber[mask]
portion = (c+a)/(a+b+c)
RV = (c - a) / (a + b + c) * 4.14468
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
mask2 = (RV>-1)&(RV<1)
fiber = fiber[mask2]
portion = portion[mask2]
RV = RV[mask2]
alpha = 0.3
#ax1
ax1.set_xlabel('$FiberID$', fontsize=20,weight="bold")
ax1.set_ylabel('$(c+a)/(a+b+c)$ ', fontsize=20,weight="bold")
axes = plt.gca()
axes.set_ylim([-1,1])
f.subplots_adjust(right=0.8)
pl = ax1.scatter(fiber,portion, marker='x', c=RV,
vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap='RdBu')
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$RV \quad shifts \quad (Km/s)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{(c+a)/(a+b+c) vs FiberID for the red clump stars in DR13}', fontsize=30)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "ac_vs_Fiber_rc" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def histogram_shift_abc(self):
a = self.a
b = self.b
c = self.c
RV = (c-a)/(a+b+c)*4.14468
# add a mask: only show results with 2b>a+c
mask = 2*b>a+c
a = a[mask]
b = b[mask]
c = c[mask]
RV = RV[mask]
font = {'weight': 'bold', 'size': 15}
matplotlib.rc('font', **font)
f, ((ax1, ax2), (ax3, ax4)) = \
plt.subplots(2, 2)
colors = ["cyan",'b', 'g', 'r']
name = ["RV","a", "b", "c"]
# histogram of rv
#ax1
rms_RV = (np.nansum(RV*RV)/len(RV))**0.5
rms_a = (np.nansum(a * a) / len(a)) ** 0.5
rms_b = (np.nansum(b*b) / len(b)) ** 0.5
rms_c = (np.nansum(c * c) / len(c)) ** 0.5
ax1.hist(RV, bins=40, color=colors[0], label="%s RMS = %.2f $Km/s$"%(name[0],rms_RV))
#ax1.set_title('Histogram of Radial velocity shifts', fontsize=30)
ax1.set_xlabel('$values \quad of\quad radial\quad velocity\quad shifts \quad (Km/s)$', fontsize=15,weight="bold")
ax1.set_ylabel('$Number$', fontsize=15,weight="bold")
ax1.legend(prop={'size': 15})
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of a
#ax2
ax2.hist(a, bins=40, color=colors[1], label="$%s RMS = %.2f$"%(name[1],rms_a))
#ax2.set_title('Histogram of parameter a', fontsize=30)
ax2.set_xlabel('$values\quad of\quad parameter\quad a$', fontsize=15,weight="bold")
ax2.set_ylabel('$Number$', fontsize=15,weight="bold")
ax2.legend(prop={'size': 15})
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of b
#ax3
ax3.hist(b, bins=40, color=colors[2], label="$%s RMS = %.2f$"%(name[2],rms_b))
ax3.legend(prop={'size': 15})
#ax3.set_title('Histogram of paramete b', fontsize=30)
ax3.set_xlabel("$values\quad of\quad parameter\quad b$", fontsize=15,weight="bold")
ax3.set_ylabel('$Number$', fontsize=15,weight="bold")
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
# histogram of c
#ax4
ax4.hist(c, bins=40, color=colors[3], label="$%s RMS = %.2f$"%(name[3],rms_c))
ax4.legend(prop={'size': 15})
#ax4.set_title('Histogram of parameter c', fontsize=30)
ax4.set_xlabel("$values\quad of\quad parameter\quad c$", fontsize=15,weight="bold")
ax4.set_ylabel('$Number$', fontsize=15,weight="bold")
# add vertical grey line
# ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{Histogram of RV shifts, a, b and c for the red clump stars in DR13}', fontsize=25)
f.legends
#f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines")
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 8.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "histogram_rv_shift_rc" + ".png"
fig.savefig(save_path, dpi=500)
plt.close()
# RV before after
def plot_RV_rms_before_after_teff(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
# From the average (c+a)/(a+b+c)
# Do put a mask here
mask = self.mask
# add points with the same fiberid together
name = self.name[mask]
target = list(set(name))
VBARY = self.VBARY[mask]
shift =self.shift[mask]
#SNR = self.SNR[mask]
fusion_new = []
# name+rms_old and rms_new + Teff logg feh
for i in range(0,len(target)):
print("Doing %.2f %%"%(i/len(target)*100))
index = np.where(name == target[i])
index = np.array(index)
index = index.ravel()
rms_old_i = np.std(VBARY[index])
rms_new_i = np.std(VBARY[index]+shift[index])
teff_i = np.nanmedian(teff[index])
logg_i = np.nanmedian(logg[index])
feh_i = np.nanmedian(feh[index])
fusion_new.append([target[i],rms_old_i,rms_new_i,teff_i,logg_i,feh_i])
fusion_new = np.array(fusion_new)
self.fusion_new = fusion_new
# portion+fiber+rv
# name = fusion_new[:, 0]
rms_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
rms_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
# convert the object-array column back to a numeric array
teff = np.array(fusion_new[:,3],dtype=np.float16).ravel()
mask2 = (rms_old < 1)&(rms_new<1)
rms_old = rms_old[mask2]
rms_new = rms_new[mask2]
teff = teff[mask2]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
low = 4500
up =5300
mask2 = (teff>4500)&(teff<5300)
teff = teff[mask2]
rms_old = rms_old[mask2]
rms_new = rms_new[mask2]
#ax1
ax1.plot(rms_old,rms_old,"k",alpha=0.3,linewidth=0.3)
ax1.set_xlabel('$RMS\quad of\quad RVs\quad before\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
ax1.set_ylabel('$RMS\quad of\quad RVs\quad after\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(rms_old,rms_new, marker='x', c=teff,
vmin=low, vmax=up, alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$Teff \quad (K)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{RMS of RVs before vs after the correction for the red clump stars in DR13}', fontsize=25)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 11.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_rms_before_after_teff" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_RV_rms_before_after_logg(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
fusion_new =self.fusion_new
# name = fusion_new[:, 0]
rms_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
rms_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
logg = np.array(fusion_new[:,4],dtype=np.float16).ravel()
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
mask2 = ((logg>2) & (logg<3.25))
logg = logg[mask2]
rms_old = rms_old[mask2]
rms_new = rms_new[mask2]
mask2 = (rms_old < 1) & (rms_new < 1)
rms_old = rms_old[mask2]
rms_new = rms_new[mask2]
logg = logg[mask2]
alpha = 0.3
#ax1
ax1.plot(rms_old,rms_old, "k", alpha=0.3, linewidth=0.3)
ax1.set_xlabel('$RMS\quad of\quad RVs\quad before\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
ax1.set_ylabel('$RMS\quad of\quad RVs\quad after\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(rms_old,rms_new, marker='x', c=logg,
vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$logg \quad (dex)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{RMS of RVs before vs after the correction for the red clump stars in DR13}', fontsize=25)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 11.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_rms_before_after_logg" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
def plot_RV_rms_before_after_feh(self):
mask = self.mask
shift =self.shift[mask]
VBARY = self.VBARY[mask]
teff = self.teff[mask]
logg = self.logg[mask]
feh = self.feh[mask]
fusion_new =self.fusion_new
# name = fusion_new[:, 0]
rms_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
rms_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
feh = np.array(fusion_new[:,5],dtype=np.float16).ravel()
# only show rms<1 km/s
mask2 = (rms_old < 1) & (rms_new < 1)
rms_old = rms_old[mask2]
rms_new = rms_new[mask2]
feh = feh[mask2]
font = {'family': 'normal',
'weight': 'bold',
'size': 14}
matplotlib.rc('font', **font)
f, ax1 = plt.subplots(1,1)
alpha = 0.3
#ax1
ax1.plot(rms_old,rms_old, "k", alpha=0.3, linewidth=0.3)
ax1.set_xlabel('$RMS\quad of\quad RVs\quad before\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
ax1.set_ylabel('$RMS\quad of\quad RVs\quad after\quad the\quad correction \quad (km/s)$', fontsize=20,weight="bold")
f.subplots_adjust(right=0.8)
pl = ax1.scatter(rms_old,rms_new, marker='x', c=feh,
vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=plt.cm.viridis)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.solids.set_edgecolor("face")
cb.set_label("$FeH \quad (dex)$", fontsize=20,weight="bold")
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
f.suptitle(r'\textbf{RMS of RVs before vs after the correction for the red clump stars in DR13}', fontsize=25)
# save them:
fig = matplotlib.pyplot.gcf()
# adjust the size based on the number of visit
fig.set_size_inches(14.5, 11.5)
save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_rms_before_after_feh" +".png"
fig.savefig(save_path, dpi=500)
plt.close()
model = plot()
model.read_table()
# Teff logg feh
model.plot_teff_logg()
model.plot_teff_feh()
model.plot_teff_logg_bac()
model.plot_teff_feh_bac()
# vs fiber+bjd
model.plot_shift_bjd()
model.plot_rv_fiber()
model.plot_ac_fiber()
# histogram
model.histogram_shift_abc()
#VBARY vs
model.plot_RV_rms_before_after_teff()
model.plot_RV_rms_before_after_logg()
model.plot_RV_rms_before_after_feh()
|
|
"""Base Command class, and related routines"""
from __future__ import absolute_import, print_function
import logging
import logging.config
import optparse
import os
import platform
import sys
import traceback
from pipenv.patched.notpip._internal.cli import cmdoptions
from pipenv.patched.notpip._internal.cli.command_context import CommandContextMixIn
from pipenv.patched.notpip._internal.cli.parser import (
ConfigOptionParser,
UpdatingDefaultsHelpFormatter,
)
from pipenv.patched.notpip._internal.cli.status_codes import (
ERROR,
PREVIOUS_BUILD_DIR_ERROR,
SUCCESS,
UNKNOWN_ERROR,
VIRTUALENV_NOT_FOUND,
)
from pipenv.patched.notpip._internal.exceptions import (
BadCommand,
CommandError,
InstallationError,
PreviousBuildDirError,
UninstallationError,
)
from pipenv.patched.notpip._internal.utils.deprecation import deprecated
from pipenv.patched.notpip._internal.utils.filesystem import check_path_owner
from pipenv.patched.notpip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
from pipenv.patched.notpip._internal.utils.misc import get_prog, normalize_path
from pipenv.patched.notpip._internal.utils.temp_dir import global_tempdir_manager
from pipenv.patched.notpip._internal.utils.typing import MYPY_CHECK_RUNNING
from pipenv.patched.notpip._internal.utils.virtualenv import running_under_virtualenv
if MYPY_CHECK_RUNNING:
from typing import List, Tuple, Any
from optparse import Values
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(CommandContextMixIn):
usage = None # type: str
ignore_require_venv = False # type: bool
def __init__(self, name, summary, isolated=False):
# type: (str, str, bool) -> None
super(Command, self).__init__()
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': name,
'description': self.__doc__,
'isolated': isolated,
}
self.name = name
self.summary = summary
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def handle_pip_version_check(self, options):
# type: (Values) -> None
"""
This is a no-op so that commands by default do not do the pip version
check.
"""
# Make sure we do the pip version check if the index_group options
# are present.
assert not hasattr(options, 'no_index')
def run(self, options, args):
# type: (Values, List[Any]) -> Any
raise NotImplementedError
def parse_args(self, args):
# type: (List[str]) -> Tuple[Any, Any]
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
# type: (List[str]) -> int
try:
with self.main_context():
return self._main(args)
finally:
logging.shutdown()
def _main(self, args):
# type: (List[str]) -> int
# Intentionally set as early as possible so globally-managed temporary
# directories are available to the rest of the code.
self.enter_context(global_tempdir_manager())
options, args = self.parse_args(args)
# Set verbosity so that it can be used elsewhere.
self.verbosity = options.verbose - options.quiet
level_number = setup_logging(
verbosity=self.verbosity,
no_color=options.no_color,
user_log_file=options.log,
)
if (
sys.version_info[:2] == (2, 7) and
not options.no_python_version_warning
):
message = (
"A future version of pip will drop support for Python 2.7. "
"More details about Python 2 support in pip, can be found at "
"https://pip.pypa.io/en/latest/development/release-process/#python-2-support" # noqa
)
if platform.python_implementation() == "CPython":
message = (
"Python 2.7 reached the end of its life on January "
"1st, 2020. Please upgrade your Python as Python 2.7 "
"is no longer maintained. "
) + message
deprecated(message, replacement=None, gone_in=None)
if options.skip_requirements_regex:
deprecated(
"--skip-requirements-regex is unsupported and will be removed",
replacement=(
"manage requirements/constraints files explicitly, "
"possibly generating them from metadata"
),
gone_in="20.1",
issue=7297,
)
# TODO: Try to get these passing down from the command?
# without resorting to os.environ to hold these.
# This also affects isolated builds and it should.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv and not self.ignore_require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
if options.cache_dir:
options.cache_dir = normalize_path(options.cache_dir)
if not check_path_owner(options.cache_dir):
logger.warning(
"The directory '%s' or its parent directory is not owned "
"or is not writable by the current user. The cache "
"has been disabled. Check the permissions and owner of "
"that directory. If executing pip with sudo, you may want "
"sudo's -H flag.",
options.cache_dir,
)
options.cache_dir = None
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return ERROR
except CommandError as exc:
logger.critical('%s', exc)
logger.debug('Exception information:', exc_info=True)
return ERROR
except BrokenStdoutLoggingError:
# Bypass our logger and write any remaining messages to stderr
# because stdout no longer works.
print('ERROR: Pipe to stdout was broken', file=sys.stderr)
if level_number <= logging.DEBUG:
traceback.print_exc(file=sys.stderr)
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:', exc_info=True)
return ERROR
except BaseException:
logger.critical('Exception:', exc_info=True)
return UNKNOWN_ERROR
finally:
self.handle_pip_version_check(options)
return SUCCESS
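# Hedged sketch, illustrative only and not part of pip: the minimal shape of a
# Command subclass. Real pip commands also register themselves in the command
# table and add their own options to ``self.cmd_opts``; that wiring is omitted.
class ExampleCommand(Command):
    """Log the arguments the command was invoked with."""
    usage = "%prog [options] <args>"
    ignore_require_venv = True
    def run(self, options, args):
        # type: (Values, List[Any]) -> int
        logger.info("example command called with args: %s", args)
        return SUCCESS
# Hypothetical usage: ExampleCommand("example", "Log arguments").main(["foo"])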
|
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from testtools import matchers
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest.openstack.common import log as logging
from tempest import test
LOG = logging.getLogger(__name__)
class VolumesV2ListTestJSON(base.BaseVolumeTest):
"""
This test creates a number of 1G volumes. To run successfully,
ensure that the backing file for the volume group that Nova uses
has space for at least 3 1G volumes!
If you are running a Devstack environment, ensure that the
VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
"""
VOLUME_FIELDS = ('id', 'name')
def assertVolumesIn(self, fetched_list, expected_list, fields=None):
if fields:
expected_list = map(operator.itemgetter(*fields), expected_list)
fetched_list = map(operator.itemgetter(*fields), fetched_list)
missing_vols = [v for v in expected_list if v not in fetched_list]
if len(missing_vols) == 0:
return
def str_vol(vol):
return "%s:%s" % (vol['id'], vol[self.name])
raw_msg = "Could not find volumes %s in expected list %s; fetched %s"
self.fail(raw_msg % ([str_vol(v) for v in missing_vols],
[str_vol(v) for v in expected_list],
[str_vol(v) for v in fetched_list]))
@classmethod
def resource_setup(cls):
super(VolumesV2ListTestJSON, cls).resource_setup()
cls.client = cls.volumes_client
cls.name = cls.VOLUME_FIELDS[1]
# Create 3 test volumes
cls.volume_list = []
cls.volume_id_list = []
cls.metadata = {'Type': 'work'}
for i in range(3):
volume = cls.create_volume(metadata=cls.metadata)
_, volume = cls.client.get_volume(volume['id'])
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
@classmethod
def resource_cleanup(cls):
# Delete the created volumes
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
super(VolumesV2ListTestJSON, cls).resource_cleanup()
def _list_by_param_value_and_assert(self, params, with_detail=False):
"""
Perform list or list_details action with given params
and validates result.
"""
if with_detail:
_, fetched_vol_list = \
self.client.list_volumes_with_detail(params=params)
else:
_, fetched_vol_list = self.client.list_volumes(params=params)
# Validating params of fetched volumes
# In v2, only list detail view includes items in params.
# In v1, list view and list detail view are same. So the
# following check should be run when 'with_detail' is True
# or v1 tests.
if with_detail or self._api_version == 1:
for volume in fetched_vol_list:
for key in params:
msg = "Failed to list volumes %s by %s" % \
('details' if with_detail else '', key)
if key == 'metadata':
self.assertThat(
volume[key].items(),
matchers.ContainsAll(params[key].items()),
msg)
else:
self.assertEqual(params[key], volume[key], msg)
@test.attr(type='smoke')
def test_volume_list(self):
# Get a list of Volumes
# Fetch all volumes
_, fetched_list = self.client.list_volumes()
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.attr(type='gate')
def test_volume_list_with_details(self):
# Get a list of Volumes with details
# Fetch all Volumes
_, fetched_list = self.client.list_volumes_with_detail()
self.assertVolumesIn(fetched_list, self.volume_list)
@test.attr(type='gate')
def test_volume_list_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name]}
_, fetched_vol = self.client.list_volumes(params)
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
self.assertEqual(fetched_vol[0][self.name],
volume[self.name])
@test.attr(type='gate')
def test_volume_list_details_by_name(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name]}
_, fetched_vol = self.client.list_volumes_with_detail(params)
self.assertEqual(1, len(fetched_vol), str(fetched_vol))
self.assertEqual(fetched_vol[0][self.name],
volume[self.name])
@test.attr(type='gate')
def test_volumes_list_by_status(self):
params = {'status': 'available'}
_, fetched_list = self.client.list_volumes(params)
self._list_by_param_value_and_assert(params)
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.attr(type='gate')
def test_volumes_list_details_by_status(self):
params = {'status': 'available'}
_, fetched_list = self.client.list_volumes_with_detail(params)
for volume in fetched_list:
self.assertEqual('available', volume['status'])
self.assertVolumesIn(fetched_list, self.volume_list)
@test.attr(type='gate')
def test_volumes_list_by_availability_zone(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
zone = volume['availability_zone']
params = {'availability_zone': zone}
_, fetched_list = self.client.list_volumes(params)
self._list_by_param_value_and_assert(params)
self.assertVolumesIn(fetched_list, self.volume_list,
fields=self.VOLUME_FIELDS)
@test.attr(type='gate')
def test_volumes_list_details_by_availability_zone(self):
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
zone = volume['availability_zone']
params = {'availability_zone': zone}
_, fetched_list = self.client.list_volumes_with_detail(params)
for volume in fetched_list:
self.assertEqual(zone, volume['availability_zone'])
self.assertVolumesIn(fetched_list, self.volume_list)
@test.attr(type='gate')
def test_volume_list_with_param_metadata(self):
# Test to list volumes when metadata param is given
params = {'metadata': self.metadata}
self._list_by_param_value_and_assert(params)
@test.attr(type='gate')
def test_volume_list_with_detail_param_metadata(self):
# Test to list volumes details when metadata param is given
params = {'metadata': self.metadata}
self._list_by_param_value_and_assert(params, with_detail=True)
@test.attr(type='gate')
def test_volume_list_param_display_name_and_status(self):
# Test to list volume when display name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name],
'status': 'available'}
self._list_by_param_value_and_assert(params)
@test.attr(type='gate')
def test_volume_list_with_detail_param_display_name_and_status(self):
# Test to list volume when name and status param is given
volume = self.volume_list[data_utils.rand_int_id(0, 2)]
params = {self.name: volume[self.name],
'status': 'available'}
self._list_by_param_value_and_assert(params, with_detail=True)
class VolumesV2ListTestXML(VolumesV2ListTestJSON):
_interface = 'xml'
class VolumesV1ListTestJSON(VolumesV2ListTestJSON):
_api_version = 1
VOLUME_FIELDS = ('id', 'display_name')
class VolumesV1ListTestXML(VolumesV1ListTestJSON):
_interface = 'xml'
|
|
from io import StringIO
import types
import re
from contextlib import contextmanager
from .util import nested
class Node(object):
__slots__ = ('_futures',)
pattern = re.compile(r'(?P<bracket>\{\{|\}\})|'
r'\{(?P<prefix>\s*)(?P<key>\w+)(?P<suffix>\s*)\}')
def __init__(self, *args, **kwargs):
self._futures = []
for val, (slotname, slottype) in zip(args, self.__slots__.items()):
setattr(self, slotname, self._convert(val, slottype, slotname))
for slotname, slottype in self.__slots__.items():
val = kwargs.pop(slotname, None)
if val is not None:
setattr(self, slotname, self._convert(val, slottype, slotname))
assert not kwargs
def _convert(self, val, typ, name=None):
if isinstance(val, _FutureChildren):
self._futures.append(val)
val = val.content
elif isinstance(typ, ListConstraint):
val = List(typ, val)
elif not isinstance(val, typ):
if isinstance(typ, tuple):
val = typ[0](val)
else:
val = typ(val)
elif isinstance(val, Node):
self._futures.extend(val._futures)
return val
def format(self, stream):
if hasattr(self, 'each_line'):
for line in self.lines:
stream.line(self.each_line.format(line))
if hasattr(self, 'value'):
stream.write(str(self.value))
if hasattr(self, 'line_format'):
with stream.complexline():
self._format_line(self.line_format, stream)
if hasattr(self, 'block_start'):
with stream.block() as block:
with block.start():
self._format_line(self.block_start, stream)
for i in self.body:
i.format(stream)
with block.finish():
self._format_line(self.block_end, stream)
def _format_line(self, format, stream):
start = 0
for m in self.pattern.finditer(format):
if m.start() > start:
stream.write(format[start:m.start()])
if m.group('bracket'):
stream.write(m.group('bracket')[0])
elif m.group('key'):
fun = getattr(self, 'fmt_'+m.group('key'), None)
if fun is not None:
val = fun()
if val:
stream.write(m.group('prefix'))
stream.write(val)
stream.write(m.group('suffix'))
else:
val = getattr(self, m.group('key'))
if val:
stream.write(m.group('prefix'))
if isinstance(val, str):
stream.write(val)
else:
val.format(stream)
stream.write(m.group('suffix'))
start = m.end()
if start != len(format):
stream.write(format[start:])
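# Hedged sketch, not in the original module: a minimal Node subclass showing how
# the {key} template syntax in _format_line is consumed. The slot names, the
# line_format string, and the Ast usage below are illustrative assumptions.
class ExampleAssign(Node):
    __slots__ = {'target': str, 'expr': str}
    line_format = '{target} = {expr}'
# with Ast() as ast:
#     ast(ExampleAssign('x', '42'))
# str(ast) would then render "x = 42\n".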
class VSpace(Node):
__slots__ = {}
def format(self, stream):
stream.write('\n')
stream.line_start = True
class ListConstraint(object):
__slots__ = ('subtypes', 'separator')
def __init__(self, *subtypes, separator=', '):
self.subtypes = subtypes
self.separator = separator
class List(list):
__slots__ = ('type',)
def __init__(self, typ, *args):
self.type = typ
super(List, self).__init__(*args)
def format(self, stream):
if self:
self[0].format(stream)
for i in self[1:]:
stream.write(', ')
i.format(stream)
class _LazyRef(object):
__slots__ = ('name',)
def __init__(self, name):
self.name = name
class _Lazy(object):
def __init__(self):
self.items = set()
def __getattr__(self, name):
self.items.add(name)
return _LazyRef(name)
def fix(self, dic):
for node in dic.values():
if not isinstance(node, type) \
or not issubclass(node, Node) or node is Node:
continue
for k, v in node.__slots__.items():
if isinstance(v, tuple):
v1 = tuple(
dic[item.name] if isinstance(item, _LazyRef) else item
for item in v)
if v1 != v:
node.__slots__[k] = v1
else:
if isinstance(v, _LazyRef):
node.__slots__[k] = dic[v.name]
lazy = _Lazy() # Hi, I'm a singleton.
class _FutureChildren(object):
def __init__(self):
self.content = List(None)
self.zones = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
for (i, v) in enumerate(self.content):
if isinstance(v, _Zone):
self.content[i:i+1] = v.content
def __call__(self, node):
self.content.append(node)
if node._futures:
if len(node._futures) > 1:
return nested(*node._futures)
else:
return node._futures[0]
def insert_first(self, node):
self.content.insert(0, node)
def set_type(self, typ):
self.content.type = typ
def block(self):
return _FutureChildren()
def zone(self, name=None):
if name is not None:
res = self.zones.get(name)
if res is not None:
return res
res = self.zones[name] = _Zone()
else:
res = _Zone()
self.content.append(res)
return res
class _Zone(_FutureChildren):
pass
class _Block(object):
def __init__(self, stream, indent, full_line):
self.stream = stream
self.indent = indent
self.full_line = full_line
@contextmanager
def start(self):
if self.full_line:
self.stream.buffer.write(self.indent)
yield
self.stream.buffer.write('\n')
self.stream.line_start = True
@contextmanager
def finish(self):
self.stream.buffer.write(self.indent)
yield
if self.full_line:
self.stream.buffer.write('\n')
self.stream.line_start = True
class _Stream(object):
__slots__ = ('ast','indent_kind', 'indent', 'buffer', 'line_start')
def __init__(self, ast, indent_kind=' '):
self.ast = ast
self.buffer = StringIO()
self.indent_kind = indent_kind
self.indent = 0
self.line_start = True
def write(self, data):
self.line_start = False
self.buffer.write(data)
def line(self, line):
assert self.line_start
if not line.endswith('\n'):
line += '\n'
if self.indent:
line = self.indent_kind * self.indent + line
self.buffer.write(line)
self.line_start = True
def getvalue(self):
return self.buffer.getvalue()
@contextmanager
def complexline(self):
full_line = self.line_start
if full_line:
self.buffer.write(self.indent_kind * self.indent)
self.line_start = False
yield
if full_line:
self.buffer.write('\n')
self.line_start = True
@contextmanager
def block(self):
block = _Block(self, self.indent_kind*self.indent, self.line_start)
self.indent += 1
yield block
self.indent -= 1
class Ast(object):
def __init__(self):
self.body = []
self.zones = {}
def __str__(self):
stream = _Stream(self)
for i in self.body:
            if isinstance(i, _Zone):  # a _Zone can remain in body when the Ast was not used as a context manager
for j in i.content:
j.format(stream)
else:
i.format(stream)
return stream.getvalue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
for (i, v) in enumerate(self.body):
if isinstance(v, _Zone):
self.body[i:i+1] = v.content
def block(self):
return _FutureChildren()
def zone(self, name=None):
if name is not None:
res = self.zones.get(name)
if res is not None:
return res
res = self.zones[name] = _Zone()
else:
res = _Zone()
self.body.append(res)
return res
def __call__(self, node):
self.body.append(node)
if node._futures:
if len(node._futures) > 1:
return nested(*node._futures)
else:
return node._futures[0]
def flatten(gen):
    stream = _Stream(None)
    for val in gen:
        val.format(stream)
    return stream.getvalue()
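# A minimal usage sketch (not part of the original module) showing how _Stream
# indentation works: line() emits a full line at the current indent, and block()
# raises the indent for the body of the with-statement.
def _stream_example():
    stream = _Stream(None)
    stream.line('def example():')
    with stream.block():
        stream.line('return 42')
    return stream.getvalue()  # "def example():\n    return 42\n"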
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
header = """#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#"""
# Code generator for shared params (shared.py). Run under this folder with:
# python _shared_params_code_gen.py > shared.py
def _gen_param_header(name, doc, defaultValueStr, typeConverter):
"""
Generates the header part for shared variables
:param name: param name
    :param doc: param doc
    :param defaultValueStr: string representation of the default value
    :param typeConverter: string name of the type converter to use
    """
template = '''class Has$Name(Params):
"""
Mixin for param $name: $doc
"""
$name = Param(Params._dummy(), "$name", "$doc", typeConverter=$typeConverter)
def __init__(self):
super(Has$Name, self).__init__()'''
if defaultValueStr is not None:
template += '''
self._setDefault($name=$defaultValueStr)'''
Name = name[0].upper() + name[1:]
if typeConverter is None:
typeConverter = str(None)
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr)) \
.replace("$typeConverter", typeConverter)
def _gen_param_code(name, doc, defaultValueStr):
"""
Generates Python code for a shared param class.
:param name: param name
:param doc: param doc
:param defaultValueStr: string representation of the default value
:return: code string
"""
# TODO: How to correctly inherit instance attributes?
template = '''
def set$Name(self, value):
"""
Sets the value of :py:attr:`$name`.
"""
return self._set($name=value)
def get$Name(self):
"""
Gets the value of $name or its default value.
"""
return self.getOrDefault(self.$name)'''
Name = name[0].upper() + name[1:]
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr))
if __name__ == "__main__":
print(header)
print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
print("from pyspark.ml.param import *\n\n")
shared = [
("maxIter", "max number of iterations (>= 0).", None, "TypeConverters.toInt"),
("regParam", "regularization parameter (>= 0).", None, "TypeConverters.toFloat"),
("featuresCol", "features column name.", "'features'", "TypeConverters.toString"),
("labelCol", "label column name.", "'label'", "TypeConverters.toString"),
("predictionCol", "prediction column name.", "'prediction'", "TypeConverters.toString"),
("probabilityCol", "Column name for predicted class conditional probabilities. " +
"Note: Not all models output well-calibrated probability estimates! These probabilities " +
"should be treated as confidences, not precise probabilities.", "'probability'",
"TypeConverters.toString"),
("rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", "'rawPrediction'",
"TypeConverters.toString"),
("inputCol", "input column name.", None, "TypeConverters.toString"),
("inputCols", "input column names.", None, "TypeConverters.toListString"),
("outputCol", "output column name.", "self.uid + '__output'", "TypeConverters.toString"),
("outputCols", "output column names.", None, "TypeConverters.toListString"),
("numFeatures", "number of features.", None, "TypeConverters.toInt"),
("checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). " +
"E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: " +
"this setting will be ignored if the checkpoint directory is not set in the SparkContext.",
None, "TypeConverters.toInt"),
("seed", "random seed.", "hash(type(self).__name__)", "TypeConverters.toInt"),
("tol", "the convergence tolerance for iterative algorithms (>= 0).", None,
"TypeConverters.toFloat"),
("stepSize", "Step size to be used for each iteration of optimization (>= 0).", None,
"TypeConverters.toFloat"),
("handleInvalid", "how to handle invalid entries. Options are skip (which will filter " +
"out rows with bad values), or error (which will throw an error). More options may be " +
"added later.", None, "TypeConverters.toString"),
("elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, " +
"the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", "0.0",
"TypeConverters.toFloat"),
("fitIntercept", "whether to fit an intercept term.", "True", "TypeConverters.toBoolean"),
("standardization", "whether to standardize the training features before fitting the " +
"model.", "True", "TypeConverters.toBoolean"),
("thresholds", "Thresholds in multi-class classification to adjust the probability of " +
"predicting each class. Array must have length equal to the number of classes, with " +
"values > 0, excepting that at most one value may be 0. " +
"The class with largest value p/t is predicted, where p is the original " +
"probability of that class and t is the class's threshold.", None,
"TypeConverters.toListFloat"),
("threshold", "threshold in binary classification prediction, in range [0, 1]",
"0.5", "TypeConverters.toFloat"),
("weightCol", "weight column name. If this is not set or empty, we treat " +
"all instance weights as 1.0.", None, "TypeConverters.toString"),
("solver", "the solver algorithm for optimization. If this is not set or empty, " +
"default value is 'auto'.", "'auto'", "TypeConverters.toString"),
("varianceCol", "column name for the biased sample variance of prediction.",
None, "TypeConverters.toString"),
("aggregationDepth", "suggested depth for treeAggregate (>= 2).", "2",
"TypeConverters.toInt"),
("parallelism", "the number of threads to use when running parallel algorithms (>= 1).",
"1", "TypeConverters.toInt"),
("collectSubModels", "Param for whether to collect a list of sub-models trained during " +
"tuning. If set to false, then only the single best sub-model will be available after " +
"fitting. If set to true, then all sub-models will be available. Warning: For large " +
"models, collecting all sub-models can cause OOMs on the Spark driver.",
"False", "TypeConverters.toBoolean"),
("loss", "the loss function to be optimized.", None, "TypeConverters.toString"),
("distanceMeasure", "the distance measure. Supported options: 'euclidean' and 'cosine'.",
"'euclidean'", "TypeConverters.toString")]
code = []
for name, doc, defaultValueStr, typeConverter in shared:
param_code = _gen_param_header(name, doc, defaultValueStr, typeConverter)
code.append(param_code + "\n" + _gen_param_code(name, doc, defaultValueStr))
decisionTreeParams = [
("maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; " +
"depth 1 means 1 internal node + 2 leaf nodes.", "TypeConverters.toInt"),
("maxBins", "Max number of bins for" +
" discretizing continuous features. Must be >=2 and >= number of categories for any" +
" categorical feature.", "TypeConverters.toInt"),
("minInstancesPerNode", "Minimum number of instances each child must have after split. " +
"If a split causes the left or right child to have fewer than minInstancesPerNode, the " +
"split will be discarded as invalid. Should be >= 1.", "TypeConverters.toInt"),
("minInfoGain", "Minimum information gain for a split to be considered at a tree node.",
"TypeConverters.toFloat"),
("maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small," +
" then 1 node will be split per iteration, and its aggregates may exceed this size.",
"TypeConverters.toInt"),
("cacheNodeIds", "If false, the algorithm will pass trees to executors to match " +
"instances with nodes. If true, the algorithm will cache node IDs for each instance. " +
"Caching can speed up training of deeper trees. Users can set how often should the " +
"cache be checkpointed or disable it by setting checkpointInterval.",
"TypeConverters.toBoolean")]
decisionTreeCode = '''class DecisionTreeParams(Params):
"""
Mixin for Decision Tree parameters.
"""
$dummyPlaceHolders
def __init__(self):
super(DecisionTreeParams, self).__init__()'''
dtParamMethods = ""
dummyPlaceholders = ""
paramTemplate = """$name = Param($owner, "$name", "$doc", typeConverter=$typeConverterStr)"""
for name, doc, typeConverterStr in decisionTreeParams:
if typeConverterStr is None:
typeConverterStr = str(None)
variable = paramTemplate.replace("$name", name).replace("$doc", doc) \
.replace("$typeConverterStr", typeConverterStr)
dummyPlaceholders += variable.replace("$owner", "Params._dummy()") + "\n "
dtParamMethods += _gen_param_code(name, doc, None) + "\n"
code.append(decisionTreeCode.replace("$dummyPlaceHolders", dummyPlaceholders) + "\n" +
dtParamMethods)
print("\n\n\n".join(code))
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, KOL
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from urllib import urlencode
from common import MAILRU_URL
THUMB_RE = Regex('background-image\s*:\s*url\(\'?([^\'\)]+)\'?\)')
GROUP_RE = Regex('/[a-z]+/([^/]+)')
def Request(method, params, full=False):
HTTP.Headers['Cookie'] = Dict['auth']
params['func_name'] = method
params['ajax_call'] = 1
params['ret_json'] = 1
params.update(Dict['params'])
res = JSON.ObjectFromURL(
'%scgi-bin/my/ajax?%s' % (MAILRU_URL, urlencode(params)),
)
if res and len(res) > 2 and res[1] == 'OK':
return res if full else res[len(res)-1]
return False
def GetVideoItems(uid, album_id=None, offset=0, limit=0, ltype=None, **kwargs):
if ltype is None:
ltype = 'user' if '@' in uid else 'community_items'
params = {
'user': uid,
'arg_type': ltype,
'arg_limit': limit,
'arg_offset': offset
}
if uid == 'pladform_video':
params['arg_is_legal'] = 1
if album_id is not None:
if ltype == 'community_items':
params['arg_album_id'] = album_id
else:
params['arg_album'] = album_id
params.update(**kwargs)
return Request('video.get_list', params)
def GetPhotoAlbums(uid, path):
try:
HTTP.Headers['Cookie'] = Dict['auth']
info = JSON.ObjectFromString(
HTML.ElementFromURL(MAILRU_URL+path+'photo').xpath((
'//div[@data-photo="albumsContainer"]'
'/script[@data-type="config"]'
))[0].text_content()
)
return HTML.ElementFromString(
Request('photo.get_albums', {
'user': uid,
'arg_album_ids': JSON.StringFromObject(info['albumsAll'])
})
).xpath('//div[@class="b-catalog__photo-albums-item"]')
except:
return []
def GetMusicRecomendations():
HTTP.Headers['Cookie'] = Dict['auth']
params = {
'ajax_call': 1,
'ret_json': 1,
'func_name': 'audio.recommendation',
}
params.update(Dict['params'])
res = JSON.ObjectFromURL(
'%s+/music/recommendation/?%s' % (MAILRU_URL, urlencode(params)),
cacheTime=0,
)
if res and res['data']:
return {
'Total': len(res['data']),
'List': res['data'],
}
return None
def CheckAuth():
HTTP.ClearCookies()
Dict['auth'] = False
res = HTTP.Request(
'https://auth.mail.ru/cgi-bin/auth',
{
'page': MAILRU_URL,
'FailPage': MAILRU_URL+'cgi-bin/login?fail=1',
'Login': Prefs['username'],
'Domain': 'mail.ru',
'Password': Prefs['password']
},
cacheTime=0
)
if (res and Prefs['username'] in HTTP.CookiesForURL(MAILRU_URL)):
res = HTTP.Request(
(
'https://auth.mail.ru/sdc?fail=http:%2F%2Fmy.mail.ru'
'%2Fcgi-bin%2Flogin&from=http%3A%2F%2Fmy.mail.ru%2F'
),
cacheTime=0
)
if 'set-cookie' in res.headers:
try:
res = JSON.ObjectFromString(
HTML.ElementFromString(res.content).xpath(
'//script[@data-mru-fragment="client-server"]'
)[0].text_content()
)
Log.Debug(res)
del res['magic']['myhost']
Dict['auth'] = HTTP.CookiesForURL(MAILRU_URL)
Dict['params'] = res['magic']
return True
except:
pass
return False
def GetExternalMeta(meta):
if 'providerKey' not in meta:
return None
res = XML.ElementFromURL(
'http://out.pladform.ru/getVideo?videoid=%s&pl=%s' % (
meta['providerKey'],
meta['meta']['playerId']
),
cacheTime=0
)
if not res:
return None
ret = {
'external': None,
'videos': {},
}
qmap = {
'sd': '720',
'ld': '360',
}
for src in res.findall('src'):
if src.get('type') == 'video':
quality = qmap[src.get('quality')]
ret['videos'][quality] = {
'key': quality+'p',
'url': src.text,
}
if not ret['videos']:
try:
ret['external'] = res.find('external_embed').text
return ret
except:
pass
else:
return ret
return None
def CheckMetaUrl(item):
if 'MetaUrl' in item:
return
res = Request('video.get_item', {
'user': item['OwnerEmail'],
'arg_id': item['ID'],
})
if res:
try:
res = JSON.ObjectFromString(
HTML.ElementFromString(res).xpath(
'//script[@data-type="album-json"]'
)[0].text_content()
)
item['MetaUrl'] = res['signVideoUrl']
except:
pass
def GroupFromElement(element):
return GROUP_RE.search(element.get('href')).group(1)
def AlbumFromElement(element):
path = element.get('href')
return path[path.rfind('/')+1:]
def ImageFromElement(element):
style = element.get('style')
if style:
return THUMB_RE.search(style).group(1)
return ''
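# Standalone sketch (uses the stdlib re module instead of the Plex Regex wrapper,
# with made-up sample strings) of what GROUP_RE and THUMB_RE are extracting.
def _regex_examples():
    import re
    group_re = re.compile(r'/[a-z]+/([^/]+)')
    thumb_re = re.compile(r'background-image\s*:\s*url\(\'?([^\'\)]+)\'?\)')
    assert group_re.search('/community/some_group/video').group(1) == 'some_group'
    style = "background-image: url('http://img.example/pic.jpg')"
    assert thumb_re.search(style).group(1) == 'http://img.example/pic.jpg'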
|
|
from models import Answer, Survey, Question, Choice
from django.conf import settings
from django.forms import Form, ValidationError
from django.forms import CharField, ChoiceField, SplitDateTimeField
from django.forms import Textarea, TextInput, Select, RadioSelect,\
CheckboxSelectMultiple, MultipleChoiceField,\
SplitDateTimeWidget,MultiWidget
from django.forms.forms import BoundField
from django.forms.models import ModelForm
#from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.template import Context, loader
from django.template.defaultfilters import slugify
from itertools import chain
import uuid
class BaseAnswerForm(Form):
def __init__(self, question, user, interview_uuid, session_key, edit_existing=False, *args, **kwdargs):
self.question = question
self.session_key = session_key.lower()
self.user = user
self.interview_uuid = interview_uuid
self.answer = None
initial = None
if edit_existing:
if not user.is_authenticated():
query = question.answers.filter(session_key=session_key)
else:
query = question.answers.filter(user=user)
if query.count():
self.answer = query[0]
initial = self.answer.text
if 'initial' not in kwdargs:
kwdargs['initial'] = {}
if 'answer' not in kwdargs['initial']:
kwdargs['initial']['answer'] = self.answer.text
super(BaseAnswerForm, self).__init__(*args, **kwdargs)
answer = self.fields['answer']
answer.required = question.required
answer.label = question.text
if not question.required:
answer.help_text = unicode('this question is optional')
if initial is not None and initial != answer.initial:
if kwdargs['initial']['answer'] != answer.initial:
                ## we are a choice-list style field, so the stored answer text needs to be mapped back to a choice id
answer.initial = initial
def as_template(self):
"Helper function for fieldsting fields data from form."
bound_fields = [BoundField(self, field, name) for name, field in self.fields.items()]
c = Context(dict(form = self, bound_fields = bound_fields))
# TODO: check for template ... if template does not exist
# we could just get_template_from_string to some default
# or we could pass in the template name ... whatever we want
# import pdb; pdb.set_trace()
t = loader.get_template('forms/form.html')
return t.render(c)
def save(self, commit=True):
if not self.cleaned_data['answer']:
if self.fields['answer'].required:
raise ValidationError, 'This field is required.'
return
ans = self.answer
if ans is None:
ans = Answer()
ans.question = self.question
ans.session_key = self.session_key
if self.user.is_authenticated():
ans.user = self.user
else:
ans.user = None
ans.interview_uuid = self.interview_uuid
ans.text = self.cleaned_data['answer']
if commit: ans.save()
return ans
class TextInputAnswer(BaseAnswerForm):
answer = CharField()
class TextAreaAnswer(BaseAnswerForm):
answer = CharField(widget=Textarea)
class NullSelect(Select):
def __init__(self, attrs=None, choices=(), empty_label=u"---------"):
self.empty_label = empty_label
super(NullSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=(), **kwdargs):
empty_choice = ()
# kwdargs is needed because it is the only way to determine if an
# override is provided or not.
if 'empty_label' in kwdargs:
if kwdargs['empty_label'] is not None:
empty_choice = ((u'', kwdargs['empty_label']),)
elif self.empty_label is not None:
empty_choice = ((u'', self.empty_label),)
base_choices = self.choices
self.choices = chain(empty_choice, base_choices)
result = super(NullSelect, self).render(name, value, attrs, choices)
self.choices = base_choices
return result
class ChoiceAnswer(BaseAnswerForm):
answer = ChoiceField(widget=NullSelect)
def __init__(self, *args, **kwdargs):
super(ChoiceAnswer, self).__init__(*args, **kwdargs)
choices = []
choices_dict = {}
self.initial_answer = None
for opt in self.question.choices.all().order_by("order"):
#if opt.image and opt.image.url:
# text = mark_safe(opt.text + '<br/><img src="%s"/>'%opt.image.url)
#else:
text = opt.text
if self.answer is not None and self.answer.text == opt.text:
self.initial_answer = str(opt.id)
choices.append((str(opt.id),text))
choices_dict[str(opt.id)] = opt.text
self.choices = choices
self.choices_dict = choices_dict
self.fields['answer'].choices = choices
self.fields['answer'].initial = self.initial_answer
if self.initial_answer is not None:
self.initial['answer'] = self.initial_answer
def clean_answer(self):
key = self.cleaned_data['answer']
if not key and self.fields['answer'].required:
raise ValidationError, 'This field is required.'
return self.choices_dict.get(key, key)
class ChoiceRadio(ChoiceAnswer):
def __init__(self, *args, **kwdargs):
super(ChoiceRadio, self).__init__(*args, **kwdargs)
self.fields['answer'].widget = RadioSelect(choices=self.choices)
#class ChoiceImage(ChoiceAnswer):
# def __init__(self, *args, **kwdargs):
# super(ChoiceImage, self).__init__(*args, **kwdargs)
# #import pdb; pdb.set_trace()
# self.choices = [ (k,mark_safe(v)) for k,v in self.choices ]
# self.fields['answer'].widget = RadioSelect(choices=self.choices)
class ChoiceCheckbox(BaseAnswerForm):
answer = MultipleChoiceField(widget=CheckboxSelectMultiple)
def __init__(self, *args, **kwdargs):
super(ChoiceCheckbox, self).__init__(*args, **kwdargs)
choices = []
choices_dict = {}
self.initial_answer = None
for opt in self.question.choices.all().order_by("order"):
text = opt.text
#if opt.image and opt.image.url:
# text = mark_safe(opt.text + '<br />' + opt.image.url)
choices.append((str(opt.id),text))
choices_dict[str(opt.id)] = opt.text
if self.answer is not None and self.answer.text == opt.text:
self.initial_answer = str(opt.id)
self.choices = choices
self.choices_dict = choices_dict
self.fields['answer'].choices = choices
self.fields['answer'].initial = self.initial_answer
if self.initial_answer is not None:
self.initial['answer'] = self.initial_answer
def clean_answer(self):
keys = self.cleaned_data['answer']
if not keys and self.fields['answer'].required:
raise ValidationError, 'This field is required.'
for key in keys:
if not key and self.fields['answer'].required:
raise ValidationError, 'Invalid Choice.'
return [self.choices_dict.get(key, key) for key in keys]
def save(self, commit=True):
if not self.cleaned_data['answer']:
if self.fields['answer'].required:
raise ValidationError, 'This field is required.'
return
ans_list = []
for text in self.cleaned_data['answer']:
ans = Answer()
ans.question = self.question
ans.session_key = self.session_key
if self.user.is_authenticated():
ans.user = self.user
else:
ans.user = None
ans.interview_uuid = self.interview_uuid
ans.text = text
if commit: ans.save()
ans_list.append(ans)
return ans_list
## each question gets a form with one element, determined by the type
## for the answer.
QTYPE_FORM = {
'T': TextInputAnswer,
'A': TextAreaAnswer,
'S': ChoiceAnswer,
'R': ChoiceRadio,
#'I': ChoiceImage,
'C': ChoiceCheckbox,
}
def forms_for_survey(survey, request, edit_existing=False):
## add session validation to base page.
sp = str(survey.id) + '_'
session_key = request.session.session_key.lower()
login_user = request.user
random_uuid = uuid.uuid4().hex
    if request.POST:  # only bind the forms when data was actually posted; an empty dict would still mark them bound
post = request.POST
else:
post = None
    # QTYPE_FORM[q.qtype] raises a KeyError for an unknown question type.
    # Let's just pretend there's no question if that happens.
try:
return [QTYPE_FORM[q.qtype](q, login_user, random_uuid, session_key, prefix=sp+str(q.id), data=post, edit_existing=edit_existing)
for q in survey.questions.all().order_by("order") ]
except KeyError:
return None
class CustomDateWidget(TextInput):
class Media:
js = ('/admin/jsi18n/',
settings.ADMIN_MEDIA_PREFIX + 'js/core.js',
settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js",
)
def __init__(self, attrs={}):
super(CustomDateWidget, self).__init__(attrs={'class': 'vDateField', 'size': '10'})
class CustomTimeWidget(TextInput):
class Media:
js = ('/admin/jsi18n/',
settings.ADMIN_MEDIA_PREFIX + 'js/core.js',
settings.ADMIN_MEDIA_PREFIX + "js/calendar.js",
settings.ADMIN_MEDIA_PREFIX + "js/admin/DateTimeShortcuts.js",
)
def __init__(self, attrs={}):
super(CustomTimeWidget, self).__init__(attrs={'class': 'vTimeField', 'size': '8'})
class CustomSplitDateTime(SplitDateTimeWidget):
"""
A SplitDateTime Widget that has some admin-specific styling.
"""
def __init__(self, attrs=None):
widgets = [CustomDateWidget, CustomTimeWidget]
# Note that we're calling MultiWidget, not SplitDateTimeWidget, because
# we want to define widgets.
MultiWidget.__init__(self, widgets, attrs)
def format_output(self, rendered_widgets):
return mark_safe(u'<p class="datetime">%s %s<br />%s %s</p>' % \
('Date:', rendered_widgets[0], 'Time:', rendered_widgets[1]))
class SurveyForm(ModelForm):
opens = SplitDateTimeField(widget=CustomSplitDateTime(),
label=Survey._meta.get_field("opens").verbose_name)
closes = SplitDateTimeField(widget=CustomSplitDateTime(),
label=Survey._meta.get_field("closes").verbose_name)
class Meta:
model = Survey
exclude = ("created_by","editable_by","slug","recipient_type","recipient_id")
def clean(self):
title_slug = slugify(self.cleaned_data.get("title"))
if not hasattr(self,"instance"):
if not Survey.objects.filter(slug=title_slug).count()==0:
raise ValidationError, 'The title of the survey must be unique.'
elif self.instance.title != self.cleaned_data.get("title"):
if not Survey.objects.filter(slug=title_slug).count()==0:
raise ValidationError, 'The title of the survey must be unique.'
return self.cleaned_data
class QuestionForm(ModelForm):
class Meta:
model= Question
exclude = ("survey")
class ChoiceForm(ModelForm):
class Meta:
model = Choice
exclude = ("question")
|
|
import unittest
from config import (
access_token, access_token_secret, bearer_token, consumer_key,
consumer_secret, tape, user_id
)
import tweepy
class TweepyTestCase(unittest.TestCase):
def setUp(self):
self.client = tweepy.Client(
bearer_token, consumer_key, consumer_secret,
access_token or user_id, access_token_secret
)
@tape.use_cassette("test_hide_and_unhide_reply.yaml", serializer="yaml")
def test_hide_and_unhide_reply(self):
reply_id = 1344794616005066752 # Test Tweet for reply hide/unhide
self.client.hide_reply(reply_id)
self.client.unhide_reply(reply_id)
@tape.use_cassette("test_like_and_unlike.yaml", serializer="yaml")
def test_like_and_unlike(self):
tweet_id = 1293593516040269825 # @TwitterDev Tweet announcing API v2
self.client.like(tweet_id)
self.client.unlike(tweet_id)
@tape.use_cassette("test_get_liking_users.yaml", serializer="yaml")
def test_get_liking_users(self):
tweet_id = 1293593516040269825 # @TwitterDev Tweet announcing API v2
self.client.get_liking_users(tweet_id)
@tape.use_cassette("test_get_liked_tweets.yaml", serializer="yaml")
def test_get_liked_tweets(self):
user_id = 783214 # User ID for @Twitter
self.client.get_liked_tweets(user_id)
@tape.use_cassette("test_create_and_delete_tweet.yaml", serializer="yaml")
def test_create_and_delete_tweet(self):
response = self.client.create_tweet(text="Test Tweet")
tweet_id = response.data["id"]
self.client.delete_tweet(tweet_id)
@tape.use_cassette("test_retweet_and_unretweet.yaml", serializer="yaml")
def test_retweet_and_unretweet(self):
tweet_id = 1415348607813832708 # @TwitterDev Tweet announcing API v2 Retweet endpoints
self.client.retweet(tweet_id)
self.client.unretweet(tweet_id)
@tape.use_cassette("test_get_retweeters.yaml", serializer="yaml")
def test_get_retweeters(self):
tweet_id = 1415348607813832708 # @TwitterDev Tweet announcing API v2 Retweet endpoints
self.client.get_retweeters(tweet_id)
@tape.use_cassette("test_search_all_tweets.yaml", serializer="yaml")
def test_search_all_tweets(self):
self.client.search_all_tweets("Tweepy")
@tape.use_cassette("test_search_recent_tweets.yaml", serializer="yaml")
def test_search_recent_tweets(self):
self.client.search_recent_tweets("Tweepy")
@tape.use_cassette("test_get_users_mentions.yaml", serializer="yaml")
def test_get_users_mentions(self):
user_id = 783214 # User ID for @Twitter
self.client.get_users_mentions(user_id)
@tape.use_cassette("test_get_users_tweets.yaml", serializer="yaml")
def test_get_users_tweets(self):
user_id = 783214 # User ID for @Twitter
self.client.get_users_tweets(user_id)
@tape.use_cassette("test_get_all_tweets_count.yaml", serializer="yaml")
def test_get_all_tweets_count(self):
self.client.get_all_tweets_count("Tweepy")
@tape.use_cassette("test_get_recent_tweets_count.yaml", serializer="yaml")
def test_get_recent_tweets_count(self):
self.client.get_recent_tweets_count("Tweepy")
@tape.use_cassette("test_get_tweet.yaml", serializer="yaml")
def test_get_tweet(self):
tweet_id = 1293593516040269825 # @TwitterDev Tweet announcing API v2
self.client.get_tweet(tweet_id)
@tape.use_cassette("test_get_tweets.yaml", serializer="yaml")
def test_get_tweets(self):
tweet_ids = [1293593516040269825, 1293595870563381249]
# @TwitterDev and @TwitterAPI Tweets announcing API v2
self.client.get_tweets(tweet_ids)
@tape.use_cassette("test_block_and_get_blocked_and unblock.yaml",
serializer="yaml")
def test_block_and_unblock(self):
user_id = 17874544 # User ID for @TwitterSupport
self.client.block(user_id)
self.client.get_blocked()
self.client.unblock(user_id)
@tape.use_cassette("test_follow_and_unfollow_user.yaml", serializer="yaml")
def test_follow_and_unfollow_user(self):
user_id = 17874544 # User ID for @TwitterSupport
self.client.follow_user(user_id)
self.client.unfollow_user(user_id)
@tape.use_cassette("test_get_users_followers.yaml", serializer="yaml")
def test_get_users_followers(self):
user_id = 783214 # User ID for @Twitter
self.client.get_users_followers(user_id)
@tape.use_cassette("test_get_users_following.yaml", serializer="yaml")
def test_get_users_following(self):
user_id = 783214 # User ID for @Twitter
self.client.get_users_following(user_id)
@tape.use_cassette("test_mute_get_muted_and_unmute.yaml",
serializer="yaml")
def test_mute_get_muted_and_unmute(self):
user_id = 17874544 # User ID for @TwitterSupport
self.client.mute(user_id)
self.client.get_muted()
self.client.unmute(user_id)
@tape.use_cassette("test_get_user.yaml", serializer="yaml")
def test_get_user(self):
self.client.get_user(username="Twitter")
@tape.use_cassette("test_get_users.yaml", serializer="yaml")
def test_get_users(self):
self.client.get_users(usernames=["Twitter", "TwitterDev"])
@tape.use_cassette("test_get_me.yaml", serializer="yaml")
def test_get_me(self):
self.client.get_me()
@tape.use_cassette("test_search_spaces.yaml", serializer="yaml")
def test_search_spaces(self):
self.client.search_spaces("Twitter")
@tape.use_cassette("test_get_spaces.yaml", serializer="yaml")
def test_get_spaces(self):
space_ids = ["1YpKkzBgBlVxj", "1OwGWzarWnNKQ"]
# Space ID for @TwitterSpaces Twitter Spaces community gathering + Q&A
# https://twitter.com/TwitterSpaces/status/1436382283347283969
# Space ID for @NASA #NASAWebb Space Telescope 101 and Q&A
# https://twitter.com/NASA/status/1442961745098653701
user_ids = [1065249714214457345, 2328002822]
# User IDs for @TwitterSpaces and @TwitterWomen
self.client.get_spaces(ids=space_ids)
self.client.get_spaces(user_ids=user_ids)
@tape.use_cassette("test_get_space.yaml", serializer="yaml")
def test_get_space(self):
space_id = "1YpKkzBgBlVxj"
# Space ID for @TwitterSpaces Twitter Spaces community gathering + Q&A
# https://twitter.com/TwitterSpaces/status/1436382283347283969
self.client.get_space(space_id)
# TODO: Test Client.get_space_buyers
# TODO: Test Client.get_space_tweets
@tape.use_cassette("test_get_list_tweets.yaml", serializer="yaml")
def test_get_list_tweets(self):
list_id = 84839422 # List ID for Official Twitter Accounts (@Twitter)
self.client.get_list_tweets(list_id)
@tape.use_cassette("test_follow_and_unfollow_list.yaml", serializer="yaml")
def test_follow_and_unfollow_list(self):
list_id = 84839422 # List ID for Official Twitter Accounts (@Twitter)
self.client.follow_list(list_id)
self.client.unfollow_list(list_id)
@tape.use_cassette("test_get_list_followers.yaml", serializer="yaml")
def test_get_list_followers(self):
list_id = 84839422 # List ID for Official Twitter Accounts (@Twitter)
self.client.get_list_followers(list_id)
@tape.use_cassette("test_get_followed_lists.yaml", serializer="yaml")
def test_get_followed_lists(self):
user_id = 372575989 # User ID for @TwitterNews
self.client.get_followed_lists(user_id)
@tape.use_cassette("test_get_list.yaml", serializer="yaml")
def test_get_list(self):
list_id = 84839422 # List ID for Official Twitter Accounts (@Twitter)
self.client.get_list(list_id)
@tape.use_cassette("test_get_owned_lists.yaml", serializer="yaml")
def test_get_owned_lists(self):
user_id = 783214 # User ID for @Twitter
self.client.get_owned_lists(user_id)
@tape.use_cassette("test_get_list_members.yaml", serializer="yaml")
def test_get_list_members(self):
list_id = 84839422 # List ID for Official Twitter Accounts (@Twitter)
self.client.get_list_members(list_id)
@tape.use_cassette("test_get_list_memberships.yaml", serializer="yaml")
def test_get_list_memberships(self):
user_id = 783214 # User ID for @Twitter
self.client.get_list_memberships(user_id)
@tape.use_cassette("test_manage_and_get_pinned_lists.yaml",
serializer="yaml")
def test_manage_and_get_pinned_lists(self):
response = self.client.create_list("Test List", private=True)
list_id = response.data["id"]
user_id = 783214 # User ID for @Twitter
self.client.add_list_member(list_id, user_id)
self.client.pin_list(list_id)
self.client.get_pinned_lists()
self.client.remove_list_member(list_id, user_id)
self.client.unpin_list(list_id)
self.client.update_list(list_id, description="Test List Description")
self.client.delete_list(list_id)
@tape.use_cassette("test_create_and_get_compliance_job_and_jobs.yaml",
serializer="yaml")
def test_create_and_get_compliance_job_and_jobs(self):
response = self.client.create_compliance_job("tweets")
job_id = response.data["id"]
self.client.get_compliance_job(job_id)
self.client.get_compliance_jobs("tweets")
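# Not in the original module: a minimal entry point sketch so the tests can also be
# run directly with `python test_client.py` instead of only through a test runner.
if __name__ == "__main__":
    unittest.main()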
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multi-class confusion matrix metrics at thresholds."""
from absl.testing import parameterized
import apache_beam as beam
from apache_beam.testing import util
import numpy as np
import tensorflow as tf
from tensorflow_model_analysis.eval_saved_model import testutil
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.metrics import metric_util
from tensorflow_model_analysis.metrics import multi_class_confusion_matrix_metrics
class MultiClassConfusionMatrixMetricsTest(testutil.TensorflowModelAnalysisTest,
parameterized.TestCase):
@parameterized.named_parameters(
{
'testcase_name': '_empty_thresholds',
'left': multi_class_confusion_matrix_metrics.Matrices({}),
'right': multi_class_confusion_matrix_metrics.Matrices({}),
'expected': multi_class_confusion_matrix_metrics.Matrices({})
}, {
'testcase_name': '_empty_entries',
'left': multi_class_confusion_matrix_metrics.Matrices({0.5: {}}),
'right': multi_class_confusion_matrix_metrics.Matrices({0.5: {}}),
'expected': multi_class_confusion_matrix_metrics.Matrices({0.5: {}})
}, {
'testcase_name':
'_different_thresholds',
'left':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0
}
}),
'right':
multi_class_confusion_matrix_metrics.Matrices({
0.75: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
2.0
}
}),
'expected':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0
},
0.75: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
2.0
}
}),
}, {
'testcase_name':
'_different_entries',
'left':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0
}
}),
'right':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
2.0
}
}),
'expected':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
2.0
}
}),
}, {
'testcase_name':
'_same_thresholds_and_entries',
'left':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
2.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
3.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
4.0,
},
0.75: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
2.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
4.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
6.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
8.0,
}
}),
'right':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
3.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
5.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
7.0,
},
0.75: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
2.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
6.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
10.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
14.0,
}
}),
'expected':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
2.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
5.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
8.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
11.0,
},
0.75: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
4.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
10.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=0):
16.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
22.0,
}
}),
}, {
'testcase_name': '_empty_thresholds_broadcast',
'left': multi_class_confusion_matrix_metrics.Matrices({}),
'right': 1.0,
'expected': multi_class_confusion_matrix_metrics.Matrices({})
}, {
'testcase_name': '_empty_entries_broadcast',
'left': multi_class_confusion_matrix_metrics.Matrices({0.5: {}}),
'right': 1.0,
'expected': multi_class_confusion_matrix_metrics.Matrices({0.5: {}})
}, {
'testcase_name':
'_nonempty_thresholds_and_entries_broadcast',
'left':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
1.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
2.0,
},
}),
'right':
3.0,
'expected':
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
4.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=1):
5.0,
},
}),
})
def testAddMatrices(self, left, right, expected):
self.assertEqual(expected, left + right)
@parameterized.named_parameters(('using_default_thresholds', {}),
('setting_thresholds', {
'thresholds': [0.5]
}))
def testMultiClassConfusionMatrixAtThresholds(self, kwargs):
computations = (
multi_class_confusion_matrix_metrics
.MultiClassConfusionMatrixAtThresholds(**kwargs).computations(
example_weighted=True))
matrices = computations[0]
metrics = computations[1]
example1 = {
'labels': np.array([2.0]),
'predictions': np.array([0.2, 0.3, 0.5]),
'example_weights': np.array([0.5])
}
example2 = {
'labels': np.array([0.0]),
'predictions': np.array([0.1, 0.3, 0.6]),
'example_weights': np.array([1.0])
}
example3 = {
'labels': np.array([1.0]),
'predictions': np.array([0.3, 0.1, 0.6]),
'example_weights': np.array([0.25])
}
example4 = {
'labels': np.array([1.0]),
'predictions': np.array([0.1, 0.9, 0.0]),
'example_weights': np.array([1.0])
}
example5 = {
'labels': np.array([1.0]),
'predictions': np.array([0.1, 0.8, 0.1]),
'example_weights': np.array([1.0])
}
example6 = {
'labels': np.array([2.0]),
'predictions': np.array([0.3, 0.1, 0.6]),
'example_weights': np.array([1.0])
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create(
[example1, example2, example3, example4, example5, example6])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeMatrices' >> beam.CombinePerKey(matrices.combiner)
|
'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1]))))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
self.assertLen(got_metrics, 1)
key = metric_types.MetricKey(
name='multi_class_confusion_matrix_at_thresholds',
example_weighted=True)
got_matrix = got_metrics[key]
self.assertEqual(
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=2):
1.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=1):
2.0,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=1, predicted_class_id=2):
0.25,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=2, predicted_class_id=-1):
0.5,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=2, predicted_class_id=2):
1.0
}
}), got_matrix)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
def testMultiClassConfusionMatrixAtThresholdsWithStringLabels(self):
computations = (
multi_class_confusion_matrix_metrics
.MultiClassConfusionMatrixAtThresholds().computations(
example_weighted=True))
matrices = computations[0]
metrics = computations[1]
example1 = {
'labels': np.array([['unacc']]),
'predictions': {
'probabilities':
np.array([[
1.0000000e+00, 6.9407083e-24, 2.7419115e-38, 0.0000000e+00
]]),
'all_classes':
np.array([['unacc', 'acc', 'vgood', 'good']]),
},
'example_weights': np.array([0.5])
}
example2 = {
'labels': np.array([['vgood']]),
'predictions': {
'probabilities': np.array([[0.2, 0.3, 0.4, 0.1]]),
'all_classes': np.array([['unacc', 'acc', 'vgood', 'good']]),
},
'example_weights': np.array([1.0])
}
with beam.Pipeline() as pipeline:
# pylint: disable=no-value-for-parameter
result = (
pipeline
| 'Create' >> beam.Create([example1, example2])
| 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)
| 'AddSlice' >> beam.Map(lambda x: ((), x))
| 'ComputeMatrices' >> beam.CombinePerKey(matrices.combiner)
|
'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1]))))
# pylint: enable=no-value-for-parameter
def check_result(got):
try:
self.assertLen(got, 1)
got_slice_key, got_metrics = got[0]
self.assertEqual(got_slice_key, ())
self.assertLen(got_metrics, 1)
key = metric_types.MetricKey(
name='multi_class_confusion_matrix_at_thresholds',
example_weighted=True)
got_matrix = got_metrics[key]
self.assertEqual(
multi_class_confusion_matrix_metrics.Matrices({
0.5: {
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=0, predicted_class_id=0):
0.5,
multi_class_confusion_matrix_metrics.MatrixEntryKey(
actual_class_id=2, predicted_class_id=-1):
1.0
}
}), got_matrix)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(result, check_result, label='result')
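# Standalone sketch mirroring testAddMatrices above: Matrices add cell-by-cell per
# threshold, and adding a plain float broadcasts it into every existing cell.
def _matrices_addition_example():
  key = multi_class_confusion_matrix_metrics.MatrixEntryKey(
      actual_class_id=0, predicted_class_id=1)
  left = multi_class_confusion_matrix_metrics.Matrices({0.5: {key: 1.0}})
  right = multi_class_confusion_matrix_metrics.Matrices({0.5: {key: 2.0}})
  return left + right  # Matrices({0.5: {key: 3.0}})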
if __name__ == '__main__':
tf.test.main()
|
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, os
import logging, array
from time import sleep
from board import Board
from pyOCD.interface import INTERFACE, usb_backend
TARGET_TYPE = {
"0200": "kl25z",
"0210": "kl05z",
"0220": "kl46z",
"0230": "k20d50m",
"0231": "k22f",
"0240": "k64f",
"0250": "kl02z",
"0260": "kl26z",
"1010": "lpc1768",
"9004": "lpc1768",
"1040": "lpc11u24",
"1050": "lpc800",
"1070": "nrf51822",
}
mbed_vid = 0x0d28
mbed_pid = 0x0204
class MbedBoard(Board):
"""
This class inherits from Board and is specific to mbed boards.
Particularly, this class allows you to dynamically determine
the type of all boards connected based on the id board
"""
def __init__(self, target, flash, interface, transport = "cmsis_dap"):
"""
Init the board
"""
super(MbedBoard, self).__init__(target, flash, interface, transport)
self.unique_id = ""
self.target_type = ""
def getUniqueID(self):
"""
Return the unique id of the board
"""
return self.unique_id
def getTargetType(self):
"""
Return the type of the board
"""
return self.target_type
def getInfo(self):
"""
Return info on the board
"""
return Board.getInfo(self) + " [" + self.target_type + "]"
@staticmethod
def listConnectedBoards(transport = "cmsis_dap"):
"""
List the connected board info
"""
all_mbeds = INTERFACE[usb_backend].getAllConnectedInterface(mbed_vid, mbed_pid)
index = 0
        if all_mbeds != [] and all_mbeds is not None:
for mbed in all_mbeds:
mbed.write([0x80])
u_id_ = mbed.read()
try:
target_type = array.array('B', [i for i in u_id_[2:6]]).tostring()
if (target_type not in TARGET_TYPE):
logging.info("Unsupported target found: %s" % target_type)
continue
else:
target_type = TARGET_TYPE[target_type]
new_mbed = MbedBoard("target_" + target_type, "flash_" + target_type, mbed, transport)
new_mbed.target_type = target_type
new_mbed.unique_id = array.array('B', [i for i in u_id_[2:2+u_id_[1]]]).tostring()
logging.info("new board id detected: %s", new_mbed.unique_id)
print "%d => %s" % (index, new_mbed.getInfo().encode('ascii', 'ignore'))
mbed.close()
index += 1
except Exception as e:
print "received exception: %s" % e
mbed.close()
else:
print "No available boards is connected"
@staticmethod
def getAllConnectedBoards(transport = "cmsis_dap", close = False, blocking = True, target_override = None):
"""
Return an array of all mbed boards connected
"""
first = True
while True:
while True:
all_mbeds = INTERFACE[usb_backend].getAllConnectedInterface(mbed_vid, mbed_pid)
if all_mbeds != None or not blocking:
break
                if first:
                    logging.info("Waiting for a USB device to be connected")
first = False
sleep(0.2)
mbed_boards = []
for mbed in all_mbeds:
mbed.write([0x80])
u_id_ = mbed.read()
try:
if target_override:
target_type = target_override
else:
target_type = array.array('B', [i for i in u_id_[2:6]]).tostring()
if (target_type not in TARGET_TYPE):
logging.info("Unsupported target found: %s" % target_type)
continue
else:
target_type = TARGET_TYPE[target_type]
new_mbed = MbedBoard("target_" + target_type, "flash_" + target_type, mbed, transport)
new_mbed.target_type = target_type
new_mbed.unique_id = array.array('B', [i for i in u_id_[2:2+u_id_[1]]]).tostring()
logging.info("new board id detected: %s", new_mbed.unique_id)
mbed_boards.append(new_mbed)
if close:
mbed.close()
except Exception as e:
print "received exception: %s" % e
mbed.close()
if len(mbed_boards) > 0 or not blocking:
return mbed_boards
            if first:
                logging.info("Waiting for a USB device to be connected")
first = False
@staticmethod
def chooseBoard(transport = "cmsis_dap", blocking = True, return_first = False, board_id = None, target_override = None):
"""
Allow you to select a board among all boards connected
"""
all_mbeds = MbedBoard.getAllConnectedBoards(transport, False, blocking, target_override)
if all_mbeds == None:
return None
index = 0
print "id => usbinfo | boardname"
for mbed in all_mbeds:
print "%d => %s" % (index, mbed.getInfo().encode('ascii', 'ignore'))
index += 1
if len(all_mbeds) == 1:
if board_id != None:
if all_mbeds[0].unique_id == (board_id):
all_mbeds[0].init()
return all_mbeds[0]
else:
print "The board you want to connect isn't the board now connected"
return None
else:
try:
all_mbeds[0].init()
except Exception as e:
try:
print e
except:
pass
finally:
all_mbeds[0].interface.close()
raise e
return all_mbeds[0]
try:
ch = 0
if board_id != None:
for mbed in all_mbeds:
if mbed.unique_id == (board_id):
mbed.init()
return mbed
else:
mbed.interface.close()
print "The board you want to connect isn't the boards now connected"
return None
elif not return_first:
while True:
print "input id num to choice your board want to connect"
ch = sys.stdin.readline()
sys.stdin.flush()
if (int(ch) < 0) or (int(ch) >= len(all_mbeds)):
logging.info("BAD CHOICE: %d", int(ch))
index = 0
for mbed in all_mbeds:
print "%d => %s" % ( index, mbed.getInfo())
index += 1
else:
break
            # close all other connected mbed boards
for mbed in all_mbeds:
if mbed != all_mbeds[int(ch)]:
mbed.interface.close()
all_mbeds[int(ch)].init()
return all_mbeds[int(ch)]
except Exception as e:
try:
print e
except:
pass
finally:
for mbed in all_mbeds:
mbed.interface.close()
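# Usage sketch (not part of the original module; assumes a supported mbed board is
# attached over USB): pick a board, print its info, then release the interface.
if __name__ == '__main__':
    board = MbedBoard.chooseBoard(blocking=False, return_first=True)
    if board is not None:
        print board.getInfo()
        board.interface.close()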
|
|
#!/usr/bin/env python
from boto.route53.record import ResourceRecordSets
from boto.route53.status import Status
import boto.route53 as r53
from collections import namedtuple
import argparse
import time
import yaml
# a DNS record for hesiod
# fqdn should include the trailing .
# value should contain the value without quotes
DNSRecord = namedtuple("DNSRecord", "fqdn value")
# A UNIX group
class Group(object):
def __init__(self, name, gid):
if not name:
raise Exception("Group name must be provided.")
if not gid:
raise Exception("Group ID must be provided.")
self.name = str(name)
self.gid = int(gid)
if len(self.passwd_line([]).split(":")) != 4:
raise Exception("Invalid group, contains colons: %s" % self)
def users(self, users):
r = []
for user in users:
if self in user.groups:
r.append(user)
return r
def dns_records(self, hesiod_domain, users):
records = []
passwd_line = self.passwd_line(users)
# group record
fqdn = "%s.group.%s" % (self.name, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), passwd_line))
# gid record
fqdn = "%s.gid.%s" % (self.gid, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), passwd_line))
return records
@classmethod
# returns group, usernames list
# usernames will be empty if only a partial line
def parse_passwd_line(cls, line):
parts = line.split(":")
if len(parts) != 3 and len(parts) != 4:
raise Exception("Invalid group passwd line: %s" % line)
name = parts[0]
gid = parts[2]
usernames = []
if len(parts) == 4:
usernames = parts[3].split(",")
return Group(name, gid), usernames
def passwd_line(self, users):
usernames = ",".join(sorted(map(lambda u: u.username, self.users(users))))
return "%s:x:%d:%s" % (self.name, self.gid, usernames)
def __eq__(self, other):
return self.name == other.name and self.gid == other.gid
def __ne__(self, other):
return not self == other
def __repr__(self):
return "Group(name=%s, gid=%s)" % (self.name, self.gid)
# A UNIX user
class User(object):
def __init__(self, name, username, uid, groups, ssh_keys, homedir=None, shell="/bin/bash"):
self.name = str(name)
self.username = str(username)
self.uid = int(uid)
self.groups = groups
self.ssh_keys = list(ssh_keys)
if not homedir:
homedir = "/home/%s" % self.username
self.homedir = str(homedir)
self.shell = str(shell)
if len(self.passwd_line().split(":")) != 7:
raise Exception("Invalid user, contains colons: %s" % self)
@classmethod
# returns user, primary group id
def parse_passwd_line(cls, line):
line = line.replace('"', '')
parts = line.split(":")
if len(parts) != 7:
raise Exception("Invalid user passwd line: %s" % line)
username, x, uid, group, gecos, homedir, shell = parts
name = gecos.split(",")[0]
group = int(group)
return User(name, username, uid, [], [], homedir, shell), group
def dns_records(self, hesiod_domain):
records = []
# user record
fqdn = "%s.passwd.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), self.passwd_line()))
# uid record
fqdn = "%s.uid.%s" % (self.uid, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), self.passwd_line()))
# group list record
gl = []
for group in sorted(self.groups, key=lambda g: g.gid):
gl.append("%s:%s" % (group.name, group.gid))
fqdn = "%s.grplist.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(fqdn.lower(), ":".join(gl)))
# ssh records
if self.ssh_keys:
ssh_keys_count_fqdn = "%s.count.ssh.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(ssh_keys_count_fqdn.lower(), str(len(self.ssh_keys))))
# Need to keep this around for backwards compatibility when only one ssh key worked
legacy_ssh_key_fqdn = "%s.ssh.%s" % (self.username, hesiod_domain)
records.append(DNSRecord(legacy_ssh_key_fqdn.lower(), self.ssh_keys[0]))
for _id, ssh_key in enumerate(self.ssh_keys):
ssh_key_fqdn = "%s.%s.ssh.%s" % (self.username, _id, hesiod_domain)
records.append(DNSRecord(ssh_key_fqdn.lower(), ssh_key))
return records
def passwd_line(self):
gid = ""
if self.groups:
gid = str(self.groups[0].gid)
return "%s:x:%d:%s:%s:%s:%s" % \
(self.username, self.uid, gid, self.gecos(), self.homedir, self.shell)
def gecos(self):
return "%s,,,," % self.name
def __eq__(self, other):
return self.passwd_line() == other.passwd_line()
def __ne__(self, other):
return not self == other
def __repr__(self):
return ("User(name=%s, username=%s, uid=%s, groups=%s, ssh_keys=%s, " +
"homedir=%s, shell=%s)") % \
(self.name, self.username, self.uid, self.groups,
self.ssh_keys, self.homedir, self.shell)
# syncs users and groups to route53
# users is a list of users
# groups is a list of groups
# route53_zone - the hosted zone in Route53 to modify, e.g. example.com
# hesiod_domain - the zone with hesiod information, e.g. hesiod.example.com
def sync(users, groups, route53_zone, hesiod_domain, dry_run):
conn = r53.connect_to_region('us-east-1')
record_type = "TXT"
ttl = "60"
# suffix of . on zone if not supplied
if route53_zone[-1:] != '.':
route53_zone += "."
if hesiod_domain[-1:] != '.':
hesiod_domain += "."
# get existing hosted zones
zones = {}
results = conn.get_all_hosted_zones()
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# ensure requested zone is hosted by route53
if not route53_zone in zones:
raise Exception("Zone %s does not exist in Route53" % route53_zone)
sets = conn.get_all_rrsets(zones[route53_zone])
# existing records
existing_records = set()
for rset in sets:
if rset.type == record_type:
if rset.name.endswith("group." + hesiod_domain) or \
rset.name.endswith("gid." + hesiod_domain) or \
rset.name.endswith("passwd." + hesiod_domain) or \
rset.name.endswith("uid." + hesiod_domain) or \
rset.name.endswith("grplist." + hesiod_domain) or \
rset.name.endswith("ssh." + hesiod_domain):
value = "".join(rset.resource_records).replace('"', '')
existing_records.add(DNSRecord(str(rset.name), str(value)))
# new records
new_records = set()
for group in groups:
for record in group.dns_records(hesiod_domain, users):
new_records.add(record)
for user in users:
for record in user.dns_records(hesiod_domain):
new_records.add(record)
to_remove = existing_records - new_records
to_add = new_records - existing_records
if to_remove:
print "Deleting:"
for r in sorted(to_remove):
print r
print
else:
print "Nothing to delete."
if to_add:
print "Adding:"
for r in sorted(to_add):
print r
print
else:
print "Nothing to add."
if dry_run:
print "Dry run mode. Stopping."
return
# stop if nothing to do
if not to_remove and not to_add:
return
for record_chunk in list(chunks(to_remove, 50)):
changes = ResourceRecordSets(conn, zones[route53_zone])
for record in record_chunk:
removal = changes.add_change("DELETE", record.fqdn, record_type, ttl)
removal.add_value(txt_value(record.value))
commit_changes(changes, conn)
for record_chunk in list(chunks(to_add, 50)):
changes = ResourceRecordSets(conn, zones[route53_zone])
for record in record_chunk:
addition = changes.add_change("CREATE", record.fqdn, record_type, ttl)
addition.add_value(txt_value(record.value))
commit_changes(changes, conn)
# Commit Changes
def commit_changes(changes, conn):
print "Commiting changes", changes
try:
result = changes.commit()
status = Status(conn, result["ChangeResourceRecordSetsResponse"]["ChangeInfo"])
except r53.exception.DNSServerError, e:
raise Exception("Could not update DNS records.", e)
while status.status == "PENDING":
print "Waiting for Route53 to propagate changes."
time.sleep(10)
print status.update()
# Make chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
lst=list(lst)
for i in range(0, len(lst), n):
yield lst[i:i + n]
# DNS text values are limited to chunks of 255, but multiple chunks are concatenated
# Amazon handles this by requiring you to add quotation marks around each chunk
def txt_value(value):
first = value[:255]
rest = value[255:]
if rest:
rest_value = txt_value(rest)
else:
rest_value = ""
return '"%s"%s' % (first, rest_value)
def load_data(filename):
with open(filename, "r") as f:
contents = yaml.load(f, Loader=yaml.FullLoader)
route53_zone = contents["route53_zone"]
hesiod_domain = contents["hesiod_domain"]
# all groups and users
groups_idx = {}
users_idx = {}
groups = []
users = []
for g in contents["groups"]:
group = Group(**g)
if group.name in groups_idx:
raise Exception("Group name is not unique: %s" % group.name)
if group.gid in groups_idx:
raise Exception("Group ID is not unique: %s" % group.gid)
groups_idx[group.name] = group
groups_idx[group.gid] = group
groups.append(group)
for u in contents["users"]:
groups_this = []
if u["groups"]:
for g in u["groups"]:
                group = groups_idx.get(g)
if not group:
raise Exception("No such group: %s" % g)
groups_this.append(group)
u["groups"] = groups_this
user = User(**u)
if len(user.groups) == 0:
raise Exception("At least one group required for user %s" % \
user.username)
if user.username in users_idx:
raise Exception("Username is not unique: %s" % user.username)
if user.uid in users_idx:
raise Exception("User ID is not unique: %s" % user.uid)
users_idx[user.username] = user
users_idx[user.uid] = user
users.append(user)
return users, groups, route53_zone, hesiod_domain
def main():
parser = argparse.ArgumentParser(
description="Synchronize a user database with Route53 for Hesiod.",
epilog = "AWS credentials follow the Boto standard. See " +
"http://docs.pythonboto.org/en/latest/boto_config_tut.html. " +
"For example, you can populate AWS_ACCESS_KEY_ID and " +
"AWS_SECRET_ACCESS_KEY with your credentials, or use IAM " +
"role-based authentication (in which case you need not do " +
"anything).")
parser.add_argument("user_file", metavar="USER_FILE",
help="The user yaml file. See example_users.yml for an example.")
parser.add_argument("--dry-run",
action='store_true',
dest="dry_run",
help="Dry run mode. Do not commit any changes.",
default=False)
args = parser.parse_args()
users, groups, route53_zone, hesiod_domain = load_data(args.user_file)
sync(users, groups, route53_zone, hesiod_domain, args.dry_run)
print "Done!"
if __name__ == "__main__":
main()
|
|
# Copyright (c) Pelagicore AB 2016
from jinja2 import Environment, Template, Undefined, StrictUndefined
from jinja2 import FileSystemLoader, PackageLoader, ChoiceLoader
from jinja2 import TemplateSyntaxError, TemplateNotFound, TemplateError
from path import Path
from antlr4 import InputStream, FileStream, CommonTokenStream, ParseTreeWalker
from antlr4.error import DiagnosticErrorListener, ErrorListener
import shelve
import logging
import hashlib
import yaml
import click
import sys
import os
from .idl.parser.TLexer import TLexer
from .idl.parser.TParser import TParser
from .idl.parser.TListener import TListener
from .idl.profile import EProfile
from .idl.domain import System
from .idl.listener import DomainListener
from .filters import get_filters
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
logger = logging.getLogger(__name__)
def merge(a, b):
"merges b into a recursively if a and b are dicts"
for key in b:
if isinstance(a.get(key), dict) and isinstance(b.get(key), dict):
merge(a[key], b[key])
else:
a[key] = b[key]
return a
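# Hedged illustration (dict contents are made up): nested dicts are merged
# recursively, while scalar values from `b` overwrite the ones in `a`.
def _merge_example():
    a = {'tags': {'x': 1}, 'name': 'old'}
    b = {'tags': {'y': 2}, 'name': 'new'}
    assert merge(a, b) == {'tags': {'x': 1, 'y': 2}, 'name': 'new'}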
class TestableUndefined(StrictUndefined):
"""Return an error for all undefined values, but allow testing them in if statements"""
def __bool__(self):
return False
class ReportingErrorListener(ErrorListener.ErrorListener):
""" Provides an API for accessing the file system and controlling the generator """
def __init__(self, document):
self.document = document
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
msg = '{0}:{1}:{2} {3}'.format(self.document, line, column, msg)
click.secho(msg, fg='red')
raise ValueError(msg)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
click.secho('ambiguity', fg='red')
def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs):
click.secho('reportAttemptingFullContext', fg='red')
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
click.secho('reportContextSensitivity', fg='red')
class Generator(object):
"""Manages the templates and applies your context data"""
strict = False
""" enables strict code generation """
def __init__(self, search_path, context={}, force=False):
loader = ChoiceLoader([
FileSystemLoader(search_path),
PackageLoader('qface')
])
self.env = Environment(
loader=loader,
trim_blocks=True,
lstrip_blocks=True,
)
self.env.filters.update(get_filters())
self._destination = Path()
self._path = Path()
self._source = ''
self.context = context
self.force = force
@property
def destination(self):
"""destination prefix for generator write"""
return self._destination
@destination.setter
def destination(self, dst):
self._destination = dst
@property
def resolved_path(self):
return self.destination / self.path
@property
def path(self):
return self._path
@path.setter
def path(self, path):
if not path:
return
self._path = Path(self.apply(path))
@property
def source(self):
"""source prefix for template lookup"""
return self._source
@source.setter
def source(self, source):
if source:
self._source = source
@property
def filters(self):
return self.env.filters
@filters.setter
def filters(self, filters):
self.env.filters.update(filters)
def get_template(self, name):
"""Retrieves a single template file from the template loader"""
source = name
if name and name[0] == '/':
source = name[1:]
elif self.source is not None:
source = '/'.join((self.source, name))
return self.env.get_template(source)
def render(self, name, context):
"""Returns the rendered text from a single template file from the
template loader using the given context data"""
if Generator.strict:
self.env.undefined = TestableUndefined
else:
self.env.undefined = Undefined
template = self.get_template(name)
return template.render(context)
def apply(self, template, context={}):
        """Return the rendered text of a template instance"""
        context.update(self.context)
return self.env.from_string(template).render(context)
def write(self, file_path, template, context={}, preserve=False, force=False):
"""Using a template file name it renders a template
into a file given a context
"""
if not file_path or not template:
click.secho('source or target missing for document')
return
if not context:
context = self.context
error = False
try:
self._write(file_path, template, context, preserve, force)
except TemplateSyntaxError as exc:
message = '{0}:{1}: error: {2}'.format(exc.filename, exc.lineno, exc.message)
click.secho(message, fg='red', err=True)
error = True
except TemplateNotFound as exc:
message = '{0}: error: Template not found'.format(exc.name)
click.secho(message, fg='red', err=True)
error = True
except TemplateError as exc:
exc_tb = sys.exc_info()[2]
            while exc_tb.tb_next is not None:
exc_tb = exc_tb.tb_next
message = '{0}:{1}: error: {2}'.format(exc_tb.tb_frame.f_code.co_filename, exc_tb.tb_lineno, exc.message)
click.secho(message, fg='red', err=True)
error = True
if error and Generator.strict:
sys.exit(1)
def _write(self, file_path: Path, template: str, context: dict, preserve: bool = False, force: bool = False):
force = self.force or force
path = self.resolved_path / Path(self.apply(file_path, context))
if path.parent:
path.parent.makedirs_p()
logger.info('write {0}'.format(path))
data = self.render(template, context)
if self._has_different_content(data, path) or force:
if path.exists() and preserve and not force:
click.secho('preserve: {0}'.format(path), fg='blue')
else:
click.secho('create: {0}'.format(path), fg='blue')
path.open('w', encoding='utf-8').write(data)
def _has_different_content(self, data, path):
if not path.exists():
return True
dataHash = hashlib.new('md5', data.encode('utf-8')).digest()
pathHash = path.read_hash('md5')
return dataHash != pathHash
def register_filter(self, name, callback):
"""Register your custom template filter"""
self.env.filters[name] = callback
class RuleGenerator(Generator):
"""Generates documents based on a rule YAML document"""
def __init__(self, search_path: str, destination:Path, context:dict={}, features:set=set(), force=False):
super().__init__(search_path, context, force)
self.context.update({
'dst': destination,
'project': Path(destination).name,
'features': features,
})
self.destination = destination
self.features = features
def process_rules(self, path: Path, system: System):
"""writes the templates read from the rules document"""
self.context.update({
'system': system,
})
document = FileSystem.load_yaml(path, required=True)
for module, rules in document.items():
click.secho('process: {0}'.format(module), fg='green')
self._process_rules(rules, system)
def _process_rules(self, rules: dict, system: System):
""" process a set of rules for a target """
self._source = None # reset the template source
if not self._shall_proceed(rules):
return
self.context.update(rules.get('context', {}))
self.path = rules.get('path', '')
self.source = rules.get('source', None)
self._process_rule(rules.get('system', None), {'system': system})
for module in system.modules:
self._process_rule(rules.get('module', None), {'module': module})
for interface in module.interfaces:
self._process_rule(rules.get('interface', None), {'interface': interface})
for struct in module.structs:
self._process_rule(rules.get('struct', None), {'struct': struct})
for enum in module.enums:
self._process_rule(rules.get('enum', None), {'enum': enum})
def _process_rule(self, rule: dict, context: dict):
""" process a single rule """
if not rule or not self._shall_proceed(rule):
return
self.context.update(context)
self.context.update(rule.get('context', {}))
self.path = rule.get('path', None)
self.source = rule.get('source', None)
for entry in rule.get('documents', []):
target, source = self._resolve_rule_document(entry)
self.write(target, source)
for entry in rule.get('preserve', []):
target, source = self._resolve_rule_document(entry)
self.write(target, source, preserve=True)
def _shall_proceed(self, obj):
conditions = obj.get('when', [])
if not conditions:
return True
if not isinstance(conditions, list):
conditions = [conditions]
result = self.features.intersection(set(conditions))
return bool(len(result))
def _resolve_rule_document(self, entry):
if type(entry) is dict:
return next(iter(entry.items()))
return (entry, entry)
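# Hedged sketch of the rules YAML consumed by process_rules (target, feature and
# template names below are made up for illustration):
#   mytarget:
#     when: experimental              # optional guard, matched against `features`
#     context: {license: MIT}         # extra values merged into the template context
#     module:
#       path: '{{project}}'           # destination sub-path, itself a template
#       documents:
#         - '{{module}}.h': plugin.h.tpl   # target file: source template
#       preserve:
#         - CMakeLists.txt            # only written when it does not exist yet
# Each rule level (system/module/interface/struct/enum) is rendered once per
# matching symbol of the parsed System.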
class FileSystem(object):
"""QFace helper functions to work with the file system"""
strict = False
""" enables strict parsing """
@staticmethod
def parse_document(document: Path, system: System = None, profile=EProfile.FULL):
error = False
try:
return FileSystem._parse_document(document, system, profile)
except FileNotFoundError as e:
click.secho('{0}: error: file not found'.format(document), fg='red', err=True)
error = True
except ValueError as e:
# The error is already printed in the ErrorHandler in this case
error = True
except Exception as e:
click.secho('Error parsing document {0}'.format(document), fg='red', err=True)
error = True
if error and FileSystem.strict:
sys.exit(-1)
@staticmethod
def parse_text(text: str, system: System = None, profile=EProfile.FULL):
stream = InputStream(text)
return FileSystem._parse_stream(stream, system, "<TEXT>", profile)
@staticmethod
def _parse_document(document: Path, system: System = None, profile=EProfile.FULL):
"""Parses a document and returns the resulting domain system
:param path: document path to parse
:param system: system to be used (optional)
"""
logger.debug('parse document: {0}'.format(document))
stream = FileStream(str(document), encoding='utf-8')
system = FileSystem._parse_stream(stream, system, document, profile)
FileSystem.merge_annotations(system, document.stripext() + '.yaml')
return system
@staticmethod
def _parse_stream(stream: InputStream, system: System = None, document=None, profile=EProfile.FULL):
logger.debug('parse stream')
system = system or System()
lexer = TLexer(stream)
stream = CommonTokenStream(lexer)
parser = TParser(stream)
parser.removeErrorListeners()
parser.addErrorListener(ReportingErrorListener(document))
tree = parser.documentSymbol()
walker = ParseTreeWalker()
walker.walk(DomainListener(system, profile), tree)
return system
@staticmethod
def merge_annotations(system, document):
"""Read a YAML document and for each root symbol identifier
updates the tag information of that symbol
"""
if not Path(document).exists():
return
meta = FileSystem.load_yaml(document)
if not meta:
click.secho('skipping empty: {0}'.format(document.name), fg='blue')
return
else:
click.secho('merge: {0}'.format(document.name), fg='blue')
try:
for identifier, data in meta.items():
symbol = system.lookup(identifier)
if symbol:
merge(symbol.tags, data)
except Exception as e:
click.secho('Error parsing annotation {0}: {1}'.format(document, e), fg='red', err=True)
if FileSystem.strict:
sys.exit(-1)
@staticmethod
def parse(input, identifier: str = None, use_cache=False, clear_cache=True, pattern="*.qface", profile=EProfile.FULL):
"""Input can be either a file or directory or a list of files or directory.
A directory will be parsed recursively. The function returns the resulting system.
Stores the result of the run in the domain cache named after the identifier.
:param path: directory to parse
:param identifier: identifies the parse run. Used to name the cache
:param clear_cache: clears the domain cache (defaults to true)
"""
inputs = input if isinstance(input, (list, tuple)) else [input]
logger.debug('parse input={0}'.format(inputs))
identifier = 'system' if not identifier else identifier
system = System()
cache = None
if use_cache:
cache = shelve.open('qface.cache')
if identifier in cache and clear_cache:
del cache[identifier]
if identifier in cache:
# use the cached domain model
system = cache[identifier]
# if domain model not cached generate it
for input in inputs:
path = Path.getcwd() / str(input)
if path.isfile():
FileSystem.parse_document(path, system)
else:
for document in path.walkfiles(pattern):
FileSystem.parse_document(document, system)
if use_cache:
cache[identifier] = system
return system
@staticmethod
def load_yaml(document: Path, required=False):
document = Path(document)
if not document.exists():
if required:
                click.secho('yaml document does not exist: {0}'.format(document), fg='red', err=True)
if FileSystem.strict:
sys.exit(-1)
return {}
try:
return yaml.load(document.text(), Loader=Loader)
except yaml.YAMLError as exc:
error = document
if hasattr(exc, 'problem_mark'):
error = '{0}:{1}'.format(error, exc.problem_mark.line+1)
click.secho('{0}: error: {1}'.format(error, str(exc)), fg='red', err=True)
if FileSystem.strict:
sys.exit(-1)
return {}
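# Hedged usage sketch (paths below are made up): parse a directory of .qface
# files into a System, then drive code generation from a rules document.
#   system = FileSystem.parse('interfaces/')
#   generator = RuleGenerator(search_path='templates', destination=Path('out'))
#   generator.process_rules('rules.yaml', system)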
|
|
from bottle import Bottle, request, template, redirect
import argparse as ap
import os, re, sys, csv, traceback
import json
from user_data import user_dir
from model import db, apps, jobs, plots, datasource
from common import replace_tags
import config
routes = Bottle()
def bind(app):
global root
root = ap.Namespace(**app)
def compute_stats(path):
"""compute statistics on output data"""
xoutput = ''
if os.path.exists(path):
        with open(path, 'r') as f:
            output = f.readlines()
for line in output:
m = re.search(r'#.*$', line)
if m:
xoutput += line
# app-specific: this is a temporary hack for mendel (remove in future)
if path[-3:] == "hst":
xoutput += output[len(output)-1]
return xoutput
class Plot(object):
def get_data(self,fn,col1,col2=None,line1=1,line2=1e6):
"""return data as string in format [ [x1,y1], [x2,y2], ... ]"""
y = ''
z = []
lineno = 0
try:
data = open(fn, 'rU').readlines()
except IOError:
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
return -1
try:
nlines = len(data)
# allow for tailing a file by giving a negative range, e.g. -100:10000
if line1 < 0:
line1 += nlines
for line in data:
lineno += 1
if lineno >= line1 and lineno <= line2:
# don't parse comments
if re.search(r'#',line): continue
x = line.split()
#following line doesnt work when NaN's in another column
#if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}',line):
if col2:
y += '[ ' + x[col1-1] + ', ' + x[col2-1] + '], '
else:
try: z += [ float(x[col1-1]) ]
except: pass
if col2:
return "[ %s ]" % y
else:
return z
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
return -2
def get_csv_data(self, fn):
try:
with open(fn, 'rU') as csv_file:
data = csv.reader(csv_file)
return list(data)
except IOError:
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
return -1
def get_data_gantt(self,fn,col1,col2,col3,col4,line1=1,line2=1e6):
"""return data as string in format [ [x1,y1], [x2,y2], ... ]"""
y = ''
lineno = 0
try:
data = open(fn, 'rU').readlines()
nlines = len(data)
# allow for tailing a file by giving a negative range, e.g. -100:10000
if line1 < 0:
line1 += nlines
for line in data:
lineno += 1
if lineno >= line1 and lineno <= line2:
# don't parse comments
if re.search(r'#',line): continue
x = line.split()
#following line doesnt work when NaN's in another column
#if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}',line):
y += '[ ' + x[col1-1] + ', ' + x[col2-1] + x[col3-1] + x[col4-1] + '], '
s = "[ %s ]" % y
return s
except:
return False
def get_raw_data(self,fn,line1=1,line2=1e6):
"""return data as an array..."""
data = open(fn, 'rU').readlines()
return data[line1:line2]
def get_column_of_data(self,fn,col,line1=1,line2=1e6):
try:
y = []
lineno = 0
data = open(fn, 'rU').readlines()
nlines = len(data)
# allow for tailing a file by giving a negative range, e.g. -100:10000
if line1 < 0:
line1 += nlines
for line in data:
lineno += 1
if lineno >= line1 and lineno <= line2:
# don't parse comments
if re.search(r'#',line): continue
x = line.split()
#following line doesnt work when NaN's in another column
#if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}',line):
y += [ x[col-1] ]
return y
except:
return False
def get_ticks(self,fn,col1,col2):
try:
y = ''
i = 0
for line in open(fn, 'rU'):
# don't parse comments
if re.search(r'#',line): continue
x = line.split()
if not re.search(r'[A-Za-z]{2,}\s+[A-Za-z]{2,}',line):
y += '[ ' + str(i) + ', ' + x[col1-1] + '], '
i += 1
s = "[ %s ]" % y
return s
except:
return False
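# Hedged illustration of the Plot helpers (file contents below are made up):
# for a whitespace-separated file whose rows are "1 10" and "2 20",
#   Plot().get_data(fn, 1, 2)  returns a flot-ready string like "[ [ 1, 10], [ 2, 20],  ]"
#   Plot().get_data(fn, 1)     returns the first column as floats, [1.0, 2.0]
#   Plot().get_ticks(fn, 1, 2) returns "[ [ 0, 1], [ 1, 2],  ]" (row index vs. column 1)
# Lines containing '#' are skipped as comments by all three helpers.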
@routes.get('/plots/edit')
def editplotdefs():
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.query.app
if config.auth and not root.authorized(): redirect('/login')
if app not in root.myapps: redirect('/apps')
query = (root.apps.id==plots.appid) & (root.apps.name==app)
result = db(query).select()
params = { 'app': app, 'user': user }
return template('plots/plotdefs', params, rows=result)
@routes.get('/plots/edit/<pltid>')
def editplotdef(pltid):
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.app
result = db(plots.id==pltid).select().first()
params = { 'app': app, 'user': user }
return template('plots/edit_plot', params, row=result)
@routes.post('/plots/edit/<pltid>')
def editplot(pltid):
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.app
title = request.forms.title
ptype = request.forms.ptype
options = request.forms.options
print "updating plot ", pltid, "for app", app
plots(pltid).update_record(title=title, ptype=ptype, options=options)
db.commit()
redirect('/plots/edit?app='+app)
@routes.get('/plots/delete/<pltid>')
def delete_plot(pltid):
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.query.app
del db.plots[pltid]
db.commit()
redirect ('/plots/edit?app='+app)
@routes.get('/plots/<pltid>/datasources')
def get_datasource(pltid):
"""get list of datasources for given plot"""
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.query.app
cid = request.query.cid
    if app not in root.myapps: redirect('/apps')
if config.auth and not root.authorized(): redirect('/login')
result = db(datasource.pltid==pltid).select()
title = plots(pltid).title
params = { 'app': app, 'cid': cid, 'user': user, 'pltid': pltid, 'rows': result, 'title': title}
return template('plots/datasources', params, rows=result)
@routes.post('/plots/<pltid>/datasources')
def add_datasource(pltid):
"""create a new datasource for given plot"""
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.app
r = request.forms
datasource.insert(pltid=pltid, label=r['label'], filename=r['fn'], cols=r['cols'],
line_range=r['line_range'], data_def=r['data_def'])
db.commit()
redirect ('/plots/' + str(pltid) + '/datasources?app='+app)
@routes.get('/plots/<pltid>/datasources/<dsid>')
def edit_datasource(pltid, dsid):
"""create a new datasource for given plot"""
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.query.app
query = (datasource.id==dsid)
result = db(query).select().first()
params = {'app': app, 'pltid': pltid, 'dsid': dsid}
return template('plots/edit_datasource', params, row=result)
@routes.post('/plots/<pltid>/datasources/<dsid>')
def edit_datasource_post(pltid, dsid):
"""update datasource for given plot"""
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.get('app')
r = request.forms
datasource(id=dsid).update_record(label=r['label'], pltid=pltid, filename=r['fn'], cols=r['cols'],
line_range=r['line_range'], data_def=r['data_def'])
db.commit()
redirect ('/plots/' + str(pltid) + '/datasources?app='+app)
params = {'app': app, 'pltid': pltid, 'dsid': dsid}
return template('plots/edit_datasource', params)
@routes.post('/plots/datasource_delete')
def delete_datasource():
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.get('app')
pltid = request.forms.get('pltid')
dsid = request.forms.get('dsid')
del db.datasource[dsid]
db.commit()
redirect ('/plots/' + str(pltid) + '/datasources?app='+app)
@routes.post('/plots/create')
def create_plot():
user = root.authorized()
if user != 'admin':
return template('error', err="must be admin to edit plots")
app = request.forms.get('app')
r = request
plots.insert(appid=root.myapps[app].appid, ptype=r.forms['ptype'],
title=r.forms['title'], options=r.forms['options'])
db.commit()
redirect ('/plots/edit?app='+app)
@routes.get('/plot/<pltid>')
def plot_interface(pltid):
user = root.authorized()
app = request.query.app
cid = request.query.cid
jid = request.query.jid
params = dict()
if not cid:
params['err'] = "No case id specified. First select a case id from the list of jobs."
return template('error', params)
if re.search("/", cid):
(owner, c) = cid.split("/")
else:
owner = user
c = cid
shared = jobs(cid=c).shared
# only allow admin to see other user's cases that have not been shared
if owner != user and shared != "True" and user != "admin":
return template('error', err="access forbidden")
inputs, _, _ = root.myapps[app].read_params(owner, c)
sim_dir = os.path.join(user_dir, owner, app, c)
# use pltid of 0 to trigger finding the first pltid for the current app
if int(pltid) == 0:
query = (apps.id==plots.appid) & (apps.name==app)
result = db(query).select().first()
if result: pltid = result['plots']['id']
p = Plot()
# get the data for the pltid given
try:
result = db(plots.id==pltid).select().first()
plottype = result['ptype']
plot_title = result['title']
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
redirect ('/plots/edit?app='+app+'&cid='+cid)
# if plot not in DB return error
if plottype is None:
params = { 'cid': cid, 'app': app, 'user': user }
params['err'] = "Sorry! This app does not support plotting capability"
return template('error', params)
# determine which view template to use
if plottype == 'flot-cat':
tfn = 'plots/flot-cat'
elif plottype == 'flot-scatter':
tfn = 'plots/flot-scatter'
elif plottype == 'flot-scatter-animated':
        tfn = 'plots/flot-scatter-animated' # for backwards compatibility
elif plottype == 'flot-line':
tfn = 'plots/flot-scatter'
elif plottype == 'plotly-hist':
tfn = 'plots/plotly-hist'
elif plottype == 'mpl-line' or plottype == 'mpl-bar':
redirect('/mpl/'+pltid+'?app='+app+'&cid='+cid)
elif plottype == 'handson':
tfn = 'plots/handson'
elif plottype == 'flot-3d':
return plot_flot_3d(result, cid, app, sim_dir, owner, user, plot_title, pltid, inputs)
else:
return template("error", err="plot type not supported: " + plottype)
if result['options']:
options = replace_tags(result['options'], inputs)
else:
options = ''
# get list of all plots for this app
query = (apps.id==plots.appid) & (apps.name==app)
list_of_plots = db(query).select()
# extract data from files
data = []
ticks = []
plotpath = ''
result = db(datasource.pltid==pltid).select()
datadef = ""
for r in result:
plotfn = r['filename']
# in addition to supporting input params, also support case id
if "cid" not in inputs: inputs["cid"] = c
# replace <cid>.dat with xyz123.dat
plotfn = replace_tags(plotfn, inputs)
plotpath = os.path.join(sim_dir, plotfn)
# handle CSV data
_, file_extension = os.path.splitext(plotfn)
if file_extension == '.csv':
data = p.get_csv_data(plotpath)
stats = ''
# handle X, Y columnar data
else:
cols = r['cols']
line_range = r['line_range']
try:
datadef += r['data_def'] + ", "
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
datadef = ""
if cols.find(":") > 0: # two columns
num_fields = 2
(col1str, col2str) = cols.split(":")
col1 = int(col1str); col2 = int(col2str)
else: # single column
num_fields = 1
col1 = int(cols)
# do some postprocessing
if line_range is not None:
# to prevent breaking current spc apps, still support
# expressions like 1:1000, but in the future this should
# be changed to a range 1-1000. Therefore, using : is deprecated
# and will be removed in the future.
(line1str, line2str) = re.split("[-:]", line_range)
line1 = int(line1str)
## there is a problem with the following statement
## shows up in mendel app
# if root.myapps[app].postprocess > 0:
# dat = process.postprocess(plotpath, line1, line2)
# else:
try: # if line2 is specified
line2 = int(line2str)
dat = p.get_data(plotpath, col1, col2, line1, line2)
except: # if line2 not specified
exc_type, exc_value, exc_traceback = sys.exc_info()
print traceback.print_exception(exc_type, exc_value, exc_traceback)
if num_fields == 2:
dat = p.get_data(plotpath, col1, col2, line1)
else: # single column of data
dat = p.get_data(plotpath, col1)
# remove this app-specific code in future
if app == "fpg":
import process
dat = process.postprocess(plotpath, line1, line2)
else:
                # guard against col2 being undefined for single-column sources
                dat = p.get_data(plotpath, col1, col2) if num_fields == 2 else p.get_data(plotpath, col1)
if dat == -1:
stats = "ERROR: Could not read data file"
elif dat == -2:
stats = "ERROR: file exists, but problem parsing data. Are column and line ranges setup properly? Is all the data there?"
else:
stats = compute_stats(plotpath)
# [[1,2,3]] >>> [1,2,3]
# clean data
#dat = [d.replace('?', '0') for d in dat]
data.append(dat)
if num_fields == 1: data = data[0]
if plottype == 'flot-cat':
ticks = p.get_ticks(plotpath, col1, col2)
desc = jobs(cid=c).description
params = { 'cid': cid, 'pltid': pltid,
'data': data, 'app': app, 'user': user, 'owner': owner,
'ticks': ticks, 'plot_title': plot_title, 'plotpath': plotpath,
'rows': list_of_plots, 'options': options, 'datadef': datadef,
'stats': stats, 'description': desc }
if jid: params['jid'] = jid
return template(tfn, params)
def plot_flot_3d(plot, cid, app, sim_dir, owner, user, plot_title, pltid, inputs):
# to handle data in user/cid format when looking at shared cases
if re.search("/", cid):
(owner, c) = cid.split("/")
else:
owner = user
c = cid
desc = jobs(cid=c).description
list_of_plots = db((apps.id==plots.appid) & (apps.name==app)).select()
options = json.loads(plot['options'])
plot_data = []
z_data = []
data_dir = os.path.join(sim_dir, options['directory'])
z_property = options['z_property']
file_names = sorted(os.listdir(data_dir))
for file_name in file_names:
file_path = os.path.join(data_dir, file_name)
if os.path.isfile(file_path) and not file_name.startswith('.') and file_name.endswith('.json'):
with open(file_path) as file_:
file_data = json.load(file_)
all_series = []
for source in options['datasources']:
series = {
'data': zip(file_data[source['x_property']], file_data[source['y_property']]),
}
series.update(source['data_def'])
all_series.append(series)
plot_data.append(all_series)
z_data.append(file_data[z_property])
params = {
'app': app,
'cid': cid,
'description': desc,
'owner': owner,
'plot_title': plot_title,
'pltid': pltid,
'rows': list_of_plots,
'stats': '',
'user': user,
'flot_3d_json': json.dumps({
'flot_options': options['flot_options'],
'job_params': inputs,
'flot_options_transformer': options.get('flot_options_transformer', ''),
'data': plot_data,
'z_data': z_data,
'z_label': options['z_label'],
'x_axis_scale': options.get('x_axis_scale', ''),
}),
}
return template('plots/flot-3d', params)
@routes.get('/mpl/<pltid>')
def matplotlib(pltid):
"""Generate a random image using Matplotlib and display it"""
# in the future create a private function __import__ to import third-party
# libraries, so that it can respond gracefully. See for example the
# Examples section at https://docs.python.org/2/library/imp.html
user = root.authorized()
import StringIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
app = request.query.app
cid = request.query.cid
fig = Figure()
fig.set_tight_layout(True)
ax = fig.add_subplot(111)
# get info about plot from db
p = Plot()
result = db(plots.id==pltid).select().first()
plot_title = result['title']
plottype = result['ptype']
options = result['options']
# parse plot options to extract and set x- and y-axis labels
m = re.search("xaxis:\s*{(.*)}", options)
if m:
n = re.search("axisLabel:\s*\"(\w*)\"", m.group(1))
if n: ax.set_xlabel(n.group(1))
m = re.search("yaxis:\s*{(.*)}", options)
if m:
n = re.search("axisLabel:\s*\"(\w*)\"", m.group(1))
if n: ax.set_ylabel(n.group(1))
# get info about data source
# fix in the future to handle multiple data sources
result = db(datasource.pltid==pltid).select()
for r in result:
plotfn = r['filename']
cols = r['cols']
line_range = r['line_range']
(col1str, col2str) = cols.split(":")
col1 = int(col1str)
col2 = int(col2str)
if line_range is not None:
# to prevent breaking current spc apps, still support
# expressions like 1:1000, but in the future this should
# be changed to a range 1-1000. Therefore, using : is deprecated
# and will be removed in the future.
(line1str, line2str) = re.split("[-:]", line_range)
plotfn = re.sub(r"<cid>", cid, plotfn)
sim_dir = os.path.join(user_dir, user, app, cid)
plotpath = os.path.join(sim_dir, plotfn)
xx = p.get_column_of_data(plotpath, col1)
yy = p.get_column_of_data(plotpath, col2)
# convert elements from strings to floats
xx = [float(i) for i in xx]
yy = [float(i) for i in yy]
# plot
if plottype == 'mpl-line':
ax.plot(xx, yy)
elif plottype == 'mpl-bar':
ax.bar(xx, yy)
else:
return "ERROR: plottype not supported"
canvas = FigureCanvas(fig)
png_output = StringIO.StringIO()
canvas.print_png(png_output)
# save file
tmp_dir = "static/tmp"
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
fn = plot_title+'.png'
fig.set_size_inches(7, 4)
img_path = os.path.join(sim_dir, fn)
fig.savefig(img_path)
# get list of all plots for this app
query = (apps.id==plots.appid) & (apps.name==app)
list_of_plots = db(query).select()
stats = compute_stats(plotpath)
params = {'image': fn, 'app': app, 'cid': cid, 'pltid': pltid,
'plotpath': plotpath, 'img_path': img_path, 'plot_title': plot_title,
'rows': list_of_plots, 'stats': stats }
return template('plots/matplotlib', params)
|
|
"""Tests for common methods of DataseIndex and FilesIndex classes.
"""
# pylint: disable=missing-docstring
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
import os
import shutil
import pytest
import numpy as np
from batchflow import DatasetIndex, FilesIndex
@pytest.fixture(scope='module')
def files_setup(request):
""" Fixture that creates files for tests """
path = 'fi_test_tmp'
folders = [path]
for folder in folders:
os.mkdir(folder)
for i in range(5):
open(os.path.join(folder, 'file_{}.txt'.format(i)), 'w').close()
def fin():
shutil.rmtree(path)
request.addfinalizer(fin)
return path, [name for name in os.listdir(path) if name.endswith('txt')]
@pytest.fixture(params=[5, ['a', 'b', 'c', 'd', 'e'], None])
def index(request, files_setup):
if isinstance(request.param, int):
return DatasetIndex(request.param), np.arange(request.param)
if isinstance(request.param, list):
return DatasetIndex(request.param), request.param
path, files = files_setup
return FilesIndex(path=os.path.join(path, '*')), files
@pytest.fixture(params=[DatasetIndex, FilesIndex])
def small_index(request, files_setup):
if request.param is DatasetIndex:
return DatasetIndex(1)
path, _ = files_setup
return FilesIndex(path=os.path.join(path, '*1.txt'))
def test_len(index):
index, _ = index
assert len(index) == 5
def test_calc_split_raise_1(index):
index, _ = index
with pytest.raises(ValueError):
index.calc_split(shares=[0.5, 0.5, 0.5])
with pytest.raises(ValueError):
index.calc_split(shares=[0.5, 0.5, 0.5, 0.5])
def test_calc_split_raise_2(small_index):
with pytest.raises(ValueError):
small_index.calc_split(shares=[0.5, 0.3, 0.2])
def test_calc_split_correctness_1(index):
index, _ = index
assert sum(index.calc_split()) == 5
def test_calc_split_correctness_2(index):
""" If 'shares' contains 2 elements, validation subset is empty. """
index, _ = index
left = index.calc_split(shares=[0.4, 0.6])
right = (2, 3, 0)
assert left == right
def test_calc_split_correctness_3(index):
""" If 'shares' contains 3 elements, then validation subset is non-empty. """
index, _ = index
_, _, valid_share = index.calc_split(shares=[0.5, 0.5, 0])
assert valid_share == 0
def test_split_correctness(index):
""" Each element of 'index' is used.
Constants in 'shares' are such that test does not raise errors.
"""
index, _ = index
shares = .3 - np.random.random(3) *.05
index.split(shares=shares)
assert set(index.index) == (set(index.train.index)
| set(index.test.index)
| set(index.validation.index))
def test_get_pos(index):
index, values = index
elem = values[4]
assert index.get_pos(elem) == 4
def test_get_pos_slice(index):
index, values = index
elem = values[slice(0, 4, 2)]
assert (index.get_pos(elem) == np.array([0, 2])).all()
def test_get_pos_iterable(index):
index, values = index
elem = values
assert (index.get_pos(elem) == np.arange(len(values))).all()
def test_shuffle_bool_false(index):
index, values = index
right = np.arange(len(values))
np.random.seed(13)
left = index.shuffle(shuffle=False)
assert (left == right).all()
def test_shuffle_bool_true(index):
index, values = index
np.random.seed(13)
right = np.random.permutation(np.arange(len(values)))
np.random.seed(13)
left = index.shuffle(shuffle=True)
assert (left == right).all()
def test_shuffle_int(index):
index, values = index
right = np.random.RandomState(13).permutation(np.arange(len(values)))
left = index.shuffle(shuffle=13)
assert (left == right).all()
def test_shuffle_randomstate(index):
index, values = index
right = np.random.RandomState(13).permutation(np.arange(len(values)))
left = index.shuffle(shuffle=np.random.RandomState(13))
assert (left == right).all()
def test_shuffle_bool_callable(index):
""" Callable 'shuffle' should return order. """
index, values = index
right = np.arange(len(values))[::-1]
left = index.shuffle(shuffle=(lambda ind: np.arange(len(ind)-1, -1, -1)))
assert (left == right).all()
def test_create_batch_pos_true(index):
""" When 'pos' is True, method creates new batch by specified positions. """
index, values = index
right = values[:5]
left = index.create_batch(range(5), pos=True).index
assert (left == right).all()
def test_create_batch_pos_false_str(index):
""" When 'pos' is False, method returns the same, as its first argument. """
index, values = index
right = np.array([values[0], values[-1]])
left = index.create_batch(right, pos=False).index
assert (left == right).all()
def test_next_batch_stopiter_raise(index):
""" Iteration is blocked after end of DatasetIndex. """
index, _ = index
index.next_batch(5, n_epochs=1)
with pytest.raises(StopIteration):
index.next_batch(5, n_epochs=1)
def test_next_batch_stopiter_pass(index):
""" When 'n_epochs' is None it is possible to iterate infinitely. """
index, _ = index
for _ in range(10):
index.next_batch(1, n_epochs=None)
def test_next_batch_drop_last_false_1(index):
""" When 'drop_last' is False 'next_batch' should cycle through index. """
index, _ = index
left = []
right = list(np.concatenate([index.index, index.index]))
for length in [3, 3, 4]:
batch = index.next_batch(batch_size=length,
n_epochs=2,
drop_last=False)
left.extend(list(batch.index))
assert left == right
def test_next_batch_drop_last_false_2(index):
""" When 'drop_last' is False last batch of last epoch can have smaller length. """
index, _ = index
left = []
right = [2]*7 + [1] # first seven batches have length of 2, last contains one item
for _ in range(8):
batch = index.next_batch(batch_size=2,
n_epochs=3,
drop_last=False)
left.append(len(batch))
assert left == right
def test_next_batch_drop_last_true(index):
""" Order and contents of generated batches is same at every epoch.
'shuffle' is False, so dropped indices are always the same.
"""
index, _ = index
for _ in range(10):
batch_1 = index.next_batch(batch_size=2,
n_epochs=None,
drop_last=True,
shuffle=False)
batch_2 = index.next_batch(batch_size=2,
n_epochs=None,
drop_last=True,
shuffle=False)
assert (batch_1.index == index.index[:2]).all()
assert (batch_2.index == index.index[2:4]).all()
def test_next_batch_smaller(index):
""" 'batch_size' is twice as small as length DatasetIndex. """
index, _ = index
for _ in range(10):
batch = index.next_batch(batch_size=2,
n_epochs=None,
drop_last=True)
assert len(batch) == 2
def test_next_batch_bigger(index):
""" When 'batch_size' is bigger than DatasetIndex's length, ValueError is raised
"""
index, _ = index
with pytest.raises(ValueError):
index.next_batch(batch_size=7, n_epochs=None, drop_last=True)
|
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Various utilities for interactive notebooks.
"""
import functools
import collections
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backend_bases import MouseButton
from cycler import cycler as make_cycler
import mplcursors
from ipywidgets import widgets, Layout, interact
from IPython.display import display
from lisa.utils import is_running_ipython
COLOR_CYCLE = [
'#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00'
]
"""
Colorblind-friendly cycle, see https://gist.github.com/thriveth/8560036
"""
plt.rcParams['axes.prop_cycle'] = make_cycler(color=COLOR_CYCLE)
class WrappingHBox(widgets.HBox):
"""
HBox that will overflow on multiple lines if the content is too large to
fit on one line.
"""
def __init__(self, *args, **kwargs):
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
# Evenly spread on one line of items
justify_content='space-around',
)
super().__init__(*args, layout=layout, **kwargs)
# Make a subclass so we can integrate better with mplcursors
class _DataframeLinkMarker(mpl.lines.Line2D):
pass
# Tell mplcursors that we are never selecting the marker line, so that it
# will still show the coordinates of the data that were plotted, rather
# than useless coordinates of the marker
@mplcursors.compute_pick.register(_DataframeLinkMarker)
def _(artist, event):
return None
def _make_vline(axis, *args, **kwargs):
vline = axis.axvline(*args, **kwargs)
assert type(vline) is mpl.lines.Line2D # pylint: disable=unidiomatic-typecheck
vline.__class__ = _DataframeLinkMarker
vline.set_visible(False)
return vline
def axis_link_dataframes(axis, df_list, before=1, after=5, cursor_color='red', follow_cursor=False):
"""
Link some dataframes to an axis displayed in the interactive matplotlib widget.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param df_list: List of pandas dataframe to link.
:type df_list: list(pandas.DataFrame)
:param before: Number of dataframe rows to display before the selected
location.
:type before: int
:param after: Number of dataframe rows to display after the selected
location.
:type after: int
:param cursor_color: Color of the vertical line added at the clicked
location.
:type cursor_color: str
:param follow_cursor: If ``True``, the cursor will be followed without the
need to click.
:type follow_cursor: bool
When the user clicks on the graph, a vertical marker will appear and the
dataframe slice will update to show the relevant row.
.. note:: This requires the matplotlib widget enabled using ``%matplotlib
widget`` magic.
"""
df_list = [df for df in df_list if not df.empty]
output_list = [widgets.Output() for df in df_list]
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
        # Evenly spread items on one line when there is more than one item,
# align left otherwise
justify_content='space-around' if len(df_list) > 1 else 'flex-start',
)
hbox = widgets.HBox(output_list, layout=layout)
cursor_vline = _make_vline(axis, color=cursor_color)
def show_loc(loc):
cursor_vline.set_xdata(loc)
cursor_vline.set_visible(True)
for df, output in zip(df_list, output_list):
if loc < df.index[0]:
iloc = 0
elif loc > df.index[-1]:
iloc = -1
else:
iloc = df.index.get_loc(loc, method='ffill')
index_loc = df.index[iloc]
begin = max(iloc - before, 0)
end = min(iloc + after, len(df))
sliced_df = df.iloc[begin:end]
def highlight_row(row):
if row.name == index_loc: # pylint: disable=cell-var-from-loop
return ['background: lightblue'] * len(row)
else:
return [''] * len(row)
styler = sliced_df.style.apply(highlight_row, axis=1)
styler = styler.set_properties(**{
'text-align': 'left',
                # preserve multiple consecutive spaces
'white-space': 'pre',
# Make sure all chars have the same width to preserve column
# alignments in preformatted strings
'font-family': 'monospace',
})
# wait=True avoids flicker by waiting for new content to be ready
# to display before clearing the previous one
output.clear_output(wait=True)
with output:
display(styler)
init_loc = min((df.index[0] for df in df_list), default=0)
show_loc(init_loc)
def handler(event):
loc = event.xdata
return show_loc(loc)
event = 'motion_notify_event' if follow_cursor else 'button_press_event'
axis.get_figure().canvas.mpl_connect(event, handler)
display(hbox)
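# Hedged usage sketch (the dataframe below is made up; requires the
# `%matplotlib widget` backend in a notebook):
#   fig, axis = make_figure(width=16, height=4, nrows=1, ncols=1)
#   df.plot(ax=axis)
#   axis_link_dataframes(axis, [df], before=2, after=5, follow_cursor=True)
# Clicking (or hovering, with follow_cursor=True) moves the vertical marker and
# refreshes the displayed dataframe slices around the selected index.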
def axis_cursor_delta(axis, colors=('blue', 'green'), buttons=(MouseButton.LEFT, MouseButton.RIGHT)):
"""
Display the time delta between two vertical lines drawn on clicks.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param colors: List of colors to use for vertical lines.
:type colors: list(str)
:param buttons: Mouse buttons to use for each vertical line.
:type buttons: list(matplotlib.backend_bases.MouseButton)
.. note:: This requires the matplotlib widget enabled using
``%matplotlib widget`` magic.
"""
delta_widget = widgets.Text(
value='0',
placeholder='0',
description='Cursors delta',
disabled=False,
)
vlines = [
_make_vline(axis, color=color)
for color in colors
]
assert len(vlines) == 2
vlines_map = dict(zip(buttons, vlines))
vlines_loc = collections.defaultdict(
lambda: min(axis.get_xbound())
)
def handler(event):
loc = event.xdata
button = event.button
vline = vlines_map[button]
vlines_loc[button] = loc
vline.set_xdata(loc)
vline.set_visible(True)
locs = [
vlines_loc[button]
for button in buttons
]
delta = locs[1] - locs[0]
delta_widget.value = str(delta)
axis.get_figure().canvas.mpl_connect('button_press_event', handler)
display(delta_widget)
def interact_tasks(trace, tasks=None, kind=None):
"""
Decorator to make a block of code parametrized on a task that can be
selected from a dropdown.
:param trace: Trace object in use
:type trace: lisa.trace.Trace
:param tasks: List of tasks that are available. See ``kind`` for
alternative way of specifying tasks.
:type tasks: list(int or str or lisa.trace.TaskID) or None
:param kind: Alternatively to ``tasks``, a kind can be provided and the
tasks will be selected from the trace for you. It can be:
* ``rtapp`` to select all rt-app tasks
* ``all`` to select all tasks.
:type kind: str or None
**Example**::
trace = Trace('trace.dat')
# Allow selecting any rtapp task
@interact_tasks(trace, kind='rtapp')
def do_plot(task):
trace.analysis.load_tracking.plot_task_signals(task)
"""
if tasks is not None:
tasks = [
trace.get_task_id(task, update=False)
for task in tasks
]
else:
kind = kind or 'all'
if kind == 'all':
tasks = trace.task_ids
elif kind == 'rtapp':
tasks = trace.analysis.rta.rtapp_tasks
else:
raise ValueError(f'Unknown task kind: {kind}')
# Map of friendly names to actual objects
task_map = {
str(task): task
for task in tasks
}
def decorator(f):
@functools.wraps(f)
@interact
def wrapper(task=sorted(task_map.keys())):
task = task_map[task]
return f(task)
return wrapper
return decorator
def make_figure(width, height, nrows, ncols, interactive=None, **kwargs):
"""
Make a :class:`matplotlib.figure.Figure` and its axes.
:param width: Width of the figure.
:type width: int
:param height: Height of the figure.
:type height: int
:param interactive: If ``True``, create an interactive figure. Defaults to
``True`` when running under IPython, ``False`` otherwise.
:type interactive: bool or None
:Variable keyword arguments: Forwarded to :class:`matplotlib.figure.Figure`
:returns: A tuple of:
* :class:`matplotlib.figure.Figure`
* :class:`matplotlib.axes.Axes` as a scalar, an iterable (1D) or iterable of iterable matrix (2D)
"""
if interactive is None:
interactive = is_running_ipython()
if not interactive and tuple(map(int, mpl.__version__.split('.'))) <= (3, 0, 3):
warnings.warn('This version of matplotlib does not allow saving figures from axis created using Figure(), forcing interactive=True')
interactive = True
width *= ncols
height *= nrows
if interactive:
figure, axes = plt.subplots(
figsize=(width, height),
nrows=nrows,
ncols=ncols,
**kwargs,
)
else:
figure = Figure(figsize=(width, height))
axes = figure.subplots(ncols=ncols, nrows=nrows, **kwargs)
return (figure, axes)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
|
|
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from urllib2 import HTTPError, URLError
from httplib import BadStatusLine
from StringIO import StringIO
from lunr.api.controller.base import BaseController, NodeError
from lunr.common.config import LunrConfig
from testlunr.functional import Struct
from lunr.api.controller import base
from testlunr.unit import patch
from lunr import db
import unittest
class MockApp(object):
def __init__(self):
self.conf = LunrConfig(
{'db': {'auto_create': True, 'url': 'sqlite://', 'echo': False}})
# self.urlmap = urlmap
self.helper = db.configure(self.conf)
self.fill_percentage_limit = 0.5
self.fill_strategy = 'broad_fill'
self.node_timeout = None
self.image_convert_limit = 3
class TestVolumeController(unittest.TestCase):
def setUp(self):
self.node = Struct(hostname='localhost', port='8080')
self.app = MockApp()
def test_node_request_exception(self):
def raise_exc(*args, **kwargs):
raise BadStatusLine("something bad")
controller = BaseController({}, self.app)
with patch(base, 'urlopen', raise_exc):
with self.assertRaises(NodeError) as cm:
controller.node_request(self.node, 'PUT', '/volumes/vol-01')
# Assert the exception details are correct
self.assertEquals(cm.exception.detail,
"PUT on http://localhost:8080/volumes/vol-01 "
"failed with 'BadStatusLine'")
self.assertEquals(cm.exception.code, 503)
self.assertEquals(cm.exception.reason, "something bad")
def test_node_request_urllib2_httperror(self):
def raise_exc(*args, **kwargs):
fp = StringIO('{"reason": "something bad"}')
raise HTTPError('http://localhost/volumes/vol-01', 500,
'Internal Error', {}, fp)
controller = BaseController({}, self.app)
with patch(base, 'urlopen', raise_exc):
with self.assertRaises(NodeError) as cm:
controller.node_request(self.node, 'PUT', '/volumes/vol-01')
# Assert the exception details are correct
self.assertEquals(cm.exception.detail,
"PUT on http://localhost:8080/volumes/vol-01 "
"returned '500' with 'something bad'")
self.assertEquals(cm.exception.code, 500)
self.assertEquals(cm.exception.reason, "something bad")
def test_node_request_urllib2_urlerror(self):
def raise_exc(*args, **kwargs):
raise URLError("something bad")
controller = BaseController({}, self.app)
with patch(base, 'urlopen', raise_exc):
with self.assertRaises(NodeError) as cm:
controller.node_request(self.node, 'PUT', '/volumes/vol-01')
# Assert the exception details are correct
self.assertEquals(cm.exception.detail,
"PUT on http://localhost:8080/volumes/vol-01 "
"failed with 'something bad'")
self.assertEquals(cm.exception.code, 503)
self.assertEquals(cm.exception.reason, "something bad")
class TestFillStrategy(unittest.TestCase):
def setUp(self):
self.mock_app = MockApp()
self.db = self.mock_app.helper
self.vtype = db.models.VolumeType('vtype')
self.db.add(self.vtype)
self.nodes = {}
for i in range(10):
n = db.models.Node('node%s' % i, 100, volume_type=self.vtype,
hostname='10.127.0.%s' % i, port=8080 + i)
self.nodes[i] = n
self.db.add(n)
self.account = db.models.Account(id='someaccount')
self.db.add(self.account)
self.db.commit()
self.controller = BaseController({}, self.mock_app)
self.controller._account_id = self.account.id
def tearDown(self):
pass
def test_broad_fill_by_account(self):
account1 = self.account
account2 = db.models.Account()
limit = 3
self.db.add(account2)
# Give account1 a volume on 7/10 nodes.
for i in range(7):
node = self.nodes[i]
self.db.add(db.models.Volume(i, 'vtype', node=node,
account_id=account1.id,
volume_type=self.vtype))
self.db.commit()
nodes = self.controller.broad_fill('vtype', 1, limit).all()
node_ids = [x[0].id for x in nodes]
        # It should definitely pick the 3 empty nodes.
self.assertItemsEqual(
node_ids, (self.nodes[7].id, self.nodes[8].id, self.nodes[9].id))
# Fill up the other 3 nodes with account2 volumes.
for i in range(7, 10):
node = self.nodes[i]
# Lots more full than the other nodes!
self.db.add(db.models.Volume(20, 'vtype', node=node,
account_id=account2.id,
volume_type=self.vtype))
self.db.commit()
nodes = self.controller.broad_fill('vtype', 1, limit).all()
node_ids = [x[0].id for x in nodes]
# We STILL want the 3 nodes account1 doesn't have volumes on.
self.assertItemsEqual(
node_ids, (self.nodes[7].id, self.nodes[8].id, self.nodes[9].id))
# Put account1 volumes on two of those
self.db.add(db.models.Volume(10, 'vtype', node=self.nodes[8],
account_id=account1.id,
volume_type=self.vtype))
self.db.add(db.models.Volume(10, 'vtype', node=self.nodes[9],
account_id=account1.id,
volume_type=self.vtype))
self.db.commit()
nodes = self.controller.broad_fill('vtype', 1, limit).all()
node_ids = [x[0].id for x in nodes]
# 0 & 1 should be preferred (least used)
# and 7 still doesn't have any volumes for account1
self.assertItemsEqual(
node_ids, (self.nodes[0].id, self.nodes[1].id, self.nodes[7].id))
def test_sort_imaging_nodes(self):
# Number of volumes imaging is going to be the #1 sort criteria now.
shim_account = db.models.Account()
self.db.add(shim_account)
# Add one image to every node.
for i in xrange(10):
volume = db.models.Volume(1, 'vtype', node=self.nodes[i],
account_id=shim_account.id,
volume_type=self.vtype,
status='IMAGING')
self.db.add(volume)
# These nodes will now be preferred by deep fill.
for i in range(3):
volume = db.models.Volume(10, 'vtype', node=self.nodes[i],
account_id=shim_account.id,
volume_type=self.vtype)
self.db.add(volume)
# Just slightly less preferred.
for i in range(3, 6):
volume = db.models.Volume(9, 'vtype', node=self.nodes[i],
account_id=shim_account.id,
volume_type=self.vtype)
self.db.add(volume)
# Even slightly less preferred.
volume = db.models.Volume(8, 'vtype', node=self.nodes[7],
account_id=shim_account.id,
volume_type=self.vtype)
self.db.add(volume)
self.db.commit()
limit = 3
results = self.controller.deep_fill('vtype', 1, limit).all()
nodes = [r.Node for r in results]
expected = [self.nodes[0], self.nodes[1], self.nodes[2]]
self.assertItemsEqual(nodes, expected)
# Add a volume in IMAGING to node0,1,3,4
for i in (0, 1, 3, 4):
volume = db.models.Volume(1, 'vtype', node=self.nodes[i],
account_id=shim_account.id,
volume_type=self.vtype,
status='IMAGING')
self.db.add(volume)
self.db.commit()
q = self.controller.deep_fill('vtype', 1, limit, imaging=True)
results = q.all()
self.assertEquals(3, len(results))
nodes = [r.Node for r in results]
expected = [self.nodes[2], self.nodes[5], self.nodes[7]]
self.assertItemsEqual(nodes, expected)
def test_imaging_limit(self):
shim_account = db.models.Account()
self.db.add(shim_account)
# These nodes will now be preferred by deep fill.
for i in range(3):
volume = db.models.Volume(10, 'vtype', node=self.nodes[i],
account_id=shim_account.id,
volume_type=self.vtype)
self.db.add(volume)
        # Add three volumes in IMAGING to node0
for i in xrange(3):
volume = db.models.Volume(1, 'vtype', node=self.nodes[0],
account_id=shim_account.id,
volume_type=self.vtype,
status='IMAGING')
self.db.add(volume)
self.db.commit()
limit = 3
results = self.controller.deep_fill('vtype', 1, limit).all()
nodes = [r.Node for r in results]
expected = [self.nodes[0], self.nodes[1], self.nodes[2]]
# Add one more!
volume = db.models.Volume(1, 'vtype', node=self.nodes[0],
account_id=shim_account.id,
volume_type=self.vtype,
status='IMAGING')
self.db.add(volume)
self.db.commit()
results = self.controller.deep_fill('vtype', 1, limit,
imaging=True).all()
nodes = [r.Node for r in results]
self.assertNotIn(self.nodes[0], nodes)
if __name__ == "__main__":
unittest.main()
|
|
########################################################################################
# Davi Frossard, 2016 #
# VGG16 implementation in TensorFlow #
# Details: #
# http://www.cs.toronto.edu/~frossard/post/vgg16/ #
# #
# Model from https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md #
# Weights from Caffe converted using https://github.com/ethereon/caffe-tensorflow #
########################################################################################
import tensorflow as tf
import numpy as np
from scipy.misc import imread, imresize
from .imagenet_classes import class_names
class vgg16:
def __init__(self, imgs, weights=None, sess=None):
self.imgs = imgs
self.convlayers()
self.fc_layers()
self.probs = tf.nn.softmax(self.fc3l)
if weights is not None and sess is not None:
self.load_weights(weights, sess)
def convlayers(self):
self.parameters = []
# zero-mean input
with tf.name_scope('preprocess') as scope:
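            # These constants are the per-channel ImageNet means in RGB order,
            # so inputs are expected as RGB images with values in the 0-255 range.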
mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
images = self.imgs-mean
# conv1_1
with tf.name_scope('conv1_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 3, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv1_2
with tf.name_scope('conv1_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 64], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv1_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv1_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool1
self.pool1 = tf.nn.max_pool(self.conv1_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool1')
# conv2_1
with tf.name_scope('conv2_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 64, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv2_2
with tf.name_scope('conv2_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 128], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv2_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[128], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv2_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool2
self.pool2 = tf.nn.max_pool(self.conv2_2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool2')
# conv3_1
with tf.name_scope('conv3_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 128, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_2
with tf.name_scope('conv3_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv3_3
with tf.name_scope('conv3_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv3_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv3_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool3
self.pool3 = tf.nn.max_pool(self.conv3_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool3')
# conv4_1
with tf.name_scope('conv4_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool3, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_2
with tf.name_scope('conv4_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv4_3
with tf.name_scope('conv4_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv4_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv4_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool4
self.pool4 = tf.nn.max_pool(self.conv4_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool4')
# conv5_1
with tf.name_scope('conv5_1') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.pool4, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_1 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_2
with tf.name_scope('conv5_2') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_2 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# conv5_3
with tf.name_scope('conv5_3') as scope:
kernel = tf.Variable(tf.truncated_normal([3, 3, 512, 512], dtype=tf.float32,
stddev=1e-1), name='weights')
conv = tf.nn.conv2d(self.conv5_2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.0, shape=[512], dtype=tf.float32),
trainable=True, name='biases')
out = tf.nn.bias_add(conv, biases)
self.conv5_3 = tf.nn.relu(out, name=scope)
self.parameters += [kernel, biases]
# pool5
self.pool5 = tf.nn.max_pool(self.conv5_3,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
                               name='pool5')
def fc_layers(self):
# fc1
with tf.name_scope('fc1') as scope:
shape = int(np.prod(self.pool5.get_shape()[1:]))
fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
trainable=True, name='biases')
pool5_flat = tf.reshape(self.pool5, [-1, shape])
fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
self.fc1 = tf.nn.relu(fc1l)
self.parameters += [fc1w, fc1b]
# fc2
with tf.name_scope('fc2') as scope:
fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
trainable=True, name='biases')
fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
self.fc2 = tf.nn.relu(fc2l)
self.parameters += [fc2w, fc2b]
# fc3
with tf.name_scope('fc3') as scope:
fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
trainable=True, name='biases')
self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
self.parameters += [fc3w, fc3b]
def load_weights(self, weight_file, sess):
weights = np.load(weight_file)
keys = sorted(weights.keys())
for i, k in enumerate(keys):
print(i, k, np.shape(weights[k]))
sess.run(self.parameters[i].assign(weights[k]))
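# Note on the weights file (an assumption about the published vgg16_weights.npz,
# not verified here): load_weights() pairs sorted(weights.keys()) with
# self.parameters positionally, which only works if the archive's keys sort in
# the same order the parameters were appended, e.g.
#     conv1_1_W, conv1_1_b, conv1_2_W, conv1_2_b, ..., fc6_W, fc6_b, fc7_W, fc7_b, fc8_W, fc8_b
# A minimal sanity check before loading could be:
#     weights = np.load('vgg16_weights.npz')
#     assert len(weights.files) == len(vgg.parameters)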
if __name__ == '__main__':
sess = tf.Session()
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)
img1 = imread('laska.png', mode='RGB')
img1 = imresize(img1, (224, 224))
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: [img1]})[0]
preds = (np.argsort(prob)[::-1])[0:5]
for p in preds:
print(class_names[p], prob[p])
|
|
"""Taiga integration for Zulip.
Tips for notification output:
*Text formatting*: if there has been a change of a property, the new
value should always be in bold; otherwise the subject of US/task
should be in bold.
"""
from typing import Any, Dict, List, Mapping, Optional, Tuple
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view("Taiga")
@has_request_variables
def api_taiga_webhook(
request: HttpRequest,
user_profile: UserProfile,
message: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
parsed_events = parse_message(message)
content_lines = []
for event in parsed_events:
content_lines.append(generate_content(event) + "\n")
content = "".join(sorted(content_lines))
topic = "General"
if message["data"].get("milestone") is not None:
if message["data"]["milestone"].get("name") is not None:
topic = message["data"]["milestone"]["name"]
check_send_webhook_message(request, user_profile, topic, content)
return json_success()
templates = {
"epic": {
"create": "[{user}]({user_link}) created epic {subject}.",
"set_assigned_to": "[{user}]({user_link}) assigned epic {subject} to {new}.",
"unset_assigned_to": "[{user}]({user_link}) unassigned epic {subject}.",
"changed_assigned_to": "[{user}]({user_link}) reassigned epic {subject}"
" from {old} to {new}.",
"blocked": "[{user}]({user_link}) blocked epic {subject}.",
"unblocked": "[{user}]({user_link}) unblocked epic {subject}.",
"changed_status": "[{user}]({user_link}) changed status of epic {subject}"
" from {old} to {new}.",
"renamed": "[{user}]({user_link}) renamed epic from **{old}** to **{new}**.",
"description_diff": "[{user}]({user_link}) updated description of epic {subject}.",
"commented": "[{user}]({user_link}) commented on epic {subject}.",
"delete": "[{user}]({user_link}) deleted epic {subject}.",
},
"relateduserstory": {
"create": (
"[{user}]({user_link}) added a related user story "
"{userstory_subject} to the epic {epic_subject}."
),
"delete": (
"[{user}]({user_link}) removed a related user story "
+ "{userstory_subject} from the epic {epic_subject}."
),
},
"userstory": {
"create": "[{user}]({user_link}) created user story {subject}.",
"set_assigned_to": "[{user}]({user_link}) assigned user story {subject} to {new}.",
"unset_assigned_to": "[{user}]({user_link}) unassigned user story {subject}.",
"changed_assigned_to": "[{user}]({user_link}) reassigned user story {subject}"
" from {old} to {new}.",
"points": "[{user}]({user_link}) changed estimation of user story {subject}.",
"blocked": "[{user}]({user_link}) blocked user story {subject}.",
"unblocked": "[{user}]({user_link}) unblocked user story {subject}.",
"set_milestone": "[{user}]({user_link}) added user story {subject} to sprint {new}.",
"unset_milestone": "[{user}]({user_link}) removed user story {subject} from sprint {old}.",
"changed_milestone": "[{user}]({user_link}) changed sprint of user story {subject} from {old}"
" to {new}.",
"changed_status": "[{user}]({user_link}) changed status of user story {subject}"
" from {old} to {new}.",
"closed": "[{user}]({user_link}) closed user story {subject}.",
"reopened": "[{user}]({user_link}) reopened user story {subject}.",
"renamed": "[{user}]({user_link}) renamed user story from {old} to **{new}**.",
"description_diff": "[{user}]({user_link}) updated description of user story {subject}.",
"commented": "[{user}]({user_link}) commented on user story {subject}.",
"delete": "[{user}]({user_link}) deleted user story {subject}.",
"due_date": "[{user}]({user_link}) changed due date of user story {subject}"
" from {old} to {new}.",
"set_due_date": "[{user}]({user_link}) set due date of user story {subject}" " to {new}.",
},
"milestone": {
"create": "[{user}]({user_link}) created sprint {subject}.",
"renamed": "[{user}]({user_link}) renamed sprint from {old} to **{new}**.",
"estimated_start": "[{user}]({user_link}) changed estimated start of sprint {subject}"
" from {old} to {new}.",
"estimated_finish": "[{user}]({user_link}) changed estimated finish of sprint {subject}"
" from {old} to {new}.",
"set_estimated_start": "[{user}]({user_link}) changed estimated start of sprint {subject}"
" to {new}.",
"set_estimated_finish": "[{user}]({user_link}) set estimated finish of sprint {subject}"
" to {new}.",
"delete": "[{user}]({user_link}) deleted sprint {subject}.",
},
"task": {
"create": "[{user}]({user_link}) created task {subject}.",
"set_assigned_to": "[{user}]({user_link}) assigned task {subject} to {new}.",
"unset_assigned_to": "[{user}]({user_link}) unassigned task {subject}.",
"changed_assigned_to": "[{user}]({user_link}) reassigned task {subject}"
" from {old} to {new}.",
"blocked": "[{user}]({user_link}) blocked task {subject}.",
"unblocked": "[{user}]({user_link}) unblocked task {subject}.",
"changed_status": "[{user}]({user_link}) changed status of task {subject}"
" from {old} to {new}.",
"renamed": "[{user}]({user_link}) renamed task {old} to **{new}**.",
"description_diff": "[{user}]({user_link}) updated description of task {subject}.",
"set_milestone": "[{user}]({user_link}) added task {subject} to sprint {new}.",
"commented": "[{user}]({user_link}) commented on task {subject}.",
"delete": "[{user}]({user_link}) deleted task {subject}.",
"changed_us": "[{user}]({user_link}) moved task {subject} from user story {old} to {new}.",
"due_date": "[{user}]({user_link}) changed due date of task {subject}"
" from {old} to {new}.",
"set_due_date": "[{user}]({user_link}) set due date of task {subject}" " to {new}.",
},
"issue": {
"create": "[{user}]({user_link}) created issue {subject}.",
"set_assigned_to": "[{user}]({user_link}) assigned issue {subject} to {new}.",
"unset_assigned_to": "[{user}]({user_link}) unassigned issue {subject}.",
"changed_assigned_to": "[{user}]({user_link}) reassigned issue {subject}"
" from {old} to {new}.",
"set_milestone": "[{user}]({user_link}) added issue {subject} to sprint {new}.",
"unset_milestone": "[{user}]({user_link}) detached issue {subject} from sprint {old}.",
"changed_priority": "[{user}]({user_link}) changed priority of issue "
"{subject} from {old} to {new}.",
"changed_severity": "[{user}]({user_link}) changed severity of issue "
"{subject} from {old} to {new}.",
"changed_status": "[{user}]({user_link}) changed status of issue {subject}"
" from {old} to {new}.",
"changed_type": "[{user}]({user_link}) changed type of issue {subject} from {old} to {new}.",
"renamed": "[{user}]({user_link}) renamed issue {old} to **{new}**.",
"description_diff": "[{user}]({user_link}) updated description of issue {subject}.",
"commented": "[{user}]({user_link}) commented on issue {subject}.",
"delete": "[{user}]({user_link}) deleted issue {subject}.",
"due_date": "[{user}]({user_link}) changed due date of issue {subject}"
" from {old} to {new}.",
"set_due_date": "[{user}]({user_link}) set due date of issue {subject}" " to {new}.",
"blocked": "[{user}]({user_link}) blocked issue {subject}.",
"unblocked": "[{user}]({user_link}) unblocked issue {subject}.",
},
"webhook_test": {
"test": "[{user}]({user_link}) triggered a test of the Taiga integration.",
},
}
return_type = Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]
def get_old_and_new_values(change_type: str, message: Mapping[str, Any]) -> return_type:
"""Parses the payload and finds previous and current value of change_type."""
old = message["change"]["diff"][change_type].get("from")
new = message["change"]["diff"][change_type].get("to")
return old, new
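# Illustrative example (the payload shape is sketched from the fields read above,
# not a verbatim Taiga sample):
#
#     message = {"change": {"diff": {"status": {"from": "New", "to": "In progress"}}}}
#     get_old_and_new_values("status", message)  # -> ("New", "In progress")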
def parse_comment(message: Mapping[str, Any]) -> Dict[str, Any]:
"""Parses the comment to issue, task or US."""
return {
"event": "commented",
"type": message["type"],
"values": {
"user": get_owner_name(message),
"user_link": get_owner_link(message),
"subject": get_subject(message),
},
}
def parse_create_or_delete(message: Mapping[str, Any]) -> Dict[str, Any]:
"""Parses create or delete event."""
if message["type"] == "relateduserstory":
return {
"type": message["type"],
"event": message["action"],
"values": {
"user": get_owner_name(message),
"user_link": get_owner_link(message),
"epic_subject": get_epic_subject(message),
"userstory_subject": get_userstory_subject(message),
},
}
return {
"type": message["type"],
"event": message["action"],
"values": {
"user": get_owner_name(message),
"user_link": get_owner_link(message),
"subject": get_subject(message),
},
}
def parse_change_event(change_type: str, message: Mapping[str, Any]) -> Optional[Dict[str, Any]]:
"""Parses change event."""
evt: Dict[str, Any] = {}
values: Dict[str, Any] = {
"user": get_owner_name(message),
"user_link": get_owner_link(message),
"subject": get_subject(message),
}
if change_type in ["description_diff", "points"]:
event_type = change_type
elif change_type in ["milestone", "assigned_to"]:
old, new = get_old_and_new_values(change_type, message)
if not old:
event_type = "set_" + change_type
values["new"] = new
elif not new:
event_type = "unset_" + change_type
values["old"] = old
else:
event_type = "changed_" + change_type
values.update(old=old, new=new)
elif change_type == "is_blocked":
if message["change"]["diff"]["is_blocked"]["to"]:
event_type = "blocked"
else:
event_type = "unblocked"
elif change_type == "is_closed":
if message["change"]["diff"]["is_closed"]["to"]:
event_type = "closed"
else:
event_type = "reopened"
elif change_type == "user_story":
old, new = get_old_and_new_values(change_type, message)
event_type = "changed_us"
values.update(old=old, new=new)
elif change_type in ["subject", "name"]:
event_type = "renamed"
old, new = get_old_and_new_values(change_type, message)
values.update(old=old, new=new)
elif change_type in ["estimated_finish", "estimated_start", "due_date"]:
old, new = get_old_and_new_values(change_type, message)
if not old:
event_type = "set_" + change_type
values["new"] = new
elif not old == new:
event_type = change_type
values.update(old=old, new=new)
else:
# date hasn't changed
return None
elif change_type in ["priority", "severity", "type", "status"]:
event_type = "changed_" + change_type
old, new = get_old_and_new_values(change_type, message)
values.update(old=old, new=new)
else:
# we are not supporting this type of event
return None
evt.update(type=message["type"], event=event_type, values=values)
return evt
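# Illustrative example (payload shape assumed from the fields read above, not a
# verbatim Taiga sample): a user story gaining a sprint yields "set_milestone",
# losing one yields "unset_milestone", and switching sprints yields
# "changed_milestone".
#
#     msg = {
#         "type": "userstory",
#         "by": {"full_name": "Ada", "permalink": "https://example.com/ada"},
#         "data": {"subject": "Checkout flow"},
#         "change": {"diff": {"milestone": {"from": None, "to": "Sprint 3"}}},
#     }
#     parse_change_event("milestone", msg)
#     # -> {"type": "userstory", "event": "set_milestone",
#     #     "values": {"user": "Ada", "user_link": "https://example.com/ada",
#     #                "subject": "**Checkout flow**", "new": "Sprint 3"}}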
def parse_webhook_test(message: Mapping[str, Any]) -> Dict[str, Any]:
return {
"type": "webhook_test",
"event": "test",
"values": {
"user": get_owner_name(message),
"user_link": get_owner_link(message),
"end_type": "test",
},
}
def parse_message(message: Mapping[str, Any]) -> List[Dict[str, Any]]:
"""Parses the payload by delegating to specialized functions."""
events = []
if message["action"] in ["create", "delete"]:
events.append(parse_create_or_delete(message))
elif message["action"] == "change":
if message["change"]["diff"]:
for value in message["change"]["diff"]:
parsed_event = parse_change_event(value, message)
if parsed_event:
events.append(parsed_event)
if message["change"]["comment"]:
events.append(parse_comment(message))
elif message["action"] == "test":
events.append(parse_webhook_test(message))
return events
def generate_content(data: Mapping[str, Any]) -> str:
"""Gets the template string and formats it with parsed data."""
template = templates[data["type"]][data["event"]]
content = template.format(**data["values"])
return content
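# Illustrative example (values made up for the sketch):
#
#     generate_content({
#         "type": "task",
#         "event": "blocked",
#         "values": {"user": "Ada", "user_link": "https://example.com/ada",
#                    "subject": "**Fix login**"},
#     })
#     # -> "[Ada](https://example.com/ada) blocked task **Fix login**."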
def get_owner_name(message: Mapping[str, Any]) -> str:
return message["by"]["full_name"]
def get_owner_link(message: Mapping[str, Any]) -> str:
return message["by"]["permalink"]
def get_subject(message: Mapping[str, Any]) -> str:
data = message["data"]
if "permalink" in data:
return "[" + data.get("subject", data.get("name")) + "]" + "(" + data["permalink"] + ")"
return "**" + data.get("subject", data.get("name")) + "**"
def get_epic_subject(message: Mapping[str, Any]) -> str:
if "permalink" in message["data"]["epic"]:
return (
"["
+ message["data"]["epic"]["subject"]
+ "]"
+ "("
+ message["data"]["epic"]["permalink"]
+ ")"
)
return "**" + message["data"]["epic"]["subject"] + "**"
def get_userstory_subject(message: Mapping[str, Any]) -> str:
if "permalink" in message["data"]["user_story"]:
us_data = message["data"]["user_story"]
return "[" + us_data["subject"] + "]" + "(" + us_data["permalink"] + ")"
return "**" + message["data"]["user_story"]["subject"] + "**"
|
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': 'lib',
'PATH': 'bin',
'PKG_CONFIG_PATH': 'lib/pkgconfig',
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
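# For example, a workspace at /opt/ros/hydro (purely illustrative) would map to
# /opt/ros/hydro on CMAKE_PREFIX_PATH, /opt/ros/hydro/lib on LD_LIBRARY_PATH
# (DYLD_LIBRARY_PATH on macOS), /opt/ros/hydro/bin on PATH and
# /opt/ros/hydro/lib/python2.7/dist-packages on PYTHONPATH.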
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolder = env_var_subfolders[key]
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
    :param subfolder: str '' or subfolder name that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
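# Illustrative example (paths are made up): with
#     environ = {'PATH': '/home/user/ws/devel/bin:/usr/bin'}
# and /home/user/ws/devel listed as a catkin workspace in CMAKE_PREFIX_PATH,
#     _rollback_env_variable(environ, 'PATH', 'bin')
# returns '/usr/bin'; it returns None when no entry had to be removed.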
def _get_workspaces(environ, include_fuerte=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
    :param include_fuerte: Whether paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte'))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
    for all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolder):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in ``paths`` (suffixed with ``subfolder``) without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if subfolder:
path = os.path.join(path, subfolder)
# exclude any path already in env and any path we already added
if path not in environ_paths and path not in checked_paths:
checked_paths.append(path)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
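# Illustrative example (paths are made up): if PYTHONPATH already contains
# /opt/ros/hydro/lib/python2.7/dist-packages and the workspaces are
# ['/home/user/ws/devel', '/opt/ros/hydro'], then
#     _prefix_env_variable(environ, 'PYTHONPATH', paths, 'lib/python2.7/dist-packages')
# returns '/home/user/ws/devel/lib/python2.7/dist-packages' plus a trailing
# os.pathsep, skipping the entry that is already present.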
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
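# Illustrative output (non-Windows shell assumed): prepend(environ, 'PATH',
# '/home/user/ws/devel/bin:') emits
#     export PATH="/home/user/ws/devel/bin:$PATH"
# when PATH is already set in environ, and falls back to a plain assignment
# otherwise; on Windows the equivalent `set PATH=...%PATH%` line is emitted.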
def find_env_hooks(environ, cmake_prefix_path):
'''
    Generate shell code with the environment hooks found
    in all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/marco/catkin_ws/devel;/opt/ros/hydro'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
sys.exit(0)
|
|
import os
from cStringIO import StringIO
import datetime
import unittest
from string import letters
from hashlib import md5
import decimal
# LIBRARIES
from django.core.files.uploadhandler import StopFutureHandlers
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import connections
from django.db import DataError, models
from django.db.models.query import Q
from django.forms import ModelForm
from django.test import RequestFactory
from django.utils.safestring import SafeText
from django.forms.models import modelformset_factory
from django.db.models.sql.datastructures import EmptyResultSet
from google.appengine.api.datastore_errors import EntityNotFoundError, BadValueError
from google.appengine.api import datastore
from google.appengine.ext import deferred
from google.appengine.api import taskqueue
from django.test.utils import override_settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
# DJANGAE
from djangae.contrib import sleuth
from djangae.test import inconsistent_db, TestCase
from django.db import IntegrityError as DjangoIntegrityError
from djangae.db.backends.appengine.dbapi import CouldBeSupportedError, NotSupportedError, IntegrityError
from djangae.db.constraints import UniqueMarker, UniquenessMixin
from djangae.db.unique_utils import _unique_combinations, unique_identifiers_from_entity
from djangae.indexing import add_special_index
from djangae.db.utils import entity_matches_query, decimal_to_string, normalise_field_value
from djangae.db.backends.appengine import caching
from djangae.db.unique_utils import query_is_unique
from djangae.db import transaction
from djangae.fields import ComputedCharField, ShardedCounterField, SetField, ListField, GenericRelationField
from djangae.models import CounterShard
from djangae.db.backends.appengine.dnf import parse_dnf
from djangae.storage import BlobstoreFileUploadHandler
from djangae.wsgi import DjangaeApplication
from djangae.core import paginator
from django.template import Template, Context
from djangae.fields import RelatedSetField
try:
import webtest
except ImportError:
webtest = NotImplemented
class TestUser(models.Model):
username = models.CharField(max_length=32)
email = models.EmailField()
last_login = models.DateField(auto_now_add=True)
field2 = models.CharField(max_length=32)
def __unicode__(self):
return self.username
class Meta:
app_label = "djangae"
class UniqueModel(models.Model):
unique_field = models.CharField(max_length=100, unique=True)
unique_combo_one = models.IntegerField(blank=True, default=0)
unique_combo_two = models.CharField(max_length=100, blank=True, default="")
unique_relation = models.ForeignKey('self', null=True, blank=True, unique=True)
unique_set_field = SetField(models.CharField(max_length=500), unique=True)
unique_list_field = ListField(models.CharField(max_length=500), unique=True)
unique_together_list_field = ListField(models.IntegerField())
class Meta:
unique_together = [
("unique_combo_one", "unique_combo_two"),
("unique_together_list_field", "unique_combo_one")
]
app_label = "djangae"
class UniqueModelWithLongPK(models.Model):
long_pk = models.CharField(max_length=500, primary_key=True)
unique_field = models.IntegerField(unique=True)
class IntegerModel(models.Model):
integer_field = models.IntegerField(default=0)
class Meta:
app_label = "djangae"
class TestFruit(models.Model):
name = models.CharField(primary_key=True, max_length=32)
origin = models.CharField(max_length=32, default="Unknown")
color = models.CharField(max_length=100)
is_mouldy = models.BooleanField(default=False)
class Meta:
ordering = ("color",)
app_label = "djangae"
def __unicode__(self):
return self.name
def __repr__(self):
return "<TestFruit: name={}, color={}>".format(self.name, self.color)
class Permission(models.Model):
user = models.ForeignKey(TestUser)
perm = models.CharField(max_length=32)
def __unicode__(self):
return u"{0} for {1}".format(self.perm, self.user)
class Meta:
ordering = ('user__username', 'perm')
app_label = "djangae"
class SelfRelatedModel(models.Model):
related = models.ForeignKey('self', blank=True, null=True)
class Meta:
app_label = "djangae"
class MultiTableParent(models.Model):
parent_field = models.CharField(max_length=32)
class Meta:
app_label = "djangae"
class MultiTableChildOne(MultiTableParent):
child_one_field = models.CharField(max_length=32)
class Meta:
app_label = "djangae"
class MultiTableChildTwo(MultiTableParent):
child_two_field = models.CharField(max_length=32)
class Meta:
app_label = "djangae"
class Relation(models.Model):
class Meta:
app_label = "djangae"
class Related(models.Model):
headline = models.CharField(max_length=500)
relation = models.ForeignKey(Relation)
class Meta:
app_label = "djangae"
class NullDate(models.Model):
date = models.DateField(null=True, default=None)
datetime = models.DateTimeField(null=True, default=None)
time = models.TimeField(null=True, default=None)
class Meta:
app_label = "djangae"
class ModelWithUniques(models.Model):
name = models.CharField(max_length=64, unique=True)
class Meta:
app_label = "djangae"
class ModelWithUniquesOnForeignKey(models.Model):
name = models.CharField(max_length=64, unique=True)
related_name = models.ForeignKey(ModelWithUniques, unique=True)
class Meta:
unique_together = [("name", "related_name")]
app_label = "djangae"
class ModelWithDates(models.Model):
start = models.DateField()
end = models.DateField()
class Meta:
app_label = "djangae"
class ModelWithUniquesAndOverride(models.Model):
name = models.CharField(max_length=64, unique=True)
class Djangae:
disable_constraint_checks = False
class Meta:
app_label = "djangae"
class ISOther(models.Model):
name = models.CharField(max_length=500)
def __unicode__(self):
return "%s:%s" % (self.pk, self.name)
class Meta:
app_label = "djangae"
class RelationWithoutReverse(models.Model):
name = models.CharField(max_length=500)
class Meta:
app_label = "djangae"
class ISModel(models.Model):
related_things = RelatedSetField(ISOther)
limted_related = RelatedSetField(RelationWithoutReverse, limit_choices_to={'name': 'banana'}, related_name="+")
children = RelatedSetField("self", related_name="+")
class Meta:
app_label = "djangae"
class RelationWithOverriddenDbTable(models.Model):
class Meta:
db_table = "bananarama"
app_label = "djangae"
class GenericRelationModel(models.Model):
relation_to_content_type = GenericRelationField(ContentType, null=True)
relation_to_weird = GenericRelationField(RelationWithOverriddenDbTable, null=True)
class Meta:
app_label = "djangae"
class SpecialIndexesModel(models.Model):
name = models.CharField(max_length=255)
def __unicode__(self):
return self.name
class Meta:
app_label = "djangae"
class PaginatorModel(models.Model):
foo = models.IntegerField()
class Meta:
app_label = "djangae"
class IterableFieldModel(models.Model):
set_field = SetField(models.CharField(max_length=1))
list_field = ListField(models.CharField(max_length=1))
class Meta:
app_label = "djangae"
class BackendTests(TestCase):
def test_entity_matches_query(self):
entity = datastore.Entity("test_model")
entity["name"] = "Charlie"
entity["age"] = 22
query = datastore.Query("test_model")
query["name ="] = "Charlie"
self.assertTrue(entity_matches_query(entity, query))
query["age >="] = 5
self.assertTrue(entity_matches_query(entity, query))
del query["age >="]
query["age <"] = 22
self.assertFalse(entity_matches_query(entity, query))
del query["age <"]
query["age <="] = 22
self.assertTrue(entity_matches_query(entity, query))
del query["age <="]
query["name ="] = "Fred"
self.assertFalse(entity_matches_query(entity, query))
        # If the entity has a list field, the entity matches the query
        # as long as any value in the list matches
entity["name"] = [ "Bob", "Fred", "Dave" ]
self.assertTrue(entity_matches_query(entity, query)) # ListField test
def test_setting_non_null_null_throws_integrity_error(self):
with self.assertRaises(DjangoIntegrityError):
IntegerModel.objects.create(integer_field=None)
with self.assertRaises(DjangoIntegrityError):
instance = IntegerModel()
instance.integer_field = None
instance.save()
with self.assertRaises(DjangoIntegrityError):
instance = IntegerModel.objects.create(integer_field=1)
instance = IntegerModel.objects.get()
instance.integer_field = None
instance.save()
def test_normalise_field_value(self):
self.assertEqual(u'0000475231073257', normalise_field_value(decimal.Decimal(475231073257)))
self.assertEqual(u'-0000475231073257', normalise_field_value(decimal.Decimal(-475231073257)))
self.assertEqual(u'0000000004752311', normalise_field_value(decimal.Decimal(4752310.73257)))
self.assertEqual(u'0000004752310733', normalise_field_value(decimal.Decimal(4752310732.57)))
self.assertEqual(datetime.datetime(2015, 1, 27, 2, 46, 8, 584258), normalise_field_value(datetime.datetime(2015, 1, 27, 2, 46, 8, 584258)))
def test_decimal_to_string(self):
self.assertEqual(u'0002312487812767', decimal_to_string(decimal.Decimal(2312487812767)))
self.assertEqual(u'-0002312487812767', decimal_to_string(decimal.Decimal(-2312487812767)))
self.assertEqual(u'002312487812', decimal_to_string(decimal.Decimal(2312487812), 12))
self.assertEqual(u'002387812.320', decimal_to_string(decimal.Decimal(2387812.32), 12, 3))
self.assertEqual(u'-002387812.513', decimal_to_string(decimal.Decimal(-2387812.513212), 12, 3))
self.assertEqual(u'0237812.000', decimal_to_string(decimal.Decimal(237812), 10, 3))
self.assertEqual(u'-0237812.210', decimal_to_string(decimal.Decimal(-237812.21), 10, 3))
def test_gae_conversion(self):
# A PK IN query should result in a single get by key
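        # (As used here, sleuth.switch swaps the named callable for the given
        # lambda inside the `with` block and records call counts, so none of
        # these queries touch a real datastore -- an observation from this
        # test's usage, not djangae's documented API.)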
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk__in=[1, 2, 3])) # Force the query to run
self.assertEqual(1, get_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Query.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.filter(username="test"))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.MultiQuery.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.filter(username__in=["test", "cheese"]))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk=1))
self.assertEqual(1, get_mock.call_count)
#FIXME: Issue #80
with self.assertRaises(CouldBeSupportedError):
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.MultiQuery.Run", lambda *args, **kwargs: []) as query_mock:
list(TestUser.objects.exclude(username__startswith="test"))
self.assertEqual(1, query_mock.call_count)
with sleuth.switch("djangae.db.backends.appengine.commands.datastore.Get", lambda *args, **kwargs: []) as get_mock:
list(TestUser.objects.filter(pk__in=[1, 2, 3, 4, 5, 6, 7, 8]).
filter(username__in=["test", "test2", "test3"]).filter(email__in=["[email protected]", "[email protected]"]))
self.assertEqual(1, get_mock.call_count)
def test_null_date_field(self):
null_date = NullDate()
null_date.save()
null_date = NullDate.objects.get()
self.assertIsNone(null_date.date)
self.assertIsNone(null_date.time)
self.assertIsNone(null_date.datetime)
def test_convert_unicode_subclasses_to_unicode(self):
# The App Engine SDK raises BadValueError if you try saving a SafeText
# string to a CharField. Djangae explicitly converts it to unicode.
grue = SafeText(u'grue')
self.assertIsInstance(grue, unicode)
self.assertNotEqual(type(grue), unicode)
obj = TestFruit.objects.create(name=u'foo', color=grue)
obj = TestFruit.objects.get(pk=obj.pk)
self.assertEqual(type(obj.color), unicode)
obj = TestFruit.objects.filter(color=grue)[0]
self.assertEqual(type(obj.color), unicode)
def test_notsupportederror_thrown_on_too_many_inequalities(self):
TestFruit.objects.create(name="Apple", color="Green", origin="England")
pear = TestFruit.objects.create(name="Pear", color="Green")
banana = TestFruit.objects.create(name="Banana", color="Yellow")
# Excluding one field is fine
self.assertItemsEqual([pear, banana], list(TestFruit.objects.exclude(name="Apple")))
# Excluding a field, and doing a > or < on another is not so fine
with self.assertRaises(DataError):
self.assertEqual(pear, TestFruit.objects.exclude(origin="England").filter(color__lt="Yellow").get())
# Same with excluding two fields
with self.assertRaises(DataError):
list(TestFruit.objects.exclude(origin="England").exclude(color="Yellow"))
# But apparently excluding the same field twice is OK
self.assertItemsEqual([banana], list(TestFruit.objects.exclude(origin="England").exclude(name="Pear").order_by("origin")))
def test_excluding_pks_is_emulated(self):
apple = TestFruit.objects.create(name="Apple", color="Green", is_mouldy=True, origin="England")
banana = TestFruit.objects.create(name="Banana", color="Yellow", is_mouldy=True, origin="Dominican Republic")
cherry = TestFruit.objects.create(name="Cherry", color="Red", is_mouldy=True, origin="Germany")
pear = TestFruit.objects.create(name="Pear", color="Green", origin="England")
self.assertEqual([apple, pear], list(TestFruit.objects.filter(origin__lt="Germany").exclude(pk=banana.pk).exclude(pk=cherry.pk).order_by("origin")))
self.assertEqual([apple, cherry], list(TestFruit.objects.exclude(origin="Dominican Republic").exclude(pk=pear.pk).order_by("origin")))
self.assertEqual([], list(TestFruit.objects.filter(is_mouldy=True).filter(color="Green", origin__gt="England").exclude(pk=pear.pk).order_by("-origin")))
self.assertEqual([cherry, banana], list(TestFruit.objects.exclude(pk=pear.pk).order_by("-name")[:2]))
self.assertEqual([banana, apple], list(TestFruit.objects.exclude(pk=pear.pk).order_by("origin", "name")[:2]))
class ModelFormsetTest(TestCase):
def test_reproduce_index_error(self):
class TestModelForm(ModelForm):
class Meta:
model = TestUser
test_model = TestUser.objects.create(username='foo', field2='bar')
TestModelFormSet = modelformset_factory(TestUser, form=TestModelForm, extra=0)
TestModelFormSet(queryset=TestUser.objects.filter(pk=test_model.pk))
data = {
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 0,
'form-TOTAL_FORMS': 0,
'form-0-id': test_model.id,
'form-0-field1': 'foo_1',
'form-0-field2': 'bar_1',
}
factory = RequestFactory()
request = factory.post('/', data=data)
TestModelFormSet(request.POST, request.FILES)
class CacheTests(TestCase):
def test_cache_set(self):
cache.set('test?', 'yes!')
self.assertEqual(cache.get('test?'), 'yes!')
def test_cache_timeout(self):
cache.set('test?', 'yes!', 1)
import time
time.sleep(1)
self.assertEqual(cache.get('test?'), None)
class TransactionTests(TestCase):
def test_atomic_decorator(self):
@transaction.atomic
def txn():
TestUser.objects.create(username="foo", field2="bar")
raise ValueError()
with self.assertRaises(ValueError):
txn()
self.assertEqual(0, TestUser.objects.count())
def test_interaction_with_datastore_txn(self):
from google.appengine.ext import db
from google.appengine.datastore.datastore_rpc import TransactionOptions
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def some_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def some_non_indie_txn(_username):
TestUser.objects.create(username=_username)
@db.transactional()
def double_nested_transactional():
@db.transactional(propagation=TransactionOptions.INDEPENDENT)
def do_stuff():
TestUser.objects.create(username="Double")
raise ValueError()
try:
return do_stuff
except:
return
with transaction.atomic():
double_nested_transactional()
@db.transactional()
def something_containing_atomic():
with transaction.atomic():
TestUser.objects.create(username="Inner")
something_containing_atomic()
with transaction.atomic():
with transaction.atomic():
some_non_indie_txn("Bob1")
some_indie_txn("Bob2")
some_indie_txn("Bob3")
with transaction.atomic(independent=True):
some_non_indie_txn("Fred1")
some_indie_txn("Fred2")
some_indie_txn("Fred3")
def test_atomic_context_manager(self):
with self.assertRaises(ValueError):
with transaction.atomic():
TestUser.objects.create(username="foo", field2="bar")
raise ValueError()
self.assertEqual(0, TestUser.objects.count())
def test_xg_argument(self):
@transaction.atomic(xg=True)
def txn(_username):
TestUser.objects.create(username=_username, field2="bar")
TestFruit.objects.create(name="Apple", color="pink")
raise ValueError()
with self.assertRaises(ValueError):
txn("foo")
self.assertEqual(0, TestUser.objects.count())
self.assertEqual(0, TestFruit.objects.count())
def test_independent_argument(self):
"""
        We would get an XG error if the inner transaction was not independent
"""
@transaction.atomic
def txn1(_username, _fruit):
@transaction.atomic(independent=True)
def txn2(_fruit):
TestFruit.objects.create(name=_fruit, color="pink")
raise ValueError()
TestUser.objects.create(username=_username)
txn2(_fruit)
with self.assertRaises(ValueError):
txn1("test", "banana")
class QueryNormalizationTests(TestCase):
"""
The parse_dnf function takes a Django where tree, and converts it
into a tree of one of the following forms:
[ (column, operator, value), (column, operator, value) ] <- AND only query
[ [(column, operator, value)], [(column, operator, value) ]] <- OR query, of multiple ANDs
"""
def test_and_queries(self):
connection = connections['default']
qs = TestUser.objects.filter(username="test").all()
self.assertEqual(('OR', [('LIT', ('username', '=', 'test'))]), parse_dnf(qs.query.where, connection=connection)[0])
qs = TestUser.objects.filter(username="test", email="[email protected]")
expected = ('OR', [('AND', [('LIT', ('username', '=', 'test')), ('LIT', ('email', '=', '[email protected]'))])])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
#
qs = TestUser.objects.filter(username="test").exclude(email="[email protected]")
expected = ('OR', [
('AND', [('LIT', ('username', '=', 'test')), ('LIT', ('email', '>', '[email protected]'))]),
('AND', [('LIT', ('username', '=', 'test')), ('LIT', ('email', '<', '[email protected]'))])
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
qs = TestUser.objects.filter(username__lte="test").exclude(email="[email protected]")
expected = ('OR', [
('AND', [("username", "<=", "test"), ("email", ">", "[email protected]")]),
('AND', [("username", "<=", "test"), ("email", "<", "[email protected]")]),
])
#FIXME: This will raise a BadFilterError on the datastore, we should instead raise NotSupportedError in that case
#with self.assertRaises(NotSupportedError):
# parse_dnf(qs.query.where, connection=connection)
instance = Relation(pk=1)
qs = instance.related_set.filter(headline__startswith='Fir')
expected = ('OR', [('AND', [('LIT', ('relation_id', '=', 1)), ('LIT', ('_idx_startswith_headline', '=', u'Fir'))])])
norm = parse_dnf(qs.query.where, connection=connection)[0]
self.assertEqual(expected, norm)
def test_or_queries(self):
connection = connections['default']
qs = TestUser.objects.filter(
username="python").filter(
Q(username__in=["ruby", "jruby"]) | (Q(username="php") & ~Q(username="perl"))
)
# After IN and != explosion, we have...
# (AND: (username='python', OR: (username='ruby', username='jruby', AND: (username='php', AND: (username < 'perl', username > 'perl')))))
# Working backwards,
# AND: (username < 'perl', username > 'perl') can't be simplified
# AND: (username='php', AND: (username < 'perl', username > 'perl')) can become (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl'))
# OR: (username='ruby', username='jruby', (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl')) can't be simplified
# (AND: (username='python', OR: (username='ruby', username='jruby', (OR: (AND: username = 'php', username < 'perl'), (AND: username='php', username > 'perl'))
# becomes...
# (OR: (AND: username='python', username = 'ruby'), (AND: username='python', username='jruby'), (AND: username='python', username='php', username < 'perl') \
# (AND: username='python', username='php', username > 'perl')
expected = ('OR', [
('AND', [('LIT', ('username', '=', 'python')), ('LIT', ('username', '=', 'ruby'))]),
('AND', [('LIT', ('username', '=', 'python')), ('LIT', ('username', '=', 'jruby'))]),
('AND', [('LIT', ('username', '=', 'python')), ('LIT', ('username', '=', 'php')), ('LIT', ('username', '>', 'perl'))]),
('AND', [('LIT', ('username', '=', 'python')), ('LIT', ('username', '=', 'php')), ('LIT', ('username', '<', 'perl'))])
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
#
qs = TestUser.objects.filter(username="test") | TestUser.objects.filter(username="cheese")
expected = ('OR', [
('LIT', ("username", "=", "test")),
('LIT', ("username", "=", "cheese")),
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
qs = TestUser.objects.using("default").filter(username__in=set()).values_list('email')
with self.assertRaises(EmptyResultSet):
parse_dnf(qs.query.where, connection=connection)
qs = TestUser.objects.filter(username__startswith='Hello') | TestUser.objects.filter(username__startswith='Goodbye')
expected = ('OR', [
('LIT', ('_idx_startswith_username', '=', u'Hello')),
('LIT', ('_idx_startswith_username', '=', u'Goodbye'))
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
qs = TestUser.objects.filter(pk__in=[1, 2, 3])
expected = ('OR', [
('LIT', ("id", "=", datastore.Key.from_path(TestUser._meta.db_table, 1))),
('LIT', ("id", "=", datastore.Key.from_path(TestUser._meta.db_table, 2))),
('LIT', ("id", "=", datastore.Key.from_path(TestUser._meta.db_table, 3))),
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
qs = TestUser.objects.filter(pk__in=[1, 2, 3]).filter(username="test")
expected = ('OR', [
('AND', [
('LIT', (u'id', '=', datastore.Key.from_path(TestUser._meta.db_table, 1))),
('LIT', ('username', '=', 'test'))
]),
('AND', [
('LIT', (u'id', '=', datastore.Key.from_path(TestUser._meta.db_table, 2))),
('LIT', ('username', '=', 'test'))
]),
('AND', [
('LIT', (u'id', '=', datastore.Key.from_path(TestUser._meta.db_table, 3))),
('LIT', ('username', '=', 'test'))
])
])
self.assertEqual(expected, parse_dnf(qs.query.where, connection=connection)[0])
class ConstraintTests(TestCase):
"""
Tests for unique constraint handling
"""
def test_update_updates_markers(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
instance = ModelWithUniques.objects.create(name="One")
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
qry = datastore.Query(UniqueMarker.kind())
qry.Order(("created", datastore.Query.DESCENDING))
marker = [x for x in qry.Run()][0]
# Make sure we assigned the instance
self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
self.assertEqual(expected_marker, marker.key().id_or_name())
instance.name = "Two"
instance.save()
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
marker = [x for x in qry.Run()][0]
# Make sure we assigned the instance
self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("Two").hexdigest())
self.assertEqual(expected_marker, marker.key().id_or_name())
def test_conflicting_insert_throws_integrity_error(self):
ModelWithUniques.objects.create(name="One")
with self.assertRaises((IntegrityError, DataError)):
ModelWithUniques.objects.create(name="One")
def test_table_flush_clears_markers_for_that_table(self):
ModelWithUniques.objects.create(name="One")
UniqueModel.objects.create(unique_field="One")
from djangae.db.backends.appengine.commands import FlushCommand
FlushCommand(ModelWithUniques._meta.db_table).execute()
ModelWithUniques.objects.create(name="One")
with self.assertRaises(DataError):
UniqueModel.objects.create(unique_field="One")
def test_conflicting_update_throws_integrity_error(self):
ModelWithUniques.objects.create(name="One")
instance = ModelWithUniques.objects.create(name="Two")
with self.assertRaises((IntegrityError, DataError)):
instance.name = "One"
instance.save()
def test_unique_combinations_are_returned_correctly(self):
combos_one = _unique_combinations(ModelWithUniquesOnForeignKey, ignore_pk=True)
combos_two = _unique_combinations(ModelWithUniquesOnForeignKey, ignore_pk=False)
self.assertEqual([['name', 'related_name'], ['name'], ['related_name']], combos_one)
self.assertEqual([['name', 'related_name'], ['id'], ['name'], ['related_name']], combos_two)
class Entity(dict):
def __init__(self, model, id):
self._key = datastore.Key.from_path(model, id)
def key(self):
return self._key
e1 = Entity(ModelWithUniquesOnForeignKey._meta.db_table, 1)
e1["name"] = "One"
e1["related_name_id"] = 1
ids_one = unique_identifiers_from_entity(ModelWithUniquesOnForeignKey, e1)
self.assertItemsEqual([
u'djangae_modelwithuniquesonforeignkey|id:1',
u'djangae_modelwithuniquesonforeignkey|name:06c2cea18679d64399783748fa367bdd',
u'djangae_modelwithuniquesonforeignkey|related_name_id:1',
u'djangae_modelwithuniquesonforeignkey|name:06c2cea18679d64399783748fa367bdd|related_name_id:1'
], ids_one)
def test_error_on_update_doesnt_change_markers(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
instance = ModelWithUniques.objects.create(name="One")
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
qry = datastore.Query(UniqueMarker.kind())
qry.Order(("created", datastore.Query.DESCENDING))
marker = [x for x in qry.Run()][0]
# Make sure we assigned the instance
self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
self.assertEqual(expected_marker, marker.key().id_or_name())
instance.name = "Two"
from djangae.db.backends.appengine.commands import datastore as to_patch
try:
original = to_patch.Put
def func(*args, **kwargs):
kind = args[0][0].kind() if isinstance(args[0], list) else args[0].kind()
if kind == UniqueMarker.kind():
return original(*args, **kwargs)
raise AssertionError()
to_patch.Put = func
with self.assertRaises(Exception):
instance.save()
finally:
to_patch.Put = original
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
marker = [x for x in qry.Run()][0]
# Make sure we assigned the instance
self.assertEqual(marker["instance"], datastore.Key.from_path(instance._meta.db_table, instance.pk))
expected_marker = "{}|name:{}".format(ModelWithUniques._meta.db_table, md5("One").hexdigest())
self.assertEqual(expected_marker, marker.key().id_or_name())
def test_error_on_insert_doesnt_create_markers(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
from djangae.db.backends.appengine.commands import datastore as to_patch
try:
original = to_patch.Put
def func(*args, **kwargs):
kind = args[0][0].kind() if isinstance(args[0], list) else args[0].kind()
if kind == UniqueMarker.kind():
return original(*args, **kwargs)
raise AssertionError()
to_patch.Put = func
with self.assertRaises(AssertionError):
ModelWithUniques.objects.create(name="One")
finally:
to_patch.Put = original
self.assertEqual(0, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
def test_delete_clears_markers(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
instance = ModelWithUniques.objects.create(name="One")
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
instance.delete()
self.assertEqual(0, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
@override_settings(DJANGAE_DISABLE_CONSTRAINT_CHECKS=True)
def test_constraints_disabled_doesnt_create_or_check_markers(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
instance1 = ModelWithUniques.objects.create(name="One")
self.assertEqual(initial_count, datastore.Query(UniqueMarker.kind()).Count())
instance2 = ModelWithUniques.objects.create(name="One")
self.assertEqual(instance1.name, instance2.name)
self.assertFalse(instance1 == instance2)
@override_settings(DJANGAE_DISABLE_CONSTRAINT_CHECKS=True)
def test_constraints_can_be_enabled_per_model(self):
initial_count = datastore.Query(UniqueMarker.kind()).Count()
ModelWithUniquesAndOverride.objects.create(name="One")
self.assertEqual(1, datastore.Query(UniqueMarker.kind()).Count() - initial_count)
def test_list_field_unique_constraints(self):
instance1 = UniqueModel.objects.create(unique_field=1, unique_combo_one=1, unique_list_field=["A", "C"])
with self.assertRaises((IntegrityError, DataError)):
UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_list_field=["A"])
instance2 = UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_list_field=["B"])
instance2.unique_list_field = instance1.unique_list_field
with self.assertRaises((IntegrityError, DataError)):
instance2.save()
instance1.unique_list_field = []
instance1.save()
instance2.save()
def test_list_field_unique_constraints_validation(self):
instance1 = UniqueModel(
unique_set_field={"A"},
unique_together_list_field=[1],
unique_field=1,
unique_combo_one=1,
unique_list_field=["A", "C"]
)
# Without a custom mixin, Django can't construct a unique validation query for a list field
self.assertRaises(BadValueError, instance1.full_clean)
UniqueModel.__bases__ = (UniquenessMixin,) + UniqueModel.__bases__
instance1.full_clean()
instance1.save()
instance2 = UniqueModel(
unique_set_field={"B"},
unique_together_list_field=[2],
unique_field=2,
unique_combo_one=2,
unique_list_field=["B", "C"] # duplicate value C!
)
self.assertRaises(ValidationError, instance2.full_clean)
UniqueModel.__bases__ = (models.Model,)
def test_set_field_unique_constraints(self):
instance1 = UniqueModel.objects.create(unique_field=1, unique_combo_one=1, unique_set_field={"A", "C"})
with self.assertRaises((IntegrityError, DataError)):
UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_set_field={"A"})
instance2 = UniqueModel.objects.create(unique_field=2, unique_combo_one=2, unique_set_field={"B"})
instance2.unique_set_field = instance1.unique_set_field
with self.assertRaises((IntegrityError, DataError)):
instance2.save()
instance1.unique_set_field = set()
instance1.save()
instance2.save()
instance2.unique_set_field = set()
instance2.save() # You can have two fields with empty sets
def test_unique_constraints_on_model_with_long_str_pk(self):
""" Check that an object with a string-based PK of 500 characters (the max that GAE allows)
can still have unique constraints pointing at it. (See #242.)
"""
obj = UniqueModelWithLongPK(pk="x" * 500, unique_field=1)
obj.save()
duplicate = UniqueModelWithLongPK(pk="y" * 500, unique_field=1)
self.assertRaises(DataError, duplicate.save)
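# Illustrative helper only, not djangae's implementation: ConstraintTests above show
# that a unique-constraint marker is keyed as "<db_table>|<field>:<value>", where
# string values appear md5-hashed (e.g. "{table}|name:{md5('One').hexdigest()}") while
# plain integer values such as "id:1" appear unhashed. The sketch below mirrors only
# the hashed-string case from those tests.
def _unique_marker_key_sketch(db_table, field, value):
    from hashlib import md5  # hashing keeps the marker key name short and key-safe
    return "%s|%s:%s" % (db_table, field, md5(str(value)).hexdigest())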
class EdgeCaseTests(TestCase):
def setUp(self):
super(EdgeCaseTests, self).setUp()
add_special_index(TestUser, "username", "iexact")
self.u1 = TestUser.objects.create(username="A", email="[email protected]", last_login=datetime.datetime.now().date())
self.u2 = TestUser.objects.create(username="B", email="[email protected]", last_login=datetime.datetime.now().date())
self.u3 = TestUser.objects.create(username="C", email="[email protected]", last_login=datetime.datetime.now().date())
self.u4 = TestUser.objects.create(username="D", email="[email protected]", last_login=datetime.datetime.now().date())
self.u5 = TestUser.objects.create(username="E", email="[email protected]", last_login=datetime.datetime.now().date())
self.apple = TestFruit.objects.create(name="apple", color="red")
self.banana = TestFruit.objects.create(name="banana", color="yellow")
def test_querying_by_date(self):
instance1 = ModelWithDates.objects.create(start=datetime.date(2014, 1, 1), end=datetime.date(2014, 1, 20))
instance2 = ModelWithDates.objects.create(start=datetime.date(2014, 2, 1), end=datetime.date(2014, 2, 20))
self.assertEqual(instance1, ModelWithDates.objects.get(start__lt=datetime.date(2014, 1, 2)))
self.assertEqual(2, ModelWithDates.objects.filter(start__lt=datetime.date(2015, 1, 1)).count())
self.assertEqual(instance2, ModelWithDates.objects.get(start__gt=datetime.date(2014, 1, 2)))
self.assertEqual(instance2, ModelWithDates.objects.get(start__gte=datetime.date(2014, 2, 1)))
def test_double_starts_with(self):
qs = TestUser.objects.filter(username__startswith='Hello') | TestUser.objects.filter(username__startswith='Goodbye')
self.assertEqual(0, qs.count())
TestUser.objects.create(username="Hello")
self.assertEqual(1, qs.count())
TestUser.objects.create(username="Goodbye")
self.assertEqual(2, qs.count())
TestUser.objects.create(username="Hello and Goodbye")
self.assertEqual(3, qs.count())
def test_impossible_starts_with(self):
TestUser.objects.create(username="Hello")
TestUser.objects.create(username="Goodbye")
TestUser.objects.create(username="Hello and Goodbye")
qs = TestUser.objects.filter(username__startswith='Hello') & TestUser.objects.filter(username__startswith='Goodbye')
self.assertEqual(0, qs.count())
def test_combinations_of_special_indexes(self):
qs = TestUser.objects.filter(username__iexact='Hello') | TestUser.objects.filter(username__contains='ood')
self.assertEqual(0, qs.count())
TestUser.objects.create(username="Hello")
self.assertEqual(1, qs.count())
TestUser.objects.create(username="Goodbye")
self.assertEqual(2, qs.count())
TestUser.objects.create(username="Hello and Goodbye")
self.assertEqual(3, qs.count())
def test_multi_table_inheritance(self):
parent = MultiTableParent.objects.create(parent_field="parent1")
child1 = MultiTableChildOne.objects.create(parent_field="child1", child_one_field="child1")
child2 = MultiTableChildTwo.objects.create(parent_field="child2", child_two_field="child2")
self.assertEqual(3, MultiTableParent.objects.count())
self.assertItemsEqual([parent.pk, child1.pk, child2.pk],
list(MultiTableParent.objects.values_list('pk', flat=True)))
self.assertEqual(1, MultiTableChildOne.objects.count())
self.assertEqual(child1, MultiTableChildOne.objects.get())
self.assertEqual(1, MultiTableChildTwo.objects.count())
self.assertEqual(child2, MultiTableChildTwo.objects.get())
self.assertEqual(child2, MultiTableChildTwo.objects.get(pk=child2.pk))
self.assertTrue(MultiTableParent.objects.filter(pk=child2.pk).exists())
def test_anding_pks(self):
results = TestUser.objects.filter(id__exact=self.u1.pk).filter(id__exact=self.u2.pk)
self.assertEqual(list(results), [])
def test_unusual_queries(self):
results = TestFruit.objects.filter(name__in=["apple", "orange"])
self.assertEqual(1, len(results))
self.assertItemsEqual(["apple"], [x.name for x in results])
results = TestFruit.objects.filter(name__in=["apple", "banana"])
self.assertEqual(2, len(results))
self.assertItemsEqual(["apple", "banana"], [x.name for x in results])
results = TestFruit.objects.filter(name__in=["apple", "banana"]).values_list('pk', 'color')
self.assertEqual(2, len(results))
self.assertItemsEqual([(self.apple.pk, self.apple.color), (self.banana.pk, self.banana.color)], results)
results = TestUser.objects.all()
self.assertEqual(5, len(results))
results = TestUser.objects.filter(username__in=["A", "B"])
self.assertEqual(2, len(results))
self.assertItemsEqual(["A", "B"], [x.username for x in results])
results = TestUser.objects.filter(username__in=["A", "B"]).exclude(username="A")
self.assertEqual(1, len(results), results)
self.assertItemsEqual(["B"], [x.username for x in results])
results = TestUser.objects.filter(username__lt="E")
self.assertEqual(4, len(results))
self.assertItemsEqual(["A", "B", "C", "D"], [x.username for x in results])
results = TestUser.objects.filter(username__lte="E")
self.assertEqual(5, len(results))
#Double exclude on different properties not supported
with self.assertRaises(DataError):
#FIXME: This should raise a NotSupportedError, but at the moment it's thrown too late in
#the process and so Django wraps it as a DataError
list(TestUser.objects.exclude(username="E").exclude(email="A"))
results = list(TestUser.objects.exclude(username="E").exclude(username="A"))
self.assertItemsEqual(["B", "C", "D"], [x.username for x in results ])
results = TestUser.objects.filter(username="A", email="[email protected]")
self.assertEqual(1, len(results))
results = TestUser.objects.filter(username__in=["A", "B"]).filter(username__in=["A", "B"])
self.assertEqual(2, len(results))
self.assertItemsEqual(["A", "B"], [x.username for x in results])
results = TestUser.objects.filter(username__in=["A", "B"]).filter(username__in=["A"])
self.assertEqual(1, len(results))
self.assertItemsEqual(["A"], [x.username for x in results])
results = TestUser.objects.filter(pk__in=[self.u1.pk, self.u2.pk]).filter(username__in=["A"])
self.assertEqual(1, len(results))
self.assertItemsEqual(["A"], [x.username for x in results])
results = TestUser.objects.filter(username__in=["A"]).filter(pk__in=[self.u1.pk, self.u2.pk])
self.assertEqual(1, len(results))
self.assertItemsEqual(["A"], [x.username for x in results])
results = list(TestUser.objects.all().exclude(username__in=["A"]))
self.assertItemsEqual(["B", "C", "D", "E"], [x.username for x in results ])
results = list(TestFruit.objects.filter(name='apple', color__in=[]))
self.assertItemsEqual([], results)
results = list(TestUser.objects.all().exclude(username__in=[]))
self.assertEqual(5, len(results))
self.assertItemsEqual(["A", "B", "C", "D", "E"], [x.username for x in results ])
results = list(TestUser.objects.all().exclude(username__in=[]).filter(username__in=["A", "B"]))
self.assertEqual(2, len(results))
self.assertItemsEqual(["A", "B"], [x.username for x in results])
results = list(TestUser.objects.all().filter(username__in=["A", "B"]).exclude(username__in=[]))
self.assertEqual(2, len(results))
self.assertItemsEqual(["A", "B"], [x.username for x in results])
def test_or_queryset(self):
"""
This constructs an OR query, which is currently broken in the parse_where_and_check_projection
function. WE MUST FIX THIS!
"""
q1 = TestUser.objects.filter(username="A")
q2 = TestUser.objects.filter(username="B")
self.assertItemsEqual([self.u1, self.u2], list(q1 | q2))
def test_or_q_objects(self):
""" Test use of Q objects in filters. """
query = TestUser.objects.filter(Q(username="A") | Q(username="B"))
self.assertItemsEqual([self.u1, self.u2], list(query))
def test_extra_select(self):
results = TestUser.objects.filter(username='A').extra(select={'is_a': "username = 'A'"})
self.assertEqual(1, len(results))
self.assertItemsEqual([True], [x.is_a for x in results])
results = TestUser.objects.all().exclude(username='A').extra(select={'is_a': "username = 'A'"})
self.assertEqual(4, len(results))
self.assertFalse(any(x.is_a for x in results))
# Up for debate
# results = User.objects.all().extra(select={'truthy': 'TRUE'})
# self.assertEqual(all([x.truthy for x in results]), True)
results = TestUser.objects.all().extra(select={'truthy': True})
self.assertEqual(all([x.truthy for x in results]), True)
def test_counts(self):
self.assertEqual(5, TestUser.objects.count())
self.assertEqual(2, TestUser.objects.filter(email="[email protected]").count())
self.assertEqual(3, TestUser.objects.exclude(email="[email protected]").count())
self.assertEqual(1, TestUser.objects.filter(username="A").exclude(email="[email protected]").count())
self.assertEqual(3, TestUser.objects.exclude(username="E").exclude(username="A").count())
def test_deletion(self):
count = TestUser.objects.count()
self.assertTrue(count)
TestUser.objects.filter(username="A").delete()
self.assertEqual(count - 1, TestUser.objects.count())
TestUser.objects.filter(username="B").exclude(username="B").delete() #Should do nothing
self.assertEqual(count - 1, TestUser.objects.count())
TestUser.objects.all().delete()
count = TestUser.objects.count()
self.assertFalse(count)
def test_insert_with_existing_key(self):
user = TestUser.objects.create(id=999, username="test1", last_login=datetime.datetime.now().date())
self.assertEqual(999, user.pk)
with self.assertRaises(DjangoIntegrityError):
TestUser.objects.create(id=999, username="test2", last_login=datetime.datetime.now().date())
def test_included_pks(self):
ids = [ TestUser.objects.get(username="B").pk, TestUser.objects.get(username="A").pk ]
results = TestUser.objects.filter(pk__in=ids).order_by("username")
self.assertEqual(results[0], self.u1)
self.assertEqual(results[1], self.u2)
def test_select_related(self):
""" select_related should be a no-op... for now """
user = TestUser.objects.get(username="A")
Permission.objects.create(user=user, perm="test_perm")
select_related = [ (p.perm, p.user.username) for p in user.permission_set.select_related() ]
self.assertEqual(user.username, select_related[0][1])
def test_cross_selects(self):
user = TestUser.objects.get(username="A")
Permission.objects.create(user=user, perm="test_perm")
with self.assertRaises(NotSupportedError):
perms = list(Permission.objects.all().values_list("user__username", "perm"))
self.assertEqual("A", perms[0][0])
def test_values_list_on_pk_does_keys_only_query(self):
from google.appengine.api.datastore import Query
def replacement_init(*args, **kwargs):
replacement_init.called_args = args
replacement_init.called_kwargs = kwargs
original_init(*args, **kwargs)
replacement_init.called_args = None
replacement_init.called_kwargs = None
try:
original_init = Query.__init__
Query.__init__ = replacement_init
list(TestUser.objects.all().values_list('pk', flat=True))
finally:
Query.__init__ = original_init
self.assertTrue(replacement_init.called_kwargs.get('keys_only'))
self.assertEqual(5, len(TestUser.objects.all().values_list('pk')))
def test_iexact(self):
user = TestUser.objects.get(username__iexact="a")
self.assertEqual("A", user.username)
add_special_index(IntegerModel, "integer_field", "iexact")
IntegerModel.objects.create(integer_field=1000)
integer_model = IntegerModel.objects.get(integer_field__iexact=str(1000))
self.assertEqual(integer_model.integer_field, 1000)
user = TestUser.objects.get(id__iexact=str(self.u1.id))
self.assertEqual("A", user.username)
def test_ordering(self):
users = TestUser.objects.all().order_by("username")
self.assertEqual(["A", "B", "C", "D", "E"], [x.username for x in users])
users = TestUser.objects.all().order_by("-username")
self.assertEqual(["A", "B", "C", "D", "E"][::-1], [x.username for x in users])
with self.assertRaises(FieldError):
users = list(TestUser.objects.order_by("bananas"))
users = TestUser.objects.filter(id__in=[self.u2.id, self.u3.id, self.u4.id]).order_by('id')
self.assertEqual(["B", "C", "D"], [x.username for x in users])
users = TestUser.objects.filter(id__in=[self.u2.id, self.u3.id, self.u4.id]).order_by('-id')
self.assertEqual(["D", "C", "B"], [x.username for x in users])
users = TestUser.objects.filter(id__in=[self.u1.id, self.u5.id, self.u3.id]).order_by('id')
self.assertEqual(["A", "C", "E"], [x.username for x in users])
users = TestUser.objects.filter(id__in=[self.u4.id, self.u5.id, self.u3.id, self.u1.id]).order_by('-id')
self.assertEqual(["E", "D", "C", "A"], [x.username for x in users])
def test_dates_query(self):
z_user = TestUser.objects.create(username="Z", email="[email protected]")
z_user.last_login = datetime.date(2013, 4, 5)
z_user.save()
last_a_login = TestUser.objects.get(username="A").last_login
dates = TestUser.objects.dates('last_login', 'year')
self.assertItemsEqual(
[datetime.date(2013, 1, 1), datetime.date(last_a_login.year, 1, 1)],
dates
)
dates = TestUser.objects.dates('last_login', 'month')
self.assertItemsEqual(
[datetime.date(2013, 4, 1), datetime.date(last_a_login.year, last_a_login.month, 1)],
dates
)
dates = TestUser.objects.dates('last_login', 'day')
self.assertItemsEqual(
[datetime.date(2013, 4, 5), last_a_login],
dates
)
dates = TestUser.objects.dates('last_login', 'day', order='DESC')
self.assertItemsEqual(
[last_a_login, datetime.date(2013, 4, 5)],
dates
)
def test_in_query(self):
""" Test that the __in filter works, and that it cannot be used with more than 30 values,
unless it's used on the PK field.
"""
# Check that a basic __in query works
results = list(TestUser.objects.filter(username__in=['A', 'B']))
self.assertItemsEqual(results, [self.u1, self.u2])
# Check that it also works on PKs
results = list(TestUser.objects.filter(pk__in=[self.u1.pk, self.u2.pk]))
self.assertItemsEqual(results, [self.u1, self.u2])
# Check that using more than 30 items in an __in query not on the pk causes death
query = TestUser.objects.filter(username__in=list([x for x in letters[:31]]))
# This currently raises an error from App Engine, should we raise our own?
self.assertRaises(Exception, list, query)
# Check that it's ok with PKs though
query = TestUser.objects.filter(pk__in=list(xrange(1, 32)))
list(query)
def test_self_relations(self):
obj = SelfRelatedModel.objects.create()
obj2 = SelfRelatedModel.objects.create(related=obj)
self.assertEqual(list(obj.selfrelatedmodel_set.all()), [obj2])
def test_special_indexes_for_empty_fields(self):
obj = TestFruit.objects.create(name='pear')
indexes = ['icontains', 'contains', 'iexact', 'iendswith', 'endswith', 'istartswith', 'startswith']
for index in indexes:
add_special_index(TestFruit, 'color', index)
obj.save()
def test_special_indexes_for_unusually_long_values(self):
obj = TestFruit.objects.create(name='pear', color='1234567890-=!@#$%^&*()_+qQWERwertyuiopasdfghjklzxcvbnm')
indexes = ['icontains', 'contains', 'iexact', 'iendswith', 'endswith', 'istartswith', 'startswith']
for index in indexes:
add_special_index(TestFruit, 'color', index)
obj.save()
qry = TestFruit.objects.filter(color__contains='1234567890-=!@#$%^&*()_+qQWERwertyuiopasdfghjklzxcvbnm')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__contains='890-=!@#$')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__contains='1234567890-=!@#$%^&*()_+qQWERwertyui')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__contains='8901')
self.assertEqual(len(list(qry)), 0)
qry = TestFruit.objects.filter(color__icontains='1234567890-=!@#$%^&*()_+qQWERWERTYuiopasdfghjklzxcvbnm')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__icontains='890-=!@#$')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__icontains='1234567890-=!@#$%^&*()_+qQWERwertyuI')
self.assertEqual(len(list(qry)), 1)
qry = TestFruit.objects.filter(color__icontains='8901')
self.assertEqual(len(list(qry)), 0)
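# Background for the tests below: when a form is posted to an App Engine blobstore
# upload URL, App Engine stores each uploaded file itself and rewrites that part of
# the multipart body as "message/external-body" with a blob-key and an
# "X-AppEngine-BlobKey" access-type (as seen in the fixture's content-type and wsgi
# input). BlobstoreFileUploadHandler therefore only has to pick the blob key out of
# the part headers rather than read any file content.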
class BlobstoreFileUploadHandlerTest(TestCase):
boundary = "===============7417945581544019063=="
def setUp(self):
super(BlobstoreFileUploadHandlerTest, self).setUp()
self.request = RequestFactory().get('/')
self.request.META = {
'wsgi.input': self._create_wsgi_input(),
'content-type': 'message/external-body; blob-key="PLOF0qOie14jzHWJXEa9HA=="; access-type="X-AppEngine-BlobKey"'
}
self.uploader = BlobstoreFileUploadHandler(self.request)
def _create_wsgi_input(self):
return StringIO('--===============7417945581544019063==\r\nContent-Type:'
' text/plain\r\nContent-Disposition: form-data;'
' name="field-nationality"\r\n\r\nAS\r\n'
'--===============7417945581544019063==\r\nContent-Type:'
' message/external-body; blob-key="PLOF0qOie14jzHWJXEa9HA==";'
' access-type="X-AppEngine-BlobKey"\r\nContent-Disposition:'
' form-data; name="field-file";'
' filename="Scan.tiff"\r\n\r\nContent-Type: image/tiff'
'\r\nContent-Length: 19837164\r\nContent-MD5:'
' YjI1M2Q5NjM5YzdlMzUxYjMyMjA0ZTIxZjAyNzdiM2Q=\r\ncontent-disposition:'
' form-data; name="field-file";'
' filename="Scan.tiff"\r\nX-AppEngine-Upload-Creation: 2014-03-07'
' 14:48:03.246607\r\n\r\n\r\n'
'--===============7417945581544019063==\r\nContent-Type:'
' text/plain\r\nContent-Disposition: form-data;'
' name="field-number"\r\n\r\n6\r\n'
'--===============7417945581544019063==\r\nContent-Type:'
' text/plain\r\nContent-Disposition: form-data;'
' name="field-salutation"\r\n\r\nmrs\r\n'
'--===============7417945581544019063==--')
def test_non_existing_files_do_not_get_created(self):
file_field_name = 'field-file'
length = len(self._create_wsgi_input().read())
self.uploader.handle_raw_input(self.request.META['wsgi.input'], self.request.META, length, self.boundary, "utf-8")
self.assertRaises(StopFutureHandlers, self.uploader.new_file, file_field_name, 'file_name', None, None)
self.assertRaises(EntityNotFoundError, self.uploader.file_complete, None)
def test_blob_key_creation(self):
file_field_name = 'field-file'
length = len(self._create_wsgi_input().read())
self.uploader.handle_raw_input(self.request.META['wsgi.input'], self.request.META, length, self.boundary, "utf-8")
self.assertRaises(
StopFutureHandlers,
self.uploader.new_file, file_field_name, 'file_name', None, None
)
self.assertIsNotNone(self.uploader.blobkey)
def test_blobstore_upload_url_templatetag(self):
template = """{% load storage %}{% blobstore_upload_url '/something/' %}"""
response = Template(template).render(Context({}))
self.assertTrue(response.startswith("http://localhost:8080/_ah/upload/"))
class ApplicationTests(TestCase):
@unittest.skipIf(webtest is NotImplemented, "pip install webtest to run functional tests")
def test_environ_is_patched_when_request_processed(self):
def application(environ, start_response):
# As we're not going through a thread pool the environ is unset.
# Set it up manually here.
# TODO: Find a way to get it to be auto-set by webtest
from google.appengine.runtime import request_environment
request_environment.current_request.environ = environ
# Check if the os.environ is the same as what we expect from our
# wsgi environ
import os
self.assertEqual(environ, os.environ)
start_response("200 OK", [])
return ["OK"]
djangae_app = DjangaeApplication(application)
test_app = webtest.TestApp(djangae_app)
old_environ = os.environ
try:
test_app.get("/")
finally:
os.environ = old_environ
class ComputedFieldModel(models.Model):
def computer(self):
return "%s_%s" % (self.int_field, self.char_field)
int_field = models.IntegerField()
char_field = models.CharField(max_length=50)
test_field = ComputedCharField(computer, max_length=50)
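# Note: ComputedCharField appears to call the supplied callable against the instance
# and store the result when the instance is saved, after which the value is queryable
# like any other CharField (see ComputedFieldTests below, which filters on
# test_field="1_test").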
class ComputedFieldTests(TestCase):
def test_computed_field(self):
instance = ComputedFieldModel(int_field=1, char_field="test")
instance.save()
self.assertEqual(instance.test_field, "1_test")
# Try getting and saving the instance again
instance = ComputedFieldModel.objects.get(test_field="1_test")
instance.save()
class ModelWithCounter(models.Model):
counter = ShardedCounterField()
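# Minimal sketch of the sharded-counter idea, not ShardedCounterField's implementation:
# each increment updates one of N CounterShard entities (the test below shows 30 shards
# being created), and the counter's value is the sum over all shards. Spreading writes
# across shards avoids making a single datastore entity a write hot-spot.
def _increment_shard_sketch(shard_values):
    import random
    index = random.randrange(len(shard_values))  # pick an arbitrary shard to absorb the write
    shard_values[index] += 1
    return shard_values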
class ShardedCounterTest(TestCase):
def test_basic_usage(self):
instance = ModelWithCounter.objects.create()
self.assertEqual(0, instance.counter.value())
instance.counter.increment()
self.assertEqual(30, len(instance.counter))
self.assertEqual(30, CounterShard.objects.count())
self.assertEqual(1, instance.counter.value())
instance.counter.increment()
self.assertEqual(2, instance.counter.value())
instance.counter.decrement()
self.assertEqual(1, instance.counter.value())
instance.counter.decrement()
self.assertEqual(0, instance.counter.value())
instance.counter.decrement()
self.assertEqual(0, instance.counter.value())
class IterableFieldTests(TestCase):
def test_filtering_on_iterable_fields(self):
list1 = IterableFieldModel.objects.create(
list_field=['A', 'B', 'C', 'D', 'E', 'F', 'G'],
set_field=set(['A', 'B', 'C', 'D', 'E', 'F', 'G']))
list2 = IterableFieldModel.objects.create(
list_field=['A', 'B', 'C', 'H', 'I', 'J'],
set_field=set(['A', 'B', 'C', 'H', 'I', 'J']))
# filtering using exact lookup with ListField:
qry = IterableFieldModel.objects.filter(list_field='A')
self.assertEqual(sorted(x.pk for x in qry), sorted([list1.pk, list2.pk]))
qry = IterableFieldModel.objects.filter(list_field='H')
self.assertEqual(sorted(x.pk for x in qry), [list2.pk,])
# filtering using exact lookup with SetField:
qry = IterableFieldModel.objects.filter(set_field='A')
self.assertEqual(sorted(x.pk for x in qry), sorted([list1.pk, list2.pk]))
qry = IterableFieldModel.objects.filter(set_field='H')
self.assertEqual(sorted(x.pk for x in qry), [list2.pk,])
# filtering using in lookup with ListField:
qry = IterableFieldModel.objects.filter(list_field__in=['A', 'B', 'C'])
self.assertEqual(sorted(x.pk for x in qry), sorted([list1.pk, list2.pk,]))
qry = IterableFieldModel.objects.filter(list_field__in=['H', 'I', 'J'])
self.assertEqual(sorted(x.pk for x in qry), sorted([list2.pk,]))
# filtering using in lookup with SetField:
qry = IterableFieldModel.objects.filter(set_field__in=set(['A', 'B']))
self.assertEqual(sorted(x.pk for x in qry), sorted([list1.pk, list2.pk]))
qry = IterableFieldModel.objects.filter(set_field__in=set(['H']))
self.assertEqual(sorted(x.pk for x in qry), [list2.pk,])
def test_empty_iterable_fields(self):
""" Test that an empty set field always returns set(), not None """
instance = IterableFieldModel()
# When assigning
self.assertEqual(instance.set_field, set())
self.assertEqual(instance.list_field, [])
instance.save()
instance = IterableFieldModel.objects.get()
# When getting it from the db
self.assertEqual(instance.set_field, set())
self.assertEqual(instance.list_field, [])
def test_list_field(self):
instance = IterableFieldModel.objects.create()
self.assertEqual([], instance.list_field)
instance.list_field.append("One")
self.assertEqual(["One"], instance.list_field)
instance.save()
self.assertEqual(["One"], instance.list_field)
instance = IterableFieldModel.objects.get(pk=instance.pk)
self.assertEqual(["One"], instance.list_field)
instance.list_field = None
# Or anything else for that matter!
with self.assertRaises(ValueError):
instance.list_field = "Bananas"
instance.save()
results = IterableFieldModel.objects.filter(list_field="One")
self.assertEqual([instance], list(results))
def test_set_field(self):
instance = IterableFieldModel.objects.create()
self.assertEqual(set(), instance.set_field)
instance.set_field.add("One")
self.assertEqual(set(["One"]), instance.set_field)
instance.save()
self.assertEqual(set(["One"]), instance.set_field)
instance = IterableFieldModel.objects.get(pk=instance.pk)
self.assertEqual(set(["One"]), instance.set_field)
instance.set_field = None
# Or anything else for that matter!
with self.assertRaises(ValueError):
instance.set_field = "Bananas"
instance.save()
def test_empty_list_queryable_with_is_null(self):
instance = IterableFieldModel.objects.create()
self.assertTrue(IterableFieldModel.objects.filter(set_field__isnull=True).exists())
instance.set_field.add(1)
instance.save()
self.assertFalse(IterableFieldModel.objects.filter(set_field__isnull=True).exists())
self.assertTrue(IterableFieldModel.objects.filter(set_field__isnull=False).exists())
self.assertFalse(IterableFieldModel.objects.exclude(set_field__isnull=False).exists())
self.assertTrue(IterableFieldModel.objects.exclude(set_field__isnull=True).exists())
class InstanceSetFieldTests(TestCase):
def test_basic_usage(self):
main = ISModel.objects.create()
other = ISOther.objects.create(name="test")
other2 = ISOther.objects.create(name="test2")
main.related_things.add(other)
main.save()
self.assertEqual({other.pk}, main.related_things_ids)
self.assertEqual(list(ISOther.objects.filter(pk__in=main.related_things_ids)), list(main.related_things.all()))
self.assertEqual([main], list(other.ismodel_set.all()))
main.related_things.remove(other)
self.assertFalse(main.related_things_ids)
main.related_things = {other2}
self.assertEqual({other2.pk}, main.related_things_ids)
with self.assertRaises(AttributeError):
other.ismodel_set = {main}
without_reverse = RelationWithoutReverse.objects.create(name="test3")
self.assertFalse(hasattr(without_reverse, "ismodel_set"))
def test_save_and_load_empty(self):
"""
Create a main object with no related items,
get a copy of it back from the db and try to read items.
"""
main = ISModel.objects.create()
main_from_db = ISModel.objects.get(pk=main.pk)
# Fetch the container from the database and read its items
self.assertItemsEqual(main_from_db.related_things.all(), [])
def test_add_to_empty(self):
"""
Create a main object with no related items,
get a copy of it back from the db and try to add items.
"""
main = ISModel.objects.create()
main_from_db = ISModel.objects.get(pk=main.pk)
other = ISOther.objects.create()
main_from_db.related_things.add(other)
main_from_db.save()
def test_add_another(self):
"""
Create a main object with related items,
get a copy of it back from the db and try to add more.
"""
main = ISModel.objects.create()
other1 = ISOther.objects.create()
main.related_things.add(other1)
main.save()
main_from_db = ISModel.objects.get(pk=main.pk)
other2 = ISOther.objects.create()
main_from_db.related_things.add(other2)
main_from_db.save()
def test_deletion(self):
"""
Delete one of the objects referred to by the related field
"""
main = ISModel.objects.create()
other = ISOther.objects.create()
main.related_things.add(other)
main.save()
other.delete()
self.assertEqual(main.related_things.count(), 0)
class TestGenericRelationField(TestCase):
def test_basic_usage(self):
instance = GenericRelationModel.objects.create()
self.assertIsNone(instance.relation_to_content_type)
ct = ContentType.objects.create()
instance.relation_to_content_type = ct
instance.save()
self.assertTrue(instance.relation_to_content_type_id)
instance = GenericRelationModel.objects.get()
self.assertEqual(ct, instance.relation_to_content_type)
def test_overridden_dbtable(self):
instance = GenericRelationModel.objects.create()
self.assertIsNone(instance.relation_to_weird)
ct = ContentType.objects.create()
instance.relation_to_weird = ct
instance.save()
self.assertTrue(instance.relation_to_weird_id)
instance = GenericRelationModel.objects.get()
self.assertEqual(ct, instance.relation_to_weird)
class DatastorePaginatorTest(TestCase):
def setUp(self):
super(DatastorePaginatorTest, self).setUp()
for i in range(15):
PaginatorModel.objects.create(foo=i)
def test_basic_usage(self):
def qs():
return PaginatorModel.objects.all().order_by('foo')
p1 = paginator.DatastorePaginator(qs(), 5).page(1)
self.assertFalse(p1.has_previous())
self.assertTrue(p1.has_next())
self.assertEqual(p1.start_index(), 1)
self.assertEqual(p1.end_index(), 5)
self.assertEqual(p1.next_page_number(), 2)
self.assertEqual([x.foo for x in p1], [0, 1, 2, 3, 4])
p2 = paginator.DatastorePaginator(qs(), 5).page(2)
self.assertTrue(p2.has_previous())
self.assertTrue(p2.has_next())
self.assertEqual(p2.start_index(), 6)
self.assertEqual(p2.end_index(), 10)
self.assertEqual(p2.previous_page_number(), 1)
self.assertEqual(p2.next_page_number(), 3)
self.assertEqual([x.foo for x in p2], [5, 6, 7, 8, 9])
p3 = paginator.DatastorePaginator(qs(), 5).page(3)
self.assertTrue(p3.has_previous())
self.assertFalse(p3.has_next())
self.assertEqual(p3.start_index(), 11)
self.assertEqual(p3.end_index(), 15)
self.assertEqual(p3.previous_page_number(), 2)
self.assertEqual([x.foo for x in p3], [10, 11, 12, 13, 14])
def test_empty(self):
qs = PaginatorModel.objects.none()
p1 = paginator.DatastorePaginator(qs, 5).page(1)
self.assertFalse(p1.has_previous())
self.assertFalse(p1.has_next())
self.assertEqual(p1.start_index(), 0)
self.assertEqual(p1.end_index(), 0)
self.assertEqual([x for x in p1], [])
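# Illustrative sketch only: the parse_dnf tests earlier show a startswith lookup being
# rewritten as an equality filter on a precomputed "_idx_startswith_<field>" column.
# One way such a column can serve that lookup is for the entity to store every prefix
# of the indexed value, so that equality against any prefix matches; the real indexing
# rules live in djangae's special-index machinery.
def _startswith_index_values_sketch(value):
    # "Hello" -> ["H", "He", "Hel", "Hell", "Hello"]
    return [value[:i] for i in range(1, len(value) + 1)]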
class TestSpecialIndexers(TestCase):
def setUp(self):
super(TestSpecialIndexers, self).setUp()
self.names = ['Ola', 'Adam', 'Luke', 'rob', 'Daniel', 'Ela', 'Olga', 'olek', 'ola', 'Olaaa', 'OlaaA']
for name in self.names:
SpecialIndexesModel.objects.create(name=name)
self.qry = SpecialIndexesModel.objects.all()
def test_iexact_lookup(self):
for name in self.names:
qry = self.qry.filter(name__iexact=name)
self.assertEqual(len(qry), len([x for x in self.names if x.lower() == name.lower()]))
def test_contains_lookup_and_icontains_lookup(self):
tests = self.names + ['o', 'O', 'la']
for name in tests:
qry = self.qry.filter(name__contains=name)
self.assertEqual(len(qry), len([x for x in self.names if name in x]))
qry = self.qry.filter(name__icontains=name)
self.assertEqual(len(qry), len([x for x in self.names if name.lower() in x.lower()]))
def test_endswith_lookup_and_iendswith_lookup(self):
tests = self.names + ['a', 'A', 'aa']
for name in tests:
qry = self.qry.filter(name__endswith=name)
self.assertEqual(len(qry), len([x for x in self.names if x.endswith(name)]))
qry = self.qry.filter(name__iendswith=name)
self.assertEqual(len(qry), len([x for x in self.names if x.lower().endswith(name.lower())]))
def test_startswith_lookup_and_istartswith_lookup(self):
tests = self.names + ['O', 'o', 'ola']
for name in tests:
qry = self.qry.filter(name__startswith=name)
self.assertEqual(len(qry), len([x for x in self.names if x.startswith(name)]))
qry = self.qry.filter(name__istartswith=name)
self.assertEqual(len(qry), len([x for x in self.names if x.lower().startswith(name.lower())]))
def deferred_func():
pass
class TestHelperTests(TestCase):
def test_inconsistent_db(self):
with inconsistent_db():
fruit = TestFruit.objects.create(name="banana")
self.assertEqual(0, TestFruit.objects.count()) # Inconsistent query
self.assertEqual(1, TestFruit.objects.filter(pk=fruit.pk).count()) #Consistent query
def test_processing_tasks(self):
self.assertNumTasksEquals(0) #No tasks
deferred.defer(deferred_func)
self.assertNumTasksEquals(1, queue_name='default')
deferred.defer(deferred_func, _queue='another')
self.assertNumTasksEquals(1, queue_name='another')
taskqueue.add(url='/')
self.assertNumTasksEquals(2, queue_name='default')
self.process_task_queues()
self.assertNumTasksEquals(0) #No tasks
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from proboscis import after_class
from proboscis import asserts
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
from proboscis import test
from sqlalchemy import exc as sqlalchemy_exc
from sqlalchemy.sql.expression import text
from troveclient.compat.exceptions import BadRequest
from troveclient.compat.exceptions import HTTPNotImplemented
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import EPHEMERAL_SUPPORT
from trove.tests.api.instances import GROUP as INSTANCE_GROUP
from trove.tests.api.instances import GROUP_START
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.config import CONFIG
import trove.tests.util as testsutil
from trove.tests.util.check import Checker
from trove.tests.util.check import TypeCheck
from trove.tests.util import LocalSqlClient
from trove.tests.util.server_connection import create_server_connection
GROUP = "dbaas.api.instances.actions"
GROUP_REBOOT = "dbaas.api.instances.actions.reboot"
GROUP_RESTART = "dbaas.api.instances.actions.restart"
GROUP_RESIZE = "dbaas.api.instances.actions.resize.instance"
GROUP_STOP_MYSQL = "dbaas.api.instances.actions.stop"
MYSQL_USERNAME = "test_user"
MYSQL_PASSWORD = "abcde"
# stored in test conf
SERVICE_ID = '123'
FAKE_MODE = CONFIG.fake_mode
# If true, then we will actually log into the database.
USE_IP = not FAKE_MODE
# If true, then we will actually search for the process
USE_LOCAL_OVZ = CONFIG.use_local_ovz
class MySqlConnection(object):
def __init__(self, host):
self.host = host
def connect(self):
"""Connect to MySQL database."""
print("Connecting to MySQL, mysql --host %s -u %s -p%s"
% (self.host, MYSQL_USERNAME, MYSQL_PASSWORD))
sql_engine = LocalSqlClient.init_engine(MYSQL_USERNAME, MYSQL_PASSWORD,
self.host)
self.client = LocalSqlClient(sql_engine, use_flush=False)
def is_connected(self):
try:
with self.client:
self.client.execute(text("""SELECT "Hello.";"""))
return True
except (sqlalchemy_exc.OperationalError,
sqlalchemy_exc.DisconnectionError,
sqlalchemy_exc.TimeoutError):
return False
except Exception as ex:
print("EX WAS:")
print(type(ex))
print(ex)
raise ex
TIME_OUT_TIME = 15 * 60
USER_WAS_DELETED = False
class ActionTestBase(object):
"""Has some helpful functions for testing actions.
The test user must be created for some of these functions to work.
"""
def set_up(self):
"""If you're using this as a base class, call this method first."""
self.dbaas = instance_info.dbaas
if USE_IP:
address = instance_info.get_address()
self.connection = MySqlConnection(address)
@property
def instance(self):
return self.dbaas.instances.get(self.instance_id)
@property
def instance_address(self):
return instance_info.get_address()
@property
def instance_id(self):
return instance_info.id
def create_user(self):
"""Create a MySQL user we can use for this test."""
users = [{"name": MYSQL_USERNAME, "password": MYSQL_PASSWORD,
"databases": [{"name": MYSQL_USERNAME}]}]
self.dbaas.users.create(instance_info.id, users)
def has_user():
users = self.dbaas.users.list(instance_info.id)
return any([user.name == MYSQL_USERNAME for user in users])
poll_until(has_user, time_out=30)
if not FAKE_MODE:
time.sleep(5)
def ensure_mysql_is_running(self):
"""Make sure MySQL is accessible before restarting."""
with Checker() as check:
if USE_IP:
self.connection.connect()
check.true(self.connection.is_connected(),
"Able to connect to MySQL.")
self.proc_id = self.find_mysql_proc_on_instance()
check.true(self.proc_id is not None,
"MySQL process can not be found.")
instance = self.instance
check.false(instance is None)
check.equal(instance.status, "ACTIVE")
def find_mysql_proc_on_instance(self):
server = create_server_connection(self.instance_id)
cmd = "ps acux | grep mysqld " \
"| grep -v mysqld_safe | awk '{print $2}'"
stdout, stderr = server.execute(cmd)
try:
return int(stdout)
except ValueError:
return None
def log_current_users(self):
users = self.dbaas.users.list(self.instance_id)
CONFIG.get_report().log("Current user count = %d" % len(users))
for user in users:
CONFIG.get_report().log("\t" + str(user))
def _build_expected_msg(self):
expected = {
'instance_size': instance_info.dbaas_flavor.ram,
'tenant_id': instance_info.user.tenant_id,
'instance_id': instance_info.id,
'instance_name': instance_info.name,
'created_at': testsutil.iso_time(
instance_info.initial_result.created),
'launched_at': testsutil.iso_time(self.instance.updated),
'modify_at': testsutil.iso_time(self.instance.updated)
}
return expected
@test(depends_on_groups=[GROUP_START])
def create_user():
"""Create a test user so that subsequent tests can log in."""
helper = ActionTestBase()
helper.set_up()
if USE_IP:
try:
helper.create_user()
except BadRequest:
pass # Ignore this if the user already exists.
helper.connection.connect()
asserts.assert_true(helper.connection.is_connected(),
"Test user must be able to connect to MySQL.")
class RebootTestBase(ActionTestBase):
"""Tests restarting MySQL."""
def call_reboot(self):
raise NotImplementedError()
def wait_for_broken_connection(self):
"""Wait until our connection breaks."""
if not USE_IP:
return
if not hasattr(self, "connection"):
return
poll_until(self.connection.is_connected,
lambda connected: not connected,
time_out=TIME_OUT_TIME)
def wait_for_successful_restart(self):
"""Wait until status becomes running."""
def is_finished_rebooting():
instance = self.instance
if instance.status == "REBOOT":
return False
asserts.assert_equal("ACTIVE", instance.status)
return True
poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME)
def assert_mysql_proc_is_different(self):
if not USE_IP:
return
new_proc_id = self.find_mysql_proc_on_instance()
asserts.assert_not_equal(new_proc_id, self.proc_id,
"MySQL process ID should be different!")
def successful_restart(self):
"""Restart MySQL via the REST API successfully."""
self.fix_mysql()
self.call_reboot()
self.wait_for_broken_connection()
self.wait_for_successful_restart()
self.assert_mysql_proc_is_different()
def mess_up_mysql(self):
"""Ruin MySQL's ability to restart."""
server = create_server_connection(self.instance_id)
cmd = "sudo cp /dev/null /var/lib/mysql/data/ib_logfile%d"
instance_info.dbaas_admin.management.stop(self.instance_id)
for index in range(2):
server.execute(cmd % index)
def fix_mysql(self):
"""Fix MySQL's ability to restart."""
if not FAKE_MODE:
server = create_server_connection(self.instance_id)
cmd = "sudo rm /var/lib/mysql/data/ib_logfile%d"
# We want to stop mysql so that upstart does not keep trying to
# respawn it and block the guest agent from accessing the logs.
instance_info.dbaas_admin.management.stop(self.instance_id)
for index in range(2):
server.execute(cmd % index)
def wait_for_failure_status(self):
"""Wait until status becomes running."""
def is_finished_rebooting():
instance = self.instance
if instance.status == "REBOOT" or instance.status == "ACTIVE":
return False
# We check for BLOCKED as well as SHUTDOWN because Upstart may try to bring
# MySQL back up after the borked connection, so the guest status can end up
# as either one.
asserts.assert_true(instance.status in ("SHUTDOWN", "BLOCKED"))
return True
poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME)
def unsuccessful_restart(self):
"""Restart MySQL via the REST when it should fail, assert it does."""
assert not FAKE_MODE
self.mess_up_mysql()
self.call_reboot()
self.wait_for_broken_connection()
self.wait_for_failure_status()
def restart_normally(self):
"""Fix iblogs and reboot normally."""
self.fix_mysql()
self.test_successful_restart()
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESTART],
depends_on_groups=[GROUP_START], depends_on=[create_user])
class RestartTests(RebootTestBase):
"""Tests restarting MySQL."""
def call_reboot(self):
self.instance.restart()
asserts.assert_equal(202, self.dbaas.last_http_code)
@before_class
def test_set_up(self):
self.set_up()
@test
def test_ensure_mysql_is_running(self):
"""Make sure MySQL is accessible before restarting."""
self.ensure_mysql_is_running()
@test(depends_on=[test_ensure_mysql_is_running], enabled=not FAKE_MODE)
def test_unsuccessful_restart(self):
"""Restart MySQL via the REST when it should fail, assert it does."""
if FAKE_MODE:
raise SkipTest("Cannot run this in fake mode.")
self.unsuccessful_restart()
@test(depends_on=[test_set_up],
runs_after=[test_ensure_mysql_is_running, test_unsuccessful_restart])
def test_successful_restart(self):
"""Restart MySQL via the REST API successfully."""
self.successful_restart()
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_STOP_MYSQL],
depends_on_groups=[GROUP_START], depends_on=[create_user])
class StopTests(RebootTestBase):
"""Tests which involve stopping MySQL."""
def call_reboot(self):
self.instance.restart()
@before_class
def test_set_up(self):
self.set_up()
@test
def test_ensure_mysql_is_running(self):
"""Make sure MySQL is accessible before restarting."""
self.ensure_mysql_is_running()
@test(depends_on=[test_ensure_mysql_is_running])
def test_stop_mysql(self):
"""Stops MySQL."""
instance_info.dbaas_admin.management.stop(self.instance_id)
self.wait_for_broken_connection()
self.wait_for_failure_status()
@test(depends_on=[test_stop_mysql])
def test_instance_get_shows_volume_info_while_mysql_is_down(self):
"""
Confirms the get call behaves appropriately while an instance is
down.
"""
if not VOLUME_SUPPORT:
raise SkipTest("Not testing volumes.")
instance = self.dbaas.instances.get(self.instance_id)
with TypeCheck("instance", instance) as check:
check.has_field("volume", dict)
check.true('size' in instance.volume)
check.true('used' in instance.volume)
check.true(isinstance(instance.volume.get('size', None), int))
check.true(isinstance(instance.volume.get('used', None), float))
@test(depends_on=[test_set_up],
runs_after=[test_instance_get_shows_volume_info_while_mysql_is_down])
def test_successful_restart_when_in_shutdown_state(self):
"""Restart MySQL via the REST API successfully when MySQL is down."""
self.successful_restart()
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_REBOOT],
depends_on_groups=[GROUP_START], depends_on=[RestartTests, create_user])
class RebootTests(RebootTestBase):
"""Tests restarting instance."""
def call_reboot(self):
instance_info.dbaas_admin.management.reboot(self.instance_id)
@before_class
def test_set_up(self):
self.set_up()
asserts.assert_true(hasattr(self, 'dbaas'))
asserts.assert_true(self.dbaas is not None)
@test
def test_ensure_mysql_is_running(self):
"""Make sure MySQL is accessible before restarting."""
self.ensure_mysql_is_running()
@test(depends_on=[test_ensure_mysql_is_running])
def test_unsuccessful_restart(self):
"""Restart MySQL via the REST when it should fail, assert it does."""
if FAKE_MODE:
raise SkipTest("Cannot run this in fake mode.")
self.unsuccessful_restart()
@after_class(depends_on=[test_set_up])
def test_successful_restart(self):
"""Restart MySQL via the REST API successfully."""
if FAKE_MODE:
raise SkipTest("Cannot run this in fake mode.")
self.successful_restart()
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP,
GROUP_RESIZE],
depends_on_groups=[GROUP_START], depends_on=[create_user],
runs_after=[RebootTests])
class ResizeInstanceTest(ActionTestBase):
"""
Integration Test cases for resize instance
"""
@property
def flavor_id(self):
return instance_info.dbaas_flavor_href
def get_flavor_href(self, flavor_id=2):
res = instance_info.dbaas.find_flavor_and_self_href(flavor_id)
dbaas_flavor, dbaas_flavor_href = res
return dbaas_flavor_href
def wait_for_resize(self):
def is_finished_resizing():
instance = self.instance
if instance.status == "RESIZE":
return False
asserts.assert_equal("ACTIVE", instance.status)
return True
poll_until(is_finished_resizing, time_out=TIME_OUT_TIME)
@before_class
def setup(self):
self.set_up()
if USE_IP:
self.connection.connect()
asserts.assert_true(self.connection.is_connected(),
"Should be able to connect before resize.")
self.user_was_deleted = False
@test
def test_instance_resize_same_size_should_fail(self):
asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance,
self.instance_id, self.flavor_id)
@test(enabled=VOLUME_SUPPORT)
def test_instance_resize_to_ephemeral_in_volume_support_should_fail(self):
flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name',
'eph.rd-smaller')
flavors = self.dbaas.find_flavors_by_name(flavor_name)
def is_active():
return self.instance.status == 'ACTIVE'
poll_until(is_active, time_out=TIME_OUT_TIME)
asserts.assert_equal(self.instance.status, 'ACTIVE')
self.get_flavor_href(
flavor_id=self.expected_old_flavor_id)
asserts.assert_raises(HTTPNotImplemented,
self.dbaas.instances.resize_instance,
self.instance_id, flavors[0].id)
@test(enabled=EPHEMERAL_SUPPORT)
def test_instance_resize_to_non_ephemeral_flavor_should_fail(self):
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1-small')
flavors = self.dbaas.find_flavors_by_name(flavor_name)
asserts.assert_raises(BadRequest, self.dbaas.instances.resize_instance,
self.instance_id, flavors[0].id)
def obtain_flavor_ids(self):
old_id = self.instance.flavor['id']
self.expected_old_flavor_id = old_id
res = instance_info.dbaas.find_flavor_and_self_href(old_id)
self.expected_dbaas_flavor, _dontcare_ = res
if EPHEMERAL_SUPPORT:
flavor_name = CONFIG.values.get('instance_bigger_eph_flavor_name',
'eph.rd-smaller')
else:
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1.small')
flavors = self.dbaas.find_flavors_by_name(flavor_name)
asserts.assert_equal(len(flavors), 1,
"Number of flavors with name '%s' "
"found was '%d'." % (flavor_name,
len(flavors)))
flavor = flavors[0]
self.old_dbaas_flavor = instance_info.dbaas_flavor
instance_info.dbaas_flavor = flavor
asserts.assert_true(flavor is not None,
"Flavor '%s' not found!" % flavor_name)
flavor_href = self.dbaas.find_flavor_self_href(flavor)
asserts.assert_true(flavor_href is not None,
"Flavor href '%s' not found!" % flavor_name)
self.expected_new_flavor_id = flavor.id
@test(depends_on=[test_instance_resize_same_size_should_fail])
def test_status_changed_to_resize(self):
self.log_current_users()
self.obtain_flavor_ids()
self.dbaas.instances.resize_instance(
self.instance_id,
self.get_flavor_href(flavor_id=self.expected_new_flavor_id))
asserts.assert_equal(202, self.dbaas.last_http_code)
# (WARNING) IF THE RESIZE IS WAY TOO FAST THIS WILL FAIL
assert_unprocessable(
self.dbaas.instances.resize_instance,
self.instance_id,
self.get_flavor_href(flavor_id=self.expected_new_flavor_id))
@test(depends_on=[test_status_changed_to_resize])
@time_out(TIME_OUT_TIME)
def test_instance_returns_to_active_after_resize(self):
self.wait_for_resize()
@test(depends_on=[test_instance_returns_to_active_after_resize,
test_status_changed_to_resize],
groups=["dbaas.usage"])
def test_resize_instance_usage_event_sent(self):
expected = self._build_expected_msg()
expected['old_instance_size'] = self.old_dbaas_flavor.ram
instance_info.consumer.check_message(instance_info.id,
'trove.instance.modify_flavor',
**expected)
@test(depends_on=[test_instance_returns_to_active_after_resize],
runs_after=[test_resize_instance_usage_event_sent])
def resize_should_not_delete_users(self):
"""Resize should not delete users."""
# Resize has an incredibly weird bug where users are deleted after
# a resize. The code below is an attempt to catch this while proceeding
# with the rest of the test (note the use of runs_after).
if USE_IP:
self.connection.connect()
if not self.connection.is_connected():
# OK, this is definitely a failure, but before we raise an error
# let's recreate the user to see how far we can get.
CONFIG.get_report().log(
"Having to recreate the test_user! Resizing killed it!")
self.log_current_users()
self.create_user()
asserts.fail(
"Somehow, the resize made the test user disappear.")
@test(depends_on=[test_instance_returns_to_active_after_resize],
runs_after=[resize_should_not_delete_users])
def test_make_sure_mysql_is_running_after_resize(self):
self.ensure_mysql_is_running()
@test(depends_on=[test_instance_returns_to_active_after_resize],
runs_after=[test_make_sure_mysql_is_running_after_resize])
def test_instance_has_new_flavor_after_resize(self):
actual = self.get_flavor_href(self.instance.flavor['id'])
expected = self.get_flavor_href(flavor_id=self.expected_new_flavor_id)
asserts.assert_equal(actual, expected)
@test(depends_on=[test_instance_has_new_flavor_after_resize])
@time_out(TIME_OUT_TIME)
def test_resize_down(self):
expected_dbaas_flavor = self.expected_dbaas_flavor
def is_active():
return self.instance.status == 'ACTIVE'
poll_until(is_active, time_out=TIME_OUT_TIME)
asserts.assert_equal(self.instance.status, 'ACTIVE')
old_flavor_href = self.get_flavor_href(
flavor_id=self.expected_old_flavor_id)
self.dbaas.instances.resize_instance(self.instance_id, old_flavor_href)
asserts.assert_equal(202, self.dbaas.last_http_code)
self.old_dbaas_flavor = instance_info.dbaas_flavor
instance_info.dbaas_flavor = expected_dbaas_flavor
self.wait_for_resize()
asserts.assert_equal(str(self.instance.flavor['id']),
str(self.expected_old_flavor_id))
@test(depends_on=[test_resize_down],
groups=["dbaas.usage"])
def test_resize_instance_down_usage_event_sent(self):
expected = self._build_expected_msg()
expected['old_instance_size'] = self.old_dbaas_flavor.ram
instance_info.consumer.check_message(instance_info.id,
'trove.instance.modify_flavor',
**expected)
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP,
GROUP + ".resize.instance"],
depends_on_groups=[GROUP_START], depends_on=[create_user],
runs_after=[RebootTests, ResizeInstanceTest])
def resize_should_not_delete_users():
if USER_WAS_DELETED:
asserts.fail("Somehow, the resize made the test user disappear.")
@test(runs_after=[ResizeInstanceTest], depends_on=[create_user],
groups=[GROUP, tests.INSTANCES, INSTANCE_GROUP, GROUP_RESIZE],
enabled=VOLUME_SUPPORT)
class ResizeInstanceVolume(ActionTestBase):
"""Resize the volume of the instance."""
@before_class
def setUp(self):
self.set_up()
self.old_volume_size = int(instance_info.volume['size'])
self.new_volume_size = self.old_volume_size + 1
self.old_volume_fs_size = instance_info.get_volume_filesystem_size()
# Create some databases to check they still exist after the resize
self.expected_dbs = ['salmon', 'halibut']
databases = []
for name in self.expected_dbs:
databases.append({"name": name})
instance_info.dbaas.databases.create(instance_info.id, databases)
@test
@time_out(60)
def test_volume_resize(self):
instance_info.dbaas.instances.resize_volume(instance_info.id,
self.new_volume_size)
@test(depends_on=[test_volume_resize])
@time_out(300)
def test_volume_resize_success(self):
def check_resize_status():
instance = instance_info.dbaas.instances.get(instance_info.id)
if instance.status == "ACTIVE":
return True
elif instance.status == "RESIZE":
return False
else:
asserts.fail("Status should not be %s" % instance.status)
poll_until(check_resize_status, sleep_time=2, time_out=300)
instance = instance_info.dbaas.instances.get(instance_info.id)
asserts.assert_equal(instance.volume['size'], self.new_volume_size)
@test(depends_on=[test_volume_resize_success])
def test_volume_filesystem_resize_success(self):
        # get_volume_filesystem_size is a mgmt call that goes through the
        # guestagent, while the volume resize occurs through the fake
        # nova-volume. Currently the guestagent fakes don't have access to the
        # nova fakes, so they don't know that a volume resize happened or to
        # what size, which means we can't fake the filesystem size.
if FAKE_MODE:
raise SkipTest("Cannot run this in fake mode.")
new_volume_fs_size = instance_info.get_volume_filesystem_size()
asserts.assert_true(self.old_volume_fs_size < new_volume_fs_size)
        # The total filesystem size is not going to be exactly the same size
        # as the cinder volume, but it should round to it (e.g. round(1.9) == 2).
asserts.assert_equal(round(new_volume_fs_size), self.new_volume_size)
@test(depends_on=[test_volume_resize_success], groups=["dbaas.usage"])
def test_resize_volume_usage_event_sent(self):
expected = self._build_expected_msg()
expected['volume_size'] = self.new_volume_size
expected['old_volume_size'] = self.old_volume_size
instance_info.consumer.check_message(instance_info.id,
'trove.instance.modify_volume',
**expected)
@test
@time_out(300)
def test_volume_resize_success_databases(self):
databases = instance_info.dbaas.databases.list(instance_info.id)
db_list = []
for database in databases:
db_list.append(database.name)
for name in self.expected_dbs:
if name not in db_list:
asserts.fail(
"Database %s was not found after the volume resize. "
"Returned list: %s" % (name, databases))
# This tests the ability of the guest to upgrade itself.
# It is necessarily tricky because we need to be able to upload a new copy of
# the guest into an apt-repo in the middle of the test.
# "guest-update-test" is where the knowledge of how to do this is set in the
# test conf. If it is not specified this test never runs.
UPDATE_GUEST_CONF = CONFIG.values.get("guest-update-test", None)
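# A hypothetical sketch of what that conf entry might look like; only the two
# keys read below ("next-version" and "install-repo-cmd") are assumed, and the
# values here are purely illustrative:
#
#   UPDATE_GUEST_CONF = {
#       "next-version": "2013.2-dev2",
#       "install-repo-cmd": ["cp", "/tmp/new-guest.deb", "/var/www/apt-repo/"],
#   }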
@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP + ".update_guest"],
depends_on=[create_user],
depends_on_groups=[GROUP_START])
class UpdateGuest(object):
def get_version(self):
info = instance_info.dbaas_admin.diagnostics.get(instance_info.id)
return info.version
@before_class(enabled=UPDATE_GUEST_CONF is not None)
def check_version_is_old(self):
"""Make sure we have the old version before proceeding."""
self.old_version = self.get_version()
self.next_version = UPDATE_GUEST_CONF["next-version"]
asserts.assert_not_equal(self.old_version, self.next_version)
@test(enabled=UPDATE_GUEST_CONF is not None)
def upload_update_to_repo(self):
cmds = UPDATE_GUEST_CONF["install-repo-cmd"]
testsutil.execute(*cmds, run_as_root=True, root_helper="sudo")
@test(enabled=UPDATE_GUEST_CONF is not None,
depends_on=[upload_update_to_repo])
def update_and_wait_to_finish(self):
instance_info.dbaas_admin.management.update(instance_info.id)
def finished():
current_version = self.get_version()
if current_version == self.next_version:
return True
            # The only valid value aside from next_version is the old
            # version.
asserts.assert_equal(current_version, self.old_version)
poll_until(finished, sleep_time=1, time_out=3 * 60)
@test(enabled=UPDATE_GUEST_CONF is not None,
depends_on=[upload_update_to_repo])
@time_out(30)
def update_again(self):
"""Test the wait time of a pointless update."""
instance_info.dbaas_admin.management.update(instance_info.id)
# Make sure this isn't taking too long.
instance_info.dbaas_admin.diagnostics.get(instance_info.id)
|
|
import asyncio
import collections
import numpy as np
import os
import pytest
import signal
import sys
import time
import ray
import ray.test_utils
import ray.cluster_utils
from ray.test_utils import (
wait_for_condition,
wait_for_pid_to_exit,
generate_system_config_map,
get_other_nodes,
new_scheduler_enabled,
SignalActor,
)
SIGKILL = signal.SIGKILL if sys.platform != "win32" else signal.SIGTERM
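# signal.SIGKILL does not exist on Windows, so fall back to SIGTERM there.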
@pytest.fixture
def ray_init_with_task_retry_delay():
address = ray.init(_system_config={"task_retry_delay_ms": 100})
yield address
ray.shutdown()
@pytest.mark.parametrize(
"ray_start_regular", [{
"object_store_memory": 150 * 1024 * 1024,
}],
indirect=True)
def test_actor_spilled(ray_start_regular):
object_store_memory = 150 * 1024 * 1024
@ray.remote
class Actor:
def __init__(self):
pass
def create_object(self, size):
return np.random.rand(size)
a = Actor.remote()
# Submit enough methods on the actor so that they exceed the size of the
# object store.
objects = []
num_objects = 40
for _ in range(num_objects):
obj = a.create_object.remote(object_store_memory // num_objects)
objects.append(obj)
# Get each object once to make sure each object gets created.
ray.get(obj)
# Get each object again. At this point, the earlier objects should have
# been spilled.
num_success = 0
for obj in objects:
val = ray.get(obj)
assert isinstance(val, np.ndarray), val
num_success += 1
    # All of the objects should have been spilled, so all of them should
    # succeed.
assert num_success == len(objects)
@pytest.mark.skipif(sys.platform == "win32", reason="Very flaky on Windows.")
def test_actor_restart(ray_init_with_task_retry_delay):
"""Test actor restart when actor process is killed."""
@ray.remote(max_restarts=1)
class RestartableActor:
"""An actor that will be restarted at most once."""
def __init__(self):
self.value = 0
def increase(self, exit=False):
if exit:
os._exit(-1)
self.value += 1
return self.value
def get_pid(self):
return os.getpid()
actor = RestartableActor.remote()
    # Submit some tasks and make the actor exit partway through (at task 100).
results = [actor.increase.remote(exit=(i == 100)) for i in range(200)]
# Make sure that all tasks were executed in order before the actor's death.
i = 1
while results:
res = results[0]
try:
r = ray.get(res)
if r != i:
# Actor restarted at this task without any failed tasks in
# between.
break
results.pop(0)
i += 1
except ray.exceptions.RayActorError:
break
# Skip any tasks that errored.
while results:
try:
ray.get(results[0])
except ray.exceptions.RayActorError:
results.pop(0)
else:
break
# Check all tasks that executed after the restart.
if results:
# The actor executed some tasks after the restart.
i = 1
while results:
r = ray.get(results.pop(0))
assert r == i
i += 1
# Check that we can still call the actor.
result = actor.increase.remote()
assert ray.get(result) == r + 1
else:
# Wait for the actor to restart.
def ping():
try:
ray.get(actor.increase.remote())
return True
except ray.exceptions.RayActorError:
return False
wait_for_condition(ping)
# The actor has restarted. Kill actor process one more time.
actor.increase.remote(exit=True)
# The actor has exceeded max restarts. All tasks should fail.
for _ in range(100):
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.increase.remote())
# Create another actor.
actor = RestartableActor.remote()
    # Intentionally exit the actor.
actor.__ray_terminate__.remote()
# Check that the actor won't be restarted.
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.increase.remote())
def test_actor_restart_with_retry(ray_init_with_task_retry_delay):
"""Test actor restart when actor process is killed."""
@ray.remote(max_restarts=1, max_task_retries=-1)
class RestartableActor:
"""An actor that will be restarted at most once."""
def __init__(self):
self.value = 0
def increase(self, delay=0):
time.sleep(delay)
self.value += 1
return self.value
def get_pid(self):
return os.getpid()
actor = RestartableActor.remote()
pid = ray.get(actor.get_pid.remote())
results = [actor.increase.remote() for _ in range(100)]
    # Kill the actor process while the above tasks are still being executed.
os.kill(pid, SIGKILL)
wait_for_pid_to_exit(pid)
# Check that none of the tasks failed and the actor is restarted.
seq = list(range(1, 101))
results = ray.get(results)
failed_task_index = None
# Make sure that all tasks were executed in order before and after the
# actor's death.
for i, res in enumerate(results):
if res != seq[0]:
if failed_task_index is None:
failed_task_index = i
assert res + failed_task_index == seq[0]
seq.pop(0)
# Check that we can still call the actor.
result = actor.increase.remote()
assert ray.get(result) == results[-1] + 1
# kill actor process one more time.
results = [actor.increase.remote() for _ in range(100)]
pid = ray.get(actor.get_pid.remote())
os.kill(pid, SIGKILL)
wait_for_pid_to_exit(pid)
# The actor has exceeded max restarts, and this task should fail.
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.increase.remote())
# Create another actor.
actor = RestartableActor.remote()
    # Intentionally exit the actor.
actor.__ray_terminate__.remote()
# Check that the actor won't be restarted.
with pytest.raises(ray.exceptions.RayActorError):
ray.get(actor.increase.remote())
def test_named_actor_max_task_retries(ray_init_with_task_retry_delay):
@ray.remote(num_cpus=0)
class Counter:
def __init__(self):
self.count = 0
self.event = asyncio.Event()
def increment(self):
self.count += 1
self.event.set()
async def wait_for_count(self, count):
while True:
if self.count >= count:
return
await self.event.wait()
self.event.clear()
@ray.remote
class ActorToKill:
def __init__(self, counter):
counter.increment.remote()
def run(self, counter, signal):
counter.increment.remote()
ray.get(signal.wait.remote())
@ray.remote
class CallingActor:
def __init__(self):
self.actor = ray.get_actor("a")
def call_other(self, counter, signal):
return ray.get(self.actor.run.remote(counter, signal))
init_counter = Counter.remote()
run_counter = Counter.remote()
signal = SignalActor.remote()
# Start the two actors, wait for ActorToKill's constructor to run.
a = ActorToKill.options(
name="a", max_restarts=-1, max_task_retries=-1).remote(init_counter)
c = CallingActor.remote()
ray.get(init_counter.wait_for_count.remote(1), timeout=30)
# Signal the CallingActor to call ActorToKill, wait for it to be running,
# then kill ActorToKill.
# Verify that this causes ActorToKill's constructor to run a second time
# and the run method to begin a second time.
ref = c.call_other.remote(run_counter, signal)
ray.get(run_counter.wait_for_count.remote(1), timeout=30)
ray.kill(a, no_restart=False)
ray.get(init_counter.wait_for_count.remote(2), timeout=30)
ray.get(run_counter.wait_for_count.remote(2), timeout=30)
# Signal the run method to finish, verify that the CallingActor returns.
signal.send.remote()
ray.get(ref, timeout=30)
def test_actor_restart_on_node_failure(ray_start_cluster):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_period_milliseconds": 100,
"object_timeout_milliseconds": 1000,
"task_retry_delay_ms": 100,
}
cluster = ray_start_cluster
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
# Node to place the actor.
actor_node = cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
@ray.remote(num_cpus=1, max_restarts=1, max_task_retries=-1)
class RestartableActor:
"""An actor that will be reconstructed at most once."""
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
return self.value
def ready(self):
return
actor = RestartableActor.options(lifetime="detached").remote()
ray.get(actor.ready.remote())
results = [actor.increase.remote() for _ in range(100)]
    # Kill the actor node while the above tasks are still being executed.
cluster.remove_node(actor_node)
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
# Check that none of the tasks failed and the actor is restarted.
seq = list(range(1, 101))
results = ray.get(results)
failed_task_index = None
# Make sure that all tasks were executed in order before and after the
# actor's death.
for i, res in enumerate(results):
elm = seq.pop(0)
if res != elm:
if failed_task_index is None:
failed_task_index = i
assert res + failed_task_index == elm
# Check that we can still call the actor.
result = ray.get(actor.increase.remote())
assert result == 1 or result == results[-1] + 1
@pytest.mark.skipif(new_scheduler_enabled(), reason="dynamic resources todo")
def test_actor_restart_without_task(ray_start_regular):
"""Test a dead actor can be restarted without sending task to it."""
@ray.remote(max_restarts=1, resources={"actor": 1})
class RestartableActor:
def __init__(self):
pass
def get_pid(self):
return os.getpid()
@ray.remote(resources={"actor": 1})
def probe():
return
# Returns whether the "actor" resource is available.
def actor_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
ray.experimental.set_resource("actor", 1)
actor = RestartableActor.remote()
wait_for_condition(lambda: not actor_resource_available())
# Kill the actor.
pid = ray.get(actor.get_pid.remote())
p = probe.remote()
os.kill(pid, SIGKILL)
ray.get(p)
wait_for_condition(lambda: not actor_resource_available())
def test_caller_actor_restart(ray_start_regular):
"""Test tasks from a restarted actor can be correctly processed
by the receiving actor."""
@ray.remote(max_restarts=1)
class RestartableActor:
"""An actor that will be restarted at most once."""
def __init__(self, actor):
self.actor = actor
def increase(self):
return ray.get(self.actor.increase.remote())
def get_pid(self):
return os.getpid()
@ray.remote(max_restarts=1)
class Actor:
"""An actor that will be restarted at most once."""
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
return self.value
remote_actor = Actor.remote()
actor = RestartableActor.remote(remote_actor)
# Call increase 3 times
for _ in range(3):
ray.get(actor.increase.remote())
# kill the actor.
# TODO(zhijunfu): use ray.kill instead.
kill_actor(actor)
# Check that we can still call the actor.
assert ray.get(actor.increase.remote()) == 4
def test_caller_task_reconstruction(ray_start_regular):
"""Test a retried task from a dead worker can be correctly processed
by the receiving actor."""
@ray.remote(max_retries=5)
def RetryableTask(actor):
value = ray.get(actor.increase.remote())
if value > 2:
return value
else:
os._exit(0)
@ray.remote(max_restarts=1)
class Actor:
"""An actor that will be restarted at most once."""
def __init__(self):
self.value = 0
def increase(self):
self.value += 1
return self.value
remote_actor = Actor.remote()
assert ray.get(RetryableTask.remote(remote_actor)) == 3
@pytest.mark.skipif(sys.platform == "win32", reason="Very flaky on Windows.")
# NOTE(hchen): we set object_timeout_milliseconds to 1s for this test.
# If this value is too small, spurious task reconstruction may happen and
# cause the test to fail. If the value is too large, this test could be very
# slow. We can remove this once we support dynamic timeouts.
@pytest.mark.parametrize(
"ray_start_cluster_head", [
generate_system_config_map(
object_timeout_milliseconds=1000, num_heartbeats_timeout=10)
],
indirect=True)
def test_multiple_actor_restart(ray_start_cluster_head):
cluster = ray_start_cluster_head
# This test can be made more stressful by increasing the numbers below.
    # The total number of actors created will be
    # num_actors_at_a_time * (num_nodes + 1).
num_nodes = 5
num_actors_at_a_time = 3
num_function_calls_at_a_time = 10
worker_nodes = [cluster.add_node(num_cpus=3) for _ in range(num_nodes)]
@ray.remote(max_restarts=-1, max_task_retries=-1)
class SlowCounter:
def __init__(self):
self.x = 0
def inc(self, duration):
time.sleep(duration)
self.x += 1
return self.x
# Create some initial actors.
actors = [SlowCounter.remote() for _ in range(num_actors_at_a_time)]
# Wait for the actors to start up.
time.sleep(1)
# This is a mapping from actor handles to object refs returned by
# methods on that actor.
result_ids = collections.defaultdict(lambda: [])
# In a loop we are going to create some actors, run some methods, kill
# a raylet, and run some more methods.
for node in worker_nodes:
# Create some actors.
actors.extend(
[SlowCounter.remote() for _ in range(num_actors_at_a_time)])
# Run some methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(actor.inc.remote(j**2 * 0.000001))
# Kill a node.
cluster.remove_node(node)
# Run some more methods.
for j in range(len(actors)):
actor = actors[j]
for _ in range(num_function_calls_at_a_time):
result_ids[actor].append(actor.inc.remote(j**2 * 0.000001))
# Get the results and check that they have the correct values.
for _, result_id_list in result_ids.items():
results = ray.get(result_id_list)
for i, result in enumerate(results):
if i == 0:
assert result == 1
else:
assert result == results[i - 1] + 1 or result == 1
def kill_actor(actor):
"""A helper function that kills an actor process."""
pid = ray.get(actor.get_pid.remote())
os.kill(pid, SIGKILL)
wait_for_pid_to_exit(pid)
def test_decorated_method(ray_start_regular):
def method_invocation_decorator(f):
def new_f_invocation(args, kwargs):
            # Split one argument into two. Return the kwargs without passing
            # them into the actor.
return f([args[0], args[0]], {}), kwargs
return new_f_invocation
def method_execution_decorator(f):
def new_f_execution(self, b, c):
# Turn two arguments into one.
return f(self, b + c)
new_f_execution.__ray_invocation_decorator__ = (
method_invocation_decorator)
return new_f_execution
@ray.remote
class Actor:
@method_execution_decorator
def decorated_method(self, x):
return x + 1
a = Actor.remote()
object_ref, extra = a.decorated_method.remote(3, kwarg=3)
assert isinstance(object_ref, ray.ObjectRef)
assert extra == {"kwarg": 3}
assert ray.get(object_ref) == 7 # 2 * 3 + 1
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 1,
"num_nodes": 3,
}], indirect=True)
@pytest.mark.skipif(new_scheduler_enabled(), reason="dynamic resources todo")
def test_ray_wait_dead_actor(ray_start_cluster):
"""Tests that methods completed by dead actors are returned as ready"""
cluster = ray_start_cluster
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
pass
def node_id(self):
return ray.worker.global_worker.node.unique_id
def ping(self):
time.sleep(1)
# Create some actors and wait for them to initialize.
num_nodes = len(cluster.list_all_nodes())
actors = [Actor.remote() for _ in range(num_nodes)]
ray.get([actor.ping.remote() for actor in actors])
def actor_dead():
# Ping the actors and make sure the tasks complete.
ping_ids = [actor.ping.remote() for actor in actors]
unready = ping_ids[:]
while unready:
_, unready = ray.wait(unready, timeout=0)
time.sleep(1)
try:
ray.get(ping_ids)
return False
except ray.exceptions.RayActorError:
return True
    # Kill a node that is neither the driver node nor the head node.
cluster.remove_node(get_other_nodes(cluster, exclude_head=True)[-1])
# Repeatedly submit tasks and call ray.wait until the exception for the
# dead actor is received.
wait_for_condition(actor_dead)
# Create an actor on the local node that will call ray.wait in a loop.
head_node_resource = "HEAD_NODE"
ray.experimental.set_resource(head_node_resource, 1)
@ray.remote(num_cpus=0, resources={head_node_resource: 1})
class ParentActor:
def __init__(self):
pass
def wait(self):
return actor_dead()
def ping(self):
return
# Repeatedly call ray.wait through the local actor until the exception for
# the dead actor is received.
parent_actor = ParentActor.remote()
wait_for_condition(lambda: ray.get(parent_actor.wait.remote()))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 1,
"num_nodes": 1,
}], indirect=True)
def test_actor_owner_worker_dies_before_dependency_ready(ray_start_cluster):
"""Test actor owner worker dies before local dependencies are resolved.
This test verifies the scenario where owner worker
has failed before actor dependencies are resolved.
Reference: https://github.com/ray-project/ray/pull/8045
"""
@ray.remote
class Actor:
def __init__(self, dependency):
print("actor: {}".format(os.getpid()))
self.dependency = dependency
def f(self):
return self.dependency
@ray.remote
class Owner:
def get_pid(self):
return os.getpid()
def create_actor(self, caller_handle):
s = SignalActor.remote()
# Create an actor which depends on an object that can never be
# resolved.
actor_handle = Actor.remote(s.wait.remote())
pid = os.getpid()
signal_handle = SignalActor.remote()
caller_handle.call.remote(pid, signal_handle, actor_handle)
            # Wait until the `Caller` starts executing the remote `call` method.
ray.get(signal_handle.wait.remote())
# exit
os._exit(0)
@ray.remote
class Caller:
def call(self, owner_pid, signal_handle, actor_handle):
# Notify the `Owner` that the `Caller` is executing the remote
# `call` method.
ray.get(signal_handle.send.remote())
# Wait for the `Owner` to exit.
wait_for_pid_to_exit(owner_pid)
oid = actor_handle.f.remote()
# It will hang without location resolution protocol.
ray.get(oid)
def hang(self):
return True
owner = Owner.remote()
owner_pid = ray.get(owner.get_pid.remote())
caller = Caller.remote()
owner.create_actor.remote(caller)
# Wait for the `Owner` to exit.
wait_for_pid_to_exit(owner_pid)
# It will hang here if location is not properly resolved.
wait_for_condition(lambda: ray.get(caller.hang.remote()))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_cpus": 3,
"num_nodes": 1,
}], indirect=True)
def test_actor_owner_node_dies_before_dependency_ready(ray_start_cluster):
"""Test actor owner node dies before local dependencies are resolved.
This test verifies the scenario where owner node
has failed before actor dependencies are resolved.
Reference: https://github.com/ray-project/ray/pull/8045
"""
@ray.remote
class Actor:
def __init__(self, dependency):
print("actor: {}".format(os.getpid()))
self.dependency = dependency
def f(self):
return self.dependency
# Make sure it is scheduled in the second node.
@ray.remote(resources={"node": 1})
class Owner:
def get_pid(self):
return os.getpid()
def create_actor(self, caller_handle):
s = SignalActor.remote()
# Create an actor which depends on an object that can never be
# resolved.
actor_handle = Actor.remote(s.wait.remote())
pid = os.getpid()
signal_handle = SignalActor.remote()
caller_handle.call.remote(pid, signal_handle, actor_handle)
            # Wait until the `Caller` starts executing the remote `call` method.
ray.get(signal_handle.wait.remote())
@ray.remote(resources={"caller": 1})
class Caller:
def call(self, owner_pid, signal_handle, actor_handle):
# Notify the `Owner` that the `Caller` is executing the remote
# `call` method.
ray.get(signal_handle.send.remote())
# Wait for the `Owner` to exit.
wait_for_pid_to_exit(owner_pid)
oid = actor_handle.f.remote()
# It will hang without location resolution protocol.
ray.get(oid)
def hang(self):
return True
cluster = ray_start_cluster
node_to_be_broken = cluster.add_node(resources={"node": 1})
cluster.add_node(resources={"caller": 1})
owner = Owner.remote()
owner_pid = ray.get(owner.get_pid.remote())
caller = Caller.remote()
ray.get(owner.create_actor.remote(caller))
cluster.remove_node(node_to_be_broken)
wait_for_pid_to_exit(owner_pid)
# It will hang here if location is not properly resolved.
wait_for_condition(lambda: ray.get(caller.hang.remote()))
def test_recreate_child_actor(ray_start_cluster):
@ray.remote
class Actor:
def __init__(self):
pass
def ready(self):
return
@ray.remote(max_restarts=-1, max_task_retries=-1)
class Parent:
def __init__(self):
self.child = Actor.remote()
def ready(self):
return ray.get(self.child.ready.remote())
def pid(self):
return os.getpid()
ray.init(address=ray_start_cluster.address)
p = Parent.remote()
pid = ray.get(p.pid.remote())
os.kill(pid, 9)
ray.get(p.ready.remote())
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class DatasetOpsTest(test_base.DatasetTestBase, parameterized.TestCase):
def testAsSerializedGraph(self):
dataset = dataset_ops.Dataset.range(10)
with self.cached_session() as sess:
graph = graph_pb2.GraphDef().FromString(
sess.run(dataset._as_serialized_graph()))
      self.assertTrue(any(node.op == "RangeDataset" for node in graph.node))
@staticmethod
def make_apply_fn(dataset):
def apply_fn(dataset):
def _apply_fn(dataset):
return dataset.cache()
return dataset.apply(_apply_fn)
return apply_fn
@staticmethod
def make_gen():
def gen():
yield 42
return gen
@staticmethod
def make_interleave_fn(dataset, num_parallel_calls=None):
def interleave_fn(dataset):
return dataset.interleave(
lambda x: dataset_ops.Dataset.range(0),
cycle_length=2,
num_parallel_calls=num_parallel_calls)
return interleave_fn
@parameterized.named_parameters(
("FixedLengthRecord",
lambda: readers.FixedLengthRecordDataset("", 42)),
("FromGenerator",
lambda: dataset_ops.Dataset.from_generator(
DatasetOpsTest.make_gen(), dtypes.int32),
1),
("FromTensors", lambda: dataset_ops.Dataset.from_tensors([42])),
("FromTensorSlices", lambda: dataset_ops.Dataset.from_tensors([42])),
("Range", lambda: dataset_ops.Dataset.range(10)),
("TextLine", lambda: readers.TextLineDataset("")),
("TFRecord", lambda: readers.TFRecordDataset(""), 1),
)
def testDatasetSimpleSourceInputs(self, dataset_fn, num_inputs=0):
self.assertEqual(num_inputs, len(dataset_fn()._inputs()))
def testDatasetComplexSourceInputs(self):
dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(
sparse_tensor.SparseTensor(
indices=np.array([[0, 0], [1, 0], [2, 0]]),
values=np.array([0, 0, 0]),
dense_shape=np.array([3, 1])))
self.assertEqual(0, len(dataset_fn._inputs()))
@parameterized.named_parameters(
("Batch",
lambda x: x.batch(10),
lambda: dataset_ops.Dataset.range(0)),
("Cache",
lambda x: x.cache(),
lambda: dataset_ops.Dataset.range(0)),
("Filter",
lambda x: x.filter(lambda x: True),
lambda: dataset_ops.Dataset.range(0)),
("FlatMap",
lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)),
lambda: dataset_ops.Dataset.range(0)),
("Map",
lambda x: x.map(lambda x: x),
lambda: dataset_ops.Dataset.range(0)),
("PaddedBatch",
lambda x: x.padded_batch(10, []),
lambda: dataset_ops.Dataset.range(0)),
("ParallelMap",
lambda x: x.map(lambda x: x, num_parallel_calls=2),
lambda: dataset_ops.Dataset.range(0)),
("Repeat",
lambda x: x.repeat(),
lambda: dataset_ops.Dataset.range(0)),
("Shuffle",
lambda x: x.shuffle(10),
lambda: dataset_ops.Dataset.range(0)),
("Skip",
lambda x: x.skip(1),
lambda: dataset_ops.Dataset.range(0)),
("Take",
lambda x: x.take(1),
lambda: dataset_ops.Dataset.range(0)),
("Window",
lambda x: x.window(10),
lambda: dataset_ops.Dataset.range(0)),
)
def testUnaryTransformationInputs(self, dataset_fn, input_dataset_fn):
input_dataset = input_dataset_fn()
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
def testUnaryTransformationInputsApply(self):
input_dataset = dataset_ops.Dataset.range(0)
dataset_fn = self.make_apply_fn(dataset_ops.Dataset.range(0))
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
@parameterized.named_parameters(
("ParallelInterleave",
[lambda: dataset_ops.Dataset.range(0), 2],
lambda: dataset_ops.Dataset.range(0)),
("Interleave",
[lambda: dataset_ops.Dataset.range(0), None],
lambda: dataset_ops.Dataset.range(0)),
)
def testUnaryTransformationInputsWithInterleaveFn(
self, interleave_fn_args, input_dataset_fn):
input_dataset = input_dataset_fn()
dataset_fn = self.make_interleave_fn(*interleave_fn_args)
self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
@parameterized.named_parameters(
("Concatenate", lambda x, y: x.concatenate(y),
lambda: dataset_ops.Dataset.range(0),
lambda: dataset_ops.Dataset.range(1)))
def testBinaryTransformationInputs(self, dataset_fn, input1_fn, input2_fn):
input1 = input1_fn()
input2 = input2_fn()
self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
@parameterized.named_parameters(
("ZipOne",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0))),
("ZipNest",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0),
(dataset_ops.Dataset.range(1),
dataset_ops.Dataset.range(2)))),
("ZipTuple",
dataset_ops.Dataset.zip,
lambda: (dataset_ops.Dataset.range(0),
dataset_ops.Dataset.range(1))),
)
def testVariadicTransformationInputs(self, dataset_fn, input_datasets_fn):
input_datasets = input_datasets_fn()
self.assertEqual(
nest.flatten(input_datasets),
dataset_fn(input_datasets)._inputs())
def testCollectInputs(self):
ds1 = dataset_ops.Dataset.range(0)
ds2 = ds1.concatenate(ds1)
ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
inputs = []
queue = [ds3]
while queue:
ds = queue[0]
queue = queue[1:]
queue.extend(ds._inputs())
inputs.append(ds)
self.assertEqual(5, inputs.count(ds1))
self.assertEqual(2, inputs.count(ds2))
self.assertEqual(1, inputs.count(ds3))
def testOptionsDefault(self):
ds = dataset_ops.Dataset.range(0)
self.assertEqual(dataset_ops.Options(), ds.options())
def testOptionsOnce(self):
options = dataset_ops.Options()
ds = dataset_ops.Dataset.range(0).with_options(options).cache()
self.assertEqual(options, ds.options())
def testOptionsTwiceSame(self):
options = dataset_ops.Options()
options.experimental_autotune = True
ds = dataset_ops.Dataset.range(0).with_options(options).with_options(
options)
self.assertEqual(options, ds.options())
def testOptionsTwiceDifferent(self):
options1 = dataset_ops.Options()
options1.experimental_autotune = True
options2 = dataset_ops.Options()
options2.experimental_filter_fusion = False
ds = dataset_ops.Dataset.range(0).with_options(options1).with_options(
options2)
self.assertTrue(ds.options().experimental_autotune)
# Explicitly check that flag is False since assertFalse allows None
self.assertIs(ds.options().experimental_filter_fusion, False)
def testOptionsTwiceDifferentError(self):
options1 = dataset_ops.Options()
options1.experimental_autotune = True
options2 = dataset_ops.Options()
options2.experimental_autotune = False
with self.assertRaisesRegexp(ValueError,
"Cannot merge incompatible values"):
dataset_ops.Dataset.range(0).with_options(options1).with_options(options2)
def testOptionsMergeOptionsFromMultipleInputs(self):
options1 = dataset_ops.Options()
options1.experimental_autotune = True
options2 = dataset_ops.Options()
options2.experimental_filter_fusion = True
ds = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(0).with_options(options1),
dataset_ops.Dataset.range(0).with_options(options2)))
self.assertTrue(ds.options().experimental_autotune)
self.assertTrue(ds.options().experimental_filter_fusion)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
("Tensor", lambda: constant_op.constant(37.0),
structure.TensorStructure(dtypes.float32, [])),
("SparseTensor", lambda: sparse_tensor.SparseTensor(
indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1]),
structure.SparseTensorStructure(dtypes.int32, [1])),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))},
structure.NestedStructure({
"a": structure.TensorStructure(dtypes.float32, []),
"b": (structure.TensorStructure(dtypes.string, [1]),
structure.TensorStructure(dtypes.string, []))})),
("Dataset", lambda: dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([1, 2, 3])),
dataset_ops.DatasetStructure(
structure.TensorStructure(dtypes.int32, []))),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalStructure(
structure.TensorStructure(dtypes.float32, []))),
)
def testDatasetStructure(self, tf_value_fn, expected_element_structure):
dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value_fn())
dataset_structure = structure.Structure.from_value(dataset)
self.assertIsInstance(dataset_structure, dataset_ops.DatasetStructure)
# TODO(b/110122868): Add a public API to `tf.data.Dataset` for accessing
# the element structure.
self.assertTrue(expected_element_structure.is_compatible_with(
dataset_structure._element_structure))
self.assertTrue(dataset_structure._element_structure.is_compatible_with(
expected_element_structure))
self.assertEqual([dtypes.variant], dataset_structure._flat_types)
self.assertEqual([tensor_shape.scalar()], dataset_structure._flat_shapes)
# Assert that the `Dataset` survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_dataset = dataset_structure._from_tensor_list(
dataset_structure._to_tensor_list(dataset))
value = tf_value_fn()
if isinstance(value, dataset_ops.Dataset):
self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x))
elif isinstance(value, optional_ops.Optional):
self.assertDatasetProduces(
round_trip_dataset.map(lambda opt: opt.get_value()),
[self.evaluate(value.get_value())],
requires_initialization=True)
else:
self.assertDatasetProduces(
round_trip_dataset, [self.evaluate(tf_value_fn())],
requires_initialization=True)
if __name__ == "__main__":
test.main()
|
|
"""Tests for light platform."""
from typing import Optional
from unittest.mock import PropertyMock
import pytest
from homeassistant.components import tplink
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_MODE,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_RGB_COLOR,
ATTR_SUPPORTED_COLOR_MODES,
ATTR_TRANSITION,
ATTR_XY_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.components.tplink.const import DOMAIN
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from . import MAC_ADDRESS, _mocked_bulb, _patch_discovery, _patch_single_discovery
from tests.common import MockConfigEntry
async def test_light_unique_id(hass: HomeAssistant) -> None:
"""Test a light unique id."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.color_temp = None
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
entity_registry = er.async_get(hass)
assert entity_registry.async_get(entity_id).unique_id == "AABBCCDDEEFF"
@pytest.mark.parametrize("transition", [2.0, None])
async def test_color_light(hass: HomeAssistant, transition: Optional[float]) -> None:
"""Test a color light and that all transitions are correctly passed."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.color_temp = None
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
KASA_TRANSITION_VALUE = transition * 1_000 if transition is not None else None
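    # ATTR_TRANSITION is given in seconds; the bulb API takes milliseconds,
    # hence the factor of 1_000 above.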
BASE_PAYLOAD = {ATTR_ENTITY_ID: entity_id}
if transition:
BASE_PAYLOAD[ATTR_TRANSITION] = transition
state = hass.states.get(entity_id)
assert state.state == "on"
attributes = state.attributes
assert attributes[ATTR_BRIGHTNESS] == 128
assert attributes[ATTR_COLOR_MODE] == "hs"
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness", "color_temp", "hs"]
assert attributes[ATTR_MIN_MIREDS] == 111
assert attributes[ATTR_MAX_MIREDS] == 250
assert attributes[ATTR_HS_COLOR] == (10, 30)
assert attributes[ATTR_RGB_COLOR] == (255, 191, 178)
assert attributes[ATTR_XY_COLOR] == (0.42, 0.336)
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", BASE_PAYLOAD, blocking=True
)
bulb.turn_off.assert_called_once_with(transition=KASA_TRANSITION_VALUE)
await hass.services.async_call(LIGHT_DOMAIN, "turn_on", BASE_PAYLOAD, blocking=True)
bulb.turn_on.assert_called_once_with(transition=KASA_TRANSITION_VALUE)
bulb.turn_on.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{**BASE_PAYLOAD, ATTR_BRIGHTNESS: 100},
blocking=True,
)
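    # An HA brightness of 100 on the 0-255 scale maps to 39% on the bulb
    # (100 / 255 * 100 ~= 39).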
bulb.set_brightness.assert_called_with(39, transition=KASA_TRANSITION_VALUE)
bulb.set_brightness.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{**BASE_PAYLOAD, ATTR_COLOR_TEMP: 150},
blocking=True,
)
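    # 150 mireds converts to 1,000,000 / 150 ~= 6666 K, which is what the bulb
    # receives.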
bulb.set_color_temp.assert_called_with(
6666, brightness=None, transition=KASA_TRANSITION_VALUE
)
bulb.set_color_temp.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{**BASE_PAYLOAD, ATTR_HS_COLOR: (10, 30)},
blocking=True,
)
bulb.set_hsv.assert_called_with(10, 30, None, transition=KASA_TRANSITION_VALUE)
bulb.set_hsv.reset_mock()
async def test_color_light_no_temp(hass: HomeAssistant) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_variable_color_temp = False
type(bulb).color_temp = PropertyMock(side_effect=Exception)
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "on"
attributes = state.attributes
assert attributes[ATTR_BRIGHTNESS] == 128
assert attributes[ATTR_COLOR_MODE] == "hs"
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness", "hs"]
assert attributes[ATTR_HS_COLOR] == (10, 30)
assert attributes[ATTR_RGB_COLOR] == (255, 191, 178)
assert attributes[ATTR_XY_COLOR] == (0.42, 0.336)
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_off.assert_called_once()
await hass.services.async_call(
LIGHT_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_on.assert_called_once()
bulb.turn_on.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 100},
blocking=True,
)
bulb.set_brightness.assert_called_with(39, transition=None)
bulb.set_brightness.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: entity_id, ATTR_HS_COLOR: (10, 30)},
blocking=True,
)
bulb.set_hsv.assert_called_with(10, 30, None, transition=None)
bulb.set_hsv.reset_mock()
@pytest.mark.parametrize("is_color", [True, False])
async def test_color_temp_light(hass: HomeAssistant, is_color: bool) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_color = is_color
bulb.color_temp = 4000
bulb.is_variable_color_temp = True
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "on"
attributes = state.attributes
assert attributes[ATTR_BRIGHTNESS] == 128
assert attributes[ATTR_COLOR_MODE] == "color_temp"
if bulb.is_color:
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == [
"brightness",
"color_temp",
"hs",
]
else:
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness", "color_temp"]
assert attributes[ATTR_MIN_MIREDS] == 111
assert attributes[ATTR_MAX_MIREDS] == 250
assert attributes[ATTR_COLOR_TEMP] == 250
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_off.assert_called_once()
await hass.services.async_call(
LIGHT_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_on.assert_called_once()
bulb.turn_on.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 100},
blocking=True,
)
bulb.set_brightness.assert_called_with(39, transition=None)
bulb.set_brightness.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: entity_id, ATTR_COLOR_TEMP: 150},
blocking=True,
)
bulb.set_color_temp.assert_called_with(6666, brightness=None, transition=None)
bulb.set_color_temp.reset_mock()
async def test_brightness_only_light(hass: HomeAssistant) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_color = False
bulb.is_variable_color_temp = False
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "on"
attributes = state.attributes
assert attributes[ATTR_BRIGHTNESS] == 128
assert attributes[ATTR_COLOR_MODE] == "brightness"
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["brightness"]
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_off.assert_called_once()
await hass.services.async_call(
LIGHT_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_on.assert_called_once()
bulb.turn_on.reset_mock()
await hass.services.async_call(
LIGHT_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 100},
blocking=True,
)
bulb.set_brightness.assert_called_with(39, transition=None)
bulb.set_brightness.reset_mock()
async def test_on_off_light(hass: HomeAssistant) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_color = False
bulb.is_variable_color_temp = False
bulb.is_dimmable = False
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "on"
attributes = state.attributes
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["onoff"]
await hass.services.async_call(
LIGHT_DOMAIN, "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_off.assert_called_once()
await hass.services.async_call(
LIGHT_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_on.assert_called_once()
bulb.turn_on.reset_mock()
async def test_off_at_start_light(hass: HomeAssistant) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_color = False
bulb.is_variable_color_temp = False
bulb.is_dimmable = False
bulb.is_on = False
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "off"
attributes = state.attributes
assert attributes[ATTR_SUPPORTED_COLOR_MODES] == ["onoff"]
async def test_dimmer_turn_on_fix(hass: HomeAssistant) -> None:
"""Test a light."""
already_migrated_config_entry = MockConfigEntry(
domain=DOMAIN, data={}, unique_id=MAC_ADDRESS
)
already_migrated_config_entry.add_to_hass(hass)
bulb = _mocked_bulb()
bulb.is_dimmer = True
bulb.is_on = False
with _patch_discovery(device=bulb), _patch_single_discovery(device=bulb):
await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
await hass.async_block_till_done()
entity_id = "light.my_bulb"
state = hass.states.get(entity_id)
assert state.state == "off"
await hass.services.async_call(
LIGHT_DOMAIN, "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
bulb.turn_on.assert_called_once_with(transition=1)
bulb.turn_on.reset_mock()
|
|
import unittest
from ctypes import *
import re, sys
from ctypes.test import xfail
if sys.byteorder == "little":
THIS_ENDIAN = "<"
OTHER_ENDIAN = ">"
else:
THIS_ENDIAN = ">"
OTHER_ENDIAN = "<"
def normalize(format):
# Remove current endian specifier and white space from a format
# string
if format is None:
return ""
format = format.replace(OTHER_ENDIAN, THIS_ENDIAN)
return re.sub(r"\s", "", format)
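# For example, on a little-endian machine normalize(">l") == "<l" (the foreign
# endian marker is folded into the native one) and
# normalize("T{<l:x: <l:y:}") == "T{<l:x:<l:y:}" (whitespace is stripped).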
class Test(unittest.TestCase):
@xfail
def test_native_types(self):
for tp, fmt, shape, itemtp in native_types:
ob = tp()
v = memoryview(ob)
try:
self.assertEqual(normalize(v.format), normalize(fmt))
if shape:
self.assertEqual(len(v), shape[0])
else:
self.assertEqual(len(v) * sizeof(itemtp), sizeof(ob))
self.assertEqual(v.itemsize, sizeof(itemtp))
self.assertEqual(v.shape, shape)
# XXX Issue #12851: PyCData_NewGetBuffer() must provide strides
# if requested. memoryview currently reconstructs missing
# stride information, so this assert will fail.
# self.assertEqual(v.strides, ())
# they are always read/write
self.assertFalse(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.assertEqual(n * v.itemsize, len(v.tobytes()))
except:
# so that we can see the failing type
print(tp)
raise
@xfail
def test_endian_types(self):
for tp, fmt, shape, itemtp in endian_types:
ob = tp()
v = memoryview(ob)
try:
self.assertEqual(v.format, fmt)
if shape:
self.assertEqual(len(v), shape[0])
else:
self.assertEqual(len(v) * sizeof(itemtp), sizeof(ob))
self.assertEqual(v.itemsize, sizeof(itemtp))
self.assertEqual(v.shape, shape)
# XXX Issue #12851
# self.assertEqual(v.strides, ())
# they are always read/write
self.assertFalse(v.readonly)
if v.shape:
n = 1
for dim in v.shape:
n = n * dim
self.assertEqual(n, len(v))
except:
# so that we can see the failing type
print(tp)
raise
# define some structure classes
class Point(Structure):
_fields_ = [("x", c_long), ("y", c_long)]
class PackedPoint(Structure):
_pack_ = 2
_fields_ = [("x", c_long), ("y", c_long)]
class Point2(Structure):
pass
Point2._fields_ = [("x", c_long), ("y", c_long)]
class EmptyStruct(Structure):
_fields_ = []
class aUnion(Union):
_fields_ = [("a", c_int)]
class StructWithArrays(Structure):
_fields_ = [("x", c_long * 3 * 2), ("y", Point * 4)]
class Incomplete(Structure):
pass
class Complete(Structure):
pass
PComplete = POINTER(Complete)
Complete._fields_ = [("a", c_long)]
################################################################
#
# This table contains format strings as they look on little endian
# machines. The test replaces '<' with '>' on big endian machines.
#
native_types = [
# type format shape calc itemsize
## simple types
(c_char, "<c", (), c_char),
(c_byte, "<b", (), c_byte),
(c_ubyte, "<B", (), c_ubyte),
(c_short, "<h", (), c_short),
(c_ushort, "<H", (), c_ushort),
# c_int and c_uint may be aliases to c_long
#(c_int, "<i", (), c_int),
#(c_uint, "<I", (), c_uint),
(c_long, "<l", (), c_long),
(c_ulong, "<L", (), c_ulong),
# c_longlong and c_ulonglong are aliases on 64-bit platforms
#(c_longlong, "<q", None, c_longlong),
#(c_ulonglong, "<Q", None, c_ulonglong),
(c_float, "<f", (), c_float),
(c_double, "<d", (), c_double),
# c_longdouble may be an alias to c_double
(c_bool, "<?", (), c_bool),
(py_object, "<O", (), py_object),
## pointers
(POINTER(c_byte), "&<b", (), POINTER(c_byte)),
(POINTER(POINTER(c_long)), "&&<l", (), POINTER(POINTER(c_long))),
## arrays and pointers
(c_double * 4, "<d", (4,), c_double),
(c_float * 4 * 3 * 2, "<f", (2,3,4), c_float),
(POINTER(c_short) * 2, "&<h", (2,), POINTER(c_short)),
(POINTER(c_short) * 2 * 3, "&<h", (3,2,), POINTER(c_short)),
(POINTER(c_short * 2), "&(2)<h", (), POINTER(c_short)),
## structures and unions
(Point, "T{<l:x:<l:y:}", (), Point),
# packed structures do not implement the pep
(PackedPoint, "B", (), PackedPoint),
(Point2, "T{<l:x:<l:y:}", (), Point2),
(EmptyStruct, "T{}", (), EmptyStruct),
    # the PEP doesn't support unions
(aUnion, "B", (), aUnion),
# structure with sub-arrays
(StructWithArrays, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", (), StructWithArrays),
(StructWithArrays * 3, "T{(2,3)<l:x:(4)T{<l:x:<l:y:}:y:}", (3,), StructWithArrays),
## pointer to incomplete structure
(Incomplete, "B", (), Incomplete),
(POINTER(Incomplete), "&B", (), POINTER(Incomplete)),
# 'Complete' is a structure that starts incomplete, but is completed after the
# pointer type to it has been created.
(Complete, "T{<l:a:}", (), Complete),
# Unfortunately the pointer format string is not fixed...
(POINTER(Complete), "&B", (), POINTER(Complete)),
## other
# function signatures are not implemented
(CFUNCTYPE(None), "X{}", (), CFUNCTYPE(None)),
]
class BEPoint(BigEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
class LEPoint(LittleEndianStructure):
_fields_ = [("x", c_long), ("y", c_long)]
################################################################
#
# This table contains format strings as they really look, on both big
# and little endian machines.
#
endian_types = [
(BEPoint, "T{>l:x:>l:y:}", (), BEPoint),
(LEPoint, "T{<l:x:<l:y:}", (), LEPoint),
(POINTER(BEPoint), "&T{>l:x:>l:y:}", (), POINTER(BEPoint)),
(POINTER(LEPoint), "&T{<l:x:<l:y:}", (), POINTER(LEPoint)),
]
if __name__ == "__main__":
unittest.main()
|
|
"""Classes utilized by the Seqparse class."""
# Standard Libraries
import os
from collections import MutableMapping, MutableSet
from functools import total_ordering
from .files import File
from .sequences import FileSequence
__all__ = ("FileExtension", "FileSequenceContainer", "SingletonContainer")
###############################################################################
# Class: FileExtension
class FileExtension(MutableMapping):
"""
Container for frame sequences, indexed by zero-padding.
Args:
name (str, optional): The file extension used by the contents of the
            container (e.g., "exr", "tif").
parent (FileSequenceContainer, optional): The container from which this
instance was spawned.
"""
_CHILD_CLASS = FileSequence
def __init__(self, name=None, parent=None):
"""Initialise the instance."""
self._data = dict()
self._name = None
self._parent = None
self.name = name
self.parent = parent
def __delitem__(self, key):
"""Define key deletion logic (per standard dictionary)."""
del self._data[key]
def __getitem__(self, key):
"""Define key getter logic (per collections.defaultdict)."""
if key not in self._data:
opts = dict(ext=self.name, pad=key)
if self.parent:
opts.update(name=self.parent.full_name)
self._data[key] = self._CHILD_CLASS(**opts)
return self._data[key]
def __iter__(self):
"""Define key iteration logic (per standard dictionary)."""
return iter(self._data)
def __len__(self):
"""Define item length logic (per standard dictionary)."""
return len(self._data)
def __repr__(self): # pragma: no cover
"""Pretty representation of the instance."""
blurb = "{cls}(name={name!r}, pads={pads})"
return blurb.format(
cls=type(self).__name__, name=self.name, pads=sorted(self))
def __setitem__(self, key, value):
"""Define item setting logic (per standard dictionary)."""
if isinstance(value, (list, tuple, set)):
opts = dict(ext=self.name, frames=value, pad=key)
if self.parent:
opts.update(name=self.parent.full_name)
value = self._CHILD_CLASS(**opts)
if not isinstance(value, self._CHILD_CLASS):
blurb = 'Container may only hold "{}" instances ("{}" provided)'
raise ValueError(
blurb.format(self._CHILD_CLASS.__name__, type(value).__name__))
self._data[key] = value
@property
def name(self):
"""str: name of the file extension."""
return self._name
@name.setter
def name(self, val):
self._name = None
if val:
self._name = str(val)
@property
def parent(self):
"""FileSequenceContainer: parent of the instance."""
return self._parent
@parent.setter
def parent(self, val):
self._parent = None
if isinstance(val, FileSequenceContainer):
self._parent = val
def output(self):
"""
        Calculate a sorted list of all contained frame sequences.
Yields:
FrameSequence, sorted by zero-pad length.
"""
# First, check to see if we need to consolidate our file sequences.
data = sorted(list(self.items()), reverse=True)
while len(data) > 1:
pad, fseq = data.pop(0)
        # NOTE: accessing is_padded will force recalculation if the object is
        # dirty.
if not fseq.is_padded:
prev_fseq = data[0][1]
prev_fseq.update(fseq)
del self[pad]
for pad in sorted(self):
yield self[pad]
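# A minimal usage sketch (the extension name, pad, and frame numbers below are
# purely illustrative):
#
#   ext = FileExtension(name="exr")
#   ext[4] = [1, 2, 3]           # wrapped in a FileSequence with pad=4
#   for fseq in ext.output():    # yields FileSequence objects, sorted by pad
#       print(fseq)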
###############################################################################
# Class: FileSequenceContainer
@total_ordering
class FileSequenceContainer(MutableMapping):
"""
Container for file sequences, indexed by file extension.
Args:
name (str, optional): Base name of the contained files.
file_path (str, optional): Directory in which the contained files
reside.
"""
_CHILD_CLASS = FileExtension
def __init__(self, name=None, file_path=None):
"""Initialise the instance."""
self._data = dict()
self._full = None
self._name = None
self._path = None
self.name = name
self.path = file_path
def __delitem__(self, key):
"""Define key deletion logic (per standard dictionary)."""
del self._data[key]
def __eq__(self, other):
"""
Define equality between instances.
NOTE: Equality is solely based upon comparison of the "full_name"
property and is only used for output sorting.
"""
if type(other) is type(self):
return self.full_name == other.full_name
return False
def __getitem__(self, key):
"""Define key getter logic (per collections.defaultdict)."""
if key not in self._data:
self._data[key] = self._CHILD_CLASS(name=key, parent=self)
return self._data[key]
def __iter__(self):
"""Define key iteration logic (per standard dictionary)."""
return iter(self._data)
def __len__(self):
"""Define item length logic (per standard dictionary)."""
return len(self._data)
def __lt__(self, other):
"""
Define whether one instance may be sorted below another.
NOTE: Equality is solely based upon comparison of the "full_name"
property and is only used for output sorting.
"""
if type(other) is type(self):
return self.full_name < other.full_name
return True
def __repr__(self): # pragma: no cover
"""Pretty representation of the instance."""
blurb = "{cls}(full_name={full_name!r}, exts={exts})"
return blurb.format(
cls=type(self).__name__,
exts=sorted(self),
full_name=self.full_name)
def __setitem__(self, key, value):
"""Define item setting logic (per standard dictionary)."""
if not isinstance(value, self._CHILD_CLASS):
blurb = 'Container may only hold "{}" instances ("{}" provided)'
raise ValueError(
blurb.format(self._CHILD_CLASS.__name__, type(value).__name__))
elif key != value.name:
blurb = ("Key value must match extension name of provided value "
"({!r} != {!r})")
raise ValueError(blurb.format(key, value.name))
self._data[key] = value
# Overriding child container's name to match!
value.name = self.full_name
@property
def full_name(self):
"""str: Full (base) name of the file sequence."""
return self._full
@property
def name(self):
"""str: Base name of the file sequence (no containing directory)."""
return self._name
@name.setter
def name(self, val):
self._name = None
if val:
self._name = str(val)
self._full = os.path.join(self._path or "", self._name or "")
@property
def path(self):
"""str: directory in which the contained files reside."""
return self._path
@path.setter
def path(self, val):
self._path = None
if val:
self._path = str(val)
self._full = os.path.join(self._path or "", self._name or "")
def output(self):
"""
Calculate a sorted list of all contained file sequences.
Yields:
FileSequence, sorted (in order) by file path, extension, and zero-
padding length.
"""
for data in sorted(self.values()):
for file_seq in data.output():
yield file_seq
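# Illustrative usage sketch for FileSequenceContainer, using a hypothetical
# name and path. Indexing with an unknown extension auto-creates a
# FileExtension child (defined earlier in this module); the child's output()
# behaviour is assumed here rather than shown.
def _file_sequence_container_sketch():
    """Minimal sketch of populating and emitting a container."""
    container = FileSequenceContainer(name="render", file_path="/shots/sq010")
    exr_ext = container["exr"]   # missing keys auto-create a FileExtension child
    print(container.full_name)   # "/shots/sq010/render"
    # output() walks extensions in sorted order, yielding their file sequences
    return exr_ext, list(container.output())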
###############################################################################
# class: SingletonContainer
class SingletonContainer(MutableSet):
"""
Container for singleton files, indexed alphabetically by file path.
Args:
file_names (list-like of str, optional): List of base file names to
store in the container.
file_path (str, optional): Directory in which the contained files
reside.
"""
def __init__(self, file_names=None, file_path=None):
"""Initialise the instance."""
self._data = set()
self._path = None
self._stat = dict()
for item in file_names or []:
self.add(item)
self.path = file_path
def __contains__(self, item):
"""Defining containment logic (per standard set)."""
return str(item) in self._data
def __iter__(self):
"""Defining item iteration logic (per standard set)."""
return iter(self._data)
def __len__(self):
"""Defining item length logic (per standard set)."""
return len(self._data)
def __repr__(self): # pragma: no cover
"""Pretty representation of the instance."""
blurb = "%s(path='%s', files=set(%s))"
return blurb % (type(self).__name__, self.path, sorted(self._data))
def __str__(self):
"""String reprentation of the singleton files."""
return "\n".join(list(map(str, self.output())))
@property
def path(self):
"""Directory in which the contained files are located."""
return self._path
@path.setter
def path(self, val):
self._path = str(val or "")
def add(self, value):
"""Defining value addition logic (per standard set)."""
self._data.add(str(value))
def discard(self, value):
"""Defining value discard logic (per standard set)."""
self._data.discard(str(value))
def update(self, value):
"""Defining update logic (per standard set)."""
for item in value:
self.add(item)
def cache_stat(self, base_name, input_stat):
"""
Cache file system stat data for the specified file base name.
Input disk stat value will be stored in a new stat_result
instance.
Args:
base_name (str): Base name of the file for which the supplied disk
stats are being cached.
input_stat (stat_result): Value that you'd like to cache.
Returns:
stat_result that was successfully cached.
"""
from . import get_stat_result
self._stat[base_name] = get_stat_result(input_stat)
return self._stat[base_name]
def output(self):
"""
Calculate a sorted list of all contained singleton files.
Yields:
File, sorted alphabetically.
"""
for file_name in sorted(self):
yield File(
os.path.join(self.path, file_name), self.stat(file_name))
def stat(self, base_name=None):
"""
Individual file system status, indexed by base name.
This method only returns cached disk stats (if any exist). Use the
`cache_stat` method if you'd like to set new values.
Args:
base_name (str, optional): Base name of the file for which you'd
like to return the disk stats.
Returns:
None if a file has been specified but disk stats have not been
cached.
stat_result if a file has been specified and disk stats have
been previously cached.
dict of disk stats, indexed by str base name if no name has been
specified.
"""
if base_name is None:
return self._stat
return self._stat.get(base_name, None)
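# Illustrative usage sketch for SingletonContainer, using hypothetical file
# names and paths. It only exercises the methods defined above; the File class
# and the package-level get_stat_result helper are assumed to be provided
# elsewhere in this package.
def _singleton_container_sketch():
    """Minimal sketch showing how singleton files might be collected."""
    container = SingletonContainer(
        file_names=["notes.txt", "thumbnail.jpg"], file_path="/tmp/project")
    container.add("README.md")         # values are coerced to str on add
    container.update(["extra.log"])    # update() simply add()s each item
    # output() yields File objects sorted alphabetically by base name
    for singleton in container.output():
        print(singleton)
    # stat() returns the cached stat_result for a name, or None if uncached
    return container.stat("README.md")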
#
# Protein Engineering Analysis Tool Structure Analysis (PEATSA)
# Copyright (C) 2010 Michael Johnston & Jens Erik Nielsen
#
# Author: Michael Johnston
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
'''Contains functions and classes related to handling PEAT-SA's environment'''
import Exceptions, ConfigParser, os, sys, shutil, math, time, tempfile, datetime, operator
#Assign the location of the module to a variable
moduleLocation = os.path.dirname(__file__)
moduleLocation = os.path.abspath(moduleLocation)
#Add it to the path so we can always access the modules
sys.path.insert(0, moduleLocation)
#Create the resource path
resources = os.path.join(moduleLocation, 'Resources')
#Required files for any alanine scan
requiredPKADesignFiles = ['DELCRG.DAT', 'DELRAD.DAT', 'TOPOLOGY.H']
#Required files for the specific pdb
proteinScanFilesExtensions = ['BACKGR.DAT', 'DESOLV.DAT', 'MATRIX.DAT', 'PKA.DAT', 'TITCURV.DAT']
#The global configuration instance to be used by default by all other objects.
appConfiguration = None
def resourceDirectory():
'''Returns the directory containing the resources used by the ProteinDesignTool'''
return resources
def UffbapsResourceDirectory():
'''Returns the directory containing the files necessary for running genialtNav from UFFBAPS
genialtNav must be run in this directory'''
return os.path.join(resourceDirectory(), 'UFFBAPSRun')
def RequiredScanFilesDirectory():
'''Returns the paths to the directory containing the required scan files'''
return os.path.join(resourceDirectory(), 'RequiredScanFiles')
class Configuration:
'''Creates instances representing the options for a design run.
The returned instance is polymorphic with the Python ConfigParser class.
That is, all of that class's methods can also be used with this class.'''
scanSections = ['PKA SCAN PARAMETERS', 'PKA SCAN METHOD', 'PKA SCAN OUTPUT']
requiredOptions = {'PKA SCAN PARAMETERS':['pKMCsteps', 'recalc_intpka', 'recalc_intpka_dist',
'use_titration_curves', 'calc_dpka', 'mutation_quality',
'generate_mutations'],
'PKA SCAN METHOD':['dpKa_method', 'tabulated', 'MCsteps', 'PBEsolver'],
'PKA SCAN OUTPUT':['verbose','save_solutions'],
'PATHS':['pKa Tools','uffbaps']}
def __validateConfiguration(self, object, errorList=None):
'''Checks if object is a valid configuration object
Parameters:
object - A ConfigParser instance
errorList - An empty list
Returns:
True if object contains all the required options.
False otherwise.
If False and errorList was supplied, it contains a list of what was missing'''
if errorList == None:
errorList = []
requiredSections = Configuration.requiredOptions.keys()
#Check all sections are present
#and all required options in that section are present
for section in requiredSections:
if object.has_section(section):
options = object.options(section)
required = Configuration.requiredOptions[section]
for option in required:
if options.count(option) == 0:
errorList.append('Missing option %s' % option)
else:
errorList.append('Missing section %s' % section)
if len(errorList) == 0:
retval = True
else:
retval = False
return retval
def __readConfigurationsFromDefaultLocations(self):
'''Checks whether a configuration file exists at a set of default locations and reads it
The locations are read in the following order, with values from later
locations overriding earlier ones:
- the user's home directory
- the directory given by the PEATSA_CONFIGURATION_PATH environment variable
- the current directory
Returns:
The configuration object if one was successfully created.
None otherwise.
Exceptions:
Raises an Exceptions.ConfigurationError if the configuration is
missing any of the required sections.'''
default = ConfigParser.SafeConfigParser()
default.optionxform = str
#First check if any configuration files exist in default locations
current = os.path.abspath('proteinDesignTool.conf')
home = os.path.join(os.path.expanduser('~'), 'proteinDesignTool.conf')
array = [home, current]
if os.environ.has_key('PEATSA_CONFIGURATION_PATH'):
env = os.environ['PEATSA_CONFIGURATION_PATH']
env = os.path.join(env, 'proteinDesignTool.conf')
array.insert(1, env)
#Note: this reads $HOME first (if it exists) and then overrides it with the later locations
result = default.read(array)
if len(result) == 0:
default = None
else:
#Check if all necessary sections are present
list = []
if not self.__validateConfiguration(default, list):
raise Exceptions.ConfigurationError, "Configuration file %s not valid.\n%s" % (result, list)
return default
def __defaultConfiguration(self, writeFile=True):
'''Creates a default ConfigParser object instances of this class can use
Parameters -
writeFile - If True writes a default configuration file in the current directory'''
default = ConfigParser.SafeConfigParser()
default.optionxform = str
#Add default configuration values
default.add_section('PKA SCAN PARAMETERS')
default.add_section('PKA SCAN METHOD')
default.add_section('PKA SCAN OUTPUT')
default.add_section('PATHS')
default.add_section('WORKING DIRECTORY')
default.add_section('PDBS')
# pKa calculation parameters
default.set('PKA SCAN PARAMETERS', 'pKMCsteps', str(200000))
default.set('PKA SCAN PARAMETERS', 'recalc_intpka', str(1))
default.set('PKA SCAN PARAMETERS', 'recalc_intpka_dist', str(20))
default.set('PKA SCAN PARAMETERS', 'use_titration_curves', str(1))
default.set('PKA SCAN PARAMETERS', 'calc_dpka', str(1))
default.set('PKA SCAN PARAMETERS', 'generate_mutations', str(False))
default.set('PKA SCAN PARAMETERS', 'save_temp_files', str(False))
default.set('PKA SCAN PARAMETERS', 'mutation_quality', str(0.5))
# Method
default.set('PKA SCAN METHOD', 'dpKa_method', 'MC')
default.set('PKA SCAN METHOD', 'tabulated', str(1))
default.set('PKA SCAN METHOD', 'MCsteps', str(0))
default.set('PKA SCAN METHOD', 'PBEsolver', 'DelPhi')
# Be not-so-noisy
default.set('PKA SCAN OUTPUT', 'verbose', str(1))
# Do not save the solutions
default.set('PKA SCAN OUTPUT', 'save_solutions', str(None))
#Set a default location for the pKa Tools
default.set('PATHS', 'pKa Tools', os.path.join(os.path.expanduser('~'), 'PEAT'))
default.set('PATHS', 'uffbaps', os.path.join(os.path.expanduser('~'), 'PEAT/UFFBAPS'))
#Working directory configuration (optional)
#Users may not want to overwrite files in an existing working dir
#These options configure this behaviour
default.set('WORKING DIRECTORY', 'copyIfExists', str(0))
default.set('WORKING DIRECTORY', 'copyLocation', os.getcwd())
default.set('WORKING DIRECTORY', 'overwriteExistingCopies', str(0))
default.set('WORKING DIRECTORY', 'useUniqueID', str(0))
default.set('PDBS', 'pdbCleanProgram', 'WHATIF')
#If requested write out this default configuration
#wherever the program is being run from
if writeFile:
file = open('proteinDesignTool.conf', 'w')
default.write(file)
file.close()
return default
def __init__(self, filename=None, searchDefaultLocations=True, writeFile=False):
'''Initialises a new configuration object.
Parameters
filename: Optional name of a file containing valid options.
If a filename is not specified default values are used.
searchDefaultLocations: If filename is None and this is True
then a number of default places are searched for a configuration file
writeFile: If True a default configuration file will be written
in the current directory if no filename is passed AND either no configuration file
is found in a default location OR searchDefaultLocations = False
Exceptions:
Raises an Exceptions.ConfigurationError if a filename is given and the
file does not contain the required options'''
self.configuration = None
self.environment = Environment()
#If a filename was provided read from it.
#Otherwise search in default locations or create default options
if filename == None:
if searchDefaultLocations:
self.configuration = self.__readConfigurationsFromDefaultLocations()
#If the above failed or searchDefaultLocations was false create a default configuration
if self.configuration == None:
print 'No configuration file found - creating default'
self.configuration = self.__defaultConfiguration(writeFile=writeFile)
else:
self.configuration = ConfigParser.SafeConfigParser()
self.configuration.optionxform = str
self.configuration.read([filename])
#Validate the file
list = []
if not self.__validateConfiguration(self.configuration, list):
raise Exceptions.ConfigurationError, "Configuration file %s not valid.\n%s" % (filename, list)
#Add specified pKa Tool path to sys.path
modulePath = self.configuration.get('PATHS', 'pKa Tools')
modulePath = os.path.abspath(modulePath)
self.pKaToolAvailable = True
if modulePath != None:
#Check is a directory
if not os.path.isdir(modulePath):
self.pKaToolAvailable = False
self.environment.output("Invalid value given for location of pKa Tools (%s) - file %s. pKa function disabled" % (modulePath, filename))
#Append it to the path if its not already present
elif sys.path.count(modulePath) == 0:
sys.path.insert(0, modulePath)
#If this is the first Configuration object created set it as the scripts global configuration
global appConfiguration
if appConfiguration is None:
appConfiguration = self
def __cmp__(self, object):
'''Compares by checking if every section and option in the configuration objects are the same.'''
equal = 0
#If at any stage the objects are detected to be unequal a KeyError is raised.
try:
#Check sections are the same
sections = object.sections()
if sections != self.configuration.sections():
raise KeyError
#Check all the option names are the same in each section
for section in sections:
#Order of elements could be different - use sets to compare
if set(self.configuration.options(section)) == set(object.options(section)):
#If all the option names are the same check all the option values are the same
#Use same list of options for both so we get the same order of values
#in the list comprehensions
options = self.configuration.options(section)
valuesOne = [self.configuration.get(section, option) for option in options]
valuesTwo = [object.get(section, option) for option in options]
if valuesOne != valuesTwo:
raise KeyError
else:
raise KeyError
except KeyError:
#Specifically happens when the object is polymorphic with a ConfigParser
#but doesn't have the same options
equal = -1
except BaseException:
#Happens if the object is not polymorphic
equal = -1
return equal
def __getattr__(self, aname):
'''Forward all attribute access we don't know about to the configuration object'''
return getattr(self.configuration, aname)
def writeToFile(self, filename):
'''Writes the configuration object to a file
Exceptions
Raises an IOError if file cannot be created or written to'''
file = open(filename, 'w')
self.configuration.write(file)
file.close()
def designPKAOptions(self):
'''Returns the options for the scan calculation in the format used by Design_pKa.run_opt
Exceptions
Raises an Exceptions.EnvironmentError if the pKa modules cannot be found'''
try:
import pKa.Design_pKa as Design_pKa
except ImportError:
raise Exceptions.ConfigurationError, "Cannot located Design_pKa module. Check correct path specified in configuration"
#Get the default options
defaults = Design_pKa.get_defaults()
#Go through sections and set values
for section in Configuration.scanSections:
for option in self.configuration.options(section):
#print option
#print defaults[option]
#Do some translating
if self.configuration.get(section, option) == 'None':
defaults[option][0] = None
#print 'None conversion - Changed value of %s to %s' % (option, defaults[option])
elif type(defaults[option][0]) == float:
defaults[option][0] = float(self.configuration.get(section, option))
#print 'Float conversion - Changed value of %s to %s' % (option, defaults[option])
elif type(defaults[option][0]) == int:
defaults[option][0] = int(self.configuration.get(section, option))
#print 'Int conversion - Changed value of %s to %s' % (option, defaults[option])
elif type(defaults[option][0]) == bool:
confValue = self.configuration.get(section, option)
#Accepts 1,0, True and False
if confValue == 'True':
confValue = True
elif confValue == 'False':
confValue = False
else:
confValue = bool(int(confValue))
defaults[option][0] = confValue
#print 'Bool - Changed value of ', option, ' to ', defaults[option]
else:
defaults[option][0] = self.configuration.get(section, option)
#print 'No conversion - Changed value of %s to %s' % (option, defaults[option])
return defaults
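#Worked example for designPKAOptions above (illustrative): with the default
#configuration, 'recalc_intpka_dist' = '20' is cast to the numeric type of the
#matching Design_pKa default, while 'save_solutions' = 'None' becomes None.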
def uffbapsLocation(self):
'''Returns the location of UFFBAPS as given by the configuration file
The default configuration points to ~/PEAT/UFFBAPS if nothing else is supplied'''
return self.configuration.get('PATHS', 'uffbaps')
def pkaModuleLocation(self):
'''Returns the location of pKaTool.
Note: This is the path provided by the configuration file. It may not be valid.
Use pKaCalculationsAvailable() to check'''
return self.configuration.get('PATHS', 'pKa Tools')
def pKaCalculationsAvailable(self):
'''Returns True if pKaCalculations are available'''
return self.pKaToolAvailable
def pdbCleanProgram(self):
'''Returns the program to use to clean pdbs'''
if self.configuration.has_option('PDBS', 'pdbCleanProgram'):
return self.configuration.get('PDBS', 'pdbCleanProgram')
else:
return 'WHATIF'
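# Illustrative usage sketch for Configuration, using a hypothetical output
# file name. designPKAOptions() additionally requires the pKa Tools to be
# importable from the configured path.
def _configurationSketch():
	'''Minimal sketch of creating and querying a Configuration instance'''
	#Fall back to the default search locations (optionally writing a default file)
	configuration = Configuration(filename=None, searchDefaultLocations=True, writeFile=False)
	#ConfigParser methods are forwarded via __getattr__
	print configuration.get('PATHS', 'uffbaps')
	print configuration.pdbCleanProgram()
	if configuration.pKaCalculationsAvailable():
		scanOptions = configuration.designPKAOptions()
	configuration.writeToFile('myRun.conf')
	return configuration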
class WorkingDirectory:
'''Handles setup of the working directory for a run of the protein design tool.
This involves checking that the necessary files for a given protein are present
along with generic run files e.g. TOPOLOGY.H etc.
If the generic files are not present they are copied there'''
def __init__(self, location, configuration=None):
'''Creates an instance for performing a run in a specified directory.
The various classes representing the programs to be run do not depend on this class.
It merely sets up the necessary conditions.
Note that the current working directory is changed to the specified directory on instantiation.
Also note that internally the class always uses absolute paths, so if the current working directory
is changed it will still function, except that path() will no longer match the current
working directory.
Many of this class's methods only work in the root process in a parallel environment.
Parameters
location: The path to the working directory
configuration: An optional Environment.Configuration instance'''
#Create environment instance
self.environment = Environment()
#set default behaviour if working directory already exists
copyIfExists = False
ignoreCopyErrors = False
overwriteExistingCopies = False
useUniqueID = False
copyLocation = os.getcwd()
#override defaults with configuration values if available
if configuration is not None:
#Note: Casting a string directly to a bool always gives True
copyIfExists = int(configuration.get('WORKING DIRECTORY', 'copyIfExists'))
copyIfExists = bool(copyIfExists)
useUniqueID = int(configuration.get('WORKING DIRECTORY', 'useUniqueID'))
useUniqueID = bool(useUniqueID)
overwriteExistingCopies = bool(int(configuration.get('WORKING DIRECTORY', 'overwriteExistingCopies')))
copyLocation = configuration.get('WORKING DIRECTORY', 'copyLocation')
copyLocation = os.path.abspath(copyLocation)
try:
ignoreCopyErrors = int(configuration.get('WORKING DIRECTORY', 'ignoreCopyErrors'))
ignoreCopyErrors = bool(ignoreCopyErrors)
except ConfigParser.NoOptionError, data:
self.environment.output(
'[PEAT-SA] Configuration file does not contain ignoreCopyErrors option - defaulting to false',
rootOnly=True)
ignoreCopyErrors = False
self.directory = os.path.abspath(location)
#If the working dir doesn't exist create it
#If it does and copyIfExists is True, copy it
#Otherwise use it as is
creatingDir = False
if not os.path.exists(self.directory):
creatingDir = True
if self.environment.isRoot():
#Create it
try:
os.mkdir(self.directory)
except BaseException:
raise Exceptions.WorkingDirectoryError, "Working directory - %s - does not exist and cannot be created" % self.path()
elif copyIfExists is True:
creatingDir = True
#Get name for copied dir - all processes enter this code
destinationName = os.path.split(self.directory)[1]
if useUniqueID is True:
if self.environment.isRoot():
#Cumbersome way of getting a unique name, necessary since mktemp was deprecated
#We use the original dir name so we can id the tmp dir later
temp = tempfile.NamedTemporaryFile(prefix='%s_' % destinationName, dir=copyLocation)
destinationName = temp.name
temp.close()
destinationName = self.environment.broadcast(data=destinationName, process=0)
originalLocation = self.directory
#Reassign self.directory to new value
self.directory = os.path.join(copyLocation, destinationName)
self.environment.output(
'[PEAT-SA] Specified working directory exists - creating copy at %s' % self.directory,
rootOnly=True)
#only root copies the directory
if self.environment.isRoot():
if os.path.exists(self.directory) and not overwriteExistingCopies:
raise Exceptions.WorkingDirectoryError, "Directory %s exists and cannot create copy at %s" % (originalLocation, self.directory())
else:
#Delete existing dir
shutil.rmtree(self.directory, ignore_errors=True)
#Copy the directory to the new destination
try:
shutil.copytree(originalLocation, self.directory , symlinks=True)
except shutil.Error, data:
self.environment.output(
'[PEAT-SA] Encountered error while copying',
rootOnly=True)
if ignoreCopyErrors is True:
self.environment.output( '[PEAT-SA] Errors were:', rootOnly=True)
for copyError in data:
self.environment.output( '\t%s' % copyError, rootOnly=True)
self.environment.output(
'[PEAT-SA] Ignoring errors and trying to continue',
rootOnly=True)
else:
raise
else:
self.environment.output(
'[PEAT-SA] Specified working directory exists - using it',
rootOnly=True)
#Wait for roots copy/creation to finish
self.environment.wait()
#Never executed if the directory creation fails
if creatingDir:
self.environment.log('Root finished creation of directory %s - checking if it has appeared' % self.directory)
while not os.path.isdir(self.directory):
self.environment.log("Directory hasn't appeared yet")
time.sleep(0.5)
#Switch current working directory to the supplied directory
os.chdir(self.path())
def _removeErrorHandler(self, function, path, execInfo):
'''Convenience method for raising a remove-directory exception
Note: Do not use as a handler for shutil.rmtree as the exception raised here
cannot be caught then'''
data = 'Error removing directory %s\n' % path
data = data + 'Exception information - %s\n' % str(execInfo)
raise Exceptions.WorkingDirectoryError, data
def _checkFile(self, pdbFile):
'''Checks if the path pdbFile refers to an existing file.
FIXME - Make changing of current working directory understandable.
Parameters:
pdbFile - A path to a file - Can be absolute or not.
Returns:
The absolute path to the file.
Exceptions:
Raises an exception if the file does not exist or if it's not a file'''
#Make the filename absolute
if not os.path.isabs(pdbFile):
pdbFile = os.path.abspath(pdbFile)
#Check the specified file exists.
if not os.path.exists(pdbFile):
raise Exceptions.WorkingDirectoryError, 'File %s does not exist' % pdbFile
#Check its a file
if not os.path.isfile(pdbFile):
raise Exceptions.WorkingDirectoryError, 'Object at %s is not a file' % pdbFile
return pdbFile
def _copyFileToDirectory(self, filename, directory, overwrite=False):
'''For internal use only. Copies filename to directory.
Note that since this is for internal use error checking is less rigorous.
Parameters
filename - A path to a file. Can be relative or absolute.
If the path is relative it is resolved w.r.t. the specified directory.
directory - The directory to copy the file to.
overwrite - If True, if a file exists in the directory with filename
it is overwritten if possible. Otherwise that file is used.
Default - False
Returns:
The full path to the copied file
Exceptions:
Raises an Exceptions.ArgumentError if the file does not exist.
Raises an Exceptions.WorkingDirectoryError if the file could not be copied.'''
#Only the root process does the copying
destination = os.path.join(directory, os.path.split(filename)[1])
self.environment.log('Copying %s to %s' % (filename, destination))
if self.environment.isRoot():
filename = self._checkFile(filename)
#Check if the specified file to is in the working directory
#If it is we don't do anything
containingDirectory = os.path.split(filename)[0]
if containingDirectory != directory:
#Check if a copy of the file already is present
#Again do nothing if this is so
directoryContents = os.listdir(directory)
baseName = os.path.split(filename)[1]
if directoryContents.count(baseName) == 1 and overwrite is True:
#This automatically overwrites destination if it exists
#and the operation is possible
shutil.copyfile(filename, destination)
elif directoryContents.count(baseName) == 0:
shutil.copyfile(filename, destination)
self.environment.wait()
return destination
def copyFileToDirectory(self, filename, overwrite=False):
'''Copies filename to the working directory.
Parameters
filename - A path to a file. Can be relative or absolute.
If the path is relative it is resolved w.r.t. the working directory.
overwrite - If True, if a file called filename exists in the working directory
it is overwritten if possible. Otherwise that file is used.
Default - False
Returns:
The full path to the copied file
Exceptions:
Raises an Exceptions.ArgumentError if the file does not exist.
Raises an Exceptions.WorkingDirectoryError if the file could not be copied.'''
return self._copyFileToDirectory(filename=filename, directory=self.path(), overwrite=overwrite)
def copyFileToUffbapsDirectory(self, filename, overwrite=False):
'''Copies filename to the uffbaps run directory.
Parameters
filename - A path to a file. Can be relative or absolute.
If the path is relative it is resolved w.r.t. the uffbaps run directory.
overwrite - If True, if a file called filename exists in the uffbaps run directory
it is overwritten if possible. Otherwise that file is used.
Default - False
Returns:
The full path to the copied file
Exceptions:
Raises an Exceptions.ArgumentError if the file does not exist.
Raises an Exceptions.WorkingDirectoryError if the file could not be copied.'''
return self._copyFileToDirectory(filename=filename, directory=self.uffbapsRunDirectory(), overwrite=overwrite)
def containsFile(self, filename):
'''Returns True if the directory contains filename - False otherwise
Note: Only checks the top-level directory'''
filename = os.path.abspath(filename)
path = os.path.split(filename)[0]
isPresent = False
if path == self.directory:
isPresent = True
return isPresent
def setupPKARun(self):
'''Checks that the directory contains all the necessary files for a pKa calculation
Exceptions:
Raises Exceptions.WorkingDirectoryError if there was a problem with the setup.'''
#Only the root process does the copying
if self.environment.isRoot():
directoryContents = os.listdir(self.path())
#Check that all the pka non-protein specific files are present
#If any is missing copy the resource dir version
for requiredFile in requiredPKADesignFiles:
if directoryContents.count(requiredFile) == 0:
copiedRequiredFiles = True
resourceVersion = os.path.join(RequiredScanFilesDirectory(), requiredFile)
shutil.copyfile(resourceVersion, os.path.join(self.path(), requiredFile))
self.environment.wait()
def setupScan(self, pdbName, mutantCollection):
'''Checks that the directory contains all the necessary files for a scan run on pdbName
Note that passing None for mutantCollection does not cause this method to fail.
Parameters:
pdbName - The name of the pdbFile the scan will be run on e.g. 5P21.pdb
A pdb with the name must be in the working directory.
mutantCollection - A MutantCollection instance containing the mutant files the scan will be run on
Exceptions:
Raises an Exceptions.MissingPKADataError if the required pKa data is missing.
Raises Exceptions.WorkingDirectoryError if there was any other problem with the setup.'''
#Only the root process does the copying
if self.environment.isRoot():
#Accumulate all errors into one string so the user finds out
#everything that is wrong in one go
errorString = ""
#Get the directory contents
directoryContents = os.listdir(self.path())
#Flag tracking if we had to copy the required protein design files to the working directory
copiedRequiredFiles = False
#Check that all the pka non-protein specific files are present
#If any is missing copy the resource dir version
for requiredFile in requiredPKADesignFiles:
if directoryContents.count(requiredFile) == 0:
copiedRequiredFiles = True
resourceVersion = os.path.join(RequiredScanFilesDirectory(), requiredFile)
shutil.copyfile(resourceVersion, os.path.join(self.path(), requiredFile))
#Check that all the pka protein specific files are present
for extension in proteinScanFilesExtensions:
filename = pdbName + "." + extension
if directoryContents.count(filename) == 0:
errorString = errorString + "Required scan file %s not present\n" % filename
if len(errorString) != 0:
#If we copied the required protein design files remove them
if copiedRequiredFiles:
for file in requiredPKADesignFiles:
os.remove(os.path.join(self.path(), file))
self.clean()
raise Exceptions.MissingPKADataError, errorString
#Check the pdbFile is present
path = os.path.join(self.directory, pdbName)
if not self._checkFile(path):
raise Exceptions.WorkingDirectoryError, "No pdb file called %s in the working directory (%s)" % (pdbName, self.directory)
#Link the mutants to the required scan directory
if mutantCollection is not None:
mutantPath = os.path.join(self.directory, "%s.pdbs" % pdbName)
self.environment.output('Linking created mutants to %s' % mutantPath)
#Check if a link exists - if it does delete it
#Move previous scan dirs to a new path
if os.path.islink(mutantPath):
os.remove(mutantPath)
elif os.path.isdir(mutantPath):
renamedPath = os.path.join(self.directory, "%s.pdbs_old" % pdbName)
if not os.path.isdir(renamedPath):
os.rename(mutantPath, renamedPath)
self.environment.output('Moved existing mutants at %s to %s' % (mutantPath, renamedPath))
else:
self.environment.output('Leaving mutants present at %s' % renamedPath)
self.environment.output('Deleting mutants present at %s' % mutantPath)
shutil.rmtree(mutantPath, ignore_errors=True)
#Create a link to the mutant collection dir
os.symlink(os.path.join(mutantCollection.location, "Mutants"), mutantPath)
self.environment.output('Link created')
self.environment.wait()
#If we are in parallel every process copies the necessary files
#to its own subdir of this directory and changes to work in it
if self.environment.isParallel:
self.environment.output('Current path %s. Current dir %s' % (self.path(), os.getcwd()), rootOnly=True)
destination = os.path.join(self.path(), "Process%d" % self.environment.rank())
#Check is this already exists - could have been a problem with a previous clean
self.environment.log('Creating process specific directory at %s' % destination)
if os.path.exists(destination):
self.environment.output('Deleting previous process specific directory at %s' % destination, rootOnly=True)
shutil.rmtree(destination, ignore_errors=True)
template = os.path.join(self.path(), "template")
if self.environment.isRoot():
if os.path.exists(template):
self.environment.output('Deleting previous template directory at %s' % template, rootOnly=True)
shutil.rmtree(template, ignore_errors=True)
#Create a temporary template copy, preserving symlinks rather than following them
shutil.copytree(self.path(), template, symlinks=True)
self.environment.wait()
shutil.copytree(template, destination, symlinks=True)
self.environment.wait()
if self.environment.isRoot():
shutil.rmtree(template, ignore_errors=True)
os.chdir(destination)
self.environment.output('After move: Current path %s. Current dir %s' % (self.path(), os.getcwd()), rootOnly=True)
def cleanUpScan(self):
'''Performs clean-up of the WorkingDirectory after a scan
This basically involves removing any per-parallel-process copies
of the pKa data'''
if self.environment.isParallel:
os.chdir(self.path())
directory = os.path.join(self.path(), "Process%d" % self.environment.rank())
try:
shutil.rmtree(directory, ignore_errors=True)
except Exception, data:
print Exception, data
def setupUFFBAPS(self):
'''Checks that the directory contains all the necessary files for a UFFBAPS run.
Exceptions:
Raises Exceptions.WorkingDirectoryError if there was a problem with the setup.'''
if self.environment.isRoot():
#There is a chance the directory will already be present - delete it
if os.path.isdir(self.uffbapsRunDirectory()):
shutil.rmtree(self.uffbapsRunDirectory(), ignore_errors=True)
#Copy all the necessary files for running Chresten's stability tool
try:
shutil.copytree(UffbapsResourceDirectory(), self.uffbapsRunDirectory())
errorString = ''
except BaseException, data:
print 'Encountered an error when copying UFFBAPS data - ', data
errorString = "Unable to move neccessary files for stability calculation to working directory"
raise Exceptions.WorkingDirectoryError, errorString
self.environment.wait()
def setup(self, pdbFile, mutantCollection=None, scan=True, stability=True, modes=False):
'''Checks that the directory contains all the necessary files for the calculations specified on initialisation
Note: Unlike the other setup functions overwrite defaults to True in here.
Parameters:
pdbFile - The absolute path to the pdbFile the scan will be run on
scan - True if a scan is to be performed. Default True
stability - True if a stability calculation is to be performed
Default True. May not be possible if a scan is not performed.
modes - True if a modes calculation is to be performed. Default False
Note: No effect currently
Exceptions:
Raises Exceptions.WorkingDirectoryError if there is a problem with the setup.'''
if scan:
self.setupScan(pdbFile, mutantCollection=mutantCollection)
if stability:
self.setupUFFBAPS()
def clean(self):
'''Cleans the working directory of unnecessary files.'''
if self.environment.isRoot():
shutil.rmtree(self.uffbapsRunDirectory(), ignore_errors=True)
#Remove scan files?
def path(self):
'''Returns the path of the working directory'''
return self.directory
def uffbapsRunDirectory(self):
'''Returns the path of the subdir where genialtNav will be run'''
return os.path.join(self.path(), 'UFFBAPSRun')
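# Illustrative usage sketch for WorkingDirectory, using hypothetical paths and
# pdb name. A real scan additionally needs the protein-specific files
# (e.g. 5P21.pdb.BACKGR.DAT) to be present in the directory.
def _workingDirectorySketch(configuration):
	'''Minimal sketch of preparing a directory for a scan plus stability run'''
	directory = WorkingDirectory('/tmp/5P21Run', configuration=configuration)
	#Copy the pdb into the working directory, overwriting any stale copy
	pdbPath = directory.copyFileToDirectory('/data/pdbs/5P21.pdb', overwrite=True)
	#Check the generic and protein-specific scan files, then set up UFFBAPS
	directory.setup('5P21.pdb', mutantCollection=None, scan=True, stability=True)
	print 'Working in %s' % directory.path()
	directory.clean()
	return pdbPath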
class Environment:
'''Class representing the application environment.
Environment objects hide details about the number of processes in the environment from other Core objects.
That is, Core objects use Environment objects in the same way whether the program is running in serial or parallel.
This means all MPI code is contained in this class.
The other Core objects use Environment objects to output text, split arrays etc.'''
isInitialised = False
isParallel = False
def __init__(self, verbose=True):
'''Initialises new Environment objects'''
self.outputDirectory = os.getcwd()
self.verbose = verbose
#Do the subsequent things once only
if not Environment.isInitialised:
#See if a parallel environment is available
try:
import pypar
#If it's imported we might have a parallel environment
Environment.isParallel = True
self.output('[PEAT-SA] Parallel environment available')
#Check environment size.
#If there is more than one processor there must be a parallel environment
#If there's only one then it's not parallel.
if pypar.size() == 1:
self.output('[PEAT-SA] Only one processor - parallel environment disabled')
Environment.isParallel = False
else:
self.output('[PEAT-SA] Parallel environment enabled with %d processors' % pypar.size())
except BaseException:
#Importing pypar caused an exception - No parallel environment
Environment.isParallel = False
self.output('[PEAT-SA] Parallel environment disabled.\n')
Environment.isInitialised = True
def rank(self):
'''Returns the rank of the process in the environment
This is 0 if there is only one process, and 0 for the root process'''
if Environment.isParallel:
import pypar
return pypar.rank()
else:
return 0
def isRoot(self):
'''Returns True if the processes rank is 0'''
if self.rank() == 0:
return True
else:
return False
def _divideArray(self, array):
'''Divides an array roughly equally depending on the environment size
Returns: A list with one entry for each node.
The entry is a tuple giving the start and end elements in array
that should be assigned to that node.'''
import pypar
#Divide evenly then add remainder elements to processors
maxElements = int(math.floor(len(array)/pypar.size()))
remainder = len(array) - maxElements*pypar.size()
start = 0
end = 0
divisions = []
for i in range(pypar.size()):
start = end
end = end + maxElements
if remainder != 0:
end = end + 1
remainder = remainder - 1
divisions.append((start, end))
return divisions
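#Worked example for _divideArray above (illustrative): with 10 elements and 3
#processes, maxElements = 3 and remainder = 1, giving divisions
#[(0, 4), (4, 7), (7, 10)] - the first node absorbs the single extra element.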
def splitArray(self, array):
'''Splits array between all the processes in the environment.
Each process will be returned a different section of the array to work on'''
if Environment.isParallel:
import pypar
#Split the array into sections and return the section for this processor
divisions = []
if self.isRoot():
#Root does the splitting - we send each processor the start and end index
#NOTE: pypar broadcast won't work even when setting vanilla
#It always returns a message truncated error.
divisions = self._divideArray(array)
for i in range(1,pypar.size()):
pypar.send(divisions[i], i)
start = divisions[0][0]
end = divisions[0][1]
else:
indexes = pypar.receive(0)
start = indexes[0]
end = indexes[1]
return array[start:end]
else:
return array
def combineArray(self, arrayFragment):
'''Combines a set of arrayFragments from each processor into one array'''
if Environment.isParallel:
import pypar
if self.isRoot():
completeArray = arrayFragment
for i in range(1, pypar.size()):
fragment = pypar.receive(i)
completeArray.extend(fragment)
#Send the array
for i in range(1, pypar.size()):
pypar.send(completeArray, i)
else:
#Send the fragment
pypar.send(arrayFragment, 0)
#Retrieve the array
completeArray = pypar.receive(0)
else:
completeArray = arrayFragment
return completeArray
def combineDictionary(self, dictFragment):
'''Combines a set of dictionary fragments from each processor into one dictionary'''
if Environment.isParallel:
import pypar
if self.isRoot():
completeDict = dictFragment
for i in range(1, pypar.size()):
fragment = pypar.receive(i)
completeDict.update(fragment)
#Send the array
for i in range(1, pypar.size()):
pypar.send(completeDict, i)
else:
#Send the fragment
pypar.send(dictFragment, 0)
#Retrieve the array
completeDict = pypar.receive(0)
else:
completeDict = dictFragment
return completeDict
def output(self, string, stream=None, rootOnly=True):
'''Prints a string to a stream if the calling process is the root process
Parameters:
string: The string to be written
stream: The stream to write to. If None or not specified defaults to stdout.
rootOnly: If True only the root process writes the string. Default True.'''
if rootOnly is True and self.isRoot() is False:
return
if stream is None:
print string
#With verbose on flush everything immediately
if self.verbose:
sys.stdout.flush()
else:
stream.write(string)
if self.verbose:
stream.flush()
def processorName(self):
if Environment.isParallel:
import pypar
return pypar.get_processor_name()
else:
return "localhost"
def log(self, string):
'''Logs a string to a specific file for the calling process
The file is called ProcessorX.log where X is the rank of the process.
The string is appended to this file'''
from inspect import stack
filename = os.path.join(self.outputDirectory, "Processor%d.log" % self.rank())
stream = open(filename, 'a+')
stream.write(string)
stream.write(' (Logged at line %d of %s at %s)' % (stack()[1][0].f_lineno,
stack()[1][0].f_code.co_filename, datetime.datetime.now().strftime('%H:%M:%S')))
stream.write("\n")
stream.close()
def logError(self, string):
'''Logs a string to a specific error file for the calling process
The file is called ProcessorX.error where X is the rank of the process.
The string is appended to this file'''
filename = os.path.join(self.outputDirectory, "Processor%d.error", self.rank())
stream = open(filename, 'a+')
stream.write(string)
stream.close()
def wait(self, error=False):
'''This method will not return until all process in the environment have called it.
This is a wrapper around MPI_Barrier which handles the case where MPI is not available'''
from inspect import stack
if self.verbose is True:
string = '(%s) Waiting at line %d of %s' % (datetime.datetime.now().strftime('%H:%M:%S'),
stack()[1][0].f_lineno, stack()[1][0].f_code.co_filename)
self.log(string)
if Environment.isParallel:
import pypar
pypar.barrier()
#Because MPI_ABORT doesn't work in pypar if called from one process
#we need a way for processes to communicate to each other whether an error occurred
#during the code they executed before this barrier. We do a scatter/gather of
#the error parameter - This isn't very efficient but it's all we can do now
errors = self.combineArray([error])
if True in errors:
self.exit(1)
if self.verbose is True:
string = '(%s) Finished waiting' % (datetime.datetime.now().strftime('%H:%M:%S'))
self.log(string)
def exit(self, code):
'''This method exits the simulation.
In a parallel environment calls MPI_ABORT.
In serial, calls sys.exit().
Code is the exit code. Only used in serial processes.'''
if Environment.isParallel:
import pypar
return pypar.abort(code)
else:
return sys.exit(code)
def broadcast(self, data, process):
'''Broadcasts data from process to all other nodes'''
if Environment.isParallel:
import pypar
if self.rank() == process:
#NOTE: pypar broadcast won't work even when setting vanilla
#It always returns a message truncated error.
for i in range(pypar.size()):
if i != self.rank():
pypar.send(data, i)
else:
data = pypar.receive(process)
return data
def balanceArrays(self, arrayFragment):
'''Redistributes the elements in a set of arrays equally across the nodes'''
if Environment.isParallel:
import pypar
if self.isRoot():
completeArray = arrayFragment
for i in range(1, pypar.size()):
fragment = pypar.receive(i)
completeArray.extend(fragment)
#Divide it up
divisions = self._divideArray(completeArray)
#Send the fragments
for i in range(1, pypar.size()):
start, end = divisions[i]
pypar.send(completeArray[start:end], i)
self.output('[ENV] Rebalanced array divisions %s' % divisions)
#Assign root fragment
start, end = divisions[0]
arrayFragment = completeArray[start:end]
else:
#Send the fragment
pypar.send(arrayFragment, 0)
#Retrieve the array
arrayFragment = pypar.receive(0)
else:
completeArray = arrayFragment
return arrayFragment
def loadSkew(self, numberTasks):
'''Computes the skew in the number of tasks to be processed by each node
The skew is the standard deviation of the task number across the nodes'''
if Environment.isParallel:
import pypar
if self.isRoot():
taskDistribution = [numberTasks]
for i in range(1, pypar.size()):
numberTasks = pypar.receive(i)
taskDistribution.append(numberTasks)
mean = reduce(operator.add, taskDistribution)
mean = mean/float(len(taskDistribution))
#Std. dev
stdev = 0
for el in taskDistribution:
stdev = stdev + math.pow((el - mean), 2)
skew = stdev/float(len(taskDistribution))
skew = math.sqrt(skew)
for i in range(1, pypar.size()):
pypar.send(skew, i)
else:
#Send the fragment
pypar.send(numberTasks, 0)
#Retrieve the array
skew = pypar.receive(0)
else:
skew = 0
return skew
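# Illustrative usage sketch for Environment. In a serial run (pypar absent)
# splitArray() and combineArray() degenerate to identity operations, so the
# same driver code works unchanged in serial and parallel; the doubling step
# below is just a stand-in for real per-task work.
def _environmentSketch(tasks):
	'''Minimal sketch of a split/process/combine loop over a task list'''
	environment = Environment(verbose=False)
	environment.output('[Example] Running on %s' % environment.processorName())
	#Each process receives its own slice of the task list
	myTasks = environment.splitArray(tasks)
	myResults = [task*2 for task in myTasks]
	#Gather every process's results back into one list on all nodes
	allResults = environment.combineArray(myResults)
	environment.wait()
	return allResults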
# ------------------------------------------------------------------------
#
# Copyright 2005-2015 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# ------------------------------------------------------------------------
from plugins.contracts import ICartridgeAgentPlugin
from modules.util.log import LogFactory
from entity import *
import subprocess
import os
import mdsclient
import time
import socket
class WSO2AMStartupHandler(ICartridgeAgentPlugin):
log = LogFactory().get_log(__name__)
# class constants
CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
CONST_PORT_MAPPING_PT_HTTP_TRANSPORT = "pt-http"
CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT = "pt-https"
CONST_PROTOCOL_HTTP = "http"
CONST_PROTOCOL_HTTPS = "https"
CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
CONST_APPLICATION_ID = "APPLICATION_ID"
CONST_MB_IP = "MB_IP"
CONST_CLUSTER_ID = "CLUSTER_ID"
CONST_SERVICE_NAME = "SERVICE_NAME"
CONST_KEY_MANAGER = "KeyManager"
CONST_GATEWAY_MANAGER = "Gateway-Manager"
CONST_GATEWAY_WORKER = "Gateway-Worker"
CONST_PUBLISHER = "Publisher"
CONST_STORE = "Store"
CONST_PUBSTORE = "PubStore"
CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
CONST_WORKER = "worker"
CONST_MANAGER = "manager"
CONST_MGT = "mgt"
CONST_KEY_MANAGER_SERVICE_NAME = "wso2am-190-km"
CONST_GATEWAY_MANAGER_SERVICE_NAME = "wso2am-190-gw-manager"
CONST_GATEWAY_WORKER_SERVICE_NAME = "wso2am-190-gw-worker"
CONST_PUBLISHER_SERVICE_NAME = "wso2am-190-publisher"
CONST_STORE_SERVICE_NAME = "wso2am-190-store"
CONST_PUBLISHER_STORE_NAME = "wso2am-190-pub-store"
CONST_CONFIG_PARAM_KEYMANAGER_PORTS = 'CONFIG_PARAM_KEYMANAGER_PORTS'
CONST_CONFIG_PARAM_GATEWAY_PORTS = 'CONFIG_PARAM_GATEWAY_PORTS'
CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS = 'CONFIG_PARAM_GATEWAY_WORKER_PORTS'
CONST_KUBERNETES = "KUBERNETES"
CONST_VM = "VM"
CONST_EXTERNAL_LB_FOR_KUBERNETES = "EXTERNAL_LB_FOR_KUBERNETES"
GATEWAY_SERVICES = [CONST_GATEWAY_MANAGER_SERVICE_NAME, CONST_GATEWAY_WORKER_SERVICE_NAME]
PUB_STORE_SERVICES = [CONST_PUBLISHER_SERVICE_NAME, CONST_STORE_SERVICE_NAME]
PUB_STORE = [CONST_PUBLISHER_STORE_NAME]
# list of environment variables exported by the plugin
ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_PT_HTTP_PROXY_PORT'
ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_PT_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
ENV_CONFIG_PARAM_PROFILE = 'CONFIG_PARAM_PROFILE'
ENV_CONFIG_PARAM_LB_IP = 'CONFIG_PARAM_LB_IP'
ENV_CONFIG_PARAM_KEYMANAGER_IP = 'CONFIG_PARAM_KEYMANAGER_IP'
ENV_CONFIG_PARAM_GATEWAY_IP = 'CONFIG_PARAM_GATEWAY_IP'
ENV_CONFIG_PARAM_PUBLISHER_IP = 'CONFIG_PARAM_PUBLISHER_IP'
ENV_CONFIG_PARAM_STORE_IP = 'CONFIG_PARAM_STORE_IP'
ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT = 'CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_GATEWAY_WORKER_IP = 'CONFIG_PARAM_GATEWAY_WORKER_IP'
ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT'
ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT'
# This is a payload parameter that enables use of an external LB when using Kubernetes. Set it to true when deploying on Kubernetes.
ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES = 'CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES'
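    # Note (illustrative): run_plugin() below is invoked by the cartridge agent
    # with a 'values' dictionary that must carry at least PORT_MAPPINGS,
    # APPLICATION_ID, MB_IP, SERVICE_NAME and CLUSTER_ID, while
    # CONFIG_PARAM_PROFILE and CONFIG_PARAM_LB_IP are read from the process
    # environment. Clustering and membership-scheme settings are optional and
    # default to 'false' and the private-paas scheme respectively.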
def run_plugin(self, values):
# read Port_mappings, Application_Id, MB_IP and Topology, clustering, membership_scheme from 'values'
port_mappings_str = values[WSO2AMStartupHandler.CONST_PORT_MAPPINGS].replace("'", "")
app_id = values[WSO2AMStartupHandler.CONST_APPLICATION_ID]
mb_ip = values[WSO2AMStartupHandler.CONST_MB_IP]
service_name = values[WSO2AMStartupHandler.CONST_SERVICE_NAME]
profile = os.environ.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_PROFILE)
load_balancer_ip = os.environ.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_LB_IP)
membership_scheme = values.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
WSO2AMStartupHandler.CONST_PPAAS_MEMBERSHIP_SCHEME)
clustering = values.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_CLUSTERING, 'false')
my_cluster_id = values[WSO2AMStartupHandler.CONST_CLUSTER_ID]
external_lb = values.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES, 'false')
# log above values
WSO2AMStartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
WSO2AMStartupHandler.log.info("Application ID: %s" % app_id)
WSO2AMStartupHandler.log.info("MB IP: %s" % mb_ip)
WSO2AMStartupHandler.log.info("Service Name: %s" % service_name)
WSO2AMStartupHandler.log.info("Profile: %s" % profile)
WSO2AMStartupHandler.log.info("Load Balancer IP: %s" % load_balancer_ip)
WSO2AMStartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
WSO2AMStartupHandler.log.info("Clustering: %s" % clustering)
WSO2AMStartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
# export Proxy Ports as Env. variables - used in catalina-server.xml
mgt_http_proxy_port = self.read_proxy_port(port_mappings_str,
WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
mgt_https_proxy_port = self.read_proxy_port(port_mappings_str,
WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
pt_http_proxy_port = self.read_proxy_port(port_mappings_str,
WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTP_TRANSPORT,
WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
pt_https_proxy_port = self.read_proxy_port(port_mappings_str,
WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT,
WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
self.export_env_var(WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
self.export_env_var(WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)
self.export_env_var(WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT, pt_http_proxy_port)
self.export_env_var(WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT, pt_https_proxy_port)
# set sub-domain
self.populate_sub_domains(service_name)
# export CONFIG_PARAM_MEMBERSHIP_SCHEME
self.export_env_var(WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)
if clustering == 'true' and membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
service_list = None
if service_name in self.GATEWAY_SERVICES:
service_list = self.GATEWAY_SERVICES
elif service_name in self.PUB_STORE_SERVICES:
service_list = self.PUB_STORE_SERVICES
elif service_name in self.PUB_STORE:
service_list = self.PUB_STORE
# set cluster ids for private-paas clustering schema in axis2.xml
self.set_cluster_ids(app_id, service_list)
# export mb_ip as Env.variable - used in jndi.properties
self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)
if profile == self.CONST_KEY_MANAGER:
# this is for key_manager profile
# remove previous data from metadata service
# add new values to meta data service - key manager ip and mgt-console port
# retrieve values from meta data service - gateway ip, gw mgt console port, pt http and https ports
# check whether deployment is VM; if so, update /etc/hosts with values
# export retrieved values as environment variables
# set the start command
self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
self.remove_data_from_metadata(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, load_balancer_ip)
self.add_data_to_meta_data_service(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS,
"Ports:" + mgt_https_proxy_port)
gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
gateway_host = gateway_ip
gateway_worker_host = gateway_worker_ip
else:
gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
gateway_host = gateway_host_name
gateway_worker_host = gateway_worker_host_name
self.update_hosts_file(gateway_ip, gateway_host_name)
self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_name(app_id, service_name, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
self.set_gateway_ports(gateway_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
self.set_gateway_worker_ports(gateway_worker_ports)
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-key-manager start"
elif profile == self.CONST_GATEWAY_MANAGER:
# this is for gateway manager profile
# remove previous data from metadata service
# add new values to meta data service - gateway ip, mgt-console port, pt http and https ports
# retrieve values from meta data service - keymanager ip and mgt console port
# check whether deployment is VM; if so, update /etc/hosts with values
# export retrieved values as environment variables
# export hostname for gateway-manager
# set the start command
self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_GATEWAY_IP)
self.remove_data_from_metadata(self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_GATEWAY_IP, load_balancer_ip)
port_list = "Ports:" + mgt_https_proxy_port
self.add_data_to_meta_data_service(self.CONST_CONFIG_PARAM_GATEWAY_PORTS, port_list)
keymanager_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
keymanager_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
keymanager_host = keymanager_ip
else:
keymanager_host_name = self.get_host_name_from_cluster(self.CONST_KEY_MANAGER_SERVICE_NAME, app_id)
keymanager_host = keymanager_host_name
self.update_hosts_file(keymanager_ip, keymanager_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, keymanager_host)
km_port = self.set_keymanager_ports(keymanager_ports)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_names_for_gw(app_id, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
set_system_properties = "-Dkm.ip=" + keymanager_ip + " -Dkm.port=" + km_port
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-manager " + set_system_properties + " start"
elif profile == self.CONST_GATEWAY_WORKER:
# this is for gateway worker profile
# remove previous data from metadata service
# retrieve values from meta data service - keymanager ip and mgt console port
# export retrieved values as environment variables
# check whether deployment is VM; if so, update /etc/hosts with values
# export hostname for gateway-worker
# set the start command
self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
self.remove_data_from_metadata(self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, load_balancer_ip)
port_list = "Ports:" + pt_http_proxy_port + ":" + pt_https_proxy_port
self.add_data_to_meta_data_service(self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS, port_list)
keymanager_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
keymanager_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
keymanager_host = keymanager_ip
else:
keymanager_host_name = self.get_host_name_from_cluster(self.CONST_KEY_MANAGER_SERVICE_NAME, app_id)
keymanager_host = keymanager_host_name
self.update_hosts_file(keymanager_ip, keymanager_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, keymanager_host)
km_port = self.set_keymanager_ports(keymanager_ports)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_names_for_gw(app_id, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
set_system_properties = "-Dkm.ip=" + keymanager_ip + " -Dkm.port=" + km_port
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-worker " + set_system_properties + " start"
elif profile == self.CONST_PUBLISHER:
# this is for the publisher profile
# remove previous data from the metadata service
# add new values to the metadata service - publisher ip
# retrieve values from the metadata service - store ip, km ip and mgt console port, gw ip and mgt console port, gw worker ip and pt http/https ports
# check whether the deployment is vm; if vm, update /etc/hosts with the values
# export the retrieved values as environment variables
# export the hostname for the publisher
# set the start command
self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_PUBLISHER_IP)
self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_PUBLISHER_IP, load_balancer_ip)
store_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_STORE_IP)
keymanager_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
keymanager_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
keymanager_host = keymanager_ip
gateway_host = gateway_ip
gateway_worker_host = gateway_worker_ip
store_host = store_ip
else:
keymanager_host_name = self.get_host_name_from_cluster(self.CONST_KEY_MANAGER_SERVICE_NAME, app_id)
gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
store_host_name = self.get_host_name_from_cluster(self.CONST_STORE_SERVICE_NAME, app_id)
keymanager_host = keymanager_host_name
gateway_host = gateway_host_name
gateway_worker_host = gateway_worker_host_name
store_host = store_host_name
self.update_hosts_file(keymanager_ip, keymanager_host_name)
self.update_hosts_file(gateway_ip, gateway_host_name)
self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)
self.update_hosts_file(store_ip, store_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_STORE_IP, store_host)
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, keymanager_host)
self.set_keymanager_ports(keymanager_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
self.set_gateway_ports(gateway_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
self.set_gateway_worker_ports(gateway_worker_ports)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_name(app_id, service_name, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-publisher start"
elif profile == self.CONST_STORE:
# this is for the store profile
# remove previous data from the metadata service
# add new values to the metadata service - store ip
# retrieve values from the metadata service - publisher ip, km ip and mgt console port, gw ip and mgt console port, gw worker ip and pt http/https ports
# check whether the deployment is vm; if vm, update /etc/hosts with the values
# export the retrieved values as environment variables
# export the hostname for the store
# set the start command
self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_STORE_IP)
self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_STORE_IP, load_balancer_ip)
publisher_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_PUBLISHER_IP)
keymanager_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
keymanager_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
keymanager_host = keymanager_ip
gateway_host = gateway_ip
gateway_worker_host = gateway_worker_ip
publisher_host = publisher_ip
else:
keymanager_host_name = self.get_host_name_from_cluster(self.CONST_KEY_MANAGER_SERVICE_NAME, app_id)
gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
publisher_host_name = self.get_host_name_from_cluster(self.CONST_PUBLISHER_SERVICE_NAME, app_id)
keymanager_host = keymanager_host_name
gateway_host = gateway_host_name
gateway_worker_host = gateway_worker_host_name
publisher_host = publisher_host_name
self.update_hosts_file(keymanager_ip, keymanager_host_name)
self.update_hosts_file(gateway_ip, gateway_host_name)
self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)
self.update_hosts_file(publisher_ip, publisher_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_PUBLISHER_IP, publisher_host)
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, keymanager_host)
self.set_keymanager_ports(keymanager_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
self.set_gateway_ports(gateway_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
self.set_gateway_worker_ports(gateway_worker_ports)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_name(app_id, service_name, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-store start"
elif profile == self.CONST_PUBSTORE:
# Publisher and Store run on the same node (PubStore profile)
# retrieve values from the metadata service - km ip and mgt console port, gw ip and mgt console port, gw worker ip and pt http/https ports
# check whether the deployment is vm; if vm, update /etc/hosts with the values
# export the retrieved values as environment variables
# export the hostname for pubStore
# set the start command
keymanager_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
keymanager_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
environment_type = self.find_environment_type(external_lb, service_name, app_id)
if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
keymanager_host = keymanager_ip
gateway_host = gateway_ip
gateway_worker_host = gateway_worker_ip
else:
keymanager_host_name = self.get_host_name_from_cluster(self.CONST_KEY_MANAGER_SERVICE_NAME, app_id)
gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
keymanager_host = keymanager_host_name
gateway_host = gateway_host_name
gateway_worker_host = gateway_worker_host_name
self.update_hosts_file(keymanager_ip, keymanager_host_name)
self.update_hosts_file(gateway_ip, gateway_host_name)
self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, keymanager_host)
self.set_keymanager_ports(keymanager_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
self.set_gateway_ports(gateway_ports)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
self.set_gateway_worker_ports(gateway_worker_ports)
member_ip = socket.gethostbyname(socket.gethostname())
self.set_host_name(app_id, service_name, member_ip)
self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
else:
# This is the default profile
# for kubernetes, the load balancer ip should be specified; it is not needed for vm
# expose the gateway ip and the pt http/https ports (so the gateway can be reached externally)
# set the start command
if load_balancer_ip is not None:
gateway_ip = load_balancer_ip
gateway_pt_http_pp = pt_http_proxy_port
gateway_pt_https_pp = pt_https_proxy_port
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_ip)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT, gateway_pt_http_pp)
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT, gateway_pt_https_pp)
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
# start configurator
WSO2AMStartupHandler.log.info("Configuring WSO2 API Manager...")
config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
env_var = os.environ.copy()
p = subprocess.Popen(config_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2AMStartupHandler.log.info("WSO2 API Manager configured successfully")
# start server
WSO2AMStartupHandler.log.info("Starting WSO2 API Manager...")
env_var = os.environ.copy()
p = subprocess.Popen(start_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2AMStartupHandler.log.info("WSO2 API Manager started successfully")
def set_keymanager_ports(self, keymanager_ports):
"""
Expose keymanager ports
:return: keymanager mgt https proxy port
"""
keymanager_mgt_https_pp = None
if keymanager_ports is not None:
keymanager_ports_array = keymanager_ports.split(":")
if keymanager_ports_array:
keymanager_mgt_https_pp = keymanager_ports_array[1]
self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT, str(keymanager_mgt_https_pp))
return keymanager_mgt_https_pp
def set_gateway_ports(self, gateway_ports):
"""
Expose gateway ports
Input- Ports:30003
:return: void
"""
gateway_mgt_https_pp = None
if gateway_ports is not None:
gateway_ports_array = gateway_ports.split(":")
if gateway_ports_array:
gateway_mgt_https_pp = gateway_ports_array[1]
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT, str(gateway_mgt_https_pp))
def set_gateway_worker_ports(self, gateway_worker_ports):
"""
Expose gateway worker ports
:return: void
"""
gateway_pt_http_pp = None
gateway_pt_https_pp = None
if gateway_worker_ports is not None:
gateway_wk_ports_array = gateway_worker_ports.split(":")
if gateway_wk_ports_array:
gateway_pt_http_pp = gateway_wk_ports_array[1]
gateway_pt_https_pp = gateway_wk_ports_array[2]
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT, str(gateway_pt_http_pp))
self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT, str(gateway_pt_https_pp))
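# Illustrative example of the gateway worker port string this method expects
# (the value is an assumption following the "Ports:<pt-http>:<pt-https>" format
# built in the gateway worker profile above, e.g. "Ports:30003:30004"):
#   "Ports:30003:30004".split(":") -> ["Ports", "30003", "30004"]
#   index [1] becomes the PT HTTP proxy port, index [2] the PT HTTPS proxy port.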
def populate_sub_domains(self, service_name):
"""
set sub domain based on the service name
for manager, sub domain as mgt
for worker, sub domain as worker
:return: void
"""
sub_domain = None
if service_name.endswith(self.CONST_MANAGER):
sub_domain = self.CONST_MGT
elif service_name.endswith(self.CONST_WORKER):
sub_domain = self.CONST_WORKER
if sub_domain is not None:
self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)
def read_proxy_port(self, port_mappings_str, port_mapping_name, port_mapping_protocol):
"""
Returns the proxy port of the requested port mapping
:return: proxy port, or None if no match is found
"""
# port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
# NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
# NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:8280|TYPE:ClientIP;
# NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:8243|TYPE:NodePort
service_proxy_port = None
if port_mappings_str is not None:
port_mappings_array = port_mappings_str.split(";")
if port_mappings_array:
for port_mapping in port_mappings_array:
name_value_array = port_mapping.split("|")
name = name_value_array[0].split(":")[1]
protocol = name_value_array[1].split(":")[1]
proxy_port = name_value_array[3].split(":")[1]
# If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
if proxy_port == '0':
proxy_port = name_value_array[2].split(":")[1]
if name == port_mapping_name and protocol == port_mapping_protocol:
service_proxy_port = proxy_port
return service_proxy_port
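# Worked example for read_proxy_port (the mapping string is illustrative, taken
# from the format documented above):
#   mappings = "NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:8280|TYPE:ClientIP"
#   read_proxy_port(mappings, "pt-http", "http") -> "8280"
#   if PROXY_PORT were "0", the PORT value "30003" would be returned instead.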
def get_data_from_meta_data_service(self, app_id, receive_data):
"""
Get data from meta data service
:return: received data
"""
mds_response = None
while mds_response is None:
WSO2AMStartupHandler.log.info(
"Waiting for " + receive_data + " to be available from metadata service for app ID: %s" % app_id)
time.sleep(1)
mds_response = mdsclient.get(app=True)
if mds_response is not None and mds_response.properties.get(receive_data) is None:
mds_response = None
return mds_response.properties[receive_data]
def add_data_to_meta_data_service(self, key, value):
"""
add data to meta data service
:return: void
"""
mdsclient.MDSPutRequest()
data = {"key": key, "values": [value]}
mdsclient.put(data, app=True)
def remove_data_from_metadata(self, key):
"""
remove data from meta data service
:return: void
"""
mds_response = mdsclient.get(app=True)
if mds_response is not None and mds_response.properties.get(key) is not None:
read_data = mds_response.properties[key]
check_str = isinstance(read_data, (str, unicode))
if check_str == True:
mdsclient.delete_property_value(key, read_data)
else:
check_int = isinstance(read_data, int)
if check_int == True:
mdsclient.delete_property_value(key, read_data)
else:
for entry in read_data:
mdsclient.delete_property_value(key, entry)
def set_cluster_ids(self, app_id, service_list):
"""
Set clusterIds of services read from topology for worker manager instances
else use own clusterId
:return: void
"""
cluster_ids = []
for service_name in service_list:
cluster_id_of_service = self.read_cluster_id_of_service(service_name, app_id)
if cluster_id_of_service is not None:
cluster_ids.append(cluster_id_of_service)
# If clusterIds are available, set them as environment variables
if cluster_ids:
cluster_ids_string = ",".join(cluster_ids)
self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)
def export_env_var(self, variable, value):
"""
Export value as an environment variable
:return: void
"""
if value is not None:
os.environ[variable] = value
WSO2AMStartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
else:
WSO2AMStartupHandler.log.warn("Could not export environment variable %s " % variable)
def read_cluster_id_of_service(self, service_name, app_id):
"""
Get the cluster_id of a service read from topology
:return: cluster_id
"""
cluster_id = None
clusters = self.get_clusters_from_topology(service_name)
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
cluster_id = cluster.cluster_id
return cluster_id
def update_hosts_file(self, ip_address, host_name):
"""
Updates /etc/hosts file with clustering hostnames
:return: void
"""
config_command = "echo %s %s >> /etc/hosts" % (ip_address, host_name)
env_var = os.environ.copy()
p = subprocess.Popen(config_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2AMStartupHandler.log.info(
"Successfully updated [ip_address] %s & [hostname] %s in etc/hosts" % (ip_address, host_name))
def set_host_names_for_gw(self, app_id, member_ip):
"""
Set hostnames of services read from topology for worker manager instances
exports MgtHostName and HostName
:return: void
"""
for service_name in self.GATEWAY_SERVICES:
if service_name.endswith(self.CONST_MANAGER):
mgt_host_name = self.get_host_name_from_cluster(service_name, app_id)
elif service_name.endswith(self.CONST_WORKER):
host_name = self.get_host_name_from_cluster(service_name, app_id)
self.update_hosts_file(member_ip, host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
def set_host_name(self, app_id, service_name, member_ip):
"""
Set hostname of service read from topology for any service name
export hostname and update the /etc/hosts
:return: void
"""
host_name = self.get_host_name_from_cluster(service_name, app_id)
self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
self.update_hosts_file(member_ip, host_name)
def get_host_name_from_cluster(self, service_name, app_id):
"""
Get hostname for a service
:return: hostname
"""
clusters = self.get_clusters_from_topology(service_name)
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
hostname = cluster.hostnames[0]
return hostname
def check_for_kubernetes_cluster(self, service_name, app_id):
"""
Check whether the deployment is a Kubernetes cluster
:return: True if it is a Kubernetes cluster, otherwise False
"""
isKubernetes = False
clusters = self.get_clusters_from_topology(service_name)
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
isKubernetes = cluster.is_kubernetes_cluster
return isKubernetes
def get_clusters_from_topology(self, service_name):
"""
get clusters from topology
:return: clusters
"""
clusters = None
topology = TopologyContext().get_topology()
if topology is not None:
if topology.service_exists(service_name):
service = topology.get_service(service_name)
clusters = service.get_clusters()
else:
WSO2AMStartupHandler.log.error("[Service] %s is not available in topology" % service_name)
return clusters
def find_environment_type(self, external_lb, service_name, app_id):
"""
Check whether the deployment is vm or kubernetes (optionally fronted by an external LB)
:return: CONST_EXTERNAL_LB_FOR_KUBERNETES, CONST_KUBERNETES or CONST_VM
"""
if external_lb == 'true':
return WSO2AMStartupHandler.CONST_EXTERNAL_LB_FOR_KUBERNETES
else:
isKubernetes = self.check_for_kubernetes_cluster(service_name, app_id)
if isKubernetes:
return WSO2AMStartupHandler.CONST_KUBERNETES
else:
return WSO2AMStartupHandler.CONST_VM
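# Decision summary for find_environment_type (derived from the method above):
#   external_lb == 'true'                      -> CONST_EXTERNAL_LB_FOR_KUBERNETES
#   else, the cluster is a Kubernetes cluster  -> CONST_KUBERNETES
#   otherwise                                  -> CONST_VM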
|
|
from typing import Union, Tuple, Any, Optional
from typing_extensions import Literal
import math
import eagerpy as ep
from ..models import Model
from ..criteria import Misclassification, TargetedMisclassification
from ..distances import l1
from ..devutils import atleast_kd, flatten
from .base import MinimizationAttack
from .base import get_criterion
from .base import T
from .base import raise_if_kwargs
class EADAttack(MinimizationAttack):
"""Implementation of the EAD Attack with EN Decision Rule. [#Chen18]_
Args:
binary_search_steps : Number of steps to perform in the binary search
over the const c.
steps : Number of optimization steps within each binary search step.
initial_stepsize : Initial stepsize to update the examples.
confidence : Confidence required for an example to be marked as adversarial.
Controls the gap between example and decision boundary.
initial_const : Initial value of the const c with which the binary search starts.
regularization : Controls the L1 regularization.
decision_rule : Rule according to which the best adversarial examples are selected.
They either minimize the L1 or ElasticNet distance.
abort_early : Stop inner search as soon as an adversarial example has been found.
Does not affect the binary search over the const c.
References:
.. [#Chen18] Pin-Yu Chen, Yash Sharma, Huan Zhang, Jinfeng Yi, Cho-Jui Hsieh,
"EAD: Elastic-Net Attacks to Deep Neural Networks via Adversarial Examples",
https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewPaper/16893
"""
distance = l1
def __init__(
self,
binary_search_steps: int = 9,
steps: int = 10000,
initial_stepsize: float = 1e-2,
confidence: float = 0.0,
initial_const: float = 1e-3,
regularization: float = 1e-2,
decision_rule: Union[Literal["EN"], Literal["L1"]] = "EN",
abort_early: bool = True,
):
if decision_rule not in ("EN", "L1"):
raise ValueError("invalid decision rule")
self.binary_search_steps = binary_search_steps
self.steps = steps
self.confidence = confidence
self.initial_stepsize = initial_stepsize
self.regularization = regularization
self.initial_const = initial_const
self.abort_early = abort_early
self.decision_rule = decision_rule
def run(
self,
model: Model,
inputs: T,
criterion: Union[Misclassification, TargetedMisclassification, T],
*,
early_stop: Optional[float] = None,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x, restore_type = ep.astensor_(inputs)
criterion_ = get_criterion(criterion)
del inputs, criterion, kwargs
N = len(x)
if isinstance(criterion_, Misclassification):
targeted = False
classes = criterion_.labels
change_classes_logits = self.confidence
elif isinstance(criterion_, TargetedMisclassification):
targeted = True
classes = criterion_.target_classes
change_classes_logits = -self.confidence
else:
raise ValueError("unsupported criterion")
def is_adversarial(perturbed: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
if change_classes_logits != 0:
logits += ep.onehot_like(logits, classes, value=change_classes_logits)
return criterion_(perturbed, logits)
if classes.shape != (N,):
name = "target_classes" if targeted else "labels"
raise ValueError(
f"expected {name} to have shape ({N},), got {classes.shape}"
)
min_, max_ = model.bounds
rows = range(N)
def loss_fun(y_k: ep.Tensor, consts: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
assert y_k.shape == x.shape
assert consts.shape == (N,)
logits = model(y_k)
if targeted:
c_minimize = _best_other_classes(logits, classes)
c_maximize = classes
else:
c_minimize = classes
c_maximize = _best_other_classes(logits, classes)
is_adv_loss = logits[rows, c_minimize] - logits[rows, c_maximize]
assert is_adv_loss.shape == (N,)
is_adv_loss = is_adv_loss + self.confidence
is_adv_loss = ep.maximum(0, is_adv_loss)
is_adv_loss = is_adv_loss * consts
squared_norms = flatten(y_k - x).square().sum(axis=-1)
loss = is_adv_loss.sum() + squared_norms.sum()
return loss, logits
loss_aux_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=True)
consts = self.initial_const * ep.ones(x, (N,))
lower_bounds = ep.zeros(x, (N,))
upper_bounds = ep.inf * ep.ones(x, (N,))
best_advs = ep.zeros_like(x)
best_advs_norms = ep.ones(x, (N,)) * ep.inf
# the binary search searches for the smallest consts that produce adversarials
for binary_search_step in range(self.binary_search_steps):
if (
binary_search_step == self.binary_search_steps - 1
and self.binary_search_steps >= 10
):
# in the last iteration, repeat the search once
consts = ep.minimum(upper_bounds, 1e10)
# create a new optimizer to find the delta that minimizes the loss
x_k = x
y_k = x
found_advs = ep.full(
x, (N,), value=False
).bool() # found adv with the current consts
loss_at_previous_check = ep.inf
for iteration in range(self.steps):
# square-root learning rate decay
stepsize = self.initial_stepsize * (1.0 - iteration / self.steps) ** 0.5
loss, logits, gradient = loss_aux_and_grad(y_k, consts)
x_k_old = x_k
x_k = _project_shrinkage_thresholding(
y_k - stepsize * gradient, x, self.regularization, min_, max_
)
y_k = x_k + iteration / (iteration + 3.0) * (x_k - x_k_old)
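# Note (added): x_k above is the projected shrinkage-thresholding (ISTA) step,
# and this y_k update is the FISTA-style momentum extrapolation used by EAD:
# y_{k+1} = x_{k+1} + k / (k + 3) * (x_{k+1} - x_k).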
if self.abort_early and iteration % (math.ceil(self.steps / 10)) == 0:
# after each tenth of the iterations, check progress
if not loss.item() <= 0.9999 * loss_at_previous_check:
break # stop optimization if there has been no progress
loss_at_previous_check = loss.item()
found_advs_iter = is_adversarial(x_k, model(x_k))
best_advs, best_advs_norms = _apply_decision_rule(
self.decision_rule,
self.regularization,
best_advs,
best_advs_norms,
x_k,
x,
found_advs_iter,
)
found_advs = ep.logical_or(found_advs, found_advs_iter)
upper_bounds = ep.where(found_advs, consts, upper_bounds)
lower_bounds = ep.where(found_advs, lower_bounds, consts)
consts_exponential_search = consts * 10
consts_binary_search = (lower_bounds + upper_bounds) / 2
consts = ep.where(
ep.isinf(upper_bounds), consts_exponential_search, consts_binary_search
)
return restore_type(best_advs)
def _best_other_classes(logits: ep.Tensor, exclude: ep.Tensor) -> ep.Tensor:
other_logits = logits - ep.onehot_like(logits, exclude, value=ep.inf)
return other_logits.argmax(axis=-1)
def _apply_decision_rule(
decision_rule: Union[Literal["EN"], Literal["L1"]],
beta: float,
best_advs: ep.Tensor,
best_advs_norms: ep.Tensor,
x_k: ep.Tensor,
x: ep.Tensor,
found_advs: ep.Tensor,
) -> Tuple[ep.Tensor, ep.Tensor]:
if decision_rule == "EN":
norms = beta * flatten(x_k - x).abs().sum(axis=-1) + flatten(
x_k - x
).square().sum(axis=-1)
else:
# decision rule = L1
norms = flatten(x_k - x).abs().sum(axis=-1)
new_best = ep.logical_and(norms < best_advs_norms, found_advs)
new_best_kd = atleast_kd(new_best, best_advs.ndim)
best_advs = ep.where(new_best_kd, x_k, best_advs)
best_advs_norms = ep.where(new_best, norms, best_advs_norms)
return best_advs, best_advs_norms
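# Note (derived from the code above): the "EN" rule ranks candidate adversarials
# by the elastic-net quantity beta * ||x_k - x||_1 + ||x_k - x||_2^2, while the
# "L1" rule ranks them by ||x_k - x||_1 alone.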
def _project_shrinkage_thresholding(
z: ep.Tensor, x0: ep.Tensor, regularization: float, min_: float, max_: float
) -> ep.Tensor:
"""Performs the element-wise projected shrinkage-thresholding
operation"""
upper_mask = z - x0 > regularization
lower_mask = z - x0 < -regularization
projection = ep.where(upper_mask, ep.minimum(z - regularization, max_), x0)
projection = ep.where(lower_mask, ep.maximum(z + regularization, min_), projection)
return projection
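# Minimal usage sketch (illustrative, not part of this module; assumes a
# foolbox-wrapped `model` plus eagerpy-compatible `images` and integer `labels`
# exist in the caller's scope):
#
#   attack = EADAttack(binary_search_steps=9, steps=1000, decision_rule="EN")
#   adversarials = attack.run(model, images, Misclassification(labels))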
|
|
"""
Renders the command line on the console.
(Redraws parts of the input line that were changed.)
"""
from __future__ import unicode_literals
from prompt_toolkit.filters import to_cli_filter
from prompt_toolkit.layout.mouse_handlers import MouseHandlers
from prompt_toolkit.layout.screen import Point, Screen, WritePosition
from prompt_toolkit.output import Output
from prompt_toolkit.styles import Style
from prompt_toolkit.token import Token
from prompt_toolkit.utils import is_windows
from six.moves import range
__all__ = (
'Renderer',
'print_tokens',
)
def _output_screen_diff(output, screen, current_pos, previous_screen=None, last_token=None,
is_done=False, use_alternate_screen=False, attrs_for_token=None, size=None,
previous_width=0): # XXX: drop is_done
"""
Render the diff between this screen and the previous screen.
This takes two `Screen` instances. The one that represents the output like
it was during the last rendering and one that represents the current
output raster. Looking at these two `Screen` instances, this function will
render the difference by calling the appropriate methods of the `Output`
object that only paint the changes to the terminal.
This is some performance-critical code which is heavily optimized.
Don't change things without profiling first.
:param current_pos: Current cursor position.
:param last_token: `Token` instance that represents the output attributes of
the last drawn character. (Color/attributes.)
:param attrs_for_token: :class:`._TokenToAttrsCache` instance.
:param size: The current size of the terminal.
:param previous_width: The width of the terminal during the last rendering.
"""
width, height = size.columns, size.rows
#: Remember the last printed character.
last_token = [last_token] # nonlocal
#: Variable for capturing the output.
write = output.write
write_raw = output.write_raw
# Create locals for the most used output methods.
# (Save expensive attribute lookups.)
_output_set_attributes = output.set_attributes
_output_reset_attributes = output.reset_attributes
_output_cursor_forward = output.cursor_forward
_output_cursor_up = output.cursor_up
_output_cursor_backward = output.cursor_backward
# Hide cursor before rendering. (Avoid flickering.)
output.hide_cursor()
def reset_attributes():
" Wrapper around Output.reset_attributes. "
_output_reset_attributes()
last_token[0] = None # Forget last char after resetting attributes.
def move_cursor(new):
" Move cursor to this `new` point. Returns the given Point. "
current_x, current_y = current_pos.x, current_pos.y
if new.y > current_y:
# Use newlines instead of CURSOR_DOWN, because this might add new lines.
# CURSOR_DOWN will never create new lines at the bottom.
# Also reset attributes, otherwise the newline could draw a
# background color.
reset_attributes()
write('\r\n' * (new.y - current_y))
current_x = 0
_output_cursor_forward(new.x)
return new
elif new.y < current_y:
_output_cursor_up(current_y - new.y)
if current_x >= width - 1:
write('\r')
_output_cursor_forward(new.x)
elif new.x < current_x or current_x >= width - 1:
_output_cursor_backward(current_x - new.x)
elif new.x > current_x:
_output_cursor_forward(new.x - current_x)
return new
def output_char(char):
"""
Write the output of this character.
"""
# If the last printed character has the same token, it also has the
# same style, so we don't output it.
the_last_token = last_token[0]
if the_last_token and the_last_token == char.token:
write(char.char)
else:
_output_set_attributes(attrs_for_token[char.token])
write(char.char)
last_token[0] = char.token
# Render for the first time: reset styling.
if not previous_screen:
reset_attributes()
# Disable autowrap. (When entering the alternate screen, or anytime when
# we have a prompt. - In the case of a REPL, like IPython, people can have
# background threads, and it's hard for debugging if their output is not
# wrapped.)
if not previous_screen or not use_alternate_screen:
output.disable_autowrap()
# When the previous screen has a different size, redraw everything anyway.
# Also when we are done. (We might take up fewer rows, so clearing is important.)
if is_done or not previous_screen or previous_width != width: # XXX: also consider height??
current_pos = move_cursor(Point(0, 0))
reset_attributes()
output.erase_down()
previous_screen = Screen()
# Get height of the screen.
# (height changes as we loop over data_buffer, so remember the current value.)
# (Also make sure to clip the height to the size of the output.)
current_height = min(screen.height, height)
# Loop over the rows.
row_count = min(max(screen.height, previous_screen.height), height)
c = 0 # Column counter.
for y in range(row_count):
new_row = screen.data_buffer[y]
previous_row = previous_screen.data_buffer[y]
zero_width_escapes_row = screen.zero_width_escapes[y]
new_max_line_len = min(width - 1, max(new_row.keys()) if new_row else 0)
previous_max_line_len = min(width - 1, max(previous_row.keys()) if previous_row else 0)
# Loop over the columns.
c = 0
while c < new_max_line_len + 1:
new_char = new_row[c]
old_char = previous_row[c]
char_width = (new_char.width or 1)
# When the old and new character at this position are different,
# draw the output. (Because of the performance, we don't call
# `Char.__ne__`, but inline the same expression.)
if new_char.char != old_char.char or new_char.token != old_char.token:
current_pos = move_cursor(Point(y=y, x=c))
# Send injected escape sequences to output.
if c in zero_width_escapes_row:
write_raw(zero_width_escapes_row[c])
output_char(new_char)
current_pos = current_pos._replace(x=current_pos.x + char_width)
c += char_width
# If the new line is shorter, trim it.
if previous_screen and new_max_line_len < previous_max_line_len:
current_pos = move_cursor(Point(y=y, x=new_max_line_len+1))
reset_attributes()
output.erase_end_of_line()
# Correctly reserve vertical space as required by the layout.
# When this is a new screen (drawn for the first time), or for some reason
# higher than the previous one. Move the cursor once to the bottom of the
# output. That way, we're sure that the terminal scrolls up, even when the
# lower lines of the canvas just contain whitespace.
# The most obvious reason that we actually want this behaviour is to avoid
# the artifact of the input scrolling when the completion menu is shown.
# (If the scrolling is actually wanted, the layout can still be built in a
# way to behave that way by setting a dynamic height.)
if current_height > previous_screen.height:
current_pos = move_cursor(Point(y=current_height - 1, x=0))
# Move cursor:
if is_done:
current_pos = move_cursor(Point(y=current_height, x=0))
output.erase_down()
else:
current_pos = move_cursor(screen.cursor_position)
if is_done or not use_alternate_screen:
output.enable_autowrap()
# Always reset the color attributes. This is important because a background
# thread could print data to stdout and we want that to be displayed in the
# default colors. (Also, if a background color has been set, many terminals
# give weird artifacs on resize events.)
reset_attributes()
if screen.show_cursor or is_done:
output.show_cursor()
return current_pos, last_token[0]
class HeightIsUnknownError(Exception):
" Information unavailable. Did not yet receive the CPR response. "
class _TokenToAttrsCache(dict):
"""
A cache structure that maps Pygments Tokens to :class:`.Attr`.
(This is an important speed up.)
"""
def __init__(self, get_style_for_token):
self.get_style_for_token = get_style_for_token
def __missing__(self, token):
try:
result = self.get_style_for_token(token)
except KeyError:
result = None
self[token] = result
return result
class Renderer(object):
"""
Typical usage:
::
output = Vt100_Output.from_pty(sys.stdout)
r = Renderer(style, output)
r.render(cli, layout=...)
"""
def __init__(self, style, output, use_alternate_screen=False, mouse_support=False):
assert isinstance(style, Style)
assert isinstance(output, Output)
self.style = style
self.output = output
self.use_alternate_screen = use_alternate_screen
self.mouse_support = to_cli_filter(mouse_support)
self._in_alternate_screen = False
self._mouse_support_enabled = False
self._bracketed_paste_enabled = False
# Waiting-for-CPR flag. True when we have sent the request but have not yet
# received a response.
self.waiting_for_cpr = False
self.reset(_scroll=True)
def reset(self, _scroll=False, leave_alternate_screen=True):
# Reset position
self._cursor_pos = Point(x=0, y=0)
# Remember the last screen instance between renderers. This way,
# we can create a `diff` between two screens and only output the
# difference. It's also to remember the last height. (To show for
# instance a toolbar at the bottom position.)
self._last_screen = None
self._last_size = None
self._last_token = None
# When the style hash changes, we have to do a full redraw as well as
# clear the `_attrs_for_token` dictionary.
self._last_style_hash = None
self._attrs_for_token = None
# Default MouseHandlers. (Just empty.)
self.mouse_handlers = MouseHandlers()
# Remember the last title. Only set the title when it changes.
self._last_title = None
#: Space from the top of the layout, until the bottom of the terminal.
#: We don't know this until a `report_absolute_cursor_row` call.
self._min_available_height = 0
# In case of Windows, also make sure to scroll to the current cursor
# position. (Only when rendering the first time.)
if is_windows() and _scroll:
self.output.scroll_buffer_to_prompt()
# Quit alternate screen.
if self._in_alternate_screen and leave_alternate_screen:
self.output.quit_alternate_screen()
self._in_alternate_screen = False
# Disable mouse support.
if self._mouse_support_enabled:
self.output.disable_mouse_support()
self._mouse_support_enabled = False
# Disable bracketed paste.
if self._bracketed_paste_enabled:
self.output.disable_bracketed_paste()
self._bracketed_paste_enabled = False
# Flush output. `disable_mouse_support` needs to write to stdout.
self.output.flush()
@property
def height_is_known(self):
"""
True when the height from the cursor until the bottom of the terminal
is known. (It's often nicer to draw bottom toolbars only if the height
is known, in order to avoid flickering when the CPR response arrives.)
"""
return self.use_alternate_screen or self._min_available_height > 0 or \
is_windows() # On Windows, we don't have to wait for a CPR.
@property
def rows_above_layout(self):
"""
Return the number of rows visible in the terminal above the layout.
"""
if self._in_alternate_screen:
return 0
elif self._min_available_height > 0:
total_rows = self.output.get_size().rows
last_screen_height = self._last_screen.height if self._last_screen else 0
return total_rows - max(self._min_available_height, last_screen_height)
else:
raise HeightIsUnknownError('Rows above layout is unknown.')
def request_absolute_cursor_position(self):
"""
Get current cursor position.
For vt100: Do CPR request. (answer will arrive later.)
For win32: Do API call. (Answer comes immediately.)
"""
# Only do this request when the cursor is at the top row. (after a
# clear or reset). We will rely on that in `report_absolute_cursor_row`.
assert self._cursor_pos.y == 0
# For Win32, we have an API call to get the number of rows below the
# cursor.
if is_windows():
self._min_available_height = self.output.get_rows_below_cursor_position()
else:
if self.use_alternate_screen:
self._min_available_height = self.output.get_size().rows
else:
# Asks for a cursor position report (CPR).
self.waiting_for_cpr = True
self.output.ask_for_cpr()
def report_absolute_cursor_row(self, row):
"""
To be called when we know the absolute cursor position.
(As an answer of a "Cursor Position Request" response.)
"""
# Calculate the amount of rows from the cursor position until the
# bottom of the terminal.
total_rows = self.output.get_size().rows
rows_below_cursor = total_rows - row + 1
# Set the minimum available height.
self._min_available_height = rows_below_cursor
self.waiting_for_cpr = False
def render(self, cli, layout, is_done=False):
"""
Render the current interface to the output.
:param is_done: When True, put the cursor at the end of the interface. We
won't print any changes to this part.
"""
output = self.output
# Enter alternate screen.
if self.use_alternate_screen and not self._in_alternate_screen:
self._in_alternate_screen = True
output.enter_alternate_screen()
# Enable bracketed paste.
if not self._bracketed_paste_enabled:
self.output.enable_bracketed_paste()
self._bracketed_paste_enabled = True
# Enable/disable mouse support.
needs_mouse_support = self.mouse_support(cli)
if needs_mouse_support and not self._mouse_support_enabled:
output.enable_mouse_support()
self._mouse_support_enabled = True
elif not needs_mouse_support and self._mouse_support_enabled:
output.disable_mouse_support()
self._mouse_support_enabled = False
# Create screen and write layout to it.
size = output.get_size()
screen = Screen()
screen.show_cursor = False # Hide cursor by default, unless one of the
# containers decides to display it.
mouse_handlers = MouseHandlers()
if is_done:
height = 0 # When we are done, we don't necessarily want to fill up until the bottom.
else:
height = self._last_screen.height if self._last_screen else 0
height = max(self._min_available_height, height)
# When the size changes, don't consider the previous screen.
if self._last_size != size:
self._last_screen = None
# When we render using another style, do a full repaint. (Forget about
# the previous rendered screen.)
# (But note that we still use _last_screen to calculate the height.)
if self.style.invalidation_hash() != self._last_style_hash:
self._last_screen = None
self._attrs_for_token = None
if self._attrs_for_token is None:
self._attrs_for_token = _TokenToAttrsCache(self.style.get_attrs_for_token)
self._last_style_hash = self.style.invalidation_hash()
layout.write_to_screen(cli, screen, mouse_handlers, WritePosition(
xpos=0,
ypos=0,
width=size.columns,
height=(size.rows if self.use_alternate_screen else height),
extended_height=size.rows,
))
# When grayed. Replace all tokens in the new screen.
if cli.is_aborting or cli.is_exiting:
screen.replace_all_tokens(Token.Aborted)
# Process diff and write to output.
self._cursor_pos, self._last_token = _output_screen_diff(
output, screen, self._cursor_pos,
self._last_screen, self._last_token, is_done,
use_alternate_screen=self.use_alternate_screen,
attrs_for_token=self._attrs_for_token,
size=size,
previous_width=(self._last_size.columns if self._last_size else 0))
self._last_screen = screen
self._last_size = size
self.mouse_handlers = mouse_handlers
# Write title if it changed.
new_title = cli.terminal_title
if new_title != self._last_title:
if new_title is None:
self.output.clear_title()
else:
self.output.set_title(new_title)
self._last_title = new_title
output.flush()
def erase(self, leave_alternate_screen=True, erase_title=True):
"""
Hide all output and put the cursor back at the first line. This is for
instance used for running a system command (while hiding the CLI) and
later resuming the same CLI.
:param leave_alternate_screen: When True, and when inside an alternate
screen buffer, quit the alternate screen.
:param erase_title: When True, clear the title from the title bar.
"""
output = self.output
output.cursor_backward(self._cursor_pos.x)
output.cursor_up(self._cursor_pos.y)
output.erase_down()
output.reset_attributes()
output.enable_autowrap()
output.flush()
# Erase title.
if self._last_title and erase_title:
output.clear_title()
self.reset(leave_alternate_screen=leave_alternate_screen)
def clear(self):
"""
Clear screen and go to 0,0
"""
# Erase current output first.
self.erase()
# Send "Erase Screen" command and go to (0, 0).
output = self.output
output.erase_screen()
output.cursor_goto(0, 0)
output.flush()
self.request_absolute_cursor_position()
def print_tokens(output, tokens, style):
"""
Print a list of (Token, text) tuples in the given style to the output.
"""
assert isinstance(output, Output)
assert isinstance(style, Style)
# Reset first.
output.reset_attributes()
output.enable_autowrap()
# Print all (token, text) tuples.
attrs_for_token = _TokenToAttrsCache(style.get_attrs_for_token)
for token, text in tokens:
attrs = attrs_for_token[token]
if attrs:
output.set_attributes(attrs)
else:
output.reset_attributes()
output.write(text)
# Reset again.
output.reset_attributes()
output.flush()
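# Usage sketch for print_tokens (illustrative; assumes a concrete Output such as
# Vt100_Output and a prompt_toolkit Style instance named `style` are available):
#
#   print_tokens(output, [(Token.Keyword, 'hello '), (Token, 'world\n')], style)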
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "v"
parentdir_prefix = "devs-"
versionfile_source = "devs/_version.py"
import errno
import os
import re
import subprocess
import sys
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print(
(
"guessing rootdir is '%s', "
"but '%s' doesn't start with prefix '%s'"
) % (root, dirname, parentdir_prefix)
)
return None
ret = dirname[len(parentdir_prefix):]
if ret.find('-py') != -1:
ret = ret[:ret.find('-py')]
return {"version": ret, "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full": keywords["full"].strip()}
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return {"version": keywords["full"].strip(),
"full": keywords["full"].strip()}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'"
% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = {"refnames": git_refnames, "full": git_full}
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return rep_by_pep440(ver)
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return rep_by_pep440(
git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
def git2pep440(ver_str):
dash_count = ver_str.count('-')
if dash_count == 0:
return ver_str
elif dash_count == 1:
return ver_str.split('-')[0] + ".post.dev1.pre"
elif dash_count == 2:
tag, commits, _ = ver_str.split('-')
return ".post.dev".join([tag, commits])
elif dash_count == 3:
tag, commits, _, _ = ver_str.split('-')
commits = str(int(commits) + 1)
return ".post.dev".join([tag, commits]) + ".pre"
else:
raise RuntimeError("Invalid version string")
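# Examples of the git-describe -> PEP 440-style mapping implemented above
# (version strings are illustrative):
#   "1.2.0"                  -> "1.2.0"
#   "1.2.0-dirty"            -> "1.2.0.post.dev1.pre"
#   "1.2.0-3-gabcdef0"       -> "1.2.0.post.dev3"
#   "1.2.0-3-gabcdef0-dirty" -> "1.2.0.post.dev4.pre"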
def rep_by_pep440(ver):
ver["version"] = git2pep440(ver["version"])
return ver
|
|
#!c:\users\juan.digicash\documents\workspace-django\miblog\scripts\python.exe
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <[email protected]>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self.stack[0], 'format'): # If the top item is an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.top().draft(mode, (xsize, ysize)) # configure the loader of the top-of-stack image in place
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().extrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
# Image.offset() was removed from Pillow; ImageChops.offset is the equivalent
from PIL import ImageChops
self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
# map the operator name (e.g. FLIP_LEFT_RIGHT, ROTATE_90) onto the Image constant
self.push(image.transpose(getattr(Image, transpose)))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
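# --- Editor's note: illustrative usage sketch, not part of the original script ---
# Assuming the loader verbs defined earlier in this file (e.g. "open") and a
# local file named "test.png" (hypothetical), a non-interactive run could be:
#
#     python pildriver.py show crop 0 0 200 300 open test.png
#
# Arguments are reversed before being pushed, so the rightmost words execute
# first: "open test.png" loads the image, "crop 0 0 200 300" crops it, and
# "show" displays the result.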
|
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class SpecificData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'class_loader': 'ClassLoader',
'conversions': 'list[ConversionObject]',
'fast_reader_builder': 'FastReaderBuilder',
'fast_reader_enabled': 'bool'
}
attribute_map = {
'class_loader': 'classLoader',
'conversions': 'conversions',
'fast_reader_builder': 'fastReaderBuilder',
'fast_reader_enabled': 'fastReaderEnabled'
}
def __init__(self, class_loader=None, conversions=None, fast_reader_builder=None, fast_reader_enabled=None, _configuration=None): # noqa: E501
"""SpecificData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._class_loader = None
self._conversions = None
self._fast_reader_builder = None
self._fast_reader_enabled = None
self.discriminator = None
if class_loader is not None:
self.class_loader = class_loader
if conversions is not None:
self.conversions = conversions
if fast_reader_builder is not None:
self.fast_reader_builder = fast_reader_builder
if fast_reader_enabled is not None:
self.fast_reader_enabled = fast_reader_enabled
@property
def class_loader(self):
"""Gets the class_loader of this SpecificData. # noqa: E501
:return: The class_loader of this SpecificData. # noqa: E501
:rtype: ClassLoader
"""
return self._class_loader
@class_loader.setter
def class_loader(self, class_loader):
"""Sets the class_loader of this SpecificData.
:param class_loader: The class_loader of this SpecificData. # noqa: E501
:type: ClassLoader
"""
self._class_loader = class_loader
@property
def conversions(self):
"""Gets the conversions of this SpecificData. # noqa: E501
:return: The conversions of this SpecificData. # noqa: E501
:rtype: list[ConversionObject]
"""
return self._conversions
@conversions.setter
def conversions(self, conversions):
"""Sets the conversions of this SpecificData.
:param conversions: The conversions of this SpecificData. # noqa: E501
:type: list[ConversionObject]
"""
self._conversions = conversions
@property
def fast_reader_builder(self):
"""Gets the fast_reader_builder of this SpecificData. # noqa: E501
:return: The fast_reader_builder of this SpecificData. # noqa: E501
:rtype: FastReaderBuilder
"""
return self._fast_reader_builder
@fast_reader_builder.setter
def fast_reader_builder(self, fast_reader_builder):
"""Sets the fast_reader_builder of this SpecificData.
:param fast_reader_builder: The fast_reader_builder of this SpecificData. # noqa: E501
:type: FastReaderBuilder
"""
self._fast_reader_builder = fast_reader_builder
@property
def fast_reader_enabled(self):
"""Gets the fast_reader_enabled of this SpecificData. # noqa: E501
:return: The fast_reader_enabled of this SpecificData. # noqa: E501
:rtype: bool
"""
return self._fast_reader_enabled
@fast_reader_enabled.setter
def fast_reader_enabled(self, fast_reader_enabled):
"""Sets the fast_reader_enabled of this SpecificData.
:param fast_reader_enabled: The fast_reader_enabled of this SpecificData. # noqa: E501
:type: bool
"""
self._fast_reader_enabled = fast_reader_enabled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SpecificData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SpecificData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SpecificData):
return True
return self.to_dict() != other.to_dict()
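# --- Editor's note: illustrative usage sketch, not part of the generated code ---
# Assuming the wavefront_api_client package is importable, the generated model
# can be used roughly like this:
#
#     data = SpecificData(fast_reader_enabled=True)
#     data.conversions = []            # set through the generated property
#     print(data.to_dict())            # plain-dict view used by to_str() and __eq__()
#     print(data == SpecificData(fast_reader_enabled=True, conversions=[]))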
|
|
from unittest import mock
from django.test import RequestFactory, TestCase
from django.urls.exceptions import NoReverseMatch
from wagtail.contrib.routable_page.templatetags.wagtailroutablepage_tags import routablepageurl
from wagtail.core.models import Page, Site
from wagtail.tests.routablepage.models import (
RoutablePageTest, RoutablePageWithOverriddenIndexRouteTest)
class TestRoutablePage(TestCase):
model = RoutablePageTest
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=self.model(
title="Routable Page",
live=True,
))
def test_resolve_index_route_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/')
self.assertEqual(view, self.routable_page.index_route)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_resolve_archive_by_year_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/year/2014/')
self.assertEqual(view, self.routable_page.archive_by_year)
self.assertEqual(args, ('2014', ))
self.assertEqual(kwargs, {})
def test_resolve_archive_by_author_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/archive/author/joe-bloggs/')
self.assertEqual(view, self.routable_page.archive_by_author)
self.assertEqual(args, ())
self.assertEqual(kwargs, {'author_slug': 'joe-bloggs'})
def test_resolve_external_view(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external/joe-bloggs/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ('joe-bloggs', ))
self.assertEqual(kwargs, {})
def test_resolve_external_view_other_route(self):
view, args, kwargs = self.routable_page.resolve_subpage('/external-no-arg/')
self.assertEqual(view, self.routable_page.external_view)
self.assertEqual(args, ())
self.assertEqual(kwargs, {})
def test_reverse_index_route_view(self):
url = self.routable_page.reverse_subpage('index_route')
self.assertEqual(url, '')
def test_reverse_archive_by_year_view(self):
url = self.routable_page.reverse_subpage('archive_by_year', args=('2014', ))
self.assertEqual(url, 'archive/year/2014/')
def test_reverse_archive_by_author_view(self):
url = self.routable_page.reverse_subpage('archive_by_author', kwargs={'author_slug': 'joe-bloggs'})
self.assertEqual(url, 'archive/author/joe-bloggs/')
def test_reverse_overridden_name(self):
url = self.routable_page.reverse_subpage('name_overridden')
self.assertEqual(url, 'override-name-test/')
def test_reverse_overridden_name_default_doesnt_work(self):
with self.assertRaises(NoReverseMatch):
self.routable_page.reverse_subpage('override_name_test')
def test_reverse_external_view(self):
url = self.routable_page.reverse_subpage('external_view', args=('joe-bloggs', ))
self.assertEqual(url, 'external/joe-bloggs/')
def test_reverse_external_view_other_route(self):
url = self.routable_page.reverse_subpage('external_view')
self.assertEqual(url, 'external-no-arg/')
def test_get_index_route_view(self):
response = self.client.get(self.routable_page.url)
self.assertContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_routable_page_with_overridden_index_route(self):
page = self.home_page.add_child(
instance=RoutablePageWithOverriddenIndexRouteTest(
title="Routable Page with overridden index",
live=True
)
)
response = self.client.get(page.url)
self.assertContains(response, "OVERRIDDEN INDEX ROUTE")
self.assertNotContains(response, "DEFAULT PAGE TEMPLATE")
def test_get_archive_by_year_view(self):
response = self.client.get(self.routable_page.url + 'archive/year/2014/')
self.assertContains(response, "ARCHIVE BY YEAR: 2014")
def test_earlier_view_takes_precedence(self):
response = self.client.get(self.routable_page.url + 'archive/year/1984/')
self.assertContains(response, "we were always at war with eastasia")
def test_get_archive_by_author_view(self):
response = self.client.get(self.routable_page.url + 'archive/author/joe-bloggs/')
self.assertContains(response, "ARCHIVE BY AUTHOR: joe-bloggs")
def test_get_external_view(self):
response = self.client.get(self.routable_page.url + 'external/joe-bloggs/')
self.assertContains(response, "EXTERNAL VIEW: joe-bloggs")
def test_get_external_view_other_route(self):
response = self.client.get(self.routable_page.url + 'external-no-arg/')
self.assertContains(response, "EXTERNAL VIEW: ARG NOT SET")
def test_routable_page_can_have_instance_bound_descriptors(self):
# This descriptor pretends that it does not exist in the class, hence
# it raises an AttributeError when class bound. This is, for instance,
# the behavior of django's FileFields.
class InstanceDescriptor:
def __get__(self, instance, cls=None):
if instance is None:
raise AttributeError
return 'value'
def __set__(self, instance, value):
raise AttributeError
try:
RoutablePageTest.descriptor = InstanceDescriptor()
RoutablePageTest.get_subpage_urls()
finally:
del RoutablePageTest.descriptor
class TestRoutablePageTemplateTag(TestCase):
def setUp(self):
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
class TestRoutablePageTemplateTagForSecondSiteAtSameRoot(TestCase):
"""
When multiple sites exist on the same root page, relative URLs within that subtree should
omit the domain, in line with #4390
"""
def setUp(self):
default_site = Site.objects.get(is_default_site=True)
second_site = Site.objects.create( # add another site with the same root page
hostname='development.local',
port=default_site.port,
root_page_id=default_site.root_page_id,
)
self.home_page = Page.objects.get(id=2)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
self.request.site = second_site
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, '/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, '/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, '/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, '/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = '/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
class TestRoutablePageTemplateTagForSecondSiteAtDifferentRoot(TestCase):
"""
When multiple sites exist, relative URLs between such sites should include the domain portion
"""
def setUp(self):
self.home_page = Page.objects.get(id=2)
events_page = self.home_page.add_child(instance=Page(title='Events', live=True))
second_site = Site.objects.create(
hostname='events.local',
port=80,
root_page=events_page,
)
self.routable_page = self.home_page.add_child(instance=RoutablePageTest(
title="Routable Page",
live=True,
))
self.rf = RequestFactory()
self.request = self.rf.get(self.routable_page.url)
self.request.site = Site.find_for_request(self.request)
self.context = {'request': self.request}
self.request.site = second_site
def test_templatetag_reverse_index_route(self):
url = routablepageurl(self.context, self.routable_page,
'index_route')
self.assertEqual(url, 'http://localhost/%s/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_year_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_year', '2014')
self.assertEqual(url, 'http://localhost/%s/archive/year/2014/' % self.routable_page.slug)
def test_templatetag_reverse_archive_by_author_view(self):
url = routablepageurl(self.context, self.routable_page,
'archive_by_author', author_slug='joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/archive/author/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view(self):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
self.assertEqual(url, 'http://localhost/%s/external/joe-bloggs/' % self.routable_page.slug)
def test_templatetag_reverse_external_view_without_append_slash(self):
with mock.patch('wagtail.core.models.WAGTAIL_APPEND_SLASH', False):
url = routablepageurl(self.context, self.routable_page,
'external_view', 'joe-bloggs')
expected = 'http://localhost/' + self.routable_page.slug + '/' + 'external/joe-bloggs/'
self.assertEqual(url, expected)
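# --- Editor's note: illustrative sketch, not part of the original test module ---
# The routablepageurl tag exercised above is normally used from a template,
# roughly as follows (assuming "page" is a RoutablePage instance in context):
#
#     {% load wagtailroutablepage_tags %}
#     {% routablepageurl page "archive_by_year" 2014 %}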
|
|
# -*- coding: utf-8 -*-
"""The processing status classes."""
import time
class ProcessStatus(object):
"""The status of an individual process.
Attributes:
    display_name (str): human readable name of the file entry currently being
        processed by the process.
identifier (str): process identifier.
last_running_time (int): timestamp of the last update when the process had
a running process status.
number_of_consumed_errors (int): total number of errors consumed by
the process.
number_of_consumed_errors_delta (int): number of errors consumed by
the process since the last status update.
number_of_consumed_event_tags (int): total number of event tags consumed by
the process.
number_of_consumed_event_tags_delta (int): number of event tags consumed by
the process since the last status update.
number_of_consumed_events (int): total number of events consumed by
the process.
number_of_consumed_events_delta (int): number of events consumed by
the process since the last status update.
number_of_consumed_reports (int): total number of event reports consumed
by the process.
number_of_consumed_reports_delta (int): number of event reports consumed
by the process since the last status update.
number_of_consumed_sources (int): total number of event sources consumed
by the process.
number_of_consumed_sources_delta (int): number of event sources consumed
by the process since the last status update.
number_of_produced_errors (int): total number of errors produced by
the process.
number_of_produced_errors_delta (int): number of errors produced by
the process since the last status update.
number_of_produced_event_tags (int): total number of event tags produced by
the process.
number_of_produced_event_tags_delta (int): number of event tags produced by
the process since the last status update.
number_of_produced_events (int): total number of events produced by
the process.
number_of_produced_events_delta (int): number of events produced by
the process since the last status update.
number_of_produced_reports (int): total number of event reports
produced by the process.
number_of_produced_reports_delta (int): number of event reports produced
by the process since the last status update.
number_of_produced_sources (int): total number of event sources
produced by the process.
number_of_produced_sources_delta (int): number of event sources produced
by the process since the last status update.
pid (int): process identifier (PID).
status (str): human readable status indication e.g. 'Hashing', 'Idle'.
"""
def __init__(self):
"""Initializes the process status object."""
super(ProcessStatus, self).__init__()
self.display_name = None
self.identifier = None
self.last_running_time = 0
self.number_of_consumed_errors = 0
self.number_of_consumed_errors_delta = 0
self.number_of_consumed_event_tags = 0
self.number_of_consumed_event_tags_delta = 0
self.number_of_consumed_events = 0
self.number_of_consumed_events_delta = 0
self.number_of_consumed_reports = 0
self.number_of_consumed_reports_delta = 0
self.number_of_consumed_sources = 0
self.number_of_consumed_sources_delta = 0
self.number_of_produced_errors = 0
self.number_of_produced_errors_delta = 0
self.number_of_produced_event_tags = 0
self.number_of_produced_event_tags_delta = 0
self.number_of_produced_events = 0
self.number_of_produced_events_delta = 0
self.number_of_produced_reports = 0
self.number_of_produced_reports_delta = 0
self.number_of_produced_sources = 0
self.number_of_produced_sources_delta = 0
self.pid = None
self.status = None
def UpdateNumberOfErrors(
self, number_of_consumed_errors, number_of_produced_errors):
"""Updates the number of errors.
Args:
number_of_consumed_errors (int): total number of errors consumed by
the process.
number_of_produced_errors (int): total number of errors produced by
the process.
Returns:
bool: True if either number of errors has increased.
Raises:
ValueError: if the consumed or produced number of errors is smaller
than the value of the previous update.
"""
consumed_errors_delta = 0
if number_of_consumed_errors is not None:
if number_of_consumed_errors < self.number_of_consumed_errors:
raise ValueError(
u'Number of consumed errors smaller than previous update.')
consumed_errors_delta = (
number_of_consumed_errors - self.number_of_consumed_errors)
self.number_of_consumed_errors = number_of_consumed_errors
self.number_of_consumed_errors_delta = consumed_errors_delta
produced_errors_delta = 0
if number_of_produced_errors is not None:
if number_of_produced_errors < self.number_of_produced_errors:
raise ValueError(
u'Number of produced errors smaller than previous update.')
produced_errors_delta = (
number_of_produced_errors - self.number_of_produced_errors)
self.number_of_produced_errors = number_of_produced_errors
self.number_of_produced_errors_delta = produced_errors_delta
return consumed_errors_delta > 0 or produced_errors_delta > 0
def UpdateNumberOfEventTags(
self, number_of_consumed_event_tags, number_of_produced_event_tags):
"""Updates the number of event tags.
Args:
number_of_consumed_event_tags (int): total number of event tags consumed
by the process.
number_of_produced_event_tags (int): total number of event tags produced
by the process.
Returns:
bool: True if either number of event tags has increased.
Raises:
ValueError: if the consumed or produced number of event tags is smaller
than the value of the previous update.
"""
consumed_event_tags_delta = 0
if number_of_consumed_event_tags is not None:
if number_of_consumed_event_tags < self.number_of_consumed_event_tags:
raise ValueError(
u'Number of consumed event tags smaller than previous update.')
consumed_event_tags_delta = (
number_of_consumed_event_tags - self.number_of_consumed_event_tags)
self.number_of_consumed_event_tags = number_of_consumed_event_tags
self.number_of_consumed_event_tags_delta = consumed_event_tags_delta
produced_event_tags_delta = 0
if number_of_produced_event_tags is not None:
if number_of_produced_event_tags < self.number_of_produced_event_tags:
raise ValueError(
u'Number of produced event tags smaller than previous update.')
produced_event_tags_delta = (
number_of_produced_event_tags - self.number_of_produced_event_tags)
self.number_of_produced_event_tags = number_of_produced_event_tags
self.number_of_produced_event_tags_delta = produced_event_tags_delta
return consumed_event_tags_delta > 0 or produced_event_tags_delta > 0
def UpdateNumberOfEvents(
self, number_of_consumed_events, number_of_produced_events):
"""Updates the number of events.
Args:
number_of_consumed_events (int): total number of events consumed by
the process.
number_of_produced_events (int): total number of events produced by
the process.
Returns:
bool: True if either number of events has increased.
Raises:
ValueError: if the consumed or produced number of events is smaller
than the value of the previous update.
"""
consumed_events_delta = 0
if number_of_consumed_events is not None:
if number_of_consumed_events < self.number_of_consumed_events:
raise ValueError(
u'Number of consumed events smaller than previous update.')
consumed_events_delta = (
number_of_consumed_events - self.number_of_consumed_events)
self.number_of_consumed_events = number_of_consumed_events
self.number_of_consumed_events_delta = consumed_events_delta
produced_events_delta = 0
if number_of_produced_events is not None:
if number_of_produced_events < self.number_of_produced_events:
raise ValueError(
u'Number of produced events smaller than previous update.')
produced_events_delta = (
number_of_produced_events - self.number_of_produced_events)
self.number_of_produced_events = number_of_produced_events
self.number_of_produced_events_delta = produced_events_delta
return consumed_events_delta > 0 or produced_events_delta > 0
def UpdateNumberOfEventReports(
self, number_of_consumed_reports, number_of_produced_reports):
"""Updates the number of event reports.
Args:
number_of_consumed_reports (int): total number of event reports consumed
by the process.
number_of_produced_reports (int): total number of event reports produced
by the process.
Returns:
bool: True if either number of event reports has increased.
Raises:
ValueError: if the consumed or produced number of event reports is
smaller than the value of the previous update.
"""
consumed_reports_delta = 0
if number_of_consumed_reports is not None:
if number_of_consumed_reports < self.number_of_consumed_reports:
raise ValueError(
u'Number of consumed reports smaller than previous update.')
consumed_reports_delta = (
number_of_consumed_reports - self.number_of_consumed_reports)
self.number_of_consumed_reports = number_of_consumed_reports
self.number_of_consumed_reports_delta = consumed_reports_delta
produced_reports_delta = 0
if number_of_produced_reports is not None:
if number_of_produced_reports < self.number_of_produced_reports:
raise ValueError(
u'Number of produced reports smaller than previous update.')
produced_reports_delta = (
number_of_produced_reports - self.number_of_produced_reports)
self.number_of_produced_reports = number_of_produced_reports
self.number_of_produced_reports_delta = produced_reports_delta
return consumed_reports_delta > 0 or produced_reports_delta > 0
def UpdateNumberOfEventSources(
self, number_of_consumed_sources, number_of_produced_sources):
"""Updates the number of event sources.
Args:
number_of_consumed_sources (int): total number of event sources consumed
by the process.
number_of_produced_sources (int): total number of event sources produced
by the process.
Returns:
bool: True if either number of event sources has increased.
Raises:
ValueError: if the consumed or produced number of event sources is
smaller than the value of the previous update.
"""
consumed_sources_delta = 0
if number_of_consumed_sources is not None:
if number_of_consumed_sources < self.number_of_consumed_sources:
raise ValueError(
u'Number of consumed sources smaller than previous update.')
consumed_sources_delta = (
number_of_consumed_sources - self.number_of_consumed_sources)
self.number_of_consumed_sources = number_of_consumed_sources
self.number_of_consumed_sources_delta = consumed_sources_delta
produced_sources_delta = 0
if number_of_produced_sources is not None:
if number_of_produced_sources < self.number_of_produced_sources:
raise ValueError(
u'Number of produced sources smaller than previous update.')
produced_sources_delta = (
number_of_produced_sources - self.number_of_produced_sources)
self.number_of_produced_sources = number_of_produced_sources
self.number_of_produced_sources_delta = produced_sources_delta
return consumed_sources_delta > 0 or produced_sources_delta > 0
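# --- Editor's note: illustrative sketch, not part of the original module ---
# The Update* methods above track deltas between status updates; the counts
# here are invented for illustration:
#
#     process_status = ProcessStatus()
#     process_status.UpdateNumberOfEvents(0, 100)   # True, produced delta is 100
#     process_status.UpdateNumberOfEvents(0, 100)   # False, nothing new
#     process_status.UpdateNumberOfEvents(0, 50)    # raises ValueError (count decreased)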
class ProcessingStatus(object):
"""The status of the overall extraction process (processing).
Attributes:
aborted (bool): True if processing was aborted.
error_path_specs (list[str]): path specification strings that caused
critical errors during processing.
foreman_status (ProcessingStatus): foreman processing status.
"""
def __init__(self):
"""Initializes the processing status object."""
super(ProcessingStatus, self).__init__()
self._workers_status = {}
self.aborted = False
self.error_path_specs = []
self.foreman_status = None
@property
def workers_status(self):
"""The worker status objects sorted by identifier."""
return [self._workers_status[identifier]
for identifier in sorted(self._workers_status.keys())]
def _UpdateProcessStatus(
self, process_status, identifier, status, pid, display_name,
number_of_consumed_sources, number_of_produced_sources,
number_of_consumed_events, number_of_produced_events,
number_of_consumed_event_tags, number_of_produced_event_tags,
number_of_consumed_errors, number_of_produced_errors,
number_of_consumed_reports, number_of_produced_reports):
"""Updates a process status.
Args:
process_status (ProcessStatus): process status.
identifier (str): process identifier.
status (str): human readable status of the process e.g. 'Idle'.
pid (int): process identifier (PID).
      display_name (str): human readable name of the file entry currently being
          processed by the process.
number_of_consumed_sources (int): total number of event sources consumed
by the process.
number_of_produced_sources (int): total number of event sources produced
by the process.
number_of_consumed_events (int): total number of events consumed by
the process.
number_of_produced_events (int): total number of events produced by
the process.
number_of_consumed_event_tags (int): total number of event tags consumed
by the process.
number_of_produced_event_tags (int): total number of event tags produced
by the process.
number_of_consumed_errors (int): total number of errors consumed by
the process.
number_of_produced_errors (int): total number of errors produced by
the process.
number_of_consumed_reports (int): total number of event reports consumed
by the process.
number_of_produced_reports (int): total number of event reports produced
by the process.
"""
new_sources = process_status.UpdateNumberOfEventSources(
number_of_consumed_sources, number_of_produced_sources)
new_events = process_status.UpdateNumberOfEvents(
number_of_consumed_events, number_of_produced_events)
new_event_tags = process_status.UpdateNumberOfEventTags(
number_of_consumed_event_tags, number_of_produced_event_tags)
new_errors = process_status.UpdateNumberOfErrors(
number_of_consumed_errors, number_of_produced_errors)
new_reports = process_status.UpdateNumberOfEventReports(
number_of_consumed_reports, number_of_produced_reports)
process_status.display_name = display_name
process_status.identifier = identifier
process_status.pid = pid
process_status.status = status
if (new_sources or new_events or new_event_tags or new_errors or
new_reports):
process_status.last_running_time = time.time()
def UpdateForemanStatus(
self, identifier, status, pid, display_name,
number_of_consumed_sources, number_of_produced_sources,
number_of_consumed_events, number_of_produced_events,
number_of_consumed_event_tags, number_of_produced_event_tags,
number_of_consumed_errors, number_of_produced_errors,
number_of_consumed_reports, number_of_produced_reports):
"""Updates the status of the foreman.
Args:
identifier (str): foreman identifier.
status (str): human readable status of the foreman e.g. 'Idle'.
pid (int): process identifier (PID).
      display_name (str): human readable name of the file entry currently being
          processed by the foreman.
number_of_consumed_sources (int): total number of event sources consumed
by the foreman.
number_of_produced_sources (int): total number of event sources produced
by the foreman.
number_of_consumed_events (int): total number of events consumed by
the foreman.
number_of_produced_events (int): total number of events produced by
the foreman.
number_of_consumed_event_tags (int): total number of event tags consumed
by the foreman.
number_of_produced_event_tags (int): total number of event tags produced
by the foreman.
number_of_consumed_errors (int): total number of errors consumed by
the foreman.
number_of_produced_errors (int): total number of errors produced by
the foreman.
number_of_consumed_reports (int): total number of event reports consumed
by the process.
number_of_produced_reports (int): total number of event reports produced
by the process.
"""
if not self.foreman_status:
self.foreman_status = ProcessStatus()
self._UpdateProcessStatus(
self.foreman_status, identifier, status, pid, display_name,
number_of_consumed_sources, number_of_produced_sources,
number_of_consumed_events, number_of_produced_events,
number_of_consumed_event_tags, number_of_produced_event_tags,
number_of_consumed_errors, number_of_produced_errors,
number_of_consumed_reports, number_of_produced_reports)
def UpdateWorkerStatus(
self, identifier, status, pid, display_name,
number_of_consumed_sources, number_of_produced_sources,
number_of_consumed_events, number_of_produced_events,
number_of_consumed_event_tags, number_of_produced_event_tags,
number_of_consumed_errors, number_of_produced_errors,
number_of_consumed_reports, number_of_produced_reports):
"""Updates the status of a worker.
Args:
identifier (str): worker identifier.
status (str): human readable status of the worker e.g. 'Idle'.
pid (int): process identifier (PID).
      display_name (str): human readable name of the file entry currently being
          processed by the worker.
number_of_consumed_sources (int): total number of event sources consumed
by the worker.
number_of_produced_sources (int): total number of event sources produced
by the worker.
number_of_consumed_events (int): total number of events consumed by
the worker.
number_of_produced_events (int): total number of events produced by
the worker.
number_of_consumed_event_tags (int): total number of event tags consumed
by the worker.
number_of_produced_event_tags (int): total number of event tags produced
by the worker.
number_of_consumed_errors (int): total number of errors consumed by
the worker.
number_of_produced_errors (int): total number of errors produced by
the worker.
number_of_consumed_reports (int): total number of event reports consumed
by the process.
number_of_produced_reports (int): total number of event reports produced
by the process.
"""
if identifier not in self._workers_status:
self._workers_status[identifier] = ProcessStatus()
process_status = self._workers_status[identifier]
self._UpdateProcessStatus(
process_status, identifier, status, pid, display_name,
number_of_consumed_sources, number_of_produced_sources,
number_of_consumed_events, number_of_produced_events,
number_of_consumed_event_tags, number_of_produced_event_tags,
number_of_consumed_errors, number_of_produced_errors,
number_of_consumed_reports, number_of_produced_reports)
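# --- Editor's note: illustrative sketch, not part of the original module ---
# How the overall status object is typically driven; the identifier, PID and
# counts below are made up for illustration:
#
#     status = ProcessingStatus()
#     status.UpdateWorkerStatus(
#         u'Worker_00', u'running', 1234, u'/tmp/image.raw',
#         10, 0,     # consumed/produced event sources
#         0, 250,    # consumed/produced events
#         0, 0,      # consumed/produced event tags
#         0, 2,      # consumed/produced errors
#         0, 0)      # consumed/produced event reports
#     for worker in status.workers_status:
#         print(worker.identifier, worker.number_of_produced_events_delta)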
|
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import netaddr
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_router_base
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
class DvrLocalRouter(dvr_router_base.DvrRouterBase):
def __init__(self, agent, host, *args, **kwargs):
super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs)
self.floating_ips_dict = {}
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.fip_ns = None
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrLocalRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def _handle_fip_nat_rules(self, interface_name):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set as False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add the NAT rule back
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority(floating_ip)
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(ip=fixed_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_ip_addr_adv_notif(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(ip=floating_ip,
table=dvr_fip_ns.FIP_RT_TBL,
priority=rule_pr)
self.fip_ns.deallocate_rule_priority(floating_ip)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
self.fip_ns.delete()
self.fip_ns = None
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
super(DvrLocalRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(ip, mac)
elif operation == 'delete':
device.neigh.delete(ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
def _set_subnet_arp_info(self, subnet_id):
"""Set ARP info retrieved from Plugin for existing ports."""
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
The index value has to be 32 bits or less but more than the system
generated entries i.e. 32768. For IPv4 use the numeric value of the
cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
Use the freed range to extend smaller values so that they become
greater than system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
if isinstance(ip_cidr, six.text_type):
ip_cidr = ip_cidr.encode() # Needed for Python 3.x
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
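    # --- Editor's note: illustrative sketch, not part of the original module ---
    # Index ranges produced by the method above, with hypothetical CIDRs:
    #
    #     DvrLocalRouter._get_snat_idx('192.168.1.0/24')  # numeric value of the network
    #     DvrLocalRouter._get_snat_idx('2001:db8::/64')   # folded crc32, always >= 32768
    #
    # For IPv6 the xor-folded crc32 is at most 30 bits, and any value below
    # 32768 is shifted up by MASK_30, keeping it clear of system-generated entries.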
def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
snat_idx):
try:
ns_ip_device.route.delete_gateway(gw_ip_addr,
table=snat_idx)
except exceptions.DeviceNotFoundError:
pass
def _snat_redirect_modify(self, gateway, sn_port, sn_int, is_add):
"""Adds or removes rules and routes for SNAT redirection."""
try:
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
if is_add:
ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
for port_fixed_ip in sn_port['fixed_ips']:
# Find the first gateway IP address matching this IP version
port_ip_addr = port_fixed_ip['ip_address']
port_ip_vers = netaddr.IPAddress(port_ip_addr).version
for gw_fixed_ip in gateway['fixed_ips']:
gw_ip_addr = gw_fixed_ip['ip_address']
if netaddr.IPAddress(gw_ip_addr).version == port_ip_vers:
sn_port_cidr = common_utils.ip_to_cidr(
port_ip_addr, port_fixed_ip['prefixlen'])
snat_idx = self._get_snat_idx(sn_port_cidr)
if is_add:
ns_ipd.route.add_gateway(gw_ip_addr,
table=snat_idx)
ns_ipr.rule.add(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
ns_ipwrapr.netns.execute(
['sysctl', '-w',
'net.ipv4.conf.%s.send_redirects=0' % sn_int])
else:
self._delete_gateway_device_if_exists(ns_ipd,
gw_ip_addr,
snat_idx)
ns_ipr.rule.delete(ip=sn_port_cidr,
table=snat_idx,
priority=snat_idx)
break
except Exception:
if is_add:
exc = _LE('DVR: error adding redirection logic')
else:
exc = _LE('DVR: snat remove failed to clear the rule '
'and device')
LOG.exception(exc)
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=True)
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
def internal_network_added(self, port):
super(DvrLocalRouter, self).internal_network_added(port)
# NOTE: The following function _set_subnet_arp_info
# should be called to dynamically populate the arp
# entries for the dvr services ports into the router
# namespace. This does not have dependency on the
# external_gateway port or the agent_mode.
for subnet in port['subnets']:
self._set_subnet_arp_info(subnet['id'])
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port, port, interface_name)
def _dvr_internal_network_removed(self, port):
if not self.ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port, port, interface_name)
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrLocalRouter, self).internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
def get_external_device_interface_name(self, ex_gw_port):
fip_int = self.fip_ns.get_int_device_name(self.router_id)
if ip_lib.device_exists(fip_int, namespace=self.fip_ns.get_name()):
return self.fip_ns.get_rtr_ext_device_name(self.router_id)
def external_gateway_added(self, ex_gw_port, interface_name):
# TODO(Carl) Refactor external_gateway_added/updated/removed to use
# super class implementation where possible. Looks like preserve_ips,
# and ns_name are the key differences.
ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapr.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.send_redirects=0'])
for p in self.internal_ports:
gateway = self.get_snat_port_for_internal_port(p)
id_name = self.get_internal_device_name(p['id'])
if gateway:
self._snat_redirect_add(gateway, p, id_name)
for port in self.get_snat_interfaces():
for ip in port['fixed_ips']:
self._update_arp_entry(ip['ip_address'],
port['mac_address'],
ip['subnet_id'],
'add')
def external_gateway_updated(self, ex_gw_port, interface_name):
pass
def external_gateway_removed(self, ex_gw_port, interface_name):
# TODO(Carl) Should this be calling process_snat_dnat_for_fip?
self.process_floating_ip_nat_rules()
if self.fip_ns:
to_fip_interface_name = (
self.get_external_device_interface_name(ex_gw_port))
self.process_floating_ip_addresses(to_fip_interface_name)
for p in self.internal_ports:
gateway = self.get_snat_port_for_internal_port(p)
internal_interface = self.get_internal_device_name(p['id'])
self._snat_redirect_remove(gateway, p, internal_interface)
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
pass
def process_external(self, agent):
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
self.create_dvr_fip_interfaces(ex_gw_port)
super(DvrLocalRouter, self).process_external(agent)
def create_dvr_fip_interfaces(self, ex_gw_port):
floating_ips = self.get_floating_ips()
fip_agent_port = self.get_floating_agent_gw_interface(
ex_gw_port['network_id'])
if fip_agent_port:
LOG.debug("FloatingIP agent gateway port received from the "
"plugin: %s", fip_agent_port)
is_first = False
if floating_ips:
is_first = self.fip_ns.subscribe(self.router_id)
if is_first and not fip_agent_port:
LOG.debug("No FloatingIP agent gateway port possibly due to "
"late binding of the private port to the host, "
"requesting agent gateway port for 'network-id' :"
"%s", ex_gw_port['network_id'])
fip_agent_port = self.agent.plugin_rpc.get_agent_gateway_port(
self.agent.context, ex_gw_port['network_id'])
if not fip_agent_port:
LOG.error(_LE("No FloatingIP agent gateway port "
"returned from server for 'network-id': "
"%s"), ex_gw_port['network_id'])
if is_first and fip_agent_port:
if 'subnets' not in fip_agent_port:
LOG.error(_LE('Missing subnet/agent_gateway_port'))
else:
self.fip_ns.create_gateway_port(fip_agent_port)
if self.fip_ns.agent_gateway_port and floating_ips:
if self.dist_fip_count == 0 or is_first:
self.fip_ns.create_rtr_2_fip_link(self)
# kicks the FW Agent to add rules for the IR namespace if
# configured
self.agent.process_router_add(self)
def process(self, agent):
ex_gw_port = self.get_ex_gw_port()
if ex_gw_port:
self.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
self.fip_ns.scan_fip_ports(self)
super(DvrLocalRouter, self).process(agent)
|
|
from aiohttp import ClientSession, TCPConnector, BasicAuth
import asyncio
from collections import defaultdict, namedtuple
from IPython import embed
import json
import pandas as pd
import random
import requests as r
import string
from sys import exit
from typing import Union, List, Dict
import ilxutils.scicrunch_client_helper as scicrunch_client_helper
import os
# TODO: create a check for superclass... if the entity's superclass is known to be different then create your own entity; else piggyback on the known entity
# THOUGHTS: What if you don't know if superclass is different?
class scicrunch():
'''
Get functions need a list of term ids
Post functions need a list of dictionaries with their needed/optional keys & values
identifierSearches ids, LIMIT=25, _print=True, crawl=False
addTerms data ....
updateTerms data ....
addAnnotations data ....
getAnnotations_via_tid tids ....
getAnnotations_via_id annotation_ids ....
updateAnntation data ....
deleteAnnotations annotation_ids ....
addRelationship data ....
deleteTerms ilx_ids .. crawl=True .
'''
def __init__(self, api_key, base_url, auth=('None', 'None')):
self.api_key = api_key
self.base_url = base_url
        self.auth = BasicAuth(*auth)  # BasicAuth expects login and password as separate arguments
def crawl_get(self, urls):
outputs = {}
for url in urls:
auth = ('scicrunch',
'perl22(query)') # needed for test2.scicrunch.org
headers = {'Content-type': 'application/json'}
response = r.get(url, headers=headers, auth=auth)
if response.status_code not in [200, 201]:
try:
output = response.json()
except:
output = response.text
problem = str(output)
                exit(str(problem) + ' with status code [' +
                     str(response.status_code) + '] for url: ' + str(url))
else:
output = response.json()
if output.get('errormsg'):
exit(output['errormsg'])
# Duplicates; don't exit
elif output.get('data').get('errormsg'):
exit(output['data']['errormsg'])
if not output.get('data').get('id'):
continue
try:
output = {int(output['data']['id']): output['data']} # terms
except:
output = {int(output['data'][0]['tid']): output['data']} # annotations
outputs.update(output)
return outputs
def crawl_post(self, total_data, _print):
outputs = []
for i, tupdata in enumerate(total_data):
url, data = tupdata
params = {
**{'key': self.api_key, },
**data,
}
auth = ('scicrunch',
'perl22(query)') # needed for test2.scicrunch.org
headers = {'Content-type': 'application/json'}
# print(data['uid'], 'data')
response = r.post(url, data=json.dumps(params), headers=headers, auth=auth)
# strict codes due to odd behavior in past
if response.status_code not in [200, 201]:
try:
output = response.json()
except:
output = response.text
error = False
if output.get('errormsg'):
error = output.get('errormsg')
elif output.get('data').get('errormsg'):
error = output.get('data').get('errormsg')
if error:
if 'could not generate ILX identifier' in error:
output = None
elif 'already exists' in error.lower():
print(error)
output = {'data':{'term':{}}}
else:
print('IN CATCH')
problem = str(output)
                        exit(str(problem) + ' with status code [' +
                             str(response.status_code) + '] with params:' + str(data))
else:
print('OUT CATCH')
problem = str(output)
                    exit(str(problem) + ' with status code [' +
                         str(response.status_code) + '] with params:' + str(data))
else:
output = response.json()
error = False
                if output.get('errormsg'):
error = output['errormsg']
# Duplicates; don't exit
elif output.get('data').get('errormsg'):
error = output['data']['errormsg']
if error:
print(error)
output = {'failed':data}
else:
output = output['data']
if _print:
try:
print(i, output['label'])
except:
print(i, output)
outputs.append(output)
return outputs
def get(self,
urls=False,
LIMIT=25,
action='Getting Info',
_print=True,
crawl=False):
if crawl:
return self.crawl_get(urls)
async def get_single(url, session, auth):
async with session.get(url) as response:
if response.status not in [200, 201]:
try:
output = await response.json()
except:
output = await response.text()
problem = str(output)
exit(
str(problem) + ' with status code [' +
str(response.status) + ']')
output = await response.json(content_type=None)
try:
try:
output = {
int(output['data']['id']): output['data']
} # terms
except:
output = {
int(output['data'][0]['tid']): output['data']
} # annotations
except:
print('Not able to get output')
exit(output)
return output
async def get_all(urls, connector, loop):
if _print:
print('=== {0} ==='.format(action))
tasks = []
auth = BasicAuth('scicrunch', 'perl22(query)')
async with ClientSession(
connector=connector, loop=loop, auth=auth) as session:
for i, url in enumerate(urls):
task = asyncio.ensure_future(get_single(url, session, auth))
tasks.append(task)
return (await asyncio.gather(*tasks))
connector = TCPConnector(
limit=LIMIT
) # rate limiter; should be between 20 and 80; 100 maxed out server
loop = asyncio.get_event_loop() # event loop initialize
future = asyncio.ensure_future(
get_all(urls, connector, loop)) # tasks to do; data is in json format [{},]
outputs = loop.run_until_complete(future) # loop until done
return {k: v for keyval in outputs for k, v in keyval.items()}
def post(
self,
data: list,
LIMIT: int = 20,
action: str = 'Pushing Info',
_print: bool = True,
crawl: bool = False):
if crawl: return self.crawl_post(data, _print=_print)
async def post_single(url, data, session, i):
# data.update({
# 'batch-elastic': 'True'
# }) # term should be able to handle it now
data = json.dumps({
**{'key': self.api_key, },
**data,
})
headers = {'Content-type': 'application/json'}
limit = 0
output = {}
while not output and limit < 100:
async with session.post(url, data=data, headers=headers) as response:
# strict codes due to odd behavior in past
if response.status not in [200, 201]:
try:
output = await response.json()
except:
output = await response.text()
error = False
if output.get('errormsg'):
error = output.get('errormsg')
elif output.get('data').get('errormsg'):
error = output.get('data').get('errormsg')
if error:
if 'could not generate ILX identifier' in error:
output = None
elif 'already exists' in error:
print(error)
output = {'data':{'term':{}}}
else:
print('IN CATCH')
problem = str(output)
exit(str(problem) + ' with status code [' +
str(response.status) + '] with params:' + str(data))
output = {'data':{'term':{}}}
else:
print('OUT CATCH')
problem = str(output)
exit(str(problem) + ' with status code [' +
str(response.status) + '] with params:' + str(data))
output = {'data':{'term':{}}}
# Missing required fields I didn't account for OR Duplicates.
else:
# allows NoneTypes to pass
output = await response.json(content_type=None)
if not output:
print(response.status)
output = None
# Duplicates
elif output.get('data').get('errormsg'):
print(output.get('data').get('errormsg'))
# print(response.status)
# print(data) # TODO: its hitting here and idk why
if _print:
try:
print(i, output['data']['label'])
except:
print(i, output['data'])
return output['data']
async def bound_post(sem, url, data, session, i):
async with sem:
await post_single(url, data, session, i)
async def post_all(total_data, connector, loop):
tasks = []
sem = asyncio.Semaphore(50)
auth = BasicAuth('scicrunch', 'perl22(query)')
async with ClientSession(connector=connector, auth=auth) as session:
if _print:
print('=== {0} ==='.format(action))
for i, tupdata in enumerate(total_data):
url, data = tupdata
task = asyncio.ensure_future(
post_single(url, data, session, i))
tasks.append(task)
outputs = await asyncio.gather(*tasks)
return outputs
connector = TCPConnector(
limit=LIMIT
) # rate limiter; should be between 20 and 80; 100 maxed out server
loop = asyncio.get_event_loop() # event loop initialize
future = asyncio.ensure_future(
post_all(data, connector, loop)) # tasks to do; data is in json format [{},]
output = loop.run_until_complete(future) # loop until done
return output
def identifierSearches(self,
ids=None,
LIMIT=25,
_print=True,
crawl=False):
"""parameters( data = "list of term_ids" )"""
url_base = self.base_url + '/api/1/term/view/{id}' + '?key=' + self.api_key
urls = [url_base.format(id=str(_id)) for _id in ids]
return self.get(
urls=urls,
LIMIT=LIMIT,
action='Searching For Terms',
crawl=crawl,
_print=_print)
def ilxSearches(self,
ilx_ids=None,
LIMIT=25,
_print=True,
crawl=False):
"""parameters( data = "list of ilx_ids" )"""
url_base = self.base_url + "/api/1/ilx/search/identifier/{identifier}?key={APIKEY}"
urls = [url_base.format(identifier=ilx_id.replace('ILX:', 'ilx_'), APIKEY=self.api_key) for ilx_id in ilx_ids]
return self.get(
urls=urls,
LIMIT=LIMIT,
action='Searching For Terms',
crawl=crawl,
_print=_print)
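# Hedged usage sketch for ilxSearches (never called): curie-style ids such as
# 'ILX:0101431' are normalized to the 'ilx_...' form before the request; the
# identifier below is illustrative only.
def _example_ilx_search(self):
    records = self.ilxSearches(['ILX:0101431'], crawl=True, _print=False)
    return list(records.values())  # one record dict per resolved ilx id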
def updateTerms(self, data:list, LIMIT:int=20, _print:bool=True, crawl:bool=False,) -> list:
""" Updates existing entities
Args:
data:
needs:
id <str>
ilx_id <str>
options:
definition <str> #bug with qutations
superclasses [{'id':<int>}]
type term, cde, anntation, or relationship <str>
synonyms {'literal':<str>}
existing_ids {'iri':<str>,'curie':<str>','change':<bool>, 'delete':<bool>}
LIMIT:
limit of concurrent
_print:
prints label of data presented
crawl:
True: Uses linear requests.
False: Uses concurrent requests from the asyncio and aiohttp modules
Returns:
List of filled in data parallel with the input data. If any entity failed with an
ignorable reason, it will return empty for the item in the list returned.
"""
url_base = self.base_url + '/api/1/term/edit/{id}'
merged_data = []
# PHP on the server is LOADED with bugs. Best to just duplicate the entity data and change
# what you need in it before re-upserting the data.
old_data = self.identifierSearches(
[d['id'] for d in data], # just need the ids
LIMIT = LIMIT,
_print = _print,
crawl = crawl,
)
for d in data: # d for dictionary
url = url_base.format(id=str(d['id']))
# This check exists to avoid contradictions in case you are using a local reference
if d['ilx'] != old_data[int(d['id'])]['ilx']:
print(d['ilx'], old_data[int(d['id'])]['ilx'])
exit('You might be using beta instead of production!')
merged = scicrunch_client_helper.merge(new=d, old=old_data[int(d['id'])])
merged = scicrunch_client_helper.superclasses_bug_fix(merged) # BUG: superclass output diff than input needed
merged_data.append((url, merged))
resp = self.post(
merged_data,
LIMIT = LIMIT,
action = 'Updating Terms', # forced input from each function
_print = _print,
crawl = crawl,
)
return resp
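# Hedged usage sketch for updateTerms (never called): the id, ilx, and values
# below are hypothetical placeholders, not real InterLex records.
def _example_update_terms(self):
    update = {
        'id': '12345',            # existing term id (hypothetical)
        'ilx': 'ilx_0101431',     # must match the ilx stored for that id
        'definition': 'An updated definition.',
        'synonyms': [{'literal': 'example synonym'}],
    }
    return self.updateTerms([update], crawl=True, _print=False)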
def addTerms(self, data, LIMIT=25, _print=True, crawl=False):
"""
need:
label <str>
type term, cde, anntation, or relationship <str>
options:
definition <str> #bug with qutations
superclasses [{'id':<int>}]
synonyms [{'literal':<str>}]
existing_ids [{'iri':<str>,'curie':<str>'}]
ontologies [{'id':<int>}]
[{'type':'term', 'label':'brain'}]
"""
needed = set([
'label',
'type',
])
url_base = self.base_url + '/api/1/ilx/add'
terms = []
for d in data:
if (set(list(d)) & needed) != needed:
exit('You need keys: '+ str(needed - set(list(d))))
if not d.get('label') or not d.get('type'): # php won't catch an empty type!
exit('=== Data is missing label or type! ===')
d['term'] = d.pop('label') # ilx only accepts 'term'; it will need to be replaced back
#d['batch-elastic'] = 'True' # term/add and edit should be ready now
terms.append((url_base, d))
primer_responses = self.post(
terms,
action='Priming Terms',
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
ilx = {}
for primer_response in primer_responses:
primer_response['term'] = primer_response['term'].replace('&#39;', "'")
primer_response['term'] = primer_response['term'].replace('&#34;', '"')
primer_response['label'] = primer_response.pop('term')
ilx[primer_response['label'].lower()] = primer_response
url_base = self.base_url + '/api/1/term/add'
terms = []
for d in data:
d['label'] = d.pop('term')
d = scicrunch_client_helper.superclasses_bug_fix(d)
if not ilx.get(d['label'].lower()): # ilx can be incomplete if errored term
continue
try:
d.update({'ilx': ilx[d['label'].lower()]['ilx']})
except:
d.update({'ilx': ilx[d['label'].lower()]['fragment']})
terms.append((url_base, d))
return self.post(
terms,
action='Adding Terms',
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
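# Hedged usage sketch for addTerms (never called): the label and existing id are
# illustrative. addTerms first primes an ilx id for each record and then adds the
# full entity.
def _example_add_terms(self):
    new_term = {
        'label': 'example term',
        'type': 'term',
        'definition': 'A placeholder definition.',
        'existing_ids': [{'iri': 'http://example.org/123', 'curie': 'EX:123'}],
    }
    return self.addTerms([new_term], crawl=True, _print=False)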
def addAnnotations(self,
data,
LIMIT=25,
_print=True,
crawl=False,):
need = set([
'tid',
'annotation_tid',
'value',
'term_version',
'annotation_term_version',
])
url_base = self.base_url + '/api/1/term/add-annotation'
annotations = []
for annotation in data:
annotation.update({
'term_version': annotation['term_version'],
'annotation_term_version': annotation['annotation_term_version'],
'batch-elastic': 'True',
})
if (set(list(annotation)) & need) != need:
exit('You need keys: '+ str(need - set(list(annotation))))
annotations.append((url_base, annotation))
return self.post(
annotations,
LIMIT = LIMIT,
action = 'Adding Annotations',
_print = _print,
crawl = crawl,
)
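# Hedged usage sketch for addAnnotations (never called): the ids and versions are
# placeholders; in practice they come from identifierSearches/ilxSearches, as in
# deprecate_entity and add_editor_note below.
def _example_add_annotation(self):
    annotation = {
        'tid': '12345',              # id of the term being annotated
        'annotation_tid': '306378',  # id of the annotation property (editorNote)
        'value': 'example note',
        'term_version': '1',
        'annotation_term_version': '1',
    }
    return self.addAnnotations([annotation], crawl=True, _print=False)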
def getAnnotations_via_tid(self,
tids,
LIMIT=25,
_print=True,
crawl=False):
"""
tids = list of term ids that possess the annotations
"""
url_base = self.base_url + \
'/api/1/term/get-annotations/{tid}?key=' + self.api_key
urls = [url_base.format(tid=str(tid)) for tid in tids]
return self.get(urls,
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
def getAnnotations_via_id(self,
annotation_ids,
LIMIT=25,
_print=True,
crawl=False):
"""tids = list of strings or ints that are the ids of the annotations themselves"""
url_base = self.base_url + \
'/api/1/term/get-annotation/{id}?key=' + self.api_key
urls = [
url_base.format(id=str(annotation_id))
for annotation_id in annotation_ids
]
return self.get(urls, LIMIT=LIMIT, _print=_print, crawl=crawl)
def updateAnnotations(self,
data,
LIMIT=25,
_print=True,
crawl=False,):
"""data = [{'id', 'tid', 'annotation_tid', 'value', 'comment', 'upvote', 'downvote',
'curator_status', 'withdrawn', 'term_version', 'annotation_term_version', 'orig_uid',
'orig_time'}]
"""
url_base = self.base_url + \
'/api/1/term/edit-annotation/{id}' # id of annotation not term id
annotations = self.getAnnotations_via_id([d['id'] for d in data],
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
annotations_to_update = []
for d in data:
annotation = annotations[int(d['id'])]
annotation.update({**d})
url = url_base.format(id=annotation['id'])
annotations_to_update.append((url, annotation))
self.post(annotations_to_update,
LIMIT=LIMIT,
action='Updating Annotations',
_print=_print,
crawl=crawl)
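# Hedged usage sketch for updateAnnotations (never called): only the annotation
# id plus the fields being changed are needed; the rest is pulled in via
# getAnnotations_via_id. The id and value are placeholders.
def _example_update_annotation(self):
    self.updateAnnotations([{'id': '98765', 'value': 'corrected note'}],
                           crawl=True, _print=False)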
def deleteAnnotations(self,
annotation_ids,
LIMIT=25,
_print=True,
crawl=False,):
"""data = list of ids"""
url_base = self.base_url + \
'/api/1/term/edit-annotation/{annotation_id}' # id of annotation not term id; thx past troy!
annotations = self.getAnnotations_via_id(annotation_ids,
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
annotations_to_delete = []
for annotation_id in annotation_ids:
annotation = annotations[int(annotation_id)]
params = {
'value': ' ', # for delete
'annotation_tid': ' ', # for delete
'tid': ' ', # for delete
'term_version': '1',
'annotation_term_version': '1',
}
url = url_base.format(annotation_id=annotation_id)
annotation.update({**params})
annotations_to_delete.append((url, annotation))
return self.post(annotations_to_delete,
LIMIT=LIMIT,
_print=_print,
crawl=crawl)
def addRelationships(
self,
data: list,
LIMIT: int = 20,
_print: bool = True,
crawl: bool = False,
) -> list:
"""
data = [{
"term1_id", "term2_id", "relationship_tid",
"term1_version", "term2_version",
"relationship_term_version",}]
"""
url_base = self.base_url + '/api/1/term/add-relationship'
relationships = []
for relationship in data:
relationship.update({
'term1_version': relationship['term1_version'],
'term2_version': relationship['term2_version'],
'relationship_term_version': relationship['relationship_term_version']
})
relationships.append((url_base, relationship))
return self.post(
relationships,
LIMIT = LIMIT,
action = 'Adding Relationships',
_print = _print,
crawl = crawl,
)
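# Hedged usage sketch for addRelationships (never called): ids and versions are
# placeholders; add_merge_properties below shows the same call with values
# resolved from ilxSearches.
def _example_add_relationship(self):
    relationship = {
        'term1_id': '11111',
        'term2_id': '22222',
        'relationship_tid': '306376',  # e.g. the "replacedBy" relation
        'term1_version': '1',
        'term2_version': '1',
        'relationship_term_version': '1',
    }
    return self.addRelationships([relationship], crawl=True, _print=False)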
def deleteTermsFromElastic(self,
ilx_ids,
LIMIT=25,
_print=True,
crawl=True):
url = self.base_url + \
'/api/1/term/elastic/delete/{ilx_id}?key=' + self.api_key
data = [(url.format(ilx_id=str(ilx_id)), {}) for ilx_id in ilx_ids]
return self.post(
data, LIMIT=LIMIT, _print=_print, crawl=crawl)
def addTermsToElastic(
self,
tids,
LIMIT = 50,
_print = True,
crawl = True
) -> list:
url = self.base_url + '/api/1/term/elastic/upsert/{tid}?key=' + self.api_key
data = [(url.format(tid=str(tid)), {}) for tid in tids]
return self.post(
data,
LIMIT = LIMIT,
_print=_print,
crawl=crawl
)
def deprecate_entity(
self,
ilx_id: str,
note = None,
) -> None:
""" Tagged term in interlex to warn this term is no longer used
There isn't an proper way to delete a term and so we have to mark it so I can
extrapolate that in mysql/ttl loads.
Args:
term_id: id of the term of which to be deprecated
term_version: version of the term of which to be deprecated
Example: deprecateTerm('ilx_0101431', '6')
"""
term_id, term_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]
annotations = [{
'tid': term_id,
'annotation_tid': '306375', # id for annotation "deprecated"
'value': 'True',
'term_version': term_version,
'annotation_term_version': '1', # term version for annotation "deprecated"
}]
if note:
editor_note = {
'tid': term_id,
'annotation_tid': '306378', # id for annotation "editorNote"
'value': note,
'term_version': term_version,
'annotation_term_version': '1', # term version for annotation "deprecated"
}
annotations.append(editor_note)
self.addAnnotations(annotations, crawl=True, _print=False)
print(annotations)
def add_editor_note(
self,
ilx_id: str,
note = None,
) -> None:
""" Tagged term in interlex to warn this term is no longer used
There isn't an proper way to delete a term and so we have to mark it so I can
extrapolate that in mysql/ttl loads.
Args:
term_id: id of the term of which to be deprecated
term_version: version of the term of which to be deprecated
Example: deprecateTerm('ilx_0101431', '6')
"""
term_id, term_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx_id], crawl=True, _print=False).values()][0]
editor_note = {
'tid': term_id,
'annotation_tid': '306378', # id for annotation "editorNote"
'value': note,
'term_version': term_version,
'annotation_term_version': '1', # term version for annotation "deprecated"
}
self.addAnnotations([editor_note], crawl=True, _print=False)
print(editor_note)
def add_merge_properties(
self,
ilx1_id: str,
ilx2_id: str,
) -> None:
term1_id, term1_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx1_id], crawl=True, _print=False).values()][0]
term2_id, term2_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx2_id], crawl=True, _print=False).values()][0]
replaced_info = {
"term1_id": term1_id,
"term2_id": term2_id,
"relationship_tid": '306376', # "replacedBy" id ,
"term1_version": term1_version,
"term2_version": term2_version,
"relationship_term_version": '1', # "replacedBy" version,
}
self.addRelationships([replaced_info], crawl=True, _print=False)
print(replaced_info)
reason_info = {
'tid': term1_id,
'annotation_tid': '306377', # id for annotation "obsReason"
'value': 'http://purl.obolibrary.org/obo/IAO_0000227', # termsMerged rdf iri
'term_version': term1_version,
'annotation_term_version': '1', # term version for annotation "obsReason"
}
self.addAnnotations([reason_info], crawl=True, _print=False)
print(reason_info)
def add_syn_relationship_properties(
self,
ilx1_id: str,
ilx2_id: str,
) -> None:
term1_id, term1_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx1_id], crawl=True, _print=False).values()][0]
term2_id, term2_version = [(d['id'], d['version'])
for d in self.ilxSearches([ilx2_id], crawl=True, _print=False).values()][0]
replaced_info = {
"term1_id": term1_id,
"term2_id": term2_id,
"relationship_tid": '306379', # "isTreatedAsSynonymOf" id ,
"term1_version": term1_version,
"term2_version": term2_version,
"relationship_term_version": '1', # "isTreatedAsSynonymOf" version,
}
self.addRelationships([replaced_info], crawl=True, _print=False)
print(replaced_info)
def addOntology(self, ontology_url:str) -> List[dict]:
add_ontology_url = self.base_url + '/api/1/term/ontology/add'
data = json.dumps({
'url': ontology_url,
'key': self.api_key,
})
response = r.post(add_ontology_url, data=data, headers={'Content-type': 'application/json'})
print(response.status_code)
try:
return response.json()
except:
return response.text
def force_add_term(self, entity: dict):
""" Need to add an entity that already has a label existing in InterLex?
Well this is the function for you!
entity:
need:
label <str>
type: term, cde, pde, fde, annotation, or relationship <str>
options:
definition <str>
superclasses [{'id':<int>}]
synonyms [{'literal':<str>}]
existing_ids [{'iri':<str>,'curie':<str>'}]
ontologies [{'id':<int>}]
example:
entity = [{
'type':'term',
'label':'brain',
'existing_ids': [{
'iri':'http://ncbi.org/123',
'curie':'NCBI:123'
}]
}]
"""
needed = set([
'label',
'type',
])
url_ilx_add = self.base_url + '/api/1/ilx/add'
url_term_add = self.base_url + '/api/1/term/add'
url_term_update = self.base_url + '/api/1/term/edit/{id}'
if (set(list(entity)) & needed) != needed:
exit('You need keys: '+ str(needed - set(list(entity))))
# to ensure uniqueness
random_string = ''.join(random.choices(string.ascii_uppercase + string.digits, k=25))
real_label = entity['label']
entity['label'] = entity['label'] + '_' + random_string
entity['term'] = entity.pop('label') # ilx only accepts 'term'; it will need to be replaced back
primer_response = self.post([(url_ilx_add, entity.copy())], _print=False, crawl=True)[0]
entity['label'] = entity.pop('term')
entity['ilx'] = primer_response['fragment'] if primer_response.get('fragment') else primer_response['ilx']
entity = scicrunch_client_helper.superclasses_bug_fix(entity)
response = self.post([(url_term_add, entity.copy())], _print=False, crawl=True)[0]
old_data = self.identifierSearches(
[response['id']], # just need the ids
_print = False,
crawl = True,
)[response['id']]
old_data['label'] = real_label
entity = old_data.copy()
url_term_update = url_term_update.format(id=entity['id'])
return self.post([(url_term_update, entity)], _print=False, crawl=True)
def main():
sci = scicrunch(
api_key=os.environ.get('SCICRUNCH_API_KEY'),
base_url=os.environ.get('SCICRUNCH_BASEURL_BETA'),
)
entity = {
'label': 'test_force_add_term',
'type': 'term',
}
print(sci.force_add_term(entity))
update_entity = {'id': '305199', 'ilx': 'tmp_0382117', 'label': 'test_force_add_term', 'key': os.environ.get('SCICRUNCH_API_KEY')}
# print(sci.updateTerms([update_entity], _print=False, crawl=True))
if __name__ == '__main__':
main()
|
|
import os
import configparser
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
#
# define types for omop schema
#
class Model:
#
# Build the pre-defined OMOP model schema
#
def __init__(self):
self.model_schema = self.omopSchema()
#
# define omop schema here
# Pre-defining the schema performs better than using the CSV importer to
# infer the schema, which causes an extra scan of the data.
# The date fields are read in as strings. They can be converted to date
# objects if necessary. A usage sketch follows at the end of this class.
# OMOP V5.0
#
def omopSchema(self):
model_schema = {}
model_schema['care_site'] = StructType([ \
StructField("CARE_SITE_ID", IntegerType(), False), \
StructField("LOCATION_ID", IntegerType(), True), \
StructField("ORGANIZATION_ID", IntegerType(), True), \
StructField("PLACE_OF_SERVICE_CONCEPT_ID", IntegerType(), True), \
StructField("CARE_SITE_SOURCE_VALUE", StringType(), True), \
StructField("PLACE_OF_SERVICE_SOURCE_VALUE", StringType(), True)])
model_schema['cohort'] = StructType([ \
StructField("COHORT_ID", IntegerType(), False), \
StructField("COHORT_CONCEPT_ID", IntegerType(), False), \
StructField("COHORT_START_DATE", StringType(), False), \
StructField("COHORT_END_DATE", StringType(), True), \
StructField("SUBJECT_ID", IntegerType(), False), \
StructField("STOP_REASON", StringType(), True)])
model_schema['condition_era'] = StructType([ \
StructField("CONDITION_ERA_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("CONDITION_CONCEPT_ID", IntegerType(), False), \
StructField("CONDITION_ERA_START_DATE", StringType(), False), \
StructField("CONDITION_ERA_END_DATE", StringType(), True), \
StructField("CONDITION_OCCURRENCE_COUNT", IntegerType(), True)])
model_schema['condition_occurrence'] = StructType([ \
StructField("CONDITION_OCCURRENCE_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("CONDITION_CONCEPT_ID", IntegerType(), False), \
StructField("CONDITION_START_DATE", StringType(), False), \
StructField("CONDITION_END_DATE", StringType(), True), \
StructField("CONDITION_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("STOP_REASON", StringType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("CONDITION_SOURCE_VALUE", StringType(), True), \
StructField("CONDITION_SOURCE_CONCEPT_ID", IntegerType(), True)])
model_schema['death'] = StructType([ \
StructField("PERSON_ID", IntegerType(), False), \
StructField("DEATH_DATE", StringType(), False), \
StructField("DEATH_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("CAUSE_CONCEPT_ID", IntegerType(), True), \
StructField("CAUSE_SOURCE_VALUE", StringType(), True), \
StructField("CAUSE_SOURCE_CONCEPT_ID", IntegerType(), True)])
model_schema['device_exposure'] = StructType([ \
StructField("DEVICE_EXPOSURE_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("DEVICE_CONCEPT_ID", IntegerType(), False), \
StructField("DEVICE_EXPOSURE_START_DATE", StringType(), False), \
StructField("DEVICE_EXPOSURE_END_DATE", StringType(), True), \
StructField("DEVICE_TYPE_CONCEPT_ID", IntegerType(), True), \
StructField("UNIQUE_DEVICE_ID", IntegerType(), True), \
StructField("QUANTITY", IntegerType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("DEVICE_SOURCE_VALUE", StringType(), True), \
StructField("DEVICE_SOURCE_CONCEPT_ID", IntegerType(), True)])
model_schema['drug_cost'] = StructType([ \
StructField("DRUG_COST_ID", IntegerType(), False), \
StructField("DRUG_EXPOSURE_ID", IntegerType(), False), \
StructField("PAID_COPAY", FloatType(), True), \
StructField("PAID_COINSURANCE", FloatType(), True), \
StructField("PAID_TOWARD_DEDUCTIBLE", FloatType(), True), \
StructField("PAID_BY_PAYER", FloatType(), True), \
StructField("PAID_BY_COORDINATION_BENEFITS", FloatType(), True), \
StructField("TOTAL_OUT_OF_POCKET", FloatType(), True), \
StructField("TOTAL_PAID", FloatType(), True), \
StructField("INGREDIENT_COST", FloatType(), True), \
StructField("DISPENSING_FEE", FloatType(), True), \
StructField("AVERAGE_WHOLESALE_PRICE", FloatType(), True), \
StructField("PAYER_PLAN_PERIOD_ID", IntegerType(), True)])
model_schema['drug_era'] = StructType([ \
StructField("DRUG_ERA_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("DRUG_CONCEPT_ID", IntegerType(), False), \
StructField("DRUG_ERA_START_DATE", StringType(), False), \
StructField("DRUG_ERA_END_DATE", StringType(), True), \
StructField("DRUG_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("DRUG_EXPOSURE_COUNT", IntegerType(), True)])
model_schema['drug_exposure'] = StructType([ \
StructField("DRUG_EXPOSURE_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("DRUG_CONCEPT_ID", IntegerType(), False), \
StructField("DRUG_EXPOSURE_START_DATE", StringType(), False), \
StructField("DRUG_EXPOSURE_END_DATE", StringType(), True), \
StructField("DRUG_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("STOP_REASON", StringType(), True), \
StructField("REFILLS", IntegerType(), True), \
StructField("QUANTITY", FloatType(), True), \
StructField("DAYS_SUPPLY", IntegerType(), True), \
StructField("SIG", StringType(), True), \
StructField("ROUTE_CONCEPT_ID", IntegerType(), True), \
StructField("EFFECTIVE_DRUG_DOSE", FloatType(), True), \
StructField("DOSE_UNIT_CONCEPT_ID", IntegerType(), True), \
StructField("LOT_NUMBER", StringType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("DRUG_SOURCE_VALUE", StringType(), True), \
StructField("DRUG_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("ROUTE_SOURCE_VALUE", StringType(), True), \
StructField("DOSE_UNIT_SOURCE_VALUE", StringType(), True)])
model_schema['location'] = StructType([ \
StructField("LOCATION_ID", IntegerType(), False), \
StructField("ADDRESS_1", StringType(), True), \
StructField("ADDRESS_2", StringType(), True), \
StructField("CITY", StringType(), True), \
StructField("STATE", StringType(), True), \
StructField("ZIP", StringType(), True), \
StructField("COUNTY", StringType(), True), \
StructField("LOCATION_SOURCE_VALUE", StringType(), True)])
model_schema['measurement'] = StructType([ \
StructField("MEASUREMENT_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("MEASUREMENT_CONCEPT_ID", IntegerType(), False), \
StructField("MEASUREMENT_DATE", StringType(), False), \
StructField("MEASUREMENT_TIME", StringType(), True), \
StructField("MEASUREMENT_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("OPERATOR_CONCEPT_ID", IntegerType(), True), \
StructField("VALUE_AS_NUMBER", FloatType(), True), \
StructField("VALUE_AS_CONCEPT_ID", IntegerType(), True), \
StructField("UNIT_CONCEPT_ID", IntegerType(), True), \
StructField("RANGE_LOW", FloatType(), True), \
StructField("RANGE_HIGH", FloatType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("MEASUREMENT_SOURCE_VALUE", StringType(), True), \
StructField("MEASUREMENT_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("UNIT_SOURCE_VALUE", StringType(), True), \
StructField("VALUE_SOURCE_VALUE", StringType(), True)])
model_schema['observation'] = StructType([ \
StructField("OBSERVATION_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("OBSERVATION_CONCEPT_ID", IntegerType(), False), \
StructField("OBSERVATION_DATE", StringType(), False), \
StructField("OBSERVATION_TIME", StringType(), True), \
StructField("OBSERVATION_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("VALUE_AS_NUMBER", FloatType(), True), \
StructField("VALUE_AS_STRING", StringType(), True), \
StructField("VALUE_AS_CONCEPT_ID", IntegerType(), True), \
StructField("QUALIFIER_CONCEPT_ID", IntegerType(), True), \
StructField("UNIT_CONCEPT_ID", IntegerType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("OBSERVATION_SOURCE_VALUE", StringType(), True), \
StructField("OBSERVATION_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("UNIT_SOURCE_VALUE", StringType(), True), \
StructField("QUALIFIER_SOURCE_VALUE", StringType(), True)])
model_schema['observation_period'] = StructType([ \
StructField("OBSERVATION_PERIOD_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("OBSERVATION_PERIOD_START_DATE", StringType(), False), \
StructField("OBSERVATION_PERIOD_END_DATE", StringType(), False)])
model_schema['organization'] = StructType([ \
StructField("ORGANIZATION_ID", IntegerType(), False), \
StructField("PLACE_OF_SERVICE_CONCEPT_ID", IntegerType(), True), \
StructField("LOCATION_ID", IntegerType(), True), \
StructField("ORGANIZATION_SOURCE_VALUE", StringType(), True), \
StructField("PLACE_OF_SERVICE_SOURCE_VALUE", StringType(), True)])
model_schema['payer_plan_period'] = StructType([ \
StructField("PAYER_PLAN_PERIOD_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("PAYER_PLAN_PERIOD_START_DATE", StringType(), False), \
StructField("PAYER_PLAN_PERIOD_END_DATE", StringType(), False), \
StructField("PAYER_SOURCE_VALUE", StringType(), True), \
StructField("PLAN_SOURCE_VALUE", StringType(), True), \
StructField("FAMILY_SOURCE_VALUE", StringType(), True)])
model_schema['person'] = StructType([ \
StructField("PERSON_ID", IntegerType(), False), \
StructField("GENDER_CONCEPT_ID", IntegerType(), False), \
StructField("YEAR_OF_BIRTH", IntegerType(), False), \
StructField("MONTH_OF_BIRTH", IntegerType(), True), \
StructField("DAY_OF_BIRTH", IntegerType(), True), \
StructField("TIME_OF_BIRTH", StringType(), True), \
StructField("RACE_CONCEPT_ID", IntegerType(), True), \
StructField("ETHNICITY_CONCEPT_ID", IntegerType(), True), \
StructField("LOCATION_ID", IntegerType(), True), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("CARE_SITE_ID", IntegerType(), True), \
StructField("PERSON_SOURCE_VALUE", StringType(), True), \
StructField("GENDER_SOURCE_VALUE", StringType(), True), \
StructField("GENDER_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("RACE_SOURCE_VALUE", StringType(), True), \
StructField("RACE_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("ETHNICITY_SOURCE_VALUE", StringType(), True), \
StructField("ETHNICITY_SOURCE_CONCEPT_ID", IntegerType(), True)])
model_schema['procedure_cost'] = StructType([ \
StructField("PROCEDURE_COST_ID", IntegerType(), False), \
StructField("PROCEDURE_OCCURRENCE", IntegerType(), False), \
StructField("PAID_COPAY", FloatType(), True), \
StructField("PAID_COINSURANCE", FloatType(), True), \
StructField("PAID_TOWARD_DEDUCTIBLE", FloatType(), True), \
StructField("PAID_BY_PAYER", FloatType(), True), \
StructField("PAID_BY_COORDINATION_BENEFITS", FloatType(), True), \
StructField("TOTAL_OUT_OF_POCKET", FloatType(), True), \
StructField("TOTAL_PAID", FloatType(), True), \
StructField("DISEASE_CLASS_CONCEPT_ID", IntegerType(), True), \
StructField("REVENUE_CODE_CONCEPT_ID", IntegerType(), True), \
StructField("PAYER_PLAN_PERIOD_ID", IntegerType(), True), \
StructField("DISEASE_CLASS_SOURCE_VALUE", StringType(), True), \
StructField("REVENUE_CODE_SOURCE_VALUE", StringType(), True)])
model_schema['procedure_occurrence'] = StructType([ \
StructField("PROCEDURE_OCCURRENCE_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("PROCEDURE_CONCEPT_ID", IntegerType(), False), \
StructField("PROCEDURE_DATE", StringType(), False), \
StructField("PROCEDURE_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("MODIFIER_CONCEPT_ID", IntegerType(), True), \
StructField("QUANTITY", IntegerType(), False), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), True), \
StructField("PROCEDURE_SOURCE_VALUE", StringType(), True), \
StructField("PROCEDURE_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("QUALIFIER_SOURCE_VALUE", StringType(), True)])
model_schema['provider'] = StructType([ \
StructField("PROVIDER_ID", IntegerType(), False), \
StructField("PROVIDER_NAME", StringType(), True), \
StructField("NPI", StringType(), True), \
StructField("DEA", StringType(), True), \
StructField("SPECIALTY_CONCEPT_ID", IntegerType(), True), \
StructField("CARE_SITE_ID", IntegerType(), True), \
StructField("YEAR_OF_BIRTH", IntegerType(), True), \
StructField("GENDER_CONCEPT_ID", IntegerType(), True), \
StructField("PROVIDER_SOURCE_VALUE", StringType(), False), \
StructField("SPECIALTY_SOURCE_VALUE", StringType(), True), \
StructField("SPECIALTY_SOURCE_CONCEPT_ID", IntegerType(), True), \
StructField("GENDER_SOURCE_VALUE", StringType(), False), \
StructField("GENDER_SOURCE_CONCEPT_ID", IntegerType(), True)])
model_schema['visit_occurrence'] = StructType([ \
StructField("VISIT_OCCURRENCE_ID", IntegerType(), False), \
StructField("PERSON_ID", IntegerType(), False), \
StructField("VISIT_CONCEPT_ID", IntegerType(), False), \
StructField("VISIT_START_DATE", StringType(), False), \
StructField("VISIT_START_TIME", StringType(), True), \
StructField("VISIT_END_DATE", StringType(), False), \
StructField("VISIT_END_TIME", StringType(), True), \
StructField("VISIT_TYPE_CONCEPT_ID", IntegerType(), False), \
StructField("PROVIDER_ID", IntegerType(), True), \
StructField("CARE_SITE_ID", IntegerType(), True), \
StructField("VISIT_SOURCE_VALUE", StringType(), True), \
StructField("VISIT_SOURCE_CONCEPT_ID", IntegerType(), True)])
return model_schema
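# Hedged usage sketch (never called): applying one of the pre-defined schemas when
# reading an OMOP CSV extract. The path is a placeholder, and the
# DataFrameReader.csv() shorthand assumes Spark 2.x; older Spark would need the
# external spark-csv package instead.
def example_load_person(self, sqlContext, path="/data/omop/PERSON.csv"):
    return sqlContext.read.csv(path,
                               schema=self.model_schema['person'],
                               header=True)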
|
|
# Copyright (c) 2012, Somia Reality Oy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Python data structures describing various aspects of the api.ninchat.com
service.
.. data:: actions
Dictionary; maps name strings to Interface instances.
.. data:: events
Dictionary; maps name strings to Interface instances.
.. data:: paramtypes
Dictionary; maps name strings to Parameter instances.
.. data:: objecttypes
Dictionary; maps name strings to Object instances.
"""
from __future__ import absolute_import
import re
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse # type: ignore
try:
from typing import Any, Dict, Tuple
# avoid warnings
Any
Dict
Tuple
except ImportError:
pass
# Python 3
_ints = int # type: type
_floats = int, float # type: Tuple[type, type]
_strings = str # type: type
try:
# Python 2
_ints = int, long # type: ignore
_floats = int, long, float # type: ignore
_strings = str, unicode # type: ignore
except NameError:
pass
typechecks = {}
def declare_type(name):
def decorator(checker):
typechecks[name] = checker
return checker
return decorator
@declare_type("bool")
def is_bool(x):
return isinstance(x, bool)
@declare_type("float")
def is_float(x):
return isinstance(x, _floats)
@declare_type("int")
def is_int(x):
return isinstance(x, _ints)
@declare_type("object")
def is_object(x):
return isinstance(x, dict) and all(is_string(key) for key in x)
@declare_type("string")
def is_string(x):
return isinstance(x, _strings)
@declare_type("string array")
def is_string_array(x):
return isinstance(x, (list, tuple)) and all(is_string(item) for item in x)
@declare_type("time")
def is_time(x):
return isinstance(x, _ints) and x >= 0
def is_url(x):
try:
o = urlparse(x)
except ValueError:
return False
# DENY if not https
if o.scheme != "https":
return False
# DENY if ports assigned or ipv6 address
if ":" in o.netloc:
return False
host = o.hostname.lower()
# DENY if no alphabet (ipv4 address)
if re.match(r"^[^a-z]*$", host):
return False
# DENY if localhost or on local zeroconf network
if host == "localhost" or host.startswith("localhost.") or host.endswith(".local"):
return False
# ALLOW if based on RFC 952 or RFC 1123
if re.match(r"^[a-z0-9][a-z0-9-.]{0,251}[a-z0-9]$", host):
return True
# DENY otherwise
return False
paramtypes = {} # type: Dict[str, Any]
objecttypes = {} # type: Dict[str, Object]
actions = {} # type: Dict[str, Interface]
events = {} # type: Dict[str, Interface]
class Parameter(object):
"""Description of an action's or an event's parameter.
.. attribute:: name
String
.. attribute:: type
String|None
.. attribute:: required
Boolean
"""
def __init__(self, key, spec):
self.name = key
if isinstance(spec, bool):
self.type = paramtypes[self.name]
self.required = spec
else:
self.type = spec.get("type")
self.required = not spec.get("optional", False)
def validate(self, value):
"""Check if *value* conforms to the type requirements, or is None while
the parameter is optional.
"""
return typechecks[self.type](value) or (value is None and not self.required)
class Object(object):
"""Description of an event parameter's structure.
.. attribute:: name
String
.. attribute:: value
String|None; type of a map object's values.
.. attribute:: item
String|None; type of an array object's items.
.. attribute:: params
Dictionary|None; maps property name strings to Parameter instances.
"""
def __init__(self, key, spec):
self.name = key
self.value = spec.get("value")
self.item = spec.get("item")
self.params = None
paramspecs = spec.get("params")
if paramspecs is not None:
self.params = {}
for name, spec in paramspecs.items():
self.params[name] = Parameter(name, spec)
class Interface(object):
"""Description of an action or an event.
.. attribute:: name
String
.. attribute:: params
Dictionary; maps name strings to Parameter instances.
"""
def __init__(self, key, spec):
self.name = key
self.params = dict((k, Parameter(k, s)) for (k, s) in spec.items())
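# Hedged sketch of how a Parameter spec is interpreted (never called; the spec
# dict below is illustrative and not taken from the real spec files).
def _example_parameter_usage():
    p = Parameter("message_type", {"type": "string"})
    assert p.required                    # "optional" defaults to False
    assert p.validate("ninchat.com/text")
    assert not p.validate(None)          # required parameters may not be None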
def load(root, name, cls, target):
import os
import zipfile
if os.path.isdir(root):
with open(os.path.join(root, name)) as file:
load_file(file, cls, target)
else:
with zipfile.ZipFile(root) as zip:
with zip.open(name) as file:
load_file(file, cls, target)
def load_file(file, cls, target=None):
import json
if target is None:
target = {}
for key, spec in json.load(file).items():
target[key] = cls(key, spec)
return target
from . import attrs
from . import messages
def __init():
from os.path import dirname, join, realpath
filename = realpath(__file__)
root = dirname(dirname(dirname(filename)))
pkg = dirname(filename)[len(root) + 1:]
load(root, join(pkg, "spec/json/paramtypes.json"), (lambda key, spec: spec), paramtypes)
load(root, join(pkg, "spec/json/objecttypes.json"), Object, objecttypes)
load(root, join(pkg, "spec/json/actions.json"), Interface, actions)
load(root, join(pkg, "spec/json/events.json"), Interface, events)
attrs.init(root, join(pkg, "spec/json/attrs"))
__init()
# avoid warnings
attrs
messages
|
|
import unittest
from xml.etree import ElementTree
from masher import xml_rules
from masher.xml_rules import MasherXmlRuleError
from masher.generators import ConstantGenerator
def make_xml(xml):
return '<?xml version="1.0"?><data>' + xml + '</data>'
class TestMisc(unittest.TestCase):
def testCleanText(self):
txt = xml_rules.clean_text_node('\n\thello\n\t')
self.assertEqual('hello', txt)
class TestConstantRule(unittest.TestCase):
def testMetBy(self):
xml = '<const></const>'
root = ElementTree.fromstring(xml)
xml_rules.ConstantRule().metBy(root)
def testConstantWSpaces(self):
root = ElementTree.fromstring('<const> hello </const>')
txt = xml_rules.ConstantRule().getGenerator(root).generateText()
self.assertEqual('hello', txt)
def testConstantWLineBreaks(self):
xml = """<const>
hello
</const>"""
root = ElementTree.fromstring(xml)
txt = xml_rules.ConstantRule().getGenerator(root).generateText()
self.assertEqual('hello', txt)
def testConstantNoText(self):
xml = """<const></const>"""
root = ElementTree.fromstring(xml)
with self.assertRaises(MasherXmlRuleError):
txt = xml_rules.ConstantRule().getGenerator(root).generateText()
class TestRandomChanceRule(unittest.TestCase):
def testMetBy(self):
xml = '<random></random>'
root = ElementTree.fromstring(xml)
xml_rules.RandomRule(MockParser()).metBy(root)
def testRandomDefaultChance(self):
xml = """<random><const>hello</const><const>goodbye</const></random>"""
root = ElementTree.fromstring(xml)
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
self.assertEqual(.5, gen.chance)
def testRandomChanceGiven(self):
xml = """
<random chance='.3'>
<const>hello</const>
<const>goodbye</const>
</random>"""
# xml = make_xml(xml)
root = ElementTree.fromstring(xml)
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
self.assertEqual(.3, gen.chance)
def testRandomDefaultEnding(self):
xml = """<random><const>hello</const><const>goodbye</const></random>"""
root = ElementTree.fromstring(xml)
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
self.assertEqual(' ', gen.ending)
def testRandomEndingGiven(self):
xml = """<random ending="-"><const>hello</const><const>goodbye</const></random>"""
root = ElementTree.fromstring(xml)
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
self.assertEqual('-', gen.ending)
def testRandomTooFewConstants(self):
xml = """<random ending="-"><const>goodbye</const></random>"""
root = ElementTree.fromstring(xml)
with self.assertRaises(MasherXmlRuleError):
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
def testRandomTooManyConstants(self):
xml = """<random ending="-"><const>goodbye</const><const>goodbye</const><const>goodbye</const></random>"""
root = ElementTree.fromstring(xml)
with self.assertRaises(MasherXmlRuleError):
gen = xml_rules.RandomRule(MockParser()).getGenerator(root)
class TestFileListRule(unittest.TestCase):
def testMetBy(self):
xml = '<filelist></filelist>'
root = ElementTree.fromstring(xml)
xml_rules.FileListRule().metBy(root)
def testFilelistDefault(self):
xml = """<filelist ending="-">./test_files/test_names1.txt; ./test_files/test_names2.txt</filelist>"""
root = ElementTree.fromstring(xml)
gen = xml_rules.FileListRule().getGenerator(root)
expected = ["name1", "name2", "name3", "name4", "name5", "name6"]
self.assertEqual(len(expected), len(gen.words))
for name in expected:
assert name in gen.words
def testFilelistEnding(self):
xml = """<filelist ending="-">./test_files/test_names1.txt;</filelist>"""
root = ElementTree.fromstring(xml)
gen = xml_rules.FileListRule().getGenerator(root)
self.assertEqual('-', gen.ending)
def testFilelistNoFiles(self):
xml = """<filelist></filelist>"""
root = ElementTree.fromstring(xml)
with self.assertRaises(MasherXmlRuleError):
gen = xml_rules.FileListRule().getGenerator(root)
class TestListRule(unittest.TestCase):
def testMetBy(self):
xml = '<list></list>'
root = ElementTree.fromstring(xml)
xml_rules.ListRule().metBy(root)
def testListDefault(self):
xml = """<list>word1;word2;word3</list>"""
expected = ['word1', 'word2', 'word3']
root = ElementTree.fromstring(xml)
gen = xml_rules.ListRule().getGenerator(root)
self.assertEqual(expected, gen.words)
def testListEnding(self):
xml = """<list ending='.4'>word1;word2;word3</list>"""
expected = ['word1', 'word2', 'word3']
root = ElementTree.fromstring(xml)
gen = xml_rules.ListRule().getGenerator(root)
self.assertEqual('.4', gen.ending)
def testListDetectsListTag(self):
xml = """<list ending='.4'>word1;word2;word3</list>"""
expected = ['word1', 'word2', 'word3']
root = ElementTree.fromstring(xml)
assert xml_rules.ListRule().metBy(root)
def testListNoWords(self):
xml = """<list></list>"""
root = ElementTree.fromstring(xml)
with self.assertRaises(MasherXmlRuleError):
gen = xml_rules.FileListRule().getGenerator(root)
class TestPhraseParser(unittest.TestCase):
def setUp(self):
self.rule = xml_rules.PhraseRule(MockParser())
def testMetBy(self):
xml = '<phrase></phrase>'
root = ElementTree.fromstring(xml)
self.rule.metBy(root)
def testPhraseDefaultCorrectNumberGenerators(self):
xml = """<phrase> <const>1</const> <const>2</const> </phrase>"""
root = ElementTree.fromstring(xml)
gen = self.rule.getGenerator(root)
self.assertEqual(2, len(gen.generators))
def testPhraseDefaultEnding(self):
xml = """<phrase> <const>1</const> <const>2</const> </phrase>"""
root = ElementTree.fromstring(xml)
gen = self.rule.getGenerator(root)
self.assertEqual('', gen.ending)
def testPhraseDefaultSeparator(self):
xml = """<phrase> <const>1</const> <const>2</const> </phrase>"""
root = ElementTree.fromstring(xml)
gen = self.rule.getGenerator(root)
self.assertEqual(' ', gen.separator)
def testPhraseSetEnding(self):
xml = """<phrase ending="-"> <const>1</const> <const>2</const> </phrase>"""
root = ElementTree.fromstring(xml)
gen = self.rule.getGenerator(root)
self.assertEqual('-', gen.ending)
def testPhraseSetSeparator(self):
xml = """<phrase separator="=="> <const>1</const> <const>2</const> </phrase>"""
root = ElementTree.fromstring(xml)
gen = self.rule.getGenerator(root)
self.assertEqual('==', gen.separator)
class MockParser:
def __init__(self):
self.index = 0
self.ls = [1,2,3,4,5,6,7,8,9,10]
def parse_schema(self, schema):
gen = ConstantGenerator(str(self.ls[self.index]))
return gen
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Methods modified from Kademlia with Copyright (c) 2014 Brian Muller:
# transferKeyValues, get_concat, _nodesFound, _handleFoundValues
# see https://github.com/bmuller/kademlia/blob/master/LICENSE
import json
import uuid
import types
from twisted.internet import defer, task, reactor
from kademlia.network import Server
from kademlia.protocol import KademliaProtocol
from kademlia.crawling import NodeSpiderCrawl, ValueSpiderCrawl, RPCFindResponse
from kademlia.utils import digest
from kademlia.storage import ForgetfulStorage
from kademlia.node import Node
from kademlia import version as kademlia_version
from collections import Counter
from calvin.utilities import calvinlogger
import base64
_log = calvinlogger.get_logger(__name__)
# Fix for None types in storage
class ForgetfulStorageFix(ForgetfulStorage):
def get(self, key, default=None):
self.cull()
if key in self.data:
return (True, self[key])
return (False, default)
class KademliaProtocolAppend(KademliaProtocol):
def __init__(self, *args, **kwargs):
self.set_keys = kwargs.pop('set_keys', set([]))
KademliaProtocol.__init__(self, *args, **kwargs)
###############################################################################
# TODO remove this when kademlia v0.6 available, bug fixes, see upstream Kademlia
def handleCallResponse(self, result, node):
"""
If we get a response, add the node to the routing table. If
we get no response, make sure it's removed from the routing table.
"""
if result[0]:
self.log.info("got response from %s, adding to router" % node)
_log.debug("got response from %s, adding to router" % node)
if self.router.isNewNode(node):
self.transferKeyValues(node)
self.router.addContact(node)
else:
self.log.debug("no response from %s, removing from router" % node)
_log.debug("no response from %s, removing from router" % node)
self.router.removeContact(node)
return result
def maybeTransferKeyValues(self, node):
if self.router.isNewNode(node):
self.transferKeyValues(node)
def rpc_ping(self, sender, nodeid):
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_ping sender=%s, source=%s" % (sender, source))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
return self.sourceNode.id
def rpc_store(self, sender, nodeid, key, value):
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_store sender=%s, source=%s, key=%s, value=%s" % (sender, source, base64.b64encode(key), str(value)))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
self.log.debug("got a store request from %s, storing value" % str(sender))
self.storage[key] = value
return True
def rpc_find_node(self, sender, nodeid, key):
self.log.info("finding neighbors of %i in local table" % long(nodeid.encode('hex'), 16))
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_find_node sender=%s, source=%s, key=%s" % (sender, source, base64.b64encode(key)))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
node = Node(key)
return map(tuple, self.router.findNeighbors(node, exclude=source))
#
###############################################################################
def transferKeyValues(self, node):
"""
Given a new node, send it all the keys/values it should be storing.
@param node: A new node that just joined (or that we just found out
about).
Process:
For each key in storage, get the k closest nodes. If the new node is closer
than the furthest node in that list, and this server's node is closer
than the closest in that list, then store the key/value
on the new node (per section 2.5 of the paper).
"""
_log.debug("**** transfer key values %s ****" % node)
ds = []
for key, value in self.storage.iteritems():
keynode = Node(digest(key))
neighbors = self.router.findNeighbors(keynode)
_log.debug("transfer? nbr neighbors=%d, key=%s, value=%s" % (len(neighbors), base64.b64encode(key), str(value)))
if len(neighbors) > 0:
newNodeClose = node.distanceTo(keynode) < neighbors[-1].distanceTo(keynode)
thisNodeClosest = self.sourceNode.distanceTo(keynode) < neighbors[0].distanceTo(keynode)
if len(neighbors) == 0 or (newNodeClose and thisNodeClosest):
if key in self.set_keys:
_log.debug("transfer append key value key=%s, value=%s" % (base64.b64encode(key), str(value)))
ds.append(self.callAppend(node, key, value))
else:
_log.debug("transfer store key value key=%s, value=%s" % (base64.b64encode(key), str(value)))
ds.append(self.callStore(node, key, value))
return defer.gatherResults(ds)
# Fix for None in values for delete
def rpc_find_value(self, sender, nodeid, key):
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_find_value sender=%s, source=%s, key=%s" % (sender, source, base64.b64encode(key)))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
exists, value = self.storage.get(key, None)
if not exists:
return self.rpc_find_node(sender, nodeid, key)
return { 'value': value }
def rpc_append(self, sender, nodeid, key, value):
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_append sender=%s, source=%s, key=%s, value=%s" % (sender, source, base64.b64encode(key), str(value)))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
try:
pvalue = json.loads(value)
self.set_keys.add(key)
if key not in self.storage:
_log.debug("%s append key: %s not in storage set value: %s" % (base64.b64encode(nodeid), base64.b64encode(key), pvalue))
self.storage[key] = value
else:
old_value_ = self.storage[key]
old_value = json.loads(old_value_)
new_value = list(set(old_value + pvalue))
_log.debug("%s append key: %s old: %s add: %s new: %s" % (base64.b64encode(nodeid), base64.b64encode(key), old_value, pvalue, new_value))
self.storage[key] = json.dumps(new_value)
return True
except:
_log.debug("Trying to append something not a JSON coded list %s" % value, exc_info=True)
return False
def callAppend(self, nodeToAsk, key, value):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.append(address, self.sourceNode.id, key, value)
return d.addCallback(self.handleCallResponse, nodeToAsk)
def rpc_remove(self, sender, nodeid, key, value):
source = Node(nodeid, sender[0], sender[1])
_log.debug("rpc_remove sender=%s, source=%s, key=%s, value=%s" % (sender, source, base64.b64encode(key), str(value)))
self.maybeTransferKeyValues(source)
self.router.addContact(source)
try:
pvalue = json.loads(value)
self.set_keys.add(key)
if key in self.storage:
old_value = json.loads(self.storage[key])
new_value = list(set(old_value) - set(pvalue))
self.storage[key] = json.dumps(new_value)
_log.debug("%s remove key: %s old: %s remove: %s new: %s" % (base64.b64encode(nodeid), base64.b64encode(key), old_value, pvalue, new_value))
return True
except:
_log.debug("Trying to remove somthing not a JSON coded list %s" % value, exc_info=True)
return False
def callRemove(self, nodeToAsk, key, value):
address = (nodeToAsk.ip, nodeToAsk.port)
d = self.remove(address, self.sourceNode.id, key, value)
return d.addCallback(self.handleCallResponse, nodeToAsk)
class AppendServer(Server):
def __init__(self, ksize=20, alpha=3, id=None, storage=None):
storage = storage or ForgetfulStorageFix()
Server.__init__(self, ksize, alpha, id, storage=storage)
self.set_keys=set([])
self.protocol = KademliaProtocolAppend(self.node, self.storage, ksize, set_keys=self.set_keys)
if kademlia_version != '0.5':
_log.error("#################################################")
_log.error("### EXPECTING VERSION 0.5 of kademlia package ###")
_log.error("#################################################")
def bootstrap(self, addrs):
"""
Bootstrap the server by connecting to other known nodes in the network.
Args:
addrs: A `list` of (ip, port) `tuple` pairs. Note that only IP addresses
are acceptable - hostnames will cause an error.
"""
# if the transport hasn't been initialized yet, wait a second
if self.protocol.transport is None:
return task.deferLater(reactor, .2, self.bootstrap, addrs)
else:
return Server.bootstrap(self, addrs)
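# Hedged usage sketch (never called): addresses and port are placeholders, and
# listen() comes from the underlying kademlia.network.Server. Only literal IP
# addresses may be passed to bootstrap, per the docstring above.
def _example_bootstrap(self):
    self.listen(5710)
    return self.bootstrap([("10.0.0.5", 5710), ("10.0.0.6", 5710)])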
def append(self, key, value):
"""
For the given key append the given list values to the set in the network.
"""
dkey = digest(key)
node = Node(dkey)
def append_(nodes):
# if this node is close too, then store here as well
if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
try:
pvalue = json.loads(value)
self.set_keys.add(dkey)
if dkey not in self.storage:
_log.debug("%s local append key: %s not in storage set value: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), pvalue))
self.storage[dkey] = value
else:
old_value_ = self.storage[dkey]
old_value = json.loads(old_value_)
new_value = list(set(old_value + pvalue))
_log.debug("%s local append key: %s old: %s add: %s new: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))
self.storage[dkey] = json.dumps(new_value)
except:
_log.debug("Trying to append something not a JSON coded list %s" % value, exc_info=True)
ds = [self.protocol.callAppend(n, dkey, value) for n in nodes]
return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to set key %s" % key)
_log.debug("There are no known neighbors to set key %s" % key)
return defer.succeed(False)
spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return spider.find().addCallback(append_)
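# Hedged usage sketch (never called): values handled by append/remove must be
# JSON-coded lists, because each node merges them as a set. The key and values
# are placeholders.
def _example_append(self):
    return self.append("actors-index", json.dumps(["actor-id-1", "actor-id-2"]))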
def get(self, key):
"""
Get a key if the network has it.
Returns:
:class:`None` if not found, the value otherwise.
"""
dkey = digest(key)
_log.debug("Server:get %s" % base64.b64encode(dkey))
# if this node has it, return it
exists, value = self.storage.get(dkey)
if exists:
return defer.succeed(value)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to get key %s" % key)
return defer.succeed(None)
spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return spider.find()
def remove(self, key, value):
"""
For the given key remove the given list values from the set in the network.
"""
dkey = digest(key)
node = Node(dkey)
_log.debug("Server:remove %s" % base64.b64encode(dkey))
def remove_(nodes):
# if this node is close too, then store here as well
if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):
try:
pvalue = json.loads(value)
self.set_keys.add(dkey)
if dkey in self.storage:
old_value = json.loads(self.storage[dkey])
new_value = list(set(old_value) - set(pvalue))
self.storage[dkey] = json.dumps(new_value)
_log.debug("%s local remove key: %s old: %s remove: %s new: %s" % (base64.b64encode(node.id), base64.b64encode(dkey), old_value, pvalue, new_value))
except:
_log.debug("Trying to remove somthing not a JSON coded list %s" % value, exc_info=True)
ds = [self.protocol.callRemove(n, dkey, value) for n in nodes]
return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)
nearest = self.protocol.router.findNeighbors(node)
if len(nearest) == 0:
self.log.warning("There are no known neighbors to set key %s" % key)
return defer.succeed(False)
spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)
return spider.find().addCallback(remove_)
def get_concat(self, key):
"""
Get a key if the network has it. Assuming it is a list that should be combined.
@return: C{None} if not found, the value otherwise.
"""
dkey = digest(key)
# Always try to do a find even if we have it, due to the concatenation of all results
exists, value = self.storage.get(dkey)
node = Node(dkey)
nearest = self.protocol.router.findNeighbors(node)
_log.debug("Server:get_concat key=%s, value=%s, exists=%s, nbr nearest=%d" % (base64.b64encode(dkey), value,
exists, len(nearest)))
if len(nearest) == 0:
# No neighbors but we had it, return that value
if exists:
return defer.succeed(value)
self.log.warning("There are no known neighbors to get key %s" % key)
return defer.succeed(None)
spider = ValueListSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha,
local_value=value if exists else None)
return spider.find()
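# Hedged usage sketch (never called): get_concat returns a deferred that fires
# with the union of the JSON-coded lists held by the neighbours (and locally),
# or None if the key is unknown. The key is a placeholder.
def _example_get_concat(self):
    d = self.get_concat("actors-index")
    return d.addCallback(lambda value: json.loads(value) if value else [])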
class ValueListSpiderCrawl(ValueSpiderCrawl):
def __init__(self, *args, **kwargs):
self.local_value = kwargs.pop('local_value', None)
super(ValueListSpiderCrawl, self).__init__(*args, **kwargs)
def _nodesFound(self, responses):
"""
Handle the result of an iteration in C{_find}.
"""
toremove = []
foundValues = []
for peerid, response in responses.items():
response = RPCFindResponse(response)
if not response.happened():
toremove.append(peerid)
elif response.hasValue():
foundValues.append((peerid, response.getValue()))
else:
peer = self.nearest.getNodeById(peerid)
self.nearestWithoutValue.push(peer)
self.nearest.push(response.getNodeList())
_log.debug("_nodesFound nearestWithoutValue: %s, nearest: %s, toremove: %s" %
(self.nearestWithoutValue.getIDs(), self.nearest.getIDs(), toremove))
self.nearest.remove(toremove)
if len(foundValues) > 0:
return self._handleFoundValues(foundValues)
if self.nearest.allBeenContacted():
# not found at neighbours!
if self.local_value:
# but we had it
return self.local_value
else:
return None
return self.find()
def _handleFoundValues(self, jvalues):
"""
We got some values! Exciting. But let's combine them all. Also,
make sure we tell the nearest node that *didn't* have
the value to store it.
"""
# TODO: figure out if we could be more clever about which values are combined
value = None
_set_op = True
if self.local_value:
jvalues.append((None, self.local_value))
_log.debug("_handleFoundValues %s" % str(jvalues))
if len(jvalues) != 1:
args = (self.node.long_id, str(jvalues))
_log.debug("Got multiple values for key %i: %s" % args)
try:
values = [(v[0], json.loads(v[1])) for v in jvalues]
value_all = []
for v in values:
value_all = value_all + v[1]
value = json.dumps(list(set(value_all)))
except:
# Not JSON-coded or not a list, probably trying to do a get_concat on non set-op data
# Do the normal thing
_log.debug("_handleFoundValues ********", exc_info=True)
valueCounts = Counter([v[1] for v in jvalues])
value = valueCounts.most_common(1)[0][0]
_set_op = False
else:
key, value = jvalues[0]
peerToSaveTo = self.nearestWithoutValue.popleft()
if peerToSaveTo is not None:
_log.debug("nearestWithoutValue %d" % (len(self.nearestWithoutValue)+1))
if _set_op:
d = self.protocol.callAppend(peerToSaveTo, self.node.id, value)
else:
d = self.protocol.callStore(peerToSaveTo, self.node.id, value)
return d.addCallback(lambda _: value)
# TODO: if nearest does not contain the proper set, push to it
return value
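# --- Illustration (not part of the original module) ---
# A minimal, dependency-free sketch of the merge rule _handleFoundValues
# applies to set-op values: each peer returns a JSON-coded list, the lists
# are unioned, and the result is JSON-coded again; non-list data falls back
# to a majority vote. The helper name and sample values are assumptions
# chosen only to show the behaviour.
def _merge_set_op_values(json_values):
    """Union JSON-coded lists; fall back to a majority vote for other data."""
    import json
    from collections import Counter
    try:
        combined = set()
        for jv in json_values:
            combined.update(json.loads(jv))
        # sorted() only to make the example deterministic; the code above
        # keeps arbitrary set order via list(set(...)).
        return json.dumps(sorted(combined))
    except (ValueError, TypeError):
        # Not JSON-coded lists: pick the most common raw value instead.
        return Counter(json_values).most_common(1)[0][0]

# Example: three peers answer with overlapping lists.
# _merge_set_op_values(['["a", "b"]', '["b", "c"]', '["c"]']) -> '["a", "b", "c"]'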
|
|
# Test name = Settings
# Script dir = R:\Stingray\Tests\Settings\05-dateTime\05-dateTime.py
from time import sleep
from device import handler, updateTestResult
import RC
import UART
import DO
import GRAB
import MOD
import os
import OPER
from DO import status
def runTest():
status("active")
TestName = "Settings"
ScriptName = "05-dateTime"
ScriptIndex = "5"
Grabber = DO.grab_define()
platform = DO.load_platform()
Modulation = "DVBS"
FEC = "3/4"
SR = "27500000"
Stream = "\\X_0000_00000_MUX_32000_EPG_Software_20130328a.ts"
Frequency = 1476
Modulator = "1"
COM = "COM7"
settings = [ScriptName, ScriptIndex, Grabber, Modulation, FEC, SR, Stream, Frequency, Modulator, COM]
DO.save_settings(settings)
GRAB.start_capture()
MOD.stop(Modulator)
# macros
searching_from_wizard_general_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_E501 = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_E501 = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_ALL = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 5000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_ALL = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "down 1 1000", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
load_regions_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_E501 = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
load_regions_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_ALL = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
"""############################ TestCase 1 ##########################################
testcase = 1
status("active")
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
UART.default_settings()
UART.start_app("")
RC.push(["key_1", "key_2", "key_3"]) # set of the RC buttons
sleep(0)
RC.push("[macros_name]") # RC macros from remote_control.ini file
GRAB.compare(testcase)"""
############################ TestCase 1 ##########################################
testcase = 1
status("active")
UART.default_settings()
MOD.stop(Modulator)
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
OPER.search()
UART.start_app("settings")
RC.push(["right 1 1000", "right 1 1000", "right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 2 ##########################################
testcase = 2
status("active")
GRAB.compare(testcase)
############################ TestCase 3 ##########################################
testcase = 3
status("active")
GRAB.compare(testcase)
############################ TestCase 4 ##########################################
testcase = 4
status("active")
RC.push(["OK 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 5 ##########################################
testcase = 5
status("active")
RC.push(["OK 1 1000", "up 1 1000"])
GRAB.compare(testcase)
############################ TestCase 6 ##########################################
testcase = 6
status("active")
GRAB.compare(testcase)
############################ TestCase 7 ##########################################
testcase = 7
status("inactive")
RC.push(["down 1 2000", "left 1 1000", "right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 8 ##########################################
testcase = 8
status("inactive")
RC.push(["left 1 2000"])
GRAB.compare(testcase)
############################ TestCase 9 ##########################################
testcase = 9
status("active")
RC.push(["OK 1 1000", "ok 1 2000"])
GRAB.compare(testcase)
############################ TestCase 10 ##########################################
testcase = 10
status("active")
RC.push(["ok 1 2000"])
GRAB.compare(testcase)
############################ TestCase 11 ##########################################
testcase = 11
status("active")
UART.default_settings()
MOD.stop(Modulator)
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
OPER.search()
UART.start_app("settings")
RC.push(["right 1 1000", "right 1 1000", "right 1 1000", "down 1 1000", "ok 1 1000", "down 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 12 ##########################################
testcase = 12
status("active")
RC.push(["right 1 2000", "right 1 1000", "right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 13 ##########################################
testcase = 13
status("active")
RC.push(["right 1 2000", "right 1 1000", "right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 14 ##########################################
testcase = 14
status("active")
RC.push(["left 1 1000", "left 1 1000", "left 1 1000"])
GRAB.compare(testcase)
############################ TestCase 15 ##########################################
testcase = 15
status("active")
RC.push(["left 1 1000", "left 1 1000", "left 1 1000"])
GRAB.compare(testcase)
############################ TestCase 16 ##########################################
testcase = 16
status("active")
RC.push(["up 1 1000"])
GRAB.compare(testcase)
############################ TestCase 17 ##########################################
testcase = 17
status("active")
RC.push(["down 1 1000", "down 1 1000"])
GRAB.compare(testcase)
############################ TestCase 18 ##########################################
testcase = 18
status("active")
RC.push(["exit 1 1000"])
GRAB.compare(testcase)
############################ TestCase 19 ##########################################
testcase = 19
status("active")
RC.push(["ok 1 1000", "up 1 1000", "up 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 20 ##########################################
testcase = 20
status("active")
RC.push(["up 1 1000", "OK 1 10000", "OK 1 1000", "down 1 1000", "ok 1 1000", "0 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 21 ##########################################
testcase = 21
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "2 1 1000", "5 1 1000"])
GRAB.compare(testcase)
############################ TestCase 22 ##########################################
testcase = 22
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "0 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 23 ##########################################
testcase = 23
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "6 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 24 ##########################################
testcase = 24
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "2 1 1000", "5 1 1000"])
GRAB.compare(testcase)
############################ TestCase 25 ##########################################
testcase = 25
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 26 ##########################################
testcase = 26
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "0 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 27 ##########################################
testcase = 27
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "2 1 1000", "2 1 1000"])
GRAB.compare(testcase)
############################ TestCase 28 ##########################################
testcase = 28
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "4 1 1000", "5 1 1000"])
GRAB.compare(testcase)
############################ TestCase 29 ##########################################
testcase = 29
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "9 1 1000", "9 1 1000"])
GRAB.compare(testcase)
############################ TestCase 30 ##########################################
testcase = 30
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "right 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 31 ##########################################
testcase = 31
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "right 1 1000", "1 1 1000", "2 1 1000"])
GRAB.compare(testcase)
############################ TestCase 32 ##########################################
testcase = 32
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 1 1000", "right 1 1000", "right 1 1000", "2 1 1000", "0 1 1000"])
GRAB.compare(testcase)
############################ TestCase 33 ##########################################
testcase = 33
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "down 1 1000", "left 1 1000", "down 1 1000", "left 1 1000", "2 1 1000", "9 1 1000"])
GRAB.compare(testcase)
############################ TestCase 34 ##########################################
testcase = 34
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "left 1 1000", "down 1 1000", "left 1 1000", "2 1 1000", "9 1 1000"])
GRAB.compare(testcase)
############################ TestCase 35 ##########################################
testcase = 35
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "0 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 36 ##########################################
testcase = 36
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "1 1 1000", "9 1 1000", "6 1 1000", "9 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 37 ##########################################
testcase = 37
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "1 1 1000", "9 1 1000", "7 1 1000", "0 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 38 ##########################################
testcase = 38
status("active")
RC.push(["ok 1 1000", "right 6 2000", "2 1 1000", "0 1 1000", "3 1 1000", "7 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 39 ##########################################
testcase = 39
status("active")
RC.push(["ok 1 1000", "right 6 2000", "1 1 1000", "0 1 1000", "3 1 1000", "7 1 1000", "OK 1 1000"])
GRAB.compare(testcase)
############################ TestCase 40 ##########################################
testcase = 40
status("active")
RC.push(["exit 1 1000", "ok 1 1000", "right 6 2000", "8 1 1000", "8 1 1000", "8 1 1000", "8 1 1000", "OK 1 1500"])
GRAB.compare(testcase)
############################ TestCase 41 ##########################################
testcase = 41
status("active")
UART.default_settings()
MOD.stop(Modulator)
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
OPER.search()
UART.start_app("settings")
RC.push(["right 1 1000", "right 1 1000", "right 1 1000", "down 1 1000", "down 1 1000", "right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 42 ##########################################
testcase = 42
status("active")
RC.push(["right 15 5000"])
GRAB.compare(testcase)
############################ TestCase 43 ##########################################
testcase = 43
status("active")
RC.push(["left 1 1000"])
GRAB.compare(testcase)
############################ TestCase 44 ##########################################
testcase = 44
status("active")
RC.push(["left 30 5000"])
GRAB.compare(testcase)
############################ TestCase 45 ##########################################
testcase = 45
status("active")
RC.push(["ChUp 1 1000"])
GRAB.compare(testcase)
############################ TestCase 46 ##########################################
testcase = 46
status("active")
RC.push(["ChDown 1 1000"])
GRAB.compare(testcase)
############################ TestCase 47 ##########################################
testcase = 47
status("active")
UART.default_settings()
MOD.stop(Modulator)
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
OPER.search()
UART.start_app("settings")
RC.push(["right 1 1000", "right 1 1000", "right 1 1000", "down 1 1000", "OK 1 1000", "down 1 1000", "OK 1 1000", "2 1 1000", "3 1 1000", "2 1 1000", "5 1 1000", "right 1 1000", "down 1 1000", "down 1 1000", "down 1 1000", "left 1 1000", "3 1 1000", "1 1 1000", "right 1 1000", "2 1 1000", "0 1 1000", "1 1 1000", "2 1 1000", "OK 1 1000", "down 1 1000"])
GRAB.compare(testcase)
############################ TestCase 48 ##########################################
testcase = 48
status("active")
RC.push(["right 1 1000"])
GRAB.compare(testcase)
############################ TestCase 49 ##########################################
testcase = 49
status("manual")
GRAB.compare(testcase)
###################################################################################
status("active")
MOD.stop(Modulator)
GRAB.stop_capture()
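# --- Illustration (not part of the original test) ---
# The RC macros above are space-separated tokens that appear to follow a
# "key [repeat [delay]]" shape (e.g. "ok 1 3400", "right 6 2000", or just
# "down"). The exact semantics live in RC.push() and remote_control.ini;
# the helper below is only an assumed sketch of how such a token could be
# decoded, with defaults (repeat=1, delay=0) chosen for illustration.
def parse_rc_token(token, default_repeat=1, default_delay=0):
    parts = token.split()
    key = parts[0]
    repeat = int(parts[1]) if len(parts) > 1 else default_repeat
    delay = int(parts[2]) if len(parts) > 2 else default_delay
    return key, repeat, delay

# Example: parse_rc_token("ok 1 22200") -> ("ok", 1, 22200)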
|
|
'''This assignment is backed by a sparse matrix and is suitable for
large systems.
'''
import os
import logging
import random
import itertools
import scipy as sp
import numpy as np
import model
from assignments import Assignment, AssignmentError
class SparseAssignmentError(AssignmentError):
'''Base class for exceptions thrown by this module.'''
class SparseAssignment(Assignment):
'''Sparse storage design representation
Representing a storage assignment. This implementation is backed
by a sparse matrix and a scalar representing a value common to all
elements.
Attributes:
assignment_matrix: A num_batches by num_partitions Numpy array,
where assignment_matrix[i, j] is the number of rows from partition
j stored in batch i.
labels: List of sets, where the elements of the set labels[i] are
the rows of the assignment_matrix stored at server i.
'''
def __init__(self, par, gamma=0, labels=None, assignment_matrix=None):
'''Initialize a sparse assignment.
Args:
par: A parameters object.
gamma: Number of coded rows for each partition stored in all
batches.
labels: List of sets, where the elements of the set labels[i]
are the rows of the assignment_matrix stored at server i. A
new one is generated if this is None.
assignment_matrix: A sparse assignment matrix. This argument is
used when loading an assignment from disk.
'''
assert isinstance(par, model.SystemParameters)
assert isinstance(gamma, int)
assert isinstance(labels, list) or labels is None
self.par = par
self.gamma = gamma
if assignment_matrix is None:
self.assignment_matrix = sp.sparse.coo_matrix((par.num_batches,
par.num_partitions),
dtype=np.int16)
else:
self.assignment_matrix = assignment_matrix
self.assignment_matrix_csr = None
if labels is None:
self.labels = [set() for x in range(par.num_servers)]
self.label()
else:
self.labels = labels
return
def __repr__(self):
string = ''
string += 'assignment matrix:\n'
string += str(self.assignment_matrix) + '\n'
string += 'gamma:\n'
string += str(self.gamma) + '\n'
string += 'labels:\n'
string += str(self.labels) + '\n'
return string
def __str__(self):
if self.assignment_matrix_csr is None:
self.assignment_matrix_csr = self.assignment_matrix.tocsr()
string = ''
string += 'gamma = ' + str(self.gamma) + '\n'
rows_to_print = min(100, self.assignment_matrix_csr.shape[0])
matrix = self.assignment_matrix_csr.A[0:rows_to_print]
for row_index in range(rows_to_print):
string += str(self.batch_labels[row_index]) + ' '
string += '['
row = matrix[row_index]
for col in row:
if col == 0:
string += ' '
else:
string += '.'
string += ']\n'
return string
def batch_union(self, batch_indices):
'''Compute the union of symbols stored in a set of batches.
Args:
batch_indices: Iterable of batch indices.
Returns: A dense Numpy array containing the counts of symbols
stored in a union of batches.
'''
assert isinstance(batch_indices, set)
if self.assignment_matrix_csr is None:
self.assignment_matrix_csr = self.assignment_matrix.tocsr()
row_indices = list(batch_indices)
row_indices.sort()
symbols_slice = self.assignment_matrix_csr.A[row_indices, :]
symbols = symbols_slice.sum(axis=0)
symbols += self.gamma * len(batch_indices)
return symbols
def rows_iterator(self):
'''Iterate over the rows of the assignment matrix.'''
if self.assignment_matrix_csr is None:
self.assignment_matrix_csr = self.assignment_matrix.tocsr()
for row in self.assignment_matrix_csr.A:
yield row
return
def batch_union_sparse(self, batch_indices):
'''Compute the union of symbols stored in a set of batches.
Args:
batch_indices: Iterable of batch indices.
Returns: A dense Numpy array containing the counts of symbols
stored in a union of batches.
'''
raise NotImplementedError
assert isinstance(batch_indices, set)
if self.assignment_matrix_csr is None:
self.assignment_matrix_csr = self.assignment_matrix.tocsr()
cols = list(batch_indices)
rows = [0] * len(cols)
data = [1] * len(cols)
#selection_vector = sp.sparse.csr_matrix((data, (rows, cols)), shape=(1, self.par.num_batches))
# selection_vector = sp.sparse.coo_matrix((data, (rows, cols)),
# shape=(1, self.par.num_batches))
# symbols = selection_vector.dot(self.assignment_matrix).A[0]
selection_vector = np.zeros((1, self.par.num_batches))
selection_vector[:, cols] = 1
symbols = (selection_vector * self.assignment_matrix_csr)
#print(self.par.num_partitions, selection_vector.shape, self.assignment_matrix.shape, symbols.shape)
symbols += self.gamma * len(batch_indices)
return symbols[0]
def save(self, directory='./saved_assignments/'):
'''Save the assignment to disk.
Args:
directory: Directory to save to.
'''
assert isinstance(directory, str)
if not os.path.exists(directory):
os.makedirs(directory)
# Save the assignment matrix
filename = os.path.join(directory, self.par.identifier() + '.npz')
sp.sparse.save_npz(filename, self.assignment_matrix)
return
@classmethod
def load(cls, parameters, directory='./saved_assignments/'):
'''Load assignment from disk.
Args:
parameters: System parameters.
directory: Directory to load from.
Returns: The loaded assignment.
'''
assert isinstance(directory, str)
# Load the assignment matrix
filename = os.path.join(directory, parameters.identifier() + '.npz')
assignment_matrix = sp.sparse.load_npz(filename)
# Infer the value of gamma from its first row
gamma = parameters.rows_per_batch
gamma -= assignment_matrix.getrow(0).sum()
gamma /= parameters.num_partitions
if gamma % 1 != 0:
raise SparseAssignmentError('Could not infer the value of gamma.')
return cls(parameters, gamma=int(gamma), assignment_matrix=assignment_matrix)
def label(self, shuffle=False):
'''Label the batches with server subsets
Label all batches with subsets.
Args:
shuffle: Shuffle the labeling if True. Otherwise label in the
order returned by itertools.combinations.
'''
assert self.par.server_storage * self.par.q % 1 == 0, 'Must be integer'
self.batch_labels = list(itertools.combinations(range(self.par.num_servers),
int(self.par.server_storage * self.par.q)))
if shuffle:
random.shuffle(self.batch_labels)
row = 0
for label in self.batch_labels:
for server in label:
self.labels[server].add(row)
row += 1
return
def increment(self, rows, cols, data):
'''Increment assignment_matrix[rows[i], cols[i]] by data[i] for all i.
Args:
rows: List of row indices
cols: List of column indices
data: List of values to increment by
Returns: Returns self. Does not copy the assignment.
'''
assert isinstance(rows, list)
assert isinstance(cols, list)
assert isinstance(data, list)
assert len(rows) == len(cols)
assert len(cols) == len(data)
self.assignment_matrix += sp.sparse.coo_matrix((data, (rows, cols)), dtype=np.int16,
shape=self.assignment_matrix.shape)
self.assignment_matrix_csr = None
# Eliminate duplicate entries
self.assignment_matrix.sum_duplicates()
return self
def decrement(self, rows, cols, data):
'''Decrement assignment_matrix[rows[i], cols[i]] by data[i] for all i.
Args:
rows: List of row indices
cols: List of column indices
data: List of values to decrement by
Returns: Returns self. Does not copy the assignment.
'''
assert isinstance(rows, list)
assert isinstance(cols, list)
assert isinstance(data, list)
assert len(rows) == len(cols)
assert len(cols) == len(data)
self.assignment_matrix -= sp.sparse.coo_matrix((data, (rows, cols)), dtype=np.int16,
shape=self.assignment_matrix.shape)
self.assignment_matrix_csr = None
# Eliminate duplicate entries
self.assignment_matrix.sum_duplicates()
return self
def is_valid(self):
'''Test if the assignment is valid.
Returns: True if the assignment matrix is valid and complete,
and False otherwise.
'''
correct_row_sum = self.par.rows_per_batch
correct_row_sum -= self.par.num_partitions * self.gamma
correct_row_sum = round(correct_row_sum)
for i in range(self.par.num_batches):
row = self.assignment_matrix.getrow(i)
if row.sum() != correct_row_sum:
logging.debug('Sum of row %d\n%s \nis %d, but should be %d.',
i, str(row), row.sum(), correct_row_sum)
return False
correct_col_sum = self.par.num_coded_rows / self.par.num_partitions
correct_col_sum -= self.par.num_batches * self.gamma
correct_col_sum = round(correct_col_sum)
for i in range(self.par.num_partitions):
col = self.assignment_matrix.getcol(i)
if col.sum() != correct_col_sum:
logging.debug('Sum of column %d\n%s \nis %d, but should be %d.',
i, str(col), col.sum(), correct_col_sum)
return False
return True
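# --- Illustration (not part of the original module) ---
# A minimal sketch of the storage pattern used above: increments are
# accumulated as COO matrices of the same shape, and unions over batches
# are computed by summing the selected rows of the CSR form. The 4x3 shape
# and the indices below are assumptions chosen only for illustration; a
# real assignment is sized by the SystemParameters object.
def _sparse_assignment_demo():
    import numpy as np
    import scipy.sparse as sparse

    shape = (4, 3)  # (num_batches, num_partitions), assumed
    matrix = sparse.coo_matrix(shape, dtype=np.int16)

    # increment(rows, cols, data): add a COO matrix with the same shape
    rows, cols, data = [0, 0, 2], [1, 2, 1], [1, 1, 2]
    matrix = matrix + sparse.coo_matrix((data, (rows, cols)), shape=shape, dtype=np.int16)

    # batch_union({0, 2}): sum the selected rows of the dense CSR view
    csr = matrix.tocsr()
    union = csr[[0, 2], :].toarray().sum(axis=0)
    return union  # array([0, 3, 1])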
|
|
"""
Typeclass for Player objects
Note that this object is primarily intended to
store OOC information, not game info! This
object represents the actual user (not their
character) and has NO actual presence in the
game world (this is handled by the associated
character object, so you should customize that
instead for most things).
"""
from django.conf import settings
from django.utils import timezone
from evennia.typeclasses.models import TypeclassBase
from evennia.players.manager import PlayerManager
from evennia.players.models import PlayerDB
from evennia.comms.models import ChannelDB
from evennia.commands import cmdhandler
from evennia.utils import logger
from evennia.utils.utils import (lazy_property, to_str,
make_iter, to_unicode,
variable_from_module)
from evennia.typeclasses.attributes import NickHandler
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands.cmdsethandler import CmdSetHandler
from django.utils.translation import ugettext as _
__all__ = ("DefaultPlayer",)
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_CMDSET_PLAYER = settings.CMDSET_PLAYER
_CONNECT_CHANNEL = None
class DefaultPlayer(PlayerDB):
"""
This is the base Typeclass for all Players. Players represent
the person playing the game and tracks account info, password
etc. They are OOC entities without presence in-game. A Player
can connect to a Character Object in order to "enter" the
game.
Player Typeclass API:
* Available properties (only available on initiated typeclass objects)
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also
be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not
create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, **kwargs)
#swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None,
ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hook methods
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_access()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(sessid=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
__metaclass__ = TypeclassBase
objects = PlayerManager()
# properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
# session-related methods
def get_session(self, sessid):
"""
Return session with given sessid connected to this player.
note that the sessionhandler also accepts sessid as an iterable.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.session_from_player(self, sessid)
def get_all_sessions(self):
"Return all sessions connected to this player"
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
sessions = property(get_all_sessions) # alias shortcut
def disconnect_session_from_player(self, sessid):
"""
Access method for disconnecting a given session from the player
(connection happens automatically in the sessionhandler)
"""
# this should only be one value, loop just to make sure to
# clean everything
sessions = (session for session in self.get_all_sessions()
if session.sessid == sessid)
for session in sessions:
# this will also trigger unpuppeting
session.sessionhandler.disconnect(session)
# puppeting operations
def puppet_object(self, sessid, obj):
"""
Use the given session to control (puppet) the given object (usually
a Character type).
Args:
sessid (int): session id of session to connect
obj (Object): the object to start puppeting
Raises:
RuntimeError with message if puppeting is not possible
returns True if successful, False otherwise
"""
# safety checks
if not obj:
raise RuntimeError("Object not found")
session = self.get_session(sessid)
if not session:
raise RuntimeError("Session not found")
if self.get_puppet(sessid) == obj:
# already puppeting this object
raise RuntimeError("You are already puppeting this object.")
if not obj.access(self, 'puppet'):
# no access
raise RuntimeError("You don't have permission to puppet '%s'." % obj.key)
if obj.player:
# object already puppeted
if obj.player == self:
if obj.sessid.count():
# we may take over another of our sessions
# output messages to the affected sessions
if _MULTISESSION_MODE in (1, 3):
txt1 = "{c%s{n{G is now shared from another of your sessions.{n"
txt2 = "Sharing {c%s{n with another of your sessions."
else:
txt1 = "{c%s{n{R is now acted from another of your sessions.{n"
txt2 = "Taking over {c%s{n from another of your sessions."
self.unpuppet_object(obj.sessid.get())
self.msg(txt1 % obj.name, sessid=obj.sessid.get(), _forced_nomulti=True)
self.msg(txt2 % obj.name, sessid=sessid, _forced_nomulti=True)
elif obj.player.is_connected:
# controlled by another player
raise RuntimeError("{R{c%s{R is already puppeted by another Player.")
# do the puppeting
if session.puppet:
# cleanly unpuppet eventual previous object puppeted by this session
self.unpuppet_object(sessid)
# if we get to this point the character is ready to puppet or it
# was left with a lingering player/sessid reference from an unclean
# server kill or similar
obj.at_pre_puppet(self, sessid=sessid)
# do the connection
obj.sessid.add(sessid)
obj.player = self
session.puid = obj.id
session.puppet = obj
# validate/start persistent scripts on object
obj.scripts.validate()
obj.at_post_puppet()
# re-cache locks to make sure superuser bypass is updated
obj.locks.cache_lock_bypass(obj)
def unpuppet_object(self, sessid):
"""
Disengage control over an object
Args:
sessid(int): the session id to disengage
Raises:
RuntimeError with message about error.
"""
if _MULTISESSION_MODE == 1:
sessions = self.get_all_sessions()
else:
sessions = self.get_session(sessid)
if not sessions:
raise RuntimeError("No session was found.")
for session in make_iter(sessions):
obj = session.puppet or None
if not obj:
raise RuntimeError("No puppet was found to disconnect from.")
elif obj:
# do the disconnect, but only if we are the last session to puppet
obj.at_pre_unpuppet()
obj.sessid.remove(session.sessid)
if not obj.sessid.count():
del obj.player
obj.at_post_unpuppet(self, sessid=sessid)
# Just to be sure we're always clear.
session.puppet = None
session.puid = None
def unpuppet_all(self):
"""
Disconnect all puppets. This is called by server
before a reset/shutdown.
"""
for session in (sess for sess in self.get_all_sessions() if sess.puppet):
self.unpuppet_object(session.sessid)
def get_puppet(self, sessid, return_dbobj=False):
"""
Get an object puppeted by this session through this player. This is
the main method for retrieving the puppeted object from the
player's end.
sessid - return character connected to this sessid,
"""
session = self.get_session(sessid)
if not session:
return None
if return_dbobj:
return session.puppet
return session.puppet and session.puppet or None
def get_all_puppets(self):
"""
Get all currently puppeted objects as a list.
"""
return list(set(session.puppet for session in self.get_all_sessions()
if session.puppet))
def __get_single_puppet(self):
"""
This is a legacy convenience link for users of
MULTISESSION_MODE 0 or 1. It will return
only the first puppet. For mode 2, this returns
a list of all characters.
"""
puppets = self.get_all_puppets()
if _MULTISESSION_MODE in (0, 1):
return puppets and puppets[0] or None
return puppets
character = property(__get_single_puppet)
puppet = property(__get_single_puppet)
# utility methods
def delete(self, *args, **kwargs):
"""
Deletes the player permanently.
"""
for session in self.get_all_sessions():
# unpuppeting all objects and disconnecting the user, if any
# sessions remain (should usually be handled from the
# deleting command)
try:
self.unpuppet_object(session.sessid)
except RuntimeError:
# no puppet to disconnect from
pass
session.sessionhandler.disconnect(session, reason=_("Player being deleted."))
self.scripts.stop()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
super(PlayerDB, self).delete(*args, **kwargs)
## methods inherited from database model
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
Args:
text (str, optional): text data to send
from_obj (Object or Player, optional): object sending. If given,
its at_msg_send() hook will be called.
sessid (int or list, optional): session id or ids to receive this
send. If given, overrules MULTISESSION_MODE.
Notes:
All other keywords are passed on to the protocol.
"""
text = to_str(text, force_string=True) if text else ""
if from_obj:
# call hook
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
pass
# session relay
if sessid:
# this could still be an iterable if sessid is an iterable
sessions = self.get_session(sessid)
if sessions:
# this is a special instruction to ignore MULTISESSION_MODE
# and only relay to this given session.
kwargs["_nomulti"] = True
for session in make_iter(sessions):
session.msg(text=text, **kwargs)
return
# we only send to the first of any connected sessions - the sessionhandler
# will disperse this to the other sessions based on MULTISESSION_MODE.
sessions = self.get_all_sessions()
if sessions:
sessions[0].msg(text=text, **kwargs)
def execute_cmd(self, raw_string, sessid=None, **kwargs):
"""
Do something as this player. This method is never called normally,
but only when the player object itself is supposed to execute the
command. It takes player nicks into account, but not nicks of
eventual puppets.
raw_string - raw command input coming from the command line.
sessid - the optional session id to be responsible for the command-send
**kwargs - other keyword arguments will be added to the found command
object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
change operating parameters for commands at run-time.
"""
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string,
categories=("inputline", "channel"), include_player=False)
if not sessid and _MULTISESSION_MODE in (0, 1):
# in this case, we should either have only one sessid, or the sessid
# should not matter (since the return goes to all of them we can
# just use the first one as the source)
try:
sessid = self.get_all_sessions()[0].sessid
except IndexError:
# this can happen for bots
sessid = None
return cmdhandler.cmdhandler(self, raw_string,
callertype="player", sessid=sessid, **kwargs)
def search(self, searchdata, return_puppet=False,
nofound_string=None, multimatch_string=None, **kwargs):
"""
This is similar to the ObjectDB search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
searchdata - search criterion, the Player's key or dbref to search for
return_puppet - will try to return the object the player controls
instead of the Player object itself. If no
puppeted object exists (since Player is OOC), None will
be returned.
nofound_string - optional custom string for not-found error message.
multimatch_string - optional custom string for multimatch error header.
Extra keywords are ignored, but are allowed in call in order to make
API more consistent with objects.models.TypedObject.search.
"""
# handle me, self and *me, *self
if isinstance(searchdata, basestring):
# handle wrapping of common terms
if searchdata.lower() in ("me", "*me", "self", "*self",):
return self
matches = self.__class__.objects.player_search(searchdata)
matches = _AT_SEARCH_RESULT(self, searchdata, matches, global_search=True,
nofound_string=nofound_string,
multimatch_string=multimatch_string)
if matches and return_puppet:
try:
return matches.puppet
except AttributeError:
return None
return matches
def access(self, accessing_obj, access_type='read', default=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one
access_type (str): Type of access sought
default (bool): What to return if no lock of access_type was found
Kwargs:
Passed to the at_access hook along with the result.
"""
result = super(DefaultPlayer, self).access(accessing_obj, access_type=access_type, default=default)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
## player hooks
def basetype_setup(self):
"""
This sets up the basic properties for a player.
Overload this with at_player_creation rather than
changing this method.
"""
# A basic security setup
lockstring = "examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:all()"
self.locks.add(lockstring)
# The ooc player cmdset
self.cmdset.add_default(_CMDSET_PLAYER, permanent=True)
def at_player_creation(self):
"""
This is called once, the very first time
the player is created (i.e. first time they
register with the game). It's a good place
to store attributes all players should have,
like configuration values etc.
"""
# set an (empty) attribute holding the characters this player has
lockstring = "attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)"
self.attributes.add("_playable_characters", [], lockstring=lockstring)
def at_init(self):
"""
This is always called whenever this object is initiated --
that is, whenever its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload. In the case of player objects, this usually
happens the moment the player logs in or reconnects after a
reload.
"""
pass
# Note that the hooks below also exist in the character object's
# typeclass. You can often ignore these and rely on the character
# ones instead, unless you are implementing a multi-character game
# and have some things that should be done regardless of which
# character is currently connected to this player.
def at_first_save(self):
"""
This is a generic hook called by Evennia when this object is
saved to the database the very first time. You generally
don't override this method but the hooks called by it.
"""
self.basetype_setup()
self.at_player_creation()
permissions = settings.PERMISSION_PLAYER_DEFAULT
if hasattr(self, "_createdict"):
# this will only be set if the utils.create_player
# function was used to create the object.
cdict = self._createdict
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("permissions"):
permissions = cdict["permissions"]
del self._createdict
self.permissions.add(permissions)
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this player are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is also called if the player currently
has no cmdsets. kwargs are usually not used unless the
cmdset is generated dynamically.
"""
pass
def at_first_login(self):
"""
Called the very first time this player logs into the game.
"""
pass
def at_pre_login(self):
"""
Called every time the user logs in, just before the actual
login-state is set.
"""
pass
def _send_to_connect_channel(self, message):
"Helper method for loading the default comm channel"
global _CONNECT_CHANNEL
if not _CONNECT_CHANNEL:
try:
_CONNECT_CHANNEL = ChannelDB.objects.filter(db_key=settings.DEFAULT_CHANNELS[1]["key"])[0]
except Exception:
logger.log_trace()
now = timezone.now()
now = "%02i-%02i-%02i(%02i:%02i)" % (now.year, now.month,
now.day, now.hour, now.minute)
if _CONNECT_CHANNEL:
_CONNECT_CHANNEL.tempmsg("[%s, %s]: %s" % (_CONNECT_CHANNEL.key, now, message))
else:
logger.log_infomsg("[%s]: %s" % (now, message))
def at_post_login(self, sessid=None):
"""
Called at the end of the login process, just before letting
the player loose. This is called before an eventual Character's
at_post_login hook.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
if _MULTISESSION_MODE == 0:
# in this mode we should have only one character available. We
# try to auto-connect to our last connected object, if any
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE == 1:
# in this mode all sessions connect to the same puppet.
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE in (2, 3):
# In this mode we by default end up at a character selection
# screen. We execute look on the player.
self.execute_cmd("look", sessid=sessid)
def at_disconnect(self, reason=None):
"""
Called just before user is disconnected.
"""
reason = reason and "(%s)" % reason or ""
self._send_to_connect_channel("{R%s disconnected %s{n" % (self.key, reason))
def at_post_disconnect(self):
"""
This is called after disconnection is complete. No messages
can be relayed to the player from here. After this call, the
player should not be accessed any more, making this a good
spot for deleting it (in the case of a guest player account,
for example).
"""
pass
def at_message_receive(self, message, from_obj=None):
"""
Called when any text is emitted to this
object. If it returns False, no text
will be sent automatically.
"""
return True
def at_message_send(self, message, to_object):
"""
Called whenever this object tries to send text
to another object. Only called if the object supplied
itself as a sender in the msg() call.
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
class DefaultGuest(DefaultPlayer):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
def at_post_login(self, sessid=None):
"""
In theory, guests only have one character regardless of which
MULTISESSION_MODE we're in. They don't get a choice.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
self.puppet_object(sessid, self.db._last_puppet)
def at_disconnect(self):
"""
A Guest's characters aren't meant to linger on the server. When a
Guest disconnects, we remove its character.
"""
super(DefaultGuest, self).at_disconnect()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_server_shutdown(self):
"""
We repeat at_disconnect() here just to be on the safe side.
"""
super(DefaultGuest, self).at_server_shutdown()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_post_disconnect(self):
"""
Guests aren't meant to linger on the server, either. We need to wait
until after the Guest disconnects to delete it, though.
"""
super(DefaultGuest, self).at_post_disconnect()
self.delete()
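# --- Illustration (not part of the original module) ---
# A minimal sketch of how a game might customize this typeclass: subclass
# DefaultPlayer and override one of the hooks documented above. The class
# name and greeting text are assumptions for illustration only; a real game
# would point its settings entry for the base player typeclass at the
# subclass so it is used for new players.
class ExamplePlayer(DefaultPlayer):
    def at_post_login(self, sessid=None):
        # keep the default puppeting/look behaviour, then add an OOC greeting
        super(ExamplePlayer, self).at_post_login(sessid=sessid)
        self.msg("Welcome back, %s." % self.key, sessid=sessid)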
|
|
"""
Demonstration module for quadratic interpolation.
Sample solutions for Homework 2 problems #2 through #7.
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import solve
def quad_interp(xi,yi):
"""
Quadratic interpolation. Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2.
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2.
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have length 3"
assert len(xi)==3 and len(yi)==3, error_message
# Set up linear system to interpolate through data points:
A = np.vstack([np.ones(3), xi, xi**2]).T
c = solve(A,yi)
return c
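# Worked example (same data as test_quad1 below): for xi = [-1, 0, 2] and
# yi = [1, -1, 7] the Vandermonde system A c = yi is
#
#     [1 -1  1] [c0]   [ 1]
#     [1  0  0] [c1] = [-1]
#     [1  2  4] [c2]   [ 7]
#
# whose solution is c = [-1, 0, 2], i.e. p(x) = -1 + 2*x**2.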
def plot_quad(xi, yi):
"""
Perform quadratic interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = quad_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
y = c[0] + c[1]*x + c[2]*x**2
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(-2,8) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('quadratic.png') # save figure as .png file
def cubic_interp(xi,yi):
"""
Cubic interpolation. Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2,3
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3.
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have length 4"
assert len(xi)==4 and len(yi)==4, error_message
# Set up linear system to interpolate through data points:
A = np.vstack([np.ones(4), xi, xi**2, xi**3]).T
c = solve(A,yi)
return c
def plot_cubic(xi, yi):
"""
Perform cubic interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = cubic_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
y = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(-2,8) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('cubic.png') # save figure as .png file
def poly_interp(xi,yi):
"""
General polynomial interpolation.
Compute the coefficients of the polynomial
interpolating the points (xi[i],yi[i]) for i = 0,1,2,...,n-1
where n = len(xi) = len(yi).
Returns c, an array containing the coefficients of
p(x) = c[0] + c[1]*x + c[2]*x**2 + ... + c[N-1]*x**(N-1).
"""
# check inputs and print error message if not valid:
error_message = "xi and yi should have type numpy.ndarray"
assert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message
error_message = "xi and yi should have the same length "
assert len(xi)==len(yi), error_message
# Set up linear system to interpolate through data points:
# Uses a list comprehension, see
# http://docs.python.org/2/tutorial/datastructures.html#list-comprehensions
n = len(xi)
A = np.vstack([xi**j for j in range(n)]).T
c = solve(A,yi)
return c
def plot_poly(xi, yi):
"""
Perform polynomial interpolation and plot the resulting function along
with the data points.
"""
# Compute the coefficients:
c = poly_interp(xi,yi)
# Plot the resulting polynomial:
x = np.linspace(xi.min() - 1, xi.max() + 1, 1000)
# Use Horner's rule:
n = len(xi)
y = c[n-1]
for j in range(n-1, 0, -1):
y = y*x + c[j-1]
plt.figure(1) # open plot figure window
plt.clf() # clear figure
plt.plot(x,y,'b-') # connect points with a blue line
# Add data points (polynomial should go through these points!)
plt.plot(xi,yi,'ro') # plot as red circles
plt.ylim(yi.min()-1, yi.max()+1) # set limits in y for plot
plt.title("Data points and interpolating polynomial")
plt.savefig('poly.png') # save figure as .png file
def test_quad1():
"""
Test code, no return value or exception if test runs properly.
"""
xi = np.array([-1., 0., 2.])
yi = np.array([ 1., -1., 7.])
c = quad_interp(xi,yi)
c_true = np.array([-1., 0., 2.])
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_quad(xi,yi)
def test_quad2():
"""
Test code, no return value or exception if test runs properly.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., 2., -3.])
# Points to interpolate:
xi = np.array([-1., 0., 2.])
# Function values to interpolate:
yi = c_true[0] + c_true[1]*xi + c_true[2]*xi**2
# Now interpolate and check we get c_true back again.
c = quad_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_quad(xi,yi)
def test_cubic1():
"""
Test code, no return value or exception if test runs properly.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., -2., -3., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2.])
# Function values to interpolate:
yi = c_true[0] + c_true[1]*xi + c_true[2]*xi**2 + c_true[3]*xi**3
# Now interpolate and check we get c_true back again.
c = cubic_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_cubic(xi,yi)
def test_poly1():
"""
Test code, no return value or exception if test runs properly.
Same points as test_cubic1.
"""
# Generate a test by specifying c_true first:
c_true = np.array([7., -2., -3., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2.])
# Function values to interpolate:
# Use Horner's rule:
n = len(xi)
yi = c_true[n-1]
for j in range(n-1, 0, -1):
yi = yi*xi + c_true[j-1]
# Now interpolate and check we get c_true back again.
c = poly_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_poly(xi,yi)
def test_poly2():
"""
Test code, no return value or exception if test runs properly.
Test with 5 points (quartic interpolating function).
"""
# Generate a test by specifying c_true first:
c_true = np.array([0., -6., 11., -6., 1.])
# Points to interpolate:
xi = np.array([-1., 0., 1., 2., 4.])
# Function values to interpolate:
# Use Horner's rule:
n = len(xi)
yi = c_true[n-1]
for j in range(n-1, 0, -1):
yi = yi*xi + c_true[j-1]
# Now interpolate and check we get c_true back again.
c = poly_interp(xi,yi)
print "c = ", c
print "c_true = ", c_true
# test that all elements have small error:
assert np.allclose(c, c_true), \
"Incorrect result, c = %s, Expected: c = %s" % (c,c_true)
# Also produce plot:
plot_poly(xi,yi)
if __name__=="__main__":
print "Running test..."
test_quad1()
test_quad2()
test_cubic1()
test_poly1()
test_poly2()
|
|
########################################################################
#
# License: BSD
# Created: September 1, 2010
# Author: Francesc Alted - [email protected]
#
########################################################################
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
import blaze.carray as ca
from common import MayBeDiskTest
class createTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing ctable creation from a tuple of carrays"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00b(self):
"""Testing ctable creation from a tuple of lists"""
t = ca.ctable(([1,2,3],[4,5,6]), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([[1,2,3],[4,5,6]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable creation from a tuple of carrays (single column)"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
self.assertRaises(ValueError, ca.ctable, a, 'f0', rootdir=self.rootdir)
def test01(self):
"""Testing ctable creation from a tuple of numpy arrays"""
N = 1e1
a = np.arange(N, dtype='i4')
b = np.arange(N, dtype='f8')+1
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a,b]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing ctable creation from an structured array"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03a(self):
"""Testing ctable creation from large iterator"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8',
count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03b(self):
"""Testing ctable creation from large iterator (with a hint)"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N)
t = ca.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
class createDiskTest(createTest, TestCase):
disk = True
class persistentTest(MayBeDiskTest, TestCase):
disk = True
def test00a(self):
"""Testing ctable opening in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='r')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
self.assertRaises(RuntimeError, t.__setitem__, 1, (0, 0.0))
self.assertRaises(RuntimeError, t.append, (0, 0.0))
def test00b(self):
"""Testing ctable opening in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='w')
#print "t->", `t`
N = 0
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((0, 0.0))
t.append((0, 0.0))
t[1] = (1, 2.0)
ra = np.rec.fromarrays([(0,1),(0.0, 2.0)], 'i4,f8').view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable opening in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='a')
#print "t->", `t`
# Check values
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[-1] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing ctable creation in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='r')
def test01b(self):
"""Testing ctable creation in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir, mode='w')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[11] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01c(self):
"""Testing ctable creation in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='a')
class add_del_colTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing adding a new column (list flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c.tolist(), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00(self):
"""Testing adding a new column (carray flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01b(self):
"""Testing cparams when adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, cparams=ca.cparams(1), rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
self.assert_(t['f2'].cparams.clevel == 1, "Incorrect clevel")
def test02(self):
"""Testing adding a new column (default naming)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c))
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing inserting a new column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=0)
ra = np.fromiter(((i*3, i, i*2.) for i in xrange(N)), dtype='i8,i4,f8')
ra.dtype.names = ('c0', 'f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing inserting a new column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=1)
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
ra.dtype.names = ('f0', 'c0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test05(self):
"""Testing removing an existing column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=0)
# The next gives a segfault. See:
# http://projects.scipy.org/numpy/ticket/1598
#ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype='i8,f8')
#ra.dtype.names = ('f1', 'f2')
dt = np.dtype([('f1', 'i8'), ('f2', 'f8')])
ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype=dt)
#print "t->", `t`
#print "ra", ra
#assert_array_equal(t[:], ra, "ctable values are not correct")
def test06(self):
"""Testing removing an existing column (at the end)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=2)
ra = np.fromiter(((i, i*3) for i in xrange(N)), dtype='i4,i8')
ra.dtype.names = ('f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test07(self):
"""Testing removing an existing column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=1)
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test08(self):
"""Testing removing an existing column (by name)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol('f1')
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
class add_del_colDiskTest(add_del_colTest, TestCase):
disk = True
class getitemTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing __getitem__ with only a start"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start = 9
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[start], ra[start], "ctable values are not correct")
def test01(self):
"""Testing __getitem__ with start, stop"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start, stop = 3, 9
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[start:stop], ra[start:stop],
"ctable values are not correct")
def test02(self):
"""Testing __getitem__ with start, stop, step"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
start, stop, step = 3, 9, 2
#print "t->", `t[start:stop:step]`
#print "ra->", ra[start:stop:step]
assert_array_equal(t[start:stop:step], ra[start:stop:step],
"ctable values are not correct")
def test03(self):
"""Testing __getitem__ with a column name"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
colname = "f1"
#print "t->", `t[colname]`
#print "ra->", ra[colname]
assert_array_equal(t[colname][:], ra[colname],
"ctable values are not correct")
def test04(self):
"""Testing __getitem__ with a list of column names"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, rootdir=self.rootdir)
colnames = ["f0", "f2"]
        # For some versions of NumPy (> 1.7) we cannot make use of
        # ra[colnames] :-/
ra2 = np.fromiter(((i, i*3) for i in xrange(N)), dtype='i4,i8')
ra2.dtype.names = ('f0', 'f2')
#print "t->", `t[colnames]`
#print "ra2->", ra2
assert_array_equal(t[colnames][:], ra2,
"ctable values are not correct")
class getitemDiskTest(getitemTest, TestCase):
disk = True
class setitemTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing __setitem__ with only a start"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(9, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing __setitem__ with only a stop"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(None, 9, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing __setitem__ with a start, stop"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,90, None)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing __setitem__ with a start, stop, step"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,90, 2)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing __setitem__ with a large step"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, chunklen=10, rootdir=self.rootdir)
sl = slice(1,43, 20)
t[sl] = (0, 1)
ra[sl] = (0, 1)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
class setitemDiskTest(setitemTest, TestCase):
disk = True
class appendTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing append() with scalar values"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.append((N, N*2))
ra = np.fromiter(((i, i*2.) for i in xrange(N+1)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing append() with numpy arrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
a = np.arange(N, N+10, dtype='i4')
b = np.arange(N, N+10, dtype='f8')*2.
t.append((a, b))
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing append() with carrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
a = np.arange(N, N+10, dtype='i4')
b = np.arange(N, N+10, dtype='f8')*2.
t.append((ca.carray(a), ca.carray(b)))
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing append() with structured arrays"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
ra2 = np.fromiter(((i, i*2.) for i in xrange(N, N+10)), dtype='i4,f8')
t.append(ra2)
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing append() with another ctable"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
ra2 = np.fromiter(((i, i*2.) for i in xrange(N, N+10)), dtype='i4,f8')
t2 = ca.ctable(ra2)
t.append(t2)
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
assert_array_equal(t[:], ra, "ctable values are not correct")
class appendDiskTest(appendTest, TestCase):
disk = True
class trimTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing trim() with Python scalar values"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N-2)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), 'i4,f8', N,
rootdir=self.rootdir)
t.trim(2)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing trim() with NumPy scalar values"""
N = 10000
ra = np.fromiter(((i, i*2.) for i in xrange(N-200)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), 'i4,f8', N,
rootdir=self.rootdir)
t.trim(np.int(200))
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing trim() with a complete trim"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(0)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), 'i4,f8', N,
rootdir=self.rootdir)
t.trim(N)
self.assert_(len(ra) == len(t), "Lengths are not equal")
class trimDiskTest(trimTest, TestCase):
disk = True
class resizeTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing resize() (decreasing)"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N-2)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), 'i4,f8', N,
rootdir=self.rootdir)
t.resize(N-2)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing resize() (increasing)"""
N = 100
ra = np.fromiter(((i, i*2.) for i in xrange(N+4)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), 'i4,f8', N,
rootdir=self.rootdir)
t.resize(N+4)
ra['f0'][N:] = np.zeros(4)
ra['f1'][N:] = np.zeros(4)
assert_array_equal(t[:], ra, "ctable values are not correct")
class resizeDiskTest(resizeTest, TestCase):
disk=True
class copyTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing copy() without params"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
if self.disk:
rootdir = self.rootdir + "-test00"
else:
rootdir = self.rootdir
t2 = t.copy(rootdir=rootdir, mode='w')
a = np.arange(N, N+10, dtype='i4')
b = np.arange(N, N+10, dtype='f8')*2.
t2.append((a, b))
ra = np.fromiter(((i, i*2.) for i in xrange(N+10)), dtype='i4,f8')
self.assert_(len(t) == N, "copy() does not work correctly")
self.assert_(len(t2) == N+10, "copy() does not work correctly")
assert_array_equal(t2[:], ra, "ctable values are not correct")
def test01(self):
"""Testing copy() with higher clevel"""
N = 10*1000
ra = np.fromiter(((i, i**2.2) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
if self.disk:
# Copy over the same location should give an error
self.assertRaises(RuntimeError,
t.copy,cparams=ca.cparams(clevel=9),
rootdir=self.rootdir, mode='w')
return
else:
t2 = t.copy(cparams=ca.cparams(clevel=9),
rootdir=self.rootdir, mode='w')
#print "cbytes in f1, f2:", t['f1'].cbytes, t2['f1'].cbytes
self.assert_(t.cparams.clevel == ca.cparams().clevel)
self.assert_(t2.cparams.clevel == 9)
self.assert_(t['f1'].cbytes > t2['f1'].cbytes, "clevel not changed")
def test02(self):
"""Testing copy() with lower clevel"""
N = 10*1000
ra = np.fromiter(((i, i**2.2) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t2 = t.copy(cparams=ca.cparams(clevel=1))
self.assert_(t.cparams.clevel == ca.cparams().clevel)
self.assert_(t2.cparams.clevel == 1)
#print "cbytes in f1, f2:", t['f1'].cbytes, t2['f1'].cbytes
self.assert_(t['f1'].cbytes < t2['f1'].cbytes, "clevel not changed")
def test03(self):
"""Testing copy() with no shuffle"""
N = 10*1000
ra = np.fromiter(((i, i**2.2) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra)
# print "t:", t, t.rootdir
t2 = t.copy(cparams=ca.cparams(shuffle=False), rootdir=self.rootdir)
#print "cbytes in f1, f2:", t['f1'].cbytes, t2['f1'].cbytes
        self.assert_(t['f1'].cbytes < t2['f1'].cbytes, "shuffle not changed")
class copyDiskTest(copyTest, TestCase):
disk = True
class specialTest(TestCase):
def test00(self):
"""Testing __len__()"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
self.assert_(len(t) == len(ra), "Objects do not have the same length")
def test01(self):
"""Testing __sizeof__() (big ctables)"""
N = int(1e4)
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
#print "size t uncompressed ->", t.nbytes
#print "size t compressed ->", t.cbytes
self.assert_(sys.getsizeof(t) < t.nbytes,
"ctable does not seem to compress at all")
def test02(self):
"""Testing __sizeof__() (small ctables)"""
N = int(111)
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
#print "size t uncompressed ->", t.nbytes
#print "size t compressed ->", t.cbytes
self.assert_(sys.getsizeof(t) > t.nbytes,
"ctable compress too much??")
class fancy_indexing_getitemTest(TestCase):
def test00(self):
"""Testing fancy indexing with a small list"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
rt = t[[3,1]]
rar = ra[[3,1]]
#print "rt->", rt
#print "rar->", rar
assert_array_equal(rt, rar, "ctable values are not correct")
def test01(self):
"""Testing fancy indexing with a large numpy array"""
N = 10*1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
idx = np.random.randint(1000, size=1000)
rt = t[idx]
rar = ra[idx]
#print "rt->", rt
#print "rar->", rar
assert_array_equal(rt, rar, "ctable values are not correct")
def test02(self):
"""Testing fancy indexing with an empty list"""
N = 10*1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
rt = t[[]]
rar = ra[[]]
#print "rt->", rt
#print "rar->", rar
assert_array_equal(rt, rar, "ctable values are not correct")
def test03(self):
"""Testing fancy indexing (list of floats)"""
N = 101
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra)
rt = t[[2.3, 5.6]]
rar = ra[[2.3, 5.6]]
#print "rt->", rt
#print "rar->", rar
assert_array_equal(rt, rar, "ctable values are not correct")
def test04(self):
"""Testing fancy indexing (list of floats, numpy)"""
a = np.arange(1,101)
b = ca.carray(a)
idx = np.array([1.1, 3.3], dtype='f8')
self.assertRaises(IndexError, b.__getitem__, idx)
class fancy_indexing_setitemTest(TestCase):
def test00a(self):
"""Testing fancy indexing (setitem) with a small list"""
N = 100
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = [3,1]
t[sl] = (-1, -2, -3)
ra[sl] = (-1, -2, -3)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00b(self):
"""Testing fancy indexing (setitem) with a small list (II)"""
N = 100
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = [3,1]
t[sl] = [(-1, -2, -3), (-3, -2, -1)]
ra[sl] = [(-1, -2, -3), (-3, -2, -1)]
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01(self):
"""Testing fancy indexing (setitem) with a large array"""
N = 1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = np.random.randint(N, size=100)
t[sl] = (-1, -2, -3)
ra[sl] = (-1, -2, -3)
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02a(self):
"""Testing fancy indexing (setitem) with a boolean array (I)"""
N = 1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = np.random.randint(2, size=1000).astype('bool')
t[sl] = [(-1, -2, -3)]
ra[sl] = [(-1, -2, -3)]
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02b(self):
"""Testing fancy indexing (setitem) with a boolean array (II)"""
N = 1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = np.random.randint(10, size=1000).astype('bool')
t[sl] = [(-1, -2, -3)]
ra[sl] = [(-1, -2, -3)]
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03a(self):
"""Testing fancy indexing (setitem) with a boolean array (all false)"""
N = 1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = np.zeros(N, dtype="bool")
t[sl] = [(-1, -2, -3)]
ra[sl] = [(-1, -2, -3)]
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03b(self):
"""Testing fancy indexing (setitem) with a boolean array (all true)"""
N = 1000
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=10)
sl = np.ones(N, dtype="bool")
t[sl] = [(-1, -2, -3)]
ra[sl] = [(-1, -2, -3)]
#print "t[%s] -> %r" % (sl, t)
#print "ra[%s] -> %r" % (sl, ra)
assert_array_equal(t[:], ra, "ctable values are not correct")
class iterTest(MayBeDiskTest, TestCase):
def test00(self):
"""Testing ctable.__iter__"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t]
nl = [r['f1'] for r in ra]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test01(self):
"""Testing ctable.iter() without params"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t.iter()]
nl = [r['f1'] for r in ra]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test02(self):
"""Testing ctable.iter() with start,stop,step"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t.iter(1,9,3)]
nl = [r['f1'] for r in ra[1:9:3]]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test03(self):
"""Testing ctable.iter() with outcols"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [tuple(r) for r in t.iter(outcols='f2, nrow__, f0')]
nl = [(r['f2'], i, r['f0']) for i, r in enumerate(ra)]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test04(self):
"""Testing ctable.iter() with start,stop,step and outcols"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r for r in t.iter(1,9,3, 'f2, nrow__ f0')]
nl = [(r['f2'], r['f0'], r['f0']) for r in ra[1:9:3]]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test05(self):
"""Testing ctable.iter() with start, stop, step and limit"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t.iter(1,9,2, limit=3)]
nl = [r['f1'] for r in ra[1:9:2][:3]]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test06(self):
"""Testing ctable.iter() with start, stop, step and skip"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t.iter(1,9,2, skip=3)]
nl = [r['f1'] for r in ra[1:9:2][3:]]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
def test07(self):
"""Testing ctable.iter() with start, stop, step and limit, skip"""
N = 10
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
t = ca.ctable(ra, chunklen=4, rootdir=self.rootdir)
cl = [r.f1 for r in t.iter(1,9,2, limit=2, skip=1)]
nl = [r['f1'] for r in ra[1:9:2][1:3]]
#print "cl ->", cl
#print "nl ->", nl
self.assert_(cl == nl, "iter not working correctily")
class iterDiskTest(iterTest, TestCase):
disk = True
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
|
|
from datetime import datetime
from holster.enum import Enum
from peewee import BigIntegerField, IntegerField, SmallIntegerField, TextField, BooleanField, DateTimeField
from playhouse.postgres_ext import BinaryJSONField
from rowboat.sql import BaseModel
@BaseModel.register
class User(BaseModel):
user_id = BigIntegerField(primary_key=True)
username = TextField()
discriminator = SmallIntegerField()
avatar = TextField(null=True)
bot = BooleanField()
created_at = DateTimeField(default=datetime.utcnow)
admin = BooleanField(default=False)
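    # Extra DDL (presumably executed alongside table creation); the trigram
    # index assumes the pg_trgm PostgreSQL extension is installed.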
SQL = '''
CREATE INDEX IF NOT EXISTS users_username_trgm ON users USING gin(username gin_trgm_ops);
'''
class Meta:
db_table = 'users'
indexes = (
(('user_id', 'username', 'discriminator'), True),
)
@property
def id(self):
return self.user_id
@classmethod
def ensure(cls, user, should_update=True):
return cls.from_disco_user(user)
@classmethod
def with_id(cls, uid):
try:
return User.get(user_id=uid)
except User.DoesNotExist:
return
@classmethod
def from_disco_user(cls, user, should_update=True):
# DEPRECATED
obj, _ = cls.get_or_create(
user_id=user.id,
defaults={
'username': user.username,
'discriminator': user.discriminator,
'avatar': user.avatar,
'bot': user.bot
})
if should_update:
updates = {}
if obj.username != user.username:
updates['username'] = user.username
if obj.discriminator != user.discriminator:
updates['discriminator'] = user.discriminator
if obj.avatar != user.avatar:
updates['avatar'] = user.avatar
if updates:
cls.update(**updates).where(User.user_id == user.id).execute()
return obj
def get_avatar_url(self, fmt='webp', size=1024):
if not self.avatar:
return None
return 'https://cdn.discordapp.com/avatars/{}/{}.{}?size={}'.format(
self.user_id,
self.avatar,
fmt,
size
)
def __unicode__(self):
return u'{}#{}'.format(self.username, str(self.discriminator).zfill(4))
@BaseModel.register
class Infraction(BaseModel):
Types = Enum(
'MUTE',
'KICK',
'TEMPBAN',
'SOFTBAN',
'BAN',
'TEMPMUTE',
'UNBAN',
'TEMPROLE',
'WARNING',
bitmask=False,
)
guild_id = BigIntegerField()
user_id = BigIntegerField()
actor_id = BigIntegerField(null=True)
type_ = IntegerField(db_column='type')
reason = TextField(null=True)
metadata = BinaryJSONField(default={})
expires_at = DateTimeField(null=True)
created_at = DateTimeField(default=datetime.utcnow)
active = BooleanField(default=True)
class Meta:
db_table = 'infractions'
indexes = (
(('guild_id', 'user_id'), False),
)
@staticmethod
def admin_config(event):
return getattr(event.base_config.plugins, 'admin', None)
@classmethod
def temprole(cls, plugin, event, member, role_id, reason, expires_at):
User.from_disco_user(member.user)
# TODO: modlog
member.add_role(role_id, reason=reason)
cls.create(
guild_id=event.guild.id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.TEMPROLE,
reason=reason,
expires_at=expires_at,
metadata={'role': role_id})
@classmethod
def kick(cls, plugin, event, member, reason):
from rowboat.plugins.modlog import Actions
User.from_disco_user(member.user)
# Prevent the GuildMemberRemove log event from triggering
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberRemove'],
user_id=member.user.id
)
member.kick(reason=reason)
# Create a kick modlog event
plugin.call(
'ModLogPlugin.log_action_ext',
            Actions.MEMBER_KICK,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason'
)
cls.create(
guild_id=member.guild_id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.KICK,
reason=reason)
@classmethod
def tempban(cls, plugin, event, member, reason, expires_at):
from rowboat.plugins.modlog import Actions
User.from_disco_user(member.user)
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberRemove', 'GuildBanAdd'],
user_id=member.user.id
)
member.ban(reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMPBAN,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason',
expires=expires_at,
)
cls.create(
guild_id=member.guild_id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.TEMPBAN,
reason=reason,
expires_at=expires_at)
@classmethod
def softban(cls, plugin, event, member, reason):
from rowboat.plugins.modlog import Actions
User.from_disco_user(member.user)
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberRemove', 'GuildBanAdd', 'GuildBanRemove'],
user_id=member.user.id
)
member.ban(delete_message_days=7, reason=reason)
member.unban(reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_SOFTBAN,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason'
)
cls.create(
guild_id=member.guild_id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.SOFTBAN,
reason=reason)
@classmethod
def ban(cls, plugin, event, member, reason, guild):
from rowboat.plugins.modlog import Actions
if isinstance(member, (int, long)):
user_id = member
else:
User.from_disco_user(member.user)
user_id = member.user.id
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberRemove', 'GuildBanAdd'],
user_id=user_id,
)
guild.create_ban(user_id, reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_BAN,
event.guild.id,
user=unicode(member),
user_id=user_id,
actor=unicode(event.author) if event.author.id != user_id else 'Automatic',
reason=reason or 'no reason'
)
cls.create(
guild_id=guild.id,
user_id=user_id,
actor_id=event.author.id,
type_=cls.Types.BAN,
reason=reason)
@classmethod
def warn(cls, plugin, event, member, reason, guild):
from rowboat.plugins.modlog import Actions
User.from_disco_user(member.user)
user_id = member.user.id
cls.create(
guild_id=guild.id,
user_id=user_id,
actor_id=event.author.id,
type_=cls.Types.WARNING,
reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_WARNED,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason'
)
@classmethod
def mute(cls, plugin, event, member, reason):
from rowboat.plugins.modlog import Actions
admin_config = cls.admin_config(event)
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
user_id=member.user.id,
role_id=admin_config.mute_role,
)
member.add_role(admin_config.mute_role, reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_MUTED,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason'
)
cls.create(
guild_id=event.guild.id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.MUTE,
reason=reason,
metadata={'role': admin_config.mute_role})
@classmethod
def tempmute(cls, plugin, event, member, reason, expires_at):
from rowboat.plugins.modlog import Actions
admin_config = cls.admin_config(event)
if not admin_config.mute_role:
plugin.log.warning('Cannot tempmute member %s, no tempmute role', member.id)
return
plugin.call(
'ModLogPlugin.create_debounce',
event,
['GuildMemberUpdate'],
user_id=member.user.id,
role_id=admin_config.mute_role,
)
member.add_role(admin_config.mute_role, reason=reason)
plugin.call(
'ModLogPlugin.log_action_ext',
Actions.MEMBER_TEMP_MUTED,
event.guild.id,
member=member,
actor=unicode(event.author) if event.author.id != member.id else 'Automatic',
reason=reason or 'no reason',
expires=expires_at,
)
cls.create(
guild_id=event.guild.id,
user_id=member.user.id,
actor_id=event.author.id,
type_=cls.Types.TEMPMUTE,
reason=reason,
expires_at=expires_at,
metadata={'role': admin_config.mute_role})
@classmethod
def clear_active(cls, event, user_id, types):
"""
Marks a previously active tempmute as inactive for the given event/user.
This should be used in all locations where we either think this is no
longer active (e.g. the mute role was removed) _or_ when we don't want to
unmute the user any longer, e.g. they've been remuted by another command.
"""
return cls.update(active=False).where(
(cls.guild_id == event.guild.id) &
(cls.user_id == user_id) &
(cls.type_ << types) &
(cls.active == 1)
).execute() >= 1
@BaseModel.register
class StarboardBlock(BaseModel):
guild_id = BigIntegerField()
user_id = BigIntegerField()
actor_id = BigIntegerField()
class Meta:
indexes = (
(('guild_id', 'user_id'), True),
)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.canned import linear
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
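  # Returns an op asserting that the element-wise relative error
  # |expected - actual| / |expected| stays below rtol.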
with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
expected = ops.convert_to_tensor(expected, name='expected')
actual = ops.convert_to_tensor(actual, name='actual')
rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
rtol = ops.convert_to_tensor(rtol, name='rtol')
return check_ops.assert_less(
rdiff,
rtol,
data=('Condition expected =~ actual did not hold element-wise:'
'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
'rtol = ', rtol,),
name=scope)
def save_variables_to_ckpt(model_dir):
init_all_op = [variables.global_variables_initializer()]
with tf_session.Session() as sess:
sess.run(init_all_op)
saver.Saver().save(sess, os.path.join(model_dir, 'model.ckpt'))
def queue_parsed_features(feature_map):
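  # Re-emits the parsed feature tensors through a FIFOQueue (fed by a queue
  # runner) so input_fns built from parse_example keep producing batches.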
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
def sorted_key_dict(unsorted_dict):
return {k: unsorted_dict[k] for k in sorted(unsorted_dict)}
def sigmoid(x):
return 1 / (1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(session_run_hook.SessionRunHook):
"""A `SessionRunHook` to check a partitioned variable."""
def __init__(self, test_case, var_name, var_dim, partitions):
self._test_case = test_case
self._var_name = var_name
self._var_dim = var_dim
self._partitions = partitions
def begin(self):
with variable_scope.variable_scope(
variable_scope.get_variable_scope()) as scope:
scope.reuse_variables()
partitioned_weight = variable_scope.get_variable(
self._var_name, shape=(self._var_dim, 1))
self._test_case.assertTrue(
isinstance(partitioned_weight, variables.PartitionedVariable))
for part in partitioned_weight:
self._test_case.assertEqual(self._var_dim // self._partitions,
part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
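  # Base test mixin: concrete TestCases are expected to supply the estimator
  # constructor under test via linear_regressor_fn.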
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def testPartitioner(self):
x_dim = 64
partitions = 4
def _partitioner(shape, dtype):
del dtype # unused; required by Fn signature.
# Only partition the embedding tensor.
return [partitions, 1] if shape[0] == x_dim else [1]
regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
partitioner=_partitioner,
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
def testDefaultPartitionerWithMultiplePsReplicas(self):
partitions = 2
    # This results in weights larger than the default partition size of 64MB
    # (32 << 20 float32 weights * 4 bytes each = 128MB), so partitioned
    # weights are created.
x_dim = 32 << 20
class FakeRunConfig(run_config.RunConfig):
@property
def num_ps_replicas(self):
return partitions
# Mock the device setter as ps is not available on test machines.
with test.mock.patch.object(
estimator,
'_get_replica_device_setter',
return_value=lambda _: '/cpu:0'):
linear_regressor = self._linear_regressor_fn(
feature_columns=(
feature_column_lib.categorical_column_with_hash_bucket(
'language', hash_bucket_size=x_dim),),
config=FakeRunConfig(),
model_dir=self._model_dir)
def _input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english', 'spanish'],
indices=[[0, 0], [0, 1]],
dense_shape=[1, 2])
}, [[10.]]
hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
partitions)
linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,),)}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({'age': ((1,), (1,))}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with ops.Graph().as_default():
variables.Variable([[11.0]], name=AGE_WEIGHT_NAME)
variables.Variable([2.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # Average loss is the weighted average = (9 + 2*9) / (1 + 2) = 9
self.assertDictEqual({
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
ops.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with ops.Graph().as_default():
variables.Variable(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
variables.Variable([7.0, 8.0], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column(
'age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with ops.Graph().as_default():
variables.Variable([[10.0]], name=AGE_WEIGHT_NAME)
variables.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
variables.Variable([5.0], name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
feature_column_lib.numeric_column('age'),
feature_column_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([20, 40]),
'height': np.array([4, 8])},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
ops.GraphKeys.GLOBAL_STEP), eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_1d(self):
"""Tests predict when all variables are one-dimensional."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x'),),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': np.array([[2.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x * weight + bias = 2. * 10. + .2 = 20.2
self.assertAllClose([[20.2]], predicted_scores)
def testMultiDim(self):
"""Tests predict when all variables are multi-dimenstional."""
batch_size = 2
label_dimension = 3
x_dim = 4
feature_columns = (feature_column_lib.numeric_column('x', shape=(x_dim,)),)
with ops.Graph().as_default():
variables.Variable( # shape=[x_dim, label_dimension]
[[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
name='linear/linear_model/x/weights')
variables.Variable( # shape=[label_dimension]
[.2, .4, .6], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
# x shape=[batch_size, x_dim]
x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# score = x * weight + bias, shape=[batch_size, label_dimension]
self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
predicted_scores)
def testTwoFeatureColumns(self):
"""Tests predict with two feature columns."""
with ops.Graph().as_default():
variables.Variable([[10.]], name='linear/linear_model/x0/weights')
variables.Variable([[20.]], name='linear/linear_model/x1/weights')
variables.Variable([.2], name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('x0'),
feature_column_lib.numeric_column('x1')),
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x0': np.array([[2.]]),
'x1': np.array([[3.]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = linear_regressor.predict(input_fn=predict_input_fn)
predicted_scores = list([x['predictions'] for x in predictions])
# x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
self.assertAllClose([[80.2]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
input_dimension, label_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
    # EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['predictions'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
    # Pandas DataFrame naturally supports 1-dim data only.
label_dimension = 1
input_dimension = label_dimension
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
input_dimension = label_dimension
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum[:label_dimension])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
label_dimension=label_dimension,
prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
def __init__(self, linear_regressor_fn):
self._linear_regressor_fn = linear_regressor_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
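    # Builds a mock Optimizer whose minimize() verifies the trainable variables
    # and, when expected_loss is given, asserts the loss value via assert_close.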
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step=None, var_list=None):
trainable_vars = var_list or ops.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
if global_step is not None:
return state_ops.assign_add(global_step, 1).op
return control_flow_ops.no_op()
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
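    # Reads the latest checkpoint and verifies the global step and, optionally,
    # the learned age weight and bias values.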
shapes = {
name: shape
for (name, shape) in checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(expected_global_step,
checkpoint_utils.load_variable(self._model_dir,
ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertEqual(expected_age_weight,
checkpoint_utils.load_variable(self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([1], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertEqual(expected_bias,
checkpoint_utils.load_variable(self._model_dir,
BIAS_NAME))
def testFromScratchWithDefaultOptimizer(self):
# Create LinearRegressor.
label = 5.
age = 17
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(num_steps)
def testTrainWithOneDimLabel(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testTrainWithOneDimWeight(self):
label_dimension = 1
batch_size = 20
feature_columns = [feature_column_lib.numeric_column('age', shape=(1,))]
est = self._linear_regressor_fn(
feature_columns=feature_columns,
label_dimension=label_dimension,
weight_column='w',
model_dir=self._model_dir)
data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
self.assertEqual((batch_size,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1,
'w': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(200)
def testFromScratch(self):
# Create LinearRegressor.
label = 5.
age = 17
# loss = (logits - label)^2 = (0 - 5.)^2 = 25.
mock_optimizer = self._mock_optimizer(expected_loss=25.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=num_steps,
expected_age_weight=0.,
expected_bias=0.)
def testFromCheckpoint(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 10. + 5. = 175
# loss = (logits - label)^2 = (175 - 5)^2 = 28900
mock_optimizer = self._mock_optimizer(expected_loss=28900.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,),)}, ((5.,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testFromCheckpointMultiBatch(self):
# Create initial checkpoint.
age_weight = 10.0
bias = 5.0
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
variables.Variable([bias], name=BIAS_NAME)
variables.Variable(
initial_global_step,
name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias
# logits[0] = 17 * 10. + 5. = 175
# logits[1] = 15 * 10. + 5. = 155
# loss = sum((logits - label)^2) = (175 - 5)^2 + (155 - 3)^2 = 52004
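# (Illustrative check: 170^2 = 28900 and 152^2 = 23104, and 28900 + 23104 = 52004.)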
mock_optimizer = self._mock_optimizer(expected_loss=52004.)
linear_regressor = self._linear_regressor_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
model_dir=self._model_dir,
optimizer=mock_optimizer)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
linear_regressor.train(
input_fn=lambda: ({'age': ((17,), (15,))}, ((5.,), (3.,))),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return state_ops.assign_add(global_step, 1).op
assert_loss = assert_close(
math_ops.to_float(expected_loss, name='expected'),
loss,
name='assert_loss')
with ops.control_dependencies((assert_loss,)):
return state_ops.assign_add(global_step, 1).op
mock_optimizer = test.mock.NonCallableMock(
spec=optimizer.Optimizer,
wraps=optimizer.Optimizer(use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(
self, n_classes, expected_global_step, expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape for (name, shape) in
checkpoint_utils.list_variables(self._model_dir)
}
self.assertEqual([], shapes[ops.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
checkpoint_utils.load_variable(
self._model_dir, ops.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension],
shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(expected_age_weight,
checkpoint_utils.load_variable(
self._model_dir,
AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
checkpoint_utils.load_variable(
self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_2}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1, 'w': data_rank_1}, y=data_rank_1,
batch_size=batch_size, num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
# For this particular test case, as the logits are all equal, the formula
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
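# (Illustrative check: for n_classes=2 the loss is -log(0.5) ~= 0.6931, and for
# n_classes=4 it is -log(0.25) ~= 1.3863.)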
mock_optimizer = self._mock_optimizer(
expected_loss=-1 * math.log(1.0/n_classes))
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
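# (Illustrative check for the binary case: sigmoid(-1) ~= 0.2689, and
# -log(0.2689) ~= 1.3133, matching expected_loss below.)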
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
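# (Illustrative check: -log(sigmoid(-1)) ~= 1.3133 and -log(sigmoid(1)) ~= 0.3133,
# so 0.8 * 1.3133 + 0.2 * 0.3133 ~= 1.1133.)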
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
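# (Illustrative check for the binary case: sigmoid(-1) ~= 0.2689 and
# 1 - sigmoid(2) ~= 0.1192, so the per-row losses are ~1.3133 and ~2.1269,
# summing to ~3.4402.)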
if n_classes == 2:
expected_loss = (1.3133 + 2.1269)
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({'age': (age)}, (label)),
steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_evaluation_for_simple_data(self, n_classes):
label = 1
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as -11.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
100, name=ops.GraphKeys.GLOBAL_STEP, dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': ((age,),)}, ((label,),)), steps=1)
if n_classes == 2:
# Binary classes: loss = sigmoid_cross_entropy(logits=-41, label=1) ~= 41.
expected_metrics = {
metric_keys.MetricKeys.LOSS: 41.,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: 41.,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
metric_keys.MetricKeys.LABEL_MEAN: 1.,
metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 1.,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=2)
def test_multi_classes_evaluation_for_simple_data(self):
self._test_evaluation_for_simple_data(n_classes=4)
def _test_evaluation_batch(self, n_classes):
"""Tests evaluation for batch_size==2."""
label = [1, 0]
age = [17., 18.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.), labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
expected_loss = 1.3133 * 2
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
metric_keys.MetricKeys.LABEL_MEAN: 0.5,
metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.25,
}
else:
# Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=2)
def test_multi_classes_evaluation_batch(self):
self._test_evaluation_batch(n_classes=4)
def _test_evaluation_weights(self, n_classes):
"""Tests evaluation with weights."""
label = [1, 0]
age = [17., 18.]
weights = [1., 2.]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (
np.reshape(2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(
initial_global_step, name=ops.GraphKeys.GLOBAL_STEP,
dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
n_classes=n_classes,
weight_column='w',
model_dir=self._model_dir)
eval_metrics = est.evaluate(
input_fn=lambda: ({'age': (age), 'w': (weights)}, (label)), steps=1)
if n_classes == 2:
# Logits are (-1., 1.), labels are (1, 0).
# Loss is
# loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
# loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
# weights = [1., 2.]
expected_loss = 1.3133 * (1. + 2.)
loss_mean = expected_loss / (1.0 + 2.0)
label_mean = np.average(label, weights=weights)
logits = [-1, 1]
logistics = sigmoid(np.array(logits))
predictions_mean = np.average(logistics, weights=weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
metric_keys.MetricKeys.LABEL_MEAN: label_mean,
metric_keys.MetricKeys.ACCURACY_BASELINE: (
max(label_mean, 1-label_mean)),
metric_keys.MetricKeys.AUC: 0.,
metric_keys.MetricKeys.AUC_PR: 0.1668,
}
else:
# Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
loss_mean = np.average([expected_loss_0, expected_loss_1],
weights=weights)
expected_loss = loss_mean * np.sum(weights)
expected_metrics = {
metric_keys.MetricKeys.LOSS: expected_loss,
ops.GraphKeys.GLOBAL_STEP: 100,
metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
metric_keys.MetricKeys.ACCURACY: 0.,
}
self.assertAllClose(sorted_key_dict(expected_metrics),
sorted_key_dict(eval_metrics), rtol=1e-3)
def test_binary_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=2)
def test_multi_classes_evaluation_weights(self):
self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _testPredications(self, n_classes, label_vocabulary, label_output_fn):
"""Tests predict when all variables are one-dimensional."""
age = 1.
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as -11.0 * range(n_classes).
age_weight = [[-11.0]] if n_classes == 2 else (
np.reshape(-11.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [10.0] if n_classes == 2 else [10.0] * n_classes
with ops.Graph().as_default():
variables.Variable(age_weight, name=AGE_WEIGHT_NAME)
variables.Variable(bias, name=BIAS_NAME)
variables.Variable(100, name='global_step', dtype=dtypes.int64)
save_variables_to_ckpt(self._model_dir)
est = self._linear_classifier_fn(
feature_columns=(feature_column_lib.numeric_column('age'),),
label_vocabulary=label_vocabulary,
n_classes=n_classes,
model_dir=self._model_dir)
predict_input_fn = numpy_io.numpy_input_fn(
x={'age': np.array([[age]])},
y=None,
batch_size=1,
num_epochs=1,
shuffle=False)
predictions = list(est.predict(input_fn=predict_input_fn))
if n_classes == 2:
scalar_logits = np.asscalar(
np.reshape(np.array(age_weight) * age + bias, (1,)))
two_classes_logits = [0, scalar_logits]
two_classes_logits_exp = np.exp(two_classes_logits)
softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
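# Softmax over [0, z] equals [1 - sigmoid(z), sigmoid(z)], so padding a zero logit
# for class 0 reproduces the probabilities expected from the binary classifier.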
expected_predictions = {
'class_ids': [0],
'classes': [label_output_fn(0)],
'logistic': [sigmoid(np.array(scalar_logits))],
'logits': [scalar_logits],
'probabilities': softmax,
}
else:
onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
class_ids = onedim_logits.argmax()
logits_exp = np.exp(onedim_logits)
softmax = logits_exp / logits_exp.sum()
expected_predictions = {
'class_ids': [class_ids],
'classes': [label_output_fn(class_ids)],
'logits': onedim_logits,
'probabilities': softmax,
}
self.assertEqual(1, len(predictions))
# assertAllClose cannot handle byte type.
self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
expected_predictions.pop('classes')
predictions[0].pop('classes')
self.assertAllClose(sorted_key_dict(expected_predictions),
sorted_key_dict(predictions[0]))
def testBinaryClassesWithoutLabelVocabulary(self):
n_classes = 2
self._testPredications(n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testBinaryClassesWithLabelVocabulary(self):
n_classes = 2
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
def testMultiClassesWithoutLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=None,
label_output_fn=lambda x: ('%s' % x).encode())
def testMultiClassesWithLabelVocabulary(self):
n_classes = 4
self._testPredications(
n_classes,
label_vocabulary=['class_vocab_{}'.format(i)
for i in range(n_classes)],
label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
class BaseLinearClassifierIntegrationTest(object):
def __init__(self, linear_classifier_fn):
self._linear_classifier_fn = linear_classifier_fn
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
predict_input_fn, input_dimension, prediction_length):
feature_columns = [
feature_column_lib.numeric_column('x', shape=(input_dimension,))
]
est = self._linear_classifier_fn(
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
# learn y = x
est.train(train_input_fn, steps=200)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
# PREDICT
predictions = np.array(
[x['classes'] for x in est.predict(predict_input_fn)])
self.assertAllEqual((prediction_length, 1), predictions.shape)
# EXPORT
feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def _test_numpy_input_fn(self, n_classes):
"""Tests complete flow with numpy_input_fn."""
input_dimension = 4
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size)
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=target,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=2)
def test_multi_classes_numpy_input_fn(self):
self._test_numpy_input_fn(n_classes=4)
def _test_pandas_input_fn(self, n_classes):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
# Pandas DataFrame naturally supports 1-dim data only.
input_dimension = 1
batch_size = 10
data = np.array([1., 2., 3., 4.], dtype=np.float32)
target = np.array([1, 0, 1, 0], dtype=np.int32)
x = pd.DataFrame({'x': data})
y = pd.Series(target)
prediction_length = 4
train_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x, y=y, batch_size=batch_size, shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x, batch_size=batch_size, shuffle=False)
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=2)
def test_multi_classes_pandas_input_fn(self):
self._test_pandas_input_fn(n_classes=4)
def _test_input_fn_from_parse_example(self, n_classes):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
batch_size = 10
prediction_length = batch_size
data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
target = np.array([1] * batch_size, dtype=np.int64)
serialized_examples = []
for x, y in zip(data, target):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=x)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[y])),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
n_classes=n_classes,
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
prediction_length=prediction_length)
def test_binary_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=2)
def test_multi_classes_input_fn_from_parse_example(self):
self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
def test_basic_logit_correctness(self):
"""linear_logit_fn simply wraps feature_column_lib.linear_model."""
age = feature_column_lib.numeric_column('age')
with ops.Graph().as_default():
logit_fn = linear._linear_logit_fn_builder(units=2, feature_columns=[age])
logits = logit_fn(features={'age': [[23.], [31.]]})
with variable_scope.variable_scope('linear_model', reuse=True):
bias_var = variable_scope.get_variable('bias_weights')
age_var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
'linear_model/age')[0]
with tf_session.Session() as sess:
sess.run([variables.global_variables_initializer()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
sess.run(age_var.assign([[2.0, 3.0]]))
# [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
# [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
|
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The default implementation of the usb_hub capability.
The usb_hub capability is intended to be used by primary devices that require
the ability to get or change the USB power mode for a configured port on a USB hub.
The configured USB hub must support the switch_power capability.
"""
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device.capabilities.interfaces import usb_hub_base
logger = gdm_logger.get_logger()
class UsbHubDefault(usb_hub_base.UsbHubBase):
"""Base class for usb_hub."""
def __init__(self,
device_name,
get_manager,
hub_name,
device_port,
get_switchboard_if_initialized,
change_triggers_reboot=False,
wait_for_bootup_complete_fn=None,
settable=False):
"""Create an instance of the usb_hub capability.
Args:
device_name (str): name of the device this capability is attached
to.
get_manager (method): A method which returns the Manager instance.
hub_name (str): name of the hub this device is attached to.
device_port (int): usb hub port number used by the device.
get_switchboard_if_initialized (callable): function which returns
a Switchboard instance or None if Switchboard hasn't been initialized.
change_triggers_reboot (bool): Set change_triggers_reboot to TRUE if
changing the USB power mode for the device causes a reboot.
wait_for_bootup_complete_fn (func): A method that the capability can
call to wait for a reboot to complete if triggered by a change.
settable (bool): whether or not the properties are settable.
"""
super(UsbHubDefault, self).__init__(device_name=device_name)
self._hub_name = hub_name
self._device_port = device_port
self._get_switchboard_if_initialized = get_switchboard_if_initialized
self._change_triggers_reboot = change_triggers_reboot
self._wait_for_bootup_complete_fn = wait_for_bootup_complete_fn
self._usb_hub = None
self._settable = settable
self._get_manager = get_manager
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def health_check(self):
"""Checks that the capability is ready to use.
Raises:
CapabilityNotReadyError: if unable to create auxiliary device for
power switching.
"""
unset_props = []
if self.name is None:
unset_props.append("device_usb_hub_name")
if self.device_port is None:
unset_props.append("device_usb_port")
if unset_props:
if self._settable:
msg_format = ("If device is connected to Cambrionix, "
"set them via 'gdm set-prop {} <property> <value>'")
else:
msg_format = ("If device is connected to Cambrionix, "
"set them via 'gdm redetect {}")
msg = msg_format.format(self._device_name)
error_msg = "properties {} are unset. ".format(
" and ".join(unset_props)) + msg
raise errors.CapabilityNotReadyError(
msg=error_msg, device_name=self._device_name)
try:
self._usb_hub = self._get_manager().create_device(self.name)
except (errors.DeviceError, RuntimeError) as err:
raise errors.CapabilityNotReadyError(
msg=repr(err), device_name=self._device_name)
self._healthy = True
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def close(self):
"""Closes the USB hub device instance."""
if self._usb_hub:
self._usb_hub.close()
super().close()
@decorators.PersistentProperty
def name(self):
"""The name of the usb hub.
Returns:
str: usb hub name.
Raises:
DeviceError: usb hub name retrieval failed
"""
return self._hub_name
@decorators.DynamicProperty
def supported_modes(self):
"""Get the USB power modes supported by the USB hub."""
if not self.healthy:
self.health_check()
return self._usb_hub.switch_power.supported_modes
@decorators.PersistentProperty
def device_port(self):
"""The usb hub port number used by device.
Returns:
int: port number on usb hub.
Raises:
DeviceError: usb hub port number retrieval failed
"""
return self._device_port
@decorators.CapabilityLogDecorator(logger)
def check_device_ready(self):
self.health_check()
@decorators.CapabilityLogDecorator(logger, level=decorators.DEBUG)
def get_device_power(self):
"""Gets usb port mode if set.
Returns:
str: 'sync', 'charge', or 'off'
Raises:
DeviceError: if key 'mode' doesn't exist
"""
if not self.healthy:
self.health_check()
return self._usb_hub.switch_power.get_mode(self._device_port)
@decorators.CapabilityLogDecorator(logger)
def set_device_power(self, mode, no_wait=False):
"""Turns associated powered usb hub port, if available, power state to sync, off, charge.
Args:
mode (str): power mode to set USB hub port to ("sync", "off",
"charge")
no_wait (bool): return before boot up is complete. Default: False.
Raises:
DeviceError: if invalid mode provided
Notes:
'sync' is data and power on, 'charge' is power only on, 'off' is
both off.
"""
self._set_port_mode(mode, self.device_port, no_wait)
@decorators.CapabilityLogDecorator(logger)
def power_off(self, port, no_wait=False):
"""This command powers off the port specified or all ports if port is None.
Args:
port (int): identifies which hub port to power off
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: port number invalid.
"""
self._set_port_mode("off", port, no_wait)
@decorators.CapabilityLogDecorator(logger)
def power_on(self, port, data_sync=True, no_wait=False):
"""This command powers on the port specified or all ports if port is None.
Args:
port (int): identifying which hub port to power on
data_sync (bool): True if data should be enabled, false for power
only
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: port number invalid.
"""
mode = "sync"
if not data_sync:
mode = "charge"
self._set_port_mode(mode, port, no_wait)
def _set_port_mode(self, mode, port, no_wait):
"""Set the USB power mode fort the specified port.
Args:
mode (str): power mode to set USB hub port to
port (int): identifies which hub port to set the mode on.
no_wait (bool): Return before boot up is complete. Default: False.
Raises:
DeviceError: mode or port is invalid.
"""
if not self.healthy:
self.health_check()
if self._verify_power_change_needed(mode, self.device_port):
logger.debug(
"{} setting device USB power to '{}' for hub: {} and port: {}".format(
self._device_name, mode, self._hub_name, port))
switchboard = self._get_switchboard_if_initialized()
if switchboard:
switchboard.add_log_note(
"Setting device USB power to '{}' for hub {} and port {}".format(
mode, self._hub_name, port))
if self._change_triggers_reboot:
switchboard.add_log_note(
"GDM triggered reboot via USB power change.")
self._usb_hub.switch_power.set_mode(mode, port)
if self._change_triggers_reboot and not no_wait:
self._wait_for_bootup_complete_fn()
def _verify_power_change_needed(self, mode, port):
"""Returns whether or not port power change needed.
Args:
mode (str): power mode to set USB hub port to.
port (int): Identifies which port to check the power mode.
Returns:
bool: True if the current mode is not the same as the requested mode.
Raises:
DeviceError: if the provided mode or usb_hub management is not a valid
option.
"""
mode = mode.lower()
if mode not in list(self.supported_modes):
raise errors.DeviceError("{} USB power mode must be in "
"supported_modes".format(self._device_name))
if not self.healthy:
self.health_check()
current_mode = self._usb_hub.switch_power.get_mode(port)
return current_mode != mode
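# Illustrative usage sketch (the `device` object and its `usb_hub` attribute are
# assumptions for illustration; only methods defined above are used):
#   hub = device.usb_hub                           # an UsbHubDefault instance
#   hub.health_check()                             # creates the auxiliary hub device
#   mode = hub.get_device_power()                  # 'sync', 'charge', or 'off'
#   hub.set_device_power('charge')                 # power only; may wait for a reboot
#   hub.power_on(hub.device_port, data_sync=True)  # power and data back on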
|
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pylab
from colour import Color
from pylab import cm
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.pyplot import figure
from bigbang.visualisation import stackedareachart
from bigbang.visualisation import utils
def evolution_of_participation_1D(
data: dict,
ax: mpl.axes,
entity_in_focus: Optional[list] = None,
percentage: bool = False,
colormap: mpl.colors.LinearSegmentedColormap = mpl.cm.jet,
) -> mpl.axes:
"""
Parameters
----------
data : Dictionary with a format {'x_axis_labels': {'y_axis_labels': y_values}}
entity_in_focus : list of y-labels to highlight; all other labels are drawn in grey.
percentage : if True, values are transformed to percentages (see stackedareachart.data_transformation).
"""
x = list(data.keys())
ylabels = stackedareachart.get_ylabels(data)
y = stackedareachart.data_transformation(data, ylabels, percentage)
colors = utils.create_color_palette(
ylabels,
entity_in_focus,
colormap,
include_dof=False,
return_dict=True,
)
for iy, ylab in enumerate(ylabels):
if entity_in_focus and ylab in entity_in_focus:  # guard: entity_in_focus may be None
ax.plot(
x,
y[iy, :],
color="w",
linewidth=4,
zorder=1,
)
ax.plot(
x,
y[iy, :],
color=colors[ylab],
linewidth=3,
zorder=1,
label=ylab,
)
else:
ax.plot(
x,
y[iy, :],
color="grey",
linewidth=1,
zorder=0,
alpha=0.2,
)
if np.isfinite(np.max(x)):
ax.set_xlim(np.min(x), np.max(x))
if np.isfinite(np.max(y)):
ax.set_ylim(np.min(y), np.max(y))
return ax
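# Illustrative usage sketch (a minimal, assumed data layout matching the docstring above):
#   fig, ax = plt.subplots()
#   data = {2019: {"example.org": 10, "example.com": 5},
#           2020: {"example.org": 12, "example.com": 7}}
#   evolution_of_participation_1D(data, ax, entity_in_focus=["example.org"])
#   ax.legend()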
def evolution_of_participation_2D(
xdata: dict,
ydata: dict,
ax: mpl.axes,
entity_in_focus: Optional[list] = None,
percentage: bool = False,
colormap: mpl.colors.LinearSegmentedColormap = mpl.cm.jet,
) -> mpl.axes:
"""
Parameters
----------
xdata, ydata : Dictionaries with a format {'x_axis_labels': {'y_axis_labels': y_values}}
entity_in_focus : list of labels to highlight; all other labels are drawn in grey.
percentage : if True, values are transformed to percentages (see stackedareachart.data_transformation).
"""
# TODO: include time indication
xlabels = stackedareachart.get_ylabels(xdata)
ylabels = stackedareachart.get_ylabels(ydata)
# ensure uniform order
labels = list(set(xlabels + ylabels))
xindx = [xlabels.index(lab) if lab in xlabels else None for lab in labels]
xlabels = [xlabels[i] if i is not None else None for i in xindx]
yindx = [ylabels.index(lab) if lab in ylabels else None for lab in labels]
ylabels = [ylabels[i] if i is not None else None for i in yindx]
# create arrays with format (# of ylabels, # of xlabels)
x = stackedareachart.data_transformation(xdata, xlabels, percentage)
y = stackedareachart.data_transformation(ydata, ylabels, percentage)
colors = utils.create_color_palette(
ylabels,
entity_in_focus,
colormap,
include_dof=False,
return_dict=True,
)
ax.plot([0, np.max(y)], [0, np.max(y)], c="k", linestyle="--", zorder=0)
for i, lab in enumerate(labels):
if entity_in_focus and lab in entity_in_focus:  # guard: entity_in_focus may be None
ax.plot(
x[i, :],
y[i, :],
color="w",
linewidth=4,
zorder=1,
)
ax.plot(
x[i, :],
y[i, :],
color=colors[lab],
linewidth=3,
zorder=1,
label=lab,
)
else:
ax.plot(
x[i, :],
y[i, :],
color="grey",
linewidth=1,
zorder=0,
alpha=0.2,
)
ax.set_xlim(np.min(x), np.max(x))
ax.set_ylim(np.min(y), np.max(y))
return ax
def evolution_of_graph_property_by_domain(
data: dict,
xkey: str,
ykey: str,
ax: mpl.axes,
entity_in_focus: Optional[list] = None,
percentile: Optional[float] = None,
colormap: mpl.colors.LinearSegmentedColormap = mpl.cm.jet,
) -> mpl.axes:
"""
Parameters
----------
data : Dictionary created with
bigbang.analysis.ListservList.get_graph_prop_per_domain_per_year()
ax : matplotlib axes to draw on.
entity_in_focus : list of domains to highlight; all other domains are drawn in grey.
percentile : if given, domains whose ykey values exceed this percentile are highlighted.
"""
colors = utils.create_color_palette(
list(data.keys()),
entity_in_focus,
colormap,
include_dof=False,
return_dict=True,
)
if entity_in_focus:
for key, value in data.items():
if key in entity_in_focus:
ax.plot(
value[xkey],
value[ykey],
color="w",
linewidth=4,
zorder=1,
)
ax.plot(
value[xkey],
value[ykey],
color=colors[key],
linewidth=3,
label=key,
zorder=2,
)
else:
ax.plot(
value[xkey],
value[ykey],
color="grey",
alpha=0.2,
linewidth=1,
zorder=0,
)
if percentile is not None:
betweenness_centrality = []
for key, value in data.items():
betweenness_centrality += value[ykey]
betweenness_centrality = np.array(betweenness_centrality)
threshold = np.percentile(betweenness_centrality, percentile)
for key, value in data.items():
if any(np.array(value[ykey]) > threshold):
ax.plot(
value[xkey],
value[ykey],
color="w",
linewidth=4,
zorder=1,
)
ax.plot(
value[xkey],
value[ykey],
linewidth=3,
color=colors[key],
label=key,
zorder=2,
)
else:
ax.plot(
value[xkey],
value[ykey],
color="grey",
linewidth=1,
alpha=0.2,
zorder=0,
)
return ax
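# Illustrative usage sketch (the data layout below is an assumption; in practice it is
# produced by bigbang.analysis.ListservList.get_graph_prop_per_domain_per_year()):
#   fig, ax = plt.subplots()
#   data = {"example.org": {"year": [2019, 2020], "betweenness": [0.1, 0.3]},
#           "example.com": {"year": [2019, 2020], "betweenness": [0.2, 0.1]}}
#   evolution_of_graph_property_by_domain(data, "year", "betweenness", ax, percentile=50)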
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FileServersOperations(object):
"""FileServersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~batch_ai.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
workspace_name, # type: str
file_server_name, # type: str
parameters, # type: "_models.FileServerCreateParameters"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.FileServer"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.FileServer"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FileServerCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FileServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
workspace_name, # type: str
file_server_name, # type: str
parameters, # type: "_models.FileServerCreateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FileServer"]
"""Creates a File Server in the given workspace.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param file_server_name: The name of the file server within the specified resource group. File
server names can only contain a combination of alphanumeric characters along with dash (-) and
underscore (_). The name must be from 1 through 64 characters long.
:type file_server_name: str
:param parameters: The parameters to provide for File Server creation.
:type parameters: ~batch_ai.models.FileServerCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FileServer or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~batch_ai.models.FileServer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
file_server_name=file_server_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FileServer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'fileServerName': self._serialize.url("file_server_name", file_server_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'} # type: ignore
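# Illustrative usage sketch (the management client and its `file_servers` attribute are
# assumptions; `parameters` is a FileServerCreateParameters instance):
#   poller = client.file_servers.begin_create(
#       resource_group_name="my-rg",
#       workspace_name="my-workspace",
#       file_server_name="my-fileserver",
#       parameters=parameters)
#   file_server = poller.result()   # blocks until the long-running operation completes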
def list_by_workspace(
self,
resource_group_name, # type: str
workspace_name, # type: str
file_servers_list_by_workspace_options=None, # type: Optional["_models.FileServersListByWorkspaceOptions"]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FileServerListResult"]
"""Gets a list of File Servers associated with the specified workspace.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param workspace_name: The name of the workspace. Workspace names can only contain a
combination of alphanumeric characters along with dash (-) and underscore (_). The name must be
from 1 through 64 characters long.
:type workspace_name: str
:param file_servers_list_by_workspace_options: Parameter group.
:type file_servers_list_by_workspace_options: ~batch_ai.models.FileServersListByWorkspaceOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileServerListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~batch_ai.models.FileServerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FileServerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_max_results = None
if file_servers_list_by_workspace_options is not None:
_max_results = file_servers_list_by_workspace_options.max_results
api_version = "2018-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_workspace.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', pattern=r'^[-\w\._]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=1, pattern=r'^[-\w_]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if _max_results is not None:
query_parameters['maxresults'] = self._serialize.query("max_results", _max_results, 'int', maximum=1000, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FileServerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers'} # type: ignore
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import os
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.core.data_util import text_dataloader
from tensorflow_examples.lite.model_maker.core.export_format import ExportFormat
from tensorflow_examples.lite.model_maker.core.task import text_classifier
from tensorflow_examples.lite.model_maker.core.task.model_spec import text_spec
class TextClassifierTest(tf.test.TestCase):
TEST_LABELS_AND_TEXT = (('pos', 'super good'), ('neg', 'really bad.'))
def _gen_text_dir(self, text_per_class=1):
text_dir = os.path.join(self.get_temp_dir(), 'random_text_dir')
if os.path.exists(text_dir):
return text_dir
os.mkdir(text_dir)
for class_name, text in self.TEST_LABELS_AND_TEXT:
class_subdir = os.path.join(text_dir, class_name)
os.mkdir(class_subdir)
for i in range(text_per_class):
with open(os.path.join(class_subdir, '%d.txt' % i), 'w') as f:
f.write(text)
return text_dir
def setUp(self):
super(TextClassifierTest, self).setUp()
self.text_dir = self._gen_text_dir()
self.tiny_text_dir = self._gen_text_dir(text_per_class=1)
@test_util.test_in_tf_1
def test_average_wordvec_model_create_v1_incompatible(self):
with self.assertRaisesRegex(ValueError, 'Incompatible versions'):
model_spec = text_spec.AverageWordVecModelSpec(seq_len=2)
all_data = text_dataloader.TextClassifierDataLoader.from_folder(
self.text_dir, model_spec=model_spec)
_ = text_classifier.create(
all_data,
model_spec=model_spec,
)
@test_util.test_in_tf_2
def test_bert_model(self):
model_spec = text_spec.BertClassifierModelSpec(seq_len=2, trainable=False)
all_data = text_dataloader.TextClassifierDataLoader.from_folder(
self.tiny_text_dir, model_spec=model_spec)
# Splits data, 50% data for training, 50% for testing
self.train_data, self.test_data = all_data.split(0.5)
model = text_classifier.create(
self.train_data,
model_spec=model_spec,
epochs=1,
batch_size=1,
shuffle=True)
self._test_accuracy(model, 0.0)
self._test_export_to_tflite(model, threshold=0.0)
self._test_model_without_training(model_spec)
@test_util.test_in_tf_2
def test_mobilebert_model(self):
model_spec = text_spec.mobilebert_classifier_spec(
seq_len=2, trainable=False, default_batch_size=1)
all_data = text_dataloader.TextClassifierDataLoader.from_folder(
self.tiny_text_dir, model_spec=model_spec)
# Splits data, 50% data for training, 50% for testing
self.train_data, self.test_data = all_data.split(0.5)
model = text_classifier.create(
self.train_data,
model_spec=model_spec,
epochs=1,
shuffle=True)
self._test_accuracy(model, 0.0)
self._test_export_to_tflite(model, threshold=0.0, atol=1e-2)
self._test_export_to_tflite_quant(model, model_size=25555047)
@test_util.test_in_tf_2
def test_mobilebert_model_without_training_for_tfjs(self):
model_spec = text_spec.mobilebert_classifier_spec(
seq_len=2, trainable=False, default_batch_size=1)
all_data = text_dataloader.TextClassifierDataLoader.from_folder(
self.text_dir, model_spec=model_spec)
self.train_data, self.test_data = all_data.split(0.5)
with self.assertRaises(Exception): # Raise an error when reloading model.
self._test_model_without_training(model_spec)
@test_util.test_in_tf_2
def test_average_wordvec_model(self):
model_spec = text_spec.AverageWordVecModelSpec(seq_len=2)
all_data = text_dataloader.TextClassifierDataLoader.from_folder(
self.text_dir, model_spec=model_spec)
# Splits data, 50% data for training, 50% for testing
self.train_data, self.test_data = all_data.split(0.5)
model = text_classifier.create(
self.train_data,
model_spec=model_spec,
epochs=1,
batch_size=1,
shuffle=True)
self._test_accuracy(model, threshold=0.0)
self._test_predict_top_k(model)
self._test_export_to_tflite(
model,
threshold=0.0,
expected_json_file='average_word_vec_metadata.json')
self._test_export_to_saved_model(model)
self._test_export_labels(model)
self._test_export_vocab(model)
self._test_model_without_training(model_spec)
def _test_model_without_training(self, model_spec):
# Test without retraining.
model = text_classifier.create(
self.train_data, model_spec=model_spec, do_train=False)
self._test_accuracy(model, threshold=0.0)
self._test_export_to_tflite(model, threshold=0.0)
self._test_export_to_tfjs(model)
def _test_accuracy(self, model, threshold=1.0):
_, accuracy = model.evaluate(self.test_data)
self.assertGreaterEqual(accuracy, threshold)
def _test_predict_top_k(self, model):
topk = model.predict_top_k(self.test_data, batch_size=1)
for i in range(len(self.test_data)):
predict_label, predict_prob = topk[i][0][0], topk[i][0][1]
self.assertIn(predict_label, model.index_to_label)
self.assertGreater(predict_prob, 0.5)
def _load_vocab(self, filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
return [vocab.strip('\n').split() for vocab in f]
def _load_labels(self, filepath):
with tf.io.gfile.GFile(filepath, 'r') as f:
return [label.strip('\n') for label in f]
def _test_export_labels(self, model):
labels_output_file = os.path.join(self.get_temp_dir(), 'labels.txt')
model.export(self.get_temp_dir(), export_format=ExportFormat.LABEL)
labels = self._load_labels(labels_output_file)
self.assertEqual(labels, ['neg', 'pos'])
def _test_export_vocab(self, model):
vocab_output_file = os.path.join(self.get_temp_dir(), 'vocab.txt')
model.export(self.get_temp_dir(), export_format=ExportFormat.VOCAB)
word_index = self._load_vocab(vocab_output_file)
expected_predefined = [['<PAD>', '0'], ['<START>', '1'], ['<UNKNOWN>', '2']]
self.assertEqual(word_index[:3], expected_predefined)
expected_vocab = ['bad', 'good', 'really', 'super']
actual_vocab = sorted([word for word, index in word_index[3:]])
self.assertEqual(actual_vocab, expected_vocab)
expected_index = ['3', '4', '5', '6']
actual_index = [index for word, index in word_index[3:]]
self.assertEqual(actual_index, expected_index)
def _test_export_to_tflite(self,
model,
threshold=1.0,
atol=1e-04,
expected_json_file=None):
tflite_output_file = os.path.join(self.get_temp_dir(), 'model.tflite')
model.export(
self.get_temp_dir(),
export_format=ExportFormat.TFLITE,
quantization_config=None,
export_metadata_json_file=expected_json_file is not None)
self.assertTrue(tf.io.gfile.exists(tflite_output_file))
self.assertGreater(os.path.getsize(tflite_output_file), 0)
result = model.evaluate_tflite(tflite_output_file, self.test_data)
self.assertGreaterEqual(result['accuracy'], threshold)
spec = model.model_spec
if isinstance(spec, text_spec.AverageWordVecModelSpec):
random_inputs = np.random.randint(
low=0, high=len(spec.vocab), size=(1, spec.seq_len), dtype=np.int32)
elif isinstance(spec, text_spec.BertClassifierModelSpec):
input_word_ids = np.random.randint(
low=0,
high=len(spec.tokenizer.vocab),
size=(1, spec.seq_len),
dtype=np.int32)
input_mask = np.random.randint(
low=0, high=2, size=(1, spec.seq_len), dtype=np.int32)
input_type_ids = np.random.randint(
low=0, high=2, size=(1, spec.seq_len), dtype=np.int32)
random_inputs = (input_word_ids, input_mask, input_type_ids)
else:
raise ValueError('Unsupported model_spec type: %s' % str(type(spec)))
self.assertTrue(
test_util.is_same_output(
tflite_output_file, model.model, random_inputs, spec, atol=atol))
if expected_json_file is not None:
json_output_file = os.path.join(self.get_temp_dir(), 'model.json')
self.assertTrue(os.path.isfile(json_output_file))
self.assertGreater(os.path.getsize(json_output_file), 0)
expected_json_file = test_util.get_test_data_path(expected_json_file)
self.assertTrue(filecmp.cmp(json_output_file, expected_json_file))
def _test_export_to_saved_model(self, model):
save_model_output_path = os.path.join(self.get_temp_dir(), 'saved_model')
model.export(self.get_temp_dir(), export_format=ExportFormat.SAVED_MODEL)
self.assertTrue(os.path.isdir(save_model_output_path))
self.assertNotEmpty(os.listdir(save_model_output_path))
def _test_export_to_tfjs(self, model):
output_path = os.path.join(self.get_temp_dir(), 'tfjs')
model.export(
self.get_temp_dir(),
export_format=[ExportFormat.TFLITE, ExportFormat.TFJS])
self.assertTrue(os.path.isdir(output_path))
self.assertNotEmpty(os.listdir(output_path))
def _test_export_to_tflite_quant(self, model, model_size, err_ratio=0.08):
tflite_filename = 'model_quant.tflite'
tflite_output_file = os.path.join(self.get_temp_dir(), tflite_filename)
model.export(
self.get_temp_dir(),
tflite_filename=tflite_filename,
export_format=ExportFormat.TFLITE)
self.assertTrue(tf.io.gfile.exists(tflite_output_file))
err = model_size * err_ratio
self.assertNear(os.path.getsize(tflite_output_file), model_size, err)
if __name__ == '__main__':
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
compat.setup_tf_behavior(tf_version=2)
tf.test.main()
|
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
AI-related functions
"""
# XXX take penalty into account, in a smart way:
# threshold based on number of remaining blocks ?
# threshold based on the number of variations that can be played
# for each block (a block which can only be placed in one way
# might be interesting to play immediately)
from numpy.core import hstack, vstack
from numpy.core.numeric import array, argsort, mean
from numpy.core.fromnumeric import argmax
from matplotlib.mlab import find
from itertools import imap, ifilter, izip
from blokus3d.utils import randomFromList, fst, snd, randint, unik
randomMove = lambda gs : randomFromList(gs.legalMoves())
# Fitness functions
def relativeBaseScoreFitness(gs):
scores = gs.baseScores()
currentPlayer = (gs.nextPlayer-1) % gs.nbPlayers
# Remove player score from the scores
bestOpponentScore = max(hstack([scores[:currentPlayer],\
scores[currentPlayer+1:]]))
return scores[currentPlayer] - bestOpponentScore
def libertiesFitness(gs):
"""Each legal cube amounts to its square height"""
return sum(cube[2]**2 for cube in \
gs.libertyCubes([(gs.nextPlayer-1) % gs.nbPlayers]))
def penaltyFitness(gs):
return -gs.penalty()[(gs.nextPlayer-1) % gs.nbPlayers]
def mixtureFitness(weightedFitFuns):
"""Meta-fitness function that make a mix between several"""
return lambda gs: sum(weight*fitFun(gs) \
for (weight,fitFun) in weightedFitFuns)
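# Example (sketch): weigh relative board score over liberties when ranking moves, e.g.
#   fit = mixtureFitness([(0.7, relativeBaseScoreFitness), (0.3, libertiesFitness)])
# gives fit(gs) == 0.7*relativeBaseScoreFitness(gs) + 0.3*libertiesFitness(gs)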
def bestMoves(gs, fitFun, moves=None):
"""Select the best legal moves according to
a one-step fitness function"""
if moves is None:
moves = gs.legalMoves()
if moves == []:
return [None], None
fitnesses = array([fitFun(gs.clone().playMove(move)) \
for move in moves])
bestFitness = max(fitnesses)
# TODO should use argsort instead
selectedMoves = map(moves.__getitem__,find(fitnesses==bestFitness))
return selectedMoves, bestFitness
# Heuristics based on fitness functions
def oneStepHeuristic(gs, fitFuns, verbose=False):
"""Find the move that maximize the fitness
after this move."""
moves = gs.legalMoves()
for fitFun in fitFuns:
moves, fitness = bestMoves(gs, fitFun, moves=moves)
if verbose:
print "best %s : %d (%d moves)" \
% (str(fitFun), fitness, len(moves))
if len(moves)==1:
return moves[0]
return moves[0]
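# Example (sketch): try the fitness functions in decreasing order of importance, each one
# only breaking ties left by the previous one, e.g.
#   move = oneStepHeuristic(gs, [relativeBaseScoreFitness, libertiesFitness, penaltyFitness])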
mixtureOneStepHeuristic = lambda gs, weightedFitFuns:\
oneStepHeuristic(gs, [mixtureFitness(weightedFitFuns)])
def threeHeuristicsMC(gs, verbose=False):
"""Find the best move according to three heuristics,
then evaluate each in depth with Monte-Carlo method"""
lm = gs.legalMoves()
if lm == []:
yield None
raise StopIteration
if verbose:
print "Picking a best next move according to each criterion"
moves = map(lambda fitFun: fst(fst(bestMoves(gs,fitFun,moves=lm))),\
[relativeBaseScoreFitness,\
libertiesFitness,\
penaltyFitness])
# Some moves might be the same
moves = unik(moves)
if verbose:
print "Candidate moves are :",moves
assert moves != []
if len(moves)==1:
yield moves[0]
raise StopIteration
if verbose:
print "Now performing MC evaluation"
for move in monteCarloHeuristic(gs,moves=moves):
yield move
# Meta-heuristic
# TODO add depth so it can be maximini...maximinimax !
def minimax(gs, fitFun):
# Get the legal moves and return immediately if there are only one
moves = gs.legalMoves()
lnMoves = len(moves)
if lnMoves==1:
yield moves[0]
raise StopIteration
# Get the one-step fitness for each possible move
fitnesses = []
nextGss = []
for move in moves:
nextGs = gs.clone().playMove(move)
fitnesses.append(fitFun(nextGs))
nextGss.append(nextGs)
bestMovesOrder = argsort(fitnesses)[::-1]
bestSoFar = moves[bestMovesOrder[0]]
yield bestSoFar
# Find the one-step that minimize the opponent fitness (second step)
leastBestEnemyFitness = snd(bestMoves(nextGss[bestMovesOrder[0]].clone(),fitFun))
for num,idx in enumerate(bestMovesOrder):
print "processing move %d/%d (fitness %d)" % (num+1,lnMoves,fitnesses[idx])
bestEnemyFitness = snd(bestMoves(nextGss[idx].clone(),fitFun))
if bestEnemyFitness < leastBestEnemyFitness:
bestSoFar = moves[idx]
leastBestEnemyFitness = bestEnemyFitness
print "new least best enemy fitness : %d" % leastBestEnemyFitness
# TODO the yield is only here to avoid blocking on timeLimit,
# must be removed when timeLimit works asynchronously
yield bestSoFar
# Some other functions
def monteCarloScores(gs, maxDepth=None):
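# Plays uniformly random legal moves (a random playout) until the game is over,
# or until maxDepth moves have been made, and returns the resulting final scores.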
# Copy the game state, so we can keep the original
gs = gs.clone()
depth = 0
while not gs.isOver() and (maxDepth is None or depth < maxDepth):
lm = gs.legalMoves()
if lm != []:
choice = randint(len(lm))
gs.playMove(lm[choice])
depth += 1
else:
gs.playMove(None) # passing move
return gs.finalScores()
def monteCarloHeuristic(gs, moves=None, maxDepth=None, verbose=False):
"""Determines the best move using a Monte-Carlo estimation
of the final scores"""
if moves is None:
moves = gs.legalMoves()
nextStates = map(lambda move : gs.clone().playMove(move), moves)
relativeScoreGrid = [[] for _ in xrange(len(moves))]
for trial in xrange(1000000):
for m in xrange(len(moves)):
scores = monteCarloScores(nextStates[m], maxDepth=maxDepth)
if verbose:
print "trial %d, move %d/%d, scores : %s" \
% (trial+1, m+1, len(moves), scores)
relativeScoreGrid[m].append(scores)
# for each move, compute the margin of each winning trial
winningMargin = [ map(lambda z : z[1][-1]-z[1][-2], \
ifilter(lambda y: y[0][-1]==gs.nextPlayer, \
izip(imap(argsort,x),x))) \
for x in relativeScoreGrid ]
choice = argmax(map(mean,winningMargin))
if verbose:
print "best move so far is %d, mean margin = %f (wins %d/%d)" \
% (choice+1, mean(winningMargin[choice]), \
len(winningMargin[choice]), trial+1)
yield moves[choice]
def bruteForceTree(gs, root=(None,[]), saveGs=False, depth=2):
if depth <= 0:
return root
knownMoves = []
if root[0] is not None:
# Get the next player moves already registered
# through children nodes
if len(root[1]) > 0 and root[1][0]['move'] is not None:
knownMoves = vstack(node[0]['move'] for node in root[1])
# Get the legal moves of the next player
lm = gs.legalMoves()
if lm == []:
lm = [None]
else:
# Filter moves that are already known
lm = [move for move in lm if move not in knownMoves]
# Add nodes for those which are new
for move in lm:
dic = {'player':gs.nextPlayer,'move':move}
root[1].append( (dic,[]) )
# Evaluate each move and perform recursion
for i,node in enumerate(root[1]):
# Play the move
move = node[0]['move']
nextGs = gs.clone().playMove(move)
if saveGs:
node[0]['gs'] = nextGs
# Evaluate the scores
node[0]['baseScores'] = nextGs.baseScores()
node[0]['penalty'] = nextGs.penalty()
# Recursion
nextGs.bruteForceTree(root=node,saveGs=saveGs,depth=depth-1)
# DEBUG
if depth==2:
print "done node %d/%d" % (i,len(root[1]))
return root
# def bruteForceArray(gs,maxDepth,depth=0,arr=None,nbNodes=[]):
# """same as bruteForceTree but using an array to represent the tree
# m1 scores
# m2 scores
# m3 scores
# m1m1 scores
# m1m2 scores
# m2m1 scores
# m3m1 scores
# m3m2 scores
# m1m1m1 scores
# ...
# nbNodes = [3,2,1,2,...]
# """
# if depth >= maxDepth:
# return
# # Get the legal moves of the current player == depth
# lm = gs.legalMoves(depth)
# if lm == []:
# lm = [None]
# nbNodes.append(len(lm))
# if depth==0:
# # Estimate the necessary array size :
# # assuming that the nb of moves decrease by a half with each turn
# arr = empty(( sum(int(nbNodes*(1/d)) for d in xrange(1,maxDepth+1)), 5+nbPlayers ),dtype=int16)
# offset = 0 # TODO calculer offset
# for i in xrange(len(lm)):
# if move != None:
# arr[offset+i,:3] = lm[i][0]
# arr[offset+i,3] = lm[i][1]
# arr[offset+i,4] = lm[i][2]
# else:
# arr[offset+i,3] = -1 # blkId = -1 means passing
# # Evaluate each move and perform recursion
# nextGs = gs.clone().playMove(player,lm[i])
# # Evaluate the scores
# arr[offset+i,5:(5+nbPlayers)] = nextGs.baseScores()
# arr[offset+i,(5+nbPlayers):(5+nbPlayers*2)] = nextGs.penalty()
# # Recursion
# #nextGs.bruteForceArray(arr=arr,nbNodes=nbNodes,maxDepth=maxDepth)
|
|
# -*- coding: utf-8 -*-
# LICENCE
from __future__ import absolute_import, division, print_function, unicode_literals
from six.moves import zip
import numpy as np
import utool as ut
import ubelt as ub
import cv2
def bboxes_from_vert_list(verts_list, castint=False):
""" Fit the bounding polygon inside a rectangle """
return [bbox_from_verts(verts, castint=castint) for verts in verts_list]
def verts_list_from_bboxes_list(bboxes_list):
""" Create a four-vertex polygon from the bounding rectangle """
return [verts_from_bbox(bbox) for bbox in bboxes_list]
def verts_from_bbox(bbox, close=False):
r"""
Args:
bbox (tuple): bounding box in the format (x, y, w, h)
close (bool): (default = False)
Returns:
list: verts
CommandLine:
python -m vtool_ibeis.geometry --test-verts_from_bbox
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> bbox = (10, 10, 50, 50)
>>> close = False
>>> verts = verts_from_bbox(bbox, close)
>>> result = ('verts = %s' % (str(verts),))
>>> print(result)
verts = ((10, 10), (60, 10), (60, 60), (10, 60))
"""
x1, y1, w, h = bbox
x2 = (x1 + w)
y2 = (y1 + h)
if close:
# Close the vertices list (for drawing lines)
verts = ((x1, y1), (x2, y1), (x2, y2), (x1, y2), (x1, y1))
else:
verts = ((x1, y1), (x2, y1), (x2, y2), (x1, y2))
return verts
def bbox_from_verts(verts, castint=False):
x = min(v[0] for v in verts)
y = min(v[1] for v in verts)
w = max(v[0] for v in verts) - x
h = max(v[1] for v in verts) - y
if castint:
return (int(x), int(y), int(w), int(h))
else:
return (x, y, w, h)
def draw_border(img_in, color=(0, 128, 255), thickness=2, out=None):
r"""
Args:
img_in (ndarray[uint8_t, ndim=2]): image data
color (tuple): in bgr
thickness (int):
out (None):
CommandLine:
python -m vtool_ibeis.geometry --test-draw_border --show
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> import vtool_ibeis as vt
>>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg'))
>>> color = (0, 128, 255)
>>> thickness = 20
>>> out = None
>>> # xdoctest: +REQUIRES(module:plottool_ibeis)
>>> img = draw_border(img_in, color, thickness, out)
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> pt.imshow(img)
>>> pt.show_if_requested()
"""
h, w = img_in.shape[0:2]
#verts = verts_from_bbox((0, 0, w, h))
#verts = verts_from_bbox((0, 0, w - 1, h - 1))
half_thickness = thickness // 2
verts = verts_from_bbox((half_thickness, half_thickness,
w - thickness, h - thickness))
# FIXME: adjust verts and draw lines here to fill in the corners correctly
img = draw_verts(img_in, verts, color=color, thickness=thickness, out=out)
return img
def draw_verts(img_in, verts, color=(0, 128, 255), thickness=2, out=None):
r"""
Args:
img_in (?):
verts (?):
color (tuple):
thickness (int):
Returns:
ndarray[uint8_t, ndim=2]: img - image data
CommandLine:
python -m vtool_ibeis.geometry --test-draw_verts --show
python -m vtool_ibeis.geometry --test-draw_verts:0 --show
python -m vtool_ibeis.geometry --test-draw_verts:1 --show
References:
http://docs.opencv.org/modules/core/doc/drawing_functions.html#line
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> import vtool_ibeis as vt
>>> # build test data
>>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg'))
>>> verts = ((10, 10), (10, 100), (100, 100), (100, 10))
>>> color = (0, 128, 255)
>>> thickness = 2
>>> # execute function
>>> out = None
>>> img = draw_verts(img_in, verts, color, thickness, out)
>>> assert img_in is not img
>>> assert out is not img
>>> assert out is not img_in
>>> # verify results
>>> # xdoctest: +REQUIRES(--show)
>>> pt.imshow(img)
>>> pt.show_if_requested()
Example1:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> import vtool_ibeis as vt
>>> # build test data
>>> img_in = vt.imread(ut.grab_test_imgpath('carl.jpg'))
>>> verts = ((10, 10), (10, 100), (100, 100), (100, 10))
>>> color = (0, 128, 255)
>>> thickness = 2
>>> out = img_in
>>> # execute function
>>> img = draw_verts(img_in, verts, color, thickness, out)
>>> assert img_in is img, 'should be in place'
>>> assert out is img, 'should be in place'
>>> # verify results
>>> # xdoctest: +REQUIRES(--show)
>>> pt.imshow(img)
>>> pt.show_if_requested()
out = img_in = np.zeros((500, 500, 3), dtype=np.uint8)
"""
if out is None:
out = np.copy(img_in)
if isinstance(verts, np.ndarray):
verts = verts.tolist()
connect = True
if connect:
line_list_sequence = zip(verts[:-1], verts[1:])
line_tuple_sequence = ((tuple(p1_), tuple(p2_)) for (p1_, p2_) in line_list_sequence)
cv2.line(out, tuple(verts[0]), tuple(verts[-1]), color, thickness)
for (p1, p2) in line_tuple_sequence:
cv2.line(out, p1, p2, color, thickness)
#print('p1, p2: (%r, %r)' % (p1, p2))
else:
for count, p in enumerate(verts, start=1):
cv2.circle(out, tuple(p), count, color, thickness=1)
return out
def closest_point_on_line_segment(p, e1, e2):
"""
Finds the closest point from p on line segment (e1, e2)
Args:
p (ndarray): an xy point
e1 (ndarray): the first xy endpoint of the segment
e2 (ndarray): the second xy endpoint of the segment
Returns:
ndarray: pt_on_seg - the closest xy point on (e1, e2) from p
References:
http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
CommandLine:
python -m vtool_ibeis.geometry --exec-closest_point_on_line_segment --show
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> import vtool_ibeis as vt
>>> #bbox = np.array([10, 10, 10, 10], dtype=np.float)
>>> #verts_ = np.array(vt.verts_from_bbox(bbox, close=True))
>>> #R = vt.rotation_around_bbox_mat3x3(vt.TAU / 3, bbox)
>>> #verts = vt.transform_points_with_homography(R, verts_.T).T
>>> verts = np.array([[ 21.83012702, 13.16987298],
>>> [ 16.83012702, 21.83012702],
>>> [ 8.16987298, 16.83012702],
>>> [ 13.16987298, 8.16987298],
>>> [ 21.83012702, 13.16987298]])
>>> rng = np.random.RandomState(0)
>>> p_list = rng.rand(64, 2) * 20 + 5
>>> close_pts = np.array([closest_point_on_vert_segments(p, verts) for p in p_list])
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> pt.ensureqt()
>>> pt.plt.plot(p_list.T[0], p_list.T[1], 'ro', label='original point')
>>> pt.plt.plot(close_pts.T[0], close_pts.T[1], 'rx', label='closest point on shape')
>>> for x, y in list(zip(p_list, close_pts)):
>>> z = np.array(list(zip(x, y)))
>>> pt.plt.plot(z[0], z[1], 'r--')
>>> pt.plt.legend()
>>> pt.plt.plot(verts.T[0], verts.T[1], 'b-')
>>> pt.plt.xlim(0, 30)
>>> pt.plt.ylim(0, 30)
>>> pt.plt.axis('equal')
>>> ut.show_if_requested()
"""
# shift e1 to origin
de = (dx, dy) = e2 - e1
# make point vector wrt origin
pv = p - e1
# Project pv onto de
mag = np.linalg.norm(de)
pt_on_line_ = pv.dot(de / mag) * de / mag
# Check if normalized dot product is between 0 and 1
# Determines if pt is between 0,0 and de
t = de.dot(pt_on_line_) / mag ** 2
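# (equivalently t = pv.dot(de) / mag ** 2, the normalized projection of pv onto de)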
# t is an interpolation factor indicating how far past the line segment we
# are. We are on the line segment if it is in the range 0 to 1.
if t < 0:
pt_on_seg = e1
elif t > 1:
pt_on_seg = e2
else:
pt_on_seg = pt_on_line_ + e1
return pt_on_seg
def distance_to_lineseg(p, e1, e2):
import vtool_ibeis as vt
close_pt = vt.closest_point_on_line_segment(p, e1, e2)
dist_to_lineseg = vt.L2(p, close_pt)
return dist_to_lineseg
def closest_point_on_line(p, e1, e2):
"""
e1 and e2 define two points on the line.
Does not clip to the segment.
CommandLine:
python -m vtool_ibeis.geometry closest_point_on_line --show
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> import vtool_ibeis as vt
>>> verts = np.array([[ 21.83012702, 13.16987298],
>>> [ 16.83012702, 21.83012702],
>>> [ 8.16987298, 16.83012702],
>>> [ 13.16987298, 8.16987298],
>>> [ 21.83012702, 13.16987298]])
>>> rng = np.random.RandomState(0)
>>> p_list = rng.rand(64, 2) * 20 + 5
>>> close_pts = []
>>> for p in p_list:
>>> candidates = [closest_point_on_line(p, e1, e2) for e1, e2 in ut.itertwo(verts)]
>>> dists = np.array([vt.L2_sqrd(p, new_pt) for new_pt in candidates])
>>> close_pts.append(candidates[dists.argmin()])
>>> close_pts = np.array(close_pts)
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> pt.ensureqt()
>>> pt.plt.plot(p_list.T[0], p_list.T[1], 'ro', label='original point')
>>> pt.plt.plot(close_pts.T[0], close_pts.T[1], 'rx', label='closest point on shape')
>>> for x, y in list(zip(p_list, close_pts)):
>>> z = np.array(list(zip(x, y)))
>>> pt.plt.plot(z[0], z[1], 'r--')
>>> pt.plt.legend()
>>> pt.plt.plot(verts.T[0], verts.T[1], 'b-')
>>> pt.plt.xlim(0, 30)
>>> pt.plt.ylim(0, 30)
>>> pt.plt.axis('equal')
>>> ut.show_if_requested()
"""
# shift e1 to origin
de = (dx, dy) = e2 - e1
# make point vector wrt origin
pv = p - e1
# Project pv onto de
mag = np.linalg.norm(de)
pt_on_line_ = pv.dot(de / mag) * de / mag
pt_on_line = pt_on_line_ + e1
return pt_on_line
def closest_point_on_vert_segments(p, verts):
import vtool_ibeis as vt
candidates = [closest_point_on_line_segment(p, e1, e2) for e1, e2 in ut.itertwo(verts)]
dists = np.array([vt.L2_sqrd(p, new_pt) for new_pt in candidates])
new_pts = candidates[dists.argmin()]
return new_pts
def closest_point_on_bbox(p, bbox):
"""
Example1:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> p_list = np.array([[19, 7], [7, 14], [14, 11], [8, 7], [23, 21]], dtype=np.float)
>>> bbox = np.array([10, 10, 10, 10], dtype=np.float)
>>> [closest_point_on_bbox(p, bbox) for p in p_list]
"""
import vtool_ibeis as vt
verts = np.array(vt.verts_from_bbox(bbox, close=True))
new_pts = closest_point_on_vert_segments(p, verts)
return new_pts
def bbox_from_xywh(xy, wh, xy_rel_pos=[0, 0]):
""" need to specify xy_rel_pos if xy is not in tl already """
to_tlx = xy_rel_pos[0] * wh[0]
to_tly = xy_rel_pos[1] * wh[1]
tl_x = xy[0] - to_tlx
tl_y = xy[1] - to_tly
bbox = [tl_x, tl_y, wh[0], wh[1]]
return bbox
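# Example: bbox_from_xywh((50, 50), (20, 10), xy_rel_pos=[.5, .5]) -> [40.0, 45.0, 20, 10],
# i.e. here xy is the box center and gets shifted back to the top-left corner.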
def extent_from_verts(verts):
bbox = bbox_from_verts(verts)
extent = extent_from_bbox(bbox)
return extent
def union_extents(extents):
extents = np.array(extents)
xmin = extents.T[0].min()
xmax = extents.T[1].max()
ymin = extents.T[2].min()
ymax = extents.T[3].max()
return (xmin, xmax, ymin, ymax)
def extent_from_bbox(bbox):
"""
Args:
bbox (ndarray): tl_x, tl_y, w, h
Returns:
extent (ndarray): tl_x, br_x, tl_y, br_y
CommandLine:
xdoctest -m ~/code/vtool_ibeis/vtool_ibeis/geometry.py extent_from_bbox
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> bbox = [0, 0, 10, 10]
>>> extent = extent_from_bbox(bbox)
>>> result = ('extent = %s' % (ub.repr2(extent, nl=0),))
>>> print(result)
extent = [0, 10, 0, 10]
"""
tl_x, tl_y, w, h = bbox
br_x = tl_x + w
br_y = tl_y + h
extent = [tl_x, br_x, tl_y, br_y]
return extent
#def tlbr_from_bbox(bbox):
def bbox_from_extent(extent):
"""
Args:
extent (ndarray): tl_x, br_x, tl_y, br_y
Returns:
bbox (ndarray): tl_x, tl_y, w, h
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> extent = [0, 10, 0, 10]
>>> bbox = bbox_from_extent(extent)
>>> result = ('bbox = %s' % (ub.repr2(bbox, nl=0),))
>>> print(result)
bbox = [0, 0, 10, 10]
"""
tl_x, br_x, tl_y, br_y = extent
w = br_x - tl_x
h = br_y - tl_y
bbox = [tl_x, tl_y, w, h]
return bbox
def bbox_from_center_wh(center_xy, wh):
return bbox_from_xywh(center_xy, wh, xy_rel_pos=[.5, .5])
def bbox_center(bbox):
(x, y, w, h) = bbox
centerx = x + (w / 2)
centery = y + (h / 2)
return centerx, centery
def get_pointset_extents(pts):
minx, miny = pts.min(axis=0)
maxx, maxy = pts.max(axis=0)
bounds = minx, maxx, miny, maxy
return bounds
def get_pointset_extent_wh(pts):
minx, miny = pts.min(axis=0)
maxx, maxy = pts.max(axis=0)
extent_w = maxx - minx
extent_h = maxy - miny
return extent_w, extent_h
def cvt_bbox_xywh_to_pt1pt2(xywh, sx=1.0, sy=1.0, round_=True):
""" Converts bbox to thumb format with a scale factor"""
import vtool_ibeis as vt
(x1, y1, _w, _h) = xywh
x2 = (x1 + _w)
y2 = (y1 + _h)
if round_:
pt1 = (vt.iround(x1 * sx), vt.iround(y1 * sy))
pt2 = (vt.iround(x2 * sx), vt.iround(y2 * sy))
else:
pt1 = ((x1 * sx), (y1 * sy))
pt2 = ((x2 * sx), (y2 * sy))
return (pt1, pt2)
def scale_bbox(bbox, sx, sy=None):
if sy is None:
sy = sx
from vtool_ibeis import linalg
centerx, centery = bbox_center(bbox)
S = linalg.scale_around_mat3x3(sx, sy, centerx, centery)
verts = np.array(verts_from_bbox(bbox))
vertsT = linalg.transform_points_with_homography(S, verts.T).T
bboxT = bbox_from_verts(vertsT)
return bboxT
def scale_extents(extents, sx, sy=None):
"""
Args:
extent (ndarray): tl_x, br_x, tl_y, br_y
"""
bbox = bbox_from_extent(extents)
bboxT = scale_bbox(bbox, sx, sy)
extentsT = extent_from_bbox(bboxT)
return extentsT
def scaled_verts_from_bbox_gen(bbox_list, theta_list, sx=1, sy=1):
r"""
Helps with drawing scaled bounding boxes on thumbnails
Args:
bbox_list (list): bboxes in x,y,w,h format
theta_list (list): rotation of bounding boxes
sx (float): x scale factor
sy (float): y scale factor
Yields:
new_verts - vertices of scaled bounding box for every input
CommandLine:
python -m vtool_ibeis.image --test-scaled_verts_from_bbox_gen
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> # build test data
>>> bbox_list = [(10, 10, 100, 100)]
>>> theta_list = [0]
>>> sx = .5
>>> sy = .5
>>> # execute function
>>> new_verts_list = list(scaled_verts_from_bbox_gen(bbox_list, theta_list, sx, sy))
>>> result = str(new_verts_list)
>>> # verify results
>>> print(result)
[[[5, 5], [55, 5], [55, 55], [5, 55], [5, 5]]]
"""
# TODO: input verts support and better name
for bbox, theta in zip(bbox_list, theta_list):
new_verts = scaled_verts_from_bbox(bbox, theta, sx, sy)
yield new_verts
def scaled_verts_from_bbox(bbox, theta, sx, sy):
"""
Helps with drawing scaled bounding boxes on thumbnails
"""
if bbox is None:
return None
from vtool_ibeis import linalg
# Transformation matrixes
R = linalg.rotation_around_bbox_mat3x3(theta, bbox)
S = linalg.scale_mat3x3(sx, sy)
# Get vertices of the annotation polygon
verts = verts_from_bbox(bbox, close=True)
# Rotate and transform to thumbnail space
xyz_pts = linalg.add_homogenous_coordinate(np.array(verts).T)
trans_pts = linalg.remove_homogenous_coordinate(S.dot(R).dot(xyz_pts))
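# Points are column vectors, so the rotation R is applied first, then the thumbnail scale S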
new_verts = np.round(trans_pts).astype(np.int32).T.tolist()
return new_verts
def point_inside_bbox(point, bbox):
r"""
Flags points that are strictly inside a bounding box.
Points on the boundary are not considered inside.
Args:
point (ndarray): one or more points to test (2xN)
bbox (tuple): a bounding box in (x, y, w, h) format
Returns:
bool or ndarray: True if the point is in the bbox
CommandLine:
python -m vtool_ibeis.geometry point_inside_bbox --show
Example:
>>> # ENABLE_DOCTEST
>>> from vtool_ibeis.geometry import * # NOQA
>>> point = np.array([
>>> [3, 2], [4, 1], [2, 3], [1, 1], [0, 0],
>>> [4, 9.5], [9, 9.5], [7, 2], [7, 8], [9, 3]
>>> ]).T
>>> bbox = (3, 2, 5, 7)
>>> flag = point_inside_bbox(point, bbox)
>>> flag = flag.astype(np.int)
>>> result = ('flag = %s' % (ub.repr2(flag),))
>>> print(result)
>>> # xdoctest: +REQUIRES(--show)
>>> import plottool_ibeis as pt
>>> verts = np.array(verts_from_bbox(bbox, close=True))
>>> pt.plot(verts.T[0], verts.T[1], 'b-')
>>> pt.plot(point[0][flag], point[1][flag], 'go')
>>> pt.plot(point[0][~flag], point[1][~flag], 'rx')
>>> pt.plt.xlim(0, 10); pt.plt.ylim(0, 10)
>>> pt.show_if_requested()
flag = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0])
"""
x, y = point
tl_x, br_x, tl_y, br_y = extent_from_bbox(bbox)
inside_x = np.logical_and(tl_x < x, x < br_x)
inside_y = np.logical_and(tl_y < y, y < br_y)
flag = np.logical_and(inside_x, inside_y)
return flag
if __name__ == '__main__':
"""
CommandLine:
xdoctest -m vtool_ibeis.geometry
"""
import xdoctest
xdoctest.doctest_module(__file__)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUForwardingPathListEntry(NURESTObject):
""" Represents a ForwardingPathListEntry in the VSD
Notes:
Forwarding path list entry to be associated with a forwarding path list for L4-based policy to PAT / IKE to underlay.
"""
__rest_name__ = "forwardingpathlistentry"
__resource_name__ = "forwardingpathlistentries"
## Constants
CONST_FORWARDING_ACTION_IKE = "IKE"
CONST_FORWARDING_ACTION_UNDERLAY_ROUTE = "UNDERLAY_ROUTE"
CONST_FORWARDING_ACTION_UNDERLAY_PAT = "UNDERLAY_PAT"
CONST_FC_OVERRIDE_NONE = "NONE"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_UPLINK_PREFERENCE_PRIMARY = "PRIMARY"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_UPLINK_PREFERENCE_SECONDARY = "SECONDARY"
CONST_FC_OVERRIDE_H = "H"
CONST_FC_OVERRIDE_F = "F"
CONST_FC_OVERRIDE_G = "G"
CONST_FC_OVERRIDE_D = "D"
CONST_FC_OVERRIDE_E = "E"
CONST_FC_OVERRIDE_B = "B"
CONST_FC_OVERRIDE_C = "C"
CONST_FC_OVERRIDE_A = "A"
def __init__(self, **kwargs):
""" Initializes a ForwardingPathListEntry instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> forwardingpathlistentry = NUForwardingPathListEntry(id=u'xxxx-xxx-xxx-xxx', name=u'ForwardingPathListEntry')
>>> forwardingpathlistentry = NUForwardingPathListEntry(data=my_dict)
"""
super(NUForwardingPathListEntry, self).__init__()
# Read/Write Attributes
self._fc_override = None
self._last_updated_by = None
self._entity_scope = None
self._forwarding_action = None
self._uplink_preference = None
self._priority = None
self._external_id = None
self.expose_attribute(local_name="fc_override", remote_name="FCOverride", attribute_type=str, is_required=False, is_unique=False, choices=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'NONE'])
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="forwarding_action", remote_name="forwardingAction", attribute_type=str, is_required=True, is_unique=False, choices=[u'IKE', u'UNDERLAY_PAT', u'UNDERLAY_ROUTE'])
self.expose_attribute(local_name="uplink_preference", remote_name="uplinkPreference", attribute_type=str, is_required=False, is_unique=False, choices=[u'PRIMARY', u'SECONDARY'])
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def fc_override(self):
""" Get fc_override value.
Notes:
Value of the Service Class to be overridden in the packet when the match conditions are satisfied.
This attribute is named `FCOverride` in VSD API.
"""
return self._fc_override
@fc_override.setter
def fc_override(self, value):
""" Set fc_override value.
Notes:
Value of the Service Class to be overridden in the packet when the match conditions are satisfied.
This attribute is named `FCOverride` in VSD API.
"""
self._fc_override = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def forwarding_action(self):
""" Get forwarding_action value.
Notes:
Type of forwarding action associated with this entry.
This attribute is named `forwardingAction` in VSD API.
"""
return self._forwarding_action
@forwarding_action.setter
def forwarding_action(self, value):
""" Set forwarding_action value.
Notes:
Type of forwarding action associated with this entry.
This attribute is named `forwardingAction` in VSD API.
"""
self._forwarding_action = value
@property
def uplink_preference(self):
""" Get uplink_preference value.
Notes:
Type of forwarding uplink preference associated with this entry. In case of forwardingAction "IKE", uplinkPreference must not be set.
This attribute is named `uplinkPreference` in VSD API.
"""
return self._uplink_preference
@uplink_preference.setter
def uplink_preference(self, value):
""" Set uplink_preference value.
Notes:
Type of forwarding uplink preference associated with this entry. In case of forwardingAction "IKE", uplinkPreference must not be set.
This attribute is named `uplinkPreference` in VSD API.
"""
self._uplink_preference = value
@property
def priority(self):
""" Get priority value.
Notes:
Autogenerated priority of a Forwarding Path List Entry for a given Forwarding Path List.
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
Autogenerated priority of a Forwarding Path List Entry for a given Forwarding Path List.
"""
self._priority = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
|
|
# XXX Backquoted words should have precedence over grouping
# XXX Give warning when starting new block after calls were made
# XXX Give warning when def-block contains 'call' but no 'push'
# XXX Give warning when def-block contains code but no 'ret'
# XXX Give warning when a variable is not both written and read
from asm import *
import string
import sys
from pathlib import Path
class Program:
def __init__(self, name, forRom=True):
self.name = name # For defining unique labels in global symbol table
self.forRom = forRom # Inject trampolines if compiling for ROM XXX why not do that outside?
self.comments = [] # Stack of line numbers
self.lineNumber = 0
self.lastWord = None
self.filename = None
self.openBlocks = [0] # Outside first block is 0
self.nextBlockId = 1
self.loops = {} # blockId -> address after `do'
self.elses = {} # blockId -> count of `else'
self.defs = {} # blockId -> address of last `def'
self.vars = {} # name -> address (GCL variables)
self.segStart = None
self.vPC = None
self.segId = 0
self.version = None # Must be first word 'gcl<N>'
self.execute = None
self.needPatch = False
self.lengths = {} # block -> length, or var -> length
# XXX Provisional method to load mnemonics
try:
loadBindings(Path('Core') / 'v6502.json')
except FileNotFoundError:
loadBindings(Path(__file__).parent / 'v6502.json')
def org(self, address):
"""Set start address"""
self.closeSegment()
# Don't open new segment before the first byte comes
self.segStart = address
self.vPC = address
page = address & ~255
self.segEnd = page + (250 if 0x100 <= page <= 0x400 else 256)
def line(self, line):
"""Process a line by tokenizing and processing the words"""
self.lineNumber += 1
nextWord = ''
for nextChar in line:
if len(self.comments) > 0:
# Inside comments anything goes
if nextChar == '{': self.comments.append(self.lineNumber)
if nextChar == '}': self.comments.pop()
elif nextChar not in '{}[]':
if nextChar.isspace():
self.word(nextWord)
nextWord = ''
else:
nextWord += nextChar
else:
self.word(nextWord)
nextWord = ''
if nextChar == '{': self.comments.append(self.lineNumber)
elif nextChar == '}': self.error('Spurious %s' % repr(nextChar))
elif nextChar == '[':
self.openBlocks.append(self.nextBlockId)
self.elses[self.nextBlockId] = 0
self.nextBlockId += 1
elif nextChar == ']':
if len(self.openBlocks) <= 1:
self.error('Block close without open')
b = self.openBlocks.pop()
define('__%s_%d_cond%d__' % (self.name, b, self.elses[b]), prev(self.vPC))
del self.elses[b]
if b in self.defs:
self.lengths[self.thisBlock()] = self.vPC - self.defs[b] + 2
define('__%s_%#04x_def__' % (self.name, self.defs[b]), prev(self.vPC))
del self.defs[b]
elif nextChar == '(': pass
elif nextChar == ')': pass
self.word(nextWord)
def end(self):
"""Signal end of program"""
if len(self.comments) > 0:
self.lineNumber = self.comments[-1]
self.error('Unterminated comment')
self.closeSegment()
if len(self.openBlocks) > 1:
self.error('Unterminated block')
self.putInRomTable(0) # Zero marks the end of stream
if self.lineNumber > 0:
self.dumpVars()
def dumpVars(self):
print(' Variables count %d bytes %d end $%04x' % (len(self.vars), 2*len(self.vars), zpByte(0)))
line = ' :'
for var in sorted(self.vars.keys()):
if var in self.lengths and self.lengths[var]:
var += ' [%s]' % self.lengths[var]
if len(line + var) + 1 > 72:
print(line)
line = ' :'
line += ' ' + var
print(line)
def word(self, word):
# Process a GCL word and emit its corresponding vCPU code
if len(word) == 0:
return
self.lastWord = word
# Simple keywords
if not has(self.version):
if word in ['gcl0x']:
self.version = word
else:
self.error('Invalid GCL version')
elif word == 'def': self.emitDef()
elif word == 'do': self.loops[self.thisBlock()] = self.vPC
elif word == 'loop': self.emitLoop()
elif word == 'if<>0': self.emitIf('EQ')
elif word == 'if=0': self.emitIf('NE')
elif word == 'if>=0': self.emitIf('LT')
elif word == 'if<=0': self.emitIf('GT')
elif word == 'if>0': self.emitIf('LE')
elif word == 'if<0': self.emitIf('GE')
elif word == 'if<>0loop': self.emitIfLoop('NE')
elif word == 'if=0loop': self.emitIfLoop('EQ')
elif word == 'if>0loop': self.emitIfLoop('GT')
elif word == 'if<0loop': self.emitIfLoop('LT')
elif word == 'if>=0loop': self.emitIfLoop('GE')
elif word == 'if<=0loop': self.emitIfLoop('LE')
elif word == 'else': self.emitElse()
elif word == 'call': self.emitOp('CALL').emit(symbol('vAC'), '%04x vAC' % prev(self.vPC, 1))
elif word == 'push': self.emitOp('PUSH')
elif word == 'pop': self.emitOp('POP')
elif word == 'ret': self.emitOp('RET'); self.needPatch = self.needPatch or len(self.openBlocks) == 1 # Top-level use of 'ret' --> apply patch
elif word == 'peek': self.emitOp('PEEK')
elif word == 'deek': self.emitOp('DEEK')
else:
var, con, op = self.parseWord(word)
# Label definitions
if has(var) and has(con):
if op == '=' and var == 'zpReset': zpReset(con)
elif op == '=' and var == 'execute': self.execute = con
elif op == '=': self.defSymbol(var, con)
else: self.error("Invalid operator '%s' with name and constant" % op)
# Words with constant value as operand
elif has(con):
if not has(op):
if isinstance(con, int) and 0 <= con < 256:
self.emitOp('LDI')
else:
self.emitOp('LDWI').emit(lo(con)); con = hi(con)
elif op == '*= ': self.org(con); con = None
elif op == ';': self.emitOp('LDW')
elif op == '=': self.emitOp('STW'); self.depr('i=', 'i:')
elif op == ':' and con < 256: self.emitOp('STW')
elif op == ',': self.emitOp('LD')
elif op == '.': self.emitOp('ST')
elif op == '&': self.emitOp('ANDI')
elif op == '|': self.emitOp('ORI')
elif op == '^': self.emitOp('XORI')
elif op == '+': self.emitOp('ADDI')
elif op == '-': self.emitOp('SUBI')
elif op == '% =': self.emitOp('STLW')
elif op == '% ': self.emitOp('LDLW')
elif op == '--': self.emitOp('ALLOC'); con = 256-con if con else 0
elif op == '++': self.emitOp('ALLOC')
elif op == '< ++': self.emitOp('INC')
elif op == '> ++': self.emitOp('INC'); con += 1
elif op == '!!': self.emitOp('SYS'); con = self.sysTicks(con)
elif op == '!':
if isinstance(con, int) and 0 <= con < 256:
# XXX Deprecate in gcl1, replace with i!!
self.emitOp('SYS'); con = self.sysTicks(con);self.depr('i!', 'i!!')
else:
self.emitOp('CALLI_v5').emit(lo(con)); con = hi(con)
elif op == '?': self.emitOp('LUP'); #self.depr('i?', 'i??')
elif op == '??': self.emitOp('LUP')
elif op == '# ': self.emitOp(con); con = None # Silent truncation
elif op == '#< ': self.emitOp(con); con = None
elif op == '#> ': con = hi(con); assert self.segStart != self.vPC # XXX Conflict
elif op == '## ': self.emit(lo(con)).emit(hi(con)); con = None
elif op == '<<':
for i in range(con):
self.emitOp('LSLW')
con = None
# Deprecated syntax
elif op == ':': self.org(con); con = None; #self.depr('ii:', '*=ii')
elif op == '#': con &= 255; #self.depr('i#', '#i')
elif op == '<++': self.emitOp('INC'); #self.depr('i<++', '<i++')
elif op == '>++': self.emitOp('INC'); con += 1 #self.depr('i>++', '>i++')
elif op == '%=': self.emitOp('STLW'); #self.depr('i%=', '%i=')
elif op == '%': self.emitOp('LDLW'); #self.depr('i%', '%i')
else:
self.error("Invalid operator '%s' with constant" % op)
if has(con):
self.emit(con)
# Words with variable or symbol name as operand
elif has(var):
offset = 0
if not has(op): self.emitOp('LDW')
elif op == '=': self.emitOp('STW'); self.updateDefInfo(var)
elif op == ',': self.emitOp('LDW').emitVar(var).emitOp('PEEK'); var = None
elif op == ';': self.emitOp('LDW').emitVar(var).emitOp('DEEK'); var = None
elif op == '.': self.emitOp('POKE')
elif op == ':': self.emitOp('DOKE')
elif op == '< ,': self.emitOp('LD')
elif op == '> ,': self.emitOp('LD'); offset = 1
elif op == '< .': self.emitOp('ST')
elif op == '> .': self.emitOp('ST'); offset = 1
elif op == '&': self.emitOp('ANDW')
elif op == '|': self.emitOp('ORW')
elif op == '^': self.emitOp('XORW')
elif op == '+': self.emitOp('ADDW')
elif op == '-': self.emitOp('SUBW')
elif op == '< ++': self.emitOp('INC')
elif op == '> ++': self.emitOp('INC'); offset = 1
elif op == '!': self.emitOp('CALL')
elif op == '`': self.emitQuote(var); var = None
elif op == '=*': self.defSymbol(var, self.vPC); var = None
elif op == '# ': self.emitImm(var); var = None
elif op == '#< ': self.emitImm(var); var = None
elif op == '#> ': self.emitImm(var, half=hi); var = None
elif op == '## ': self.emitImm(var).emit(hi(var[1:])); var = None
elif op == '#@ ': offset = -self.vPC-1 # PC relative, 6502 style
# Deprecated syntax
elif op == '<++': self.emitOp('INC'); #self.depr('X<++', '<X++')
elif op == '>++': self.emitOp('INC'); offset = 1; #self.depr('X>++', '>X++')
elif op == '<,': self.emitOp('LD'); #self.depr('X<,', '<X,')
elif op == '>,': self.emitOp('LD'); offset = 1; #self.depr('X>,', '>X,')
elif op == '<.': self.emitOp('ST'); #self.depr('X<.', '<X.')
elif op == '>.': self.emitOp('ST'); offset = 1; #self.depr('X>.', '>X.')
else:
self.error("Invalid operator '%s' with variable or symbol '%s'" % (op, var))
if has(var):
self.emitVar(var, offset)
else:
self.error('Invalid word')
def parseWord(self, word):
# Break word into pieces
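# Worked examples of the returned (name, number, op) tuple, derived from the parsing below:
#   'p=$1234' -> ('p', 4660, '=')    infix symbol definition with a hex constant
#   '10+'     -> (None, 10, '+')     decimal constant with a postfix operator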
word += '\0' # Avoid checking len() everywhere
sign = None
name, number, op = None, None, ''
if word[0] == '`':
# Quoted word
name, op = word[1:-1], word[0]
return name, number, op
ix = 0
prefixes = ['%', '#', '<', '>', '*', '=', '@']
if word[ix] in prefixes:
# Prefix operators
while word[ix] in prefixes:
op += word[ix]
ix += 1
op += ' ' # Space to demarcate prefix operators
if word[ix].isalpha() or word[ix] in ['&', '\\', '_']:
# Named variable or named constant
name = word[ix]
ix += 1
while word[ix].isalnum() or word[ix] == '_':
name += word[ix]
ix += 1
if word[ix] == '=':
# Infix symbol definition
op += word[ix]
# op += ' ' # Space to demarcate infix operator
ix += 1
if word[ix] in ['-', '+']:
# Number sign
sign = word[ix]
ix += 1
if word[ix] == '$' and word[ix+1] in string.hexdigits:
# Hexadecimal number
jx = ix+1
number = 0
while word[jx] in string.hexdigits:
o = string.hexdigits.index(word[jx])
number = 16*number + (o if o<16 else o-6)
jx += 1
ix = jx if jx-ix > 1 else 0
elif word[ix].isdigit():
# Decimal number
number = 0
while word[ix].isdigit():
number = 10*number + ord(word[ix]) - ord('0')
ix += 1
elif has(sign):
op += sign
sign = None
else:
pass
# Resolve '&_symbol' as the number it represents
if has(name) and name[0] == '&':
if name[1] == '_':
number = symbol(name[2:])
if not has(number):
number = name[2:] # Pass back as an unresolved reference without '_'
name = None
# Resolve '\symbol' as the number it represents
if has(name) and name[0] == '\\':
# Peeking into the assembler's symbol table (not GCL's)
# Substitute \symbol with its value, and keeping the operator
number = symbol(name[1:])
if not has(number):
number = name[1:] # Pass back as an unresolved reference
name = None
if sign == '-':
if has(number) and isinstance(number, int):
number = -number
else:
self.error('Unable to negate')
op += word[ix:-1] # Also strips sentinel '\0'
return (name, number, op if len(op)>0 else None)
def sysTicks(self, con):
# Convert maximum Gigatron cycles to the negative of excess ticks
if con & 1:
self.error('Invalid value (must be even, got %d)' % con)
extraTicks = con//2 - symbol('maxTicks')
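# Worked example (assuming symbol('maxTicks') == 14): con=40 cycles -> extraTicks = 20-14 = 6
# -> operand 256-6 = 250; con <= 28 cycles -> extraTicks <= 0 -> operand 0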
return 256 - extraTicks if extraTicks > 0 else 0
def emitQuote(self, var):
if len(var) > 0:
d = '' # Replace backquotes with spaces
for c in var:
d += ' ' if c == '`' else c
else:
d = '`' # And symbol becomes a backquote
for c in d:
comment = '%04x %s' % (self.vPC, repr(c))
self.emit(ord(c), comment=comment)
def emitDef(self):
self.emitOp('DEF')
b = self.thisBlock()
if b in self.defs:
self.error('Second DEF in block')
self.defs[b] = self.vPC
self.emit(lo('__%s_%#04x_def__' % (self.name, self.vPC)))
def updateDefInfo(self, var):
# Heuristically track `def' lengths for reporting on stdout
if var not in self.lengths and self.thisBlock() in self.lengths:
self.lengths[var] = self.lengths[self.thisBlock()]
else:
self.lengths[var] = None # No def lengths can be associated
def emitLoop(self):
to = [b for b in self.openBlocks if b in self.loops]
if len(to) == 0:
self.error('Loop without do')
to = self.loops[to[-1]]
to = prev(to)
if self.vPC>>8 != to>>8:
self.error('Loop crosses page boundary')
self.emitOp('BRA')
self.emit(to&255)
def emitIf(self, cond):
self.emitOp('BCC')
self.emitOp(cond)
b = self.thisBlock()
self.emit(lo('__%s_%d_cond%d__' % (self.name, b, self.elses[b])))
def emitIfLoop(self, cond):
to = [blockId for blockId in self.openBlocks if blockId in self.loops]
if len(to) == 0:
self.error('Loop without do')
to = self.loops[to[-1]]
to = prev(to)
if self.vPC>>8 != to>>8:
self.error('Loop to different page')
self.emitOp('BCC')
self.emitOp(cond)
self.emit(to&255)
def emitElse(self):
self.emitOp('BRA')
b = self.thisBlock()
i = self.elses[b]
self.emit(lo('__%s_%d_cond%d__' % (self.name, b, i+1)))
define('__%s_%d_cond%d__' % (self.name, b, i), prev(self.vPC))
self.elses[b] = i+1
def emitOp(self, ins):
# Emit vCPU opcode
self.prepareSegment()
self.putInRomTable(lo(ins), '%04x %s' % (self.vPC, ins))
self.vPC += 1
return self
def emitVar(self, var, offset=0):
# Get or create address for GCL variable and emit it
# !!! Also safe at start of segment !!!
self.prepareSegment()
if var[0] == '_':
# _C notation for labels as variables
address, offset = lo(var[1:]), offset & 255
else:
# Regular GCL variable
if var not in self.vars:
self.vars[var] = zpByte(2)
address = self.vars[var]
comment = '%04x %s' % (prev(self.vPC, 1), repr(var))
comment += '%+d' % offset if offset else ''
byte = address + offset
if byte < -128 or byte >= 256:
self.error('Value %s out of range (must be -128..255)' % repr(byte))
self.putInRomTable(byte, comment)
self.vPC += 1
return self
def emitImm(self, var, half=lo):
# Emit low or high byte of symbol
# !!! Also safe at start of segment !!!
#
# Here we see the subtle differences between variables and named constants
# again. For named constants (preceded by '_'), we want their value.
# For named variables, we want their address. This becomes evident with the
# '>' modifier: constant>>8 vs. address+1
self.prepareSegment()
if var[0] == '_':
address = half(var[1:])
else:
if var not in self.vars:
self.vars[var] = zpByte(2)
address = self.vars[var]
if half is hi:
address += 1
var = '>' + var
self.putInRomTable(address, '%04x %s' % (self.vPC, var))
self.vPC += 1
return self
def thisBlock(self):
return self.openBlocks[-1]
def prepareSegment(self):
# Check if there's space in the current segment
if self.vPC >= self.segEnd:
severity = self.warning if self.vPC & 255 > 0 else self.error
severity('Out of code space ($%04x)' % self.vPC)
# And write header bytes for a new segment
if self.segStart == self.vPC:
# This must come before any lo() or hi()
# Write header for GT1 segment
address = self.segStart
if not has(self.execute) and address >= 0x200:
self.execute = address
assert self.segId == 0 or address>>8 != 0 # Zero-page segment can only be first
self.putInRomTable(address>>8, '| RAM segment address (high byte first)')
self.putInRomTable(address&255, '|')
# Fill in the length through the symbol table
self.putInRomTable(lo('__%s_seg%d__' % (self.name, self.segId)), '| Length (1..256)')
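  # Rough sketch of the GT1 header bytes written above (illustrative, assuming a
  # segment that starts at $0200 and ends up 3 bytes long):
  #   $02   high byte of the RAM load address (written first, as noted above)
  #   $00   low byte of the RAM load address
  #   $03   segment length, resolved later through the __<name>_seg<N>__ symbol
  # The payload bytes produced by emit()/emitOp() follow immediately after.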
def emit(self, byte, comment=None):
# Next program byte in RAM
self.prepareSegment()
if not isinstance(byte, (int, float)):
self.error('Invalid value (number expected, got %s)' % repr(byte))
if byte < -128 or byte >= 256:
self.error('Value %s out of range (must be -128..255)' % repr(byte))
self.putInRomTable(byte, comment)
self.vPC += 1
return self
def closeSegment(self):
# Register length of GT1 segment
if self.vPC != self.segStart:
print(' Segment at $%04x size %3d used %3d unused %3d' % (
self.segStart,
self.segEnd - self.segStart,
self.vPC - self.segStart,
self.segEnd - self.vPC))
length = self.vPC - self.segStart
assert 1 <= length <= 256
define('__%s_seg%d__' % (self.name, self.segId), length)
self.segId += 1
def putInRomTable(self, byte, comment=None):
if byte < -128 or byte >= 256:
self.error('Value %s out of range (must be -128..255)' % repr(byte))
ld(byte)
if comment:
C(comment)
if self.forRom and pc()&255 == 251:
trampoline()
def depr(self, old, new):
var, con, _op = self.parseWord(self.lastWord)
old = old.replace(' ', str(con) if has(con) else var)
new = new.replace(' ', str(con) if has(con) else var)
self.warning('%s is deprecated, please use %s' % (old, new))
def warning(self, message):
highlight(self.prefix('Warning'), message)
def error(self, message):
highlight(self.prefix('Error'), message)
sys.exit(1)
def prefix(self, prefix):
# Informative line prefix for warning and error messages
if has(self.filename):
prefix += ' file %s' % repr(self.filename)
if self.lineNumber != 0:
prefix += ':%s' % self.lineNumber
if has(self.lastWord):
prefix += ' (%s)' % self.lastWord
return prefix + ':'
def defSymbol(self, name, value):
# Define a label from GCL in the systems symbol table
if name[0] != '_':
self.error('Symbol \'%s\' must begin with underscore (\'_\')' % name)
define(name[1:], value)
def prev(address, step=2):
# Take vPC two bytes back, wrap around if needed to stay on page
return (address & ~255) | ((address-step) & 255)
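# A couple of worked examples of prev() (illustrative only): stepping back wraps
# within the 256-byte page instead of borrowing into the high byte.
#   prev(0x0300)         == 0x03FE   (back 2, wraps to the end of page $03)
#   prev(0x0301)         == 0x03FF
#   prev(0x0345, step=1) == 0x0344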
|
|
import bs
import bsVector
import bsSpaz
import bsBomb
import bsUtils
import random
import SnoBallz
# Please note that this minigame requires the separate file SnoBallz.py.
# It's a separate file so that snowballs can be used as a powerup in any game.
def bsGetAPIVersion():
# see bombsquadgame.com/apichanges
return 4
def bsGetGames():
return [SnowBallFightGame]
class PlayerSpaz_Sno(bs.PlayerSpaz):
def handleMessage(self,m):
#print m, self.hitPoints
if isinstance(m,bsSpaz._PunchHitMessage): return True #Nullify punches
        super(PlayerSpaz_Sno, self).handleMessage(m)
class SnowBallFightGame(bs.TeamGameActivity):
@classmethod
def getName(cls):
return 'Snowball Fight'
@classmethod
def getDescription(cls,sessionType):
return 'Kill a set number of enemies to win.'
@classmethod
def supportsSessionType(cls,sessionType):
return True if (issubclass(sessionType,bs.TeamsSession)
or issubclass(sessionType,bs.FreeForAllSession)) else False
@classmethod
def getSupportedMaps(cls,sessionType):
return bs.getMapsSupportingPlayType("melee")
@classmethod
def getSettings(cls,sessionType):
settings = [("Kills to Win Per Player",{'minValue':1,'default':5,'increment':1}),
("Time Limit",{'choices':[('None',0),('1 Minute',60),
('2 Minutes',120),('5 Minutes',300),
('10 Minutes',600),('20 Minutes',1200)],'default':0}),
("Respawn Times",{'choices':[('Shorter',0.25),('Short',0.5),('Normal',1.0),('Long',2.0),('Longer',4.0)],'default':1.0}),
("Snowball Rate",{'choices':[('Slowest',500),('Slow',400),('Normal',300),('Fast',200),('Lag City',100)],'default':300}),
("Snowballs Melt",{'default':True}),
("Snowballs Bust",{'default':True}),
("Epic Mode",{'default':False})]
        # In teams mode, a suicide gives a point to the other team, but in free-for-all it
        # subtracts from your own score. By default we clamp this at zero to benefit new
        # players, but pro players might like to be able to go negative (to avoid a
        # strategy of just suiciding until you get a good drop).
if issubclass(sessionType, bs.FreeForAllSession):
settings.append(("Allow Negative Scores",{'default':False}))
return settings
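    # Illustrative note (not part of the original): each tuple above becomes an entry
    # in self.settings keyed by its display name, so with the defaults left untouched
    # self.settings['Kills to Win Per Player'] == 5 and self.settings['Epic Mode'] is False,
    # which is how the values are read back in __init__ and onBegin below.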
def __init__(self,settings):
bs.TeamGameActivity.__init__(self,settings)
if self.settings['Epic Mode']: self._isSlowMotion = True
# print messages when players die since it matters here..
self.announcePlayerDeaths = True
self._scoreBoard = bs.ScoreBoard()
#Initiate the SnoBall factory
self.snoFact = SnoBallz.snoBall().getFactory()
self.snoFact.defaultBallTimeout = self.settings['Snowball Rate']
self.snoFact._ballsMelt = self.settings['Snowballs Melt']
self.snoFact._ballsBust = self.settings['Snowballs Bust']
self.snoFact._powerExpire = False
def getInstanceDescription(self):
return ('Crush ${ARG1} of your enemies.',self._scoreToWin)
def getInstanceScoreBoardDescription(self):
return ('kill ${ARG1} enemies',self._scoreToWin)
def onTransitionIn(self):
bs.TeamGameActivity.onTransitionIn(self, music='Epic' if self.settings['Epic Mode'] else 'ToTheDeath')
def onTeamJoin(self,team):
team.gameData['score'] = 0
if self.hasBegun(): self._updateScoreBoard()
def onBegin(self):
bs.TeamGameActivity.onBegin(self)
self.setupStandardTimeLimit(self.settings['Time Limit'])
self.setupStandardPowerupDrops()
if len(self.teams) > 0:
self._scoreToWin = self.settings['Kills to Win Per Player'] * max(1,max(len(t.players) for t in self.teams))
else: self._scoreToWin = self.settings['Kills to Win Per Player']
self._updateScoreBoard()
self._dingSound = bs.getSound('dingSmall')
def _standardDropPowerup(self,index,expire=True):
import bsPowerup
        forbidden = ['iceBombs','punch','stickyBombs','landMines', 'snoball']
        bsPowerup.Powerup(position=self.getMap().powerupSpawnPoints[index],
                          powerupType=bs.Powerup.getFactory().getRandomPowerupType(None,forbidden),expire=expire).autoRetain()
def spawnPlayerSpaz(self,player,position=(0,0,0),angle=None):
"""
        Create and wire up a bs.PlayerSpaz for the provided bs.Player.
"""
position = self.getMap().getFFAStartPosition(self.players)
name = player.getName()
color = player.color
highlight = player.highlight
lightColor = bsUtils.getNormalizedColor(color)
displayColor = bs.getSafeColor(color,targetIntensity=0.75)
spaz = PlayerSpaz_Sno(color=color,
highlight=highlight,
character=player.character,
player=player)
player.setActor(spaz)
# we want a bigger area-of-interest in co-op mode
# if isinstance(self.getSession(),bs.CoopSession): spaz.node.areaOfInterestRadius = 5.0
# else: spaz.node.areaOfInterestRadius = 5.0
# if this is co-op and we're on Courtyard or Runaround, add the material that allows us to
# collide with the player-walls
        # FIXME: need to generalize this
if isinstance(self.getSession(),bs.CoopSession) and self.getMap().getName() in ['Courtyard','Tower D']:
mat = self.getMap().preloadData['collideWithWallMaterial']
spaz.node.materials += (mat,)
spaz.node.rollerMaterials += (mat,)
spaz.node.name = name
spaz.node.nameColor = displayColor
spaz.connectControlsToPlayer(enableBomb=False, enablePickUp=False)
self.snoFact.giveBallz(spaz)
self.scoreSet.playerGotNewSpaz(player,spaz)
# move to the stand position and add a flash of light
spaz.handleMessage(bs.StandMessage(position,angle if angle is not None else random.uniform(0,360)))
t = bs.getGameTime()
bs.playSound(self._spawnSound,1,position=spaz.node.position)
light = bs.newNode('light',attrs={'color':lightColor})
spaz.node.connectAttr('position',light,'position')
bsUtils.animate(light,'intensity',{0:0,250:1,500:0})
bs.gameTimer(500,light.delete)
return spaz
def handleMessage(self,m):
if isinstance(m,bs.PlayerSpazDeathMessage):
bs.TeamGameActivity.handleMessage(self,m) # augment standard behavior
player = m.spaz.getPlayer()
self.respawnPlayer(player)
killer = m.killerPlayer
if killer is None: return
# handle team-kills
if killer.getTeam() is player.getTeam():
# in free-for-all, killing yourself loses you a point
if isinstance(self.getSession(),bs.FreeForAllSession):
newScore = player.getTeam().gameData['score'] - 1
if not self.settings['Allow Negative Scores']: newScore = max(0, newScore)
player.getTeam().gameData['score'] = newScore
# in teams-mode it gives a point to the other team
else:
bs.playSound(self._dingSound)
for team in self.teams:
if team is not killer.getTeam():
team.gameData['score'] += 1
# killing someone on another team nets a kill
else:
killer.getTeam().gameData['score'] += 1
bs.playSound(self._dingSound)
                # in FFA, show our score since it's hard to find on the scoreboard
try: killer.actor.setScoreText(str(killer.getTeam().gameData['score'])+'/'+str(self._scoreToWin),color=killer.getTeam().color,flash=True)
except Exception: pass
self._updateScoreBoard()
# if someone has won, set a timer to end shortly
# (allows the dust to clear and draws to occur if deaths are close enough)
if any(team.gameData['score'] >= self._scoreToWin for team in self.teams):
bs.gameTimer(500,self.endGame)
else: bs.TeamGameActivity.handleMessage(self,m)
def _updateScoreBoard(self):
for team in self.teams:
self._scoreBoard.setTeamValue(team,team.gameData['score'],self._scoreToWin)
def endGame(self):
results = bs.TeamGameResults()
for t in self.teams: results.setTeamScore(t,t.gameData['score'])
self.end(results=results)
|
|
# coding=utf-8
# Copyright 2022 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Tests for gradient_accumulator.py.
"""
import copy
import functools
import itertools
import os
import shutil
import tempfile
from absl.testing import absltest
from flax import core
from init2winit import checkpoint
from init2winit import trainer
from init2winit.dataset_lib import datasets
from init2winit.dataset_lib.small_image_datasets import Dataset
from init2winit.init_lib import initializers
from init2winit.model_lib import models
from init2winit.optimizer_lib import gradient_accumulator
import jax
import jax.numpy as jnp
from ml_collections.config_dict import config_dict
import numpy as np
import optax
import pandas
import tensorflow.compat.v1 as tf
def _init_model(model_cls, hps):
"""Initialize the Flax model."""
loss_name = 'cross_entropy'
metrics_name = 'classification_metrics'
key = jax.random.PRNGKey(0)
dataset_metadata = {
'apply_one_hot_in_loss': False,
}
model = model_cls(hps, dataset_metadata, loss_name, metrics_name)
params_rng, dropout_rng = jax.random.split(key, num=2)
model_init_fn = jax.jit(
functools.partial(model.flax_module.init, train=False))
init_dict = model_init_fn(
rngs={'params': params_rng, 'dropout': dropout_rng},
x=np.zeros((2, *hps.input_shape)))
params = init_dict['params']
batch_stats = init_dict.get('batch_stats', {})
return params, batch_stats, model.training_cost
def _optimize(num_steps,
params,
batch_stats,
training_cost,
train_iter,
opt_init,
opt_update):
"""Update the Flax model for num_steps steps."""
opt_state = opt_init(params)
def opt_cost(params, batch_stats, batch):
return training_cost(
params,
batch=batch,
batch_stats=batch_stats,
dropout_rng=jax.random.PRNGKey(2))
grad_fn = jax.value_and_grad(opt_cost, has_aux=True)
for _ in range(num_steps):
data_batch = next(train_iter)
(_, updated_vars), grad = grad_fn(params, batch_stats, data_batch)
batch_stats = updated_vars.get('batch_stats', {})
model_updates, opt_state = opt_update(grad, opt_state, params=params)
params = optax.apply_updates(params, model_updates)
return params, batch_stats
def _get_fake_text_dataset(batch_size, eval_num_batches):
"""Yields a single text batch repeatedly for train and test."""
inputs = jnp.array(
np.random.randint(low=0, high=4, size=(batch_size, 32)))
batch = {
'inputs': inputs,
'targets': inputs,
'weights': jnp.ones(inputs.shape),
}
def train_iterator_fn():
while True:
yield batch
def eval_train_epoch(num_batches=None):
if num_batches is None:
num_batches = eval_num_batches
for _ in range(num_batches):
yield batch
def valid_epoch(num_batches=None):
if num_batches is None:
num_batches = eval_num_batches
for _ in range(num_batches):
yield batch
def test_epoch(num_batches=None):
if num_batches is None:
num_batches = eval_num_batches
for _ in range(num_batches):
yield batch
meta_data = {
'apply_one_hot_in_loss': True,
'shift_inputs': True,
'causal': True
}
return (Dataset(train_iterator_fn, eval_train_epoch, valid_epoch,
test_epoch), meta_data)
class GradientAccumulatorTest(absltest.TestCase):
"""Tests for gradient_accumulator.py."""
def setUp(self):
super(GradientAccumulatorTest, self).setUp()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
    # Wait for any in-flight checkpoint save so we don't delete the directory
    # while a file is still being written into it:
checkpoint.wait_for_checkpoint_save()
shutil.rmtree(self.test_dir)
super(GradientAccumulatorTest, self).tearDown()
def test_virtual_batch_size_error(self):
with self.assertRaisesRegex(
ValueError, 'Gradient accumulation does not currently support using '):
gradient_accumulator.accumulate_gradients(
per_step_batch_size=32,
total_batch_size=96,
virtual_batch_size=48,
base_opt_init_fn=None,
base_opt_update_fn=None)
def test_accumulation(self):
"""Test simple gradient accumulation."""
num_steps = 3
per_step_batch_size = 16
total_batch_size = 48
virtual_batch_size = 8
model_str = 'wide_resnet' # Pick a model with batch norm.
model_cls = models.get_model(model_str)
model_hps = models.get_model_hparams(model_str)
dataset_name = 'cifar10'
dataset_builder = datasets.get_dataset(dataset_name)
hps = copy.copy(model_hps)
hps.update(datasets.get_dataset_hparams(dataset_name))
# Compute updates using gradient accumulation.
hps.update({
'batch_size': per_step_batch_size,
'virtual_batch_size': virtual_batch_size,
'normalizer': 'virtual_batch_norm',
'total_accumulated_batch_size': total_batch_size,
})
grad_acc_params, grad_acc_batch_stats, grad_acc_training_cost = _init_model(
model_cls, hps)
total_dataset = dataset_builder(
shuffle_rng=jax.random.PRNGKey(1),
batch_size=total_batch_size,
eval_batch_size=10,
hps=hps)
# Ensure we see the same exact batches.
train_iter = total_dataset.train_iterator_fn()
train_iter = itertools.islice(train_iter, 0, num_steps)
train_iter = itertools.cycle(train_iter)
def grad_acc_train_iter():
for _ in range(num_steps):
total_batch = next(train_iter)
# Split each total batch into sub batches.
num_sub_batches = total_batch_size // per_step_batch_size
start_index = 0
end_index = int(total_batch_size / num_sub_batches)
for bi in range(num_sub_batches):
yield jax.tree_map(lambda x: x[start_index:end_index], total_batch) # pylint: disable=cell-var-from-loop
start_index = end_index
end_index = int(total_batch_size * (bi + 2) / num_sub_batches)
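    # Worked example of the split above (illustrative): with total_batch_size=48 and
    # per_step_batch_size=16 there are 3 sub-batches per total batch, sliced as
    # [0:16], [16:32], [32:48]. Over num_steps=3 total batches the accumulating run
    # therefore takes 3*3 = 9 micro-steps while seeing exactly the same examples.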
lrs = jnp.array([1.0, 0.1, 1e-2])
sgd_opt_init, sgd_opt_update = optax.sgd(
learning_rate=lambda t: lrs.at[t].get())
opt_init, opt_update = gradient_accumulator.accumulate_gradients(
per_step_batch_size=per_step_batch_size,
total_batch_size=total_batch_size,
virtual_batch_size=virtual_batch_size,
base_opt_init_fn=sgd_opt_init,
base_opt_update_fn=sgd_opt_update)
grad_acc_params, grad_acc_batch_stats = _optimize(
# Run for 3x the number of steps to see the same number of examples.
num_steps=3 * num_steps,
params=grad_acc_params,
batch_stats=grad_acc_batch_stats,
training_cost=grad_acc_training_cost,
train_iter=grad_acc_train_iter(),
opt_init=opt_init,
opt_update=opt_update)
# Compute the same updates, but without gradient accumulation.
hps.update({
'batch_size': total_batch_size,
'total_accumulated_batch_size': None,
})
params, batch_stats, training_cost = _init_model(model_cls, hps)
params, batch_stats = _optimize(
num_steps=num_steps,
params=params,
batch_stats=batch_stats,
training_cost=training_cost,
train_iter=train_iter,
opt_init=sgd_opt_init,
opt_update=sgd_opt_update)
diffs_params = jax.tree_multimap(
lambda a, b: jnp.mean(jnp.abs(a - b)),
grad_acc_params,
params)
def batch_stats_reduce(a, b):
if len(a.shape) > 0: # pylint: disable=g-explicit-length-test
return jnp.mean(
jnp.abs(jnp.mean(a, axis=0) - jnp.mean(b, axis=0)))
# The gradient accumulator counters are scalars.
return a - b
diffs_batch_stats = jax.tree_multimap(
batch_stats_reduce,
grad_acc_batch_stats,
batch_stats)
# We sometimes get small floating point errors in the gradients, so we
# cannot test for the values being exactly the same.
acceptable_params_diff = 1e-4
acceptable_batch_stats_diff = 5e-3
def check_closeness(root_name, d, max_diff):
not_close_dict = {}
for name, dd in d.items():
new_name = root_name + '/' + name if root_name else name
if isinstance(dd, (dict, core.FrozenDict)):
not_close_dict.update(check_closeness(new_name, dd, max_diff))
else:
if dd > max_diff:
not_close_dict[new_name] = dd
return not_close_dict
not_close_params = check_closeness(
'', diffs_params, acceptable_params_diff)
self.assertEmpty(not_close_params)
not_close_batch_stats = check_closeness(
'', diffs_batch_stats, acceptable_batch_stats_diff)
    # Note that the variance variables in the batch stats collection can sometimes
    # start to diverge slightly over time (with a higher number of training steps),
    # likely due to numerical issues.
self.assertEmpty(not_close_batch_stats)
def test_text_model(self):
"""Test gradient accumulator training of a small transformer."""
rng = jax.random.PRNGKey(42)
    # Set the numpy seed to make the fake data deterministic. mocking.mock_data
# ultimately calls numpy.random.
np.random.seed(0)
model_cls = models.get_model('transformer')
loss_name = 'cross_entropy'
metrics_name = 'classification_metrics'
batch_size = 16
train_size = 20 * batch_size
hps = config_dict.ConfigDict({
# Architecture Hparams.
'batch_size': batch_size,
'emb_dim': 32,
'num_heads': 2,
'num_layers': 3,
'qkv_dim': 32,
'mlp_dim': 64,
'max_target_length': 64,
'max_eval_target_length': 64,
'input_shape': (64,),
'output_shape': (4,),
'dropout_rate': 0.1,
'attention_dropout_rate': 0.1,
'layer_rescale_factors': {},
'optimizer': 'momentum',
'normalizer': 'layer_norm',
'opt_hparams': {
'momentum': 0.9,
},
'lr_hparams': {
'base_lr': 0.005,
'schedule': 'constant'
},
# Training HParams.
'l2_decay_factor': 1e-4,
'l2_decay_rank_threshold': 2,
'train_size': train_size,
'gradient_clipping': 0.0,
'model_dtype': 'float32',
'decode': False,
})
initializer = initializers.get_initializer('noop')
eval_num_batches = 5
dataset, dataset_meta_data = _get_fake_text_dataset(
batch_size=hps.batch_size, eval_num_batches=eval_num_batches)
eval_batch_size = hps.batch_size
model = model_cls(hps, dataset_meta_data, loss_name, metrics_name)
eval_every = 10
checkpoint_steps = []
num_train_steps = train_size // batch_size * 3
metrics_logger, init_logger = trainer.set_up_loggers(self.test_dir)
_ = list(
trainer.train(
train_dir=self.test_dir,
model=model,
dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
initializer=initializer,
num_train_steps=num_train_steps,
hps=hps,
rng=rng,
eval_batch_size=eval_batch_size,
eval_num_batches=eval_num_batches,
eval_train_num_batches=eval_num_batches,
eval_frequency=eval_every,
checkpoint_steps=checkpoint_steps,
metrics_logger=metrics_logger,
init_logger=init_logger))
with tf.io.gfile.GFile(
os.path.join(self.test_dir, 'measurements.csv')) as f:
df = pandas.read_csv(f)
train_err = df['train/error_rate'].values[-1]
# Note that upgrading to Linen made this fail at 0.6.
self.assertLess(train_err, 0.7)
if __name__ == '__main__':
absltest.main()
|
|
from weakref import WeakValueDictionary
from rpython.annotator import model as annmodel
from rpython.rlib import jit, types, objectmodel
from rpython.rlib.objectmodel import (malloc_zero_filled, we_are_translated,
ll_hash_string, keepalive_until_here, specialize, enforceargs, dont_inline)
from rpython.rlib.signature import signature
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rtyper.error import TyperError
from rpython.rtyper.debug import ll_assert
from rpython.rtyper.lltypesystem import ll_str, llmemory
from rpython.rtyper.lltypesystem.lltype import (GcStruct, Signed, Array, Char,
UniChar, Ptr, malloc, Bool, Void, GcArray, nullptr, cast_primitive,
typeOf, staticAdtMethod, GcForwardReference)
from rpython.rtyper.rmodel import inputconst, Repr
from rpython.rtyper.rint import IntegerRepr
from rpython.rtyper.rstr import (AbstractStringRepr, AbstractCharRepr,
AbstractUniCharRepr, AbstractStringIteratorRepr, AbstractLLHelpers,
AbstractUnicodeRepr)
from rpython.tool.sourcetools import func_with_new_name
# ____________________________________________________________
#
# Concrete implementation of RPython strings:
#
# struct str {
# hash: Signed
# chars: array of Char
# }
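# For example (informal), a constant string "abc" is materialized as a single GC
# object: a Signed 'hash' field (filled in lazily, 0 meaning "not computed yet")
# followed by an inline chars array of length 3 holding 'a', 'b', 'c'.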
STR = GcForwardReference()
UNICODE = GcForwardReference()
def new_malloc(TP, name):
@enforceargs(int)
def mallocstr(length):
ll_assert(length >= 0, "negative string length")
r = malloc(TP, length)
if not we_are_translated() or not malloc_zero_filled:
r.hash = 0
return r
return func_with_new_name(mallocstr, name)
mallocstr = new_malloc(STR, 'mallocstr')
mallocunicode = new_malloc(UNICODE, 'mallocunicode')
@specialize.memo()
def emptystrfun():
return string_repr.convert_const("")
@specialize.memo()
def emptyunicodefun():
return unicode_repr.convert_const(u'')
def _new_copy_contents_fun(SRC_TP, DST_TP, CHAR_TP, name):
@specialize.arg(0)
def _str_ofs(TP, item):
return (llmemory.offsetof(TP, 'chars') +
llmemory.itemoffsetof(TP.chars, 0) +
llmemory.sizeof(CHAR_TP) * item)
@signature(types.any(), types.any(), types.int(), returns=types.any())
@specialize.arg(0)
def _get_raw_buf(TP, src, ofs):
"""
WARNING: dragons ahead.
Return the address of the internal char* buffer of the low level
string. The return value is valid as long as no GC operation occur, so
you must ensure that it will be used inside a "GC safe" section, for
example by marking your function with @rgc.no_collect
"""
assert typeOf(src).TO == TP
assert ofs >= 0
return llmemory.cast_ptr_to_adr(src) + _str_ofs(TP, ofs)
_get_raw_buf._always_inline_ = True
@jit.oopspec('stroruni.copy_contents(src, dst, srcstart, dststart, length)')
@signature(types.any(), types.any(), types.int(), types.int(), types.int(), returns=types.none())
def copy_string_contents(src, dst, srcstart, dststart, length):
"""Copies 'length' characters from the 'src' string to the 'dst'
string, starting at position 'srcstart' and 'dststart'."""
# xxx Warning: don't try to do this at home. It relies on a lot
# of details to be sure that it works correctly in all cases.
# Notably: no GC operation at all from the first cast_ptr_to_adr()
# because it might move the strings. The keepalive_until_here()
# are obscurely essential to make sure that the strings stay alive
# longer than the raw_memcopy().
assert length >= 0
ll_assert(srcstart >= 0, "copystrc: negative srcstart")
ll_assert(srcstart + length <= len(src.chars), "copystrc: src ovf")
ll_assert(dststart >= 0, "copystrc: negative dststart")
ll_assert(dststart + length <= len(dst.chars), "copystrc: dst ovf")
# from here, no GC operations can happen
asrc = _get_raw_buf(SRC_TP, src, srcstart)
adst = _get_raw_buf(DST_TP, dst, dststart)
llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
# end of "no GC" section
keepalive_until_here(src)
keepalive_until_here(dst)
copy_string_contents._always_inline_ = True
copy_string_contents = func_with_new_name(copy_string_contents,
'copy_%s_contents' % name)
@jit.oopspec('stroruni.copy_string_to_raw(src, ptrdst, srcstart, length)')
def copy_string_to_raw(src, ptrdst, srcstart, length):
"""
Copies 'length' characters from the 'src' string to the 'ptrdst'
buffer, starting at position 'srcstart'.
'ptrdst' must be a non-gc Array of Char.
"""
        # xxx Warning: the same note as above applies: don't do this at home
assert length >= 0
# from here, no GC operations can happen
asrc = _get_raw_buf(SRC_TP, src, srcstart)
adst = llmemory.cast_ptr_to_adr(ptrdst)
adst = adst + llmemory.itemoffsetof(typeOf(ptrdst).TO, 0)
llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
# end of "no GC" section
keepalive_until_here(src)
copy_string_to_raw._always_inline_ = True
copy_string_to_raw = func_with_new_name(copy_string_to_raw, 'copy_%s_to_raw' % name)
@jit.dont_look_inside
@signature(types.any(), types.any(), types.int(), types.int(),
returns=types.none())
def copy_raw_to_string(ptrsrc, dst, dststart, length):
        # xxx Warning: the same note as above applies: don't do this at home
assert length >= 0
# from here, no GC operations can happen
adst = _get_raw_buf(SRC_TP, dst, dststart)
asrc = llmemory.cast_ptr_to_adr(ptrsrc)
asrc = asrc + llmemory.itemoffsetof(typeOf(ptrsrc).TO, 0)
llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(CHAR_TP) * length)
# end of "no GC" section
keepalive_until_here(dst)
copy_raw_to_string._always_inline_ = True
copy_raw_to_string = func_with_new_name(copy_raw_to_string,
'copy_raw_to_%s' % name)
return (copy_string_to_raw, copy_raw_to_string, copy_string_contents,
_get_raw_buf)
(copy_string_to_raw,
copy_raw_to_string,
copy_string_contents,
_get_raw_buf_string) = _new_copy_contents_fun(STR, STR, Char, 'string')
(copy_unicode_to_raw,
copy_raw_to_unicode,
copy_unicode_contents,
_get_raw_buf_unicode) = _new_copy_contents_fun(UNICODE, UNICODE, UniChar,
'unicode')
CONST_STR_CACHE = WeakValueDictionary()
CONST_UNICODE_CACHE = WeakValueDictionary()
class BaseLLStringRepr(Repr):
def convert_const(self, value):
if value is None:
return nullptr(self.lowleveltype.TO)
#value = getattr(value, '__self__', value) # for bound string methods
if not isinstance(value, self.basetype):
raise TyperError("not a str: %r" % (value,))
try:
return self.CACHE[value]
except KeyError:
p = self.malloc(len(value))
for i in range(len(value)):
p.chars[i] = cast_primitive(self.base, value[i])
p.hash = 0
self.ll.ll_strhash(p) # precompute the hash
self.CACHE[value] = p
return p
def make_iterator_repr(self, variant=None):
if variant is not None:
raise TyperError("unsupported %r iterator over a str/unicode" %
(variant,))
return self.repr.iterator_repr
def can_ll_be_null(self, s_value):
# XXX unicode
if self is string_repr:
return s_value.can_be_none()
else:
return True # for CharRepr/UniCharRepr subclasses,
# where NULL is always valid: it is chr(0)
def _list_length_items(self, hop, v_lst, LIST):
LIST = LIST.TO
v_length = hop.gendirectcall(LIST.ll_length, v_lst)
v_items = hop.gendirectcall(LIST.ll_items, v_lst)
return v_length, v_items
class StringRepr(BaseLLStringRepr, AbstractStringRepr):
lowleveltype = Ptr(STR)
basetype = str
base = Char
CACHE = CONST_STR_CACHE
def __init__(self, *args):
AbstractStringRepr.__init__(self, *args)
self.ll = LLHelpers
self.malloc = mallocstr
def ll_decode_latin1(self, value):
lgt = len(value.chars)
s = mallocunicode(lgt)
for i in range(lgt):
s.chars[i] = cast_primitive(UniChar, value.chars[i])
return s
class UnicodeRepr(BaseLLStringRepr, AbstractUnicodeRepr):
lowleveltype = Ptr(UNICODE)
basetype = basestring
base = UniChar
CACHE = CONST_UNICODE_CACHE
def __init__(self, *args):
AbstractUnicodeRepr.__init__(self, *args)
self.ll = LLHelpers
self.malloc = mallocunicode
@jit.elidable
def ll_str(self, s):
# XXX crazy that this is here, but I don't want to break
# rmodel logic
if not s:
return self.ll.ll_constant('None')
lgt = len(s.chars)
result = mallocstr(lgt)
for i in range(lgt):
c = s.chars[i]
if ord(c) > 127:
raise UnicodeEncodeError("character not in ascii range")
result.chars[i] = cast_primitive(Char, c)
return result
@jit.elidable
def ll_unicode(self, s):
if s:
return s
else:
return self.ll.ll_constant_unicode(u'None')
@jit.elidable
def ll_encode_latin1(self, s):
length = len(s.chars)
result = mallocstr(length)
for i in range(length):
c = s.chars[i]
if ord(c) > 255:
raise UnicodeEncodeError("character not in latin1 range")
result.chars[i] = cast_primitive(Char, c)
return result
class CharRepr(AbstractCharRepr, StringRepr):
lowleveltype = Char
class UniCharRepr(AbstractUniCharRepr, UnicodeRepr):
lowleveltype = UniChar
# ____________________________________________________________
#
# Low-level methods. These can be run for testing, but are meant to
# be direct_call'ed from rtyped flow graphs, which means that they will
# get flowed and annotated, mostly with SomePtr.
#
FAST_COUNT = 0
FAST_FIND = 1
FAST_RFIND = 2
from rpython.rlib.rarithmetic import LONG_BIT as BLOOM_WIDTH
def bloom_add(mask, c):
return mask | (1 << (ord(c) & (BLOOM_WIDTH - 1)))
def bloom(mask, c):
return mask & (1 << (ord(c) & (BLOOM_WIDTH - 1)))
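# Small illustration (not from the original): the mask is a one-word Bloom filter
# over the low bits of each character, so membership tests may give false
# positives but never false negatives. E.g. on a 64-bit build (BLOOM_WIDTH == 64):
#   mask = bloom_add(0, 'a')      # sets bit ord('a') & 63 == 33
#   bloom(mask, 'a')  -> nonzero  ("maybe present")
#   bloom(mask, 'b')  -> 0        (definitely absent)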
class LLHelpers(AbstractLLHelpers):
from rpython.rtyper.annlowlevel import llstr, llunicode
@staticmethod
@jit.elidable
def ll_str_mul(s, times):
if times < 0:
times = 0
try:
size = ovfcheck(len(s.chars) * times)
except OverflowError:
raise MemoryError
newstr = s.malloc(size)
i = 0
if i < size:
s.copy_contents(s, newstr, 0, 0, len(s.chars))
i += len(s.chars)
while i < size:
if i <= size - i:
j = i
else:
j = size - i
s.copy_contents(newstr, newstr, 0, i, j)
i += j
return newstr
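    # Illustrative trace (not in the original): repeating a 2-char string 5 times
    # gives size == 10. The source is copied once into [0:2]; the loop then copies
    # the already-filled prefix onto the tail in growing chunks, [0:2] -> [2:4]
    # (j == 2), [0:4] -> [4:8] (j == 4), [0:2] -> [8:10] (j == size - i == 2),
    # i.e. each pass copies min(i, size - i) characters, keeping the work linear.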
@staticmethod
@jit.elidable
def ll_char_mul(ch, times):
if typeOf(ch) is Char:
malloc = mallocstr
else:
malloc = mallocunicode
if times < 0:
times = 0
newstr = malloc(times)
j = 0
# XXX we can use memset here, not sure how useful this is
while j < times:
newstr.chars[j] = ch
j += 1
return newstr
@staticmethod
def ll_strlen(s):
return len(s.chars)
@staticmethod
@signature(types.any(), types.int(), returns=types.any())
def ll_stritem_nonneg(s, i):
chars = s.chars
ll_assert(i >= 0, "negative str getitem index")
ll_assert(i < len(chars), "str getitem index out of bound")
return chars[i]
@staticmethod
def ll_chr2str(ch):
if typeOf(ch) is Char:
malloc = mallocstr
else:
malloc = mallocunicode
s = malloc(1)
s.chars[0] = ch
return s
# @jit.look_inside_iff(lambda str: jit.isconstant(len(str.chars)) and len(str.chars) == 1)
@staticmethod
@jit.oopspec("str.str2unicode(str)")
def ll_str2unicode(str):
lgt = len(str.chars)
s = mallocunicode(lgt)
for i in range(lgt):
if ord(str.chars[i]) > 127:
raise UnicodeDecodeError
s.chars[i] = cast_primitive(UniChar, str.chars[i])
return s
@staticmethod
def ll_str2bytearray(str):
from rpython.rtyper.lltypesystem.rbytearray import BYTEARRAY
lgt = len(str.chars)
b = malloc(BYTEARRAY, lgt)
for i in range(lgt):
b.chars[i] = str.chars[i]
return b
@staticmethod
def ll_strhash(s):
if s:
return jit.conditional_call_elidable(s.hash,
LLHelpers._ll_strhash, s)
else:
return 0
@staticmethod
@dont_inline
@jit.dont_look_inside
def _ll_strhash(s):
        # unlike CPython, there is no reason to avoid returning -1,
# but our malloc initializes the memory to zero, so we use zero as the
# special non-computed-yet value. Also, jit.conditional_call_elidable
# always checks for zero, for now.
x = ll_hash_string(s)
if x == 0:
x = 29872897
s.hash = x
return x
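    # Sketch of the caching protocol (informal): ll_strhash() reads s.hash and only
    # falls back to _ll_strhash() while it is still 0, so a string whose real
    # ll_hash_string() value happens to be 0 is stored as 29872897 instead; after
    # the first computation every later gethash() is a plain field read.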
@staticmethod
def ll_length(s):
return len(s.chars)
@staticmethod
def ll_strfasthash(s):
ll_assert(s.hash != 0, "ll_strfasthash: hash==0")
return s.hash # assumes that the hash is already computed
@staticmethod
@jit.elidable
@jit.oopspec('stroruni.concat(s1, s2)')
def ll_strconcat(s1, s2):
len1 = s1.length()
len2 = s2.length()
# a single '+' like this is allowed to overflow: it gets
# a negative result, and the gc will complain
# the typechecks below are if TP == BYTEARRAY
if typeOf(s1) == Ptr(STR):
newstr = s2.malloc(len1 + len2)
newstr.copy_contents_from_str(s1, newstr, 0, 0, len1)
else:
newstr = s1.malloc(len1 + len2)
newstr.copy_contents(s1, newstr, 0, 0, len1)
if typeOf(s2) == Ptr(STR):
newstr.copy_contents_from_str(s2, newstr, 0, len1, len2)
else:
newstr.copy_contents(s2, newstr, 0, len1, len2)
return newstr
@staticmethod
@jit.elidable
def ll_strip(s, ch, left, right):
s_len = len(s.chars)
if s_len == 0:
return s.empty()
lpos = 0
rpos = s_len - 1
if left:
while lpos <= rpos and s.chars[lpos] == ch:
lpos += 1
if right:
while lpos <= rpos and s.chars[rpos] == ch:
rpos -= 1
if rpos < lpos:
return s.empty()
r_len = rpos - lpos + 1
result = s.malloc(r_len)
s.copy_contents(s, result, lpos, 0, r_len)
return result
@staticmethod
@jit.elidable
def ll_strip_default(s, left, right):
s_len = len(s.chars)
if s_len == 0:
return s.empty()
lpos = 0
rpos = s_len - 1
if left:
while lpos <= rpos and s.chars[lpos].isspace():
lpos += 1
if right:
while lpos <= rpos and s.chars[rpos].isspace():
rpos -= 1
if rpos < lpos:
return s.empty()
r_len = rpos - lpos + 1
result = s.malloc(r_len)
s.copy_contents(s, result, lpos, 0, r_len)
return result
@staticmethod
@jit.elidable
def ll_strip_multiple(s, s2, left, right):
s_len = len(s.chars)
if s_len == 0:
return s.empty()
lpos = 0
rpos = s_len - 1
if left:
while lpos <= rpos and LLHelpers.ll_contains(s2, s.chars[lpos]):
lpos += 1
if right:
while lpos <= rpos and LLHelpers.ll_contains(s2, s.chars[rpos]):
rpos -= 1
if rpos < lpos:
return s.empty()
r_len = rpos - lpos + 1
result = s.malloc(r_len)
s.copy_contents(s, result, lpos, 0, r_len)
return result
@staticmethod
@jit.elidable
def ll_upper(s):
s_chars = s.chars
s_len = len(s_chars)
if s_len == 0:
return s.empty()
i = 0
result = mallocstr(s_len)
# ^^^^^^^^^ specifically to explode on unicode
while i < s_len:
result.chars[i] = LLHelpers.ll_upper_char(s_chars[i])
i += 1
return result
@staticmethod
@jit.elidable
def ll_lower(s):
s_chars = s.chars
s_len = len(s_chars)
if s_len == 0:
return s.empty()
i = 0
result = mallocstr(s_len)
# ^^^^^^^^^ specifically to explode on unicode
while i < s_len:
result.chars[i] = LLHelpers.ll_lower_char(s_chars[i])
i += 1
return result
@staticmethod
def ll_join(s, length, items):
s_chars = s.chars
s_len = len(s_chars)
num_items = length
if num_items == 0:
return s.empty()
itemslen = 0
i = 0
while i < num_items:
try:
itemslen = ovfcheck(itemslen + len(items[i].chars))
except OverflowError:
raise MemoryError
i += 1
try:
seplen = ovfcheck(s_len * (num_items - 1))
except OverflowError:
raise MemoryError
# a single '+' at the end is allowed to overflow: it gets
# a negative result, and the gc will complain
result = s.malloc(itemslen + seplen)
res_index = len(items[0].chars)
s.copy_contents(items[0], result, 0, 0, res_index)
i = 1
while i < num_items:
s.copy_contents(s, result, 0, res_index, s_len)
res_index += s_len
lgt = len(items[i].chars)
s.copy_contents(items[i], result, 0, res_index, lgt)
res_index += lgt
i += 1
return result
@staticmethod
@jit.elidable
@jit.oopspec('stroruni.cmp(s1, s2)')
def ll_strcmp(s1, s2):
if not s1 and not s2:
return True
if not s1 or not s2:
return False
chars1 = s1.chars
chars2 = s2.chars
len1 = len(chars1)
len2 = len(chars2)
if len1 < len2:
cmplen = len1
else:
cmplen = len2
i = 0
while i < cmplen:
diff = ord(chars1[i]) - ord(chars2[i])
if diff != 0:
return diff
i += 1
return len1 - len2
@staticmethod
@jit.elidable
@jit.oopspec('stroruni.equal(s1, s2)')
def ll_streq(s1, s2):
if s1 == s2: # also if both are NULLs
return True
if not s1 or not s2:
return False
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 != len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
while j < len1:
if chars1[j] != chars2[j]:
return False
j += 1
return True
@staticmethod
@jit.elidable
def ll_startswith(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 < len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
while j < len2:
if chars1[j] != chars2[j]:
return False
j += 1
return True
@staticmethod
def ll_startswith_char(s, ch):
if not len(s.chars):
return False
return s.chars[0] == ch
@staticmethod
@jit.elidable
def ll_endswith(s1, s2):
len1 = len(s1.chars)
len2 = len(s2.chars)
if len1 < len2:
return False
j = 0
chars1 = s1.chars
chars2 = s2.chars
offset = len1 - len2
while j < len2:
if chars1[offset + j] != chars2[j]:
return False
j += 1
return True
@staticmethod
def ll_endswith_char(s, ch):
if not len(s.chars):
return False
return s.chars[len(s.chars) - 1] == ch
@staticmethod
@jit.elidable
@signature(types.any(), types.any(), types.int(), types.int(), returns=types.int())
def ll_find_char(s, ch, start, end):
i = start
if end > len(s.chars):
end = len(s.chars)
while i < end:
if s.chars[i] == ch:
return i
i += 1
return -1
@staticmethod
@jit.elidable
@signature(types.any(), types.any(), types.int(), types.int(), returns=types.int())
def ll_rfind_char(s, ch, start, end):
if end > len(s.chars):
end = len(s.chars)
i = end
while i > start:
i -= 1
if s.chars[i] == ch:
return i
return -1
@staticmethod
@jit.elidable
def ll_count_char(s, ch, start, end):
count = 0
i = start
if end > len(s.chars):
end = len(s.chars)
while i < end:
if s.chars[i] == ch:
count += 1
i += 1
return count
@staticmethod
@signature(types.any(), types.any(), types.int(), types.int(), returns=types.int())
def ll_find(s1, s2, start, end):
if start < 0:
start = 0
if end > len(s1.chars):
end = len(s1.chars)
if end - start < 0:
return -1
m = len(s2.chars)
if m == 1:
return LLHelpers.ll_find_char(s1, s2.chars[0], start, end)
return LLHelpers.ll_search(s1, s2, start, end, FAST_FIND)
@staticmethod
@signature(types.any(), types.any(), types.int(), types.int(), returns=types.int())
def ll_rfind(s1, s2, start, end):
if start < 0:
start = 0
if end > len(s1.chars):
end = len(s1.chars)
if end - start < 0:
return -1
m = len(s2.chars)
if m == 1:
return LLHelpers.ll_rfind_char(s1, s2.chars[0], start, end)
return LLHelpers.ll_search(s1, s2, start, end, FAST_RFIND)
@classmethod
def ll_count(cls, s1, s2, start, end):
if start < 0:
start = 0
if end > len(s1.chars):
end = len(s1.chars)
if end - start < 0:
return 0
m = len(s2.chars)
if m == 1:
return cls.ll_count_char(s1, s2.chars[0], start, end)
res = cls.ll_search(s1, s2, start, end, FAST_COUNT)
assert res >= 0
return res
@staticmethod
@jit.elidable
def ll_search(s1, s2, start, end, mode):
count = 0
n = end - start
m = len(s2.chars)
if m == 0:
if mode == FAST_COUNT:
return end - start + 1
elif mode == FAST_RFIND:
return end
else:
return start
w = n - m
if w < 0:
if mode == FAST_COUNT:
return 0
return -1
mlast = m - 1
skip = mlast - 1
mask = 0
if mode != FAST_RFIND:
for i in range(mlast):
mask = bloom_add(mask, s2.chars[i])
if s2.chars[i] == s2.chars[mlast]:
skip = mlast - i - 1
mask = bloom_add(mask, s2.chars[mlast])
i = start - 1
while i + 1 <= start + w:
i += 1
if s1.chars[i + m - 1] == s2.chars[m - 1]:
for j in range(mlast):
if s1.chars[i + j] != s2.chars[j]:
break
else:
if mode != FAST_COUNT:
return i
count += 1
i += mlast
continue
if i + m < len(s1.chars):
c = s1.chars[i + m]
else:
c = '\0'
if not bloom(mask, c):
i += m
else:
i += skip
else:
if i + m < len(s1.chars):
c = s1.chars[i + m]
else:
c = '\0'
if not bloom(mask, c):
i += m
else:
mask = bloom_add(mask, s2.chars[0])
for i in range(mlast, 0, -1):
mask = bloom_add(mask, s2.chars[i])
if s2.chars[i] == s2.chars[0]:
skip = i - 1
i = start + w + 1
while i - 1 >= start:
i -= 1
if s1.chars[i] == s2.chars[0]:
                    for j in range(mlast, 0, -1):
if s1.chars[i + j] != s2.chars[j]:
break
else:
return i
if i - 1 >= 0 and not bloom(mask, s1.chars[i - 1]):
i -= m
else:
i -= skip
else:
if i - 1 >= 0 and not bloom(mask, s1.chars[i - 1]):
i -= m
if mode != FAST_COUNT:
return -1
return count
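    # Informal examples of the three modes (the literals are only shorthand; at
    # runtime s1/s2 are low-level STR objects, not Python strings):
    #   ll_search("abcabc", "bc", 0, 6, FAST_FIND)  -> 1   (first match)
    #   ll_search("abcabc", "bc", 0, 6, FAST_RFIND) -> 4   (last match)
    #   ll_search("abcabc", "bc", 0, 6, FAST_COUNT) -> 2   (non-overlapping count)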
@staticmethod
@signature(types.int(), types.any(), returns=types.any())
@jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic(
items, length))
def ll_join_strs(length, items):
# Special case for length 1 items, helps both the JIT and other code
if length == 1:
return items[0]
num_items = length
itemslen = 0
i = 0
while i < num_items:
try:
itemslen = ovfcheck(itemslen + len(items[i].chars))
except OverflowError:
raise MemoryError
i += 1
if typeOf(items).TO.OF.TO == STR:
malloc = mallocstr
copy_contents = copy_string_contents
else:
malloc = mallocunicode
copy_contents = copy_unicode_contents
result = malloc(itemslen)
res_index = 0
i = 0
while i < num_items:
item_chars = items[i].chars
item_len = len(item_chars)
copy_contents(items[i], result, 0, res_index, item_len)
res_index += item_len
i += 1
return result
@staticmethod
@jit.look_inside_iff(lambda length, chars, RES: jit.isconstant(length) and jit.isvirtual(chars))
def ll_join_chars(length, chars, RES):
# no need to optimize this, will be replaced by string builder
# at some point soon
num_chars = length
if RES is StringRepr.lowleveltype:
target = Char
malloc = mallocstr
else:
target = UniChar
malloc = mallocunicode
result = malloc(num_chars)
res_chars = result.chars
i = 0
while i < num_chars:
res_chars[i] = cast_primitive(target, chars[i])
i += 1
return result
@staticmethod
@jit.oopspec('stroruni.slice(s1, start, stop)')
@signature(types.any(), types.int(), types.int(), returns=types.any())
@jit.elidable
def _ll_stringslice(s1, start, stop):
lgt = stop - start
assert start >= 0
        # If start > stop, return an empty string. This can happen if the start
# is greater than the length of the string. Use < instead of <= to avoid
# creating another path for the JIT when start == stop.
if lgt < 0:
return s1.empty()
newstr = s1.malloc(lgt)
s1.copy_contents(s1, newstr, start, 0, lgt)
return newstr
@staticmethod
def ll_stringslice_startonly(s1, start):
return LLHelpers._ll_stringslice(s1, start, len(s1.chars))
@staticmethod
@signature(types.any(), types.int(), types.int(), returns=types.any())
def ll_stringslice_startstop(s1, start, stop):
if jit.we_are_jitted():
if stop > len(s1.chars):
stop = len(s1.chars)
else:
if stop >= len(s1.chars):
if start == 0:
return s1
stop = len(s1.chars)
return LLHelpers._ll_stringslice(s1, start, stop)
@staticmethod
def ll_stringslice_minusone(s1):
newlen = len(s1.chars) - 1
return LLHelpers._ll_stringslice(s1, 0, newlen)
@staticmethod
def ll_split_chr(LIST, s, c, max):
chars = s.chars
strlen = len(chars)
count = 1
i = 0
if max == 0:
i = strlen
while i < strlen:
if chars[i] == c:
count += 1
if max >= 0 and count > max:
break
i += 1
res = LIST.ll_newlist(count)
items = res.ll_items()
i = 0
j = 0
resindex = 0
if max == 0:
j = strlen
while j < strlen:
if chars[j] == c:
item = items[resindex] = s.malloc(j - i)
item.copy_contents(s, item, i, 0, j - i)
resindex += 1
i = j + 1
if max >= 0 and resindex >= max:
j = strlen
break
j += 1
item = items[resindex] = s.malloc(j - i)
item.copy_contents(s, item, i, 0, j - i)
return res
@staticmethod
def ll_split(LIST, s, c, max):
count = 1
if max == -1:
max = len(s.chars)
pos = 0
last = len(s.chars)
markerlen = len(c.chars)
pos = s.find(c, 0, last)
while pos >= 0 and count <= max:
pos = s.find(c, pos + markerlen, last)
count += 1
res = LIST.ll_newlist(count)
items = res.ll_items()
pos = 0
count = 0
pos = s.find(c, 0, last)
prev_pos = 0
if pos < 0:
items[0] = s
return res
while pos >= 0 and count < max:
item = items[count] = s.malloc(pos - prev_pos)
item.copy_contents(s, item, prev_pos, 0, pos -
prev_pos)
count += 1
prev_pos = pos + markerlen
pos = s.find(c, pos + markerlen, last)
item = items[count] = s.malloc(last - prev_pos)
item.copy_contents(s, item, prev_pos, 0, last - prev_pos)
return res
@staticmethod
def ll_rsplit_chr(LIST, s, c, max):
chars = s.chars
strlen = len(chars)
count = 1
i = 0
if max == 0:
i = strlen
while i < strlen:
if chars[i] == c:
count += 1
if max >= 0 and count > max:
break
i += 1
res = LIST.ll_newlist(count)
items = res.ll_items()
i = strlen
j = strlen
resindex = count - 1
assert resindex >= 0
if max == 0:
j = 0
while j > 0:
j -= 1
if chars[j] == c:
item = items[resindex] = s.malloc(i - j - 1)
item.copy_contents(s, item, j + 1, 0, i - j - 1)
resindex -= 1
i = j
if resindex == 0:
j = 0
break
item = items[resindex] = s.malloc(i - j)
item.copy_contents(s, item, j, 0, i - j)
return res
@staticmethod
def ll_rsplit(LIST, s, c, max):
count = 1
if max == -1:
max = len(s.chars)
pos = len(s.chars)
markerlen = len(c.chars)
pos = s.rfind(c, 0, pos)
while pos >= 0 and count <= max:
pos = s.rfind(c, 0, pos - markerlen)
count += 1
res = LIST.ll_newlist(count)
items = res.ll_items()
pos = 0
pos = len(s.chars)
prev_pos = pos
pos = s.rfind(c, 0, pos)
if pos < 0:
items[0] = s
return res
count -= 1
while pos >= 0 and count > 0:
item = items[count] = s.malloc(prev_pos - pos - markerlen)
item.copy_contents(s, item, pos + markerlen, 0,
prev_pos - pos - markerlen)
count -= 1
prev_pos = pos
pos = s.rfind(c, 0, pos)
item = items[count] = s.malloc(prev_pos)
item.copy_contents(s, item, 0, 0, prev_pos)
return res
@staticmethod
@jit.elidable
def ll_replace_chr_chr(s, c1, c2):
length = len(s.chars)
newstr = s.malloc(length)
src = s.chars
dst = newstr.chars
j = 0
while j < length:
c = src[j]
if c == c1:
c = c2
dst[j] = c
j += 1
return newstr
@staticmethod
@jit.elidable
def ll_contains(s, c):
chars = s.chars
strlen = len(chars)
i = 0
while i < strlen:
if chars[i] == c:
return True
i += 1
return False
@staticmethod
@jit.elidable
def ll_int(s, base):
if not 2 <= base <= 36:
raise ValueError
chars = s.chars
strlen = len(chars)
i = 0
#XXX: only space is allowed as white space for now
while i < strlen and chars[i] == ' ':
i += 1
if not i < strlen:
raise ValueError
#check sign
sign = 1
if chars[i] == '-':
sign = -1
i += 1
elif chars[i] == '+':
i += 1
# skip whitespaces between sign and digits
while i < strlen and chars[i] == ' ':
i += 1
#now get digits
val = 0
oldpos = i
while i < strlen:
c = ord(chars[i])
if ord('a') <= c <= ord('z'):
digit = c - ord('a') + 10
elif ord('A') <= c <= ord('Z'):
digit = c - ord('A') + 10
elif ord('0') <= c <= ord('9'):
digit = c - ord('0')
else:
break
if digit >= base:
break
val = val * base + digit
i += 1
if i == oldpos:
raise ValueError # catch strings like '+' and '+ '
#skip trailing whitespace
while i < strlen and chars[i] == ' ':
i += 1
if not i == strlen:
raise ValueError
return sign * val
# interface to build strings:
# x = ll_build_start(n)
# ll_build_push(x, next_string, 0)
# ll_build_push(x, next_string, 1)
# ...
# ll_build_push(x, next_string, n-1)
# s = ll_build_finish(x)
@staticmethod
def ll_build_start(parts_count):
return malloc(TEMP, parts_count)
@staticmethod
def ll_build_push(builder, next_string, index):
builder[index] = next_string
@staticmethod
def ll_build_finish(builder):
return LLHelpers.ll_join_strs(len(builder), builder)
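    # Hypothetical usage of the builder interface described above (obtaining the
    # low-level strings via convert_const is just for illustration):
    #   x = LLHelpers.ll_build_start(2)
    #   LLHelpers.ll_build_push(x, string_repr.convert_const("foo"), 0)
    #   LLHelpers.ll_build_push(x, string_repr.convert_const("bar"), 1)
    #   LLHelpers.ll_build_finish(x)   # -> low-level string "foobar"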
@staticmethod
@specialize.memo()
def ll_constant(s):
return string_repr.convert_const(s)
@staticmethod
@specialize.memo()
def ll_constant_unicode(s):
return unicode_repr.convert_const(s)
@classmethod
def do_stringformat(cls, hop, sourcevarsrepr):
s_str = hop.args_s[0]
assert s_str.is_constant()
is_unicode = isinstance(s_str, annmodel.SomeUnicodeString)
if is_unicode:
TEMPBUF = TEMP_UNICODE
else:
TEMPBUF = TEMP
s = s_str.const
things = cls.parse_fmt_string(s)
size = inputconst(Signed, len(things)) # could be unsigned?
cTEMP = inputconst(Void, TEMPBUF)
cflags = inputconst(Void, {'flavor': 'gc'})
vtemp = hop.genop("malloc_varsize", [cTEMP, cflags, size],
resulttype=Ptr(TEMPBUF))
argsiter = iter(sourcevarsrepr)
from rpython.rtyper.rclass import InstanceRepr
for i, thing in enumerate(things):
if isinstance(thing, tuple):
code = thing[0]
vitem, r_arg = argsiter.next()
if not hasattr(r_arg, 'll_str'):
raise TyperError("ll_str unsupported for: %r" % r_arg)
if code == 's':
if is_unicode:
# only UniCharRepr and UnicodeRepr has it so far
vchunk = hop.gendirectcall(r_arg.ll_unicode, vitem)
else:
vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
elif code == 'r' and isinstance(r_arg, InstanceRepr):
vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
elif code == 'd':
assert isinstance(r_arg, IntegerRepr)
#vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
vchunk = hop.gendirectcall(ll_str.ll_int2dec, vitem)
elif code == 'f':
#assert isinstance(r_arg, FloatRepr)
vchunk = hop.gendirectcall(r_arg.ll_str, vitem)
elif code == 'x':
assert isinstance(r_arg, IntegerRepr)
vchunk = hop.gendirectcall(ll_str.ll_int2hex, vitem,
inputconst(Bool, False))
elif code == 'o':
assert isinstance(r_arg, IntegerRepr)
vchunk = hop.gendirectcall(ll_str.ll_int2oct, vitem,
inputconst(Bool, False))
else:
raise TyperError("%%%s is not RPython" % (code,))
else:
if is_unicode:
vchunk = inputconst(unicode_repr, thing)
else:
vchunk = inputconst(string_repr, thing)
i = inputconst(Signed, i)
if is_unicode and vchunk.concretetype != Ptr(UNICODE):
# if we are here, one of the ll_str.* functions returned some
# STR, so we convert it to unicode. It's a bit suboptimal
# because we do one extra copy.
vchunk = hop.gendirectcall(cls.ll_str2unicode, vchunk)
hop.genop('setarrayitem', [vtemp, i, vchunk])
hop.exception_cannot_occur() # to ignore the ZeroDivisionError of '%'
return hop.gendirectcall(cls.ll_join_strs, size, vtemp)
@staticmethod
@jit.dont_look_inside
def ll_string2list(RESLIST, src):
length = len(src.chars)
lst = RESLIST.ll_newlist(length)
dst = lst.ll_items()
SRC = typeOf(src).TO # STR or UNICODE
DST = typeOf(dst).TO # GcArray
assert DST.OF is SRC.chars.OF
# from here, no GC operations can happen
asrc = llmemory.cast_ptr_to_adr(src) + (
llmemory.offsetof(SRC, 'chars') +
llmemory.itemoffsetof(SRC.chars, 0))
adst = llmemory.cast_ptr_to_adr(dst) + llmemory.itemoffsetof(DST, 0)
llmemory.raw_memcopy(asrc, adst, llmemory.sizeof(DST.OF) * length)
# end of "no GC" section
keepalive_until_here(src)
keepalive_until_here(dst)
return lst
TEMP = GcArray(Ptr(STR))
TEMP_UNICODE = GcArray(Ptr(UNICODE))
# ____________________________________________________________
STR.become(GcStruct('rpy_string', ('hash', Signed),
('chars', Array(Char, hints={'immutable': True,
'extra_item_after_alloc': 1})),
adtmeths={'malloc' : staticAdtMethod(mallocstr),
'empty' : staticAdtMethod(emptystrfun),
'copy_contents' : staticAdtMethod(copy_string_contents),
'copy_contents_from_str' : staticAdtMethod(copy_string_contents),
'gethash': LLHelpers.ll_strhash,
'length': LLHelpers.ll_length,
'find': LLHelpers.ll_find,
'rfind': LLHelpers.ll_rfind},
hints={'remove_hash': True}))
UNICODE.become(GcStruct('rpy_unicode', ('hash', Signed),
('chars', Array(UniChar, hints={'immutable': True})),
adtmeths={'malloc' : staticAdtMethod(mallocunicode),
'empty' : staticAdtMethod(emptyunicodefun),
'copy_contents' : staticAdtMethod(copy_unicode_contents),
'copy_contents_from_str' : staticAdtMethod(copy_unicode_contents),
'gethash': LLHelpers.ll_strhash,
'length': LLHelpers.ll_length},
hints={'remove_hash': True}))
# TODO: make the public interface of the rstr module cleaner
ll_strconcat = LLHelpers.ll_strconcat
ll_join = LLHelpers.ll_join
ll_str2unicode = LLHelpers.ll_str2unicode
do_stringformat = LLHelpers.do_stringformat
string_repr = StringRepr()
char_repr = CharRepr()
unichar_repr = UniCharRepr()
char_repr.ll = LLHelpers
unichar_repr.ll = LLHelpers
unicode_repr = UnicodeRepr()
StringRepr.repr = string_repr
UnicodeRepr.repr = unicode_repr
UniCharRepr.repr = unicode_repr
UniCharRepr.char_repr = unichar_repr
UnicodeRepr.char_repr = unichar_repr
CharRepr.char_repr = char_repr
StringRepr.char_repr = char_repr
class BaseStringIteratorRepr(AbstractStringIteratorRepr):
def __init__(self):
self.ll_striter = ll_striter
self.ll_strnext = ll_strnext
self.ll_getnextindex = ll_getnextindex
class StringIteratorRepr(BaseStringIteratorRepr):
external_item_repr = char_repr
lowleveltype = Ptr(GcStruct('stringiter',
('string', string_repr.lowleveltype),
('length', Signed),
('index', Signed)))
class UnicodeIteratorRepr(BaseStringIteratorRepr):
external_item_repr = unichar_repr
lowleveltype = Ptr(GcStruct('unicodeiter',
('string', unicode_repr.lowleveltype),
('length', Signed),
('index', Signed)))
def ll_striter(string):
if typeOf(string) == string_repr.lowleveltype:
TP = string_repr.iterator_repr.lowleveltype.TO
elif typeOf(string) == unicode_repr.lowleveltype:
TP = unicode_repr.iterator_repr.lowleveltype.TO
else:
raise TypeError("Unknown string type %s" % (typeOf(string),))
iter = malloc(TP)
iter.string = string
iter.length = len(string.chars) # load this value only once
iter.index = 0
return iter
def ll_strnext(iter):
index = iter.index
if index >= iter.length:
raise StopIteration
iter.index = index + 1
return iter.string.chars[index]
def ll_getnextindex(iter):
return iter.index
string_repr.iterator_repr = StringIteratorRepr()
unicode_repr.iterator_repr = UnicodeIteratorRepr()
@specialize.memo()
def conststr(s):
return string_repr.convert_const(s)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import json
import logging
import os
import re
import unittest
from contextlib import redirect_stdout
from datetime import datetime
from unittest import mock
import pytest
from parameterized import parameterized
from airflow.cli import cli_parser
from airflow.cli.commands import task_command
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagRunNotFound
from airflow.models import DagBag, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = days_ago(1)
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def reset(dag_id):
with create_session() as session:
tis = session.query(TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
runs = session.query(DagRun).filter_by(dag_id=dag_id)
runs.delete()
# TODO: Check if these tests need side effects - locally there's a missing DAG
class TestCliTasks(unittest.TestCase):
run_id = 'TEST_RUN_ID'
dag_id = 'example_python_operator'
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag(include_examples=True)
cls.parser = cli_parser.get_parser()
clear_db_runs()
cls.dag = cls.dagbag.get_dag(cls.dag_id)
cls.dag_run = cls.dag.create_dagrun(
state=State.NONE, run_id=cls.run_id, run_type=DagRunType.MANUAL, execution_date=DEFAULT_DATE
)
@classmethod
def tearDownClass(cls) -> None:
clear_db_runs()
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags:
args = self.parser.parse_args(['tasks', 'list', dag_id])
task_command.task_list(args)
args = self.parser.parse_args(['tasks', 'list', 'example_bash_operator', '--tree'])
task_command.task_list(args)
def test_test(self):
"""Test the `airflow test` command"""
args = self.parser.parse_args(
["tasks", "test", "example_python_operator", 'print_the_context', '2018-01-01']
)
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_test(args)
# Check that prints, and log messages, are shown
assert "'example_python_operator__print_the_context__20180101'" in stdout.getvalue()
def test_test_with_existing_dag_run(self):
"""Test the `airflow test` command"""
task_id = 'print_the_context'
args = self.parser.parse_args(["tasks", "test", self.dag_id, task_id, DEFAULT_DATE.isoformat()])
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_test(args)
# Check that prints, and log messages, are shown
assert f"Marking task as SUCCESS. dag_id={self.dag_id}, task_id={task_id}" in stdout.getvalue()
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_run_with_existing_dag_run_id(self, mock_local_job):
"""
Test that we can run with existing dag_run_id
"""
task0_id = self.dag.task_ids[0]
args0 = [
'tasks',
'run',
'--ignore-all-dependencies',
'--local',
self.dag_id,
task0_id,
self.run_id,
]
task_command.task_run(self.parser.parse_args(args0), dag=self.dag)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
ignore_all_deps=True,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pickle_id=None,
pool=None,
external_executor_id=None,
)
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_run_raises_when_theres_no_dagrun(self, mock_local_job):
"""
        Test that run raises when there's a run_id but no dag_run
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag(dag_id)
task0_id = 'test_run_dependent_task'
run_id = 'TEST_RUN_ID'
args0 = [
'tasks',
'run',
'--ignore-all-dependencies',
'--local',
dag_id,
task0_id,
run_id,
]
with self.assertRaises(DagRunNotFound):
task_command.task_run(self.parser.parse_args(args0), dag=dag)
def test_cli_test_with_params(self):
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'run_this',
'--task-params',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'also_run_this',
'--task-params',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
def test_cli_test_with_env_vars(self):
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_test(
self.parser.parse_args(
[
'tasks',
'test',
'example_passing_params_via_test_command',
'env_var_test_task',
'--env-vars',
'{"foo":"bar"}',
DEFAULT_DATE.isoformat(),
]
)
)
output = stdout.getvalue()
assert 'foo=bar' in output
assert 'AIRFLOW_TEST_MODE=True' in output
@parameterized.expand(
[
("--ignore-all-dependencies",),
("--ignore-depends-on-past",),
("--ignore-dependencies",),
("--force",),
],
)
def test_cli_run_invalid_raw_option(self, option: str):
with pytest.raises(
AirflowException,
match="Option --raw does not work with some of the other options on this command.",
):
task_command.task_run(
self.parser.parse_args(
[ # type: ignore
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--raw',
option,
]
)
)
def test_cli_run_mutually_exclusive(self):
with pytest.raises(AirflowException, match="Option --raw and --local are mutually exclusive."):
task_command.task_run(
self.parser.parse_args(
[
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--raw',
'--local',
]
)
)
def test_task_render(self):
"""
tasks render should render and display templated fields for a given task
"""
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_render(
self.parser.parse_args(['tasks', 'render', 'tutorial', 'templated', '2016-01-01'])
)
output = stdout.getvalue()
assert 'echo "2016-01-01"' in output
assert 'echo "2016-01-08"' in output
assert 'echo "Parameter I passed in"' in output
def test_cli_run_when_pickle_and_dag_cli_method_selected(self):
"""
tasks run should raise an AirflowException when an invalid pickle_id is passed
"""
pickle_id = 'pickle_id'
with pytest.raises(
AirflowException,
match=re.escape("You cannot use the --pickle option when using DAG.cli() method."),
):
task_command.task_run(
self.parser.parse_args(
[
'tasks',
'run',
'example_bash_operator',
'runme_0',
DEFAULT_DATE.isoformat(),
'--pickle',
pickle_id,
]
),
self.dag,
)
def test_task_state(self):
task_command.task_state(
self.parser.parse_args(
['tasks', 'state', self.dag_id, 'print_the_context', DEFAULT_DATE.isoformat()]
)
)
def test_task_states_for_dag_run(self):
dag2 = DagBag().dags['example_python_operator']
task2 = dag2.get_task(task_id='print_the_context')
default_date2 = timezone.make_aware(datetime(2016, 1, 9))
dag2.clear()
dagrun = dag2.create_dagrun(
state=State.RUNNING,
execution_date=default_date2,
run_type=DagRunType.MANUAL,
external_trigger=True,
)
ti2 = TaskInstance(task2, dagrun.execution_date)
ti2.set_state(State.SUCCESS)
ti_start = ti2.start_date
ti_end = ti2.end_date
with redirect_stdout(io.StringIO()) as stdout:
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'example_python_operator',
default_date2.isoformat(),
'--output',
"json",
]
)
)
actual_out = json.loads(stdout.getvalue())
assert len(actual_out) == 1
assert actual_out[0] == {
'dag_id': 'example_python_operator',
'execution_date': '2016-01-09T00:00:00+00:00',
'task_id': 'print_the_context',
'state': 'success',
'start_date': ti_start.isoformat(),
'end_date': ti_end.isoformat(),
}
def test_task_states_for_dag_run_when_dag_run_not_exists(self):
"""
task_states_for_dag_run should raise DagRunNotFound when an invalid dag id is passed
"""
with pytest.raises(DagRunNotFound):
default_date2 = timezone.make_aware(datetime(2016, 1, 9))
task_command.task_states_for_dag_run(
self.parser.parse_args(
[
'tasks',
'states-for-dag-run',
'not_exists_dag',
default_date2.isoformat(),
'--output',
"json",
]
)
)
def test_subdag_clear(self):
args = self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator', '--yes'])
task_command.task_clear(args)
args = self.parser.parse_args(
['tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude-subdags']
)
task_command.task_clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
task_command.task_clear(args)
args = self.parser.parse_args(
['tasks', 'clear', 'example_subdag_operator.section-1', '--yes', '--exclude-parentdag']
)
task_command.task_clear(args)
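# Editorial sketch (not part of the original test module): every test above
# follows the same two-step pattern -- build an argv list with the shared
# parser, then hand the parsed Namespace to the matching task_command function:
#
#   args = self.parser.parse_args(
#       ['tasks', 'state', self.dag_id, 'print_the_context', DEFAULT_DATE.isoformat()]
#   )
#   task_command.task_state(args)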
# For this test, memory spins out of control on Python 3.6. TODO(potiuk): FIXME
@pytest.mark.quarantined
class TestLogsfromTaskRunCommand(unittest.TestCase):
def setUp(self) -> None:
self.dag_id = "test_logging_dag"
self.task_id = "test_task"
self.run_id = "test_run"
self.dag_path = os.path.join(ROOT_FOLDER, "dags", "test_logging_in_dag.py")
reset(self.dag_id)
self.execution_date = timezone.make_aware(datetime(2017, 1, 1))
self.execution_date_str = self.execution_date.isoformat()
self.task_args = ['tasks', 'run', self.dag_id, self.task_id, '--local', self.execution_date_str]
self.log_dir = conf.get('logging', 'base_log_folder')
self.log_filename = f"{self.dag_id}/{self.task_id}/{self.execution_date_str}/1.log"
self.ti_log_file_path = os.path.join(self.log_dir, self.log_filename)
self.parser = cli_parser.get_parser()
DagBag().get_dag(self.dag_id).create_dagrun(
run_id=self.run_id,
execution_date=self.execution_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
run_type=DagRunType.MANUAL,
)
root = self.root_logger = logging.getLogger()
self.root_handlers = root.handlers.copy()
self.root_filters = root.filters.copy()
self.root_level = root.level
try:
os.remove(self.ti_log_file_path)
except OSError:
pass
def tearDown(self) -> None:
root = self.root_logger
root.setLevel(self.root_level)
root.handlers[:] = self.root_handlers
root.filters[:] = self.root_filters
reset(self.dag_id)
try:
os.remove(self.ti_log_file_path)
except OSError:
pass
def assert_log_line(self, text, logs_list, expect_from_logging_mixin=False):
"""
Get Log Line and assert only 1 Entry exists with the given text. Also check that
"logging_mixin" line does not appear in that log line to avoid duplicate logging as below:
[2020-06-24 16:47:23,537] {logging_mixin.py:91} INFO - [2020-06-24 16:47:23,536] {python.py:135}
"""
log_lines = [log for log in logs_list if text in log]
assert len(log_lines) == 1
log_line = log_lines[0]
if not expect_from_logging_mixin:
# Logs from print statement still show with logging_mixing as filename
# Example: [2020-06-24 17:07:00,482] {logging_mixin.py:91} INFO - Log from Print statement
assert "logging_mixin.py" not in log_line
return log_line
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_external_executor_id_present_for_fork_run_task(self, mock_local_job):
naive_date = datetime(2016, 1, 1)
dag_id = 'test_run_fork_has_external_executor_id'
task0_id = 'test_run_fork_task'
dag = DagBag().get_dag(dag_id)
args_list = [
'tasks',
'run',
'--local',
dag_id,
task0_id,
naive_date.isoformat(),
]
args = self.parser.parse_args(args_list)
args.external_executor_id = "ABCD12345"
task_command.task_run(args, dag=dag)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
pickle_id=None,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=None,
external_executor_id="ABCD12345",
)
@mock.patch("airflow.cli.commands.task_command.LocalTaskJob")
def test_external_executor_id_present_for_process_run_task(self, mock_local_job):
naive_date = datetime(2016, 1, 1)
dag_id = 'test_run_process_has_external_executor_id'
task0_id = 'test_run_process_task'
dag = DagBag().get_dag(dag_id)
args_list = [
'tasks',
'run',
'--local',
dag_id,
task0_id,
naive_date.isoformat(),
]
args = self.parser.parse_args(args_list)
with mock.patch.dict(os.environ, {"external_executor_id": "ABCD12345"}):
task_command.task_run(args, dag=dag)
mock_local_job.assert_called_once_with(
task_instance=mock.ANY,
mark_success=False,
pickle_id=None,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=None,
external_executor_id="ABCD12345",
)
@unittest.skipIf(not hasattr(os, 'fork'), "Forking not available")
def test_logging_with_run_task(self):
# We are not using self.assertLogs as we want to verify what actually is stored in the Log file
# as that is what gets displayed
with conf_vars({('core', 'dags_folder'): self.dag_path}):
task_command.task_run(self.parser.parse_args(self.task_args))
with open(self.ti_log_file_path) as l_file:
logs = l_file.read()
print(logs)  # In case of a test failure this line shows the detailed log
logs_list = logs.splitlines()
assert "INFO - Started process" in logs
assert f"Subtask {self.task_id}" in logs
assert "standard_task_runner.py" in logs
assert (
f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
f"'{self.task_id}', '{self.execution_date_str}'," in logs
)
self.assert_log_line("Log from DAG Logger", logs_list)
self.assert_log_line("Log from TI Logger", logs_list)
self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
assert (
f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
f"task_id={self.task_id}, execution_date=20170101T000000" in logs
)
@mock.patch("airflow.task.task_runner.standard_task_runner.CAN_FORK", False)
def test_logging_with_run_task_subprocess(self):
# We are not using self.assertLogs as we want to verify what actually is stored in the Log file
# as that is what gets displayed
with conf_vars({('core', 'dags_folder'): self.dag_path}):
task_command.task_run(self.parser.parse_args(self.task_args))
with open(self.ti_log_file_path) as l_file:
logs = l_file.read()
print(logs)  # In case of a test failure this line shows the detailed log
logs_list = logs.splitlines()
assert f"Subtask {self.task_id}" in logs
assert "base_task_runner.py" in logs
self.assert_log_line("Log from DAG Logger", logs_list)
self.assert_log_line("Log from TI Logger", logs_list)
self.assert_log_line("Log from Print statement", logs_list, expect_from_logging_mixin=True)
assert (
f"INFO - Running: ['airflow', 'tasks', 'run', '{self.dag_id}', "
f"'{self.task_id}', '{self.execution_date_str}'," in logs
)
assert (
f"INFO - Marking task as SUCCESS. dag_id={self.dag_id}, "
f"task_id={self.task_id}, execution_date=20170101T000000" in logs
)
def test_log_file_template_with_run_task(self):
"""Verify that the taskinstance has the right context for log_filename_template"""
with mock.patch.object(task_command, "_run_task_by_selected_method"):
with conf_vars({('core', 'dags_folder'): self.dag_path}):
# increment the try_number of the task to be run
with create_session() as session:
ti = session.query(TaskInstance).filter_by(run_id=self.run_id).first()
ti.try_number = 1
log_file_path = os.path.join(os.path.dirname(self.ti_log_file_path), "2.log")
try:
task_command.task_run(self.parser.parse_args(self.task_args))
assert os.path.exists(log_file_path)
finally:
try:
os.remove(log_file_path)
except OSError:
pass
@mock.patch.object(task_command, "_run_task_by_selected_method")
def test_root_logger_restored(self, run_task_mock):
"""Verify that the root logging context is restored"""
logger = logging.getLogger("foo.bar")
def task_inner(*args, **kwargs):
logger.warning("redirected log message")
run_task_mock.side_effect = task_inner
config = {
('core', 'dags_folder'): self.dag_path,
('logging', 'logging_level'): "INFO",
}
with conf_vars(config):
with self.assertLogs(level=logging.WARNING) as captured:
logger.warning("not redirected")
task_command.task_run(self.parser.parse_args(self.task_args))
assert captured.output == ["WARNING:foo.bar:not redirected"]
assert self.root_logger.level == logging.WARNING
assert self.root_logger.handlers == self.root_handlers
@mock.patch.object(task_command, "_run_task_by_selected_method")
def test_disable_handler_modifying(self, run_task_mock):
"""If [core] donot_modify_handlers is set to True, the root logger is untouched"""
from airflow import settings
logger = logging.getLogger("foo.bar")
def task_inner(*args, **kwargs):
logger.warning("not redirected")
run_task_mock.side_effect = task_inner
config = {
('core', 'dags_folder'): self.dag_path,
('logging', 'logging_level'): "INFO",
}
old_value = settings.DONOT_MODIFY_HANDLERS
settings.DONOT_MODIFY_HANDLERS = True
with conf_vars(config):
with self.assertLogs(level=logging.WARNING) as captured:
task_command.task_run(self.parser.parse_args(self.task_args))
assert captured.output == ["WARNING:foo.bar:not redirected"]
settings.DONOT_MODIFY_HANDLERS = old_value
|
|
import ConfigParser
import errno
import socket
import os
import shutil
import tempfile
import platform
def platform_information(_linux_distribution=None):
""" detect platform information from remote host """
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian
debian_codenames = {
'8': 'jessie',
'7': 'wheezy',
'6': 'squeeze',
}
major_version = release.split('.')[0]
codename = debian_codenames.get(major_version, '')
# Support newer 'jessie/sid' or 'wheezy/sid' release strings: if 'sid' is
# buried in the minor part, use 'sid' as the codename, otherwise use the major part.
if not codename and '/' in release:
major, minor = release.split('/')
if minor == 'sid':
codename = minor
else:
codename = major
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
)
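# Illustrative example (not part of the original module): with a stubbed
# linux_distribution callable, the Debian fallback above fills in the codename
# from the major version, e.g.
#
#   platform_information(lambda: ('debian', '7.8', ''))
#   # -> ('debian', '7.8', 'wheezy')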
def machine_type():
""" detect machine type """
return platform.machine()
def write_sources_list(url, codename, filename='ceph.list'):
"""add deb repo to sources.list"""
repo_path = os.path.join('/etc/apt/sources.list.d', filename)
with file(repo_path, 'w') as f:
f.write('deb {url} {codename} main\n'.format(
url=url,
codename=codename,
))
def write_yum_repo(content, filename='ceph.repo'):
"""set the contents of repo file to /etc/yum.repos.d/"""
repo_path = os.path.join('/etc/yum.repos.d', filename)
write_file(repo_path, content)
def set_apt_priority(fqdn, path='/etc/apt/preferences.d/ceph.pref'):
template = "Package: *\nPin: origin {fqdn}\nPin-Priority: 999\n"
content = template.format(fqdn=fqdn)
with open(path, 'wb') as fout:
fout.write(content)
def set_repo_priority(sections, path='/etc/yum.repos.d/ceph.repo', priority='1'):
Config = ConfigParser.ConfigParser()
Config.read(path)
Config.sections()
for section in sections:
try:
Config.set(section, 'priority', priority)
except ConfigParser.NoSectionError:
# Emperor versions of Ceph used all lowercase sections
# so lets just try again for the section that failed, maybe
# we are able to find it if it is lower
Config.set(section.lower(), 'priority', priority)
with open(path, 'wb') as fout:
Config.write(fout)
# And now, because ConfigParser is super duper, we need to remove the
# whitespace it adds around assignments so the file looks like it did before
def remove_whitespace_from_assignments():
separator = "="
lines = file(path).readlines()
fp = open(path, "w")
for line in lines:
line = line.strip()
if not line.startswith("#") and separator in line:
assignment = line.split(separator, 1)
assignment = map(str.strip, assignment)
fp.write("%s%s%s\n" % (assignment[0], separator, assignment[1]))
else:
fp.write(line + "\n")
remove_whitespace_from_assignments()
def write_conf(cluster, conf, overwrite):
""" write cluster configuration to /etc/ceph/{cluster}.conf """
path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster)
tmp_file = tempfile.NamedTemporaryFile(dir='/etc/ceph', delete=False)
err_msg = 'config file %s exists with different content; use --overwrite-conf to overwrite' % path
if os.path.exists(path):
with file(path, 'rb') as f:
old = f.read()
if old != conf and not overwrite:
raise RuntimeError(err_msg)
tmp_file.write(conf)
tmp_file.close()
shutil.move(tmp_file.name, path)
os.chmod(path, 0644)
return
if os.path.exists('/etc/ceph'):
with open(path, 'w') as f:
f.write(conf)
os.chmod(path, 0644)
else:
err_msg = '/etc/ceph/ does not exist - could not write config'
raise RuntimeError(err_msg)
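# Usage sketch (illustrative, not part of the original module): callers must opt
# in to clobbering an existing config that differs from what they are pushing:
#
#   write_conf('ceph', new_conf_text, overwrite=False)  # raises RuntimeError on mismatch
#   write_conf('ceph', new_conf_text, overwrite=True)   # rewrites via temp file + move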
def write_keyring(path, key):
""" create a keyring file """
# Note that we *must not* let the temp file be deleted automatically,
# otherwise we risk not being able to move the contents from
# one file system to the other, hence the `delete=False`
tmp_file = tempfile.NamedTemporaryFile(delete=False)
tmp_file.write(key)
tmp_file.close()
keyring_dir = os.path.dirname(path)
if not path_exists(keyring_dir):
makedir(keyring_dir)
shutil.move(tmp_file.name, path)
def create_mon_path(path):
"""create the mon path if it does not exist"""
if not os.path.exists(path):
os.makedirs(path)
def create_done_path(done_path):
"""create a done file to avoid re-doing the mon deployment"""
with file(done_path, 'w'):
pass
def create_init_path(init_path):
"""create the init path if it does not exist"""
if not os.path.exists(init_path):
with file(init_path, 'w'):
pass
def append_to_file(file_path, contents):
"""append contents to file"""
with open(file_path, 'a') as f:
f.write(contents)
def readline(path):
with open(path) as _file:
return _file.readline().strip('\n')
def path_exists(path):
return os.path.exists(path)
def get_realpath(path):
return os.path.realpath(path)
def listdir(path):
return os.listdir(path)
def makedir(path, ignored=None):
ignored = ignored or []
try:
os.makedirs(path)
except OSError as error:
if error.errno in ignored:
pass
else:
# re-raise the original exception
raise
def unlink(_file):
os.unlink(_file)
def write_monitor_keyring(keyring, monitor_keyring):
"""create the monitor keyring file"""
write_file(keyring, monitor_keyring)
def write_file(path, content):
with file(path, 'w') as f:
f.write(content)
def touch_file(path):
with file(path, 'wb') as f: # noqa
pass
def get_file(path):
""" fetch remote file """
try:
with file(path, 'rb') as f:
return f.read()
except IOError:
pass
def object_grep(term, file_object):
for line in file_object.readlines():
if term in line:
return True
return False
def grep(term, file_path):
# A small grep-like function that will search for a word in a file and
# return True if it does and False if it does not.
# Implemented initially to have behavior similar to the init system
# detection in Ceph's init scripts::
# # detect systemd
# # SYSTEMD=0
# grep -qs systemd /proc/1/comm && SYSTEMD=1
# .. note:: Because we intend to be operating in silent mode, we explicitly
# return ``False`` if the file does not exist.
if not os.path.isfile(file_path):
return False
with open(file_path) as _file:
return object_grep(term, _file)
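# Usage sketch mirroring the init-script detection quoted above:
#
#   if grep('systemd', '/proc/1/comm'):
#       pass  # host is running systemd
#
# A missing file simply yields False, keeping remote checks silent.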
def shortname():
"""get remote short hostname"""
return socket.gethostname().split('.', 1)[0]
def which_service():
""" locating the `service` executable... """
# XXX This should get deprecated at some point. For now
# it just bypasses and uses the new helper.
return which('service')
def which(executable):
"""find the location of an executable"""
locations = (
'/usr/local/bin',
'/bin',
'/usr/bin',
'/usr/local/sbin',
'/usr/sbin',
'/sbin',
)
for location in locations:
executable_path = os.path.join(location, executable)
if os.path.exists(executable_path):
return executable_path
def make_mon_removed_dir(path, file_name):
""" move old monitor data """
try:
os.makedirs('/var/lib/ceph/mon-removed')
except OSError, e:
if e.errno != errno.EEXIST:
raise
shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name))
def safe_mkdir(path):
""" create path if it doesn't exist """
try:
os.mkdir(path)
except OSError, e:
if e.errno == errno.EEXIST:
pass
else:
raise
def zeroing(dev):
""" zeroing last few blocks of device """
# this kills the crab
#
# sgdisk will wipe out the main copy of the GPT partition
# table (sorry), but it doesn't remove the backup copies, and
# subsequent commands will continue to complain and fail when
# they see those. zeroing the last few blocks of the device
# appears to do the trick.
lba_size = 4096
size = 33 * lba_size
with file(dev, 'wb') as f:
f.seek(-size, os.SEEK_END)
f.write(size*'\0')
# remoto magic, needed to execute these functions remotely
if __name__ == '__channelexec__':
for item in channel: # noqa
channel.send(eval(item)) # noqa
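# Editorial note: the loop above is what makes these plain functions callable
# from the deploying host -- the remote side eval()s each string received over
# the execnet channel and sends the result back, so a caller that sends
# "shortname()" gets the remote host's short hostname in return.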
|
|
###############################################################################
# Name: ed_basestc.py #
# Purpose: Editra's base StyledTextCtrl. #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2009 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
The EditraBaseStc is the base StyledTextCtrl that provides automatic styling and
syntax highlighting of all supported filetypes.
@summary: Editra's base styled text ctrl.
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ed_basestc.py 73711 2013-03-23 14:19:01Z CJP $"
__revision__ = "$Revision: 73711 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc
# Editra Imports
import ed_glob
import ed_style
import eclib
import ebmlib
import ed_msg
import ed_txt
from syntax import syntax
from syntax import synglob
import autocomp
from extern import vertedit
from profiler import Profile_Get
import plugin
import iface
import util
import ed_marker
#-----------------------------------------------------------------------------#
# Margins
MARK_MARGIN = 0
NUM_MARGIN = 1
FOLD_MARGIN = 2
# Markers (3rd party)
MARKER_VERT_EDIT = ed_marker.NewMarkerId()
# Key code additions
ALT_SHIFT = wx.stc.STC_SCMOD_ALT|wx.stc.STC_SCMOD_SHIFT
CTRL_SHIFT = wx.stc.STC_SCMOD_CTRL|wx.stc.STC_SCMOD_SHIFT
#-----------------------------------------------------------------------------#
class EditraBaseStc(wx.stc.StyledTextCtrl, ed_style.StyleMgr):
"""Base StyledTextCtrl that provides all the base code editing
functionality.
"""
ED_STC_MASK_MARKERS = ~wx.stc.STC_MASK_FOLDERS
def __init__(self, parent, id_=wx.ID_ANY,
pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.stc.StyledTextCtrl.__init__(self, parent, id_, pos, size, style)
ed_style.StyleMgr.__init__(self, self.GetStyleSheet())
# Attributes
self.file = ed_txt.EdFile()
self._code = dict(compsvc=autocomp.AutoCompService.GetCompleter(self),
synmgr=syntax.SyntaxMgr(ed_glob.CONFIG['CACHE_DIR']),
keywords=[ ' ' ],
comment=list(),
clexer=None, # Container lexer method
indenter=None, # Auto indenter
lang_id=0) # Language ID from syntax module
self.vert_edit = vertedit.VertEdit(self, markerNumber=MARKER_VERT_EDIT)
self._line_num = True # Show line numbers
self._last_cwidth = 1 # one pixel
# Set Up Margins
## Outer Left Margin Bookmarks
self.SetMarginType(MARK_MARGIN, wx.stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(MARK_MARGIN, EditraBaseStc.ED_STC_MASK_MARKERS)
self.SetMarginSensitive(MARK_MARGIN, True)
self.SetMarginWidth(MARK_MARGIN, 16)
## Middle Left Margin Line Number Indication
self.SetMarginType(NUM_MARGIN, wx.stc.STC_MARGIN_NUMBER)
self.SetMarginMask(NUM_MARGIN, 0)
## Inner Left Margin Setup Folders
self.SetMarginType(FOLD_MARGIN, wx.stc.STC_MARGIN_SYMBOL)
self.SetMarginMask(FOLD_MARGIN, wx.stc.STC_MASK_FOLDERS)
self.SetMarginSensitive(FOLD_MARGIN, True)
# Set Mac specific keybindings
if wx.Platform == '__WXMAC__':
for keys in _GetMacKeyBindings():
self.CmdKeyAssign(*keys)
# Set default EOL format
if wx.Platform != '__WXMSW__':
self.SetEOLMode(wx.stc.STC_EOL_LF)
# Setup Auto-comp images
# TODO: should be called on theme change messages
self.RegisterImages()
# Event Handlers
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy, self)
self.Bind(wx.stc.EVT_STC_CHANGE, self.OnChanged)
self.Bind(wx.stc.EVT_STC_MODIFIED, self.OnModified)
self.Bind(wx.stc.EVT_STC_AUTOCOMP_SELECTION, self.OnAutoCompSel)
def OnDestroy(self, evt):
if evt.GetId() == self.GetId():
# Cleanup the file object callbacks
self.file.RemoveModifiedCallback(self.FireModified)
self.file.CleanUp()
evt.Skip()
#---- Public Methods ----#
# General marker api
def AddMarker(self, marker, line=-1):
"""Add a bookmark and return its handle
@param marker: ed_marker.Marker instance
@keyword line: if < 0 bookmark will be added to current line
"""
assert isinstance(marker, ed_marker.Marker)
if line < 0:
line = self.GetCurrentLine()
marker.Set(self, line)
return marker.Handle
def RemoveMarker(self, marker, line):
"""Remove the book mark from the given line
@param marker: ed_marker.Marker instance
@param line: int
"""
assert isinstance(marker, ed_marker.Marker)
marker.Set(self, line, delete=True)
def RemoveAllMarkers(self, marker):
"""Remove all the bookmarks in the buffer
@param marker: ed_marker.Marker instance
"""
assert isinstance(marker, ed_marker.Marker)
marker.DeleteAll(self)
#-- Breakpoint marker api --#
def DeleteAllBreakpoints(self):
"""Delete all the breakpoints in the buffer"""
ed_marker.Breakpoint().DeleteAll(self)
ed_marker.BreakpointDisabled().DeleteAll(self)
ed_marker.BreakpointStep().DeleteAll(self)
def DeleteBreakpoint(self, line):
"""Delete the breakpoint from the given line"""
ed_marker.Breakpoint().Set(self, line, delete=True)
ed_marker.BreakpointDisabled().Set(self, line, delete=True)
def _SetBreakpoint(self, mobj, line=-1):
"""Set the breakpoint state
@param mtype: Marker object
@return: int (-1 if already set)
"""
handle = -1
if line < 0:
line = self.GetCurrentLine()
if not mobj.IsSet(self, line):
# Clear other set breakpoint marker states on same line
ed_marker.Breakpoint().Set(self, line, delete=True)
ed_marker.BreakpointDisabled().Set(self, line, delete=True)
mobj.Set(self, line, delete=False)
handle = mobj.Handle
return handle
def SetBreakpoint(self, line=-1, disabled=False):
"""Set a breakpoint marker on the given line
@keyword line: line number
@keyword disabled: bool
@return: breakpoint handle
"""
if not disabled:
handle = self._SetBreakpoint(ed_marker.Breakpoint(), line)
else:
handle = self._SetBreakpoint(ed_marker.BreakpointDisabled(), line)
return handle
def ShowStepMarker(self, line=-1, show=True):
"""Show the step (arrow) marker to the given line."""
if line < 0:
line = self.GetCurrentLine()
mark = ed_marker.BreakpointStep()
if show:
mark.Set(self, line, delete=False)
else:
mark.DeleteAll(self)
def AddLine(self, before=False, indent=False):
"""Add a new line to the document
@keyword before: whether to add the line before current pos or not
@keyword indent: autoindent the new line
@postcondition: a new line is added to the document
"""
if before:
self.LineUp()
self.LineEnd()
if indent:
self.AutoIndent()
else:
self.InsertText(self.GetCurrentPos(), self.GetEOLChar())
self.LineDown()
def AutoIndent(self):
"""Indent from the current position to match the indentation
of the previous line. Unless the current file type has registered
a custom AutoIndenter in which case it will implement its own
behavior.
"""
cpos = self.GetCurrentPos()
# Check if a special purpose indenter has been registered
if self._code['indenter'] is not None:
self.BeginUndoAction()
self._code['indenter'](self, cpos, self.GetIndentChar())
self.EndUndoAction()
else:
# Default Indenter
line = self.GetCurrentLine()
text = self.GetTextRange(self.PositionFromLine(line), cpos)
if text.strip() == u'':
self.AddText(self.GetEOLChar() + text)
self.EnsureCaretVisible()
return
indent = self.GetLineIndentation(line)
i_space = indent / self.GetTabWidth()
ndent = self.GetEOLChar() + self.GetIndentChar() * i_space
txt = ndent + ((indent - (self.GetTabWidth() * i_space)) * u' ')
self.AddText(txt)
self.EnsureCaretVisible()
def BackTab(self):
"""Unindent or remove excess whitespace to left of cursor"""
sel = self.GetSelection()
if sel[0] == sel[1]:
# There is no selection
cpos = self.GetCurrentPos()
cline = self.GetCurrentLine()
cipos = self.GetLineIndentPosition(cline)
if cpos <= cipos:
# In indentation so simply backtab
super(EditraBaseStc, self).BackTab()
else:
# In middle of line somewhere
text = self.GetLine(cline)
column = max(0, self.GetColumn(cpos) - 1)
if len(text) > column and text[column].isspace():
# Find the end of the whitespace
end = column
while end < len(text) and \
text[end].isspace() and \
text[end] not in '\r\n':
end += 1
# Find the start of the whitespace
end -= 1
start = end
while end > 0 and text[start].isspace():
start -= 1
diff = end - start
if diff > 1:
# There is space to compress
isize = self.GetIndent()
if isize < diff:
# More space than indent to remove
repeat = isize
else:
# Less than one indent width to remove
repeat = end - (start + 1)
# Update the control
self.BeginUndoAction()
self.SetCurrentPos(cpos + (end - column))
for x in range(repeat):
self.DeleteBack()
self.EndUndoAction()
else:
# There is a selection
super(EditraBaseStc, self).BackTab()
def SetBlockCaret(self):
"""Change caret style to block"""
if hasattr(self, 'SetCaretStyle'): # wxPython 2.9 or greater
self.SetCaretStyle(wx.stc.STC_CARETSTYLE_BLOCK)
else:
# Alternatively, just make the caret a bit thicker!
# best we can do on 2.8
self.SetCaretWidth(3)
def SetLineCaret(self):
"""Change caret style to line"""
if hasattr(self, 'SetCaretStyle'):
self.SetCaretStyle(wx.stc.STC_CARETSTYLE_LINE)
else:
pwidth = Profile_Get('CARETWIDTH', default=1)
self.SetCaretWidth(pwidth)
def BraceBadLight(self, pos):
"""Highlight the character at the given position
@param pos: position of character to highlight with STC_STYLE_BRACEBAD
"""
# Check if we are still alive or not, as this may be called
# after we have been deleted.
if self:
super(EditraBaseStc, self).BraceBadLight(pos)
def BraceHighlight(self, pos1, pos2):
"""Highlight characters at pos1 and pos2
@param pos1: position of char 1
@param pos2: position of char 2
"""
# Check if we are still alive or not, as this may be called
# after we have been deleted.
if self:
super(EditraBaseStc, self).BraceHighlight(pos1, pos2)
def CanCopy(self):
"""Check if copy/cut is possible"""
return self.HasSelection()
CanCut = CanCopy
def Comment(self, start, end, uncomment=False):
"""(Un)Comments a line or a selected block of text
in a document.
@param start: beginning line (int)
@param end: end line (int)
@keyword uncomment: uncomment selection
"""
if len(self._code['comment']):
sel = self.GetSelection()
c_start = self._code['comment'][0]
c_end = u''
if len(self._code['comment']) > 1:
c_end = self._code['comment'][1]
# Modify the selected line(s)
self.BeginUndoAction()
try:
nchars = 0
lines = range(start, end+1)
lines.reverse()
for line_num in lines:
lstart = self.PositionFromLine(line_num)
lend = self.GetLineEndPosition(line_num)
text = self.GetTextRange(lstart, lend)
tmp = text.strip()
if len(tmp):
if uncomment:
if tmp.startswith(c_start):
text = text.replace(c_start, u'', 1)
if c_end and tmp.endswith(c_end):
text = text.replace(c_end, u'', 1)
nchars = nchars - len(c_start + c_end)
else:
text = c_start + text + c_end
nchars = nchars + len(c_start + c_end)
self.SetTargetStart(lstart)
self.SetTargetEnd(lend)
self.ReplaceTarget(text)
finally:
self.EndUndoAction()
if sel[0] != sel[1]:
self.SetSelection(sel[0], sel[1] + nchars)
else:
if len(self._code['comment']) > 1:
nchars = nchars - len(self._code['comment'][1])
self.GotoPos(sel[0] + nchars)
def ConfigureAutoComp(self):
"""Sets up the Autocompleter, the autocompleter
configuration depends on the currently set lexer
@postcondition: autocomp is configured
"""
self.AutoCompSetAutoHide(False)
self.InitCompleter()
self.AutoCompSetChooseSingle(self._code['compsvc'].GetChooseSingle())
self.AutoCompSetIgnoreCase(not self._code['compsvc'].GetCaseSensitive())
self.AutoCompStops(self._code['compsvc'].GetAutoCompStops())
# TODO: come back to this it can cause some annoying behavior where
# it automatically completes strings that you don't want to be
# inserted in the buffer. (i.e. typing self._value will bring up
# the autocomp list but if self._value is not in the list and you
# hit space it will automatically insert something from the list.)
# self.AutoCompSetFillUps(self._code['compsvc'].GetAutoCompFillups())
def ConfigureLexer(self, file_ext):
"""Sets Lexer and Lexer Keywords for the specified file extension
@param file_ext: a file extension to configure the lexer from
"""
syn_data = self._code['synmgr'].GetSyntaxData(file_ext)
# Set the ID of the selected lexer
self._code['lang_id'] = syn_data.LangId
lexer = syn_data.Lexer
# Check for special cases
# TODO: add fetch method to check if container lexer requires extra
# style bytes beyond the default 5.
if lexer in [ wx.stc.STC_LEX_HTML, wx.stc.STC_LEX_XML]:
self.SetStyleBits(7)
elif lexer == wx.stc.STC_LEX_NULL:
self.SetStyleBits(5)
self.SetLexer(lexer)
self.ClearDocumentStyle()
self.UpdateBaseStyles()
return True
else:
self.SetStyleBits(5)
# Set Lexer
self.SetLexer(lexer)
# Set Keywords
self.SetKeyWords(syn_data.Keywords)
# Set Lexer/Syntax Specifications
self.SetSyntax(syn_data.SyntaxSpec)
# Set Extra Properties
self.SetProperties(syn_data.Properties)
# Set Comment Pattern
self._code['comment'] = syn_data.CommentPattern
# Get Extension Features
clexer = syn_data.GetFeature(synglob.FEATURE_STYLETEXT)
indenter = syn_data.GetFeature(synglob.FEATURE_AUTOINDENT)
# Set the Container Lexer Method
self._code['clexer'] = clexer
# Auto-indenter function
self._code['indenter'] = indenter
def DefineMarkers(self):
"""Defines the folder and bookmark icons for this control
@postcondition: all margin markers are defined
"""
# Get the colours for the various markers
style = self.GetItemByName('foldmargin_style')
back = style.GetFore()
rgb = eclib.HexToRGB(back[1:])
back = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])
fore = style.GetBack()
rgb = eclib.HexToRGB(fore[1:])
fore = wx.Colour(red=rgb[0], green=rgb[1], blue=rgb[2])
# Buffer background highlight
caret_line = self.GetItemByName('caret_line').GetBack()
rgb = eclib.HexToRGB(caret_line[1:])
clback = wx.Colour(*rgb)
# Code Folding markers
folder = ed_marker.FoldMarker()
folder.Foreground = fore
folder.Background = back
folder.RegisterWithStc(self)
# Bookmarks
ed_marker.Bookmark().RegisterWithStc(self)
# Breakpoints
ed_marker.Breakpoint().RegisterWithStc(self)
ed_marker.BreakpointDisabled().RegisterWithStc(self)
step = ed_marker.BreakpointStep()
step.Background = clback
step.RegisterWithStc(self)
ed_marker.StackMarker().RegisterWithStc(self)
# Other markers
errmk = ed_marker.ErrorMarker()
errsty = self.GetItemByName('error_style')
rgb = eclib.HexToRGB(errsty.GetBack()[1:])
errmk.Background = wx.Colour(*rgb)
rgb = eclib.HexToRGB(errsty.GetFore()[1:])
errmk.Foreground = wx.Colour(*rgb)
errmk.RegisterWithStc(self)
# Lint Marker
ed_marker.LintMarker().RegisterWithStc(self)
ed_marker.LintMarkerWarning().RegisterWithStc(self)
ed_marker.LintMarkerError().RegisterWithStc(self)
def DoZoom(self, mode):
"""Zoom control in or out
@param mode: either zoom in or out
"""
id_type = mode
zoomlevel = self.GetZoom()
if id_type == ed_glob.ID_ZOOM_OUT:
if zoomlevel > -9:
self.ZoomOut()
elif id_type == ed_glob.ID_ZOOM_IN:
if zoomlevel < 19:
self.ZoomIn()
else:
self.SetZoom(0)
return self.GetZoom()
def EnableLineNumbers(self, enable=True):
"""Enable/Disable line number margin
@keyword enable: bool
"""
if enable:
self.SetMarginWidth(NUM_MARGIN, 30)
else:
self.SetMarginWidth(NUM_MARGIN, 0)
self._line_num = enable
def FindChar(self, char, repeat=1, reverse=False, extra_offset=0):
"""Find the position of the next (ith) 'char' character
on the current line and move caret to it
@note: used by vim motions for finding a character on a line (f,F,t,T)
@param char: the character to be found
@keyword repeat: how many times to repeat the search
@keyword reverse: whether to search backwards
@keyword extra_offset: extra offset to be applied to the movement
"""
text, pos = self.GetCurLine()
oldpos = pos
if not reverse:
# search forward
for i in range(repeat):
pos = text.find(char, pos+1)
if pos == -1:
return
else:
# search backward
for i in range(repeat):
pos = text.rfind(char, 0, pos)
if pos == -1:
return
newpos = pos + extra_offset
if newpos in range(len(text)):
self.MoveCaretPos(newpos - oldpos)
@property
def File(self):
"""Reference to this buffers file object"""
return self.file
def FindLexer(self, set_ext=u''):
"""Sets Text Controls Lexer Based on File Extension
@param set_ext: explicit extension to use in search
@postcondition: lexer is configured for file
"""
if set_ext != u'':
ext = set_ext.lower()
else:
ext = self.file.GetExtension().lower()
if ext == u'':
fname = self.GetFileName()
ext = ebmlib.GetFileName(fname).lower()
self.ClearDocumentStyle()
# Configure Lexer from File Extension
self.ConfigureLexer(ext)
# If syntax auto detection fails from file extension try to
# see if there is an interpreter line that can be parsed.
if self.GetLexer() == wx.stc.STC_LEX_NULL:
interp = self.GetLine(0)
if interp != wx.EmptyString:
interp = interp.split(u"/")[-1]
interp = interp.strip().split()
if len(interp) and interp[-1][0] != u"-":
interp = interp[-1]
elif len(interp):
interp = interp[0]
else:
interp = u''
# TODO: should check user config to ensure the explicit
# extension is still associated with the expected
# file type.
ex_map = { "python" : "py", "wish" : "tcl", "ruby" : "rb",
"bash" : "sh", "csh" : "csh", "perl" : "pl",
"ksh" : "ksh", "php" : "php", "booi" : "boo",
"pike" : "pike"}
self.ConfigureLexer(ex_map.get(interp, interp))
self.Colourise(0, -1)
def FireModified(self):
"""Fire a modified event"""
self.OnChanged(wx.stc.StyledTextEvent(wx.stc.wxEVT_STC_CHANGE,
self.GetId()))
def GetCommandStr(self, line=None, col=None):
"""Gets the command string to the left of the autocomp
activation character.
@keyword line: optional if None current cursor position used
@keyword col: optional if None current cursor position used
@return: the command string to the left of the autocomp char
"""
if None in (line, col):
# NOTE: the column position returned by GetCurLine is not correct
# for multibyte characters.
line, col = self.GetCurLine()
col = self.GetColumn(self.GetCurrentPos())
cmd = self._code['compsvc'].GetCommandString(self, line, col)
return cmd
def GetCommentChars(self):
"""Return the list of characters used to comment a string in the
current language.
@return: list of strings
"""
return self._code['comment']
def GetCompleter(self):
"""Get this buffers completer object
@return: Completer
"""
return self._code['compsvc']
def GetDocument(self):
"""Return a reference to the document object represented in this buffer.
@return: EdFile
@see: L{ed_txt.EdFile}
"""
return self.file
def GetEOLChar(self):
"""Gets the eol character used in document
@return: the character used for eol in this document
"""
m_id = self.GetEOLMode()
if m_id == wx.stc.STC_EOL_CR:
return u'\r'
elif m_id == wx.stc.STC_EOL_CRLF:
return u'\r\n'
else:
return u'\n'
def GetFileName(self):
"""Returns the full path name of the current file
@return: full path name of document
"""
return self.file.GetPath()
def GetIndentChar(self):
"""Gets the indentation char used in document
@return: indentation char used either space or tab
"""
if self.GetUseTabs():
return u'\t'
else:
return u' ' * self.GetIndent()
def GetKeywords(self):
"""Get the keyword set for the current document.
@return: list of strings
"""
return self._code['keywords']
def GetLangId(self):
"""Returns the language identifier of this control
@return: language identifier of document
"""
return self._code['lang_id']
def GetModTime(self):
"""Get the value of the buffers file last modtime"""
return self.file.ModTime
def GetPos(self):
"""Update Line/Column information
@return: tuple (line, column)
"""
return (self.GetCurrentLine() + 1, self.GetColumn(self.GetCurrentPos()))
GetRange = wx.stc.StyledTextCtrl.GetTextRange
def GetWordFromPosition(self, pos):
"""Get the word at the given position
@param pos: int
@return: (string, int_start, int_end)
"""
end = self.WordEndPosition(pos, True)
start = self.WordStartPosition(pos, True)
word = self.GetTextRange(start, end)
return (word, start, end)
def IsColumnMode(self):
"""Is the buffer in column edit mode
@return: bool
"""
return self.VertEdit.Enabled
def IsComment(self, pos):
"""Is the given position in a comment region of the current buffer
@param pos: int position in buffer
@return: bool
"""
pos = max(0, pos-1)
return 'comment' in self.FindTagById(self.GetStyleAt(pos))
def IsString(self, pos):
"""Is the given position in a string region of the current buffer
@param pos: int position in buffer
@return: bool
"""
style = self.GetStyleAt(pos)
return self.FindTagById(style) in ('string_style', 'char_style')
def IsNonCode(self, pos):
"""Is the passed in position in a non code region
@param pos: buffer position
@return: bool
"""
return self.IsComment(pos) or self.IsString(pos)
def HasMarker(self, line, marker):
"""Check if the given line has the given marker set
@param line: line number
@param marker: marker id
"""
mask = self.MarkerGet(line)
return bool(1<<marker & mask)
def HasSelection(self):
"""Check if there is a selection in the buffer
@return: bool
"""
sel = super(EditraBaseStc, self).GetSelection()
return sel[0] != sel[1]
def HasMultilineSelection(self):
"""Is the selection over multiple lines?
@return: bool
"""
bMulti = False
sel = super(EditraBaseStc, self).GetSelection()
if sel[0] != sel[1]:
sline = self.LineFromPosition(sel[0])
eline = self.LineFromPosition(sel[1])
bMulti = sline != eline
return bMulti
def CallTipCancel(self):
"""Cancel any active calltip(s)"""
if self.CallTipActive():
super(EditraBaseStc, self).CallTipCancel()
def CallTipShow(self, position, tip):
"""Show a calltip at the given position in the control
@param position: int
@param tip: unicode
"""
self.CallTipCancel()
super(EditraBaseStc, self).CallTipShow(position, tip)
def HidePopups(self):
"""Hide autocomp/calltip popup windows if any are active"""
if self.AutoCompActive():
self.AutoCompCancel()
self.CallTipCancel()
def InitCompleter(self):
"""(Re)Initialize a completer object for this buffer
@todo: handle extended autocomp for plugins?
"""
# Check for plugins that may extend or override functionality for this
# file type.
autocomp_ext = AutoCompExtension(wx.GetApp().GetPluginManager())
completer = autocomp_ext.GetCompleter(self)
if completer is not None:
self._code['compsvc'] = completer
else:
extend = Profile_Get('AUTO_COMP_EX') # Using extended autocomp?
self._code['compsvc'] = autocomp.AutoCompService.GetCompleter(self, extend)
def LoadFile(self, path):
"""Load the file at the given path into the buffer. Returns
True if no errors and False otherwise. To retrieve the errors
check the last error that was set in the file object returned by
L{GetDocument}.
@param path: path to file
"""
# Post notification that a file load is starting
ed_msg.PostMessage(ed_msg.EDMSG_FILE_OPENING, path)
self.file.SetPath(path)
txt = self.file.Read()
if txt is not None:
if self.file.IsRawBytes() and not ebmlib.IsUnicode(txt):
self.AddStyledText(txt)
self.SetReadOnly(True) # Don't allow editing of raw bytes
else:
self.SetText(txt)
else:
self.file.SetPath('')
return False
if self.file.GetLastError() != 'None':
# Return false if there was an encoding error and a fallback
# was used. So the caller knows to check the error status
return False
else:
return True
def MoveCaretPos(self, offset):
"""Move caret by the given offset
@param offset: int (+ move right, - move left)
"""
pos = max(self.GetCurrentPos() + offset, 0)
pos = min(pos, self.GetLength())
self.GotoPos(pos)
self.ChooseCaretX()
def OnAutoCompSel(self, evt):
"""Handle when an item is inserted from the autocomp list"""
text = evt.GetText()
cpos = evt.GetPosition()
self._code['compsvc'].OnCompletionInserted(cpos, text)
def OnChanged(self, evt):
"""Handles updates that need to take place after
the control has been modified.
@param evt: wx.stc.StyledTextEvent
"""
if self._line_num:
# Adjust line number margin width to expand as needed when line
# number width over fills the area.
lines = self.GetLineCount()
mwidth = self.GetTextExtent(str(lines))[0]
adj = 8
if wx.Platform == '__WXMAC__':
adj = 2
nwidth = max(15, mwidth + adj)
if self.GetMarginWidth(NUM_MARGIN) != nwidth:
self.SetMarginWidth(NUM_MARGIN, nwidth)
wx.PostEvent(self.GetParent(), evt)
ed_msg.PostMessage(ed_msg.EDMSG_UI_STC_CHANGED, context=self)
def OnModified(self, evt):
"""Handle modify events, includes style changes!"""
if self.VertEdit.Enabled:
self.VertEdit.OnModified(evt)
else:
evt.Skip()
def OnStyleNeeded(self, evt):
"""Perform custom styling when registered for a container lexer"""
if self._code['clexer'] is not None:
self._code['clexer'](self, self.GetEndStyled(), evt.GetPosition())
else:
evt.Skip()
def PutText(self, text):
"""Put text in the buffer. Like AddText but does the right thing
depending upon the input mode and buffer state.
@param text: string
"""
if not self.HasSelection():
cpos = self.GetCurrentPos()
lepos = self.GetLineEndPosition(self.GetCurrentLine())
if self.GetOvertype() and cpos != lepos:
self.CharRight()
self.DeleteBack()
self.AddText(text)
else:
self.ReplaceSelection(text)
def RegisterImages(self):
"""Register the images for the autocomp popup list"""
images = [(autocomp.TYPE_FUNCTION, ed_glob.ID_FUNCT_TYPE),
(autocomp.TYPE_METHOD, ed_glob.ID_METHOD_TYPE),
(autocomp.TYPE_PROPERTY, ed_glob.ID_PROPERTY_TYPE),
(autocomp.TYPE_ATTRIBUTE, ed_glob.ID_ATTR_TYPE),
(autocomp.TYPE_CLASS, ed_glob.ID_CLASS_TYPE),
(autocomp.TYPE_VARIABLE, ed_glob.ID_VARIABLE_TYPE),
(autocomp.TYPE_ELEMENT, ed_glob.ID_ELEM_TYPE)]
for idx, img in images:
bmp = wx.ArtProvider.GetBitmap(str(img), wx.ART_MENU)
if bmp.IsOk():
self.RegisterImage(idx, bmp)
def SearchText(self, text, regex=False, back=False):
"""Search for text forward or backward
@param text: string
@keyword regex: bool
@keyword back: bool
"""
flags = wx.stc.STC_FIND_MATCHCASE
if regex:
flags = flags | wx.stc.STC_FIND_REGEXP
self.SearchAnchor()
if not back:
# Search forward
res = self.SearchNext(flags, text)
if res == -1:
# Nothing found, search from top
self.DocumentStart()
self.SearchAnchor()
res = self.SearchNext(flags, text)
else:
# Search backward
res = self.SearchPrev(flags, text)
if res == -1:
# Nothing found, search from bottom
self.DocumentEnd()
self.SearchAnchor()
res = self.SearchPrev(flags, text)
return res # returns -1 if nothing found even after wrapping around
def SetDocument(self, doc):
"""Change the document object used.
@param doc: an L{ed_txt.EdFile} instance
"""
del self.file
self.file = doc
def SetEncoding(self, enc):
"""Sets the encoding of the document
@param enc: encoding to set for document
"""
self.file.SetEncoding(enc)
def GetEncoding(self):
"""Get the document objects encoding
@return: string
"""
return self.file.GetEncoding()
def SetFileName(self, path):
"""Set the buffers filename attributes from the given path"""
self.file.SetPath(path)
def SetKeyWords(self, kw_lst):
"""Sets the keywords from a list of keyword sets
@param kw_lst: [ (KWLVL, "KEYWORDS"), (KWLVL2, "KEYWORDS2"), etc... ]
"""
# Parse Keyword Settings List simply ignoring bad values and badly
# formed lists
self._code['keywords'] = list()
kwlist = ""
for keyw in kw_lst:
if len(keyw) != 2:
continue
else:
if not isinstance(keyw[0], int) or \
not isinstance(keyw[1], basestring):
continue
else:
kwlist += keyw[1]
super(EditraBaseStc, self).SetKeyWords(keyw[0], keyw[1])
# Can't have ? in scintilla autocomp list unless specifying an image
# TODO: this should be handled by the autocomp service
if '?' in kwlist:
kwlist = kwlist.replace('?', '')
kwlist = kwlist.split() # Split into a list of words
kwlist = list(set(kwlist)) # Remove duplicates from the list
kwlist.sort() # Sort into alphabetical order
self._code['keywords'] = kwlist
def SetLexer(self, lexer):
"""Set the buffers lexer
@param lexer: lexer to use
@note: Overrides StyledTextCtrl.SetLexer
"""
if lexer == wx.stc.STC_LEX_CONTAINER:
# If setting a container lexer only bind the event if it hasn't
# been done yet.
if self._code['clexer'] is None:
self.Bind(wx.stc.EVT_STC_STYLENEEDED, self.OnStyleNeeded)
else:
# If changing from a container lexer to a non container
# lexer we need to unbind the event.
if self._code['clexer'] is not None:
self.Unbind(wx.stc.EVT_STC_STYLENEEDED)
self._code['clexer'] = None
super(EditraBaseStc, self).SetLexer(lexer)
def SetModTime(self, modtime):
"""Set the value of the files last modtime"""
self.file.SetModTime(modtime)
def SetProperties(self, prop_lst):
"""Sets the Lexer Properties from a list of specifications
@param prop_lst: [ ("PROPERTY", "VAL"), ("PROPERTY2", "VAL2") ]
"""
# Parses Property list, ignoring all bad values
for prop in prop_lst:
if len(prop) != 2:
continue
else:
if not isinstance(prop[0], basestring) or not \
isinstance(prop[1], basestring):
continue
else:
self.SetProperty(prop[0], prop[1])
return True
def BaseSetSelection(self, start, end):
"""Call base STC SetSelection method, for use with internal utf-8
indexes in use by derived classes, STC hell...
"""
super(EditraBaseStc, self).SetSelection(start, end)
def SetSelection(self, start, end):
"""Override base method to make it work correctly using
Unicode character positions instead of UTF-8.
"""
# STC HELL - some methods require UTF-8 offsets while others work
# with Unicode...
# Calculate UTF-8 offsets in buffer
unicode_txt = self.GetText()
if start != 0:
start = len(ed_txt.EncodeString(unicode_txt[0:start], 'utf-8'))
if end != 0:
end = len(ed_txt.EncodeString(unicode_txt[0:end], 'utf-8'))
del unicode_txt
super(EditraBaseStc, self).SetSelection(start, end)
def GetSelection(self):
"""Get the selection positions in Unicode instead of UTF-8"""
# STC HELL
# Translate the UTF8 byte offsets to unicode
start, end = super(EditraBaseStc, self).GetSelection()
utf8_txt = self.GetTextUTF8()
if start != 0:
start = len(ed_txt.DecodeString(utf8_txt[0:start], 'utf-8'))
if end != 0:
end = len(ed_txt.DecodeString(utf8_txt[0:end], 'utf-8'))
del utf8_txt
return start, end
def ShowAutoCompOpt(self, command):
"""Shows the autocompletion options list for the command
@param command: command to look for autocomp options for
"""
pos = self.GetCurrentPos()
# symList is a list(completer.Symbol)
symList = self._code['compsvc'].GetAutoCompList(command)
# Build a list that can be feed to Scintilla
lst = map(unicode, symList)
if lst is not None and len(lst):
self.BeginUndoAction()
lst = u' '.join(lst)
if lst.isspace():
return
self.AutoCompShow(pos - self.WordStartPosition(pos, True), lst)
# Check if something was inserted due to there only being a
# single choice returned from the completer and allow the completer
# to adjust caret position as necessary.
curpos = self.GetCurrentPos()
if curpos != pos:
text = self.GetTextRange(pos, curpos)
self._code['compsvc'].OnCompletionInserted(pos, text)
self.EndUndoAction()
self.SetFocus()
def GetViewWhiteSpace(self):
"""Get if view whitespace is turned on
@return: bool
"""
val = super(EditraBaseStc, self).GetViewWhiteSpace()
return val != wx.stc.STC_WS_INVISIBLE
def SetViewWhiteSpace(self, viewws):
"""Overrides base method to make it a simple bool toggle"""
if viewws:
val = wx.stc.STC_WS_VISIBLEALWAYS
else:
val = wx.stc.STC_WS_INVISIBLE
super(EditraBaseStc, self).SetViewWhiteSpace(val)
def GetWrapMode(self):
"""Get if word wrap is turned on
@return: bool
"""
val = super(EditraBaseStc, self).GetWrapMode()
return val != wx.stc.STC_WRAP_NONE
def SetWrapMode(self, wrap):
"""Overrides base method to make it a simple toggle operation
@param wrap: bool
"""
if wrap:
val = wx.stc.STC_WRAP_WORD
else:
val = wx.stc.STC_WRAP_NONE
super(EditraBaseStc, self).SetWrapMode(val)
def ShowCallTip(self, command):
"""Shows call tip for given command
@param command: command to look for calltips for
"""
self.CallTipCancel()
tip = self._code['compsvc'].GetCallTip(command)
if len(tip):
curr_pos = self.GetCurrentPos()
tip_pos = curr_pos - (len(command.split('.')[-1]) + 1)
fail_safe = curr_pos - self.GetColumn(curr_pos)
self.CallTipShow(max(tip_pos, fail_safe), tip)
def ToggleColumnMode(self):
"""Toggle the column edit mode"""
self.VertEdit.enable(not self.VertEdit.Enabled)
def ToggleComment(self):
"""Toggle the comment of the selected region"""
if len(self._code['comment']):
sel = self.GetSelection()
start = self.LineFromPosition(sel[0])
end = self.LineFromPosition(sel[1])
c_start = self._code['comment'][0]
if end > start and self.GetColumn(sel[1]) == 0:
end = end - 1
# Analyze the selected line(s)
comment = 0
for line in range(start, end+1):
txt = self.GetLine(line)
if txt.lstrip().startswith(c_start):
comment += 1
lcount = end - start
mod = 1
if lcount == 0:
mod = 0
if comment > (lcount / 2) + mod:
# Uncomment
self.Comment(start, end, True)
else:
self.Comment(start, end, False)
def ToggleLineNumbers(self, switch=None):
"""Toggles the visibility of the line number margin
@keyword switch: force a particular setting
"""
if (switch is None and \
not self.GetMarginWidth(NUM_MARGIN)) or switch:
self.EnableLineNumbers(True)
else:
self.EnableLineNumbers(False)
@property
def VertEdit(self):
"""Vertical edit mode accessor."""
return self.vert_edit
#---- Style Function Definitions ----#
def RefreshStyles(self):
"""Refreshes the colorization of the window by reloading any
style tags that may have been modified.
@postcondition: all style settings are refreshed in the control
"""
with eclib.Freezer(self) as _tmp:
self.StyleClearAll()
self.SetSyntax(self.GetSyntaxParams())
self.DefineMarkers()
self.Refresh()
def UpdateBaseStyles(self):
"""Update the controls basic styles"""
super(EditraBaseStc, self).UpdateBaseStyles()
# Set control specific styles
sback = self.GetItemByName('select_style')
if not sback.IsNull():
sback = sback.GetBack()
else:
sback = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)
self.VertEdit.SetBlockColor(sback)
self.DefineMarkers()
#-----------------------------------------------------------------------------#
class AutoCompExtension(plugin.Plugin):
"""Plugin that Extends the autocomp feature"""
observers = plugin.ExtensionPoint(iface.AutoCompI)
def GetCompleter(self, buff):
"""Get the completer for the specified file type id
@param buff: EditraStc instance
"""
ftypeid = buff.GetLangId()
for observer in self.observers:
try:
if observer.GetFileTypeId() == ftypeid:
return observer.GetCompleter(buff)
except Exception, msg:
util.Log("[ed_basestc][err] GetCompleter Extension: %s" % str(msg))
else:
return None
#-----------------------------------------------------------------------------#
def _GetMacKeyBindings():
"""Returns a list of 3-element tuples defining the standard key
bindings for Mac text editors -- i.e., the behavior of option-arrow,
shift-delete, and so on.
@return: list of (key code, modifier keys, STC action)
"""
# A good reference for these: http://www.yellowbrain.com/stc/keymap.html
return [
# Move/select/delete by word
(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_ALT,
wx.stc.STC_CMD_WORDLEFT),
(wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_ALT,
wx.stc.STC_CMD_WORDRIGHT),
(wx.stc.STC_KEY_LEFT, ALT_SHIFT, wx.stc.STC_CMD_WORDLEFTEXTEND),
(wx.stc.STC_KEY_RIGHT, ALT_SHIFT, wx.stc.STC_CMD_WORDRIGHTEXTEND),
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_ALT,
wx.stc.STC_CMD_DELWORDLEFT),
(wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_ALT,
wx.stc.STC_CMD_DELWORDRIGHT),
(wx.stc.STC_KEY_BACK, ALT_SHIFT, wx.stc.STC_CMD_DELWORDRIGHT),
(wx.stc.STC_KEY_DELETE, ALT_SHIFT, wx.stc.STC_CMD_DELWORDLEFT),
# Move/select/delete by line
(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_CTRL,
wx.stc.STC_CMD_VCHOME),
(wx.stc.STC_KEY_LEFT, CTRL_SHIFT, wx.stc.STC_CMD_VCHOMEEXTEND),
(wx.stc.STC_KEY_RIGHT, wx.stc.STC_SCMOD_CTRL,
wx.stc.STC_CMD_LINEEND),
(wx.stc.STC_KEY_RIGHT, CTRL_SHIFT, wx.stc.STC_CMD_LINEENDEXTEND),
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_CTRL,
wx.stc.STC_CMD_DELLINELEFT),
(wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_CTRL,
wx.stc.STC_CMD_DELLINERIGHT),
(wx.stc.STC_KEY_BACK, CTRL_SHIFT, wx.stc.STC_CMD_DELLINERIGHT),
(wx.stc.STC_KEY_DELETE, CTRL_SHIFT, wx.stc.STC_CMD_DELLINELEFT),
# By-character deletion behavior
(wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_NORM,
wx.stc.STC_CMD_DELETEBACK),
(wx.stc.STC_KEY_DELETE, wx.stc.STC_SCMOD_SHIFT,
wx.stc.STC_CMD_DELETEBACK),
# NOTE: The following two are a special case, since Scintilla
# doesn't have a forward-delete action. So here we just cancel any
# tip or auto-completion display, and then implement forward
# delete in OnKeyDown.
#(wx.stc.STC_KEY_DELETE, 0, wx.stc.STC_CMD_CANCEL),
# Disabled as it breaks some keyboard functionality
# NOTE: forward delete on mac is Fn+Delete and works fine
# (wx.stc.STC_KEY_BACK, wx.stc.STC_SCMOD_SHIFT,
# wx.stc.STC_CMD_CANCEL),
]
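# Usage note (see EditraBaseStc.__init__ above): on wxMac each 3-tuple returned
# here is unpacked straight into StyledTextCtrl.CmdKeyAssign, e.g.
#
#   self.CmdKeyAssign(wx.stc.STC_KEY_LEFT, wx.stc.STC_SCMOD_ALT,
#                     wx.stc.STC_CMD_WORDLEFT)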
|
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## Copyright (C) 6WIND <[email protected]>
## This program is published under a GPLv2 license
"""
SCTP (Stream Control Transmission Protocol).
"""
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import IP
from scapy.layers.inet6 import IP6Field
from scapy.layers.inet6 import IPv6
IPPROTO_SCTP=132
# crc32-c (Castagnoli) (crc32c_poly=0x1EDC6F41)
crc32c_table = [
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
]
def crc32c(buf):
crc = 0xffffffff
for c in buf:
crc = (crc>>8) ^ crc32c_table[(crc^(ord(c))) & 0xFF]
crc = (~crc) & 0xffffffff
# reverse endianness
return struct.unpack(">I",struct.pack("<I", crc))[0]
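# Hedged usage note: SCTP.post_build() further down serializes the packet with
# the chksum field defaulting to 0, computes crc32c() over the resulting
# string, and splices the value back in at byte offsets 8-11 of the common
# header, i.e. p[:8] + struct.pack(">I", crc32c(str(p))) + p[12:].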
# old checksum (RFC2960)
"""
BASE = 65521 # largest prime smaller than 65536
def update_adler32(adler, buf):
s1 = adler & 0xffff
s2 = (adler >> 16) & 0xffff
print s1,s2
for c in buf:
print ord(c)
s1 = (s1 + ord(c)) % BASE
s2 = (s2 + s1) % BASE
print s1,s2
return (s2 << 16) + s1
def sctp_checksum(buf):
return update_adler32(1, buf)
"""
sctpchunktypescls = {
0 : "SCTPChunkData",
1 : "SCTPChunkInit",
2 : "SCTPChunkInitAck",
3 : "SCTPChunkSACK",
4 : "SCTPChunkHeartbeatReq",
5 : "SCTPChunkHeartbeatAck",
6 : "SCTPChunkAbort",
7 : "SCTPChunkShutdown",
8 : "SCTPChunkShutdownAck",
9 : "SCTPChunkError",
10 : "SCTPChunkCookieEcho",
11 : "SCTPChunkCookieAck",
14 : "SCTPChunkShutdownComplete",
}
sctpchunktypes = {
0 : "data",
1 : "init",
2 : "init-ack",
3 : "sack",
4 : "heartbeat-req",
5 : "heartbeat-ack",
6 : "abort",
7 : "shutdown",
8 : "shutdown-ack",
9 : "error",
10 : "cookie-echo",
11 : "cookie-ack",
14 : "shutdown-complete",
}
sctpchunkparamtypescls = {
1 : "SCTPChunkParamHearbeatInfo",
5 : "SCTPChunkParamIPv4Addr",
6 : "SCTPChunkParamIPv6Addr",
7 : "SCTPChunkParamStateCookie",
8 : "SCTPChunkParamUnrocognizedParam",
9 : "SCTPChunkParamCookiePreservative",
11 : "SCTPChunkParamHostname",
12 : "SCTPChunkParamSupportedAddrTypes",
32768 : "SCTPChunkParamECNCapable",
49152 : "SCTPChunkParamFwdTSN",
49158 : "SCTPChunkParamAdaptationLayer",
}
sctpchunkparamtypes = {
1 : "heartbeat-info",
5 : "IPv4",
6 : "IPv6",
7 : "state-cookie",
8 : "unrecognized-param",
9 : "cookie-preservative",
11 : "hostname",
12 : "addrtypes",
32768 : "ecn-capable",
49152 : "fwd-tsn-supported",
49158 : "adaptation-layer",
}
############## SCTP header
# Dummy class to guess payload type (variable parameters)
class _SCTPChunkGuessPayload:
def default_payload_class(self,p):
if len(p) < 4:
return conf.padding_layer
else:
t = ord(p[0])
return globals().get(sctpchunktypescls.get(t, "Raw"), conf.raw_layer)
class SCTP(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ShortField("sport", None),
ShortField("dport", None),
XIntField("tag", None),
XIntField("chksum", None), ]
def answers(self, other):
if not isinstance(other, SCTP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.dport) and
(self.dport == other.sport)):
return 0
return 1
def post_build(self, p, pay):
p += pay
if self.chksum is None:
crc = crc32c(str(p))
p = p[:8]+struct.pack(">I", crc)+p[12:]
return p
############## SCTP Chunk variable params
class ChunkParamField(PacketListField):
def __init__(self, name, default, count_from=None, length_from=None):
PacketListField.__init__(self, name, default, conf.raw_layer, count_from=count_from, length_from=length_from)
def m2i(self, p, m):
cls = conf.raw_layer
if len(m) >= 4:
t = ord(m[0]) * 256 + ord(m[1])
cls = globals().get(sctpchunkparamtypescls.get(t, "Raw"), conf.raw_layer)
return cls(m)
# dummy class to avoid Raw() after Chunk params
class _SCTPChunkParam:
def extract_padding(self, s):
return "",s[:]
class SCTPChunkParamHearbeatInfo(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 1, sctpchunkparamtypes),
FieldLenField("len", None, length_of="data",
adjust = lambda pkt,x:x+4),
PadField(StrLenField("data", "",
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),]
class SCTPChunkParamIPv4Addr(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 5, sctpchunkparamtypes),
ShortField("len", 8),
IPField("addr","127.0.0.1"), ]
class SCTPChunkParamIPv6Addr(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 6, sctpchunkparamtypes),
ShortField("len", 20),
IP6Field("addr","::1"), ]
class SCTPChunkParamStateCookie(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 7, sctpchunkparamtypes),
FieldLenField("len", None, length_of="cookie",
adjust = lambda pkt,x:x+4),
PadField(StrLenField("cookie", "",
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),]
class SCTPChunkParamUnrocognizedParam(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 8, sctpchunkparamtypes),
FieldLenField("len", None, length_of="param",
adjust = lambda pkt,x:x+4),
PadField(StrLenField("param", "",
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),]
class SCTPChunkParamCookiePreservative(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 9, sctpchunkparamtypes),
ShortField("len", 8),
XIntField("sug_cookie_inc", None), ]
class SCTPChunkParamHostname(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 11, sctpchunkparamtypes),
FieldLenField("len", None, length_of="hostname",
adjust = lambda pkt,x:x+4),
PadField(StrLenField("hostname", "",
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"), ]
class SCTPChunkParamSupportedAddrTypes(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 12, sctpchunkparamtypes),
FieldLenField("len", None, length_of="addr_type_list",
adjust = lambda pkt,x:x+4),
PadField(FieldListField("addr_type_list", [ "IPv4" ],
ShortEnumField("addr_type", 5, sctpchunkparamtypes),
length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"), ]
class SCTPChunkParamECNCapable(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 32768, sctpchunkparamtypes),
ShortField("len", 4), ]
class SCTPChunkParamFwdTSN(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 49152, sctpchunkparamtypes),
ShortField("len", 4), ]
class SCTPChunkParamAdaptationLayer(_SCTPChunkParam, Packet):
fields_desc = [ ShortEnumField("type", 49158, sctpchunkparamtypes),
ShortField("len", 8),
XIntField("indication", None), ]
############## SCTP Chunks
class SCTPChunkData(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 0, sctpchunktypes),
BitField("reserved", None, 4),
BitField("delay_sack", 0, 1),
BitField("unordered", 0, 1),
BitField("beginning", 0, 1),
BitField("ending", 0, 1),
FieldLenField("len", None, length_of="data", adjust = lambda pkt,x:x+16),
XIntField("tsn", None),
XShortField("stream_id", None),
XShortField("stream_seq", None),
XIntField("proto_id", None),
PadField(StrLenField("data", None, length_from=lambda pkt: pkt.len-16),
4, padwith="\x00"),
]
class SCTPChunkInit(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 1, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+20),
XIntField("init_tag", None),
IntField("a_rwnd", None),
ShortField("n_out_streams", None),
ShortField("n_in_streams", None),
XIntField("init_tsn", None),
ChunkParamField("params", None, length_from=lambda pkt:pkt.len-20),
]
class SCTPChunkInitAck(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 2, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+20),
XIntField("init_tag", None),
IntField("a_rwnd", None),
ShortField("n_out_streams", None),
ShortField("n_in_streams", None),
XIntField("init_tsn", None),
ChunkParamField("params", None, length_from=lambda pkt:pkt.len-20),
]
class GapAckField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "4s")
def i2m(self, pkt, x):
if x is None:
return "\0\0\0\0"
sta, end = map(int, x.split(":"))
args = tuple([">HH", sta, end])
return struct.pack(*args)
def m2i(self, pkt, x):
return "%d:%d"%(struct.unpack(">HH", x))
def any2i(self, pkt, x):
if type(x) is tuple and len(x) == 2:
return "%d:%d"%(x)
return x
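# Worked example (hedged, values illustrative): a gap-ack block given as the
# string "2:5" is serialized by i2m() as struct.pack(">HH", 2, 5), i.e. the
# four bytes 00 02 00 05, and m2i() turns those bytes back into "2:5".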
class SCTPChunkSACK(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 3, sctpchunktypes),
XByteField("flags", None),
ShortField("len", None),
XIntField("cumul_tsn_ack", None),
IntField("a_rwnd", None),
FieldLenField("n_gap_ack", None, count_of="gap_ack_list"),
FieldLenField("n_dup_tsn", None, count_of="dup_tsn_list"),
FieldListField("gap_ack_list", [ ], GapAckField("gap_ack", None), count_from=lambda pkt:pkt.n_gap_ack),
FieldListField("dup_tsn_list", [ ], XIntField("dup_tsn", None), count_from=lambda pkt:pkt.n_dup_tsn),
]
def post_build(self, p, pay):
if self.len is None:
p = p[:2] + struct.pack(">H", len(p)) + p[4:]
return p+pay
class SCTPChunkHeartbeatReq(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 4, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+4),
ChunkParamField("params", None, length_from=lambda pkt:pkt.len-4),
]
class SCTPChunkHeartbeatAck(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 5, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="params", adjust = lambda pkt,x:x+4),
ChunkParamField("params", None, length_from=lambda pkt:pkt.len-4),
]
class SCTPChunkAbort(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 6, sctpchunktypes),
BitField("reserved", None, 7),
BitField("TCB", 0, 1),
FieldLenField("len", None, length_of="error_causes", adjust = lambda pkt,x:x+4),
PadField(StrLenField("error_causes", "", length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),
]
class SCTPChunkShutdown(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 7, sctpchunktypes),
XByteField("flags", None),
ShortField("len", 8),
XIntField("cumul_tsn_ack", None),
]
class SCTPChunkShutdownAck(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 8, sctpchunktypes),
XByteField("flags", None),
ShortField("len", 4),
]
class SCTPChunkError(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 9, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="error_causes", adjust = lambda pkt,x:x+4),
PadField(StrLenField("error_causes", "", length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),
]
class SCTPChunkCookieEcho(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 10, sctpchunktypes),
XByteField("flags", None),
FieldLenField("len", None, length_of="cookie", adjust = lambda pkt,x:x+4),
PadField(StrLenField("cookie", "", length_from=lambda pkt: pkt.len-4),
4, padwith="\x00"),
]
class SCTPChunkCookieAck(_SCTPChunkGuessPayload, Packet):
fields_desc = [ ByteEnumField("type", 11, sctpchunktypes),
XByteField("flags", None),
ShortField("len", 4),
]
class SCTPChunkShutdownComplete(_SCTPChunkGuessPayload, Packet):
    fields_desc = [ ByteEnumField("type", 14, sctpchunktypes),
BitField("reserved", None, 7),
BitField("TCB", 0, 1),
ShortField("len", 4),
]
bind_layers( IP, SCTP, proto=IPPROTO_SCTP)
bind_layers( IPv6, SCTP, nh=IPPROTO_SCTP)
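# A minimal usage sketch (assumption: a standard Scapy runtime; the addresses,
# ports and tags below are illustrative only). Building an INIT chunk inside
# an SCTP common header; post_build() fills in the CRC32-C checksum because
# chksum is left at its None default.
if __name__ == "__main__":
    pkt = IP(dst="192.0.2.1") / SCTP(sport=5000, dport=5001, tag=1) / \
          SCTPChunkInit(init_tag=1, a_rwnd=65535, n_out_streams=2,
                        n_in_streams=2, init_tsn=1,
                        params=[SCTPChunkParamIPv4Addr(addr="192.0.2.2")])
    pkt.show2()  # show2() rebuilds the packet, so the checksum is computed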
|
|
# -*- coding: utf-8 -*-
{
'!langcode!': 'id',
'!langname!': 'Indonesian',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%d days ago': '%d hari yang lalu',
'%d hours ago': '%d jam yang lalu',
'%d minutes ago': '%d menit yang lalu',
'%d months ago': '%d bulan yang lalu',
'%d seconds ago': '%d detik yang lalu',
'%d seconds from now': '%d detik dari sekarang',
'%d weeks ago': '%d minggu yang lalu',
'%d years ago': '%d tahun yang lalu',
'%s %%{row} deleted': '%s %%{row} dihapus',
'%s %%{row} updated': '%s %%{row} diperbarui',
'%s selected': '%s dipilih',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(requires internet access, experimental)': '(membutuhkan akses internet, eksperimental)',
'(something like "it-it")': '(sesuatu seperti "it-it")',
'1 day ago': '1 hari yang lalu',
'1 hour ago': '1 jam yang lalu',
'1 minute ago': '1 menit yang lalu',
'1 month ago': '1 bulan yang lalu',
'1 second ago': '1 detik yang lalu',
'1 week ago': '1 minggu yang lalu',
'1 year ago': '1 tahun yang lalu',
'< Previous': '< Sebelumnya',
'?': '?',
'About': 'Tentang',
'About application': 'Tentang Aplikasi',
'Add': 'Tambah',
'Additional code for your application': 'Tambahan kode untuk aplikasi Anda',
'Address': 'Alamat',
'Admin language': 'Bahasa Admin',
'administrative interface': 'antarmuka administratif',
'Administrator Password:': 'Administrator Kata Sandi:',
'Ajax Recipes': 'Resep Ajax',
'An error occured, please %s the page': 'Terjadi kesalahan, silakan %s halaman',
'And': 'Dan',
'and rename it:': 'dan memberi nama baru itu:',
'Answer': 'Jawaban',
'appadmin is disabled because insecure channel': 'AppAdmin dinonaktifkan karena kanal tidak aman',
'application "%s" uninstalled': 'applikasi "%s" dihapus',
'application compiled': 'aplikasi dikompilasi',
'Application name:': 'Nama Applikasi:',
'are not used yet': 'belum digunakan',
'Are you sure you want to delete this object?': 'Apakah Anda yakin ingin menghapus ini?',
'Are you sure you want to uninstall application "%s"?': 'Apakah Anda yakin ingin menghapus aplikasi "%s"?',
'Available Databases and Tables': 'Database dan Tabel yang tersedia',
'Back': 'Kembali',
'Buy this book': 'Beli buku ini',
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache Keys': 'Cache Keys',
'cache, errors and sessions cleaned': 'cache, kesalahan dan sesi dibersihkan',
'can be a git repo': 'bisa menjadi repo git',
'Cancel': 'Batalkan',
'Cannot be empty': 'Tidak boleh kosong',
'Change admin password': 'Ubah kata sandi admin',
'Change password': 'Ubah kata sandi',
'Check for upgrades': 'Periksa upgrade',
'Check to delete': 'Centang untuk menghapus',
'Checking for upgrades...': 'Memeriksa untuk upgrade...',
'Clean': 'Bersih',
'Clear': 'Hapus',
'Clear CACHE?': 'Hapus CACHE?',
'Clear DISK': 'Hapus DISK',
'Clear RAM': 'Hapus RAM',
'Click row to expand traceback': 'Klik baris untuk memperluas traceback',
'Close': 'Tutup',
'collapse/expand all': 'kempis / memperluas semua',
'Community': 'Komunitas',
'Compile': 'Kompilasi',
'compiled application removed': 'aplikasi yang dikompilasi dihapus',
'Components and Plugins': 'Komponen dan Plugin',
'contains': 'mengandung',
'Controllers': 'Kontrolir',
'controllers': 'kontrolir',
'Copyright': 'Hak Cipta',
'Count': 'Hitung',
'Create': 'Buat',
'create file with filename:': 'buat file dengan nama:',
'created by': 'dibuat oleh',
'CSV (hidden cols)': 'CSV (kolom tersembunyi)',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'currently running': 'sedang berjalan',
'data uploaded': 'data diunggah',
'Database': 'Database',
'Database %s select': 'Memilih Database %s',
'database administration': 'administrasi database',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'defines tables': 'mendefinisikan tabel',
'Delete': 'Hapus',
'delete all checked': 'menghapus semua yang di centang',
'Delete this file (you will be asked to confirm deletion)': 'Hapus file ini (Anda akan diminta untuk mengkonfirmasi penghapusan)',
'Delete:': 'Hapus:',
'Description': 'Keterangan',
'design': 'disain',
'direction: ltr': 'petunjuk: ltr',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Dihapus',
'Documentation': 'Dokumentasi',
"Don't know what to do?": 'Tidak tahu apa yang harus dilakukan?',
'done!': 'selesai!',
'Download': 'Unduh',
'Download .w2p': 'Unduh .w2p',
'download layouts': 'unduh layouts',
'download plugins': 'unduh plugins',
'Duration': 'Durasi',
'Edit': 'Mengedit',
'Edit application': 'Mengedit Aplikasi',
'Edit current record': 'Edit current record',
'Email sent': 'Email dikirim',
'enter a valid email address': 'masukkan alamat email yang benar',
'enter a valid URL': 'masukkan URL yang benar',
'enter a value': 'masukkan data',
'Error': 'Kesalahan',
'Error logs for "%(app)s"': 'Catatan kesalahan untuk "%(app)s"',
'Errors': 'Kesalahan',
'export as csv file': 'ekspor sebagai file csv',
'Export:': 'Ekspor:',
'exposes': 'menghadapkan',
'extends': 'meluaskan',
'filter': 'menyaring',
'First Name': 'Nama Depan',
'Forgot username?': 'Lupa nama pengguna?',
'Free Applications': 'Aplikasi Gratis',
'Gender': 'Jenis Kelamin',
'Graph Model': 'Graph Model',
'Group %(group_id)s created': 'Grup %(group_id)s dibuat',
'Group uniquely assigned to user %(id)s': 'Grup unik yang diberikan kepada pengguna %(id)s',
'Groups': 'Grup',
'Guest': 'Tamu',
'Hello World': 'Halo Dunia',
'Help': 'Bantuan',
'Home': 'Halaman Utama',
'How did you get here?': 'Bagaimana kamu bisa di sini?',
'Image': 'Gambar',
'import': 'impor',
'Import/Export': 'Impor/Ekspor',
'includes': 'termasuk',
'Install': 'Memasang',
'Installation': 'Instalasi',
'Installed applications': 'Aplikasi yang diinstal',
'Internal State': 'Internal State',
'Introduction': 'Pengenalan',
'Invalid email': 'Email tidak benar',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Key': 'Key',
'Language': 'Bahasa',
'languages': 'bahasa',
'Languages': 'Bahasa',
'Last Name': 'Nama Belakang',
'License for': 'Lisensi untuk',
'loading...': 'sedang memuat...',
'Logged in': 'Masuk',
'Logged out': 'Keluar',
'Login': 'Masuk',
'Login to the Administrative Interface': 'Masuk ke antarmuka Administrasi',
'Logout': 'Keluar',
'Lost Password': 'Lupa Kata Sandi',
'Lost password?': 'Lupa kata sandi?',
'Maintenance': 'Pemeliharaan',
'Manage': 'Mengelola',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Mengelola Cache',
'Memberships': 'Memberships',
'models': 'model',
'Models': 'Model',
'Modules': 'Modul',
'modules': 'modul',
'My Sites': 'Situs Saya',
'New': 'Baru',
'new application "%s" created': 'aplikasi baru "%s" dibuat',
'New password': 'Kata sandi baru',
'New Record': 'New Record',
'new record inserted': 'new record inserted',
'New simple application': 'Aplikasi baru sederhana',
'News': 'Berita',
'next %s rows': 'next %s rows',
'next 100 rows': '100 baris berikutnya',
'Next >': 'Berikutnya >',
'Next Page': 'Halaman Berikutnya',
'No databases in this application': 'Tidak ada database dalam aplikasi ini',
'No ticket_storage.txt found under /private folder': 'Tidak ditemukan ticket_storage.txt dalam folder /private',
'not a Zip Code': 'bukan Kode Pos',
'Note': 'Catatan',
'Old password': 'Kata sandi lama',
'Online examples': 'Contoh Online',
'Or': 'Atau',
'or alternatively': 'atau alternatif',
'Or Get from URL:': 'Atau Dapatkan dari URL:',
'or import from csv file': 'atau impor dari file csv',
'Other Plugins': 'Plugin Lainnya',
'Other Recipes': 'Resep Lainnya',
'Overview': 'Ikhtisar',
'Overwrite installed app': 'Timpa aplikasi yang terinstal',
'Pack all': 'Pak semua',
'Pack compiled': 'Pak yang telah dikompilasi',
'Pack custom': 'Pak secara kustomisasi',
'Password': 'Kata sandi',
'Password changed': 'Kata sandi berubah',
"Password fields don't match": 'Kata sandi tidak sama',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': 'silahkan masukan kata sandi anda lagi',
'plugins': 'plugin',
'Plugins': 'Plugin',
'Plural-Forms:': 'Bentuk-Jamak:',
'Powered by': 'Didukung oleh',
'Preface': 'Pendahuluan',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 baris sebelumnya',
'Previous Page': 'Halaman Sebelumnya',
'private files': 'file pribadi',
'Private files': 'File pribadi',
'Profile': 'Profil',
'Profile updated': 'Profil diperbarui',
'Project Progress': 'Perkembangan Proyek',
'pygraphviz library not found': 'pygraphviz library not found',
'Query:': 'Query:',
'Quick Examples': 'Contoh Cepat',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Dihapus',
'Recipes': 'Resep',
'Record': 'Record',
'record does not exist': 'record does not exist',
'Record id': 'Record id',
'Register': 'Daftar',
'Registration successful': 'Pendaftaran berhasil',
'reload': 'memuat kembali',
'Reload routes': 'Memuat rute kembali',
'Remember me (for 30 days)': 'Ingat saya (selama 30 hari)',
'Remove compiled': 'Hapus Kompilasi',
'Request reset password': 'Meminta reset kata sandi',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Baris dalam Tabel',
'Rows selected': 'Baris dipilih',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Jalankan tes di file ini (untuk menjalankan semua file, Anda juga dapat menggunakan tombol berlabel 'test')",
'Running on %s': 'Berjalan di %s',
'Save model as...': 'Simpan model sebagai ...',
'Save profile': 'Simpan profil',
'Search': 'Cari',
'Select Files to Package': 'Pilih Berkas untuk Paket',
'Send Email': 'Kirim Email',
'Service': 'Layanan',
'Site': 'Situs',
'Size of cache:': 'Ukuran cache:',
'starts with': 'dimulai dengan',
'state': 'state',
'static': 'statis',
'Static': 'Statis',
'Statistics': 'Statistik',
'submit': 'submit',
'Support': 'Mendukung',
'Table': 'Tabel',
'test': 'tes',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'Logika aplikasi, setiap jalur URL dipetakan dalam satu fungsi terpapar di kontrolir',
'The data representation, define database tables and sets': 'Representasi data, mendefinisikan tabel database dan set',
'There are no plugins': 'Tidak ada plugin',
'There are no private files': 'Tidak ada file pribadi',
'These files are not served, they are only available from within your app': 'File-file ini tidak dilayani, mereka hanya tersedia dari dalam aplikasi Anda',
'These files are served without processing, your images go here': 'File-file ini disajikan tanpa pengolahan, gambar Anda di sini',
'This App': 'App Ini',
'Time in Cache (h:m:s)': 'Waktu di Cache (h: m: s)',
'To create a plugin, name a file/folder plugin_[name]': 'Untuk membuat sebuah plugin, nama file / folder plugin_ [nama]',
'too short': 'terlalu pendek',
'Traceback': 'Traceback',
'Translation strings for the application': 'Terjemahan string untuk aplikasi',
'Try the mobile interface': 'Coba antarmuka ponsel',
'Unable to download because:': 'Tidak dapat mengunduh karena:',
'unable to parse csv file': 'tidak mampu mengurai file csv',
'update all languages': 'memperbarui semua bahasa',
'Update:': 'Perbarui:',
'Upload': 'Unggah',
'Upload a package:': 'Unggah sebuah paket:',
'Upload and install packed application': 'Upload dan pasang aplikasi yang dikemas',
'upload file:': 'unggah file:',
'upload plugin file:': 'unggah file plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s Logged-in': 'Pengguna %(id)s Masuk',
'User %(id)s Logged-out': 'Pengguna %(id)s Keluar',
'User %(id)s Password changed': 'Pengguna %(id)s Kata Sandi berubah',
'User %(id)s Password reset': 'Pengguna %(id)s Kata Sandi telah direset',
'User %(id)s Profile updated': 'Pengguna %(id)s Profil diperbarui',
'User %(id)s Registered': 'Pengguna %(id)s Terdaftar',
'Users': 'Users',
'value already in database or empty': 'data sudah ada dalam database atau kosong',
'value not allowed': 'data tidak benar',
'value not in database': 'data tidak ada dalam database',
'Verify Password': 'Verifikasi Kata Sandi',
'Version': 'Versi',
'View': 'Lihat',
'views': 'lihat',
'Views': 'Lihat',
'Web Framework': 'Kerangka Web',
'web2py is up to date': 'web2py terbaru',
'web2py Recent Tweets': 'Tweet web2py terbaru',
'Website': 'Situs Web',
'Welcome': 'Selamat Datang',
'Welcome to web2py!': 'Selamat Datang di web2py!',
'Working...': 'Working...',
'You are successfully running web2py': 'Anda berhasil menjalankan web2py',
'You can modify this application and adapt it to your needs': 'Anda dapat memodifikasi aplikasi ini dan menyesuaikan dengan kebutuhan Anda',
'You visited the url %s': 'Anda mengunjungi url %s',
}
|
|
from distutils.version import StrictVersion
import datetime
import hashlib
import os
import re
import socket
import shutil
import time
import sys
import urllib
try:
import requests
except ImportError:
print('Please install or update the requests module.')
sys.exit(1)
import seesaw
from seesaw.config import realize, NumberConfigValue
from seesaw.externalprocess import WgetDownload, ExternalProcess
from seesaw.item import ItemInterpolation, ItemValue
from seesaw.pipeline import Pipeline
from seesaw.project import Project
from seesaw.task import SimpleTask, SetItemKey, LimitConcurrent
from seesaw.tracker import PrepareStatsForTracker, GetItemFromTracker, \
UploadWithTracker, SendDoneToTracker
from seesaw.util import find_executable
# check the seesaw version
if StrictVersion(seesaw.__version__) < StrictVersion("0.8.3"):
raise Exception("This pipeline needs seesaw version 0.8.3 or higher.")
###########################################################################
# Find a useful Wpull executable.
#
# WPULL_EXE will be set to the first path that
# 1. does not crash with --version, and
# 2. prints the required version string
WPULL_EXE = find_executable(
"Wpull",
re.compile(r"\b1\.2\b"),
[
"./wpull",
os.path.expanduser("~/.local/share/wpull-1.2/wpull"),
os.path.expanduser("~/.local/bin/wpull"),
"./wpull_bootstrap",
"wpull",
]
)
if not WPULL_EXE:
raise Exception("No usable Wpull found.")
###########################################################################
# The version number of this pipeline definition.
#
# Update this each time you make a non-cosmetic change.
# It will be added to the WARC files and reported to the tracker.
VERSION = "20160304.01"
TRACKER_ID = 'ftp'
TRACKER_HOST = 'tracker.archiveteam.org'
###########################################################################
# This section defines project-specific tasks.
#
# Simple tasks (tasks that do not need any concurrency) are based on the
# SimpleTask class and have a process(item) method that is called for
# each item.
class CheckIP(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "CheckIP")
self._counter = 0
def process(self, item):
if self._counter <= 0:
item.log_output('Checking IP address.')
ip_set = set()
ip_set.add(socket.gethostbyname('twitter.com'))
ip_set.add(socket.gethostbyname('facebook.com'))
ip_set.add(socket.gethostbyname('youtube.com'))
ip_set.add(socket.gethostbyname('microsoft.com'))
ip_set.add(socket.gethostbyname('icanhas.cheezburger.com'))
ip_set.add(socket.gethostbyname('archiveteam.org'))
if len(ip_set) != 6:
item.log_output('Got IP addresses: {0}'.format(ip_set))
item.log_output(
'You are behind a firewall or proxy. That is a big no-no!')
raise Exception(
'You are behind a firewall or proxy. That is a big no-no!')
# Check only occasionally
if self._counter <= 0:
self._counter = 10
else:
self._counter -= 1
class PrepareDirectories(SimpleTask):
def __init__(self, warc_prefix):
SimpleTask.__init__(self, "PrepareDirectories")
self.warc_prefix = warc_prefix
def process(self, item):
item_name = item["item_name"]
escaped_item_name = item_name.replace(':', '_').replace('/', '_')
item['escaped_item_name'] = escaped_item_name
dirname = "/".join((item["data_dir"], escaped_item_name))
if os.path.isdir(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
item["item_dir"] = dirname
item["warc_file_base"] = "%s-%s-%s" % (
self.warc_prefix, escaped_item_name,
time.strftime("%Y%m%d-%H%M%S")
)
open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close()
class MoveFiles(SimpleTask):
def __init__(self):
SimpleTask.__init__(self, "MoveFiles")
def process(self, item):
# Check if wget was compiled with zlib support
if os.path.exists("%(item_dir)s/%(warc_file_base)s.warc" % item):
raise Exception('Please compile wget with zlib support!')
os.rename("%(item_dir)s/%(warc_file_base)s.warc.gz" % item,
"%(data_dir)s/%(warc_file_base)s.warc.gz" % item)
shutil.rmtree("%(item_dir)s" % item)
def get_hash(filename):
with open(filename, 'rb') as in_file:
return hashlib.sha1(in_file.read()).hexdigest()
CWD = os.getcwd()
PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py'))
SCRIPT_SHA1 = get_hash(os.path.join(CWD, 'ftp.py'))
def stats_id_function(item):
# For accountability and stats.
d = {
'pipeline_hash': PIPELINE_SHA1,
'script_hash': SCRIPT_SHA1,
'python_version': sys.version,
}
return d
class WgetArgs(object):
def realize(self, item):
wget_args = [
WPULL_EXE,
"-nv",
"--python-script", "ftp.py",
"-o", ItemInterpolation("%(item_dir)s/wpull.log"),
"--no-check-certificate",
"--database", ItemInterpolation("%(item_dir)s/wpull.db"),
"--delete-after",
"--no-robots",
"--no-cookies",
"--rotate-dns",
"--timeout", "60",
"--tries", "inf",
"--wait", "0.5",
"--random-wait",
"--waitretry", "5",
"--warc-file", ItemInterpolation("%(item_dir)s/%(warc_file_base)s"),
"--warc-header", "operator: Archive Team",
"--warc-header", "ftp-dld-script-version: " + VERSION,
"--warc-header", ItemInterpolation("ftp-user: %(item_name)s"),
]
item_name = item['item_name']
assert ':' in item_name
item_sort, item_item, item_file = item_name.split(':', 2)
item['item_item'] = item_item
MAX_SIZE = 10737418240
skipped = requests.get('https://raw.githubusercontent.com/ArchiveTeam/ftp-items/master/skipped_sites')
if skipped.status_code != 200:
raise Exception('Something went wrong getting the skipped_sites list from GitHub. ABORTING.')
skipped_items = skipped.text.splitlines()
for skipped_item in skipped_items:
if item_file.startswith(skipped_item):
raise Exception('This FTP will be skipped...')
item_list = requests.get('http://archive.org/download/{0}/{1}'.format(item_item, item_file))
if item_list.status_code != 200:
raise Exception('You received status code %d with URL %s. ABORTING.'%(item_list.status_code, 'https://archive.org/download/{0}/{1}'.format(item_item, item_file)))
itemsize = int(re.search(r'ITEM_TOTAL_SIZE: ([0-9]+)', item_list.text).group(1))
if itemsize > MAX_SIZE:
            raise Exception('Item is %d bytes. This is larger than %d bytes. ABORTING.'%(itemsize, MAX_SIZE))
for url in item_list.text.splitlines():
if url.startswith('ftp://'):
                url = url.replace(' ', '%20').replace('&amp;', '&')
url = urllib.unquote(url)
if item_item == 'archiveteam_ftp_items_2015120102':
url = url.replace('ftp://ftp.research.microsoft.com/downloads/downloads/', 'ftp://ftp.research.microsoft.com/downloads/')
if '#' in url:
                    raise Exception('%s contains a bad character.'%(url))
else:
wget_args.append("{0}".format(url))
if 'bind_address' in globals():
wget_args.extend(['--bind-address', globals()['bind_address']])
print('')
print('*** Wget will bind address at {0} ***'.format(
globals()['bind_address']))
print('')
return realize(wget_args, item)
###########################################################################
# Initialize the project.
#
# This will be shown in the warrior management panel. The logo should not
# be too big. The deadline is optional.
project = Project(
title="ftp",
project_html="""
<img class="project-logo" alt="Project logo" src="http://archiveteam.org/images/thumb/f/f3/Archive_team.png/235px-Archive_team.png" height="50px" title=""/>
<h2>FTP <span class="links"><a href="http://archiveteam.org/index.php?title=FTP">Website</a> ·
<a href="http://tracker.archiveteam.org/ftp/">Leaderboard</a></span></h2>
<p>Archiving all FTPs!</p>
"""
)
pipeline = Pipeline(
CheckIP(),
GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader,
VERSION),
PrepareDirectories(warc_prefix="ftp"),
WgetDownload(
WgetArgs(),
max_tries=1,
accept_on_exit_code=[0, 8],
env={
"item_dir": ItemValue("item_dir"),
"item_item": ItemValue("item_item"),
"downloader": downloader
}
),
PrepareStatsForTracker(
defaults={"downloader": downloader, "version": VERSION},
file_groups={
"data": [
ItemInterpolation("%(item_dir)s/%(warc_file_base)s.warc.gz"),
]
},
id_function=stats_id_function,
),
MoveFiles(),
LimitConcurrent(
NumberConfigValue(min=1, max=4, default="1",
name="shared:rsync_threads", title="Rsync threads",
description="The maximum number of concurrent uploads."),
UploadWithTracker(
"http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
downloader=downloader,
version=VERSION,
files=[
ItemInterpolation("%(data_dir)s/%(warc_file_base)s.warc.gz"),
],
rsync_target_source_path=ItemInterpolation("%(data_dir)s/"),
rsync_extra_args=[
"--recursive",
"--partial",
"--partial-dir", ".rsync-tmp",
]
),
),
SendDoneToTracker(
tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID),
stats=ItemValue("stats")
)
)
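# Hedged note on running this file: seesaw pipelines are normally launched by
# the seesaw runner rather than imported directly, e.g.
#   run-pipeline pipeline.py YOURNICKNAME
# The runner supplies the global `downloader` referenced in the tasks above.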
|
|
from toolset.utils.output_helper import log
from toolset.test_types import test_types
import os
import subprocess
import uuid
import time
import json
import requests
import threading
import re
import math
import csv
import traceback
from datetime import datetime
# Cross-platform colored text
from colorama import Fore, Style
class Results:
def __init__(self, benchmarker):
'''
Constructor
'''
self.benchmarker = benchmarker
self.config = benchmarker.config
self.directory = os.path.join(self.config.results_root,
self.config.timestamp)
try:
os.makedirs(self.directory)
except OSError:
pass
self.file = os.path.join(self.directory, "results.json")
self.uuid = str(uuid.uuid4())
self.name = datetime.now().strftime(self.config.results_name)
self.environmentDescription = self.config.results_environment
try:
self.git = dict()
self.git['commitId'] = self.__get_git_commit_id()
self.git['repositoryUrl'] = self.__get_git_repository_url()
self.git['branchName'] = self.__get_git_branch_name()
except Exception:
#Could not read local git repository, which is fine.
self.git = None
self.startTime = int(round(time.time() * 1000))
self.completionTime = None
self.concurrencyLevels = self.config.concurrency_levels
self.pipelineConcurrencyLevels = self.config.pipeline_concurrency_levels
self.queryIntervals = self.config.query_levels
self.cachedQueryIntervals = self.config.cached_query_levels
self.frameworks = [t.name for t in benchmarker.tests]
self.duration = self.config.duration
self.rawData = dict()
self.completed = dict()
self.succeeded = dict()
self.failed = dict()
self.verify = dict()
for type in test_types:
self.rawData[type] = dict()
self.failed[type] = []
self.succeeded[type] = []
#############################################################################
# PUBLIC FUNCTIONS
#############################################################################
def parse(self, tests):
'''
        Parses the raw results for all tests in this run: counts framework
        commits and significant lines of code, then writes the aggregated
        results.json file.
'''
        # Run the method to get the commit count of each framework.
self.__count_commits()
# Call the method which counts the sloc for each framework
self.__count_sloc()
# Time to create parsed files
# Aggregate JSON file
with open(self.file, "w") as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
def parse_test(self, framework_test, test_type):
'''
Parses the given test and test_type from the raw_file.
'''
results = dict()
results['results'] = []
stats = []
if os.path.exists(self.get_raw_file(framework_test.name, test_type)):
with open(self.get_raw_file(framework_test.name,
test_type)) as raw_data:
is_warmup = True
rawData = None
for line in raw_data:
if "Queries:" in line or "Concurrency:" in line:
is_warmup = False
rawData = None
continue
if "Warmup" in line or "Primer" in line:
is_warmup = True
continue
if not is_warmup:
if rawData is None:
rawData = dict()
results['results'].append(rawData)
if "Latency" in line:
m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)",
line)
if len(m) == 4:
rawData['latencyAvg'] = m[0]
rawData['latencyStdev'] = m[1]
rawData['latencyMax'] = m[2]
if "requests in" in line:
m = re.search("([0-9]+) requests in", line)
if m is not None:
rawData['totalRequests'] = int(m.group(1))
if "Socket errors" in line:
if "connect" in line:
m = re.search("connect ([0-9]+)", line)
rawData['connect'] = int(m.group(1))
if "read" in line:
m = re.search("read ([0-9]+)", line)
rawData['read'] = int(m.group(1))
if "write" in line:
m = re.search("write ([0-9]+)", line)
rawData['write'] = int(m.group(1))
if "timeout" in line:
m = re.search("timeout ([0-9]+)", line)
rawData['timeout'] = int(m.group(1))
if "Non-2xx" in line:
m = re.search("Non-2xx or 3xx responses: ([0-9]+)",
line)
if m != None:
rawData['5xx'] = int(m.group(1))
if "STARTTIME" in line:
m = re.search("[0-9]+", line)
rawData["startTime"] = int(m.group(0))
if "ENDTIME" in line:
m = re.search("[0-9]+", line)
rawData["endTime"] = int(m.group(0))
test_stats = self.__parse_stats(
framework_test, test_type,
rawData["startTime"], rawData["endTime"], 1)
stats.append(test_stats)
with open(
self.get_stats_file(framework_test.name, test_type) + ".json",
"w") as stats_file:
json.dump(stats, stats_file, indent=2)
return results
def parse_all(self, framework_test):
'''
Method meant to be run for a given timestamp
'''
for test_type in framework_test.runTests:
if os.path.exists(
self.get_raw_file(framework_test.name, test_type)):
results = self.parse_test(framework_test, test_type)
self.report_benchmark_results(framework_test, test_type,
results['results'])
def write_intermediate(self, test_name, status_message):
'''
Writes the intermediate results for the given test_name and status_message
'''
self.completed[test_name] = status_message
self.__write_results()
def set_completion_time(self):
'''
Sets the completionTime for these results and writes the results
'''
self.completionTime = int(round(time.time() * 1000))
self.__write_results()
def upload(self):
'''
Attempts to upload the results.json to the configured results_upload_uri
'''
if self.config.results_upload_uri is not None:
try:
requests.post(
self.config.results_upload_uri,
headers={'Content-Type': 'application/json'},
data=json.dumps(self.__to_jsonable(), indent=2),
timeout=300)
except Exception:
log("Error uploading results.json")
def load(self):
'''
Load the results.json file
'''
try:
with open(self.file) as f:
self.__dict__.update(json.load(f))
except (ValueError, IOError):
pass
def get_raw_file(self, test_name, test_type):
'''
Returns the output file for this test_name and test_type
Example: fw_root/results/timestamp/test_type/test_name/raw.txt
'''
path = os.path.join(self.directory, test_name, test_type, "raw.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def get_stats_file(self, test_name, test_type):
'''
        Returns the stats file name for this test_name and test_type
Example: fw_root/results/timestamp/test_type/test_name/stats.txt
'''
path = os.path.join(self.directory, test_name, test_type, "stats.txt")
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
def report_verify_results(self, framework_test, test_type, result):
'''
Used by FrameworkTest to add verification details to our results
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if framework_test.name not in self.verify.keys():
self.verify[framework_test.name] = dict()
self.verify[framework_test.name][test_type] = result
def report_benchmark_results(self, framework_test, test_type, results):
'''
Used by FrameworkTest to add benchmark data to this
TODO: Technically this is an IPC violation - we are accessing
the parent process' memory from the child process
'''
if test_type not in self.rawData.keys():
self.rawData[test_type] = dict()
# If results has a size from the parse, then it succeeded.
if results:
self.rawData[test_type][framework_test.name] = results
# This may already be set for single-tests
if framework_test.name not in self.succeeded[test_type]:
self.succeeded[test_type].append(framework_test.name)
else:
# This may already be set for single-tests
if framework_test.name not in self.failed[test_type]:
self.failed[test_type].append(framework_test.name)
def finish(self):
'''
Finishes these results.
'''
if not self.config.parse:
# Normally you don't have to use Fore.BLUE before each line, but
# Travis-CI seems to reset color codes on newline (see travis-ci/travis-ci#2692)
# or stream flush, so we have to ensure that the color code is printed repeatedly
log("Verification Summary",
border='=',
border_bottom='-',
color=Fore.CYAN)
for test in self.benchmarker.tests:
log(Fore.CYAN + "| {!s}".format(test.name))
if test.name in self.verify.keys():
for test_type, result in self.verify[
test.name].iteritems():
if result.upper() == "PASS":
color = Fore.GREEN
elif result.upper() == "WARN":
color = Fore.YELLOW
else:
color = Fore.RED
log(Fore.CYAN + "| " + test_type.ljust(13) +
' : ' + color + result.upper())
else:
log(Fore.CYAN + "| " + Fore.RED +
"NO RESULTS (Did framework launch?)")
log('', border='=', border_bottom='', color=Fore.CYAN)
log("Results are saved in " + self.directory)
#############################################################################
# PRIVATE FUNCTIONS
#############################################################################
def __to_jsonable(self):
'''
Returns a dict suitable for jsonification
'''
toRet = dict()
toRet['uuid'] = self.uuid
toRet['name'] = self.name
toRet['environmentDescription'] = self.environmentDescription
toRet['git'] = self.git
toRet['startTime'] = self.startTime
toRet['completionTime'] = self.completionTime
toRet['concurrencyLevels'] = self.concurrencyLevels
toRet['pipelineConcurrencyLevels'] = self.pipelineConcurrencyLevels
toRet['queryIntervals'] = self.queryIntervals
toRet['cachedQueryIntervals'] = self.cachedQueryIntervals
toRet['frameworks'] = self.frameworks
toRet['duration'] = self.duration
toRet['rawData'] = self.rawData
toRet['completed'] = self.completed
toRet['succeeded'] = self.succeeded
toRet['failed'] = self.failed
toRet['verify'] = self.verify
toRet['testMetadata'] = self.benchmarker.metadata.to_jsonable()
return toRet
def __write_results(self):
try:
with open(self.file, 'w') as f:
f.write(json.dumps(self.__to_jsonable(), indent=2))
except IOError:
log("Error writing results.json")
def __count_sloc(self):
'''
Counts the significant lines of code for all tests and stores in results.
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
framework_to_count = {}
for framework, testlist in frameworks.items():
wd = testlist[0].directory
# Find the last instance of the word 'code' in the yaml output. This
# should be the line count for the sum of all listed files or just
# the line count for the last file in the case where there's only
# one file listed.
command = "cloc --yaml --follow-links . | grep code | tail -1 | cut -d: -f 2"
log("Running \"%s\" (cwd=%s)" % (command, wd))
try:
line_count = int(subprocess.check_output(command, cwd=wd, shell=True))
except (subprocess.CalledProcessError, ValueError) as e:
log("Unable to count lines of code for %s due to error '%s'" %
(framework, e))
continue
log("Counted %s lines of code" % line_count)
framework_to_count[framework] = line_count
self.rawData['slocCounts'] = framework_to_count
def __count_commits(self):
'''
Count the git commits for all the framework tests
'''
frameworks = self.benchmarker.metadata.gather_frameworks(
self.config.test, self.config.exclude)
def count_commit(directory, jsonResult):
command = "git rev-list HEAD -- " + directory + " | sort -u | wc -l"
try:
commitCount = subprocess.check_output(command, shell=True)
jsonResult[framework] = int(commitCount)
except subprocess.CalledProcessError:
pass
# Because git can be slow when run in large batches, this
# calls git up to 4 times in parallel. Normal improvement is ~3-4x
# in my trials, or ~100 seconds down to ~25
# This is safe to parallelize as long as each thread only
# accesses one key in the dictionary
threads = []
jsonResult = {}
# t1 = datetime.now()
for framework, testlist in frameworks.items():
directory = testlist[0].directory
t = threading.Thread(
target=count_commit, args=(directory, jsonResult))
t.start()
threads.append(t)
# Git has internal locks, full parallel will just cause contention
# and slowness, so we rate-limit a bit
if len(threads) >= 4:
threads[0].join()
threads.remove(threads[0])
# Wait for remaining threads
for t in threads:
t.join()
# t2 = datetime.now()
# print "Took %s seconds " % (t2 - t1).seconds
self.rawData['commitCounts'] = jsonResult
self.config.commits = jsonResult
def __get_git_commit_id(self):
'''
Get the git commit id for this benchmark
'''
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=self.config.fw_root).strip()
def __get_git_repository_url(self):
'''
Gets the git repository url for this benchmark
'''
return subprocess.check_output(
["git", "config", "--get", "remote.origin.url"],
cwd=self.config.fw_root).strip()
def __get_git_branch_name(self):
'''
Gets the git branch name for this benchmark
'''
return subprocess.check_output(
'git rev-parse --abbrev-ref HEAD',
shell=True,
cwd=self.config.fw_root).strip()
def __parse_stats(self, framework_test, test_type, start_time, end_time,
interval):
'''
For each test type, process all the statistics, and return a multi-layered
dictionary that has a structure as follows:
(timestamp)
| (main header) - group that the stat is in
| | (sub header) - title of the stat
| | | (stat) - the stat itself, usually a floating point number
'''
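        # Illustrative shape of the returned dictionary (headers and values
        # are made up; the real ones come from the dstat CSV parsed below):
        # {
        #     1461257616.0: {
        #         "total cpu usage": {"usr": 1.0, "sys": 0.5, "idl": 98.0},
        #         "memory usage": {"used": 1.2e9, "free": 3.4e8},
        #     },
        # }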
stats_dict = dict()
stats_file = self.get_stats_file(framework_test.name, test_type)
with open(stats_file) as stats:
# dstat doesn't output a completely compliant CSV file - we need to strip the header
for _ in range(4):
stats.next()
stats_reader = csv.reader(stats)
main_header = stats_reader.next()
sub_header = stats_reader.next()
time_row = sub_header.index("epoch")
int_counter = 0
for row in stats_reader:
time = float(row[time_row])
int_counter += 1
if time < start_time:
continue
elif time > end_time:
return stats_dict
if int_counter % interval != 0:
continue
row_dict = dict()
for nextheader in main_header:
if nextheader != "":
row_dict[nextheader] = dict()
header = ""
for item_num, column in enumerate(row):
if len(main_header[item_num]) != 0:
header = main_header[item_num]
# all the stats are numbers, so we want to make sure that they stay that way in json
row_dict[header][sub_header[item_num]] = float(column)
stats_dict[time] = row_dict
return stats_dict
def __calculate_average_stats(self, raw_stats):
'''
We have a large amount of raw data for the statistics that may be useful
for the stats nerds, but most people care about a couple of numbers. For
now, we're only going to supply:
* Average CPU
* Average Memory
* Total network use
* Total disk use
More may be added in the future. If they are, please update the above list.
Note: raw_stats is directly from the __parse_stats method.
Recall that this consists of a dictionary of timestamps, each of which
contain a dictionary of stat categories which contain a dictionary of stats
'''
raw_stat_collection = dict()
        for time_dict in raw_stats.values():
for main_header, sub_headers in time_dict.items():
item_to_append = None
if 'cpu' in main_header:
# We want to take the idl stat and subtract it from 100
# to get the time that the CPU is NOT idle.
                    item_to_append = 100.0 - sub_headers['idl']
elif main_header == 'memory usage':
item_to_append = sub_headers['used']
elif 'net' in main_header:
                    # Network stats have two parts - receive and send. We'll use a tuple of
                    # style (receive, send)
item_to_append = (sub_headers['recv'], sub_headers['send'])
                elif 'dsk' in main_header or 'io' in main_header:
# Similar for network, except our tuple looks like (read, write)
item_to_append = (sub_headers['read'], sub_headers['writ'])
if item_to_append is not None:
if main_header not in raw_stat_collection:
raw_stat_collection[main_header] = list()
raw_stat_collection[main_header].append(item_to_append)
# Simple function to determine human readable size
# http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num):
            # We'll assume that any number we get is convertible to a float, just in case
num = float(num)
for x in ['bytes', 'KB', 'MB', 'GB']:
if 1024.0 > num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
# Now we have our raw stats in a readable format - we need to format it for display
# We need a floating point sum, so the built in sum doesn't cut it
display_stat_collection = dict()
for header, values in raw_stat_collection.items():
display_stat = None
if 'cpu' in header:
display_stat = sizeof_fmt(math.fsum(values) / len(values))
            elif header == 'memory usage':
                display_stat = sizeof_fmt(math.fsum(values) / len(values))
            elif 'net' in header:
receive, send = zip(*values) # unzip
display_stat = {
'receive': sizeof_fmt(math.fsum(receive)),
'send': sizeof_fmt(math.fsum(send))
}
else: # if 'dsk' or 'io' in header:
read, write = zip(*values) # unzip
display_stat = {
'read': sizeof_fmt(math.fsum(read)),
'write': sizeof_fmt(math.fsum(write))
}
display_stat_collection[header] = display_stat
        return display_stat_collection
|
|
"""
Created on Jul 13, 2012
@author: cdecker
"""
from time import time
import six
import struct
import socket
from io import BytesIO
from bitcoin.utils import decodeVarLength, decodeVarString, encodeVarLength, \
encodeVarString, doubleSha256
PROTOCOL_VERSION = 70001
MIN_PROTOCOL_VERSION = 60001
IPV4_PREFIX = "00000000000000000000FFFF".decode("hex")
USER_AGENT = "/Snoopy:0.2.1/"
PROTOCOL_SERVICES = 9
WITNESS_FLAG = 1 << 30
INV_TX = 1
INV_BLOCK = 2
INV_WITNESS_TX = INV_TX | WITNESS_FLAG
INV_WITNESS_BLOCK = INV_BLOCK | WITNESS_FLAG
NODE_WITNESS = (1 << 3)
def get_opt(opts, key, default):
if opts is None or key not in opts:
return default
else:
return opts[key]
class Packet(object):
"""Superclass of all packets that are sent/received by bitcoin."""
type = None
def parse(self, payload, opts):
"""
This should be implemented by each packet in order to parse the
contents of a message
"""
def toWire(self, buf, opts):
"""
This should be implemented by the subclasses
Writes the packet to the buffer
"""
def __len__(self):
buf = six.BytesIO()
self.toWire(buf, None)
return len(buf.getvalue())
class Address(Packet):
"""
Not really a packet on its own but as it is serialized on several occasions
we just implement it as such.
"""
type = None
def __init__(self, ip=None, isIPv4=True, port=8333,
services=PROTOCOL_SERVICES, timestamp=None):
self.isIPv4 = isIPv4
if ip:
self.ip = socket.gethostbyname(ip)
else:
self.ip = None
self.timestamp = timestamp
self.port = port
self.services = services
def parse(self, payload, opts):
Packet.parse(self, payload, opts)
if get_opt(opts, 'version', PROTOCOL_VERSION) >= 31402:
self.timestamp, = struct.unpack_from("<I", payload.read(4))
self.services, ip, = struct.unpack_from("<Q16s", payload.read(24))
self.port, = struct.unpack_from(">H", payload.read(2))
if ip[:12] == IPV4_PREFIX:
self.isIPv4 = True
self.ip = socket.inet_ntop(socket.AF_INET, ip[12:])
else:
self.isIPv4 = False
self.ip = socket.inet_ntop(socket.AF_INET6, ip)
def toWire(self, buf, opts):
Packet.toWire(self, buf, opts)
if get_opt(opts, 'version', 70001) >= 31402:
buf.write(struct.pack("<i", int(self.timestamp)))
buf.write(struct.pack("<Q", self.services))
if self.isIPv4:
buf.write(IPV4_PREFIX)
buf.write(socket.inet_pton(socket.AF_INET, self.ip))
else:
buf.write(socket.inet_pton(socket.AF_INET6, self.ip))
buf.write(struct.pack(">H", self.port))
class VersionPacket(Packet):
type = "version"
def __init__(self):
self.timestamp = time()
self.services = PROTOCOL_SERVICES
self.version = PROTOCOL_VERSION
self.nonce = "__ETHZ__"
self.user_agent = USER_AGENT
self.best_height = 0
self.relay = True
self.addr_from = None
self.addr_recv = None
def is_segwit(self):
return self.services & NODE_WITNESS != 0
def parse(self, payload, opts=None):
Packet.parse(self, payload, opts)
self.version, self.services, self.timestamp = struct.unpack(
"<IQQ", payload.read(20))
if self.version >= 106:
# Pretend to be version 0, this doesn't include timestamps yet.
self.addr_recv = Address()
self.addr_recv.parse(payload, {'version': 0})
self.addr_from = Address()
self.addr_from.parse(payload, {'version': 0})
self.nonce = payload.read(8)
self.user_agent = decodeVarString(payload)
self.best_height, = struct.unpack("<I", payload.read(4))
if self.version >= 70001:
relay_flag = payload.read(1)
# Some clients advertise 70001 but then do not include a relay_flag
if len(relay_flag):
self.relay = bool(struct.unpack('B', relay_flag)[0] & 1)
def toWire(self, buf, opts=None):
Packet.toWire(self, buf, opts)
buf.write(struct.pack("<IQQ", self.version, self.services,
self.timestamp))
self.addr_recv.toWire(buf, {'version': 0})
self.addr_from.toWire(buf, {'version': 0})
buf.write(self.nonce)
buf.write(encodeVarString(self.user_agent))
buf.write(struct.pack("<I", self.best_height))
if self.version >= 70001:
buf.write(struct.pack('B', 1 if self.relay else 0))
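    # Hedged usage sketch (addresses illustrative): build a minimal outgoing
    # version message. addr_recv and addr_from must be set before calling
    # toWire(), since it serializes them unconditionally.
    #   ver = VersionPacket()
    #   ver.addr_recv = Address("198.51.100.1", port=8333)
    #   ver.addr_from = Address("192.0.2.1", port=8333)
    #   buf = BytesIO()
    #   ver.toWire(buf)
    #   payload = buf.getvalue()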
class InvPacket(Packet):
type = "inv"
def __init__(self):
self.hashes = []
def parse(self, payload, opts):
length = decodeVarLength(payload)
while len(self.hashes) < length:
t, = struct.unpack("<I", payload.read(4))
h = payload.read(32)[::-1]
self.hashes.append((t, h))
def toWire(self, buf, opts):
buf.write(encodeVarLength(len(self.hashes)))
for h in self.hashes:
buf.write(struct.pack("<I", h[0]))
buf.write(h[1][::-1])
class GetDataPacket(InvPacket):
type = 'getdata'
def convertToWitness(self):
for i in xrange(len(self.hashes)):
h = self.hashes[i]
self.hashes[i] = (h[0] | WITNESS_FLAG, h[1])
class PingPacket(Packet):
type = 'ping'
def __init__(self):
self.nonce = None
def parse(self, payload, opts):
if payload:
self.nonce = payload
def toWire(self, buf, opts):
if self.nonce:
buf.write(self.nonce)
class PongPacket(PingPacket):
"""Response to ping."""
type = 'pong'
class TxPacket(Packet):
type = "tx"
def __init__(self):
self._hash = None
self.inputs = []
self.outputs = []
self.lock_time = 0
self.version = 1
self.witnesses = []
self.is_segwit = False
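# A segwit transaction serializes its input count as 0x00 (marker) followed
# by a flag byte; parse() detects the zero marker and delegates to
# parseSegwit(), which checks the flag is non-zero and reads one witness
# stack per input after the outputs (BIP 144 serialization).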
def parseSegwit(self, payload, opts):
if decodeVarLength(payload) == 0:
return False
self.is_segwit = True
txInputCount = decodeVarLength(payload)
for _i in range(0, txInputCount):
prev_out = (
payload.read(32)[::-1],
struct.unpack("<I", payload.read(4))[0]
)
script_length = decodeVarLength(payload)
script = payload.read(script_length)
sequence, = struct.unpack("<I", payload.read(4))
self.inputs.append((prev_out, script, sequence))
txOutputCount = decodeVarLength(payload)
for _i in range(0, txOutputCount):
value, = struct.unpack("<Q", payload.read(8))
script = decodeVarString(payload)
self.outputs.append((value, script))
for i in range(0, txInputCount):
nelements = decodeVarLength(payload)
self.witnesses.append([decodeVarString(payload) for _ in range(nelements)])
self.lock_time, = struct.unpack("<I", payload.read(4))
return True
def parse(self, payload, opts):
Packet.parse(self, payload, opts)
self.version, = struct.unpack("<I", payload.read(4))
txInputCount = decodeVarLength(payload)
if txInputCount == 0:
return self.parseSegwit(payload, opts)
for _i in range(0, txInputCount):
prev_out = (
payload.read(32)[::-1],
struct.unpack("<I", payload.read(4))[0]
)
script_length = decodeVarLength(payload)
script = payload.read(script_length)
sequence, = struct.unpack("<I", payload.read(4))
self.inputs.append((prev_out, script, sequence))
txOutputCount = decodeVarLength(payload)
for _i in range(0, txOutputCount):
value, = struct.unpack("<Q", payload.read(8))
script = decodeVarString(payload)
self.outputs.append((value, script))
self.lock_time, = struct.unpack("<I", payload.read(4))
return True
def toWire(self, buf, opts=None):
Packet.toWire(self, buf, opts)
buf.write(struct.pack("<I", self.version))
if get_opt(opts, 'segwit', False):
buf.write("\x00\x01")
buf.write(encodeVarLength(len(self.inputs)))
for i in self.inputs:
prev_out, script, sequence = i
buf.write(prev_out[0][::-1])
buf.write(struct.pack("<I", prev_out[1]))
buf.write(encodeVarString(script))
buf.write(struct.pack("<I", sequence))
buf.write(encodeVarLength(len(self.outputs)))
for o in self.outputs:
value, script = o
buf.write(struct.pack("<Q", value))
buf.write(encodeVarString(script))
if get_opt(opts, 'segwit', False):
for w in self.witnesses:
buf.write(encodeVarLength(len(w)))
for e in w:
buf.write(encodeVarString(e))
buf.write(struct.pack("<I", self.lock_time))
def hash(self):
"""
If we have the hash saved from a parsing action we just return it
otherwise we serialize this transaction and calculate the 2xSha256.
If the hash is derived from a serialization we do not cache the result
should happen rarely though.
"""
buf = BytesIO()
self.toWire(buf, {'segwit': False})
return doubleSha256(buf.getvalue())[::-1]
def whash(self):
if self.is_coinbase():
return "00".decode('hex')*32
buf = BytesIO()
self.toWire(buf, {'segwit': self.is_segwit})
return doubleSha256(buf.getvalue())[::-1]
def is_coinbase(self):
return (len(self.inputs) == 1 and
self.inputs[0][0][0] == '\0'*32 and
self.inputs[0][0][1] == 4294967295)
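# normalized_hash() re-serializes the transaction with every input script
# blanked and a little-endian 32-bit 1 appended, so that signature-malleated
# copies of the same transaction map to one identifier; coinbases fall back
# to the regular hash.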
def normalized_hash(self):
if self.is_coinbase():
return self.hash()
else:
copy = TxPacket()
buf = BytesIO()
self.toWire(buf, None)
copy.parse(BytesIO(buf.getvalue()), None)
for pos, iput in enumerate(copy.inputs):
copy.inputs[pos] = (iput[0], "", iput[2])
buf = BytesIO()
copy.toWire(buf, None)
buf.write(struct.pack('<I', 1))
return doubleSha256(buf.getvalue())[::-1]
class BlockPacket(Packet):
type = "block"
def __init__(self):
self._hash = None
self.version = 1
self.prev_block = None
self.merkle_root = None
self.timestamp = time()
self.bits = None
self.nonce = None
self.transactions = []
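# The 80-byte header is version(4) + prev_block(32) + merkle_root(32) +
# timestamp(4) + bits(4) + nonce(4); parse() caches the double SHA-256 of
# those bytes as the block hash, then reads a var-int transaction count and
# the transactions themselves.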
def parse(self, payload, opts):
Packet.parse(self, payload, opts)
self.version, self.prev_block, self.merkle_root = struct.unpack(
'<I32s32s', payload.read(68))
self.prev_block = self.prev_block[::-1]
self.merkle_root = self.merkle_root[::-1]
self.timestamp, self.bits, self.nonce = struct.unpack(
'<III', payload.read(12))
transactionCount = decodeVarLength(payload)
while len(self.transactions) < transactionCount:
t = TxPacket()
t.parse(payload, opts)
self.transactions.append(t)
self._hash = doubleSha256(payload.getvalue()[:80])[::-1]
def toWire(self, buf, opts):
Packet.toWire(self, buf, opts)
buf.write(struct.pack("<I32s32sIII",
self.version,
self.prev_block[::-1],
self.merkle_root[::-1],
self.timestamp,
self.bits,
self.nonce))
buf.write(encodeVarLength(len(self.transactions)))
for t in self.transactions:
t.toWire(buf, opts)
def hash(self):
"""
If we have the hash saved from a parsing action we just return it
otherwise we serialize this transaction and calculate the 2xSha256.
If the hash is derived from a serialization we do not cache the result
should happen rarely though.
"""
if self._hash:
return self._hash
else:
buf = BytesIO()
self.toWire(buf, {'segwit': False, 'version': PROTOCOL_VERSION})
return doubleSha256(buf.getvalue()[:80])[::-1]
class GetaddrPacket(Packet):
type = 'getaddr'
class AddrPacket(Packet):
type = "addr"
def __init__(self):
self.addresses = []
def parse(self, payload, opts):
l = decodeVarLength(payload)
for _ in range(0, l):
a = Address()
a.parse(payload, opts)
self.addresses.append(a)
def toWire(self, buf, opts):
buf.write(encodeVarLength(len(self.addresses)))
for a in self.addresses:
a.toWire(buf, opts)
class VerackMessage(Packet):
type = 'verack'
class DummyPacket(Packet):
""" Class of packets that are not really parsed.
This is just until we implement the actual parsing. It reads/writes the
packet's binary representation.
If you need to parse a subclass of DummyPacket, e.g., one of the packets
below, feel free to implement it and send us a pull request :-)
"""
def __init__(self):
self.type = None
self.binrep = ""
def parse(self, payload, opts):
self.binrep = payload.getvalue()
def toWire(self, buf, opts):
buf.write(self.binrep)
def __str__(self):
return "<DummyPacket[%s]>" % (self.type)
class FilterloadPacket(DummyPacket):
type = 'filterload'
class FilteraddPacket(DummyPacket):
type = 'filteradd'
class FilterclearPacket(DummyPacket):
type = 'filterclear'
class MerkleblockPacket(DummyPacket):
type = 'merkleblock'
class GetheadersPacket(DummyPacket):
type = 'getheaders'
parsers = {
AddrPacket.type: AddrPacket,
TxPacket.type: TxPacket,
PongPacket.type: PongPacket,
PingPacket.type: PingPacket,
InvPacket.type: InvPacket,
GetDataPacket.type: GetDataPacket,
BlockPacket.type: BlockPacket,
VersionPacket.type: VersionPacket,
VerackMessage.type: VerackMessage,
FilterloadPacket.type: FilterloadPacket,
FilteraddPacket.type: FilteraddPacket,
FilterclearPacket.type: FilterclearPacket,
MerkleblockPacket.type: MerkleblockPacket,
GetheadersPacket.type: GetheadersPacket,
GetaddrPacket.type: GetaddrPacket,
}
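# Dispatch table from wire command name to packet class. A minimal sketch of
# typical use (assuming `command` and `payload` come from the message framing
# layer elsewhere in this module):
#
#     packet = parsers.get(command, DummyPacket)()
#     packet.parse(payload, opts)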
|
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from neutron.agent.l3 import fip_rule_priority_allocator as frpa
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.common import utils as common_utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
FIP_NS_PREFIX = 'fip-'
FIP_EXT_DEV_PREFIX = 'fg-'
FIP_2_ROUTER_DEV_PREFIX = 'fpr-'
ROUTER_2_FIP_DEV_PREFIX = namespaces.ROUTER_2_FIP_DEV_PREFIX
# Route Table index for FIPs
FIP_RT_TBL = 16
FIP_LL_SUBNET = '169.254.30.0/23'
# Rule priority range for FIPs
FIP_PR_START = 32768
FIP_PR_END = FIP_PR_START + 40000
class FipNamespace(namespaces.Namespace):
def __init__(self, ext_net_id, agent_conf, driver, use_ipv6):
name = self._get_ns_name(ext_net_id)
super(FipNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
self._ext_net_id = ext_net_id
self.agent_conf = agent_conf
self.driver = driver
self.use_ipv6 = use_ipv6
self.agent_gateway_port = None
self._subscribers = set()
path = os.path.join(agent_conf.state_path, 'fip-priorities')
self._rule_priorities = frpa.FipRulePriorityAllocator(path,
FIP_PR_START,
FIP_PR_END)
self._iptables_manager = iptables_manager.IptablesManager(
namespace=self.get_name(),
use_ipv6=self.use_ipv6)
path = os.path.join(agent_conf.state_path, 'fip-linklocal-networks')
self.local_subnets = lla.LinkLocalAllocator(path, FIP_LL_SUBNET)
self.destroyed = False
@classmethod
def _get_ns_name(cls, ext_net_id):
return namespaces.build_ns_name(FIP_NS_PREFIX, ext_net_id)
def get_name(self):
return self._get_ns_name(self._ext_net_id)
def get_ext_device_name(self, port_id):
return (FIP_EXT_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_int_device_name(self, router_id):
return (FIP_2_ROUTER_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def get_rtr_ext_device_name(self, router_id):
return (ROUTER_2_FIP_DEV_PREFIX + router_id)[:self.driver.DEV_NAME_LEN]
def has_subscribers(self):
return len(self._subscribers) != 0
def subscribe(self, router_id):
is_first = not self.has_subscribers()
self._subscribers.add(router_id)
return is_first
def unsubscribe(self, router_id):
self._subscribers.discard(router_id)
return not self.has_subscribers()
def allocate_rule_priority(self, floating_ip):
return self._rule_priorities.allocate(floating_ip)
def deallocate_rule_priority(self, floating_ip):
self._rule_priorities.release(floating_ip)
def _gateway_added(self, ex_gw_port, interface_name):
"""Add Floating IP gateway port."""
LOG.debug("add gateway interface(%s)", interface_name)
ns_name = self.get_name()
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
bridge=self.agent_conf.external_network_bridge,
namespace=ns_name,
prefix=FIP_EXT_DEV_PREFIX)
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name,
clean_connections=True)
for fixed_ip in ex_gw_port['fixed_ips']:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf)
for subnet in ex_gw_port['subnets']:
gw_ip = subnet.get('gateway_ip')
if gw_ip:
ipd = ip_lib.IPDevice(interface_name,
namespace=ns_name)
ipd.route.add_gateway(gw_ip)
cmd = ['sysctl', '-w', 'net.ipv4.conf.%s.proxy_arp=1' % interface_name]
# TODO(Carl) mlavelle's work has self.ip_wrapper
ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def create(self):
# TODO(Carl) Get this functionality from mlavelle's namespace baseclass
LOG.debug("add fip-namespace(%s)", self.name)
ip_wrapper_root = ip_lib.IPWrapper()
ip_wrapper_root.netns.execute(['sysctl',
'-w',
'net.ipv4.ip_nonlocal_bind=1'],
run_as_root=True)
ip_wrapper = ip_wrapper_root.ensure_namespace(self.get_name())
ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1'])
if self.use_ipv6:
ip_wrapper.netns.execute(['sysctl', '-w',
'net.ipv6.conf.all.forwarding=1'])
# no connection tracking needed in fip namespace
self._iptables_manager.ipv4['raw'].add_rule('PREROUTING',
'-j CT --notrack')
self._iptables_manager.apply()
def delete(self):
self.destroyed = True
ip_wrapper = ip_lib.IPWrapper(namespace=self.name)
for d in ip_wrapper.get_devices(exclude_loopback=True):
if d.name.startswith(FIP_2_ROUTER_DEV_PREFIX):
# internal link between IRs and FIP NS
ip_wrapper.del_veth(d.name)
elif d.name.startswith(FIP_EXT_DEV_PREFIX):
# single port from FIP NS to br-ext
# TODO(carl) Where does the port get deleted?
LOG.debug('DVR: unplug: %s', d.name)
ext_net_bridge = self.agent_conf.external_network_bridge
self.driver.unplug(d.name,
bridge=ext_net_bridge,
namespace=self.name,
prefix=FIP_EXT_DEV_PREFIX)
self.agent_gateway_port = None
# TODO(mrsmith): add LOG warn if fip count != 0
LOG.debug('DVR: destroy fip ns: %s', self.name)
super(FipNamespace, self).delete()
def create_gateway_port(self, agent_gateway_port):
"""Create Floating IP gateway port.
Request port creation from Plugin then creates
Floating IP namespace and adds gateway port.
"""
self.agent_gateway_port = agent_gateway_port
self.create()
iface_name = self.get_ext_device_name(agent_gateway_port['id'])
self._gateway_added(agent_gateway_port, iface_name)
def _internal_ns_interface_added(self, ip_cidr,
interface_name, ns_name):
ip_wrapper = ip_lib.IPWrapper(namespace=ns_name)
ip_wrapper.netns.execute(['ip', 'addr', 'add',
ip_cidr, 'dev', interface_name])
def create_rtr_2_fip_link(self, ri):
"""Create interface between router and Floating IP namespace."""
LOG.debug("Create FIP link interfaces for router %s", ri.router_id)
rtr_2_fip_name = self.get_rtr_ext_device_name(ri.router_id)
fip_2_rtr_name = self.get_int_device_name(ri.router_id)
fip_ns_name = self.get_name()
# add link local IP to interface
if ri.rtr_fip_subnet is None:
ri.rtr_fip_subnet = self.local_subnets.allocate(ri.router_id)
rtr_2_fip, fip_2_rtr = ri.rtr_fip_subnet.get_pair()
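# The allocator hands out a pair of link-local addresses from FIP_LL_SUBNET,
# one for each end of the router<->FIP veth pair plugged below.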
ip_wrapper = ip_lib.IPWrapper(namespace=ri.ns_name)
device_exists = ip_lib.device_exists(rtr_2_fip_name,
namespace=ri.ns_name)
if not device_exists:
int_dev = ip_wrapper.add_veth(rtr_2_fip_name,
fip_2_rtr_name,
fip_ns_name)
self._internal_ns_interface_added(str(rtr_2_fip),
rtr_2_fip_name,
ri.ns_name)
self._internal_ns_interface_added(str(fip_2_rtr),
fip_2_rtr_name,
fip_ns_name)
if self.agent_conf.network_device_mtu:
int_dev[0].link.set_mtu(self.agent_conf.network_device_mtu)
int_dev[1].link.set_mtu(self.agent_conf.network_device_mtu)
int_dev[0].link.set_up()
int_dev[1].link.set_up()
# add default route for the link local interface
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=ri.ns_name)
device.route.add_gateway(str(fip_2_rtr.ip), table=FIP_RT_TBL)
#setup the NAT rules and chains
ri._handle_fip_nat_rules(rtr_2_fip_name)
def scan_fip_ports(self, ri):
# don't scan if not dvr or count is not None
if ri.dist_fip_count is not None:
return
# scan system for any existing fip ports
ri.dist_fip_count = 0
rtr_2_fip_interface = self.get_rtr_ext_device_name(ri.router_id)
if ip_lib.device_exists(rtr_2_fip_interface, namespace=ri.ns_name):
device = ip_lib.IPDevice(rtr_2_fip_interface, namespace=ri.ns_name)
existing_cidrs = [addr['cidr'] for addr in device.addr.list()]
fip_cidrs = [c for c in existing_cidrs if
common_utils.is_cidr_host(c)]
for fip_cidr in fip_cidrs:
fip_ip = fip_cidr.split('/')[0]
rule_pr = self._rule_priorities.allocate(fip_ip)
ri.floating_ips_dict[fip_ip] = rule_pr
ri.dist_fip_count = len(fip_cidrs)
|
|
# coding: utf-8
"""
Twisted Deferred Request
========================
Introduction
------------
This module contains an implementation of a Twisted Request that allows
Resource rendering to return Deferreds instead of ``NOT_DONE_YET``. By
using deferreds one can ensure that a request will always be finished
regardless of any errors while rendering, and allows for the convenient
use of ``inlineCallbacks``.
Source
------
The source code for Twisted Deferred Request is available from the
GitHub repo `cpburnz/twisted-deferred-request`_.
.. _`cpburnz/twisted-deferred-request`: https://github.com/cpburnz/twisted-deferred-request.git
Use
---
Here's an example using the deferred request::
from twisted.internet import defer, reactor
from twisted.web import resource, server
from deferred_request import DeferredRequest
class DeferredResource(resource.Resource):
@defer.inlineCallbacks
def render(self, request):
data = yield deferredDatabaseRequest()
request.write(data)
request.finish() # This is optional
class Site(server.Site):
requestFactory = DeferredRequest
displayTracebacks = True
def main():
resrc = DeferredResource()
site = server.Site(resrc)
reactor.listenTCP(8080, site)
reactor.run()
return 0
if __name__ == '__main__':
exit(main())
"""
__author__ = "Caleb P. Burns <[email protected]>"
__copyright__ = "Copyright (C) 2012 by Caleb P. Burns"
__license__ = "MIT"
__version__ = "0.8.0"
__status__ = "Development"
import cgi
import codecs
import datetime
import os
import time
import urllib
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from dateutil import parser as dt_parser, tz
from twisted.internet import defer
from twisted.python import failure, log, urlpath
from twisted.web import http, server, util as webutil
__all__ = ['DeferredRequest', 'RequestException', 'RequestDisconnected', 'ResponseStarted']
_utc = tz.tzutc()
# Static HTML responses.
_server_error_html = u"""
<!DOCTYPE html>
<html>
<head><title>500 Internal Server Error</title></head>
<body>500: There was an internal server error processing your request for <em>{path}</em></body>
</html>
""".strip()
_server_traceback_html = u"""
<!DOCTYPE html>
<html>
<head><title>500 Internal Server Error</title></head>
<body>
<h1>Server Traceback for</h1>
<h2>{path}</h2>
<div>{traceback}</div>
</body>
</html>
"""
def datetime_to_rfc2822(dt):
"""
Formats a datetime as a RFC 2822 string.
*dt* (``datetime`` or ``float``) is the datetime. If a ``float``,
this will be considered the seconds passed since the UNIX epoch
relative to UTC. If a **naive** ``datetime``, this will also be
considered relative to UTC.
Returns the formatted datetime (``str``).
"""
if isinstance(dt, datetime.datetime):
tm = dt.utctimetuple()
elif isinstance(dt, (float, int, long)):
tm = time.gmtime(dt)
else:
raise TypeError("dt:%r is not a datetime or float." % dt)
return time.strftime('%a, %d %b %Y %H:%M:%S GMT', tm)
class RequestException(Exception):
"""
The ``RequestException`` class is the base class that all request
exceptions will inherit from.
"""
class RequestDisconnected(RequestException):
"""
The ``RequestDisconnected`` error is raised when an action is performed on
a request that cannot be performed after the client connection has been
disconnected (e.g., writing to the response).
"""
class ResponseStarted(RequestException):
"""
The ``ResponseStarted`` error is raised when an action is performed on
a request that cannot be performed after the response has started
(e.g., setting headers and cookies).
"""
class DeferredRequest(http.Request):
"""
The ``DeferredRequest`` class represents a Twisted HTTP request. This
class implements the ``twisted.web.iweb.IRequest`` interface. It can
be used like ``twisted.web.server.Request`` as a drop-in replacement
(except for undocumented attributes).
This implementation allows ``twisted.internet.defer.Deferred``s to be
returned when rendering ``twisted.web.resource.Resource``s.
.. TODO: Test to see if this is compatible with being queued.
"""
def __init__(self, channel, queued):
"""
Initializes a ``DeferredRequest`` instance.
*channel* (``twisted.web.http.HTTPChannel``) is the channel we are
connected to.
*queued* (``bool``) is whether we are in the request queue
(``True``), or if we can start writing to the transport (``False``).
"""
self.args = None
"""
*args* (``dict``) is a mapping that maps decoded query argument and
POST argument name (``str``) to a ``list`` of its values (``str``).
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self._disconnected = False
"""
*_disconnected* (``bool``) indicates whether the client connection
is disconnected (``True``), or still connected (``False``).
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.finished = False
"""
*finished* (``bool``) is whether the request is finished (``True``),
or not (``False``).
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.method = http.Request.method
"""
*method* (``str``) is the HTTP method that was used (e.g., "GET" or
"POST").
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.notifications = None
"""
*notifications* (``list``) contains the list of
``twisted.internet.defer.Deferred``s to be called once this request
is finished.
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.path = None
"""
*path* (``str``) is the path component of the URL (no query
arguments).
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.prepath = None
"""
*prepath* (``list``) contains the path components traversed up to
the current resource.
"""
self.postpath = None
"""
*postpath* (``list``) contains the path components remaining at the
current resource.
"""
self.requestHeaders = None
"""
*requestHeaders* (``twisted.web.http.Headers``) contains all
received request headers.
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.responseHeaders = None
"""
*responseHeaders* (``twisted.web.http.Headers``) contains all
response headers to be sent.
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self._resp_buffer = None
"""
*_resp_buffer* (``StringIO.StringIO``) is the response buffer when
*_resp_buffered* is ``True``.
"""
self._resp_buffered = True
"""
*_resp_buffered* (``bool``) is whether the response is buffered
(``True``), or unbuffered (``False``). Default is ``True`` for
``buffered``.
"""
self._resp_chunked = None
"""
*_resp_chunked* (``bool``) indicates whether the response will be
chunked (``True``), or not (``False``).
"""
self._resp_code = http.OK
"""
*_resp_code* (``int``) is the HTTP response status code. The default
code is 200 for "OK".
"""
self._resp_code_message = http.RESPONSES[http.OK]
"""
*_resp_code_message* (``str``) is the HTTP response status code
message. The default message is "OK".
"""
self._resp_content_type = 'text/html'
"""
*_resp_content_type* (``str``) is the response content type. Default
is "text/html".
"""
self._resp_cookies = []
"""
*_resp_cookies* (``list``) contains the list of response cookies.
"""
self._resp_enc = None
"""
*_resp_enc* (``str``) is the response encoding. Default is ``None``.
"""
self._resp_error_cb = None
"""
*_resp_error_cb* (**callable**) is the callback function for when
there is an error rendering a request.
"""
self._resp_error_args = None
"""
*_resp_error_args* (**sequence**) contains any positional arguments to
pass to *_resp_error_cb*. Set by *setErrorCallback()*.
"""
self._resp_error_kw = None
"""
*_resp_error_kw* (``dict``) contains any keyword arguments to pass to
*_resp_error_cb*. Set by *setErrorCallback()*.
"""
self._resp_last_modified = None
"""
*_resp_last_modified* (``datetime``) is when the resource was last
modified.
"""
self._resp_nobody = None
"""
*_resp_nobody* (``bool``) indicates whether there should be a
message body in the response based upon *code*.
"""
self._resp_started = False
"""
*_resp_started* (``bool``) indicates whether the response has been
started (``True``), or not (``False``).
"""
self._root_url = None
"""
*_root_url* (``str``) is the URL remembered from a call to
*rememberRootURL()*.
"""
self.sentLength = 0
"""
*sentLength* (``int``) is the total number of bytes sent as part of
response body.
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
self.site = None
"""
*site* (``twisted.internet.server.Site``) is the site that created
the request.
"""
self.sitepath = None
"""
*sitepath* (``list``) contains the path components (``str``)
traversed up to the site. This is used to determine cookie names
between distributed servers and disconnected sites.
.. NOTE: Set by ``twisted.web.server.Site.getResourceFor()``.
"""
self.uri = http.Request.uri
"""
*uri* (``str``) is the full URI that was requested (including query
arguments).
.. NOTE: Inherited from ``twisted.web.http.Request``.
"""
http.Request.__init__(self, channel, queued)
@staticmethod
def buildPath(path):
"""
Builds the URL path string from the given path segments.
"""
return '/' + '/'.join(map(urllib.quote, path))
def buildURL(self, path, host=None, port=None, secure=None, query=None):
"""
Builds the URL for the specified path.
*path* (``str`` or **sequence**) is the path. If **sequence**,
contains the path segments (``str``).
*host* (``str``) is the host name. Default is ``None`` to use the
host from the request.
*port* (``int``) is the port number. Default is ``None`` to use the
port from the request.
*secure* (``bool``) is whether the URL should be "https" (``True``),
or "http" (``False``). Default is ``None`` to use the whether the
request connection was secure or not.
*query* (``dict`` or **sequence**) contains the query arguments. If
a ``dict``, maps argument *key* to *value*. If a **sequence**,
contains 2-``tuple``s of *key* and *value* pairs. *key* is a
``str``, and *value* can be either a single value (``str``) or a
**sequence** of values (``str``).
Returns the built URL (``str``).
"""
if not path:
path = '/'
elif (not callable(getattr(path, '__getitem__', None)) or isinstance(path, unicode)):
raise TypeError("path:%r is not a str or sequence." % path)
if not host:
host = self.getRequestHostname()
elif not isinstance(host, str):
raise TypeError("host:%r is not a str." % host)
if not port:
port = self.getHost().port
elif not isinstance(port, int):
raise TypeError("port:%r is not an int." % port)
if query and (not callable(getattr(query, '__getitem__', None)) or isinstance(query, unicode)):
raise TypeError("query:%r is not a dict or sequence." % query)
if not isinstance(path, basestring):
path = self.buildPath(path)
return "{proto}://{host}{port}{path}{query}".format(
proto='https' if secure else 'http',
host=host,
port=(":%i" % port) if port != (443 if secure else 80) else '',
path=path,
query=('?' + urllib.urlencode(query, True)) if query else ''
)
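# Illustrative (hypothetical) use: request.buildURL(['api', 'items'],
# query={'page': '2'}) yields something like
# "http://example.com/api/items?page=2", with scheme, host and port
# defaulting to those of the current request.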
def addCookie(self, key, value, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Sets an outgoing HTTP cookie.
*key* (``str``) is the name of the cookie.
*value* (``str``) is the value of the cookie.
*expires* (``str`` or ``datetime``) optionally specifies the
expiration of the cookie. Default is ``None``.
*domain* (``str``) optionally specifies the cookie domain. Default
is ``None``.
*path* (``str``) optionally specifies the cookie path. Default is
``None``.
*max_age* (``int``) optionally is the number of seconds until the
cookie expires. Default is ``None``.
*comment* (``str``) optionally is the cookie comment. Default is
``None``.
*secure* (``bool``) is whether the cookie can only be communicated
over a secure connection (``True``), or not (``False``). Default is
``None`` for ``False``.
"""
# NOTE: Overrides ``twisted.web.http.Request.addCookie()``.
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
cookie = ["%s=%s" % (key, value)]
if expires is not None:
if isinstance(expires, (float, int, long, datetime.datetime)):
expires = datetime_to_rfc2822(expires)
cookie.append("Expires=%s" % expires)
if domain is not None:
cookie.append("Domain=%s" % domain)
if path is not None:
cookie.append("Path=%s" % path)
if max_age is not None:
cookie.append("Max-Age=%i" % max_age)
if comment is not None:
cookie.append("Comment=%s" % comment)
if secure:
cookie.append("Secure")
self._resp_cookies.append("; ".join(cookie))
def disableBuffering(self):
"""
Disables response output buffering, flushing any data already buffered.
"""
if self._resp_buffer:
# Since we have buffered data, write it.
self._write_buffer()
self._resp_buffered = False
def enableBuffering(self):
"""
Enables response output buffering.
"""
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
self._resp_buffered = True
def finish(self):
"""
Indicates that the request and its response are finished.
"""
# NOTE: Overrides ``twisted.web.http.Request.finish()``.
if self.finished or self._disconnected:
return
if self._resp_buffered:
# Write any buffered data, setting Content-Length from its size.
self._write_buffer(set_len=True)
elif not self._resp_started:
# Headers have not been written yet, so write them now.
self._write_headers()
if self._resp_chunked:
# Write last chunk.
self.transport.write("0\r\n\r\n")
self.finished = True
if not self.queued:
# If this request is not currently queued, clean up.
self._cleanup()
def getCookie(self, key, default=None):
"""
Gets the specified request cookie.
*key* (``str``) is the name of the cookie.
*default* (**mixed**) optionally is the value to return if the
cookie does not exist. Default is ``None``.
Returns the cookie value (``str``) if it exists; otherwise,
*default*.
"""
# NOTE: Overrides ``twisted.web.http.Request.getCookie()``.
return self.received_cookies.get(key, default)
def getHeader(self, key, default=None):
"""
Gets the specified request header.
*key* (``str``) is the name of the header.
*default* (**mixed**) optionally is the value to return if the
header does not exist. Default is ``None``.
Returns the header value (``str``) if it exists; otherwise,
*default*.
"""
# NOTE: Overrides ``twisted.web.http.Request.getHeader()``.
return self.requestHeaders.getRawHeaders(key, default=(default,))[-1]
def getRootURL(self):
"""
Gets the previously remembered URL.
Returns the root URL (``str``).
"""
return self._root_url
def prePathURL(self):
"""
Gets the absolute URL of the most deeply nested resource that has been
reached so far.
Returns the pre-path URL (``str``).
"""
# NOTE: Derived from ``twisted.web.server.Request.prePathURL()``.
return self.buildURL(self.prepath)
def process(self):
"""
Called when the entire request has been received and is ready to be
processed.
"""
# NOTE: Overrides ``twisted.web.http.Request.process()``.
try:
# Normalize path
path = urllib.unquote(self.path)
is_dir = path[-1:] == '/'
path = os.path.normpath('/' + path.strip('/'))
if is_dir and path != '/':
path += '/'
# Setup attributes.
self.site = self.channel.site
self.path = path
self.prepath = []
self.postpath = path.split('/')[1:]
# Set default headers.
self.setHeader('server', server.version)
self.setHeader('date', datetime_to_rfc2822(time.time()))
# Render request.
resrc = self.site.getResourceFor(self)
result = resrc.render(self)
if isinstance(result, defer.Deferred):
# Ensure request will be finished.
result.addCallbacks(self._process_finish, self._process_error)
elif isinstance(result, basestring):
# Write result as body and finish request.
self.write(result)
self.finish()
elif result is None:
# Finish request.
self.finish()
elif result == server.NOT_DONE_YET:
# Disable buffering because buffered output causes NOT_DONE_YET
# resources to hang (e.g., ``twisted.web.static.File``).
self.disableBuffering()
else:
# Invalid result.
raise ValueError("Resource:%r rendered result:%r is not a Deferred, string, None or NOT_DONE_YET:%r." % (resrc, result, server.NOT_DONE_YET))
except Exception:
self._process_error(failure.Failure())
def _process_error(self, reason):
"""
Called when there is an error processing the request.
*reason* (``twisted.internet.failure.Failure``) is the reason for
failure.
"""
try:
if not isinstance(reason, failure.Failure):
# LIES! This is not an error.
return
# Log errors.
log.err(reason, str(self))
if self._disconnected or self.finished:
# Since we are disconnected, return and do nothing.
return
if self._resp_error_cb:
# Call error callback.
try:
self._resp_error_cb(self, reason, self._resp_started, *self._resp_error_args, **self._resp_error_kw)
except Exception as e:
log.err(e, str(self))
if not self._resp_started:
# Display Internal Server Error.
code = http.INTERNAL_SERVER_ERROR
if self.site.displayTracebacks:
body = _server_traceback_html.format(path=cgi.escape(self.uri), traceback=webutil.formatFailure(reason))
else:
body = _server_error_html.format(path=cgi.escape(self.uri))
self.setResponseCode(code)
self.setResponseEncoding(self._resp_enc or 'UTF-8')
self.write(body)
elif 'text/html' in self.responseHeaders.getRawHeaders('content-type', ('',))[0]:
# An error occurred but we've already started writing the response,
# so do what we can.
if self.site.displayTracebacks:
body = _server_traceback_html.format(path=cgi.escape(self.uri), traceback=webutil.formatFailure(reason))
else:
body = "<h1>...Internal Server Error!</h1>"
self.write(body)
except Exception as e:
log.err(e, str(self))
finally:
self.finish()
def _process_finish(self, _):
"""
Called at the end of the deferred chain to ensure that the request
is finished.
*_* is ignored.
"""
self.finish()
def redirect(self, url):
"""
Utility function that does a redirect. The request should have
*finish()* called after this.
"""
# NOTE: Overrides ``twisted.web.http.Request.redirect()``.
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
if not isinstance(url, str):
raise TypeError("url:%r is not a str." % url)
self.setResponseCode(http.FOUND)
self.setHeader('location', url)
def rememberRootURL(self, url=None):
"""
Remembers the currently processed part of the URL for later
recalling.
*url* (``str``) is the URL to remember. Default is ``None`` for
twisted's implementation of using *prepath* minus the last segment.
"""
# NOTE: Derived from ``twisted.web.server.Request.rememberRootURL()``.
self._root_url = url if url else self.buildURL(self.prepath[:-1])
def setContentType(self, content_type):
"""
Sets the content type for the response.
*content_type* (``str``) is the content type of the response.
"""
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
if not isinstance(content_type, str):
raise TypeError("content_type:%r is not a str." % content_type)
elif not content_type:
raise ValueError("content_type:%r cannot be empty." % content_type)
self._resp_content_type = content_type
def setHeader(self, key, value):
"""
Sets the HTTP response header, overriding any previously set values
for this header.
*key* (``str``) is the name of the header.
*value* (``str``) is the value of the header.
"""
# NOTE: Overrides ``twisted.web.http.Request.setHeader()``.
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
if not isinstance(key, str):
raise TypeError("key:%r is not a str." % key)
if not isinstance(value, str):
value = str(value)
self.responseHeaders.setRawHeaders(key, [value])
def setLastModified(self, when):
"""
Sets the last modified time for the response.
If this is called more than once, the latest Last-Modified value
will be used.
If this is a conditional request (i.e., If-Modified-Since header was
received), the response code will be set to Not Modified.
*when* (``float`` or ``datetime``) is that last time the resource
was modified. If a ``float``, then the seconds since the UNIX epoch.
Returns ``twisted.web.http.CACHED`` if this is a conditional request and
If-Modified-Since is not earlier than Last-Modified; otherwise, ``None``.
"""
# NOTE: Overrides ``twisted.web.http.Request.setLastModified()``.
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
if not isinstance(when, datetime.datetime):
if isinstance(when, (float, int, long)):
when = datetime.datetime.fromtimestamp(when, _utc)
else:
raise TypeError("when:%r is not a float or datetime." % when)
if not self._resp_last_modified or when > self._resp_last_modified:
self._resp_last_modified = when
mod_since = self.requestHeaders.getRawHeaders('if-modified-since', (None,))[0]
if mod_since:
try:
mod_since = dt_parser.parse(mod_since.split(';', 1)[0])
except ValueError:
return
if mod_since >= when:
self.setResponseCode(http.NOT_MODIFIED)
return http.CACHED
def setResponseCode(self, code, message=None):
"""
Sets the HTTP response status code.
*code* (``int``) is the status code.
*message* (``str``) is a custom status code message. Default is
``None`` for the default status code message.
"""
if self._resp_started:
raise ResponseStarted(self.path, "Response for %r has already started." % self)
if not isinstance(code, int):
raise TypeError("code:%r is not an int." % code)
if message is not None and not isinstance(message, str):
raise TypeError("message:%r is not a str." % message)
self._resp_code = code
self._resp_code_message = message or http.RESPONSES.get(code, "Unknown Status")
def setResponseEncoding(self, encoding):
"""
Sets the response encoding.
*encoding* (``str``) is the response encoding.
"""
try:
codecs.lookup(encoding)
except LookupError:
raise ValueError("encoding:%r is not recognized." % encoding)
self._resp_enc = encoding
def getSession(self):
# TODO
raise NotImplementedError("TODO: getSession()")
def setErrorCallback(self, callback, args=None, kwargs=None):
"""
Sets the callback function which will be called when there is an
error rendering a resource.
*callback* (**callable**) is the method called on an error. This
will be passed: *request*, *reason* and *started*
- *request* (``lib.twisted.DeferredRequest``) is the request.
- *reason* (``twisted.python.failure.Failure``) describes the error that
occured.
- *started* (``bool``) is whether the response has started
(``True``), or not (``False``).
If a custom error response is to be written the to *request*,
*started* should be checked because this indicates whether a
response was already started.
*args* (**sequence**) contains any positional arguments to pass to
*callback*. Default is ``None`` for no positional arguments.
*kwargs* (``dict``) contains any keyword arguments to pass to
*callback*. Default is ``None`` for no keyword arguments.
"""
self._resp_error_cb = callback
self._resp_error_args = args or ()
self._resp_error_kw = kwargs or {}
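# Illustrative (hypothetical) error callback matching the documented
# signature; names here are examples only:
#
#     def on_error(request, reason, started):
#         if not started:
#             request.setResponseCode(500)
#             request.write("Something went wrong.")
#
#     request.setErrorCallback(on_error)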
def URLPath(self):
"""
Gets the URL Path that identifies this requested URL.
Returns the URL Path (``twisted.python.urlpath.URLPath``).
"""
# NOTE: Derived from ``twisted.web.server.Request.URLPath()``.
return urlpath.URLPath.fromString(self.prePathURL())
def write(self, data):
"""
Writes (or buffers) some data in response to this request while
ensuring that the data is encoded properly.
.. NOTE: To bypass encoding, use *write_raw()*.
*data* (**string**) is the data to write.
"""
# NOTE: Overrides ``twisted.web.http.Request.write()``.
if self._resp_enc:
self.write_raw(unicode(data).encode(self._resp_enc))
elif isinstance(data, str):
self.write_raw(data)
elif isinstance(data, unicode):
raise UnicodeError("data:%r is unicode, but no response encoding set." % data)
else:
raise TypeError("data:%r is not a string." % data)
def write_raw(self, data):
"""
Writes (or buffers) some data in response to this request.
Arguments:
- data (``str``)
- The data to write.
"""
if data and not self._resp_nobody:
if self._resp_buffered:
if not self._resp_buffer:
self._resp_buffer = StringIO.StringIO()
self._resp_buffer.write(data)
else:
self._write_response(data)
def _write_buffer(self, set_len=None):
"""
Writes (flushes) the response buffer.
*set_len* (``bool``) is whether the Content-Length header should be
set to the amount of buffered data. Default is ``None`` for
``False``.
"""
if self._resp_buffer:
data = self._resp_buffer.getvalue()
self._resp_buffer = None
else:
data = ''
if set_len:
self.setHeader('content-length', len(data))
self._write_response(data)
def _write_headers(self):
"""
Writes the response headers.
"""
# NOTE: Derived from ``twisted.web.http.Request.write()``.
if self._disconnected:
raise RequestDisconnected("Request %r is disconnected." % self)
if self._resp_started:
raise RuntimeError("Request %r already started." % self)
version = self.clientproto.upper()
method = self.method.upper()
# Write first line.
lines = ["%s %s %s\r\n" % (version, self._resp_code, self._resp_code_message)]
# Determine if the there should be a response body and if it is
# going to be chunked.
if method == 'HEAD' or self._resp_code in http.NO_BODY_CODES:
self._resp_nobody = True
self.responseHeaders.removeHeader('content-type')
self.responseHeaders.removeHeader('content-length')
else:
if not self.responseHeaders.hasHeader('content-type'):
# Ensure a content type is set.
if self._resp_content_type == 'text/html' and self._resp_enc:
self.setHeader('content-type', 'text/html; charset=' + self._resp_enc)
else:
self.setHeader('content-type', self._resp_content_type)
if not self.responseHeaders.hasHeader('last-modified') and self._resp_last_modified:
self.setHeader('last-modified', datetime_to_rfc2822(self._resp_last_modified))
if version == 'HTTP/1.1' and not self.responseHeaders.hasHeader('content-length'):
# If no content length was set, use chunking.
lines.append("Transfer-Encoding: chunked\r\n")
self._resp_chunked = True
# Write headers.
for name, values in self.responseHeaders.getAllRawHeaders():
for value in values:
lines.append("%s: %s\r\n" % (name, value))
# Write cookies.
for cookie in self._resp_cookies:
lines.append("Set-Cookie: %s\r\n" % cookie)
# Write end of headers.
lines.append("\r\n")
# Actually write headers.
self._resp_started = True
self.transport.writeSequence(lines)
def _write_response(self, data):
"""
Actually writes data in response to this request.
*data* (``str``) is the data to write.
"""
# NOTE: Derived from ``twisted.web.http.Request.write()``.
if self._disconnected:
raise RequestDisconnected("Request %r is disconnected." % self)
if not self._resp_started:
self._write_headers()
if self._resp_nobody:
return
if data:
self.sentLength += len(data)
if self._resp_chunked:
self.transport.writeSequence(http.toChunk(data))
else:
self.transport.write(data)
|
|
#!/usr/bin/python
# Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Wrapper script around Rietveld's upload.py that groups files into
# changelists.
import getpass
import os
import random
import re
import string
import subprocess
import sys
import tempfile
import time
import upload
import urllib2
CODEREVIEW_SETTINGS = {
# Default values.
"CODE_REVIEW_SERVER": "rietku.appspot.com",
"CC_LIST": "[email protected]",
"VIEW_VC": "http://code.google.com/p/jaikuengine/source/detail?r=",
}
# Use a shell for subcommands on Windows to get a PATH search, and because svn
# may be a batch file.
use_shell = sys.platform.startswith("win")
# globals that store the root of the current repository and the directory where
# we store information about changelists.
repository_root = ""
gcl_info_dir = ""
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Caches whether we read the codereview.settings file yet or not.
read_gcl_info = False
def DiffPrint(filename):
"""Return file content in unified diff format as though it's been added"""
# The file is "new" in the patch sense. Generate a homebrew diff.
# We can't use ReadFile() since it's not using binary mode.
file_handle = open(filename, 'rb')
file_content = file_handle.read()
file_handle.close()
# Prepend '+ ' to every line.
file_content = ['+ ' + i for i in file_content.splitlines(True)]
nb_lines = len(file_content)
# We need to use / since patch on unix will fail otherwise.
filename = filename.replace('\\', '/')
data = "Index: %s\n" % filename
data += ("============================================================="
"======\n")
# Note: Should we use /dev/null instead?
data += "--- %s\n" % filename
data += "+++ %s\n" % filename
data += "@@ -0,0 +1,%d @@\n" % nb_lines
data += ''.join(file_content)
return data
def IsSVNMoved(filename):
"""Determine if a file has been added through svn mv"""
info = GetSVNFileInfo(filename)
return (info.get('Copied From URL') and
info.get('Copied From Rev') and
info.get('Schedule') == 'add')
def GetSVNFileInfo(file):
"""Returns a dictionary from the svn info output for the given file."""
output = RunShell(["svn", "info", file])
result = {}
re_key_value_pair = re.compile('^(.*)\: (.*)$')
for line in output.splitlines():
key_value_pair = re_key_value_pair.match(line)
if key_value_pair:
result[key_value_pair.group(1)] = key_value_pair.group(2)
return result
def GetSVNFileProperty(file, property_name):
"""Returns the value of an SVN property for the given file.
Args:
file: The file to check
property_name: The name of the SVN property, e.g. "svn:mime-type"
Returns:
The value of the property, which will be the empty string if the property
is not set on the file. If the file is not under version control, the
empty string is also returned.
"""
output = RunShell(["svn", "propget", property_name, file])
if (output.startswith("svn: ") and
output.endswith("is not under version control")):
return ""
else:
return output
def GetRepositoryRoot():
"""Returns the top level directory of the current repository.
The directory is returned as an absolute path.
"""
global repository_root
if not repository_root:
cur_dir_repo_root = GetSVNFileInfo(os.getcwd()).get("Repository Root")
if not cur_dir_repo_root:
ErrorExit("gcl run outside of repository")
repository_root = os.getcwd()
while True:
parent = os.path.dirname(repository_root)
if GetSVNFileInfo(parent).get("Repository Root") != cur_dir_repo_root:
break
repository_root = parent
return repository_root
def GetInfoDir():
"""Returns the directory where gcl info files are stored."""
global gcl_info_dir
if not gcl_info_dir:
gcl_info_dir = os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
return gcl_info_dir
def GetCodeReviewSetting(key):
"""Returns a value for the given key for this repository."""
global read_gcl_info
if not read_gcl_info:
read_gcl_info = True
# First we check if we have a cached version.
cached_settings_file = os.path.join(GetInfoDir(), CODEREVIEW_SETTINGS_FILE)
if (not os.path.exists(cached_settings_file) or
time.time() - os.stat(cached_settings_file).st_mtime > 60 * 60 * 24 * 3):
dir_info = GetSVNFileInfo(".")
repo_root = dir_info["Repository Root"]
url_path = dir_info["URL"]
settings = ""
while True:
# Look for the codereview.settings file at the current level.
svn_path = url_path + "/" + CODEREVIEW_SETTINGS_FILE
settings, rc = RunShellWithReturnCode(["svn", "cat", svn_path])
if not rc:
# Exit the loop if the file was found.
break
# Make sure to mark settings as empty if not found.
settings = ""
if url_path == repo_root:
# Reached the root. Abandoning search.
break
# Go up one level to try again.
url_path = os.path.dirname(url_path)
# Write a cached version even if there isn't a file, so we don't try to
# fetch it each time.
WriteFile(cached_settings_file, settings)
output = ReadFile(cached_settings_file)
for line in output.splitlines():
if not line or line.startswith("#"):
continue
k, v = line.split(": ", 1)
CODEREVIEW_SETTINGS[k] = v
return CODEREVIEW_SETTINGS.get(key, "")
def IsTreeOpen():
"""Fetches the tree status and returns either True or False."""
url = GetCodeReviewSetting('STATUS')
status = ""
if url:
status = urllib2.urlopen(url).read()
return status.find('0') == -1
def Warn(msg):
ErrorExit(msg, exit=False)
def ErrorExit(msg, exit=True):
"""Print an error message to stderr and optionally exit."""
print >> sys.stderr, msg
if exit:
sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
"""Executes a command and returns the output and the return code."""
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell,
universal_newlines=True)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
p.stdout.close()
return output, p.returncode
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
return RunShellWithReturnCode(command, print_output)[0]
def ReadFile(filename):
"""Returns the contents of a file."""
file = open(filename, 'r')
result = file.read()
file.close()
return result
def WriteFile(filename, contents):
"""Overwrites the file with the given contents."""
file = open(filename, 'w')
file.write(contents)
file.close()
class ChangeInfo:
"""Holds information about a changelist.
issue: the Rietveld issue number, or "" if it hasn't been uploaded yet.
description: the description.
files: a list of 2 tuple containing (status, filename) of changed files,
with paths being relative to the top repository directory.
"""
def __init__(self, name="", issue="", description="", files=None):
self.name = name
self.issue = issue
self.description = description
# Avoid a shared mutable default argument.
self.files = files if files is not None else []
self.patch = None
def FileList(self):
"""Returns a list of files."""
return [file[1] for file in self.files]
def _NonDeletedFileList(self):
"""Returns a list of files in this change, not including deleted files."""
return [file[1] for file in self.files if not file[0].startswith("D")]
def _AddedFileList(self):
"""Returns a list of files added in this change."""
return [file[1] for file in self.files if file[0].startswith("A")]
def Save(self):
"""Writes the changelist information to disk."""
data = SEPARATOR.join([self.issue,
"\n".join([f[0] + f[1] for f in self.files]),
self.description])
WriteFile(GetChangelistInfoFile(self.name), data)
def Delete(self):
"""Removes the changelist information from disk."""
os.remove(GetChangelistInfoFile(self.name))
def CloseIssue(self):
"""Closes the Rietveld issue for this changelist."""
data = [("description", self.description), ]
ctype, body = upload.EncodeMultipartFormData(data, [])
SendToRietveld("/" + self.issue + "/close", body, ctype)
def UpdateRietveldDescription(self):
"""Sets the description for an issue on Rietveld."""
data = [("description", self.description), ]
ctype, body = upload.EncodeMultipartFormData(data, [])
SendToRietveld("/" + self.issue + "/description", body, ctype)
def GetMoved(self, root=None):
"""Return a list of files that have been added through svn mv"""
moved = [f[1] for f in self.files if IsSVNMoved(f[1])]
if root:
moved = [os.path.join(root, f) for f in moved]
return moved
def MissingTests(self):
"""Returns True if the change looks like it needs unit tests but has none.
A change needs unit tests if it contains any new source files or methods.
"""
SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
# Ignore third_party entirely.
files = [file for file in self._NonDeletedFileList()
if file.find("third_party") == -1]
added_files = [file for file in self._AddedFileList()
if file.find("third_party") == -1]
# If the change is entirely in third_party, we're done.
if len(files) == 0:
return False
# Any new or modified test files?
# A test file's name ends with "test.*" or "tests.*".
test_files = [test for test in files
if os.path.splitext(test)[0].rstrip("s").endswith("test")]
if len(test_files) > 0:
return False
# Any new source files?
source_files = [file for file in added_files
if os.path.splitext(file)[1] in SOURCE_SUFFIXES]
if len(source_files) > 0:
return True
# Do the long test, checking the files for new methods.
return self._HasNewMethod()
def _HasNewMethod(self):
"""Returns True if the changeset contains any new functions, or if a
function signature has been changed.
A function is identified by starting flush left, containing a "(" before
the next flush-left line, and either ending with "{" before the next
flush-left line or being followed by an unindented "{".
Currently this returns True for new methods, new static functions, and
methods or functions whose signatures have been changed.
Inline methods added to header files won't be detected by this. That's
acceptable for purposes of determining if a unit test is needed, since
inline methods should be trivial.
"""
# To check for methods added to source or header files, we need the diffs.
# We'll generate them all, since there aren't likely to be many files
# apart from source and headers; besides, we'll want them all if we're
# uploading anyway.
if self.patch is None:
self.patch = GenerateDiff(self.FileList())
definition = ""
for line in self.patch.splitlines():
if not line.startswith("+"):
continue
line = line.strip("+").rstrip(" \t")
# Skip empty lines, comments, and preprocessor directives.
# TODO(pamg): Handle multiline comments if it turns out to be a problem.
if line == "" or line.startswith("/") or line.startswith("#"):
continue
# A possible definition ending with "{" is complete, so check it.
if definition.endswith("{"):
if definition.find("(") != -1:
return True
definition = ""
# A { or an indented line, when we're in a definition, continues it.
if (definition != "" and
(line == "{" or line.startswith(" ") or line.startswith("\t"))):
definition += line
# A flush-left line starts a new possible function definition.
elif not line.startswith(" ") and not line.startswith("\t"):
definition = line
return False
SEPARATOR = "\n-----\n"
# The info files have the following format:
# issue_id\n
# SEPARATOR\n
# filepath1\n
# filepath2\n
# .
# .
# filepathn\n
# SEPARATOR\n
# description
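# For example, a (hypothetical) info file might contain:
#   1234
#   -----
#   M      src/foo.cc
#   A      src/foo_test.cc
#   -----
#   Add foo and cover it with a unit test.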
def GetChangelistInfoFile(changename):
"""Returns the file that stores information about a changelist."""
if not changename or re.search(r'[^\w-]', changename):
ErrorExit("Invalid changelist name: " + changename)
return os.path.join(GetInfoDir(), changename)
def LoadChangelistInfoForMultiple(changenames, fail_on_not_found=True,
update_status=False):
"""Loads many changes and merge their files list into one pseudo change.
This is mainly usefull to concatenate many changes into one for a 'gcl try'.
"""
changes = changenames.split(',')
aggregate_change_info = ChangeInfo(name=changenames)
for change in changes:
aggregate_change_info.files += LoadChangelistInfo(change,
fail_on_not_found,
update_status).files
return aggregate_change_info
def LoadChangelistInfo(changename, fail_on_not_found=True,
update_status=False):
"""Gets information about a changelist.
Args:
fail_on_not_found: if True, this function will quit the program if the
changelist doesn't exist.
update_status: if True, the svn status will be updated for all the files
and unchanged files will be removed.
Returns: a ChangeInfo object.
"""
info_file = GetChangelistInfoFile(changename)
if not os.path.exists(info_file):
if fail_on_not_found:
ErrorExit("Changelist " + changename + " not found.")
return ChangeInfo(changename)
data = ReadFile(info_file)
split_data = data.split(SEPARATOR, 2)
if len(split_data) != 3:
os.remove(info_file)
ErrorExit("Changelist file %s was corrupt and deleted" % info_file)
issue = split_data[0]
files = []
for line in split_data[1].splitlines():
status = line[:7]
file = line[7:]
files.append((status, file))
description = split_data[2]
save = False
if update_status:
for file in files[:]:  # Iterate over a copy; entries may be removed below.
filename = os.path.join(GetRepositoryRoot(), file[1])
status = RunShell(["svn", "status", filename])[:7]
if not status: # File has been reverted.
save = True
files.remove(file)
elif status != file[0]:
save = True
files[files.index(file)] = (status, file[1])
change_info = ChangeInfo(changename, issue, description, files)
if save:
change_info.Save()
return change_info
def GetCLs():
"""Returns a list of all the changelists in this repository."""
cls = os.listdir(GetInfoDir())
if CODEREVIEW_SETTINGS_FILE in cls:
cls.remove(CODEREVIEW_SETTINGS_FILE)
return cls
def GenerateChangeName():
"""Generate a random changelist name."""
random.seed()
current_cl_names = GetCLs()
while True:
cl_name = (random.choice(string.ascii_lowercase) +
random.choice(string.digits) +
random.choice(string.ascii_lowercase) +
random.choice(string.digits))
if cl_name not in current_cl_names:
return cl_name
def GetModifiedFiles():
"""Returns a set that maps from changelist name to (status,filename) tuples.
Files not in a changelist have an empty changelist name. Filenames are in
relation to the top level directory of the current repository. Note that
only the current directory and subdirectories are scanned, in order to
improve performance while still being flexible.
"""
files = {}
# Since the files are normalized to the root folder of the repository, figure
# out what we need to add to the paths.
dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
# Get a list of all files in changelists.
files_in_cl = {}
for cl in GetCLs():
change_info = LoadChangelistInfo(cl)
for status, filename in change_info.files:
files_in_cl[filename] = change_info.name
# Get all the modified files.
status = RunShell(["svn", "status"])
for line in status.splitlines():
if not len(line) or line[0] == "?":
continue
status = line[:7]
filename = line[7:]
if dir_prefix:
filename = os.path.join(dir_prefix, filename)
change_list_name = ""
if filename in files_in_cl:
change_list_name = files_in_cl[filename]
files.setdefault(change_list_name, []).append((status, filename))
return files
def GetFilesNotInCL():
"""Returns a list of tuples (status,filename) that aren't in any changelists.
See docstring of GetModifiedFiles for information about path of files and
which directories are scanned.
"""
modified_files = GetModifiedFiles()
if "" not in modified_files:
return []
return modified_files[""]
def SendToRietveld(request_path, payload=None,
content_type="application/octet-stream", timeout=None):
"""Send a POST/GET to Rietveld. Returns the response body."""
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = upload.GetEmail()
password = getpass.getpass("Password for %s: " % email)
return email, password
server = GetCodeReviewSetting("CODE_REVIEW_SERVER")
rpc_server = upload.HttpRpcServer(server,
GetUserCredentials,
host_override=server,
save_cookies=True)
try:
return rpc_server.Send(request_path, payload, content_type, timeout)
except urllib2.URLError, e:
if timeout is None:
ErrorExit("Error accessing url %s" % request_path)
else:
return None
def GetIssueDescription(issue):
"""Returns the issue description from Rietveld."""
return SendToRietveld("/" + issue + "/description")
def UnknownFiles(extra_args):
"""Runs svn status and prints unknown files.
Any args in |extra_args| are passed to the tool to support giving alternate
code locations.
"""
args = ["svn", "status"]
args += extra_args
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=use_shell)
while 1:
line = p.stdout.readline()
if not line:
break
if line[0] != '?':
continue # Not an unknown file to svn.
# The lines look like this:
# "? foo.txt"
# and we want just "foo.txt"
print line[7:].strip()
p.wait()
p.stdout.close()
def Opened():
"""Prints a list of modified files in the current directory down."""
files = GetModifiedFiles()
cl_keys = files.keys()
cl_keys.sort()
for cl_name in cl_keys:
if cl_name:
note = ""
if len(LoadChangelistInfo(cl_name).files) != len(files[cl_name]):
note = " (Note: this changelist contains files outside this directory)"
print "\n--- Changelist " + cl_name + note + ":"
for file in files[cl_name]:
print "".join(file)
def Help(argv=None):
if argv and argv[0] == 'try':
TryChange(None, ['--help'])
return
print (
"""GCL is a wrapper for Subversion that simplifies working with groups of files.
Basic commands:
-----------------------------------------
gcl change change_name
Add/remove files to a changelist. Only scans the current directory and
subdirectories.
gcl upload change_name [-r [email protected],[email protected],...]
[--send_mail] [--no_try] [--no_presubmit]
Uploads the changelist to the server for review.
gcl commit change_name [--force]
Commits the changelist to the repository.
gcl lint change_name
Check all the files in the changelist for possible style violations.
Advanced commands:
-----------------------------------------
gcl delete change_name
Deletes a changelist.
gcl diff change_name
Diffs all files in the changelist.
gcl presubmit change_name
Runs presubmit checks without uploading the changelist.
gcl diff
Diffs all files in the current directory and subdirectories that aren't in
a changelist.
gcl changes
Lists all the changelists and the files in them.
gcl nothave [optional directory]
Lists files unknown to Subversion.
gcl opened
Lists modified files in the current directory and subdirectories.
gcl settings
Print the code review settings for this directory.
gcl status
Lists modified and unknown files in the current directory and
subdirectories.
gcl try change_name
Sends the change to the tryserver so a trybot can do a test run on your
code. To send multiple changes as one path, use a comma-separated list
of changenames.
--> Use 'gcl help try' for more information.
""")
def GetEditor():
editor = os.environ.get("SVN_EDITOR")
if not editor:
editor = os.environ.get("EDITOR")
if not editor:
if sys.platform.startswith("win"):
editor = "notepad"
else:
editor = "vi"
return editor
def GenerateDiff(files, root=None):
"""Returns a string containing the diff for the given file list.
The files in the list should either be absolute paths or relative to the
given root. If no root directory is provided, the repository root will be
used.
"""
previous_cwd = os.getcwd()
if root is None:
os.chdir(GetRepositoryRoot())
else:
os.chdir(root)
diff = []
for file in files:
# Use svn info output instead of os.path.isdir because the latter fails
# when the file is deleted.
if GetSVNFileInfo(file).get("Node Kind") == "directory":
continue
# If the user specified a custom diff command in their svn config file,
# then it'll be used when we do svn diff, which we don't want to happen
# since we want the unified diff. Using --diff-cmd=diff doesn't always
# work, since they can have another diff executable in their path that
# gives different line endings. So we use a bogus temp directory as the
# config directory, which gets around these problems.
if sys.platform.startswith("win"):
parent_dir = tempfile.gettempdir()
else:
parent_dir = sys.path[0] # tempdir is not secure.
bogus_dir = os.path.join(parent_dir, "temp_svn_config")
if not os.path.exists(bogus_dir):
os.mkdir(bogus_dir)
diff.append(RunShell(["svn", "diff", "--config-dir", bogus_dir, file]))
os.chdir(previous_cwd)
return "".join(diff)
def UploadCL(change_info, args):
if not change_info.FileList():
print "Nothing to upload, changelist is empty."
return
if not "--no_presubmit" in args:
if not DoPresubmitChecks(change_info, committing=False):
return
else:
args.remove("--no_presubmit")
no_try = "--no_try" in args
if no_try:
args.remove("--no_try")
else:
# Support --no-try as --no_try
no_try = "--no-try" in args
if no_try:
args.remove("--no-try")
# Map --send-mail to --send_mail
if "--send-mail" in args:
args.remove("--send-mail")
args.append("--send_mail")
# TODO(pamg): Do something when tests are missing. The plan is to upload a
# message to Rietveld and have it shown in the UI attached to this patch.
upload_arg = ["upload.py", "-y"]
upload_arg.append("--server=" + GetCodeReviewSetting("CODE_REVIEW_SERVER"))
upload_arg.extend(args)
desc_file = ""
if change_info.issue: # Uploading a new patchset.
found_message = False
for arg in args:
if arg.startswith("--message") or arg.startswith("-m"):
found_message = True
break
if not found_message:
upload_arg.append("--message=''")
upload_arg.append("--issue=" + change_info.issue)
else: # First time we upload.
handle, desc_file = tempfile.mkstemp(text=True)
os.write(handle, change_info.description)
os.close(handle)
upload_arg.append("--cc=" + GetCodeReviewSetting("CC_LIST"))
upload_arg.append("--description_file=" + desc_file + "")
if change_info.description:
subject = change_info.description[:77]
if subject.find("\r\n") != -1:
subject = subject[:subject.find("\r\n")]
if subject.find("\n") != -1:
subject = subject[:subject.find("\n")]
if len(change_info.description) > 77:
subject = subject + "..."
upload_arg.append("--message=" + subject)
# Change the current working directory before calling upload.py so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(GetRepositoryRoot())
# If we have a lot of files with long paths, then we won't be able to fit
# the command to "svn diff". Instead, we generate the diff manually for
# each file and concatenate them before passing it to upload.py.
if change_info.patch is None:
change_info.patch = GenerateDiff(change_info.FileList())
issue, patchset = upload.RealMain(upload_arg, change_info.patch)
if issue and issue != change_info.issue:
change_info.issue = issue
change_info.Save()
if desc_file:
os.remove(desc_file)
# Do background work on Rietveld to lint the file so that the results are
# ready when the issue is viewed.
SendToRietveld("/lint/issue%s_%s" % (issue, patchset), timeout=0.5)
# Once uploaded to Rietveld, send it to the try server.
if not no_try and GetCodeReviewSetting('TRY_ON_UPLOAD').lower() == 'true':
# Use the local diff.
TryChange(change_info, [], True)
os.chdir(previous_cwd)
def PresubmitCL(change_info):
"""Reports what presubmit checks on the change would report."""
if not change_info.FileList():
print "Nothing to presubmit check, changelist is empty."
return
print "*** Presubmit checks for UPLOAD would report: ***"
DoPresubmitChecks(change_info, committing=False)
print "\n\n*** Presubmit checks for COMMIT would report: ***"
DoPresubmitChecks(change_info, committing=True)
def TryChange(change_info, args, swallow_exception=False, patchset=None):
"""Create a diff file of change_info and send it to the try server."""
try:
import trychange
except ImportError:
if swallow_exception:
return
ErrorExit("You need to install trychange.py to use the try server.")
if change_info:
trychange.TryChange(args, change_info.name, change_info.FileList(),
swallow_exception, patchset)
else:
trychange.TryChange(args)
def Commit(change_info, args):
if not change_info.FileList():
print "Nothing to commit, changelist is empty."
return
if not "--no_presubmit" in args:
if not DoPresubmitChecks(change_info, committing=True):
return
else:
args.remove("--no_presubmit")
no_tree_status_check = ("--force" in args or "-f" in args)
if not no_tree_status_check and not IsTreeOpen():
print ("Error: The tree is closed. Try again later or use --force to force"
" the commit. May the --force be with you.")
return
commit_cmd = ["svn", "commit"]
filename = ''
if change_info.issue:
# Get the latest description from Rietveld.
change_info.description = GetIssueDescription(change_info.issue)
commit_message = change_info.description.replace('\r\n', '\n')
if change_info.issue:
commit_message += ('\nReview URL: http://%s/%s' %
(GetCodeReviewSetting("CODE_REVIEW_SERVER"),
change_info.issue))
handle, commit_filename = tempfile.mkstemp(text=True)
os.write(handle, commit_message)
os.close(handle)
handle, targets_filename = tempfile.mkstemp(text=True)
os.write(handle, "\n".join(change_info.FileList()))
os.close(handle)
commit_cmd += ['--file=' + commit_filename]
commit_cmd += ['--targets=' + targets_filename]
# Change the current working directory before calling commit.
previous_cwd = os.getcwd()
os.chdir(GetRepositoryRoot())
output = RunShell(commit_cmd, True)
os.remove(commit_filename)
os.remove(targets_filename)
if output.find("Committed revision") != -1:
change_info.Delete()
if change_info.issue:
revision = re.compile(".*?\nCommitted revision (\d+)",
re.DOTALL).match(output).group(1)
viewvc_url = GetCodeReviewSetting("VIEW_VC")
change_info.description = (change_info.description +
"\n\nCommitted: " + viewvc_url + revision)
change_info.CloseIssue()
os.chdir(previous_cwd)
def Change(change_info):
"""Creates/edits a changelist."""
if change_info.issue:
try:
description = GetIssueDescription(change_info.issue)
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = ""
change_info.Save()
else:
ErrorExit("Error getting the description from Rietveld: " + err)
else:
description = change_info.description
other_files = GetFilesNotInCL()
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + GetRepositoryRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.files]) + separator2 +
'\n'.join([f[0] + f[1] for f in other_files]) + '\n')
handle, filename = tempfile.mkstemp(text=True)
os.write(handle, text)
os.close(handle)
os.system(GetEditor() + " " + filename)
result = ReadFile(filename)
os.remove(filename)
if not result:
return
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n" + result)
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description:
change_info.description = new_description
if change_info.issue:
# Update the Rietveld issue with the new description.
change_info.UpdateRietveldDescription()
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
file = line[7:]
new_cl_files.append((status, file))
change_info.files = new_cl_files
change_info.Save()
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# We don't lint files in these path prefixes.
IGNORE_PATHS = ("webkit",)
# Valid extensions for files we want to lint.
CPP_EXTENSIONS = ("cpp", "cc", "h")
def Lint(change_info, args):
"""Runs cpplint.py on all the files in |change_info|"""
try:
import cpplint
except ImportError:
ErrorExit("You need to install cpplint.py to lint C++ files.")
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(GetRepositoryRoot())
# Process cpplints arguments if any.
filenames = cpplint.ParseArguments(args + change_info.FileList())
for file in filenames:
if any(file.endswith(suffix) for suffix in CPP_EXTENSIONS):
if any(file.startswith(prefix) for prefix in IGNORE_PATHS):
print "Ignoring non-Google styled file %s" % file
else:
cpplint.ProcessFile(file, cpplint._cpplint_state.verbose_level)
print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
os.chdir(previous_cwd)
def DoPresubmitChecks(change_info, committing):
"""Imports presubmit, then calls presubmit.DoPresubmitChecks."""
# TODO(termie): we don't have presubmit checks, so we don't do any yet
# but it'd be great to add some
return True
# Need to import here to avoid circular dependency.
import presubmit
result = presubmit.DoPresubmitChecks(change_info,
committing,
verbose=False,
output_stream=sys.stdout,
input_stream=sys.stdin)
if not result:
print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
return result
def Changes():
"""Print all the changelists and their files."""
for cl in GetCLs():
change_info = LoadChangelistInfo(cl, True, True)
print "\n--- Changelist " + change_info.name + ":"
for file in change_info.files:
print "".join(file)
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1:
Help()
return 0
# Create the directory where we store information about changelists if it
# doesn't exist.
if not os.path.exists(GetInfoDir()):
os.mkdir(GetInfoDir())
# Commands that don't require an argument.
command = argv[1]
if command == "opened":
Opened()
return 0
if command == "status":
Opened()
print "\n--- Not in any changelist:"
UnknownFiles([])
return 0
if command == "nothave":
UnknownFiles(argv[2:])
return 0
if command == "changes":
Changes()
return 0
if command == "help":
Help(argv[2:])
return 0
if command == "diff" and len(argv) == 2:
files = GetFilesNotInCL()
print GenerateDiff([x[1] for x in files])
return 0
if command == "settings":
ignore = GetCodeReviewSetting("UNKNOWN");
print CODEREVIEW_SETTINGS
return 0
if len(argv) == 2:
if command == "change":
# Generate a random changelist name.
changename = GenerateChangeName()
else:
ErrorExit("Need a changelist name.")
else:
changename = argv[2]
# When the command is 'try' and --patchset is used, the patch to try
# is on the Rietveld server. 'change' creates a change so it's fine if the
# change didn't exist. All other commands require an existing change.
fail_on_not_found = command != "try" and command != "change"
if command == "try" and changename.find(',') != -1:
change_info = LoadChangelistInfoForMultiple(changename, True, True)
else:
change_info = LoadChangelistInfo(changename, fail_on_not_found, True)
if command == "change":
Change(change_info)
elif command == "lint":
Lint(change_info, argv[3:])
elif command == "upload":
UploadCL(change_info, argv[3:])
elif command == "presubmit":
PresubmitCL(change_info)
elif command in ("commit", "submit"):
Commit(change_info, argv[3:])
elif command == "delete":
change_info.Delete()
elif command == "try":
# When the change contains no file, send the "changename" positional
# argument to trychange.py.
if change_info.files:
args = argv[3:]
else:
change_info = None
args = argv[2:]
TryChange(change_info, args)
else:
# Everything else that is passed into gcl we redirect to svn, after adding
# the files. This allows commands such as 'gcl diff xxx' to work.
args = ["svn", command]
root = GetRepositoryRoot()
args.extend([os.path.join(root, x) for x in change_info.FileList()])
if command == "diff":
moved_files = change_info.GetMoved(root=root)
for f in moved_files:
args.remove(f)
print DiffPrint(f)
RunShell(args, True)
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
from typing import Type
import filters as f
from filters.macros import filter_macro
from urllib.parse import urlparse
from iota import Address, TryteString, TrytesCompatible
from iota.crypto.addresses import AddressGenerator
__all__ = [
'AddressNoChecksum',
'GeneratedAddress',
'NodeUri',
'SecurityLevel',
'StringifiedTrytesArray',
'Trytes',
]
class GeneratedAddress(f.BaseFilter):
"""
Validates an incoming value as a generated :py:class:`Address` (must
have ``key_index`` and ``security_level`` set).
When a value doesn't pass the filter, a ``ValueError`` is raised with lots
of contextual info attached to it.
:return:
:py:class:`GeneratedAddress` object.
"""
CODE_NO_KEY_INDEX = 'no_key_index'
CODE_NO_SECURITY_LEVEL = 'no_security_level'
templates = {
CODE_NO_KEY_INDEX:
'Address must have ``key_index`` attribute set.',
CODE_NO_SECURITY_LEVEL:
'Address must have ``security_level`` attribute set.',
}
def _apply(self, value):
value: Address = self._filter(value, f.Type(Address))
if self._has_errors:
return None
if value.key_index is None:
return self._invalid_value(value, self.CODE_NO_KEY_INDEX)
if value.security_level is None:
return self._invalid_value(value, self.CODE_NO_SECURITY_LEVEL)
return value
class NodeUri(f.BaseFilter):
"""
Validates a string as a node URI.
When a value doesn't pass the filter, a ``ValueError`` is raised with lots
of contextual info attached to it.
:return:
:py:class:`NodeUri` object.
"""
SCHEMES = {'tcp', 'udp'}
"""
Allowed schemes for node URIs.
"""
CODE_NOT_NODE_URI = 'not_node_uri'
templates = {
CODE_NOT_NODE_URI:
'This value does not appear to be a valid node URI.',
}
def _apply(self, value):
value: str = self._filter(value, f.Type(str))
if self._has_errors:
return None
parsed = urlparse(value)
if parsed.scheme not in self.SCHEMES:
return self._invalid_value(value, self.CODE_NOT_NODE_URI)
return value
@filter_macro
def SecurityLevel() -> f.FilterChain:
"""
Generates a filter chain for validating a security level.
:return:
:py:class:`filters.FilterChain` object.
"""
return (
f.Type(int) |
f.Min(1) |
f.Max(3) |
f.Optional(default=AddressGenerator.DEFAULT_SECURITY_LEVEL)
)
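# Informal summary of the chain above: integers in the range 1..3 pass
# through unchanged, ``None`` is replaced with
# ``AddressGenerator.DEFAULT_SECURITY_LEVEL`` by ``f.Optional``, and any
# other value fails validation (wrong type, or outside ``f.Min``/``f.Max``).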
class Trytes(f.BaseFilter):
"""
Validates a sequence as a sequence of trytes.
When a value doesn't pass the filter, a ``ValueError`` is raised with lots
of contextual info attached to it.
:param TryteString result_type:
Any subclass of :py:class:`~iota.TryteString` that you want the filter
to validate.
:raises TypeError: if value is not of ``result_type``.
:raises ValueError:
if ``result_type`` is not of :py:class:`~iota.TryteString` type.
:return:
:py:class:`Trytes` object.
"""
CODE_NOT_TRYTES = 'not_trytes'
CODE_WRONG_FORMAT = 'wrong_format'
templates = {
CODE_NOT_TRYTES: 'This value is not a valid tryte sequence.',
CODE_WRONG_FORMAT: 'This value is not a valid {result_type}.',
}
def __init__(self, result_type: type = TryteString) -> None:
super(Trytes, self).__init__()
if not isinstance(result_type, type):
raise TypeError(
'Invalid result_type for {filter_type} '
'(expected subclass of TryteString, '
'actual instance of {result_type}).'.format(
filter_type=type(self).__name__,
result_type=type(result_type).__name__,
),
)
if not issubclass(result_type, TryteString):
raise ValueError(
'Invalid result_type for {filter_type} '
'(expected TryteString, actual {result_type}).'.format(
filter_type=type(self).__name__,
result_type=result_type.__name__,
),
)
self.result_type = result_type
def _apply(self, value):
value: TrytesCompatible = self._filter(
filter_chain=f.Type(
(bytes, bytearray, str, TryteString)
),
value=value,
)
if self._has_errors:
return None
# If the incoming value already has the correct type, then we're
# done.
if isinstance(value, self.result_type):
return value
# First convert to a generic TryteString, to make sure that the
# sequence doesn't contain any invalid characters.
try:
value = TryteString(value)
except ValueError:
return self._invalid_value(
value=value,
reason=self.CODE_NOT_TRYTES,
exc_info=True,
)
if self.result_type is TryteString:
return value
# Now coerce to the expected type and verify that there are no
# type-specific errors.
try:
return self.result_type(value)
except ValueError:
return self._invalid_value(
value=value,
reason=self.CODE_WRONG_FORMAT,
exc_info=True,
template_vars={
'result_type': self.result_type.__name__,
},
)
@filter_macro
def StringifiedTrytesArray(trytes_type: Type = TryteString) -> f.FilterChain:
"""
Validates that the incoming value is an array containing tryte
strings corresponding to the specified type (e.g.,
``TransactionHash``).
When a value doesn't pass the filter, a ``ValueError`` is raised with lots
of contextual info attached to it.
:param TryteString trytes_type:
Any subclass of :py:class:`~iota.TryteString` that you want the filter
to validate.
:return:
:py:class:`filters.FilterChain` object.
.. important::
This filter will return string values, suitable for inclusion in
an API request. If you are expecting objects (e.g.,
:py:class:`Address`), then this is not the filter to use!
.. note::
This filter will allow empty arrays and `None`. If this is not
desirable, chain this filter with ``f.NotEmpty`` or
``f.Required``, respectively.
"""
return f.Array | f.FilterRepeater(
f.Required |
Trytes(trytes_type) |
f.Unicode(encoding='ascii', normalize=False),
)
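# Usage sketch (illustrative only; ``TransactionHash`` is assumed to be
# importable from ``iota``): reject empty input, as suggested in the note
# above, by chaining with ``f.Required``:
#
#   hashes_filter = f.Required | StringifiedTrytesArray(TransactionHash)
#
# The values produced are plain strings, suitable for an API request payload.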
class AddressNoChecksum(Trytes):
"""
Validates a sequence as an :py:class:`Address`, then chops off the checksum
if present.
When a value doesn't pass the filter, a ``ValueError`` is raised with lots
of contextual info attached to it.
:return:
:py:class:`AddressNoChecksum` object.
"""
ADDRESS_BAD_CHECKSUM = 'address_bad_checksum'
templates = {
ADDRESS_BAD_CHECKSUM:
'Checksum is {supplied_checksum}, should be {expected_checksum}?',
}
def __init__(self) -> None:
super(AddressNoChecksum, self).__init__(result_type=Address)
def _apply(self, value):
super(AddressNoChecksum, self)._apply(value)
if self._has_errors:
return None
# Possible it's still just a TryteString.
if not isinstance(value, Address):
value = Address(value)
# Bail out if we have a bad checksum.
if value.checksum and not value.is_checksum_valid():
return self._invalid_value(
value=value,
reason=self.ADDRESS_BAD_CHECKSUM,
exc_info=True,
context={
'supplied_checksum': value.checksum,
'expected_checksum': value.with_valid_checksum().checksum,
},
)
return Address(value.address)
|
|
""""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, no_type_check
import aiohttp
from google.auth.credentials import Credentials
import json
import pytest # noqa F401 Needed to run the tests
from mock import AsyncMock, Mock, patch
from google.cloud.sql.connector.refresh_utils import _get_ephemeral, _get_metadata
from google.cloud.sql.connector.utils import generate_keys
class FakeClientSessionGet:
"""Helper class to return mock data for get request."""
async def text(self) -> str:
response = {
"kind": "sql#connectSettings",
"serverCaCert": {
"kind": "sql#sslCert",
"certSerialNumber": "0",
"cert": "-----BEGIN CERTIFICATE-----\nabc123\n-----END CERTIFICATE-----",
"commonName": "Google",
"sha1Fingerprint": "abc",
"instance": "my-instance",
"createTime": "2021-10-18T18:48:03.785Z",
"expirationTime": "2031-10-16T18:49:03.785Z",
},
"ipAddresses": [
{"type": "PRIMARY", "ipAddress": "0.0.0.0"},
{"type": "PRIVATE", "ipAddress": "1.0.0.0"},
],
"region": "my-region",
"databaseVersion": "MYSQL_8_0",
"backendType": "SECOND_GEN",
}
return json.dumps(response)
class FakeClientSessionPost:
"""Helper class to return mock data for post request."""
async def text(self) -> str:
response = {
"ephemeralCert": {
"kind": "sql#sslCert",
"certSerialNumber": "",
"cert": "-----BEGIN CERTIFICATE-----\nabc123\n-----END CERTIFICATE-----",
}
}
return json.dumps(response)
@pytest.fixture
def credentials() -> Credentials:
credentials = Mock(spec=Credentials)
credentials.valid = True
credentials.token = "12345"
return credentials
@pytest.mark.asyncio
@patch("aiohttp.ClientSession.post", new_callable=AsyncMock)
async def test_get_ephemeral(mock_post: AsyncMock, credentials: Credentials) -> None:
"""
Test to check whether _get_ephemeral runs without problems given valid
parameters.
"""
mock_post.return_value = FakeClientSessionPost()
project = "my-project"
instance = "my-instance"
_, pub_key = await generate_keys()
async with aiohttp.ClientSession() as client_session:
result: Any = await _get_ephemeral(
client_session, credentials, project, instance, pub_key
)
result = result.split("\n")
assert (
result[0] == "-----BEGIN CERTIFICATE-----"
and result[len(result) - 1] == "-----END CERTIFICATE-----"
)
@pytest.mark.asyncio
@no_type_check
async def test_get_ephemeral_TypeError(credentials: Credentials) -> None:
"""
Test to check whether _get_ephemeral throws proper TypeError
when given incorrect input arg types.
"""
client_session = Mock(aiohttp.ClientSession)
project = "my-project"
instance = "my-instance"
pub_key = "key"
# incorrect credentials type
with pytest.raises(TypeError):
await _get_ephemeral(
client_session=client_session,
credentials="bad-credentials",
project=project,
instance=instance,
pub_key=pub_key,
)
# incorrect project type
with pytest.raises(TypeError):
await _get_ephemeral(
client_session=client_session,
credentials=credentials,
project=12345,
instance=instance,
pub_key=pub_key,
)
# incorrect instance type
with pytest.raises(TypeError):
await _get_ephemeral(
client_session=client_session,
credentials=credentials,
project=project,
instance=12345,
pub_key=pub_key,
)
# incorrect pub_key type
with pytest.raises(TypeError):
await _get_ephemeral(
client_session=client_session,
credentials=credentials,
project=project,
instance=instance,
pub_key=12345,
)
@pytest.mark.asyncio
@patch("aiohttp.ClientSession.get", new_callable=AsyncMock)
async def test_get_metadata(mock_get: AsyncMock, credentials: Credentials) -> None:
"""
Test to check whether _get_metadata runs without problems given valid
parameters.
"""
mock_get.return_value = FakeClientSessionGet()
project = "my-project"
instance = "my-instance"
async with aiohttp.ClientSession() as client_session:
result = await _get_metadata(client_session, credentials, project, instance)
assert result["ip_addresses"] is not None and isinstance(
result["server_ca_cert"], str
)
@pytest.mark.asyncio
@no_type_check
async def test_get_metadata_TypeError(credentials: Credentials) -> None:
"""
Test to check whether _get_metadata throws proper TypeError
when given incorrect input arg types.
"""
client_session = Mock(aiohttp.ClientSession)
project = "my-project"
instance = "my-instance"
# incorrect credentials type
with pytest.raises(TypeError):
await _get_metadata(
client_session=client_session,
credentials="bad-credentials",
project=project,
instance=instance,
)
# incorrect project type
with pytest.raises(TypeError):
await _get_metadata(
client_session=client_session,
credentials=credentials,
project=12345,
instance=instance,
)
# incorrect instance type
with pytest.raises(TypeError):
await _get_metadata(
client_session=client_session,
credentials=credentials,
project=project,
instance=12345,
)
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HdaughterMother_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HdaughterMother_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HdaughterMother_CompleteLHS, self).__init__(name='HdaughterMother_CompleteLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = []
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'daughterMother')
# Nodes that represent match classes
# match class Family() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__Family"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Member() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Member"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Member() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__Member"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
#Nodes that represent apply classes
# match class Man() node
self.add_node()
self.vs[3]["MT_subtypeMatching__"] = False
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_subtypes__"] = []
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__Man"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the match associations of the property.
# match association Family--mother-->Member node
self.add_node()
self.vs[4]["MT_subtypeMatching__"] = False
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "mother"
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_subtypes__"] = []
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__directLink_S"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc4')
# match association Family--daughter-->Member node
self.add_node()
self.vs[5]["MT_subtypeMatching__"] = False
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "daughter"
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_subtypes__"] = []
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__directLink_S"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc5')
# Nodes that represent the apply associations of the property.
# Nodes that represent trace relations
# backward association Family---->Man node
self.add_node()
self.vs[6]["MT_subtypeMatching__"] = False
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_subtypes__"] = []
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__trace_link"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'blink6')
# Add the edges
self.add_edges([
(3,6), # apply_class Man() -> backward_association
(6,0), # backward_association -> apply_class Family()
(0,4), # match_class Family() -> association mother
(4,2), # association mother -> match_class Member()
(0,5), # match_class Family() -> association daughter
(5,1) # association daughter -> match_class Member()
])
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "mother"
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "daughter"
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
|
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Required for Django 1.5.
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
#ALLOWED_HOSTS = ['horizon.example.com', ]
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, the identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
# OPENSTACK_API_VERSIONS = {
# "data_processing": 1.1,
# "identity": 3,
# "volume": 2
# }
# Set this to True if running on a multi-domain model. When this is enabled,
# it will require the user to enter the Domain name in addition to the
# username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on a single-domain model
# with Keystone V3. All entities will be created in the default domain.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Set Console type:
# valid options would be "AUTO"(default), "VNC", "SPICE", "RDP" or None
# Set to None explicitly if you want to deactivate the console.
# CONSOLE_TYPE = "AUTO"
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
'modal_backdrop': 'static',
'angular_modules': [],
'js_files': [],
}
# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements.")
# }
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"
# Setting this to True will disable the reveal button for password fields,
# including on the login form.
# HORIZON_CONFIG["disable_password_reveal"] = False
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
# may be situations where you would want to set this explicitly, e.g. when
# multiple dashboard instances are distributed on different machines (usually
# behind a load-balancer). Either you have to make sure that a session gets all
# requests routed to the same dashboard instance or you set the same SECRET_KEY
# for all of them.
from horizon.utils import secret_key
SECRET_KEY = secret_key.generate_or_read_from_file(
os.path.join(LOCAL_PATH, '.secret_key_store'))
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to log in again. To use
# memcached set CACHES to something like
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
AVAILABLE_REGIONS = [
('http://r1-controller:5000/v3', 'regionOne'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
]
OPENSTACK_HOST = "r1-controller"
#OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
### to support keystone v3
OPENSTACK_API_VERSIONS = {
"identity": 3
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
# Disable SSL certificate checks (useful for self-signed certificates):
# OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
#Setting this to True, will add a new "Retrieve Password" action on instance,
#allowing Admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
}
# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
# services provided by cinder that are not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
'enable_backup': False,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': True,
'enable_quotas': True,
'enable_ipv6': True,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': True,
'enable_firewall': True,
'enable_vpn': True,
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
# Set which provider network types are supported. Only the network types
# in this list will be available to choose from when creating a network.
# Network types include local, flat, vlan, gre, and vxlan.
'supported_provider_types': ['*'],
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
# OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', _('Select format')),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('iso', _('ISO - Optical Disk Image')),
# ('ova', _('OVA - Open Virtual Appliance')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI - Virtual Disk Image')),
# ('vhd', _('VHD - Virtual Hard Disk')),
# ('vmdk', _('VMDK - Virtual Machine Disk'))
# ]
# }
# The IMAGE_CUSTOM_PROPERTY_TITLES setting is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type")
}
# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
# custom properties should not be displayed in the Image Custom Properties
# table.
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
# CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
# }
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json',
# 'volume': 'cinder_policy.json',
# 'image': 'glance_policy.json',
# 'orchestration': 'heat_policy.json',
# 'network': 'neutron_policy.json',
#}
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
# TROVE_ADD_USER_PERMS = []
# TROVE_ADD_DATABASE_PERMS = []
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'keystoneclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'neutronclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'heatclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'ceilometerclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'troveclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'swiftclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nikola_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
'scss': {
'handlers': ['null'],
'propagate': False,
},
}
}
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': _('All TCP'),
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': _('All UDP'),
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': _('All ICMP'),
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1433',
'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
# Deprecation Notice:
#
# The setting FLAVOR_EXTRA_KEYS has been deprecated.
# Please load extra spec metadata into the Glance Metadata Definition Catalog.
#
# The sample quota definitions can be found in:
# <glance_source>/etc/metadefs/compute-quota.json
#
# The metadata definition catalog supports CLI and API:
# $glance --os-image-api-version 2 help md-namespace-import
# $glance-manage db_load_metadefs <directory_with_definition_files>
#
# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
# SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
# OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
|
|
# -*- coding: utf-8 -*-
"""Models for the builds app."""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import logging
import os.path
import re
from builtins import object
from shutil import rmtree
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from guardian.shortcuts import assign
from taggit.managers import TaggableManager
from readthedocs.core.utils import broadcast
from readthedocs.projects.constants import (
BITBUCKET_URL, GITHUB_URL, GITLAB_URL, PRIVACY_CHOICES, PRIVATE)
from readthedocs.projects.models import APIProject, Project
from .constants import (
BRANCH, BUILD_STATE, BUILD_STATE_FINISHED, BUILD_TYPES, LATEST,
NON_REPOSITORY_VERSIONS, STABLE, TAG, VERSION_TYPES)
from .managers import VersionManager
from .querysets import BuildQuerySet, RelatedBuildQuerySet, VersionQuerySet
from .utils import (
get_bitbucket_username_repo, get_github_username_repo,
get_gitlab_username_repo)
from .version_slug import VersionSlugField
DEFAULT_VERSION_PRIVACY_LEVEL = getattr(
settings, 'DEFAULT_VERSION_PRIVACY_LEVEL', 'public')
log = logging.getLogger(__name__)
@python_2_unicode_compatible
class Version(models.Model):
"""Version of a ``Project``."""
project = models.ForeignKey(
Project,
verbose_name=_('Project'),
related_name='versions',
)
type = models.CharField(
_('Type'),
max_length=20,
choices=VERSION_TYPES,
default='unknown',
)
# used by the vcs backend
    #: The identifier is the ID for the revision this version is for. This
    #: might be the revision number (e.g. in SVN), or the commit hash (e.g. in
    #: Git). If this version is pointing to a branch, then ``identifier``
    #: will contain the branch name.
identifier = models.CharField(_('Identifier'), max_length=255)
#: This is the actual name that we got for the commit stored in
#: ``identifier``. This might be the tag or branch name like ``"v1.0.4"``.
#: However this might also hold special version names like ``"latest"``
#: and ``"stable"``.
verbose_name = models.CharField(_('Verbose Name'), max_length=255)
#: The slug is the slugified version of ``verbose_name`` that can be used
#: in the URL to identify this version in a project. It's also used in the
#: filesystem to determine how the paths for this version are called. It
#: must not be used for any other identifying purposes.
slug = VersionSlugField(
_('Slug'), max_length=255, populate_from='verbose_name')
supported = models.BooleanField(_('Supported'), default=True)
active = models.BooleanField(_('Active'), default=False)
built = models.BooleanField(_('Built'), default=False)
uploaded = models.BooleanField(_('Uploaded'), default=False)
privacy_level = models.CharField(
_('Privacy Level'),
max_length=20,
choices=PRIVACY_CHOICES,
default=DEFAULT_VERSION_PRIVACY_LEVEL,
help_text=_('Level of privacy for this Version.'),
)
tags = TaggableManager(blank=True)
machine = models.BooleanField(_('Machine Created'), default=False)
objects = VersionManager.from_queryset(VersionQuerySet)()
class Meta(object):
unique_together = [('project', 'slug')]
ordering = ['-verbose_name']
permissions = (
# Translators: Permission around whether a user can view the
# version
('view_version', _('View Version')),)
def __str__(self):
return ugettext(
'Version {version} of {project} ({pk})'.format(
version=self.verbose_name,
project=self.project,
pk=self.pk,
))
@property
def commit_name(self):
"""
Return the branch name, the tag name or the revision identifier.
The result could be used as ref in a git repo, e.g. for linking to
GitHub, Bitbucket or GitLab.
"""
# LATEST is special as it is usually a branch but does not contain the
# name in verbose_name.
if self.slug == LATEST:
if self.project.default_branch:
return self.project.default_branch
return self.project.vcs_repo().fallback_branch
if self.slug == STABLE:
if self.type == BRANCH:
# Special case, as we do not store the original branch name
# that the stable version works on. We can only interpolate the
# name from the commit identifier, but it's hacky.
# TODO: Refactor ``Version`` to store more actual info about
# the underlying commits.
if self.identifier.startswith('origin/'):
return self.identifier[len('origin/'):]
return self.identifier
# By now we must have handled all special versions.
assert self.slug not in NON_REPOSITORY_VERSIONS
if self.type in (BRANCH, TAG):
# If this version is a branch or a tag, the verbose_name will
# contain the actual name. We cannot use identifier as this might
# include the "origin/..." part in the case of a branch. A tag
# would contain the hash in identifier, which is not as pretty as
# the actual tag name.
return self.verbose_name
        # If we got this far it's not a special version, nor a branch or tag.
        # Therefore just return the identifier to make a safe guess.
        log.debug('TODO: Raise an exception here. Testing in which cases this happens.')
return self.identifier
def get_absolute_url(self):
if not self.built and not self.uploaded:
return reverse(
'project_version_detail',
kwargs={
'project_slug': self.project.slug,
'version_slug': self.slug,
},
)
private = self.privacy_level == PRIVATE
return self.project.get_docs_url(
version_slug=self.slug, private=private)
def save(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Add permissions to the Version for all owners on save."""
from readthedocs.projects import tasks
obj = super(Version, self).save(*args, **kwargs)
for owner in self.project.users.all():
assign('view_version', owner, self)
try:
self.project.sync_supported_versions()
except Exception:
log.exception('failed to sync supported versions')
broadcast(
type='app', task=tasks.symlink_project, args=[self.project.pk])
return obj
def delete(self, *args, **kwargs): # pylint: disable=arguments-differ
from readthedocs.projects import tasks
log.info('Removing files for version %s', self.slug)
broadcast(type='app', task=tasks.clear_artifacts, args=[self.pk])
broadcast(
type='app', task=tasks.symlink_project, args=[self.project.pk])
super(Version, self).delete(*args, **kwargs)
@property
def identifier_friendly(self):
"""Return display friendly identifier."""
if re.match(r'^[0-9a-f]{40}$', self.identifier, re.I):
return self.identifier[:8]
return self.identifier
def get_subdomain_url(self):
private = self.privacy_level == PRIVATE
return self.project.get_docs_url(
version_slug=self.slug,
lang_slug=self.project.language,
private=private,
)
def get_downloads(self, pretty=False):
project = self.project
data = {}
if pretty:
if project.has_pdf(self.slug):
data['PDF'] = project.get_production_media_url('pdf', self.slug)
if project.has_htmlzip(self.slug):
data['HTML'] = project.get_production_media_url(
'htmlzip', self.slug)
if project.has_epub(self.slug):
data['Epub'] = project.get_production_media_url(
'epub', self.slug)
else:
if project.has_pdf(self.slug):
data['pdf'] = project.get_production_media_url('pdf', self.slug)
if project.has_htmlzip(self.slug):
data['htmlzip'] = project.get_production_media_url(
'htmlzip', self.slug)
if project.has_epub(self.slug):
data['epub'] = project.get_production_media_url(
'epub', self.slug)
return data
def get_conf_py_path(self):
conf_py_path = self.project.conf_dir(self.slug)
checkout_prefix = self.project.checkout_path(self.slug)
conf_py_path = os.path.relpath(conf_py_path, checkout_prefix)
return conf_py_path
def get_build_path(self):
"""Return version build path if path exists, otherwise `None`."""
path = self.project.checkout_path(version=self.slug)
if os.path.exists(path):
return path
return None
def clean_build_path(self):
"""
Clean build path for project version.
Ensure build path is clean for project version. Used to ensure stale
build checkouts for each project version are removed.
"""
try:
path = self.get_build_path()
if path is not None:
log.debug('Removing build path %s for %s', path, self)
rmtree(path)
except OSError:
log.exception('Build path cleanup failed')
def get_github_url(
self, docroot, filename, source_suffix='.rst', action='view'):
"""
Return a GitHub URL for a given filename.
:param docroot: Location of documentation in repository
:param filename: Name of file
:param source_suffix: File suffix of documentation format
:param action: `view` (default) or `edit`
"""
repo_url = self.project.repo
if 'github' not in repo_url:
return ''
if not docroot:
return ''
else:
if docroot[0] != '/':
docroot = '/{}'.format(docroot)
if docroot[-1] != '/':
docroot = '{}/'.format(docroot)
if action == 'view':
action_string = 'blob'
elif action == 'edit':
action_string = 'edit'
user, repo = get_github_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITHUB_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
action=action_string,
)
def get_gitlab_url(
self, docroot, filename, source_suffix='.rst', action='view'):
repo_url = self.project.repo
if 'gitlab' not in repo_url:
return ''
if not docroot:
return ''
else:
if docroot[0] != '/':
docroot = '/{}'.format(docroot)
if docroot[-1] != '/':
docroot = '{}/'.format(docroot)
if action == 'view':
action_string = 'blob'
elif action == 'edit':
action_string = 'edit'
user, repo = get_gitlab_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return GITLAB_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
action=action_string,
)
def get_bitbucket_url(self, docroot, filename, source_suffix='.rst'):
repo_url = self.project.repo
if 'bitbucket' not in repo_url:
return ''
if not docroot:
return ''
user, repo = get_bitbucket_username_repo(repo_url)
if not user and not repo:
return ''
repo = repo.rstrip('/')
return BITBUCKET_URL.format(
user=user,
repo=repo,
version=self.commit_name,
docroot=docroot,
path=filename,
source_suffix=source_suffix,
)
class APIVersion(Version):
"""
Version proxy model for API data deserialization.
This replaces the pattern where API data was deserialized into a mocked
:py:class:`Version` object.
This pattern was confusing, as it was not explicit
as to what form of object you were working with -- API backed or database
backed.
This model preserves the Version model methods, allowing for overrides on
model field differences. This model pattern will generally only be used on
builder instances, where we are interacting solely with API data.
"""
project = None
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
self.project = APIProject(**kwargs.pop('project', {}))
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
for key in ['resource_uri', 'absolute_url', 'downloads']:
try:
del kwargs[key]
except KeyError:
pass
super(APIVersion, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
return 0
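# Illustrative sketch (not part of the original module): an ``APIVersion`` is
# built directly from an API payload on builder instances. The keys below
# mirror the constructor above (``project`` becomes an ``APIProject``, and
# ``resource_uri`` is stripped); the concrete values are assumptions for
# illustration only.
#
#   payload = {
#       'project': {'slug': 'example-project'},
#       'slug': 'latest',
#       'verbose_name': 'latest',
#       'resource_uri': '/api/v2/version/1/',  # removed before model init
#   }
#   version = APIVersion(**payload)
#   version.save()  # intentionally a no-op, returns 0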
@python_2_unicode_compatible
class VersionAlias(models.Model):
"""Alias for a ``Version``."""
project = models.ForeignKey(
Project, verbose_name=_('Project'), related_name='aliases')
from_slug = models.CharField(_('From slug'), max_length=255, default='')
to_slug = models.CharField(
_('To slug'), max_length=255, default='', blank=True)
largest = models.BooleanField(_('Largest'), default=False)
def __str__(self):
return ugettext(
'Alias for {project}: {_from} -> {to}'.format(
project=self.project,
_from=self.from_slug,
to=self.to_slug,
))
@python_2_unicode_compatible
class Build(models.Model):
"""Build data."""
project = models.ForeignKey(
Project, verbose_name=_('Project'), related_name='builds')
version = models.ForeignKey(
Version, verbose_name=_('Version'), null=True, related_name='builds')
type = models.CharField(
_('Type'), max_length=55, choices=BUILD_TYPES, default='html')
state = models.CharField(
_('State'), max_length=55, choices=BUILD_STATE, default='finished')
date = models.DateTimeField(_('Date'), auto_now_add=True)
success = models.BooleanField(_('Success'), default=True)
setup = models.TextField(_('Setup'), null=True, blank=True)
setup_error = models.TextField(_('Setup error'), null=True, blank=True)
output = models.TextField(_('Output'), default='', blank=True)
error = models.TextField(_('Error'), default='', blank=True)
exit_code = models.IntegerField(_('Exit code'), null=True, blank=True)
commit = models.CharField(
_('Commit'), max_length=255, null=True, blank=True)
length = models.IntegerField(_('Build Length'), null=True, blank=True)
builder = models.CharField(
_('Builder'), max_length=255, null=True, blank=True)
cold_storage = models.NullBooleanField(
_('Cold Storage'), help_text='Build steps stored outside the database.')
# Manager
objects = BuildQuerySet.as_manager()
class Meta(object):
ordering = ['-date']
get_latest_by = 'date'
index_together = [['version', 'state', 'type']]
def __str__(self):
return ugettext(
'Build {project} for {usernames} ({pk})'.format(
project=self.project,
usernames=' '.join(
self.project.users.all().values_list('username', flat=True),
),
pk=self.pk,
))
@models.permalink
def get_absolute_url(self):
return ('builds_detail', [self.project.slug, self.pk])
@property
def finished(self):
"""Return if build has a finished state."""
return self.state == BUILD_STATE_FINISHED
class BuildCommandResultMixin(object):
"""
Mixin for common command result methods/properties.
Shared methods between the database model :py:class:`BuildCommandResult` and
    non-model representations of build command results from the API
"""
@property
def successful(self):
"""Did the command exit with a successful exit code."""
return self.exit_code == 0
@property
def failed(self):
"""
Did the command exit with a failing exit code.
Helper for inverse of :py:meth:`successful`
"""
return not self.successful
@python_2_unicode_compatible
class BuildCommandResult(BuildCommandResultMixin, models.Model):
"""Build command for a ``Build``."""
build = models.ForeignKey(
Build, verbose_name=_('Build'), related_name='commands')
command = models.TextField(_('Command'))
description = models.TextField(_('Description'), blank=True)
output = models.TextField(_('Command output'), blank=True)
exit_code = models.IntegerField(_('Command exit code'))
start_time = models.DateTimeField(_('Start time'))
end_time = models.DateTimeField(_('End time'))
class Meta(object):
ordering = ['start_time']
get_latest_by = 'start_time'
objects = RelatedBuildQuerySet.as_manager()
def __str__(self):
return (
ugettext('Build command {pk} for build {build}')
.format(pk=self.pk, build=self.build))
@property
def run_time(self):
"""Total command runtime in seconds."""
if self.start_time is not None and self.end_time is not None:
diff = self.end_time - self.start_time
return diff.seconds
|
|
"""
Models the MicrobiomeAssayPrep object.
"""
import json
import logging
from itertools import count
from cutlass.iHMPSession import iHMPSession
from cutlass.Base import Base
from cutlass.Util import *
# pylint: disable=W0703, C0302, C1801
# Create a module logger named after the module
module_logger = logging.getLogger(__name__)
# Add a NullHandler for the case if no logging is configured by the application
module_logger.addHandler(logging.NullHandler())
class MicrobiomeAssayPrep(Base):
"""
    The class encapsulates iHMP microbiome assay prep data. It contains all
    the fields required to save such an object in OSDF.
Attributes:
namespace (str): The namespace this class will use in OSDF.
"""
namespace = "ihmp"
def __init__(self, *args, **kwargs):
"""
Constructor for the MicrobiomeAssayPrep class. This initializes the
fields specific to the class, and inherits from the Base class.
Args:
None
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.logger.addHandler(logging.NullHandler())
self._id = None
self._version = None
self._links = {}
self._tags = []
# Required properties
self._comment = None
self._pride_id = None
self._sample_name = None
self._title = None
self._center = None
self._contact = None
self._prep_id = None
self._storage_duration = None
self._experiment_type = None
self._study = None
# Optional properties
self._short_label = None
self._url = None
self._species = None
self._cell_type = None
self._tissue = None
self._reference = None
self._protocol_name = None
self._protocol_steps = None
self._exp_description = None
self._sample_description = None
super(MicrobiomeAssayPrep, self).__init__(*args, **kwargs)
@property
def comment(self):
"""
str: A descriptive comment for the object.
"""
self.logger.debug("In 'comment' getter.")
return self._comment
@comment.setter
@enforce_string
def comment(self, comment):
"""
The setter for a descriptive comment for the object.
Args:
comment (str): The comment text.
Returns:
None
"""
self.logger.debug("In 'comment' setter.")
self._comment = comment
@property
def pride_id(self):
"""
str: PRIDE identifier corresponding to study.
"""
self.logger.debug("In 'pride_id' getter.")
return self._pride_id
@pride_id.setter
@enforce_string
def pride_id(self, pride_id):
"""
The setter for the PRIDE identifier corresponding to study.
Args:
pride_id (str): The PRIDE identifier
Returns:
None
"""
self.logger.debug("In 'pride_id' setter.")
self._pride_id = pride_id
@property
def sample_name(self):
"""
str: The short label that is referable to the sample used to
generate the dataset.
"""
self.logger.debug("In 'sample_name' getter.")
return self._sample_name
@sample_name.setter
@enforce_string
def sample_name(self, sample_name):
"""
The setter for the short label that is referable to the sample
used to generate the dataset.
Args:
sample_name (str): Short label for the sample.
Returns:
None
"""
self.logger.debug("In 'sample_name' setter.")
self._sample_name = sample_name
@property
def title(self):
"""
str: The description of the particular experiment.
"""
self.logger.debug("In 'title' getter.")
return self._title
@title.setter
def title(self, title):
"""
The setter for the description of the particular experiment.
Args:
title (str): Experiment title
Returns:
None
"""
self.logger.debug("In title setter.")
if type(title) is not str:
raise ValueError("Invalid type for title.")
self._title = title
@property
def short_label(self):
"""
str: The short label/nomenclature used to group/organize experiments.
"""
self.logger.debug("In 'short_label' getter.")
return self._short_label
@short_label.setter
@enforce_string
def short_label(self, short_label):
"""
Set the short label/nomenclature used to group/organize experiments.
Args:
short_label (str): Short label used to group experiments
Returns:
None
"""
self.logger.debug("In 'short_label' setter.")
self._short_label = short_label
@property
def center(self):
"""
str: The center responsible for generating the microbiome assay Prep.
"""
self.logger.debug("In 'center' getter.")
return self._center
@center.setter
@enforce_string
def center(self, center):
"""
Set the center responsible for generating the microbiome assay prep.
Args:
center (str): The center responsible for generating the microbiome assay prep.
Returns:
None
"""
self.logger.debug("In 'center' setter.")
self._center = center
@property
def contact(self):
"""
str: Get the name and email of the primary contact at the center.
"""
self.logger.debug("In 'contact' getter.")
return self._contact
@contact.setter
@enforce_string
def contact(self, contact):
"""
Set the name and email of the primary contact at the center.
Args:
contact (str): Name and email of the primary contact at the center.
Returns:
None
"""
self.logger.debug("In 'contact' setter.")
self._contact = contact
@property
def prep_id(self):
"""
str: Get the internal assay prep ID.
"""
self.logger.debug("In 'prep_id' getter.")
return self._prep_id
@prep_id.setter
@enforce_string
def prep_id(self, prep_id):
"""
Set the internal assay prep ID.
Args:
prep_id (str): Internal assay prep ID.
Returns:
None
"""
self.logger.debug("In 'prep_id' setter.")
self._prep_id = prep_id
@property
def storage_duration(self):
"""
int: Get the MIGS/MIMS storage duration in days.
"""
self.logger.debug("In storage_duration getter.")
return self._storage_duration
@storage_duration.setter
@enforce_int
def storage_duration(self, storage_duration):
"""
Set the MIGS/MIMS storage duration in days.
Args:
storage_duration (int): Storage duration in days.
Returns:
None
"""
self.logger.debug("In 'storage_duration' setter.")
self._storage_duration = storage_duration
@property
def experiment_type(self):
"""
str: Get the PRIDE experiment type.
"""
self.logger.debug("In 'experiment_type' getter.")
return self._experiment_type
@experiment_type.setter
@enforce_string
def experiment_type(self, experiment_type):
"""
Set the PRIDE experiment type.
Args:
experiment_type (str): Experiment type, as defined by PRIDE.
Returns:
None
"""
self.logger.debug("In 'experiment_type' setter.")
self._experiment_type = experiment_type
@property
def species(self):
"""
str: Controlled vocabulary term to describe a single species. NEWT CV
terms are allowed.
"""
self.logger.debug("In 'species' getter.")
return self._species
@species.setter
@enforce_string
def species(self, species):
"""
Controlled vocabulary term to describe a single species. NEWT CV
terms are allowed.
Args:
species (str): Term to describe a single species.
Returns:
None
"""
self.logger.debug("In 'species' setter.")
self._species = species
@property
def cell_type(self):
"""
str: Controlled vocabulary term to describe a single cell type. Cell
type ontology CV terms are allowed.
"""
self.logger.debug("In 'cell_type' getter.")
return self._cell_type
@cell_type.setter
@enforce_string
def cell_type(self, cell_type):
"""
Controlled vocabulary term to describe a single cell type. Cell
type ontology CV terms are allowed.
Args:
cell_type (str): Term to describe the cell type.
Returns:
None
"""
self.logger.debug("In 'cell_type' setter.")
self._cell_type = cell_type
@property
def tissue(self):
"""
str: Controlled vocabulary term to describe a single tissue. BRENDA
Tissue CV terms are allowed.
"""
self.logger.debug("In 'tissue' getter.")
return self._tissue
@tissue.setter
@enforce_string
def tissue(self, tissue):
"""
Controlled vocabulary term to describe a single tissue. BRENDA Tissue
CV terms are allowed.
Args:
tissue (str): Term to describe the tissue.
Returns:
None
"""
self.logger.debug("In 'tissue' setter.")
self._tissue = tissue
@property
def reference(self):
"""
str: Link to literature citation for which this experiment provides
supporting evidence.
"""
self.logger.debug("In 'reference' getter.")
return self._reference
@reference.setter
@enforce_string
def reference(self, reference):
"""
Set the literature citation for which this experiment provides
supporting evidence.
Args:
reference (str): Supporting evidence link.
Returns:
None
"""
self.logger.debug("In 'reference' setter.")
self._reference = reference
@property
def protocol_name(self):
"""
str: The protocol title with versioning.
"""
self.logger.debug("In 'protocol_name' getter.")
return self._protocol_name
@protocol_name.setter
@enforce_string
def protocol_name(self, protocol_name):
"""
Set the protocol title with versioning.
Args:
protocol_name (str): Protocol title with versioning, ideally,
pointing to a URL.
Returns:
None
"""
self.logger.debug("In 'protocol_name' setter.")
self._protocol_name = protocol_name
@property
def protocol_steps(self):
"""
str: Description of the sample processing steps.
"""
self.logger.debug("In 'protocol_steps' getter.")
return self._protocol_steps
@protocol_steps.setter
@enforce_string
def protocol_steps(self, protocol_steps):
"""
Set the description of the sample processing steps.
Args:
            protocol_steps (str): Description of the sample processing steps.
Returns:
None
"""
self.logger.debug("In 'protocol_steps' setter.")
self._protocol_steps = protocol_steps
@property
def exp_description(self):
"""
str: Description of the goals and objectives of this study.
"""
self.logger.debug("In 'exp_description' getter.")
return self._exp_description
@exp_description.setter
@enforce_string
def exp_description(self, exp_description):
"""
Set the description of the goals and objectives of this study,
summary of the abstract, optimally 2-3 sentences.
Args:
exp_description (str): Description of the goals/objectives
of the study.
Returns:
None
"""
self.logger.debug("In 'exp_description' setter.")
self._exp_description = exp_description
@property
def sample_description(self):
"""
str: Expansible description of the sample used to generate the
dataset.
"""
self.logger.debug("In 'sample_description' getter.")
return self._sample_description
@sample_description.setter
@enforce_string
def sample_description(self, sample_description):
"""
Set the expansible description of the sample used to generate the
dataset
Args:
sample_description (str): Expansible description of the sample
used to generate the dataset
Returns:
None
"""
self.logger.debug("In 'sample_description' setter.")
self._sample_description = sample_description
@property
def study(self):
"""
str: One of the 3 studies that are part of the iHMP.
"""
self.logger.debug("In 'study' getter.")
return self._study
@study.setter
@enforce_string
def study(self, study):
"""
One of the 3 studies that are part of the iHMP.
Args:
study (str): One of the 3 studies that are part of the iHMP.
Returns:
None
"""
self.logger.debug("In 'study' setter.")
self._study = study
def validate(self):
"""
Validates the current object's data/JSON against the current
schema in the OSDF instance for that specific object. All required
fields for that specific object must be present.
Args:
None
Returns:
A list of strings, where each string is the error that the
validation raised during OSDF validation
"""
self.logger.debug("In validate.")
document = self._get_raw_doc()
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
(valid, error_message) = session.get_osdf().validate_node(document)
problems = []
if not valid:
self.logger.info("Validation did not succeed.")
problems.append(error_message)
if 'prepared_from' not in self._links.keys():
problems.append("Must have a 'prepared_from' link to a sample.")
self.logger.debug("Number of validation problems: %s.", len(problems))
return problems
def is_valid(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for the specific object. However, unlike
        validate(), this method does not provide exact error messages;
        it only states whether the validation was successful or not.
Args:
None
Returns:
True if the data validates, False if the current state of
fields in the instance do not validate with the OSDF instance
"""
self.logger.debug("In is_valid.")
document = self._get_raw_doc()
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
(valid, _error_message) = session.get_osdf().validate_node(document)
if 'prepared_from' not in self._links.keys():
valid = False
self.logger.debug("Valid? %s", str(valid))
return valid
def _get_raw_doc(self):
"""
        Generates the raw JSON document for the current object. All required fields are
        filled into the JSON document, regardless of whether they are set or not. Any remaining
fields are included only if they are set. This allows the user to visualize
the JSON to ensure fields are set appropriately before saving into the
database.
Args:
None
Returns:
An object representation of the JSON document.
"""
self.logger.debug("In _get_raw_doc.")
prep_doc = {
'acl': {
'read': ['all'],
'write': [MicrobiomeAssayPrep.namespace]
},
'linkage': self._links,
'ns': MicrobiomeAssayPrep.namespace,
'node_type': 'microb_assay_prep',
'meta': {
'center': self._center,
'comment': self._comment,
'contact': self._contact,
'experiment_type': self._experiment_type,
'prep_id': self._prep_id,
'pride_id': self._pride_id,
'sample_name': self._sample_name,
'storage_duration': self._storage_duration,
'study': self._study,
'subtype': self._study,
'tags': self._tags,
'title': self._title
}
}
if self._id is not None:
self.logger.debug("%s object has the OSDF id set.", __name__)
prep_doc['id'] = self._id
if self._version is not None:
self.logger.debug("%s object has the OSDF version set.", __name__)
prep_doc['ver'] = self._version
# Handle optional properties
if self._short_label is not None:
self.logger.debug("%s object has the 'short_label' property set.", __name__)
prep_doc['meta']['short_label'] = self._short_label
if self._url is not None:
self.logger.debug("%s object has the 'url' property set.", __name__)
prep_doc['meta']['url'] = self._url
if self._species is not None:
self.logger.debug("%s object has the 'species' property set.", __name__)
prep_doc['meta']['species'] = self._species
if self._cell_type is not None:
self.logger.debug("%s object has the 'cell_type' property set.", __name__)
prep_doc['meta']['cell_type'] = self._cell_type
if self._tissue is not None:
self.logger.debug("%s object has the 'tissue' property set.", __name__)
prep_doc['meta']['tissue'] = self._tissue
if self._reference is not None:
self.logger.debug("%s object has the 'reference' property set.", __name__)
prep_doc['meta']['reference'] = self._reference
if self._protocol_name is not None:
self.logger.debug("%s object has the 'protocol_name' property set.", __name__)
prep_doc['meta']['protocol_name'] = self._protocol_name
if self._protocol_steps is not None:
self.logger.debug("%s object has the 'protocol_steps' property set.", __name__)
prep_doc['meta']['protocol_steps'] = self._protocol_steps
if self._exp_description is not None:
self.logger.debug("%s object has the 'exp_description' property set.", __name__)
prep_doc['meta']['exp_description'] = self._exp_description
if self._sample_description is not None:
self.logger.debug("%s object has the 'sample_description' property set.", __name__)
prep_doc['meta']['sample_description'] = self._sample_description
return prep_doc
@staticmethod
def required_fields():
"""
A static method. The required fields for the class.
Args:
None
Returns:
Tuple of strings of required properties.
"""
module_logger.debug("In required fields.")
return ("comment", "pride_id", "sample_name", "title",
"center", "contact", "prep_id", "storage_duration",
"experiment_type", "study", "tags")
def delete(self):
"""
Deletes the current object (self) from OSDF. If the object has not been
previously saved (node ID is not set), then an error message will be
logged stating the object was not deleted. If the ID is set, and exists
in the OSDF instance, then the object will be deleted from the OSDF
instance, and this object must be re-saved in order to use it again.
Args:
None
Returns:
True upon successful deletion, False otherwise.
"""
self.logger.debug("In delete.")
if self._id is None:
self.logger.warn("Attempt to delete a %s with no ID.", __name__)
raise Exception("{} does not have an ID.".format(__name__))
prep_id = self._id
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
# Assume failure
success = False
try:
self.logger.info("Deleting %s with ID %s.", __name__, prep_id)
session.get_osdf().delete_node(prep_id)
success = True
except Exception as delete_exception:
self.logger.exception(delete_exception)
self.logger.error("An error occurred when deleting %s.", self)
return success
@staticmethod
def search(query="\"microb_assay_prep\"[node_type]"):
"""
        Searches OSDF for MicrobiomeAssayPrep nodes. Any additional criteria
        the user wishes to add are expressed in the query language described
        in the OSDF documentation. A general format is
(including the quotes and brackets):
"search criteria"[field to search]
If there are any results, they are returned as MicrobiomeAssayPrep instances,
otherwise an empty list will be returned.
Args:
query (str): The query for the OSDF framework. Defaults to the
MicrobiomeAssayPrep node type.
Returns:
Returns an array of MicrobiomeAssayPrep objects. It returns an
empty list if there are no results.
"""
module_logger.debug("In search.")
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
if query != '"microb_assay_prep"[node_type]':
query = '({}) && "microb_assay_prep"[node_type]'.format(query)
module_logger.debug("Submitting OQL query: %s", query)
prep_data = session.get_osdf().oql_query(
MicrobiomeAssayPrep.namespace, query
)
all_results = prep_data['results']
result_list = list()
if len(all_results) > 0:
for result in all_results:
prep_result = MicrobiomeAssayPrep.load_microassayprep(result)
result_list.append(prep_result)
return result_list
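    # Illustrative sketch (not part of the original module): the criteria
    # passed to search() follow the '"value"[field]' form described above and
    # are AND-ed with the node_type restriction. The field and value used
    # here are assumptions for illustration only.
    #
    #   preps = MicrobiomeAssayPrep.search('"example-study"[study]')
    #   for prep in preps:
    #       print(prep.prep_id)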
@staticmethod
def load_microassayprep(prep_data):
"""
        Takes the provided JSON document (as a parsed dictionary) and converts
        it to a MicrobiomeAssayPrep object.
        Args:
            prep_data (dict): The JSON document to convert
Returns:
Returns a MicrobiomeAssayPrep instance.
"""
module_logger.info("Creating a template %s.", __name__)
prep = MicrobiomeAssayPrep()
module_logger.debug("Filling in %s details.", __name__)
prep._set_id(prep_data['id'])
prep.links = prep_data['linkage']
prep.version = prep_data['ver']
# Required fields
prep.comment = prep_data['meta']['comment']
prep.contact = prep_data['meta']['contact']
prep.pride_id = prep_data['meta']['pride_id']
prep.sample_name = prep_data['meta']['sample_name']
prep.title = prep_data['meta']['title']
prep.center = prep_data['meta']['center']
prep.prep_id = prep_data['meta']['prep_id']
prep.storage_duration = prep_data['meta']['storage_duration']
prep.experiment_type = prep_data['meta']['experiment_type']
prep.study = prep_data['meta']['study']
prep.tags = prep_data['meta']['tags']
# Optional fields
if 'short_label' in prep_data['meta']:
prep.short_label = prep_data['meta']['short_label']
if 'url' in prep_data['meta']:
prep._url = prep_data['meta']['url']
if 'species' in prep_data['meta']:
prep.species = prep_data['meta']['species']
if 'cell_type' in prep_data['meta']:
prep.cell_type = prep_data['meta']['cell_type']
if 'tissue' in prep_data['meta']:
prep.tissue = prep_data['meta']['tissue']
if 'reference' in prep_data['meta']:
prep.reference = prep_data['meta']['reference']
if 'protocol_name' in prep_data['meta']:
prep.protocol_name = prep_data['meta']['protocol_name']
if 'protocol_steps' in prep_data['meta']:
prep.protocol_steps = prep_data['meta']['protocol_steps']
if 'exp_description' in prep_data['meta']:
prep.exp_description = prep_data['meta']['exp_description']
if 'sample_description' in prep_data['meta']:
prep.sample_description = prep_data['meta']['sample_description']
module_logger.debug("Returning loaded %s.", __name__)
return prep
@staticmethod
def load(node_id):
"""
Loads the data for the specified input ID from the OSDF instance to this object.
If the provided ID does not exist, then an error message is provided stating the
project does not exist.
Args:
node_id (str): The OSDF ID for the document to load.
Returns:
A MicrobiomeAssayPrep object with all the available OSDF data loaded into it.
"""
module_logger.debug("In load. Specified ID: %s", node_id)
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
node_data = session.get_osdf().get_node(node_id)
node = MicrobiomeAssayPrep.load_microassayprep(node_data)
module_logger.debug("Returning loaded %s.", __name__)
return node
def save(self):
"""
Saves the data in OSDF. The JSON form of the current data for the
instance is validated in the save function. If the data is not valid,
then the data will not be saved. If the instance was saved previously,
        then the node ID is assigned the alphanumeric ID found in the OSDF
        instance. If not saved previously, then the node ID is 'None', and upon
        a successful save, it will be assigned the alphanumeric ID found in OSDF.
Also, the version is updated as the data is saved in OSDF.
Args:
None
        Returns:
True if successful, False otherwise.
"""
self.logger.debug("In save.")
# If node previously saved, use edit_node instead since ID
# is given (an update in a way)
# can also use get_node to check if the node already exists
if not self.is_valid():
self.logger.error("Cannot save, data is invalid.")
return False
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
osdf = session.get_osdf()
success = False
if self._id is None:
self.logger.info("About to insert a new %s OSDF node.", __name__)
# Get the JSON form of the data and load it
self.logger.debug("Converting %s to parsed JSON form.", __name__)
data = json.loads(self.to_json())
try:
node_id = osdf.insert_node(data)
self._set_id(node_id)
self._version = 1
success = True
except Exception as save_exception:
self.logger.exception(save_exception)
self.logger.error("An error occurred when saving %s.", self)
else:
self.logger.info("%s already has an ID, so we do an update "
"(not an insert).", __name__)
try:
prep_data = self._get_raw_doc()
self.logger.info("%s already has an ID, "
"so we do an update (not an insert).", __name__)
prep_id = self._id
self.logger.debug("%s OSDF ID to update: %s.", __name__, prep_id)
osdf.edit_node(prep_data)
prep_data = osdf.get_node(prep_id)
latest_version = prep_data['ver']
self.logger.debug("The version of this %s is now: %s",
__name__, str(latest_version))
self._version = latest_version
success = True
except Exception as update_exception:
self.logger.exception(update_exception)
self.logger.error("An error occurred when updating %s.", self)
return success
def cytokines(self):
"""
Returns an iterator of all Cytokines connected to this MicrobiomeAssayPrep.
"""
self.logger.debug("In cytokines().")
linkage_query = '"{}"[linkage.derived_from] && "cytokine"[node_type]'.format(self.id)
query = iHMPSession.get_session().get_osdf().oql_query
from cutlass.Cytokine import Cytokine
for page_no in count(1):
res = query(MicrobiomeAssayPrep.namespace, linkage_query, page=page_no)
res_count = res['result_count']
for doc in res['results']:
yield Cytokine.load_cytokine(doc)
res_count -= len(res['results'])
if res_count < 1:
break
def lipidomes(self):
"""
Returns an iterator of all Lipidomes connected to this
MicrobiomeAssayPrep.
"""
self.logger.debug("In lipidomes().")
        linkage_query = '"{}"[linkage.derived_from] && "lipidome"[node_type]'.format(self.id)
query = iHMPSession.get_session().get_osdf().oql_query
from cutlass.Lipidome import Lipidome
for page_no in count(1):
res = query(MicrobiomeAssayPrep.namespace, linkage_query, page=page_no)
res_count = res['result_count']
for doc in res['results']:
yield Lipidome.load_lipidome(doc)
res_count -= len(res['results'])
if res_count < 1:
break
def metabolomes(self):
"""
Returns an iterator of all Metabolomes connected to this
MicrobiomeAssayPrep.
"""
self.logger.debug("In metabolomes().")
linkage_query = '"{}"[linkage.derived_from] && "metabolome"[node_type]'.format(self.id)
query = iHMPSession.get_session().get_osdf().oql_query
from cutlass.Metabolome import Metabolome
for page_no in count(1):
res = query(MicrobiomeAssayPrep.namespace, linkage_query, page=page_no)
res_count = res['result_count']
for doc in res['results']:
yield Metabolome.load_metabolome(doc)
res_count -= len(res['results'])
if res_count < 1:
break
def proteomes(self):
"""
Returns an iterator of all Proteomes connected to this
MicrobiomeAssayPrep.
"""
self.logger.debug("In proteomes().")
linkage_query = '"{}"[linkage.derived_from] && "proteome"[node_type]'.format(self.id)
query = iHMPSession.get_session().get_osdf().oql_query
from cutlass.Proteome import Proteome
for page_no in count(1):
res = query(MicrobiomeAssayPrep.namespace, linkage_query, page=page_no)
res_count = res['result_count']
for doc in res['results']:
yield Proteome.load_proteome(doc)
res_count -= len(res['results'])
if res_count < 1:
break
def _derived_docs(self):
self.logger.debug("In _derived_docs.")
linkage_query = '"{}"[linkage.derived_from]'.format(self.id)
query = iHMPSession.get_session().get_osdf().oql_query
for page_no in count(1):
res = query(MicrobiomeAssayPrep.namespace, linkage_query, page=page_no)
res_count = res['result_count']
for doc in res['results']:
yield doc
res_count -= len(res['results'])
if res_count < 1:
break
def derivations(self):
"""
Return an iterator of all the derived nodes from this prep, including
lipidomes, metabolomes, cytokines, proteomes, etc...
"""
        self.logger.debug("In derivations().")
from cutlass.Cytokine import Cytokine
from cutlass.Lipidome import Lipidome
from cutlass.Metabolome import Metabolome
from cutlass.Proteome import Proteome
for doc in self._derived_docs():
if doc['node_type'] == "cytokine":
yield Cytokine.load_cytokine(doc)
elif doc['node_type'] == "lipidome":
yield Lipidome.load_lipidome(doc)
elif doc['node_type'] == "metabolome":
yield Metabolome.load_metabolome(doc)
elif doc['node_type'] == "proteome":
yield Proteome.load_proteome(doc)
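# Illustrative sketch (not part of the original module): a typical workflow is
# to create a prep, populate the fields listed in required_fields(), add the
# 'prepared_from' linkage that validate() checks for, and then save. All
# concrete values below are assumptions for illustration only.
#
#   prep = MicrobiomeAssayPrep()
#   prep.comment = "Example prep"
#   prep.pride_id = "PRIDE-0000"
#   prep.sample_name = "example-sample"
#   prep.title = "Example experiment"
#   prep.center = "Example Center"
#   prep.contact = "Jane Doe, jdoe@example.org"
#   prep.prep_id = "prep-001"
#   prep.storage_duration = 30
#   prep.experiment_type = "example-experiment-type"
#   prep.study = "example-study"
#   prep.tags = ["example"]
#   prep.links = {"prepared_from": ["example_sample_osdf_id"]}
#   if prep.is_valid():
#       prep.save()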
|
|
import numpy as np
from nltk import wordpunct_tokenize
import operator
import re, string
import math
SENTENCE_START_TOKEN = "sentence_start"
SENTENCE_END_TOKEN = "sentence_end"
UNKNOWN_TOKEN = "unknown_token"
def load_data(loc='./data/', _train=False, _test=False):
"Load the MSRP dataset."
trainloc = loc + 'msr_paraphrase_train.txt'
testloc = loc + 'msr_paraphrase_test.txt'
sent1_train, sent2_train, sent1_test, sent2_test = [], [], [], []
label_train, label_dev, label_test = [], [], []
if _train:
with open(trainloc, 'r', encoding='utf8') as f:
f.readline() # skipping the header of the file
for line in f:
text = line.strip().split('\t')
sent1_train.append("%s %s %s" % (SENTENCE_START_TOKEN, text[3], SENTENCE_END_TOKEN))
sent2_train.append("%s %s %s" % (SENTENCE_START_TOKEN, text[4], SENTENCE_END_TOKEN))
label_train.append(int(text[0]))
if _test:
with open(testloc, 'r', encoding='utf8') as f:
f.readline() # skipping the header of the file
for line in f:
text = line.strip().split('\t')
sent1_test.append("%s %s %s" % (SENTENCE_START_TOKEN, text[3], SENTENCE_END_TOKEN))
sent2_test.append("%s %s %s" % (SENTENCE_START_TOKEN, text[4], SENTENCE_END_TOKEN))
label_test.append(int(text[0]))
if _train and _test:
return [sent1_train, sent2_train], [sent1_test, sent2_test], [label_train, label_test]
elif _train:
return [sent1_train, sent2_train], label_train
elif _test:
return [sent1_test, sent2_test], label_test
def build_dictionary(loc='./data/', vocabulary_size=-1):
"""Construct a dictionary from the MSRP dataset."""
trainloc = loc + 'msr_paraphrase_train.txt'
testloc = loc + 'msr_paraphrase_test.txt'
document_frequency = {}
total_document = 0
with open(trainloc, 'r', encoding='utf8') as f:
f.readline() # skipping the header of the file
for line in f:
text = line.strip().split('\t')
sentence1 = my_tokenizer(text[3])
sentence2 = my_tokenizer(text[4])
for token in set(sentence1):
if token in document_frequency:
document_frequency[token] = document_frequency[token] + 1
else:
document_frequency[token] = 1
for token in set(sentence2):
if token in document_frequency:
document_frequency[token] = document_frequency[token] + 1
else:
document_frequency[token] = 1
total_document = total_document + 2
with open(testloc, 'r', encoding='utf8') as f:
f.readline() # skipping the header of the file
for line in f:
text = line.strip().split('\t')
sentence1 = my_tokenizer(text[3])
sentence2 = my_tokenizer(text[4])
for token in set(sentence1):
if token in document_frequency:
document_frequency[token] = document_frequency[token] + 1
else:
document_frequency[token] = 1
for token in set(sentence2):
if token in document_frequency:
document_frequency[token] = document_frequency[token] + 1
else:
document_frequency[token] = 1
total_document = total_document + 2
for key, value in document_frequency.items():
document_frequency[key] = math.log(total_document / document_frequency[key])
vocab = sorted(document_frequency.items(), key=operator.itemgetter(1), reverse=True)
word_to_index = dict()
index_to_word = dict()
word_to_index[SENTENCE_START_TOKEN] = 0
word_to_index[SENTENCE_END_TOKEN] = 1
word_to_index[UNKNOWN_TOKEN] = 2
index_to_word[0] = SENTENCE_START_TOKEN
index_to_word[1] = SENTENCE_END_TOKEN
index_to_word[2] = UNKNOWN_TOKEN
counter = 3
for key, value in vocab:
if len(key) < 4:
continue
elif counter == vocabulary_size:
break
word_to_index[key] = counter
index_to_word[counter] = key
counter = counter + 1
return word_to_index, index_to_word
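# Illustrative note (not part of the original script): build_dictionary() ranks
# tokens by inverse document frequency, log(total_document / document_frequency),
# skips tokens shorter than four characters, and assigns indices after the three
# reserved special tokens. The vocabulary size below is an assumption for
# illustration only.
#
#   word_to_index, index_to_word = build_dictionary(vocabulary_size=8000)
#   word_to_index[SENTENCE_START_TOKEN]  # -> 0
#   word_to_index[SENTENCE_END_TOKEN]    # -> 1
#   word_to_index[UNKNOWN_TOKEN]         # -> 2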
def my_tokenizer(input):
"""Tokenizer to tokenize and normalize text."""
tokenList = []
tokens = wordpunct_tokenize(input.lower())
tokenList.extend([x for x in tokens if not re.fullmatch('[' + string.punctuation + ']+', x)])
return tokenList
def get_train_data(vocabulary_size):
"""Get training sentences."""
word_to_index, index_to_word = build_dictionary(vocabulary_size=vocabulary_size)
[sent1_train, sent2_train], label_train = load_data(_train=True)
sent1_train_tokenized = [my_tokenizer(sent) for sent in sent1_train]
sent2_train_tokenized = [my_tokenizer(sent) for sent in sent2_train]
for i, sent in enumerate(sent1_train_tokenized):
sent1_train_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
for i, sent in enumerate(sent2_train_tokenized):
sent2_train_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
sent1_train_indices = []
for sentence in sent1_train_tokenized:
sent1_train_indices.append([word_to_index[word] for word in sentence])
sent2_train_indices = []
for sentence in sent2_train_tokenized:
sent2_train_indices.append([word_to_index[word] for word in sentence])
return sent1_train_indices, sent2_train_indices, word_to_index, index_to_word, label_train
def get_train_data_reversed(vocabulary_size):
"""Get training sentences in reversed order."""
sent1_train_indices, sent2_train_indices, word_to_index, index_to_word, label_train = get_train_data(
vocabulary_size)
sent1_train_indices_reversed = []
for index_list in sent1_train_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sent1_train_indices_reversed.append(temp)
sent2_train_indices_reversed = []
for index_list in sent2_train_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sent2_train_indices_reversed.append(temp)
return sent1_train_indices_reversed, sent2_train_indices_reversed, word_to_index, index_to_word, label_train
def get_train_sentences(vocabulary_size):
"""Get training sentences with word to index map and vice versa."""
sent1_train_indices, sent2_train_indices, word_to_index, index_to_word, label_train = get_train_data(
vocabulary_size)
all_sentences = []
all_sentences.extend(sent1_train_indices)
all_sentences.extend(sent2_train_indices)
X_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return X_train, y_train, word_to_index, index_to_word
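# Illustrative note (not part of the original script): get_train_sentences()
# builds next-word prediction pairs by shifting each index sequence by one
# position. For a hypothetical sentence mapped to the indices [0, 57, 91, 1]
# (sentence_start, ..., sentence_end), the corresponding rows are:
#
#   X_train row: [0, 57, 91]   # every token except the last
#   y_train row: [57, 91, 1]   # every token except the first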
def get_train_sentences_reversed(vocabulary_size):
"""Get training sentences in reverse order with word to index map and vice versa."""
sent1_train_indices_reversed, sent2_train_indices_reversed, word_to_index, index_to_word, label_train = get_train_data_reversed(
vocabulary_size)
all_sentences = []
all_sentences.extend(sent1_train_indices_reversed)
all_sentences.extend(sent2_train_indices_reversed)
X_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return X_train, y_train, word_to_index, index_to_word
def get_test_data(vocabulary_size):
"""Get testing sentences."""
word_to_index, index_to_word = build_dictionary(vocabulary_size=vocabulary_size)
[sent1_test, sent2_test], label_test = load_data(_test=True)
sent1_test_tokenized = [my_tokenizer(sent) for sent in sent1_test]
sent2_test_tokenized = [my_tokenizer(sent) for sent in sent2_test]
for i, sent in enumerate(sent1_test_tokenized):
sent1_test_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
for i, sent in enumerate(sent2_test_tokenized):
sent2_test_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
sent1_test_indices = []
for sentence in sent1_test_tokenized:
sent1_test_indices.append([word_to_index[word] for word in sentence])
sent2_test_indices = []
for sentence in sent2_test_tokenized:
sent2_test_indices.append([word_to_index[word] for word in sentence])
return sent1_test_indices, sent2_test_indices, word_to_index, index_to_word, label_test
def get_test_data_reversed(vocabulary_size):
"""Get testing sentences in reverse order."""
sent1_test_indices, sent2_test_indices, word_to_index, index_to_word, label_test = get_test_data(vocabulary_size)
sent1_test_indices_reversed = []
for index_list in sent1_test_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sent1_test_indices_reversed.append(temp)
sent2_test_indices_reversed = []
for index_list in sent2_test_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sent2_test_indices_reversed.append(temp)
return sent1_test_indices_reversed, sent2_test_indices_reversed, word_to_index, index_to_word, label_test
def get_test_sentences(vocabulary_size):
"""Get testing sentences with word to index map and vice versa."""
sent1_test_indices, sent2_test_indices, word_to_index, index_to_word, label_test = get_test_data(vocabulary_size)
all_sentences = []
all_sentences.extend(sent1_test_indices)
all_sentences.extend(sent2_test_indices)
x_test = np.asarray([[w for w in sentence] for sentence in all_sentences])
return x_test, word_to_index, index_to_word
def get_test_sentences_reversed(vocabulary_size):
"""Get testing sentences in reverse order with word to index map and vice versa."""
sent1_test_indices, sent2_test_indices, word_to_index, index_to_word, label_test = get_test_data_reversed(
vocabulary_size)
all_sentences = []
all_sentences.extend(sent1_test_indices)
all_sentences.extend(sent2_test_indices)
x_test = np.asarray([[w for w in sentence] for sentence in all_sentences])
return x_test, word_to_index, index_to_word
|
|
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for the Hyper-V driver and related APIs.
"""
import contextlib
import datetime
import io
import os
import platform
import shutil
import time
import uuid
import mock
import mox
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.openstack.common import fileutils
from nova.openstack.common import units
from nova import test
from nova.tests import fake_network
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova.tests.virt.hyperv import db_fakes
from nova.tests.virt.hyperv import fake
from nova.tests.virt import test_driver
from nova import utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import driver as driver_hyperv
from nova.virt.hyperv import hostops
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import ioutils
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import networkutilsv2
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import rdpconsoleutils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeops
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
from nova.virt import images
CONF = cfg.CONF
CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
class HyperVAPIBaseTestCase(test.NoDBTestCase):
"""Base unit tests class for Hyper-V driver calls."""
def __init__(self, test_case_name):
self._mox = mox.Mox()
super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
def setUp(self):
super(HyperVAPIBaseTestCase, self).setUp()
self._user_id = 'fake'
self._project_id = 'fake'
self._instance_data = None
self._image_metadata = None
self._fetched_image = None
self._update_image_raise_exception = False
self._volume_target_portal = 'testtargetportal:3260'
self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
self._context = context.RequestContext(self._user_id, self._project_id)
self._instance_ide_disks = []
self._instance_ide_dvds = []
self._instance_volume_disks = []
self._test_vm_name = None
self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
self._check_min_windows_version_satisfied = True
self._setup_stubs()
self.flags(instances_path=r'C:\Hyper-V\test\instances',
network_api_class='nova.network.neutronv2.api.API')
self.flags(force_volumeutils_v1=True, group='hyperv')
self.flags(force_hyperv_utils_v1=True, group='hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
def _setup_stubs(self):
db_fakes.stub_out_db_instance_api(self.stubs)
fake_image.stub_out_image_service(self.stubs)
fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
def fake_fetch(context, image_id, target, user, project):
self._fetched_image = target
self.stubs.Set(images, 'fetch', fake_fetch)
def fake_get_remote_image_service(context, name):
class FakeGlanceImageService(object):
def update(self_fake, context, image_id, image_metadata, f):
if self._update_image_raise_exception:
raise vmutils.HyperVException(
"Simulated update failure")
self._image_metadata = image_metadata
return (FakeGlanceImageService(), 1)
self.stubs.Set(glance, 'get_remote_image_service',
fake_get_remote_image_service)
def fake_check_min_windows_version(fake_self, major, minor):
if [major, minor] >= [6, 3]:
return False
return self._check_min_windows_version_satisfied
self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
fake_check_min_windows_version)
def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
class FakeIOThread(object):
def __init__(self, src, dest, max_bytes):
pass
def start(self):
pass
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
self._mox.StubOutWithMock(fake.PathUtils, 'copy')
self._mox.StubOutWithMock(fake.PathUtils, 'remove')
self._mox.StubOutWithMock(fake.PathUtils, 'rename')
self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
self._mox.StubOutWithMock(fake.PathUtils,
'get_instance_migr_revert_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
self._mox.StubOutWithMock(vmutils.VMUtils,
'attach_volume_to_controller')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_mounted_disk_by_drive_number')
self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_controller_volume_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'enable_vm_metrics_collection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_vm_serial_port_connection')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils,
'get_internal_vhd_size_by_file_size')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
self._mox.StubOutWithMock(hostutils.HostUtils,
'is_cpu_feature_present')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'get_external_vswitch')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'create_vswitch_port')
self._mox.StubOutWithMock(networkutils.NetworkUtils,
'vswitch_port_needed')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'live_migrate_vm')
self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
'check_live_migration_config')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'volume_in_mapping')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_session_id_from_mounted_disk')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_device_number_for_target')
self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
'get_target_from_disk_path')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'login_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'execute_log_out')
self._mox.StubOutWithMock(volumeutils.VolumeUtils,
'get_iscsi_initiator')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'login_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'logout_storage_target')
self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
'execute_log_out')
self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
'get_rdp_console_port')
self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
'metadata_for_config_drive')
# Can't use StubOutClassWithMocks due to __exit__ and __enter__
self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
self._mox.StubOutWithMock(utils, 'execute')
def tearDown(self):
self._mox.UnsetStubs()
super(HyperVAPIBaseTestCase, self).tearDown()
class HyperVAPITestCase(HyperVAPIBaseTestCase,
test_driver.DriverAPITestHelper):
"""Unit tests for Hyper-V driver calls."""
def test_public_api_signatures(self):
self.assertPublicAPISignatures(self._conn)
def test_get_available_resource(self):
cpu_info = {'Architecture': 'fake',
'Name': 'fake',
'Manufacturer': 'ACME, Inc.',
'NumberOfCores': 2,
'NumberOfLogicalProcessors': 4}
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
windows_version = '6.2.9200'
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
m.MultipleTimes()
m = hostutils.HostUtils.get_windows_version()
m.AndReturn(windows_version)
self._mox.ReplayAll()
dic = self._conn.get_available_resource(None)
self._mox.VerifyAll()
self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
self.assertEqual(dic['hypervisor_hostname'], platform.node())
self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
self.assertEqual(dic['memory_mb_used'],
tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
self.assertEqual(dic['local_gb_used'],
tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
self.assertEqual(dic['hypervisor_version'],
windows_version.replace('.', ''))
self.assertEqual(dic['supported_instances'],
'[["i686", "hyperv", "hvm"], ["x86_64", "hyperv", "hvm"]]')
def test_get_host_stats(self):
tot_mem_kb = 2000000L
free_mem_kb = 1000000L
tot_hdd_b = 4L * 1024 ** 3
free_hdd_b = 3L * 1024 ** 3
hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
free_mem_kb))
m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
m.AndReturn((tot_hdd_b, free_hdd_b))
self._mox.ReplayAll()
dic = self._conn.get_host_stats(True)
self._mox.VerifyAll()
self.assertEqual(dic['disk_total'], tot_hdd_b / 1024 ** 3)
self.assertEqual(dic['disk_available'], free_hdd_b / 1024 ** 3)
self.assertEqual(dic['host_memory_total'], tot_mem_kb / 1024)
self.assertEqual(dic['host_memory_free'], free_mem_kb / 1024)
self.assertEqual(dic['disk_total'],
dic['disk_used'] + dic['disk_available'])
self.assertEqual(dic['host_memory_total'],
dic['host_memory_overhead'] +
dic['host_memory_free'])
def test_list_instances(self):
fake_instances = ['fake1', 'fake2']
vmutils.VMUtils.list_instances().AndReturn(fake_instances)
self._mox.ReplayAll()
instances = self._conn.list_instances()
self._mox.VerifyAll()
self.assertEqual(instances, fake_instances)
def test_get_host_uptime(self):
fake_host = "fake_host"
with mock.patch.object(self._conn._hostops,
"get_host_uptime") as mock_uptime:
self._conn._hostops.get_host_uptime(fake_host)
mock_uptime.assert_called_once_with(fake_host)
def test_get_info(self):
self._instance_data = self._get_instance_data()
summary_info = {'NumberOfProcessors': 2,
'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
'MemoryUsage': 1000,
'UpTime': 1}
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.get_vm_summary_info(func)
m.AndReturn(summary_info)
self._mox.ReplayAll()
info = self._conn.get_info(self._instance_data)
self._mox.VerifyAll()
self.assertEqual(info["state"], power_state.RUNNING)
def test_get_info_instance_not_found(self):
        # Tests that InstanceNotFound is raised when vmutils.vm_exists
        # reports that the instance does not exist.
self._instance_data = self._get_instance_data()
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(False)
self._mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
self._instance_data)
self._mox.VerifyAll()
def test_spawn_cow_image(self):
self._test_spawn_instance(True)
def test_spawn_cow_image_vhdx(self):
self._test_spawn_instance(True, vhd_format=constants.DISK_FORMAT_VHDX)
def test_spawn_no_cow_image(self):
self._test_spawn_instance(False)
def test_spawn_dynamic_memory(self):
CONF.set_override('dynamic_memory_ratio', 2.0, 'hyperv')
self._test_spawn_instance()
def test_spawn_no_cow_image_vhdx(self):
self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
def _setup_spawn_config_drive_mocks(self, use_cdrom):
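        # When the config drive is not attached as a CD-ROM, the raw image is
        # expected to be converted to VHD ('vpc') via qemu-img first; in both
        # cases the resulting drive is then attached to the IDE controller.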
instance_metadata.InstanceMetadata(mox.IgnoreArg(),
content=mox.IsA(list),
extra_md=mox.IsA(dict))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
cdb = self._mox.CreateMockAnything()
m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
m.AndReturn(cdb)
# __enter__ and __exit__ are required by "with"
cdb.__enter__().AndReturn(cdb)
cdb.make_drive(mox.IsA(str))
cdb.__exit__(None, None, None).AndReturn(None)
if not use_cdrom:
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
mox.IsA(str),
mox.IsA(str),
attempts=1)
fake.PathUtils.remove(mox.IsA(str))
m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk)
def _test_spawn_config_drive(self, use_cdrom, format_error=False):
self.flags(force_config_drive=True)
self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
self.flags(mkisofs_cmd='mkisofs.exe')
if use_cdrom:
expected_ide_disks = 1
expected_ide_dvds = 1
else:
expected_ide_disks = 2
expected_ide_dvds = 0
if format_error:
self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
self._test_spawn_instance,
with_exception=True,
config_drive=True,
use_cdrom=use_cdrom)
else:
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds,
config_drive=True,
use_cdrom=use_cdrom)
def test_spawn_config_drive(self):
self._test_spawn_config_drive(False)
def test_spawn_config_drive_format_error(self):
CONF.set_override('config_drive_format', 'wrong_format')
self._test_spawn_config_drive(True, True)
def test_spawn_config_drive_cdrom(self):
self._test_spawn_config_drive(True)
def test_spawn_no_config_drive(self):
self.flags(force_config_drive=False)
expected_ide_disks = 1
expected_ide_dvds = 0
self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
expected_ide_dvds=expected_ide_dvds)
def _test_spawn_nova_net_vif(self, with_port):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
fake_vswitch_path = 'fake vswitch path'
fake_vswitch_port = 'fake port'
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndReturn(fake_vswitch_path)
m = networkutils.NetworkUtils.vswitch_port_needed()
m.AndReturn(with_port)
if with_port:
m = networkutils.NetworkUtils.create_vswitch_port(
fake_vswitch_path, mox.IsA(str))
m.AndReturn(fake_vswitch_port)
vswitch_conn_data = fake_vswitch_port
else:
vswitch_conn_data = fake_vswitch_path
vmutils.VMUtils.set_nic_connection(mox.IsA(str),
mox.IsA(str), vswitch_conn_data)
self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
def test_spawn_nova_net_vif_with_port(self):
self._test_spawn_nova_net_vif(True)
def test_spawn_nova_net_vif_without_port(self):
self._test_spawn_nova_net_vif(False)
def test_spawn_nova_net_vif_no_vswitch_exception(self):
self.flags(network_api_class='nova.network.api.API')
# Reinstantiate driver, as the VIF plugin is loaded during __init__
self._conn = driver_hyperv.HyperVDriver(None)
def setup_vif_mocks():
m = networkutils.NetworkUtils.get_external_vswitch(
CONF.hyperv.vswitch_name)
m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
setup_vif_mocks_func=setup_vif_mocks,
with_exception=True)
def test_spawn_with_metrics_collection(self):
self.flags(enable_instance_metrics_collection=True, group='hyperv')
self._test_spawn_instance(False)
def test_spawn_with_ephemeral_storage(self):
self._test_spawn_instance(True, expected_ide_disks=2,
ephemeral_storage=True)
def _check_instance_name(self, vm_name):
return vm_name == self._instance_data['name']
def _test_vm_state_change(self, action, from_state, to_state):
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
to_state)
if to_state in (constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._setup_delete_vm_log_mocks()
if to_state in (constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
action(self._instance_data)
self._mox.VerifyAll()
def test_pause(self):
self._test_vm_state_change(self._conn.pause, None,
constants.HYPERV_VM_STATE_PAUSED)
def test_pause_already_paused(self):
self._test_vm_state_change(self._conn.pause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_PAUSED)
def test_unpause(self):
self._test_vm_state_change(self._conn.unpause,
constants.HYPERV_VM_STATE_PAUSED,
constants.HYPERV_VM_STATE_ENABLED)
def test_unpause_already_running(self):
self._test_vm_state_change(self._conn.unpause, None,
constants.HYPERV_VM_STATE_ENABLED)
def test_suspend(self):
self._test_vm_state_change(self._conn.suspend, None,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_suspend_already_suspended(self):
self._test_vm_state_change(self._conn.suspend,
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_SUSPENDED)
def test_resume(self):
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None),
constants.HYPERV_VM_STATE_SUSPENDED,
constants.HYPERV_VM_STATE_ENABLED)
def test_resume_already_running(self):
self._test_vm_state_change(lambda i: self._conn.resume(self._context,
i, None), None,
constants.HYPERV_VM_STATE_ENABLED)
def test_power_off(self):
self._test_vm_state_change(self._conn.power_off, None,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_already_powered_off(self):
self._test_vm_state_change(self._conn.power_off,
constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_DISABLED)
def test_power_on(self):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()
def test_power_on_already_running(self):
self._instance_data = self._get_instance_data()
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.power_on(self._context, self._instance_data, network_info)
self._mox.VerifyAll()
def test_reboot(self):
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self._instance_data = self._get_instance_data()
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_REBOOT)
self._setup_delete_vm_log_mocks()
self._setup_log_vm_output_mocks()
self._mox.ReplayAll()
self._conn.reboot(self._context, self._instance_data, network_info,
None)
self._mox.VerifyAll()
def _setup_destroy_mocks(self, destroy_disks=True):
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
self._setup_delete_vm_log_mocks()
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([], []))
vmutils.VMUtils.destroy_vm(func)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
def test_destroy(self):
self._instance_data = self._get_instance_data()
self._setup_destroy_mocks()
self._mox.ReplayAll()
self._conn.destroy(self._context, self._instance_data, None)
self._mox.VerifyAll()
def test_live_migration_unsupported_os(self):
self._check_min_windows_version_satisfied = False
self._conn = driver_hyperv.HyperVDriver(None)
self._test_live_migration(unsupported_os=True)
def test_live_migration_without_volumes(self):
self._test_live_migration()
def test_live_migration_with_volumes(self):
self._test_live_migration(with_volumes=True)
def test_live_migration_with_target_failure(self):
self._test_live_migration(test_failure=True)
def _test_live_migration(self, test_failure=False,
with_volumes=False,
unsupported_os=False):
dest_server = 'fake_server'
instance_data = self._get_instance_data()
fake_post_method = self._mox.CreateMockAnything()
if not test_failure and not unsupported_os:
fake_post_method(self._context, instance_data, dest_server,
False)
fake_recover_method = self._mox.CreateMockAnything()
if test_failure:
fake_recover_method(self._context, instance_data, dest_server,
False)
if with_volumes:
fake_target_iqn = 'fake_target_iqn'
fake_target_lun = 1
if not unsupported_os:
m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
m.AndReturn(('fake_local_vm_log_path', 'fake_vm_log_path.1'))
m = fake.PathUtils.get_vm_console_log_paths(
mox.IsA(str), remote_server=mox.IsA(str))
m.AndReturn(('fake_remote_vm_log_path',
'fake_remote_vm_log_path.1'))
self._mox.StubOutWithMock(fake.PathUtils, 'exists')
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(False)
fake.PathUtils.copy(mox.IsA(str), mox.IsA(str))
m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
instance_data['name'], dest_server)
if test_failure:
m.AndRaise(vmutils.HyperVException('Simulated failure'))
if with_volumes:
m.AndReturn([(fake_target_iqn, fake_target_lun)])
volumeutils.VolumeUtils.logout_storage_target(fake_target_iqn)
else:
m.AndReturn([])
self._mox.ReplayAll()
try:
hyperv_exception_raised = False
unsupported_os_exception_raised = False
self._conn.live_migration(self._context, instance_data,
dest_server, fake_post_method,
fake_recover_method)
except vmutils.HyperVException:
hyperv_exception_raised = True
except NotImplementedError:
unsupported_os_exception_raised = True
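        # HyperVException must be raised exactly when a failure was simulated,
        # and NotImplementedError exactly when the host OS is unsupported; the
        # two checks below assert those equivalences.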
self.assertTrue(not test_failure ^ hyperv_exception_raised)
self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
self._mox.VerifyAll()
def test_pre_live_migration_cow_image(self):
self._test_pre_live_migration(True, False)
def test_pre_live_migration_no_cow_image(self):
self._test_pre_live_migration(False, False)
def test_pre_live_migration_with_volumes(self):
self._test_pre_live_migration(False, True)
def _test_pre_live_migration(self, cow, with_volumes):
self.flags(use_cow_images=cow)
instance_data = self._get_instance_data()
instance = db.instance_create(self._context, instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
m.AndReturn(True)
if cow:
self._setup_get_cached_image_mocks(cow)
if with_volumes:
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
else:
block_device_info = None
self._mox.ReplayAll()
self._conn.pre_live_migration(self._context, instance,
block_device_info, None, network_info)
self._mox.VerifyAll()
if cow:
self.assertIsNotNone(self._fetched_image)
else:
self.assertIsNone(self._fetched_image)
def test_get_instance_disk_info_is_implemented(self):
# Ensure that the method has been implemented in the driver
try:
disk_info = self._conn.get_instance_disk_info('fake_instance_name')
self.assertIsNone(disk_info)
except NotImplementedError:
self.fail("test_get_instance_disk_info() should not raise "
"NotImplementedError")
def test_snapshot_with_update_failure(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._update_image_raise_exception = True
self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _setup_snapshot_mocks(self):
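        # Expected snapshot flow: take a Hyper-V snapshot, copy the
        # differencing VHD and its parent into an export directory, reconnect
        # and merge them, remove the Hyper-V snapshot, then read the merged
        # VHD for upload and clean up the export directory.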
expected_calls = [
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
fake_hv_snapshot_path = 'fake_snapshot_path'
fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
self._instance_data = self._get_instance_data()
func = mox.Func(self._check_instance_name)
m = vmutils.VMUtils.take_vm_snapshot(func)
m.AndReturn(fake_hv_snapshot_path)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
m.AndReturn(fake_parent_vhd_path)
self._fake_dest_disk_path = None
def copy_dest_disk_path(src, dest):
self._fake_dest_disk_path = dest
m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m.WithSideEffects(copy_dest_disk_path)
self._fake_dest_base_disk_path = None
def copy_dest_base_disk_path(src, dest):
self._fake_dest_base_disk_path = dest
m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
m.WithSideEffects(copy_dest_base_disk_path)
def check_dest_disk_path(path):
return path == self._fake_dest_disk_path
def check_dest_base_disk_path(path):
return path == self._fake_dest_base_disk_path
func1 = mox.Func(check_dest_disk_path)
func2 = mox.Func(check_dest_base_disk_path)
# Make sure that the hyper-v base and differential VHDs are merged
vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
vhdutils.VHDUtils.merge_vhd(func1, func2)
def check_snapshot_path(snapshot_path):
return snapshot_path == fake_hv_snapshot_path
# Make sure that the Hyper-V snapshot is removed
func = mox.Func(check_snapshot_path)
vmutils.VMUtils.remove_vm_snapshot(func)
fake.PathUtils.rmtree(mox.IsA(str))
m = fake.PathUtils.open(func2, 'rb')
m.AndReturn(io.BytesIO(b'fake content'))
return (snapshot_name, func_call_matcher)
def test_snapshot(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
self._mox.ReplayAll()
self._conn.snapshot(self._context, self._instance_data, snapshot_name,
func_call_matcher.call)
self._mox.VerifyAll()
self.assertTrue(self._image_metadata)
self.assertIn("disk_format", self._image_metadata)
self.assertEqual("vhd", self._image_metadata["disk_format"])
# Assert states changed in correct order
self.assertIsNone(func_call_matcher.match())
def _get_instance_data(self):
instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
return db_fakes.get_fake_instance_data(instance_name,
self._project_id,
self._user_id)
def _spawn_instance(self, cow, block_device_info=None,
ephemeral_storage=False):
self.flags(use_cow_images=cow)
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
if ephemeral_storage:
instance['ephemeral_gb'] = 1
image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self._conn.spawn(self._context, instance, image,
injected_files=[], admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
def _add_ide_disk(self, vm_name, path, ctrller_addr,
drive_addr, drive_type):
if drive_type == constants.IDE_DISK:
self._instance_ide_disks.append(path)
elif drive_type == constants.IDE_DVD:
self._instance_ide_dvds.append(path)
def _add_volume_disk(self, vm_name, controller_path, address,
mounted_disk_path):
self._instance_volume_disks.append(mounted_disk_path)
def _check_img_path(self, image_path):
return image_path == self._fetched_image
def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
boot_from_volume=False,
block_device_info=None,
admin_permissions=True,
ephemeral_storage=False):
vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
mox.IsA(int), mox.IsA(bool),
CONF.hyperv.dynamic_memory_ratio,
mox.IsA(list))
if not boot_from_volume:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
if ephemeral_storage:
m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
func = mox.Func(self._check_vm_name)
m = vmutils.VMUtils.create_scsi_controller(func)
m.InAnyOrder()
if boot_from_volume:
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
target_lun, target_portal, True)
vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
mox.IsA(str), mox.IsA(unicode)).InAnyOrder()
if setup_vif_mocks_func:
setup_vif_mocks_func()
if CONF.hyperv.enable_instance_metrics_collection:
vmutils.VMUtils.enable_vm_metrics_collection(
mox.Func(self._check_vm_name))
vmutils.VMUtils.get_vm_serial_port_connection(
mox.IsA(str), update_connection=mox.IsA(str))
def _set_vm_name(self, vm_name):
self._test_vm_name = vm_name
def _check_vm_name(self, vm_name):
return vm_name == self._test_vm_name
def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
self._mox.StubOutWithMock(vmutils.VMUtils,
'check_admin_permissions')
m = vmutils.VMUtils.check_admin_permissions()
if admin_permissions:
m.AndReturn(None)
else:
m.AndRaise(vmutils.HyperVAuthorizationException(_(
'Simulated failure')))
def _setup_log_vm_output_mocks(self):
m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
units.Mi).start()
def _setup_delete_vm_log_mocks(self):
m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
fileutils.delete_if_exists(mox.IsA(str))
fileutils.delete_if_exists(mox.IsA(str))
def _setup_get_cached_image_mocks(self, cow=True,
vhd_format=constants.DISK_FORMAT_VHD):
m = vhdutils.VHDUtils.get_vhd_format(
mox.Func(self._check_img_path))
m.AndReturn(vhd_format)
def check_img_path_with_ext(image_path):
return image_path == self._fetched_image + '.' + vhd_format.lower()
fake.PathUtils.rename(mox.Func(self._check_img_path),
mox.Func(check_img_path_with_ext))
if cow and vhd_format == constants.DISK_FORMAT_VHD:
m = vhdutils.VHDUtils.get_vhd_info(
mox.Func(check_img_path_with_ext))
m.AndReturn({'MaxInternalSize': 1024})
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
is_file_max_size=False)
def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
with_exception=False,
block_device_info=None,
boot_from_volume=False,
config_drive=False,
use_cdrom=False,
admin_permissions=True,
vhd_format=constants.DISK_FORMAT_VHD,
ephemeral_storage=False):
m = vmutils.VMUtils.vm_exists(mox.IsA(str))
m.WithSideEffects(self._set_vm_name).AndReturn(False)
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
if block_device_info:
m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
'fake_root_device_name', block_device_info)
m.AndReturn(boot_from_volume)
if not boot_from_volume:
m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
m.AndReturn(self._test_instance_dir)
self._setup_get_cached_image_mocks(cow, vhd_format)
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
'Type': 2})
if cow:
m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
m.AndReturn(vhd_format)
if vhd_format == constants.DISK_FORMAT_VHD:
vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
mox.IsA(str))
else:
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
mox.IsA(str),
mox.IsA(int))
else:
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
is_file_max_size=False)
self._setup_check_admin_permissions_mocks(
admin_permissions=admin_permissions)
if ephemeral_storage:
m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
m.AndReturn(self._test_instance_dir)
vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
mox.IsA(str))
self._setup_create_instance_mocks(setup_vif_mocks_func,
boot_from_volume,
block_device_info,
ephemeral_storage=ephemeral_storage)
if config_drive and not with_exception:
self._setup_spawn_config_drive_mocks(use_cdrom)
# TODO(alexpilotti) Based on where the exception is thrown
# some of the above mock calls need to be skipped
if with_exception:
self._setup_destroy_mocks()
else:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
def _test_spawn_instance(self, cow=True,
expected_ide_disks=1,
expected_ide_dvds=0,
setup_vif_mocks_func=None,
with_exception=False,
config_drive=False,
use_cdrom=False,
admin_permissions=True,
vhd_format=constants.DISK_FORMAT_VHD,
ephemeral_storage=False):
self._setup_spawn_instance_mocks(cow,
setup_vif_mocks_func,
with_exception,
config_drive=config_drive,
use_cdrom=use_cdrom,
admin_permissions=admin_permissions,
vhd_format=vhd_format,
ephemeral_storage=ephemeral_storage)
self._mox.ReplayAll()
self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_ide_disks), expected_ide_disks)
self.assertEqual(len(self._instance_ide_dvds), expected_ide_dvds)
vhd_path = os.path.join(self._test_instance_dir, 'root.' +
vhd_format.lower())
self.assertEqual(vhd_path, self._instance_ide_disks[0])
def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
fake_mounted_disk, fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
volumeutils.VolumeUtils.login_storage_target(target_lun,
target_iqn,
target_portal)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
target_portal=None, boot_from_volume=False):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
self._mox.StubOutWithMock(self._conn._volumeops,
'_get_free_controller_slot')
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
if boot_from_volume:
m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
m.AndReturn(fake_controller_path)
fake_free_slot = 0
else:
m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
m.AndReturn(fake_controller_path)
fake_free_slot = 1
m = self._conn._volumeops._get_free_controller_slot(
fake_controller_path)
m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
fake_controller_path,
fake_free_slot,
fake_mounted_disk)
m.WithSideEffects(self._add_volume_disk)
def _test_util_class_version(self, v1_class, v2_class,
get_instance_action, is_hyperv_2012,
force_v1_flag, force_utils_v1):
self._check_min_windows_version_satisfied = is_hyperv_2012
        CONF.set_override(force_v1_flag, force_utils_v1, 'hyperv')
self._conn = driver_hyperv.HyperVDriver(None)
instance = get_instance_action()
is_v1 = isinstance(instance, v1_class)
# v2_class can inherit from v1_class
is_v2 = isinstance(instance, v2_class)
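        # The V2 utils class is expected exactly when the host is Hyper-V 2012
        # (or newer) and the force-v1 flag is not set; otherwise the plain V1
        # class should be returned. The XOR below asserts that equivalence.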
        self.assertTrue((is_hyperv_2012 and not force_utils_v1) ^
                        (is_v1 and not is_v2))
def test_volumeutils_version_hyperv_2012(self):
self._test_util_class_version(volumeutils.VolumeUtils,
volumeutilsv2.VolumeUtilsV2,
lambda: utilsfactory.get_volumeutils(),
True, 'force_volumeutils_v1', False)
def test_volumeutils_version_hyperv_2012_force_v1(self):
self._test_util_class_version(volumeutils.VolumeUtils,
volumeutilsv2.VolumeUtilsV2,
lambda: utilsfactory.get_volumeutils(),
True, 'force_volumeutils_v1', True)
def test_volumeutils_version_hyperv_2008R2(self):
self._test_util_class_version(volumeutils.VolumeUtils,
volumeutilsv2.VolumeUtilsV2,
lambda: utilsfactory.get_volumeutils(),
False, 'force_volumeutils_v1', False)
def test_vmutils_version_hyperv_2012(self):
self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
lambda: utilsfactory.get_vmutils(),
True, 'force_hyperv_utils_v1', False)
def test_vmutils_version_hyperv_2012_force_v1(self):
self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
lambda: utilsfactory.get_vmutils(),
True, 'force_hyperv_utils_v1', True)
def test_vmutils_version_hyperv_2008R2(self):
self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
lambda: utilsfactory.get_vmutils(),
False, 'force_hyperv_utils_v1', False)
def test_vhdutils_version_hyperv_2012(self):
self._test_util_class_version(vhdutils.VHDUtils,
vhdutilsv2.VHDUtilsV2,
lambda: utilsfactory.get_vhdutils(),
True, 'force_hyperv_utils_v1', False)
def test_vhdutils_version_hyperv_2012_force_v1(self):
self._test_util_class_version(vhdutils.VHDUtils,
vhdutilsv2.VHDUtilsV2,
lambda: utilsfactory.get_vhdutils(),
True, 'force_hyperv_utils_v1', True)
def test_vhdutils_version_hyperv_2008R2(self):
self._test_util_class_version(vhdutils.VHDUtils,
vhdutilsv2.VHDUtilsV2,
lambda: utilsfactory.get_vhdutils(),
False, 'force_hyperv_utils_v1', False)
def test_networkutils_version_hyperv_2012(self):
self._test_util_class_version(networkutils.NetworkUtils,
networkutilsv2.NetworkUtilsV2,
lambda: utilsfactory.get_networkutils(),
True, 'force_hyperv_utils_v1', False)
def test_networkutils_version_hyperv_2012_force_v1(self):
self._test_util_class_version(networkutils.NetworkUtils,
networkutilsv2.NetworkUtilsV2,
lambda: utilsfactory.get_networkutils(),
True, 'force_hyperv_utils_v1', True)
def test_networkutils_version_hyperv_2008R2(self):
self._test_util_class_version(networkutils.NetworkUtils,
networkutilsv2.NetworkUtilsV2,
lambda: utilsfactory.get_networkutils(),
False, 'force_hyperv_utils_v1', False)
def test_attach_volume(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self._conn.attach_volume(None, connection_info, instance_data,
mount_point)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_volume_disks), 1)
def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
fake_mounted_disk,
fake_device_number):
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndRaise(vmutils.HyperVException('Simulated failure'))
def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
target_lun, target_portal=None,
boot_from_volume=False):
fake_mounted_disk = "fake_mounted disk"
fake_device_number = 0
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
fake_mounted_disk,
fake_device_number)
self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
fake_mounted_disk,
fake_device_number)
volumeutils.VolumeUtils.logout_storage_target(target_iqn)
def test_attach_volume_logout(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
mount_point = '/dev/sdc'
self._mock_attach_volume_target_logout(instance_data['name'],
target_iqn, target_lun,
target_portal)
self._mox.ReplayAll()
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
None, connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_attach_volume_connection_error(self):
instance_data = self._get_instance_data()
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
mount_point = '/dev/sdc'
def fake_login_storage_target(connection_info):
raise vmutils.HyperVException('Fake connection exception')
self.stubs.Set(self._conn._volumeops, '_login_storage_target',
fake_login_storage_target)
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
None, connection_info, instance_data, mount_point)
def _mock_detach_volume(self, target_iqn, target_lun):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
target_lun)
m.AndReturn(fake_device_number)
m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
fake_device_number)
m.AndReturn(fake_mounted_disk)
vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
volumeutils.VolumeUtils.logout_storage_target(mox.IsA(str))
def test_detach_volume(self):
instance_data = self._get_instance_data()
self.assertIn('name', instance_data)
connection_info = db_fakes.get_fake_volume_info_data(
self._volume_target_portal, self._volume_id)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
self.assertIn('target_portal', data)
mount_point = '/dev/sdc'
self._mock_detach_volume(target_iqn, target_lun)
self._mox.ReplayAll()
self._conn.detach_volume(connection_info, instance_data, mount_point)
self._mox.VerifyAll()
def test_boot_from_volume(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
self._setup_spawn_instance_mocks(cow=False,
block_device_info=block_device_info,
boot_from_volume=True)
self._mox.ReplayAll()
self._spawn_instance(False, block_device_info)
self._mox.VerifyAll()
self.assertEqual(len(self._instance_volume_disks), 1)
def test_get_volume_connector(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
fake_my_ip = "fake_ip"
fake_host = "fake_host"
fake_initiator = "fake_initiator"
self.flags(my_ip=fake_my_ip)
self.flags(host=fake_host)
m = volumeutils.VolumeUtils.get_iscsi_initiator()
m.AndReturn(fake_initiator)
self._mox.ReplayAll()
data = self._conn.get_volume_connector(instance)
self._mox.VerifyAll()
self.assertEqual(fake_my_ip, data.get('ip'))
self.assertEqual(fake_host, data.get('host'))
self.assertEqual(fake_initiator, data.get('initiator'))
def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
copy_exception=False,
size_exception=False):
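        # same_host simulates resizing to the same node, copy_exception a
        # failure while copying the root VHD, and size_exception a destination
        # flavor whose root disk is smaller than the instance's current one.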
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
instance['root_gb'] = 10
fake_local_ip = '10.0.0.1'
if same_host:
fake_dest_ip = fake_local_ip
else:
fake_dest_ip = '10.0.0.2'
if size_exception:
flavor = 'm1.tiny'
else:
flavor = 'm1.small'
flavor = db.flavor_get_by_name(self._context, flavor)
if not size_exception:
fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
func = mox.Func(self._check_instance_name)
vmutils.VMUtils.set_vm_state(func,
constants.HYPERV_VM_STATE_DISABLED)
self._setup_delete_vm_log_mocks()
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([fake_root_vhd_path], []))
m = hostutils.HostUtils.get_local_ips()
m.AndReturn([fake_local_ip])
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(
instance['name'], remove_dir=True)
m.AndReturn(fake_revert_path)
if same_host:
fake.PathUtils.makedirs(mox.IsA(str))
m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
if copy_exception:
m.AndRaise(shutil.Error('Simulated copy error'))
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
else:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = True
if same_host:
fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
destroy_disks = False
self._setup_destroy_mocks(False)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
mox.IsA(str),
remove_dir=True)
m.AndReturn(self._test_instance_dir)
return (instance, fake_dest_ip, network_info, flavor)
def test_migrate_disk_and_power_off(self):
(instance,
fake_dest_ip,
network_info,
flavor) = self._setup_test_migrate_disk_and_power_off_mocks()
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, flavor,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_same_host(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
same_host=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self._conn.migrate_disk_and_power_off(self._context, instance,
fake_dest_ip, flavor,
network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_copy_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
copy_exception=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip,
flavor, network_info)
self._mox.VerifyAll()
def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
args = self._setup_test_migrate_disk_and_power_off_mocks(
size_exception=True)
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip,
flavor, network_info)
self._mox.VerifyAll()
def _mock_attach_config_drive(self, instance, config_drive_format):
instance['config_drive'] = True
self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
m = fake.PathUtils.lookup_configdrive_path(
mox.Func(self._check_instance_name))
if config_drive_format in constants.DISK_FORMAT_MAP:
m.AndReturn(self._test_instance_dir + '/configdrive.' +
config_drive_format)
else:
m.AndReturn(None)
m = vmutils.VMUtils.attach_ide_drive(
mox.Func(self._check_instance_name),
mox.IsA(str),
mox.IsA(int),
mox.IsA(int),
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk).InAnyOrder()
def _verify_attach_config_drive(self, config_drive_format):
if config_drive_format == constants.IDE_DISK_FORMAT.lower():
self.assertEqual(self._instance_ide_disks[1],
self._test_instance_dir + '/configdrive.' +
config_drive_format)
elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
self.assertEqual(self._instance_ide_dvds[0],
self._test_instance_dir + '/configdrive.' +
config_drive_format)
def _test_finish_migration(self, power_on, ephemeral_storage=False,
config_drive=False,
config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
self._mox.StubOutWithMock(fake.PathUtils, 'exists')
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
instance["image_ref"]))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'ParentPath': fake_parent_vhd_path,
'MaxInternalSize': 1})
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
m.AndReturn({'MaxInternalSize': 1024})
m = fake.PathUtils.exists(mox.IsA(str))
m.AndReturn(True)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
if ephemeral_storage:
            m.AndReturn(self._test_instance_dir)
else:
m.AndReturn(None)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False,
ephemeral_storage=ephemeral_storage)
if power_on:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
if config_drive:
self._mock_attach_config_drive(instance, config_drive_format)
self._mox.ReplayAll()
self._conn.finish_migration(self._context, None, instance, "",
network_info, None, False, None, power_on)
self._mox.VerifyAll()
if config_drive:
self._verify_attach_config_drive(config_drive_format)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
def test_finish_migration_with_ephemeral_storage(self):
self._test_finish_migration(False, ephemeral_storage=True)
def test_finish_migration_attach_config_drive_iso(self):
self._test_finish_migration(False, config_drive=True,
config_drive_format=constants.IDE_DVD_FORMAT.lower())
def test_finish_migration_attach_config_drive_vhd(self):
self._test_finish_migration(False, config_drive=True,
config_drive_format=constants.IDE_DISK_FORMAT.lower())
def test_confirm_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
remove_dir=True)
self._mox.ReplayAll()
self._conn.confirm_migration(None, instance, network_info)
self._mox.VerifyAll()
def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
config_drive=False,
config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
instance['name'])
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
remove_dir=True)
m.AndReturn(self._test_instance_dir)
m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
m.AndReturn(fake_revert_path)
fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
m.AndReturn(self._test_instance_dir)
m = fake.PathUtils.get_instance_dir(mox.IsA(str))
if ephemeral_storage:
m.AndReturn(self._test_instance_dir)
else:
m.AndReturn(None)
self._set_vm_name(instance['name'])
self._setup_create_instance_mocks(None, False,
ephemeral_storage=ephemeral_storage)
if power_on:
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
self._setup_log_vm_output_mocks()
if config_drive:
self._mock_attach_config_drive(instance, config_drive_format)
self._mox.ReplayAll()
self._conn.finish_revert_migration(self._context, instance,
network_info, None,
power_on)
self._mox.VerifyAll()
if config_drive:
self._verify_attach_config_drive(config_drive_format)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
def test_spawn_no_admin_permissions(self):
self.assertRaises(vmutils.HyperVAuthorizationException,
self._test_spawn_instance,
with_exception=True,
admin_permissions=False)
def test_finish_revert_migration_with_ephemeral_storage(self):
self._test_finish_revert_migration(False, ephemeral_storage=True)
def test_finish_revert_migration_attach_config_drive_iso(self):
self._test_finish_revert_migration(False, config_drive=True,
config_drive_format=constants.IDE_DVD_FORMAT.lower())
def test_finish_revert_migration_attach_config_drive_vhd(self):
self._test_finish_revert_migration(False, config_drive=True,
config_drive_format=constants.IDE_DISK_FORMAT.lower())
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self._conn.plug_vifs,
instance=self._test_spawn_instance,
network_info=None)
def test_unplug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self._conn.unplug_vifs,
instance=self._test_spawn_instance,
network_info=None)
def test_rollback_live_migration_at_destination(self):
with mock.patch.object(self._conn, "destroy") as mock_destroy:
self._conn.rollback_live_migration_at_destination(self._context,
self._test_spawn_instance, [], None)
mock_destroy.assert_called_once_with(self._context,
self._test_spawn_instance, [], None)
def test_refresh_instance_security_rules(self):
self.assertRaises(NotImplementedError,
self._conn.refresh_instance_security_rules,
instance=None)
def test_get_rdp_console(self):
self.flags(my_ip="192.168.1.1")
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
fake_port = 9999
fake_vm_id = "fake_vm_id"
m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
m.AndReturn(fake_port)
m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
m.AndReturn(fake_vm_id)
self._mox.ReplayAll()
connect_info = self._conn.get_rdp_console(self._context, instance)
self._mox.VerifyAll()
self.assertEqual(CONF.my_ip, connect_info.host)
self.assertEqual(fake_port, connect_info.port)
self.assertEqual(fake_vm_id, connect_info.internal_access_path)
class VolumeOpsTestCase(HyperVAPIBaseTestCase):
"""Unit tests for VolumeOps class."""
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self.volumeops = volumeops.VolumeOps()
def test_get_mounted_disk_from_lun(self):
with contextlib.nested(
mock.patch.object(self.volumeops._volutils,
'get_device_number_for_target'),
mock.patch.object(self.volumeops._vmutils,
'get_mounted_disk_by_drive_number')
) as (mock_get_device_number_for_target,
mock_get_mounted_disk_by_drive_number):
mock_get_device_number_for_target.return_value = 0
mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
target_lun)
self.assertEqual(disk, 'disk_path')
def test_get_mounted_disk_from_lun_failure(self):
self.flags(mounted_disk_query_retry_count=1, group='hyperv')
with mock.patch.object(self.volumeops._volutils,
'get_device_number_for_target') as m_device_num:
m_device_num.side_effect = [None, -1]
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
for attempt in xrange(1):
self.assertRaises(exception.NotFound,
self.volumeops._get_mounted_disk_from_lun,
target_iqn, target_lun)
def test_get_free_controller_slot_exception(self):
fake_drive = mock.MagicMock()
type(fake_drive).AddressOnParent = mock.PropertyMock(
side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
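        # Every access to AddressOnParent yields the next slot number, so the
        # single MagicMock repeated below appears to occupy every SCSI slot
        # and no free slot can be found.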
fake_scsi_controller_path = 'fake_scsi_controller_path'
with mock.patch.object(self.volumeops._vmutils,
'get_attached_disks') as fake_get_attached_disks:
fake_get_attached_disks.return_value = (
[fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
self.assertRaises(vmutils.HyperVException,
self.volumeops._get_free_controller_slot,
fake_scsi_controller_path)
class HostOpsTestCase(HyperVAPIBaseTestCase):
"""Unit tests for the Hyper-V hostops class."""
def setUp(self):
self._hostops = hostops.HostOps()
self._hostops._hostutils = mock.MagicMock()
self._hostops.time = mock.MagicMock()
super(HostOpsTestCase, self).setUp()
@mock.patch('nova.virt.hyperv.hostops.time')
def test_host_uptime(self, mock_time):
self._hostops._hostutils.get_host_tick_count64.return_value = 100
mock_time.strftime.return_value = "01:01:01"
result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
str(datetime.timedelta(
                milliseconds=long(100))))
actual_uptime = self._hostops.get_host_uptime()
self.assertEqual(result_uptime, actual_uptime)
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xmltodict
import logging
from os import getcwd
from os.path import join
MCU_TEMPLATE = {
'mcu' : {
'vendor' : ['Manually add vendor (st, freescale, etc) instead of this text'],
'name' : [''],
'core' : ['Manually add core (cortex-mX) instead of this text'],
},
}
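# MCU_TEMPLATE is the skeleton returned by every parser below: the 'mcu'
# section carries placeholder values meant to be filled in manually, while the
# parsers add a 'tool_specific' section extracted from the project file. Note
# that the parsers currently assign and mutate this module-level dict directly
# rather than copying it.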
class UvisionDefinition:
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
project_file = join(getcwd(), project_file)
uvproj_dic = xmltodict.parse(open(project_file, "rb"), dict_constructor=dict)
        # Generic target; this should come from the Target class
mcu = MCU_TEMPLATE
try:
mcu['tool_specific'] = {
# legacy device
'uvision' : {
'TargetOption' : {
'Device' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device']],
'DeviceId' : [None if not uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] else
int(uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'])],
'Vendor' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor']],
'Cpu' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu']],
'FlashDriverDll' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll']],
'SFDFile' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile']],
'RegisterFile': [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile']],
}
}
}
except KeyError:
# validity check for uvision project
logging.debug("The project_file %s seems to be not valid .uvproj file.")
return mcu
return mcu
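# A minimal usage sketch for the UvisionDefinition parser above (the file name
# is illustrative and assumed to exist in the current working directory):
#
#   parser = UvisionDefinition()
#   mcu = parser.get_mcu_definition('my_project.uvproj')
#   print(mcu['tool_specific']['uvision']['TargetOption']['Device'])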
class UvisionDefinition5:
    # TODO: create common uvision class (4 and 5 have many common keys)
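    # The uVision 5 handling mirrors the legacy parser above; the notable
    # difference read here is 'PackID' (the CMSIS pack identifier), and the
    # result is stored under the 'uvision5' key instead of 'uvision'.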
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
project_file = join(getcwd(), project_file)
uvproj_dic = xmltodict.parse(open(project_file, "rb"), dict_constructor=dict)
        # Generic target; this should come from the Target class
mcu = MCU_TEMPLATE
try:
mcu['tool_specific'] = {
'uvision5' : {
'TargetOption' : {
'Device' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Device']],
'DeviceId' : [None if not uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'] else
int(uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['DeviceId'])],
'Vendor' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Vendor']],
'Cpu' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['Cpu']],
'FlashDriverDll' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['FlashDriverDll']],
'SFDFile' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['SFDFile']],
'PackID' : [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['PackID']],
'RegisterFile': [uvproj_dic['Project']['Targets']['Target']['TargetOption']['TargetCommonOption']['RegisterFile']],
}
}
}
except KeyError:
# validity check for uvision project
logging.debug("The project_file %s seems to be not valid .uvproj file.")
return mcu
return mcu
class IARDefinitions:
def _get_option(self, settings, find_key):
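        # Return the index of the option whose 'name' equals find_key;
        # falls through and returns None when no such option exists.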
for option in settings:
if option['name'] == find_key:
return settings.index(option)
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
        # TODO: check the file extension here to confirm this is a valid IAR project,
        # or at least validate the syntax by looking for something IAR defines and return an error if not
project_file = join(getcwd(), project_file)
ewp_dic = xmltodict.parse(open(project_file, "rb"), dict_constructor=dict)
mcu = MCU_TEMPLATE
try:
ewp_dic['project']['configuration']
except KeyError:
# validity check for iar project
logging.debug("The project_file %s seems to be not valid .ewp file.")
return mcu
# Fill in only must-have values, fpu will be added if defined for mcu
mcu['tool_specific'] = {
'iar' : {
# MCU selection
'OGChipSelectEditMenu' : {
'state' : [],
},
                # we select by mcu rather than by core
'OGCoreOrChip' : {
'state' : [1],
},
}
}
        # A project can contain multiple configurations (debug, release, ...); for the
        # mcu definition it does not matter which one we use, so take configuration[0]
        # when it is a list and fall back to the single configuration otherwise.
try:
index_general = self._get_option(ewp_dic['project']['configuration'][0]['settings'], 'General')
configuration = ewp_dic['project']['configuration'][0]
except KeyError:
index_general = self._get_option(ewp_dic['project']['configuration']['settings'], 'General')
configuration = ewp_dic['project']['configuration']
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'OGChipSelectEditMenu')
OGChipSelectEditMenu = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['OGChipSelectEditMenu']['state'].append(OGChipSelectEditMenu['state'].replace('\t', ' ', 1))
        # We keep this as an internal version marker: the 'FPU' option means version 1,
        # 'FPU2' means version 2.
        # TODO: we should look at IAR's own versioning to get this right
fileVersion = 1
try:
if self._get_option(configuration['settings'][index_general]['data']['option'], 'FPU2'):
fileVersion = 2
except TypeError:
pass
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GBECoreSlave')
GBECoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GBECoreSlave'] = { 'state': [int(GBECoreSlave['state'])] }
if fileVersion == 2:
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave2')
GFPUCoreSlave2 = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GFPUCoreSlave2'] = { 'state': [int(GFPUCoreSlave2['state'])] }
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'CoreVariant')
CoreVariant = configuration['settings'][index_general]['data']['option'][index_option]
# not all projects have CoreVariant filled in
try:
mcu['tool_specific']['iar']['CoreVariant'] = { 'state': [int(CoreVariant['state'])] }
except TypeError:
pass
else:
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave')
GFPUCoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['GFPUCoreSlave'] = { 'state': [int(GFPUCoreSlave['state'])] }
index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'Variant')
Variant = configuration['settings'][index_general]['data']['option'][index_option]
mcu['tool_specific']['iar']['Variant'] = { 'state': [int(Variant['state'])] }
return mcu
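# The CoIDE parser below reads the .coproj XML: device identification comes from
# the Target/Device attributes, the default flash algorithm from DebugOption, and
# the IROM1/IROM2/IRAM1/IRAM2 layout from the Link/MemoryAreas entries, each
# located by name via _coproj_find_option.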
class CoIDEdefinitions:
def _coproj_find_option(self, option_dic, key_to_find, value_to_match):
i = 0
for option in option_dic:
for k,v in option.items():
if k == key_to_find and value_to_match == v:
return i
i += 1
return None
def get_mcu_definition(self, project_file):
""" Parse project file to get mcu definition """
project_file = join(getcwd(), project_file)
coproj_dic = xmltodict.parse(open(project_file, "rb"), dict_constructor=dict)
mcu = MCU_TEMPLATE
IROM1_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IROM1')
IROM2_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IROM2')
IRAM1_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IRAM1')
IRAM2_index = self._coproj_find_option(coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'], '@name', 'IRAM2')
defaultAlgorithm_index = self._coproj_find_option(coproj_dic['Project']['Target']['DebugOption']['Option'], '@name', 'org.coocox.codebugger.gdbjtag.core.defaultAlgorithm')
mcu['tool_specific'] = {
'coide' : {
'Device' : {
'manufacturerId' : [coproj_dic['Project']['Target']['Device']['@manufacturerId']],
'manufacturerName': [coproj_dic['Project']['Target']['Device']['@manufacturerName']],
'chipId': [coproj_dic['Project']['Target']['Device']['@chipId']],
'chipName': [coproj_dic['Project']['Target']['Device']['@chipName']],
},
'DebugOption': {
'defaultAlgorithm': [coproj_dic['Project']['Target']['DebugOption']['Option'][defaultAlgorithm_index]['@value']],
},
'MemoryAreas': {
'IROM1': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM1_index]['@type']],
},
'IRAM1': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM1_index]['@type']],
},
'IROM2': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IROM2_index]['@type']],
},
'IRAM2': {
'name': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@name']],
'size': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@size']],
'startValue': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@startValue']],
'type': [coproj_dic['Project']['Target']['BuildOption']['Link']['MemoryAreas']['Memory'][IRAM2_index]['@type']],
}
}
}
}
return mcu
|
|
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.shortcuts import render_to_response, get_object_or_404
from django.template import loader, Context, RequestContext
from django.template.loader import select_template
from django.http import (Http404, HttpResponseNotAllowed,
HttpResponseRedirect)
from django.core.urlresolvers import reverse
from newsletters.models import Newsletter, Advertisement, Subscription
from newsletters.settings import (DEFAULT_TEMPLATE, AUTO_CONFIRM, FROM_EMAIL,
EMAIL_NOTIFICATION_SUBJECT)
from newsletters.forms import NewsletterForm, get_newsletters_with_subs
from newsletters.jsonresponse import JSONResponse
from newsletters.signals import subscription, unsubscription
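# Views for listing newsletters and managing subscriptions. Most views support
# both HTML rendering and JSON responses (selected with format=json in the
# request), and subscription changes fire the subscription/unsubscription
# signals and send a notification email.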
def is_json_request(request):
"""
Return True if the request asks for the result in JSON (format=json
in the request parameters), False otherwise.
"""
return ('format' in request.REQUEST and
request.REQUEST['format'].lower() == 'json')
def sync_subscriptions(sub_form):
"""
Do all the work of (un)subscribing newsletters for an account
"""
old_subs = Subscription.objects.filter(email=sub_form.cleaned_data['email'])
old_subs_nl = [item.newsletter for item in old_subs]
new_subs = Newsletter.objects.filter(name__in=sub_form.get_subscriptions())
unsubs = [sub for sub in old_subs if sub.newsletter not in new_subs]
unsub_nl = [sub.newsletter for sub in unsubs]
subs = [nl for nl in new_subs if nl not in old_subs_nl]
for item in unsubs:
unsubscription.send(
sender=item.newsletter,
email=sub_form.cleaned_data['email'],
newsletter=item.newsletter)
item.delete()
for item in subs:
sub = Subscription.objects.create(
email=sub_form.cleaned_data['email'],
newsletter=item,
confirmed=AUTO_CONFIRM,
)
subscription.send(
sender=item,
email=sub_form.cleaned_data['email'],
newsletter=item,
)
send_notification(unsub_nl, subs, sub_form.cleaned_data['email'])
return unsub_nl, subs
def send_notification(unsub_newsletters, sub_newsletters, email):
"""
Send an email notifying the ``email`` recipient they have been
subscribed to the newsletters in sub_newsletters, and/or unsubscribed
from the newsletters in unsub_newsletters.
"""
current_site = Site.objects.get_current()
t = loader.get_template('newsletters/notification_email.txt')
c = Context({
'unsubscriptions': unsub_newsletters,
'subscriptions': sub_newsletters,
'site': current_site,
'email': email,
})
send_mail(EMAIL_NOTIFICATION_SUBJECT, t.render(c), FROM_EMAIL, [email],
fail_silently=False)
def detail(request, newsletter_slug):
"""
Provide a rendered HTML version of the newsletter
"""
newsletter = get_object_or_404(Newsletter, slug=newsletter_slug)
templates = [
'newsletters/%s.html' % newsletter_slug,
'newsletters/%s.html' % newsletter.category.slug,
DEFAULT_TEMPLATE
]
if newsletter.template:
templates.insert(0, newsletter.template)
return render_to_response(templates, {
'newsletter': newsletter,
'category': newsletter.category,
'ads': Advertisement.objects.current_set(newsletter)
}, RequestContext(request))
def manage(request, email=None):
"""
Provide a way to manage all subscriptions for a user
"""
message = []
if request.method == 'GET' and email:
if is_json_request(request):
return JSONResponse({
'newsletters': get_newsletters_with_subs(email)})
form = NewsletterForm(initial={'email': email})
elif request.method == 'GET' and email is None:
return HttpResponseRedirect(
reverse('newsletters_list'))
elif request.method == 'POST':
form = NewsletterForm(request.POST)
if form.is_valid():
unsubs, subs = sync_subscriptions(form)
if unsubs:
message.append(
"Successfully unsubscribed from %s" %
', '.join(map(unicode, unsubs)))
if subs:
message.append(
"Successfully subscribed to %s" %
', '.join(map(unicode, subs)))
if is_json_request(request):
return JSONResponse({
'newsletters': get_newsletters_with_subs(email),
'messages': message})
return render_to_response('newsletters/manage.html', {
'newsletters': Newsletter.objects.all(),
'form': form,
'messages': message
}, RequestContext(request))
def newsletter_list(request):
"""
Return a list of newsletters
"""
if request.method == 'GET' and 'u' in request.GET:
return HttpResponseRedirect(
reverse('newsletters_manage', kwargs={'email': request.GET['u']}))
if is_json_request(request):
if 'category__id' in request.REQUEST:
cat = request.REQUEST['category__id']
newsletters = Newsletter.objects.filter(category__id=cat).values()
elif 'category__slug' in request.REQUEST:
cat = request.REQUEST['category__slug']
newsletters = Newsletter.objects.filter(category__slug=cat).values()
else:
newsletters = Newsletter.objects.values()
return JSONResponse(
{'newsletters': newsletters,
'signup_url': reverse('newsletters_bulk_subscribe')})
return render_to_response('newsletters/list.html', {
'newsletters': Newsletter.objects.all(),
'form': NewsletterForm(),
}, RequestContext(request))
def is_subscribed(request, newsletter_slug):
"""
Check whether the user is subscribed to the newsletter. Requires the
``email`` and ``id`` request parameters.
"""
if 'email' in request.REQUEST and 'id' in request.REQUEST:
try:
Subscription.objects.get(
email=request.REQUEST['email'],
newsletter__slug=newsletter_slug)
return JSONResponse({'is_subscribed': True})
except Subscription.DoesNotExist:
return JSONResponse({'is_subscribed': False})
def bulk_subscribe(request):
"""
Subscribe to many newsletters at once. Only for use with JSON and POST
"""
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
try:
if request.method != 'POST':
return JSONResponse({
'success': False,
'message': 'Form must be submitted using POST.'})
values = dict(request.POST.items())
if 'csrfmiddlewaretoken' in values:
del values['csrfmiddlewaretoken']
try:
validate_email(values['email'])
email = values['email']
newsletters = [key for key in values.keys() if key != 'email']
if not newsletters:
return JSONResponse({
'success': False,
'message': "Please select at least one newsletter."})
nletters = Newsletter.objects.filter(slug__in=newsletters)
for newsletter in nletters:
try:
sub = Subscription.objects.get(email=email,
newsletter=newsletter)
except Subscription.DoesNotExist:
# The user wasn't subscribed, so we'll create it.
sub = Subscription.objects.create(email=email,
newsletter=newsletter)
subscription.send(
sender=newsletter,
email=email,
newsletter=newsletter,
)
send_notification([], nletters, email)
return JSONResponse({
'success': True,
'message': 'You signed up for ' + ", ".join([x.name for x in nletters])})
except ValidationError, e:
return JSONResponse({
'success': False,
'message': " ".join(e.messages)})
except Exception, e:
return JSONResponse({
'success': False,
'message': "We're sorry but a strange error occurred. (%s)" % str(e)})
def subscribe(request, newsletter_slug):
"""
Subscribe a user to the newsletter
Requires a POST with an email
"""
newsletter = get_object_or_404(Newsletter, slug=newsletter_slug)
if request.method != 'POST':
if is_json_request(request):
return JSONResponse({
'success': False,
'message': 'Only POST methods are allowed.'})
return HttpResponseNotAllowed(['POST',])
if 'email' not in request.POST:
if is_json_request(request):
return JSONResponse({
'success': False,
'message': 'No email field was included.'})
raise Http404("email not included in POST")
try:
sub = Subscription.objects.get(email=request.POST['email'],
newsletter=newsletter)
except Subscription.DoesNotExist:
# The user wasn't subscribed, so we'll create it.
sub = Subscription.objects.create(email=request.POST['email'],
newsletter=newsletter)
send_notification([], [newsletter], request.POST['email'])
subscription.send(sender=newsletter, email=request.POST['email'],
newsletter=newsletter)
if is_json_request(request):
return JSONResponse({'success': True, 'message': ''})
return render_to_response('newsletters/subscribe.html', {
'newsletter': newsletter
}, RequestContext(request))
def unsubscribe(request, newsletter_slug):
"""
Unsubscribe a user from the newsletter.
Requires a POST with an email
"""
newsletter = get_object_or_404(Newsletter, slug=newsletter_slug)
if request.method != 'POST':
if is_json_request(request):
return JSONResponse({
'success': False,
'message': 'Only POST methods are allowed.'})
return HttpResponseNotAllowed(['POST',])
if 'email' not in request.POST:
if is_json_request(request):
return JSONResponse({
'success': False,
'message': 'No email field was included.'})
raise Http404("email not included in POST")
try:
sub = Subscription.objects.get(email=request.POST['email'],
newsletter=newsletter)
sub.delete()
unsubscription.send(sender=newsletter, email=request.POST['email'],
newsletter=newsletter)
send_notification([newsletter], [], request.POST['email'])
except Subscription.DoesNotExist:
pass # The user wasn't subscribed, so just fail gracefully.
if is_json_request(request):
return JSONResponse({'success': True, 'message': ''})
return render_to_response('newsletters/unsubscribe.html', {
'newsletter': newsletter
}, RequestContext(request))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
import unittest
from quikql import *
from sqlite3 import IntegrityError
class QuikqlTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.path = os.getcwd() + '/radio.db'
remove_db(self.path)
self.testdb = Quikql('radio.db')
self.testdb.create_table('artists', artists_schema, pkey=('artist',))
self.testdb.create_table('music', music_schema,
fkey={'artist':('artists', 'artist')})
with open('recordings.txt') as f:
raw_json_str = f.read()
self.json_data = json.loads(raw_json_str)
for artist in self.json_data['artists']:
self.testdb.insert_row('artists', {'artist':artist})
for title in self.json_data['artists'][artist]['titles']:
self.testdb.insert_row('music', {'artist':artist,
'track':title})
@classmethod
def tearDownClass(self):
remove_db(self.path)
def test_create_table(self):
tables_before = [i[0] for i in self.testdb.get_tables()]
self.assertNotIn(test_table, tables_before)
self.testdb.create_table(test_table, test_schema)
tables_after = [i[0] for i in self.testdb.get_tables()]
self.assertIn(test_table, tables_after)
self.testdb.delete_table(test_table)
def test_delete_table(self):
self.testdb.create_table(test_table, test_schema)
tables_before = [i[0] for i in self.testdb.get_tables()]
self.assertIn(test_table, tables_before)
self.testdb.delete_table(test_table)
tables_after = [i[0] for i in self.testdb.get_tables()]
self.assertNotIn(test_table, tables_after)
def test_get_schema(self):
test_artists_schema = self.testdb.get_schema('artists')
test_music_schema = self.testdb.get_schema('music')
artists_schema_map = {i[1]:i[2] for i in test_artists_schema}
music_schema_map = {i[1]:i[2] for i in test_music_schema}
self.assertEqual(artists_schema, artists_schema_map)
self.assertEqual(music_schema, music_schema_map)
def test_get_tables(self):
test_tables = self.testdb.get_tables()
self.assertSequenceEqual(['artists', 'music'],
[t[0] for t in test_tables])
def test_insert_row(self):
artists_row = {'artist':'Lifetones'}
get_field = ('Lifetones',)
get_before = self.testdb.get_row('artists', artists_row)
self.assertIsNone(get_before)
self.testdb.insert_row('artists', artists_row)
get_after = self.testdb.get_row('artists', artists_row)
self.assertEqual(get_field, get_after)
self.testdb.delete_row('artists', artists_row)
def test_insert_rows(self):
artists_rows = [{'artist':'Leon Vynehall'},
{'artist':'JaimeXX'},
{'artist':'Caribou'},
{'artist':'Dusky'}]
for artist in artists_rows:
get_before = self.testdb.get_row('artists', artist)
self.assertIsNone(get_before)
self.testdb.insert_rows('artists', *artists_rows)
for artist in artists_rows:
get_after = self.testdb.get_row('artists', artist)
self.assertEqual(artist['artist'], *get_after)
self.testdb.delete_row('artists', artist)
def test_get_row(self):
artist_row = {'artist':'Lifetones'}
self.testdb.insert_row('artists', artist_row)
test_get = self.testdb.get_row('artists', artist_row, size=ALL)
self.assertEqual(('Lifetones',), test_get[0])
self.testdb.delete_row('artists', artist_row)
def test_delete_row(self):
row = {'artist':'Neal Howard'}
self.testdb.insert_row('artists', row)
row_before = self.testdb.get_row('artists', row)
self.assertIsNotNone(row_before)
self.testdb.delete_row('artists', row)
row_after = self.testdb.get_row('artists', row)
self.assertIsNone(row_after)
def test_delete_row_InvalidRow(self):
del_row = {'artist':'The Doors'}
table_before_delete = self.testdb.dump_table('artists')
self.testdb.delete_row('artists', del_row)
table_after_delete = self.testdb.dump_table('artists')
self.assertEqual(table_before_delete, table_after_delete)
def test_count_field(self):
artist_count = len(self.json_data['artists'])
self.assertEqual(artist_count, self.testdb.count('artists', 'artist')[0])
def test_count_InvalidArg(self):
self.assertRaises(InvalidArg, self.testdb.count,
'artists', ['field1', 'field2'])
def test_minimum_field(self):
min_artist = 'aphex twin'
self.testdb.insert_row('artists', {'artist':min_artist})
self.assertEqual(min_artist, self.testdb.min('artists', 'artist')[0])
self.testdb.delete_row('artists', {'artist':min_artist})
def test_minimum_InvalidArg(self):
self.assertRaises(InvalidArg, self.testdb.min, 'artists', ['field1',
'field2'])
def test_maximum_field(self):
max_artist = 'zz top'
self.testdb.insert_row('artists', {'artist':max_artist})
self.assertEqual(max_artist, self.testdb.max('artists', 'artist')[0])
self.testdb.delete_row('artists', {'artist':max_artist})
def test_maximum_InvalidArg(self):
self.assertRaises(InvalidArg, self.testdb.max, 'artists', ['field1',
'field2'])
def test_sum(self):
duration_updates = [({'duration':4.02}, {'track':'Blue Moon'}),
({'duration':8.12}, {'track':'Player'}),
({'duration':2.08}, {'track':'Nettle Leaves'})]
test_total = sum([i[0]['duration'] for i in duration_updates])
for update in duration_updates:
self.testdb.update_row('music', update[0], row=update[1])
self.assertEqual(test_total, self.testdb.sum('music', 'duration')[0])
def test_sum_InvalidArg(self):
self.assertRaises(InvalidArg, self.testdb.sum, 'artists', ['field1',
'field2'])
def test_retrieve_table_content(self):
artists = [entry for entry in self.json_data['artists']]
table_artists = [i[0] for i in self.testdb.dump_table('artists')]
self.assertEqual(artists, table_artists)
def test_update_row(self):
update_row = {'artist':'deadmau5', 'track':'Fallen'}
update_column = {'duration':2.31}
before_duration = self.testdb.get_row('music', update_row)
self.assertNotIn(2.31, before_duration)
self.testdb.update_row('music', update_column, update_row)
after_duration = self.testdb.get_row('music', update_row)
self.assertIn(2.31, after_duration)
def test_update_row_InvalidRow(self):
invalid_update_row = {'artist':'Led Zeppelin'}
update_column = {'track':'Misty Mountain Top'}
self.testdb.update_row('music', update_column, invalid_update_row)
get_row = self.testdb.get_row('music', {'artist':'Led Zeppelin',
'track':'Misty Mountain Top'})
self.assertIsNone(get_row)
def test_insert_row_InvalidArg(self):
invalid_insert_row = [('artist', 'Diplo')]
self.assertRaises(InvalidArg, self.testdb.insert_row,
'artists', invalid_insert_row)
def test_delete_row_InvalidArg(self):
invalid_delete_row = [('artist', 'Frank Sinatra')]
self.assertRaises(InvalidArg, self.testdb.delete_row,
'artists', invalid_delete_row)
def test_get_row_InvalidArg(self):
invalid_get_row = [('artist', 'Franz Schubert')]
self.assertRaises(InvalidArg, self.testdb.get_row,
'artists', invalid_get_row)
def test_create_table_InvalidSQLType(self):
table = 'Foo'
schema = {'Bar':'Baz'}
self.assertRaises(InvalidSQLType, self.testdb.create_table,
table, schema)
def test_insert_row_InvalidForeignKey(self):
invalid_foreignkey = {'artist':'foo', 'track':'bar', 'album':'baz'}
self.assertRaises(IntegrityError, self.testdb.insert_row,
'music', invalid_foreignkey)
def remove_db(path):
if os.path.isfile(path):
os.unlink(path)
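# The table schemas and test-table constants used by QuikqlTest are defined in
# the __main__ block below, so they only exist once this file is executed
# directly; setUpClass depends on them, so the tests must be run via this file.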
if __name__ == '__main__':
artists_schema = {'artist':TEXT}
music_schema = {'track':TEXT, 'duration':REAL,
'album':TEXT, 'artist':TEXT}
test_table = 'Open_Source_Software'
test_schema = {'name':TEXT, 'language':TEXT, 'loc':INTEGER}
unittest.main(verbosity=3)
|
|
from __future__ import annotations
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block_2d,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
from pandas.core.internals.blocks import Block
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
# Assertions disabled for performance
# for tup in mgrs_indexers:
# # caller is responsible for ensuring this
# indexers = tup[1]
# assert concat_axis not in indexers
if concat_axis == 0:
return _concat_managers_axis0(mgrs_indexers, axes, copy)
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
# Assertion disabled for performance
# assert all(not x[1] for x in mgrs_indexers)
concat_plans = [_get_mgr_concatenation_plan(mgr) for mgr, _ in mgrs_indexers]
concat_plan = _combine_concat_plans(concat_plans)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, ndim=2)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block_2d(values, placement=placement)
blocks.append(b)
return BlockManager(tuple(blocks), axes)
def _concat_managers_axis0(
mgrs_indexers, axes: list[Index], copy: bool
) -> BlockManager:
"""
concat_managers specialized to concat_axis=0, with reindexing already
having been done in _maybe_reindex_columns_na_proxy.
"""
had_reindexers = {
i: len(mgrs_indexers[i][1]) > 0 for i in range(len(mgrs_indexers))
}
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
mgrs = [x[0] for x in mgrs_indexers]
offset = 0
blocks = []
for i, mgr in enumerate(mgrs):
# If we already reindexed, then we definitely don't need another copy
made_copy = had_reindexers[i]
for blk in mgr.blocks:
if made_copy:
nb = blk.copy(deep=False)
elif copy:
nb = blk.copy()
else:
# by slicing instead of copy(deep=False), we get a new array
# object, see test_concat_copy
nb = blk.getitem_block(slice(None))
nb._mgr_locs = nb._mgr_locs.add(offset)
blocks.append(nb)
offset += len(mgr.items)
return BlockManager(tuple(blocks), axes)
def _maybe_reindex_columns_na_proxy(
axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]]
) -> list[tuple[BlockManager, dict[int, np.ndarray]]]:
"""
Reindex along columns so that all of the BlockManagers being concatenated
have matching columns.
Columns added in this reindexing have dtype=np.void, indicating they
should be ignored when choosing a column's final dtype.
"""
new_mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] = []
for mgr, indexers in mgrs_indexers:
# For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
# is a cheap reindexing.
for i, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[i],
indexers[i],
axis=i,
copy=False,
only_slice=True, # only relevant for i==0
allow_dups=True,
use_na_proxy=True, # only relevant for i==0
)
new_mgrs_indexers.append((mgr, {}))
return new_mgrs_indexers
def _get_mgr_concatenation_plan(mgr: BlockManager):
"""
Construct concatenation plan for given block manager.
Parameters
----------
mgr : BlockManager
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
mgr_shape = tuple(mgr_shape_list)
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
assert blkno != -1
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
if not unit_no_ax0_reindexing:
# create block from subset of columns
blk = blk.getitem_block(ax0_blk_indexer)
# Assertions disabled for performance
# assert blk._mgr_locs.as_slice == placements.as_slice
# assert blk.shape[0] == shape[0]
unit = JoinUnit(blk, shape)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block: Block, shape: Shape):
# Passing shape explicitly is required for cases when block is None.
self.block = block
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)})"
@cache_readonly
def is_na(self) -> bool:
blk = self.block
if blk.dtype.kind == "V":
return True
return False
def get_reindexed_values(self, empty_dtype: DtypeObj) -> ArrayLike:
values: ArrayLike
if self.is_na:
return make_na_array(empty_dtype, self.shape)
else:
if not self.block._can_consolidate:
# preserve these for validation in concat_compat
return self.block.values
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.values
return values
def make_na_array(dtype: DtypeObj, shape: Shape) -> ArrayLike:
"""
Construct an np.ndarray or ExtensionArray of the given dtype and shape
holding all-NA values.
"""
if is_datetime64tz_dtype(dtype):
# NaT here is analogous to dtype.na_value below
i8values = np.full(shape, NaT.value)
return DatetimeArray(i8values, dtype=dtype)
elif is_1d_only_ea_dtype(dtype):
dtype = cast(ExtensionDtype, dtype)
cls = dtype.construct_array_type()
missing_arr = cls._from_sequence([], dtype=dtype)
nrows = shape[-1]
taker = -1 * np.ones((nrows,), dtype=np.intp)
return missing_arr.take(taker, allow_fill=True, fill_value=dtype.na_value)
elif isinstance(dtype, ExtensionDtype):
# TODO: no tests get here, a handful would if we disabled
# the dt64tz special-case above (which is faster)
cls = dtype.construct_array_type()
missing_arr = cls._empty(shape=shape, dtype=dtype)
missing_arr[:] = dtype.na_value
return missing_arr
else:
# NB: we should never get here with dtype integer or bool;
# if we did, the missing_arr.fill would cast to gibberish
missing_arr = np.empty(shape, dtype=dtype)
fill_value = _dtype_to_na_value(dtype)
missing_arr.fill(fill_value)
return missing_arr
def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
"""
Concatenate values from several join units along axis=1.
"""
empty_dtype = _get_empty_dtype(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype) for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(is_1d_only_ea_obj(t) for t in to_concat):
# TODO(EA2D): special case not needed if all EAs used HybridBlocks
# NB: we are still assuming here that Hybrid blocks have shape (1, N)
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
# error: No overload variant of "__getitem__" of "ExtensionArray" matches
# argument type "Tuple[int, slice]"
to_concat = [
t if is_1d_only_ea_obj(t) else t[0, :] # type: ignore[call-overload]
for t in to_concat
]
concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
concat_values = ensure_block_shape(concat_values, 2)
else:
concat_values = concat_compat(to_concat, axis=1)
return concat_values
def _dtype_to_na_value(dtype: DtypeObj):
"""
Find the NA value to go with this dtype.
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif dtype.kind in ["m", "M"]:
return dtype.type("NaT")
elif dtype.kind in ["f", "c"]:
return dtype.type("NaN")
elif dtype.kind == "b":
# different from missing.na_value_for_dtype
return None
elif dtype.kind in ["i", "u"]:
return np.nan
elif dtype.kind == "O":
return np.nan
raise NotImplementedError
def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
"""
if len(join_units) == 1:
blk = join_units[0].block
return blk.dtype
if _is_uniform_reindex(join_units):
# FIXME: integrate property
empty_dtype = join_units[0].block.dtype
return empty_dtype
needs_can_hold_na = any(unit.is_na for unit in join_units)
dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
dtype = find_common_type(dtypes)
if needs_can_hold_na:
dtype = ensure_dtype_can_hold_na(dtype)
return dtype
def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
_concatenate_join_units (which uses `concat_compat`).
"""
first = join_units[0].block
if first.dtype.kind == "V":
return False
return (
# exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
all(type(ju.block) is type(first) for ju in join_units)
and
# e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
all(
is_dtype_equal(ju.block.dtype, first.dtype)
# GH#42092 we only want the dtype_equal check for non-numeric blocks
# (for now, may change but that would need a deprecation)
or ju.block.dtype.kind in ["b", "i", "u"]
for ju in join_units
)
and
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
# only use this path when there is something to concatenate
len(join_units) > 1
)
def _is_uniform_reindex(join_units) -> bool:
return (
# TODO: should this be ju.block._can_hold_na?
all(ju.block.is_extension for ju in join_units)
and len({ju.block.dtype.name for ju in join_units}) == 1
)
def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, shape=extra_shape)
def _combine_concat_plans(plans):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
else:
# singleton list so we can modify it as a side-effect within _next_or_none
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# _trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
|
|
#! /usr/bin/python
import sys, os, timeit, pprint
moddir = os.path.join( os.path.dirname( __file__ ), '..' )
sys.path = [moddir] + sys.path
import pytest
from dynconfig.read import *
from utils import *
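# The configs below exercise dynconfig's template syntax as used in these tests:
# "$( ... )" expressions are evaluated after parsing, "{name}" references a
# sibling key, "{../path}" and "{/path}" reference relative and absolute keys in
# the tree, and "m" exposes the math module (e.g. m.pi, m.pow).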
def test_simple():
data = '''
var1 : 1
var2 : some string
var3 : 3
var4 : "$({var3} + m.pi + 2)"
var5 : "$({var4} + 2.0)"
nest1 : &nest
var1 : 11
var2 : "$({var3} + 12)"
var3 : "$({var1} + 12)"
var4 : "$({var3} + 12)"
var5 : "$({../nest1/var3} + 12)"
list1 :
- 01
- "$({0})"
- 03
nest2 :
var1 : 111
var2 : 112
var3 : "$({var1})"
var4 : "$({/var1})"
var5 : "$({/nest1/var1})"
'''
data = readConfig( data )
assert data['var1'] == 1
assert data['var2'] == 'some string'
assert data['var3'] == 3
assert data['var4'] == Approx(3 + 3.14159 + 2 )
assert data['var5'] == Approx(3 + 3.14159 + 2 + 2.0 )
assert data['nest1']['var1'] == 11
assert data['nest1']['var2'] == 11 + 12 + 12
assert data['nest1']['var3'] == 11 + 12
assert data['nest1']['var4'] == 11 + 12 + 12
assert data['nest1']['var5'] == 11 + 12 + 12
assert data['nest1']['list1'][0] == 1
assert data['nest1']['list1'][1] == 1
assert data['nest1']['list1'][2] == 3
assert data['nest1']['nest2']['var1'] == 111
assert data['nest1']['nest2']['var2'] == 112
assert data['nest1']['nest2']['var3'] == 111
assert data['nest1']['nest2']['var4'] == 1
assert data['nest1']['nest2']['var5'] == 11
def test_large():
data = '''
var1 : 1
var2 : some string
var3 : 3
var4 : '$({var3} + m.pi + 2)'
var5 : '$({var4} + 2.0)'
nest1 : &nest
var1 : 11
var2 : '$({var3} + 12)'
var3 : '$({var1} + 12)'
var4 : '$({var3} + 12)'
var5 : '$({../nest1/var3} + 12)'
nest2 :
var1 : 111
var2 : 112
var3 : '$({var1})'
var4 : '$({/var1})'
var5 : '$({/nest1/var1})'
nest2 :
<< : *nest
nest3 :
<< : *nest
nest4 :
<< : *nest
nest5 :
<< : *nest
nest6 :
<< : *nest
nest7 :
<< : *nest
nest8 :
<< : *nest
nest9 :
<< : *nest
nest10 :
<< : *nest
nest10 :
<< : *nest
nest11 :
<< : *nest
nest12 :
<< : *nest
nest13 :
<< : *nest
nest14 :
<< : *nest
nest15 :
<< : *nest
'''
data = readConfig( data )
assert data['var1'] == 1
assert data['var2'] == 'some string'
assert data['var3'] == 3
assert data['var4'] == Approx( 3 + 3.14159 + 2 )
assert data['var5'] == Approx( 3 + 3.14159 + 2 + 2.0 )
assert data['nest10']['var1'] == 11
assert data['nest10']['var2'] == 11 + 12 + 12
assert data['nest10']['var3'] == 11 + 12
assert data['nest10']['var4'] == 11 + 12 + 12
assert data['nest10']['var5'] == 11 + 12 + 12
assert data['nest10']['nest2']['var1'] == 111
assert data['nest10']['nest2']['var2'] == 112
assert data['nest10']['nest2']['var3'] == 111
assert data['nest10']['nest2']['var4'] == 1
assert data['nest10']['nest2']['var5'] == 11
assert data['nest15']['nest2']['var5'] == 11
@pytest.mark.skip(reason="need to re-implement include function.")
def test_includes():
nesteddata = { 'one' : 1, 'two' : 2 }
data = r'''
var1 : 1
var2 : some string
nest1 : include('example.yaml')
nest2 : include('example.yaml')
'''
with open('example.yaml','w') as f:
f.write(yaml.dump(nesteddata))
data = readConfig( data )
assert data['nest1']['one'] == 1
assert data['nest1']['two'] == 2
assert data['nest2']['one'] == 1
assert data['nest2']['two'] == 2
@pytest.mark.skip(reason="need to re-implement datatable function.")
def test_datatable():
with open('example-data.txt', 'w') as f:
f.write('''
# units: cm 1/cm
1.0 4
1.1 3
1.2 2
''')
data = r'''
var1 : 1
var2 : some string
data : DataTable('example-data.txt')
'''
data = readConfig( data, render_filters=[toNum] )
assert data['data'](0,0) == 1.0
assert data['data'](0,1) == 4
assert data['data'](1,0) == 1.1
assert data['data'](1,1) == 3
assert data['data'](2,0) == 1.2
assert data['data'](2,1) == 2
def test_passthrough():
'''test that a config file containing no template expressions works'''
data = '''
# heat solver config file
grid:
x:
min : 0
max : 10
N : 100
y:
min : 0
max : 20
N : 200
time:
start : 0
stop : 10
dt : 0.001
'''
data = readConfig( data )
assert data['grid']['x']['min'] == 0
assert data['grid']['x']['max'] == 10
assert data['grid']['x']['N'] == 100
assert data['grid']['y']['min'] == 0
assert data['grid']['y']['max'] == 20
assert data['grid']['y']['N'] == 200
assert data['time']['start'] == 0
assert data['time']['stop'] == 10
assert data['time']['dt'] == 0.001
def test_physicsy():
'''test a config typical of physics simulations'''
data = '''
# heat solver config file
res: 0.001
grid:
x:
min : 0
max : 10
N : $( ({max} - {min})/{/res} | int )
y:
min : 0
max : $(2*{../x/max})
N : $( ({max} - {min})/{/res} | int )
z:
min : 0
max : $(2*{../y/max})
N : $( ({max} - {min})/{/res} | int )
time:
start : 0
stop : $(m.pow(10,2))
dt : 0.001
'''
data = readConfig( data, return_pdict=True )
assert data['/grid/x/min'] == 0
assert data['/grid/x/max'] == 10
assert data['/grid/x/N'] == 10000
assert data['/grid/y/min'] == 0
assert data['/grid/y/max'] == 20
assert data['/grid/y/N'] == 20000
assert data['/grid/z/min'] == 0
assert data['/grid/z/max'] == 40
assert data['/grid/z/N'] == 40000
assert data['/time/start'] == 0
assert data['/time/stop'] == 100
assert data['/time/dt'] == 0.001
@pytest.mark.skip(reason="need to port to new render function.")
def test_datatable2():
with open('abscoe-data.txt', 'w') as f:
f.write('''
# units: nm 1/cm
400 100
450 200
500 300
550 400
600 500
650 600
700 700
750 800
''')
data = '''
{{py:
import math
}}
res: 0.001
wavelength : 500 nm
grid:
x:
min : 0
max : 10
N : '{{ (c["max",int] - c["min",int])/c["/res"] }}'
time:
start : 0
stop : {{math.pow(10,2)}}
dt : 0.001
materials :
- desc : 'absorbing material'
abscoe_data : DataTable('abscoe-data.txt')
abscoe :
- "{{ c['../abscoe_data'].rowstr( c['/wavelength'], 1, '1/cm' ) }}"
'''
data = readConfig( data, return_DataTree=True )
# pprint.pprint(data.data)
assert data['/materials/0/abscoe/0'] == "500 300.0"
|
|
# coding: utf-8
'''
------------------------------------------------------------------------------
Copyright 2015 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
==================================================
UnitTestUtilities.py
--------------------------------------------------
requirements:
* ArcGIS Desktop 10.X+ or ArcGIS Pro 1.X+
* Python 2.7 or Python 3.4
author: ArcGIS Solutions
company: Esri
==================================================
description:
Basic methods used in unit tests.
==================================================
history:
5/10/2016 - JH - initial creation
==================================================
'''
import datetime
import logging
import glob
import os
import platform
import traceback
import sys
import arcpy
import Configuration
def getLoggerName():
''' get unique log file name '''
if Configuration.DEBUG == True:
print("UnitTestUtilities - getLoggerName")
seq = 0
name = nameFromDate(seq)
# increment seq until the name doesn't exist as a path
while os.path.exists(os.path.join(Configuration.logPath, name)):
seq += 1
name = nameFromDate(seq)
#logFilePath = os.path.join(Configuration.logPath, name)
return name
def getCurrentDateTimeForLogFile():
''' Get current date/time string as: YYYY-MM-DD_HH-MM-SS '''
return datetime.datetime.now().strftime("%Y-%B-%d_%H-%M-%S")
def getCurrentDateTime():
''' Get current date/time string as: DD/MM/YYYY HH:MM:SS '''
return datetime.datetime.now().strftime("%d/%B/%Y %H:%M:%S")
def nameFromDate(seq):
''' Make log file name'''
return 'MTGT_' + str(getCurrentDateTimeForLogFile()) + '_seq' + str(seq) + '.log'
def makeFileFromPath(filePath):
''' create an empty file at the given path if it doesn't
already exist, and return the path '''
if not checkExists(filePath):
try:
fd = open(filePath, 'a')
fd.close()
except:
print("Can't make file for some reason.")
return filePath
def makeFolderFromPath(folderPath):
''' make a folder(s) from a path if it doesn't
already exist '''
if not checkExists(folderPath):
try:
os.makedirs(folderPath)
except:
print("Can't make the folder for some reason.")
return folderPath
def initializeLogger(name, logLevel = logging.DEBUG):
''' get and return named logger '''
if Configuration.DEBUG == True:
print("UnitTestUtilities - initializeLogger")
# Check if the path to the log files exists, and create if not
if not os.path.exists(Configuration.logPath):
dummy = makeFolderFromPath(Configuration.logPath)
# get a unique log file name if we don't have a name already
if name == None or name == "":
name = getLoggerName()
logFile = os.path.join(Configuration.logPath, name)
Configuration.LoggerFile = logFile
# if the log file does NOT exist, create it
if not os.path.exists(logFile):
logFile = makeFileFromPath(logFile)
logger = logging.getLogger(name)
logger.setLevel(logLevel)
logFormatter = logging.Formatter('%(levelname)s: %(asctime)s %(message)s')
fileHandler = logging.FileHandler(logFile)
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
return logger
def setUpLogFileHeader():
''' Add a header to log file when initialized '''
Configuration.Logger.debug("UnitTestUtilities - setUpLogFileHeader")
Configuration.Logger.info("------------ Begin Tests ------------------")
Configuration.Logger.info("Platform: {0}".format(platform.platform()))
Configuration.Logger.info("Host Name: " + platform.node())
Configuration.Logger.info("Python Version {0}".format(sys.version))
agsInstallInfo = arcpy.GetInstallInfo()
Configuration.Logger.info("Product: {0}, Version: {1}, Installed on: {2}, Build: {3}.".format(agsInstallInfo['ProductName'], \
agsInstallInfo['Version'], agsInstallInfo['InstallDate'], agsInstallInfo['BuildNumber']))
Configuration.Logger.info("-------------------------------------------")
def checkArcPy():
''' sanity check that ArcPy is working '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkArcPy")
arcpy.AddMessage("ArcPy works")
def checkExists(p):
''' Python check for existence '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkExists")
return os.path.exists(p)
def createScratch(scratchPath):
''' create scratch geodatabase '''
if Configuration.DEBUG == True: print("UnitTestUtilities - createScratch")
scratchName = 'scratch.gdb'
scratchGDB = os.path.join(scratchPath, scratchName)
if arcpy.Exists(scratchGDB):
print("Scratch already exists")
return scratchGDB
try:
if Configuration.DEBUG == True: print("Creating scratch geodatabase...")
arcpy.CreateFileGDB_management(scratchPath, scratchName)
if Configuration.DEBUG == True: print("Created scratch gdb.")
except:
print("Problem creating scratch.gdb")
return scratchGDB
def deleteScratch(scratchPath):
''' delete scratch geodatabase '''
if Configuration.DEBUG == True: print("UnitTestUtilities - deleteScratch")
# WORKAROUND: To problem encountered running arcpy from the unit test harness
# in Pro where Delete_management deletes everything in the scratch.gdb folder
# *except* the lock files and the scratch.gdb folder remains
# corrupting the scratch.gdb
findLocksPattern = os.path.normpath(os.path.join(scratchPath, '*.lock'))
lockFileList = glob.glob(findLocksPattern)
if (len(lockFileList) > 0):
print("*** WARNING: scratch.gdb contains lock files, skipping delete ***")
return
try:
arcpy.Delete_management(scratchPath)
if Configuration.DEBUG == True: print("Deleted scratch gdb.")
except:
print("scratch.gdb delete failed")
return
def checkFilePaths(paths):
''' check file/folder paths exist '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkFilePaths")
for path2check in paths:
if os.path.exists(path2check):
if Configuration.DEBUG == True: print("Valid Path: " + path2check)
else:
raise Exception('Bad Path: ' + str(path2check))
def checkGeoObjects(objects):
''' check geospatial stuff exists '''
if Configuration.DEBUG == True: print("UnitTestUtilities - checkGeoObjects")
for object2Check in objects:
if not arcpy.Exists(object2Check):
# Might also just be normal file that describe doesn't recognize,
# so check the file/path case also
if not os.path.exists(str(object2Check)) :
print("--> Invalid Object: " + str(object2Check))
arcpy.AddError('Bad Input: ' + str(object2Check))
raise Exception('Bad Input: ' + str(object2Check))
else:
if Configuration.DEBUG == True: print("Valid Object: " + object2Check)
def handleArcPyError():
''' Basic GP error handling, errors printed to console and logger '''
if Configuration.DEBUG == True: print("UnitTestUtilities - handleArcPyError")
# Get the arcpy error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
print(msgs)
Configuration.Logger.error(msgs)
# Raise exception so test will fail
raise Exception('ArcPy Error')
def handleGeneralError(exception = None):
''' Basic error handler, errors printed to console and logger '''
if Configuration.DEBUG == True: print("UnitTestUtilities - handleGeneralError")
if isinstance(exception, Exception):
print(str(exception))
Configuration.Logger.error(str(exception))
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Print Python error messages for use in Python / Python Window
print(pymsg + "\n")
Configuration.Logger.error(pymsg)
print(msgs)
Configuration.Logger.error(msgs)
if isinstance(exception, Exception):
raise exception
else:
raise Exception('General Error')
def geoObjectsExist(objects):
''' Return true if all of the input list of geo-objects exist, false otherwise '''
allExist = True
for obj in objects:
if not arcpy.Exists(obj):
allExist = False
return allExist
def folderPathsExist(paths):
''' Return true if all input paths exist, false otherwise '''
allExist = True
for p in paths:
if not os.path.exists(p):
allExist = False
return allExist
def deleteIfExists(dataset):
''' Delete the input dataset if it exists '''
if (arcpy.Exists(dataset)):
arcpy.Delete_management(dataset)
arcpy.AddMessage("deleted dataset: " + dataset)
|
|
#!/usr/bin/env python
"""
Job manager utility.
This does not run the jobs but simply manages the records in the PlaceJob
table.
This is easier to managing jobs using SQL. Though this could been done in a
simpler way using a SQL UI or even a YAML config. Also, once I set this up I
didn't need that much flexibility, so the inserting of configured data and
adding a few records with the SINGLE options are the most import. The rest
doesn't have to exist in this tool - but this tool exists and works now so use
it.
Usage: $ ./job.py --help
# Or use functions of module in python console.
$ python
>>> from utils.manager import jobs
>>> jobs.insertPlaceByName('United Kingdom')
"""
import os
import sys
from sqlobject.dberrors import DuplicateEntryError
# Allow imports to be done when executing this file directly.
sys.path.insert(
0,
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
),
)
from lib import database as db, jobs
from lib.config import AppConf
conf = AppConf()
def getCounts():
"""
Print count stats for the PlaceJob table.
"""
print("PlaceJob stats")
print()
total = db.PlaceJob.select()
enabled = db.PlaceJob.selectBy(enabled=True)
queued = enabled.filter(jobs.orCondition())
print("total: {0:,d}".format(total.count()))
print(" * enabled: {0:,d}".format(enabled.count()))
print(" * queued to run: {0:,d}".format(queued.count()))
print(" * not queued to run: {0:,d}".format(enabled.count() - queued.count()))
print(" * disabled: {0:,d}".format(total.count() - enabled.count()))
print()
def getRecords():
"""
Print all records in the PlaceJob table.
:return: None
"""
print("PlaceJob records")
print("Ordered by enabled first then oldest completed and oldest" " attempted.")
print()
template = "{0:>7} | {1:20} | {2:^8} | {3:^17} | {4:^17} | {5:^10} | {6:^7}"
print(
template.format(
"Job ID",
"Place Name",
"Status",
"Attempted",
"Completed",
"Created",
"Enabled",
)
)
for x in db.PlaceJob.select():
data = (
x.id,
x.place.name,
x.getStatus(asText=True),
x.lastAttempted.strftime("%x %X") if x.lastAttempted else "-" * 17,
x.lastCompleted.strftime("%x %X") if x.lastCompleted else "-" * 17,
str(x.created.date()),
"Y" if x.enabled else "N",
)
print(template.format(*data))
print()
def resetTimes(jobID=None):
"""
Reset the times for one PlaceJob record.
Set the last completed and last attempted times to None. This is useful
in testing in order to reset a job which may have just run.
:param jobID: Database record ID for PlaceJob table.
:return: None
"""
if not jobID:
jobID = int(
input(
"jobManager. Reset last attempted and last"
" completed times - enter PlaceJob ID /> "
)
)
db.PlaceJob.get(jobID).set(lastAttempted=None, lastCompleted=None)
print("Removed attempted and completed times for job ID {0}".format(jobID))
def enableOne(jobID=None):
"""
Enable one record in PlaceJob table.
:return: None
"""
if not jobID:
jobID = int(input("jobManager. Enable - enter PlaceJob ID /> "))
db.PlaceJob.get(jobID).setEnabled()
print("Enabled job ID {0}".format(jobID))
def disableOne(jobID=None):
"""
Disable one record in PlaceJob table.
:param jobID: Database record ID for PlaceJob table.
:return: None
"""
if not jobID:
jobID = int(input("jobManager. Disable - enter PlaceJob ID /> "))
db.PlaceJob.get(jobID).setDisabled()
print("Disabled job ID {0}".format(jobID))
def deleteOne(jobID=None):
"""
Delete one record in PlaceJob table.
:param jobID: Database record ID for PlaceJob table.
:return: None
"""
if not jobID:
jobID = int(input("jobManager. Delete - PlaceJob ID /> "))
db.PlaceJob.deleteBy(id=jobID)
print("Deleted job ID {0}".format(jobID))
def deleteAll():
"""
Remove all records from PlaceJob table.
:return: None
"""
db.PlaceJob.clearTable()
print("All PlaceJob records deleted.")
def enableAll():
"""
Set all records in PlaceJob table to enabled.
:return: None
"""
count = 0
for p in db.PlaceJob.selectBy(enabled=False):
p.setEnabled()
count += 1
print("{0} records enabled".format(count))
def disableAll():
"""
Set all records in PlaceJob table to disabled.
:return: None
"""
count = 0
for p in db.PlaceJob.selectBy(enabled=True):
p.setDisabled()
count += 1
print("{0} records disabled".format(count))
def insertPlaceByName(placeName=None):
"""
Expect a Place name and add a record in PlaceJob for it.
A Place could be Supername, Country or a Town. Continents should not
be looked up.
Multiples places with the same name will all be added e.g. add both
towns for input 'Valencia'.
:param placeName: Default name of place to add job for, as a string.
If not supplied, prompt user for input text.
:return: None
"""
if not placeName:
placeName = input("jobManager. Insert - enter place name /> ")
results = db.Place.selectBy(name=placeName)
if results.count():
for place in results:
output = (place.woeid, place.name)
try:
db.PlaceJob(placeID=place.id)
print("{0:10} | {1:15} | -> added".format(*output))
except DuplicateEntryError:
print("{0:10} | {1:15} | -> already exists".format(*output))
else:
raise ValueError(
"The name `{0}` was not found in Place table.".format(placeName)
)
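# Illustrative usage (a hedged sketch; 'Valencia' is just the example name from the
# docstring above): one call adds a PlaceJob row for every matching Place.
#
#   insertPlaceByName("Valencia")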
def insertTownsOfCountry(countryName=None):
"""
Add all towns of a country to the trend job list, excluding the country itself.
Expect country name and add its child towns to the Place Job table, if
the country exists in the Country table and if it has child towns.
Existing values are skipped.
:param countryName: Default None. Name of country to look up towns for
and then add as jobs.
:return: None
"""
if not countryName:
countryName = input("jobManager. Insert towns - enter country" " name /> ")
results = db.Country.selectBy(name=countryName)
if results.count():
# Expect exactly one result, since country names will never be
# duplicated, unlike towns.
country = results.getOne()
towns = country.hasTowns
if not towns:
raise ValueError(
"Country `{0}` has no towns linked to it which"
" can be added.".format(countryName)
)
# Add each town on the country.
for town in towns:
# Include country code of town.
output = (town.woeid, town.name, country.countryCode)
try:
db.PlaceJob(placeID=town.id)
print("{0:10} | {1:15} | {2:2} | -> added".format(*output))
except DuplicateEntryError:
print("{0:10} | {1:15} | {2:2} | -> already exists".format(*output))
else:
raise ValueError("Country `{0}` was not found.".format(countryName))
def _getConfiguredValues():
"""
Get configured values for fields in job section of config file and return.
:return countries: list of configured country name strings.
:return townsForCountries: list of configured country names where towns
are needed.
:return towns: list of configured town names.
"""
countriesStr = conf.get("PlaceJob", "countries")
countries = [v.strip() for v in countriesStr.split("\n") if v]
townsForCountriesStr = conf.get("PlaceJob", "townsForCountries")
townsForCountries = [v.strip() for v in townsForCountriesStr.split("\n") if v]
townsStr = conf.get("PlaceJob", "towns")
towns = [v.strip() for v in townsStr.split("\n") if v]
return countries, townsForCountries, towns
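# A minimal sketch of the config section _getConfiguredValues expects, assuming a
# standard ConfigParser-style file; the section and field names come from the
# conf.get calls above, while the place names themselves are only illustrative:
#
#   [PlaceJob]
#   countries: United Kingdom
#       South Africa
#   townsForCountries: Australia
#   towns: Tokyo
#       Valencia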
def printConfiguredValues():
"""
Print configured values in job section of config file.
:return: None
"""
countries, townsForCountries, towns = _getConfiguredValues()
print("World")
print("-----")
for superObj in db.Supername.select():
print(superObj.name)
print()
print("Countries")
print("---------")
for c in countries:
print(c)
print()
print("Towns for Countries")
print("-------------------")
for tc in townsForCountries:
print(tc)
print()
print("Towns")
print("-----")
for t in towns:
print(t)
print()
def insertDefaults():
"""
Add default data to PlaceJob table.
Look up configured data in the trendJobs file and insert records for countries
and for towns of certain countries.
The World is always added before reading from the configured default
values.
:return: None
"""
print("World")
print("-----")
for superObj in db.Supername.select():
insertPlaceByName(superObj.name)
print()
# Get user-configured text values of job records to add.
countries, townsForCountries, towns = _getConfiguredValues()
print("Countries")
print("---------")
for c in countries:
insertPlaceByName(c)
print()
print("Towns For Countries")
print("-------------------")
for tc in townsForCountries:
insertTownsOfCountry(tc)
print()
print("Towns")
print("-----")
for t in towns:
insertPlaceByName(t)
print()
def main(args):
"""
Give ability to enter command-line interactive mode.
Options are printed on startup or if the input is empty. If the input is not
valid for the menu options, standard errors are raised with appropriate messages.
:param args: list of command-line arguments as strings.
:return: None
"""
if not args or set(args) & {"-h", "--help"}:
print("Usage: python utils/job_manager.py [-i|--interactive]" " [-h|--help]")
print("--help : show help message")
print("--interactive : enter interactive mode and show options.")
else:
if set(args) & {"-i", "--interactive"}:
options = [
("QUIT", sys.exit),
("VIEW counts", getCounts),
("VIEW records", getRecords),
("SINGLE - enable one PlaceJob record", enableOne),
("SINGLE - disable", disableOne),
("SINGLE - delete", deleteOne),
("SINGLE - reset times", resetTimes),
("ALL - enable all PlaceJob records", enableAll),
("ALL - disable", disableAll),
("ALL - delete", deleteAll),
("CREATE a job from specified town or country name", insertPlaceByName),
(
"CREATE a job for all towns within a specified country name",
insertTownsOfCountry,
),
(
"VIEW the list of pre-configured places to watch, set in the"
" app config file",
printConfiguredValues,
),
("INSERT - Watch all pre-configured places", insertDefaults),
]
print("Job Manager interactive mode.")
print()
print(
"You are now viewing and editing the PlaceJob table"
" records. Actions like enable only work after a record"
" has been created using CREATE or INSERT options."
)
print()
assert db.PlaceJob.tableExists(), "PlaceJob table has not been created yet."
# Loop until exit option is selected.
while True:
print("OPTIONS")
for i, option in enumerate(options):
print("{0:2d}) {1:s}".format(i, option[0]))
print()
print("TIPS")
print(
"- Enter an option number then hit enter."
"\n- The 'VIEW ...' options do not alter data."
"\n- To see menu options again, leave text blank and"
" press enter."
)
print()
choice = True
# Loop until choice is empty string.
while choice:
choice = input("jobManager /> ")
try:
index = int(choice)
command = options[index][1]
command()
except Exception as e:
print("{0}. {1}".format(type(e).__name__, str(e)))
print()
if __name__ == "__main__":
main(sys.argv[1:])
|
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=redefined-outer-name
# pylint: disable=g-bad-import-order
"""Build and train neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import os
from data_load import DataLoader
import numpy as np
import tensorflow as tf
logdir = "logs/scalars/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
def reshape_function(data, label):
reshaped_data = tf.reshape(data, [-1, 3, 1])
return reshaped_data, label
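# Rough usage sketch (shapes are illustrative, based on the seq_length=128 used below):
# mapping reshape_function over a tf.data.Dataset turns each (128, 3) accelerometer
# window into a (128, 3, 1) single-channel "image" that the Conv2D layers accept.
#
#   dataset = dataset.map(reshape_function)  # (128, 3) -> (128, 3, 1)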
def calculate_model_size(model):
print(model.summary())
var_sizes = [
np.product(list(map(int, v.shape))) * v.dtype.size
for v in model.trainable_variables
]
print("Model size:", sum(var_sizes) / 1024, "KB")
def build_cnn(seq_length):
"""Builds a convolutional neural network in Keras."""
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(
8, (4, 3),
padding="same",
activation="relu",
input_shape=(seq_length, 3, 1)), # output_shape=(batch, 128, 3, 8)
tf.keras.layers.MaxPool2D((3, 3)), # (batch, 42, 1, 8)
tf.keras.layers.Dropout(0.1), # (batch, 42, 1, 8)
tf.keras.layers.Conv2D(16, (4, 1), padding="same",
activation="relu"), # (batch, 42, 1, 16)
tf.keras.layers.MaxPool2D((3, 1), padding="same"), # (batch, 14, 1, 16)
tf.keras.layers.Dropout(0.1), # (batch, 14, 1, 16)
tf.keras.layers.Flatten(), # (batch, 224)
tf.keras.layers.Dense(16, activation="relu"), # (batch, 16)
tf.keras.layers.Dropout(0.1), # (batch, 16)
tf.keras.layers.Dense(4, activation="softmax") # (batch, 4)
])
model_path = os.path.join("./netmodels", "CNN")
print("Built CNN.")
if not os.path.exists(model_path):
os.makedirs(model_path)
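# Note: the next line assumes pretrained weights already exist at
# ./netmodels/CNN/weights.h5; on a fresh checkout without that file,
# load_weights will fail.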
model.load_weights("./netmodels/CNN/weights.h5")
return model, model_path
def build_lstm(seq_length):
"""Builds an LSTM in Keras."""
model = tf.keras.Sequential([
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(22),
input_shape=(seq_length, 3)), # output_shape=(batch, 44)
tf.keras.layers.Dense(4, activation="sigmoid") # (batch, 4)
])
model_path = os.path.join("./netmodels", "LSTM")
print("Built LSTM.")
if not os.path.exists(model_path):
os.makedirs(model_path)
return model, model_path
def load_data(train_data_path, valid_data_path, test_data_path, seq_length):
data_loader = DataLoader(train_data_path,
valid_data_path,
test_data_path,
seq_length=seq_length)
data_loader.format()
return data_loader.train_len, data_loader.train_data, data_loader.valid_len, \
data_loader.valid_data, data_loader.test_len, data_loader.test_data
def build_net(args, seq_length):
if args.model == "CNN":
model, model_path = build_cnn(seq_length)
elif args.model == "LSTM":
model, model_path = build_lstm(seq_length)
else:
print("Please input correct model name.(CNN LSTM)")
return model, model_path
def train_net(
model,
model_path, # pylint: disable=unused-argument
train_len, # pylint: disable=unused-argument
train_data,
valid_len,
valid_data, # pylint: disable=unused-argument
test_len,
test_data,
kind):
"""Trains the model."""
calculate_model_size(model)
epochs = 50
batch_size = 64
model.compile(optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
if kind == "CNN":
train_data = train_data.map(reshape_function)
test_data = test_data.map(reshape_function)
valid_data = valid_data.map(reshape_function)
test_labels = np.zeros(test_len)
idx = 0
for data, label in test_data: # pylint: disable=unused-variable
test_labels[idx] = label.numpy()
idx += 1
train_data = train_data.batch(batch_size).repeat()
valid_data = valid_data.batch(batch_size)
test_data = test_data.batch(batch_size)
model.fit(train_data,
epochs=epochs,
validation_data=valid_data,
steps_per_epoch=1000,
validation_steps=int((valid_len - 1) / batch_size + 1),
callbacks=[tensorboard_callback])
loss, acc = model.evaluate(test_data)
pred = np.argmax(model.predict(test_data), axis=1)
confusion = tf.math.confusion_matrix(labels=tf.constant(test_labels),
predictions=tf.constant(pred),
num_classes=4)
print(confusion)
print("Loss {}, Accuracy {}".format(loss, acc))
# Convert the model to the TensorFlow Lite format without quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Save the model to disk
open("model.tflite", "wb").write(tflite_model)
# Convert the model to the TensorFlow Lite format with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# Save the model to disk
open("model_quantized.tflite", "wb").write(tflite_model)
basic_model_size = os.path.getsize("model.tflite")
print("Basic model is %d bytes" % basic_model_size)
quantized_model_size = os.path.getsize("model_quantized.tflite")
print("Quantized model is %d bytes" % quantized_model_size)
difference = basic_model_size - quantized_model_size
print("Difference is %d bytes" % difference)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", "-m")
parser.add_argument("--person", "-p")
args = parser.parse_args()
seq_length = 128
print("Start to load data...")
if args.person == "true":
train_len, train_data, valid_len, valid_data, test_len, test_data = \
load_data("./person_split/train", "./person_split/valid",
"./person_split/test", seq_length)
else:
train_len, train_data, valid_len, valid_data, test_len, test_data = \
load_data("./data/train", "./data/valid", "./data/test", seq_length)
print("Start to build net...")
model, model_path = build_net(args, seq_length)
print("Start training...")
train_net(model, model_path, train_len, train_data, valid_len, valid_data,
test_len, test_data, args.model)
print("Training finished!")
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.gaming_v1.types import game_server_deployments
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-game-servers",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GameServerDeploymentsServiceTransport(abc.ABC):
"""Abstract transport class for GameServerDeploymentsService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "gameservices.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use a self-signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
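# Illustrative call patterns for the credential resolution above (MyTransport is a
# hypothetical concrete subclass; 'credentials' and 'credentials_file' are mutually
# exclusive, and omitting both falls back to Application Default Credentials):
#
#   MyTransport(credentials_file="service-account.json")
#   MyTransport(credentials=my_credentials)
#   MyTransport()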
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_game_server_deployments: gapic_v1.method.wrap_method(
self.list_game_server_deployments,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_game_server_deployment: gapic_v1.method.wrap_method(
self.get_game_server_deployment,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_game_server_deployment: gapic_v1.method.wrap_method(
self.create_game_server_deployment,
default_timeout=60.0,
client_info=client_info,
),
self.delete_game_server_deployment: gapic_v1.method.wrap_method(
self.delete_game_server_deployment,
default_timeout=60.0,
client_info=client_info,
),
self.update_game_server_deployment: gapic_v1.method.wrap_method(
self.update_game_server_deployment,
default_timeout=60.0,
client_info=client_info,
),
self.get_game_server_deployment_rollout: gapic_v1.method.wrap_method(
self.get_game_server_deployment_rollout,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_game_server_deployment_rollout: gapic_v1.method.wrap_method(
self.update_game_server_deployment_rollout,
default_timeout=60.0,
client_info=client_info,
),
self.preview_game_server_deployment_rollout: gapic_v1.method.wrap_method(
self.preview_game_server_deployment_rollout,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.fetch_deployment_state: gapic_v1.method.wrap_method(
self.fetch_deployment_state,
default_retry=retries.Retry(
initial=1.0,
maximum=10.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_game_server_deployments(
self,
) -> Callable[
[game_server_deployments.ListGameServerDeploymentsRequest],
Union[
game_server_deployments.ListGameServerDeploymentsResponse,
Awaitable[game_server_deployments.ListGameServerDeploymentsResponse],
],
]:
raise NotImplementedError()
@property
def get_game_server_deployment(
self,
) -> Callable[
[game_server_deployments.GetGameServerDeploymentRequest],
Union[
game_server_deployments.GameServerDeployment,
Awaitable[game_server_deployments.GameServerDeployment],
],
]:
raise NotImplementedError()
@property
def create_game_server_deployment(
self,
) -> Callable[
[game_server_deployments.CreateGameServerDeploymentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_game_server_deployment(
self,
) -> Callable[
[game_server_deployments.DeleteGameServerDeploymentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_game_server_deployment(
self,
) -> Callable[
[game_server_deployments.UpdateGameServerDeploymentRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_game_server_deployment_rollout(
self,
) -> Callable[
[game_server_deployments.GetGameServerDeploymentRolloutRequest],
Union[
game_server_deployments.GameServerDeploymentRollout,
Awaitable[game_server_deployments.GameServerDeploymentRollout],
],
]:
raise NotImplementedError()
@property
def update_game_server_deployment_rollout(
self,
) -> Callable[
[game_server_deployments.UpdateGameServerDeploymentRolloutRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def preview_game_server_deployment_rollout(
self,
) -> Callable[
[game_server_deployments.PreviewGameServerDeploymentRolloutRequest],
Union[
game_server_deployments.PreviewGameServerDeploymentRolloutResponse,
Awaitable[
game_server_deployments.PreviewGameServerDeploymentRolloutResponse
],
],
]:
raise NotImplementedError()
@property
def fetch_deployment_state(
self,
) -> Callable[
[game_server_deployments.FetchDeploymentStateRequest],
Union[
game_server_deployments.FetchDeploymentStateResponse,
Awaitable[game_server_deployments.FetchDeploymentStateResponse],
],
]:
raise NotImplementedError()
__all__ = ("GameServerDeploymentsServiceTransport",)
|
|
import json
import pytest
from awx.main.models.credential import CredentialType, Credential
from awx.api.versioning import reverse
@pytest.mark.django_db
def test_list_as_unauthorized_xfail(get):
response = get(reverse('api:credential_type_list'))
assert response.status_code == 401
@pytest.mark.django_db
@pytest.mark.parametrize('method, valid', [
('GET', sorted(dict(CredentialType.KIND_CHOICES).keys())),
('POST', ['cloud', 'net']),
])
def test_options_valid_kinds(method, valid, options, admin):
response = options(reverse('api:credential_type_list'), admin)
choices = sorted(dict(response.data['actions'][method]['kind']['choices']).keys())
assert valid == choices
@pytest.mark.django_db
def test_options_valid_put_kinds(options, admin):
ssh = CredentialType.defaults['ssh']()
ssh.save()
response = options(reverse('api:credential_type_detail', kwargs={'pk': ssh.pk}), admin)
choices = sorted(dict(response.data['actions']['PUT']['kind']['choices']).keys())
assert ['cloud', 'net'] == choices
@pytest.mark.django_db
def test_list_as_normal_user(get, alice):
ssh = CredentialType.defaults['ssh']()
ssh.save()
response = get(reverse('api:credential_type_list'), alice)
assert response.status_code == 200
assert response.data['count'] == 1
@pytest.mark.django_db
def test_list_as_admin(get, admin):
ssh = CredentialType.defaults['ssh']()
ssh.save()
response = get(reverse('api:credential_type_list'), admin)
assert response.status_code == 200
assert response.data['count'] == 1
@pytest.mark.django_db
def test_create_as_unauthorized_xfail(get, post):
response = post(reverse('api:credential_type_list'), {
'name': 'Custom Credential Type',
})
assert response.status_code == 401
@pytest.mark.django_db
def test_update_as_unauthorized_xfail(patch, delete):
ssh = CredentialType.defaults['ssh']()
ssh.save()
url = reverse('api:credential_type_detail', kwargs={'pk': ssh.pk})
response = patch(url, {'name': 'Some Other Name'})
assert response.status_code == 401
assert delete(url).status_code == 401
@pytest.mark.django_db
def test_update_managed_by_tower_xfail(patch, delete, admin):
ssh = CredentialType.defaults['ssh']()
ssh.save()
url = reverse('api:credential_type_detail', kwargs={'pk': ssh.pk})
response = patch(url, {'name': 'Some Other Name'}, admin)
assert response.status_code == 403
assert delete(url, admin).status_code == 403
@pytest.mark.django_db
def test_update_credential_type_in_use_xfail(patch, delete, admin):
_type = CredentialType(kind='cloud', inputs={'fields': []})
_type.save()
Credential(credential_type=_type, name='My Custom Cred').save()
url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
response = patch(url, {'name': 'Some Other Name'}, admin)
assert response.status_code == 200
url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
response = patch(url, {'inputs': {}}, admin)
assert response.status_code == 403
assert delete(url, admin).status_code == 403
@pytest.mark.django_db
def test_update_credential_type_success(get, patch, delete, admin):
_type = CredentialType(kind='cloud')
_type.save()
url = reverse('api:credential_type_detail', kwargs={'pk': _type.pk})
response = patch(url, {'name': 'Some Other Name'}, admin)
assert response.status_code == 200
assert get(url, admin).data.get('name') == 'Some Other Name'
assert delete(url, admin).status_code == 204
@pytest.mark.django_db
def test_delete_as_unauthorized_xfail(delete):
ssh = CredentialType.defaults['ssh']()
ssh.save()
response = delete(
reverse('api:credential_type_detail', kwargs={'pk': ssh.pk}),
)
assert response.status_code == 401
@pytest.mark.django_db
def test_create_as_normal_user_xfail(get, post, alice):
response = post(reverse('api:credential_type_list'), {
'name': 'Custom Credential Type',
}, alice)
assert response.status_code == 403
assert get(reverse('api:credential_type_list'), alice).data['count'] == 0
@pytest.mark.django_db
def test_create_as_admin(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'Custom Credential Type',
'inputs': {},
'injectors': {}
}, admin)
assert response.status_code == 201
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
assert response.data['results'][0]['name'] == 'Custom Credential Type'
assert response.data['results'][0]['inputs'] == {}
assert response.data['results'][0]['injectors'] == {}
assert response.data['results'][0]['managed_by_tower'] is False
@pytest.mark.django_db
def test_create_managed_by_tower_readonly(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'Custom Credential Type',
'inputs': {},
'injectors': {},
'managed_by_tower': True
}, admin)
assert response.status_code == 201
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
assert response.data['results'][0]['managed_by_tower'] is False
@pytest.mark.django_db
def test_create_dependencies_not_supported(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'Custom Credential Type',
'inputs': {'dependencies': {'foo': ['bar']}},
'injectors': {},
}, admin)
assert response.status_code == 400
assert response.data['inputs'] == ["'dependencies' is not supported for custom credentials."]
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 0
@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['cloud', 'net'])
def test_create_valid_kind(kind, get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': kind,
'name': 'My Custom Type',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}]
},
'injectors': {}
}, admin)
assert response.status_code == 201
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
@pytest.mark.django_db
@pytest.mark.parametrize('kind', ['ssh', 'vault', 'scm', 'insights'])
def test_create_invalid_kind(kind, get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': kind,
'name': 'My Custom Type',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}]
},
'injectors': {}
}, admin)
assert response.status_code == 400
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 0
@pytest.mark.django_db
def test_create_with_valid_inputs(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}]
},
'injectors': {}
}, admin)
assert response.status_code == 201
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
fields = response.data['results'][0]['inputs']['fields']
assert len(fields) == 1
assert fields[0]['id'] == 'api_token'
assert fields[0]['label'] == 'API Token'
assert fields[0]['secret'] is True
assert fields[0]['type'] == 'string'
@pytest.mark.django_db
def test_create_with_required_inputs(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}],
'required': ['api_token'],
},
'injectors': {}
}, admin)
assert response.status_code == 201
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
required = response.data['results'][0]['inputs']['required']
assert required == ['api_token']
@pytest.mark.django_db
@pytest.mark.parametrize('inputs', [
True,
100,
[1, 2, 3, 4],
'malformed',
{'feelds': {}},
{'fields': [123, 234, 345]},
{'fields': [{'id':'one', 'label':'One'}, 234]},
{'feelds': {}, 'fields': [{'id':'one', 'label':'One'}, 234]}
])
def test_create_with_invalid_inputs_xfail(post, admin, inputs):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': inputs,
'injectors': {}
}, admin)
assert response.status_code == 400
@pytest.mark.django_db
@pytest.mark.parametrize('injectors', [
True,
100,
[1, 2, 3, 4],
'malformed',
{'mal': 'formed'},
{'env': {'ENV_VAR': 123}, 'mal': 'formed'},
{'env': True},
{'env': [1, 2, 3]},
{'file': True},
{'file': [1, 2, 3]},
{'extra_vars': True},
{'extra_vars': [1, 2, 3]},
])
def test_create_with_invalid_injectors_xfail(post, admin, injectors):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {},
'injectors': injectors,
}, admin)
assert response.status_code == 400
@pytest.mark.django_db
def test_ask_at_runtime_xfail(get, post, admin):
# ask_at_runtime is only supported by the built-in SSH and Vault types
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True,
'ask_at_runtime': True
}]
},
'injectors': {
'env': {
'ANSIBLE_MY_CLOUD_TOKEN': '{{api_token}}'
}
}
}, admin)
assert response.status_code == 400
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 0
@pytest.mark.django_db
def test_create_with_valid_injectors(get, post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}]
},
'injectors': {
'env': {
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
}
}
}, admin, expect=201)
response = get(reverse('api:credential_type_list'), admin)
assert response.data['count'] == 1
injectors = response.data['results'][0]['injectors']
assert len(injectors) == 1
assert injectors['env'] == {
'AWX_MY_CLOUD_TOKEN': '{{api_token}}'
}
@pytest.mark.django_db
def test_create_with_undefined_template_variable_xfail(post, admin):
response = post(reverse('api:credential_type_list'), {
'kind': 'cloud',
'name': 'MyCloud',
'inputs': {
'fields': [{
'id': 'api_token',
'label': 'API Token',
'type': 'string',
'secret': True
}]
},
'injectors': {
'env': {'AWX_MY_CLOUD_TOKEN': '{{api_tolkien}}'}
}
}, admin)
assert response.status_code == 400
assert "'api_tolkien' is undefined" in json.dumps(response.data)
|
|
"""
Module for building a complete dataset from local directory with csv files.
"""
import os
import sys
from logbook import Logger, StreamHandler
from numpy import empty
from pandas import DataFrame, read_csv, Index, Timedelta, NaT
from trading_calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
handler = StreamHandler(sys.stdout, format_string=" | {record.message}")
logger = Logger(__name__)
logger.handlers.append(handler)
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for a custom data bundle.
This function can be used in ~/.zipline/extension.py
to register the bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest
class CSVDIRBundle:
"""
Wrapper class to call csvdir_bundle with provided
list of time frames and a path to the csvdir directory
"""
def __init__(self, tframes=None, csvdir=None):
self.tframes = tframes
self.csvdir = csvdir
def ingest(self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
self.tframes,
self.csvdir)
@bundles.register("csvdir")
def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs'])
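# Hedged sketch of the per-symbol CSV layout this bundle assumes: the first column
# is parsed as the date index (see read_csv below), the bar writers are expected to
# consume the usual OHLCV columns, and the optional 'dividend' and 'split' columns
# feed the adjustment writer. For example:
#
#   date,open,high,low,close,volume,dividend,split
#   2017-01-03,29.0,29.5,28.9,29.3,120000,0.0,1.0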
def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress):
with maybe_show_progress(symbols, show_progress,
label='Loading custom pricing data: ') as it:
files = os.listdir(csvdir)
for sid, symbol in enumerate(it):
logger.debug('%s: sid %s' % (symbol, sid))
try:
fname = [fname for fname in files
if '%s.csv' % symbol in fname][0]
except IndexError:
raise ValueError("%s.csv file is not in %s" % (symbol, csvdir))
dfr = read_csv(os.path.join(csvdir, fname),
parse_dates=[0],
infer_datetime_format=True,
index_col=0).sort_index()
start_date = dfr.index[0]
end_date = dfr.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + Timedelta(days=1)
metadata.iloc[sid] = start_date, end_date, ac_date, symbol
if 'split' in dfr.columns:
tmp = 1. / dfr[dfr['split'] != 1.0]['split']
split = DataFrame(data=tmp.index.tolist(),
columns=['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
splits = divs_splits['splits']
index = Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
divs_splits['splits'] = splits.append(split)
if 'dividend' in dfr.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = dfr[dfr['dividend'] != 0.0]['dividend']
div = DataFrame(data=tmp.index.tolist(), columns=['ex_date'])
div['record_date'] = NaT
div['declared_date'] = NaT
div['pay_date'] = NaT
div['amount'] = tmp.tolist()
div['sid'] = sid
divs = divs_splits['divs']
ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
divs_splits['divs'] = divs.append(div)
yield sid, dfr
register_calendar_alias("CSVDIR", "NYSE")
|
|
# -*- coding: utf-8 -*-
"""
celery.app.trace
~~~~~~~~~~~~~~~~
This module defines how the task execution is traced:
errors are recorded, handlers are applied and so on.
"""
from __future__ import absolute_import
# ## ---
# This is the heart of the worker, the inner loop so to speak.
# It used to be split up into nice little classes and methods,
# but in the end it only resulted in bad performance and horrible tracebacks,
# so instead we now use one closure per task class.
import os
import socket
import sys
from warnings import warn
from billiard.einfo import ExceptionInfo
from kombu.utils import kwdict
from celery import current_app
from celery import states, signals
from celery._state import _task_stack
from celery.app import set_default_app
from celery.app.task import Task as BaseTask, Context
from celery.exceptions import Ignore, Reject, Retry
from celery.utils.log import get_logger
from celery.utils.objects import mro_lookup
from celery.utils.serialization import (
get_pickleable_exception,
get_pickleable_etype,
)
__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task',
'setup_worker_optimizations', 'reset_worker_optimizations']
_logger = get_logger(__name__)
send_prerun = signals.task_prerun.send
send_postrun = signals.task_postrun.send
send_success = signals.task_success.send
STARTED = states.STARTED
SUCCESS = states.SUCCESS
IGNORED = states.IGNORED
REJECTED = states.REJECTED
RETRY = states.RETRY
FAILURE = states.FAILURE
EXCEPTION_STATES = states.EXCEPTION_STATES
IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED])
#: set by :func:`setup_worker_optimizations`
_tasks = None
_patched = {}
def task_has_custom(task, attr):
"""Return true if the task or one of its bases
defines ``attr`` (excluding the one in BaseTask)."""
return mro_lookup(task.__class__, attr, stop=(BaseTask, object),
monkey_patched=['celery.app.task'])
class TraceInfo(object):
__slots__ = ('state', 'retval')
def __init__(self, state, retval=None):
self.state = state
self.retval = retval
def handle_error_state(self, task, eager=False):
store_errors = not eager
if task.ignore_result:
store_errors = task.store_errors_even_if_ignored
return {
RETRY: self.handle_retry,
FAILURE: self.handle_failure,
}[self.state](task, store_errors=store_errors)
def handle_retry(self, task, store_errors=True):
"""Handle retry exception."""
# the exception raised is the Retry semi-predicate,
# and its `exc` attribute is the original exception raised (if any).
req = task.request
type_, _, tb = sys.exc_info()
try:
reason = self.retval
einfo = ExceptionInfo((type_, reason, tb))
if store_errors:
task.backend.mark_as_retry(
req.id, reason.exc, einfo.traceback, request=req,
)
task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
signals.task_retry.send(sender=task, request=req,
reason=reason, einfo=einfo)
return einfo
finally:
del(tb)
def handle_failure(self, task, store_errors=True):
"""Handle exception."""
req = task.request
type_, _, tb = sys.exc_info()
try:
exc = self.retval
einfo = ExceptionInfo()
einfo.exception = get_pickleable_exception(einfo.exception)
einfo.type = get_pickleable_etype(einfo.type)
if store_errors:
task.backend.mark_as_failure(
req.id, exc, einfo.traceback, request=req,
)
task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
signals.task_failure.send(sender=task, task_id=req.id,
exception=exc, args=req.args,
kwargs=req.kwargs,
traceback=tb,
einfo=einfo)
return einfo
finally:
del(tb)
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
Info=TraceInfo, eager=False, propagate=False, app=None,
IGNORE_STATES=IGNORE_STATES):
"""Return a function that traces task execution; catches all
exceptions and updates the result backend with the state and result.
If the call was successful, it saves the result to the task result
backend, and sets the task status to `"SUCCESS"`.
If the call raises :exc:`~@Retry`, it extracts
the original exception, uses that as the result and sets the task state
to `"RETRY"`.
If the call results in an exception, it saves the exception as the task
result, and sets the task state to `"FAILURE"`.
Return a function that takes the following arguments:
:param uuid: The id of the task.
:param args: List of positional args to pass on to the function.
:param kwargs: Keyword arguments mapping to pass on to the function.
:keyword request: Request dict.
"""
# If the task doesn't define a custom __call__ method
# we optimize it away by simply calling the run method directly,
# saving the extra method call and a line less in the stack trace.
fun = task if task_has_custom(task, '__call__') else task.run
loader = loader or app.loader
backend = task.backend
ignore_result = task.ignore_result
track_started = not eager and (task.track_started and not ignore_result)
publish_result = not eager and not ignore_result
hostname = hostname or socket.gethostname()
loader_task_init = loader.on_task_init
loader_cleanup = loader.on_process_cleanup
task_on_success = None
task_after_return = None
if task_has_custom(task, 'on_success'):
task_on_success = task.on_success
if task_has_custom(task, 'after_return'):
task_after_return = task.after_return
store_result = backend.store_result
backend_cleanup = backend.process_cleanup
pid = os.getpid()
request_stack = task.request_stack
push_request = request_stack.push
pop_request = request_stack.pop
push_task = _task_stack.push
pop_task = _task_stack.pop
on_chord_part_return = backend.on_chord_part_return
prerun_receivers = signals.task_prerun.receivers
postrun_receivers = signals.task_postrun.receivers
success_receivers = signals.task_success.receivers
from celery import canvas
signature = canvas.maybe_signature # maybe_ does not clone if already
def trace_task(uuid, args, kwargs, request=None):
R = I = None
kwargs = kwdict(kwargs)
try:
push_task(task)
task_request = Context(request or {}, args=args,
called_directly=False, kwargs=kwargs)
push_request(task_request)
try:
# -*- PRE -*-
if prerun_receivers:
send_prerun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs)
loader_task_init(uuid, task)
if track_started:
store_result(
uuid, {'pid': pid, 'hostname': hostname}, STARTED,
request=task_request,
)
# -*- TRACE -*-
try:
R = retval = fun(*args, **kwargs)
state = SUCCESS
except Reject as exc:
I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
except Ignore as exc:
I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
state, retval = I.state, I.retval
except Retry as exc:
I = Info(RETRY, exc)
state, retval = I.state, I.retval
R = I.handle_error_state(task, eager=eager)
except Exception as exc:
if propagate:
raise
I = Info(FAILURE, exc)
state, retval = I.state, I.retval
R = I.handle_error_state(task, eager=eager)
[signature(errback, app=app).apply_async((uuid, ))
for errback in task_request.errbacks or []]
except BaseException as exc:
raise
else:
# callback tasks must be applied before the result is
# stored, so that result.children is populated.
[signature(callback, app=app).apply_async((retval, ))
for callback in task_request.callbacks or []]
if publish_result:
store_result(
uuid, retval, SUCCESS, request=task_request,
)
if task_on_success:
task_on_success(retval, uuid, args, kwargs)
if success_receivers:
send_success(sender=task, result=retval)
# -* POST *-
if state not in IGNORE_STATES:
if task_request.chord:
on_chord_part_return(task)
if task_after_return:
task_after_return(
state, retval, uuid, args, kwargs, None,
)
if postrun_receivers:
send_postrun(sender=task, task_id=uuid, task=task,
args=args, kwargs=kwargs,
retval=retval, state=state)
finally:
pop_task()
pop_request()
if not eager:
try:
backend_cleanup()
loader_cleanup()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as exc:
_logger.error('Process cleanup failed: %r', exc,
exc_info=True)
except MemoryError:
raise
except Exception as exc:
if eager:
raise
R = report_internal_error(task, exc)
return R, I
return trace_task
def trace_task(task, uuid, args, kwargs, request={}, **opts):
try:
if task.__trace__ is None:
task.__trace__ = build_tracer(task.name, task, **opts)
return task.__trace__(uuid, args, kwargs, request)[0]
except Exception as exc:
return report_internal_error(task, exc)
def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts):
return trace_task((app or current_app).tasks[name],
uuid, args, kwargs, request, app=app, **opts)
trace_task_ret = _trace_task_ret
def _fast_trace_task(task, uuid, args, kwargs, request={}):
# setup_worker_optimizations will point trace_task_ret to here,
# so this is the function used in the worker.
return _tasks[task].__trace__(uuid, args, kwargs, request)[0]
def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
opts.setdefault('eager', True)
return build_tracer(task.name, task, **opts)(
uuid, args, kwargs, request)
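# Minimal usage sketch (assumes 'add' is a task registered with the current app; the
# uuid is just an illustrative string). eager_trace_task returns the same
# (retval, info) pair produced by the tracer that build_tracer constructs:
#
#   retval, info = eager_trace_task(add, 'illustrative-task-id', (2, 2), {})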
def report_internal_error(task, exc):
_type, _value, _tb = sys.exc_info()
try:
_value = task.backend.prepare_exception(exc)
exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
warn(RuntimeWarning(
'Exception raised outside body: {0!r}:\n{1}'.format(
exc, exc_info.traceback)))
return exc_info
finally:
del(_tb)
def setup_worker_optimizations(app):
global _tasks
global trace_task_ret
# make sure custom Task.__call__ methods that call super
# will not mess up the request/task stack.
_install_stack_protection()
# all new threads start without a current app, so if an app is not
# passed on to the thread it will fall back to the "default app",
# which then could be the wrong app. So for the worker
# we set this to always return our app. This is a hack,
# and means that only a single app can be used for workers
# running in the same process.
app.set_current()
set_default_app(app)
# evaluate all task classes by finalizing the app.
app.finalize()
# set fast shortcut to task registry
_tasks = app._tasks
trace_task_ret = _fast_trace_task
from celery.worker import job as job_module
job_module.trace_task_ret = _fast_trace_task
job_module.__optimize__()
def reset_worker_optimizations():
global trace_task_ret
trace_task_ret = _trace_task_ret
try:
delattr(BaseTask, '_stackprotected')
except AttributeError:
pass
try:
BaseTask.__call__ = _patched.pop('BaseTask.__call__')
except KeyError:
pass
from celery.worker import job as job_module
job_module.trace_task_ret = _trace_task_ret
def _install_stack_protection():
# Patches BaseTask.__call__ in the worker to handle the edge case
# where people override it and also call super.
#
# - The worker optimizes away BaseTask.__call__ and instead
# calls task.run directly.
# - so with the addition of current_task and the request stack
# BaseTask.__call__ now pushes to those stacks so that
# they work when tasks are called directly.
#
# The worker only optimizes away __call__ in the case
# where it has not been overridden, so the request/task stack
# will blow if a custom task class defines __call__ and also
# calls super().
if not getattr(BaseTask, '_stackprotected', False):
_patched['BaseTask.__call__'] = orig = BaseTask.__call__
def __protected_call__(self, *args, **kwargs):
stack = self.request_stack
req = stack.top
if req and not req._protected and \
len(stack) == 1 and not req.called_directly:
req._protected = 1
return self.run(*args, **kwargs)
return orig(self, *args, **kwargs)
BaseTask.__call__ = __protected_call__
BaseTask._stackprotected = True
|
|
# These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
from __future__ import division, absolute_import, print_function
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src
import numpy
from .nosetester import get_package_name
import inspect
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
#print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
return module.__dict__ is object.__globals__
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
#print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
return module.__name__ == object.__self__.__class__.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
#print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
#print '_fm C6' # dbg
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self, tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import (
isroutine, isclass, ismodule, isfunction, ismethod
)
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val)):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
#print 'RECURSE into class:',obj # dbg
for valname, val in obj.__dict__.items():
#valname1 = '%s.%s' % (name, valname) # dbg
#print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>", "'<")
want = want.replace("'>", "'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4, 8]:
got = got.replace("'<i%d'" % sz, "int")
want = want.replace("'<i%d'" % sz, "int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
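# Illustrative doctest showing the "#random" escape hatch handled above: because the
# expected output contains "#random", check_output accepts whatever value is printed.
#
#   >>> np.random.rand(2)
#   array([ 0.1,  0.2])  #random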
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
score = 1000 # load late, after doctest builtin
# always use whitespace and ellipsis options for doctests
doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
# files that should be ignored for doctests
doctest_ignore = ['generate_numpy_api.py',
'setup.py']
# Custom classes; class variables to allow subclassing
doctest_case_class = NumpyDocTestCase
out_check_class = NumpyOutputChecker
test_finder_class = NumpyDocTestFinder
# Don't use the standard doctest option handler; hard-code the option values
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
# Test doctests in 'test' files / directories. Standard plugin default
# is False
self.doctest_tests = True
# Variable name; if defined, doctest results stored in this variable in
# the top-level namespace. None is the standard default
self.doctest_result_var = None
def configure(self, options, config):
# parent method sets enabled flag from command line --with-numpydoctest
Plugin.configure(self, options, config)
self.finder = self.test_finder_class()
self.parser = doctest.DocTestParser()
if self.enabled:
# Pull standard doctest out of plugin list; there's no reason to run
# both. In practice the Unplugger plugin defined below would cover us when
# run from a standard numpy.test() call; this is just in case
# someone wants to run our plugin outside the numpy.test() machinery
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != 'doctest']
def set_test_context(self, test):
""" Configure `test` object to set test context
We set the numpy / scipy standard doctest namespace
Parameters
----------
test : test object
with ``globs`` dictionary defining namespace
Returns
-------
None
Notes
-----
`test` object modified in place
"""
# set the namespace for tests
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# Override test loading to customize test context (with set_test_context
# method), set standard docstring options, and install our own test output
# checker
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
# Set test namespace; test altered in place
self.set_test_context(test)
yield self.doctest_case_class(test,
optionflags=self.doctest_optflags,
checker=self.out_check_class(),
result_var=self.doctest_result_var)
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in self.doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class Unplugger(object):
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
"""
name = 'unplugger'
enabled = True # always enabled
score = 4000 # load late in order to be after builtins
def __init__(self, to_unplug='doctest'):
self.to_unplug = to_unplug
def options(self, parser, env):
pass
def configure(self, options, config):
# Pull named plugin out of plugins list
config.plugins.plugins = [p for p in config.plugins.plugins
if p.name != self.to_unplug]
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureTest exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, and we need
to return it to the user, override TestProgram.runTests to retain
the result.
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
|