file_name (3-137 chars) | prefix (0-918k chars) | suffix (0-962k chars) | middle (0-812k chars)
---|---|---|---|
SocialMediaButton.js | import { Button } from 'reactstrap';
import React from 'react';
import UncontrolledTooltip from 'reactstrap/lib/UncontrolledTooltip';
function SocialMediaButton({description, icon, color, link}) {
return (
<>
<Button
className="btn-icon-only rounded-circle"
        color={color}
        href={link}
id={`button-${icon}`}
target="_blank"
>
<span className="btn-inner--icon">
<i className={`fa fa-${icon}`} />
</span>
</Button>
<UncontrolledTooltip delay={0} target={`button-${icon}`}>
{description}
</UncontrolledTooltip>
</>
)
}
export default SocialMediaButton;
convert_video_to_image_frames.py | ## @example convert_video_to_image_frames.py
# This example loads a video, converts it to a stream of image frames, and displays the
# individual frames with matplotlib.
#
# Note that additional dependencies are required to stream videos in FAST:
# Linux: sudo apt install ubuntu-restricted-extras libgstreamer1.0-dev libgstreamer-plugins-bad1.0-dev libgstreamer-plugins-base1.0-dev libgstreamer-plugins-good1.0-dev
# Windows: K-lite codec pack https://codecguide.com/download_kl.htm
import fast
import matplotlib.pyplot as plt
import numpy as np
#fast.Reporter.setGlobalReportMethod(fast.Reporter.COUT) # Uncomment to show debug info
fast.downloadTestDataIfNotExists() # This will download the test data needed to run the example
streamer = fast.MovieStreamer.New()
streamer.setFilename(fast.Config.getTestDataPath() + 'US/sagittal_spine.avi')
dataChannel = streamer.getOutputPort()
streamer.update() # Start pipeline
frame_list = []
counter = 0
while True:
    frame = dataChannel.getNextImage()
    counter += 1
    if frame.isLastFrame():
        break
    # Only show every X frame
    if counter % 20 == 0: frame_list.append((np.asarray(frame), counter))
    if len(frame_list) == 9:
        # Display the 9 last frames
        f, axes = plt.subplots(3,3, figsize=(10,10))
        for i in range(3):
            for j in range(3):
                axes[j, i].set_title('Frame: ' + str(frame_list[i + j*3][1]))
                axes[j, i].imshow(frame_list[i + j*3][0][..., 0], cmap='gray')
        plt.show()
        frame_list.clear()
IsoFit.py | try:
runCount += 1
except:
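    #First run in this session: runCount does not exist yet, so initialize all module-level state below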
isoIn = False
clIn = False
cataIn = False
closePlots = False
resultsIn = False
clusterList = []
clusters=[]
isochrones = []
isoList = []
catalogue = []
runCount = 1
class resultClusterObj:
def __init__(self,cl):
import numpy as np
#Automatically populates variables based on those from the cluster it was given, except the data arrays
global properties
#List of all of the variables defined for the cluster cl, strips out the __functions__
properties = [a for a in dir(cl) if not a.startswith('_')]
        for prop in properties:
            #Saves all 'number' type variables to the memory of the result cluster object
            value = getattr(cl, prop)
            if type(value) in (float, np.float64, int):
                setattr(self, prop, float(value))
            elif type(value) == str:
                setattr(self, prop, value)
#Manually defined properties
self.name = cl.name
self.clType = cl.clType
class clusterObj:
def __init__(self,name='genericCluster',basedir='clusters/',brightThreshold=15):
#Declare instance variables
self.basedir = basedir
self.dataPath = self.basedir + f"{name}/data/"
self.imgPath = self.basedir + f"{name}/plots/"
self.unfilteredWide = []
self.unfilteredNarrow = []
self.filtered = []
self.mag = []
self.iso = []
self.condensed = []
self.condensed0 = []
self.condensedInit=[]
self.unfilteredBright = []
self.filteredBright = []
self.brightmag = []
self.distFiltered = []
self.binaries = []
self.stars = []
self.brightThreshold = brightThreshold
self.mean_par = 0
self.stdev_par = 0
self.mean_ra = 0
self.mean_dec = 0
self.stdev_ra = 0
self.stdev_dec = 0
self.mean_pmra = 0
self.stdev_pmra = 0
self.mean_pmdec = 0
self.stdev_pmdec = 0
self.mean_a_g = 0
self.stdev_a_g = 0
self.mean_e_bp_rp = 0
self.stdev_e_bp_rp = 0
self.mean_par_over_ra = 0
self.stdev_par_over_ra = 0
self.dist_mod = 0
self.turnPoint = 0
self.reddening = 0
self.radDist = 0
self.massLoaded = False
#Catalogued properties
self.name = name
self.clType = "None"
self.pmra_min = -99
self.pmra_max = -99
self.pmdec_min = -99
self.pmdec_max = -99
self.par_min = -99
self.par_max = -99
self.cltpx = -99
self.cltpy = -99
self.noise_cutoff = -99
#Check directory locations
import os
if not os.path.isdir(self.dataPath):
os.mkdir(self.dataPath)
if not os.path.isdir(self.imgPath):
os.mkdir(self.imgPath)
if not os.path.isdir(f"{self.imgPath}/png"):
os.mkdir(f"{self.imgPath}/png")
#Gaia DR2 Implementation
# class starObj:
# def __init__(self,name,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err,ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr,astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_match_obs,astro_sigma5d,match_obs,g_mag,b_mag,r_mag,b_r,b_g,g_r,radvel,radvel_err,variable,teff,a_g,e_bp_rp,lum):
# #Declare instance variables
# self.name = name
# self.ra = float(ra)
# self.ra_err = float(ra_err)
# self.dec = float(dec)
# self.dec_err = float(dec_err)
# self.par = float(par)
# self.par_err = float(par_err)
# self.par_over_err = float(par_over_err)
# self.pmra = float(pmra)
# self.pmra_err = float(pmra_err)
# self.pmdec = float(pmdec)
# self.pmdec_err = float(pmdec_err)
# self.ra_dec_corr = float(ra_dec_corr)
# self.ra_par_corr = float(ra_par_corr)
# self.ra_pmra_corr = float(ra_pmra_corr)
# self.ra_pmdec_corr = float(ra_pmdec_corr)
# self.dec_par_corr = float(dec_par_corr)
# self.dec_pmra_corr = float(dec_pmra_corr)
# self.dec_pmdec_corr = float(dec_pmdec_corr)
# self.par_pmra_corr = float(par_pmra_corr)
# self.par_pmdec_corr = float(par_pmdec_corr)
# self.pmra_pmdec_corr = float(pmra_pmdec_corr)
# self.astro_n_obs = float(astro_n_obs)
# self.astro_n_good_obs = float(astro_n_good_obs)
# self.astro_n_bad_obs = float(astro_n_bad_obs)
# self.astro_gof = float(astro_gof)
# self.astro_chi2 = float(astro_chi2)
# self.astro_noise = float(astro_noise)
# self.astro_noise_sig = float(astro_noise_sig)
# self.astro_match_obs = float(astro_match_obs)
# self.astro_sigma5d = float(astro_sigma5d)
# self.match_obs = float(match_obs)
# self.g_mag = float(g_mag)
# self.b_mag = float(b_mag)
# self.r_mag = float(r_mag)
# self.b_r = float(b_r)
# self.b_g = float(b_g)
# self.g_r = float(g_r)
# self.radvel = float(radvel)
# self.radvel_err = float(radvel_err)
# self.variable = variable
# self.teff = float(teff)
# self.a_g = float(a_g)
# self.e_bp_rp = float(e_bp_rp)
# self.lum = float(lum)
# self.member = 0
# self.binary = 0
# self.radDist = 0
# self.par_over_ra = float(par)/float(ra)
# self.par_over_dec = float(par)/float(dec)
# self.par_over_pmra = float(par)/float(pmra)
# self.par_over_pmdec = float(par)/float(pmdec)
# self.vosaPoints = []
# self.excess = 0
#Gaia DR3 implementation
class starObj:
def __init__(self,name,source_id,ra,ra_err,dec,dec_err,par,par_err,par_over_err,pmra,pmra_err,pmdec,pmdec_err, #Basic astrometrics
ra_dec_corr,ra_par_corr,ra_pmra_corr,ra_pmdec_corr,dec_par_corr,dec_pmra_corr,dec_pmdec_corr,par_pmra_corr,par_pmdec_corr,pmra_pmdec_corr, #Correlations
astro_n_obs,astro_n_good_obs,astro_n_bad_obs,astro_gof,astro_chi2,astro_noise,astro_noise_sig,astro_nu_eff, #Assorted astrometric properties
pseudocolor,pseudocolor_err,ra_pseudocolor_corr,dec_pseudocolor_corr,par_pseudocolor_corr,pmra_pseudoclor_corr,pmdec_pseudocolor_corr, #Pseudocolor
astro_sigma5d,duplicated_source, #More assorted properties
g_flux,g_flux_err,g_mag, #Gaia_G
b_flux,b_flux_err,b_mag, #Gaia_BP
r_flux,r_flux_err,r_mag, #Gaia_RP
b_over_r_excess,b_r,b_g,g_r, #Color indices and excess
radvel,radvel_err,radvel_num_transits,radvel_teff,radvel_feh, #Template Teff and Fe/H used to calculate the radvel
l,b,long,lat): #Galactic l and b, ecliptic long and lat
import numpy as np
#Declare instance variables
self.name = name
self.source_id = source_id
self.ra = float(ra)
self.ra_err = float(ra_err)
self.dec = float(dec)
self.dec_err = float(dec_err)
self.par = float(par)
self.par_err = float(par_err)
self.par_over_err = float(par_over_err)
self.pmra = float(pmra)
self.pmra_err = float(pmra_err)
self.pmdec = float(pmdec)
self.pmdec_err = float(pmdec_err)
self.ra_dec_corr = float(ra_dec_corr)
self.ra_par_corr = float(ra_par_corr)
self.ra_pmra_corr = float(ra_pmra_corr)
self.ra_pmdec_corr = float(ra_pmdec_corr)
self.dec_par_corr = float(dec_par_corr)
self.dec_pmra_corr = float(dec_pmra_corr)
self.dec_pmdec_corr = float(dec_pmdec_corr)
self.par_pmra_corr = float(par_pmra_corr)
self.par_pmdec_corr = float(par_pmdec_corr)
self.pmra_pmdec_corr = float(pmra_pmdec_corr)
self.astro_n_obs = float(astro_n_obs)
self.astro_n_good_obs = float(astro_n_good_obs)
self.astro_n_bad_obs = float(astro_n_bad_obs)
self.astro_gof = float(astro_gof)
self.astro_chi2 = float(astro_chi2)
self.astro_noise = float(astro_noise)
self.astro_noise_sig = float(astro_noise_sig)
self.astro_nu_eff = float(astro_nu_eff)
self.astro_sigma5d = float(astro_sigma5d)
self.duplicated_source = bool(duplicated_source)
self.g_flux = float(g_flux)
self.g_flux_err = float(g_flux_err)
self.g_mag = float(g_mag)
self.b_flux = float(b_flux)
self.b_flux_err = float(b_flux_err)
self.b_mag = float(b_mag)
self.r_flux = float(r_flux)
self.r_flux_err = float(r_flux_err)
self.r_mag = float(r_mag)
self.b_over_r_excess = float(b_over_r_excess)
self.b_r = float(b_r)
self.b_g = float(b_g)
self.g_r = float(g_r)
self.radvel = float(radvel)
self.radvel_err = float(radvel_err)
self.radvel_num_transits=float(radvel_num_transits)
self.radvel_teff = float(radvel_teff)
self.radvel_feh = float(radvel_feh)
self.l = float(l)
self.b = float(b)
self.long = float(long)
self.lat = float(lat)
self.member = 0
self.binary = 0
self.radDist = 0
self.par_over_ra = float(par)/float(ra)
self.par_over_dec = float(par)/float(dec)
self.par_over_pmra = float(par)/float(pmra)
self.par_over_pmdec = float(par)/float(pmdec)
self.normRA = self.ra*np.cos(self.dec*np.pi/180)
self.vosaPoints = []
self.excess = 0
class isochroneObj:
def __init__(self,age=404,feh=404,afe=404,y=404,basedir='isochrones/',subdir='processed',isodir=''):
#Declare instance variables
self.basedir = basedir
self.subdir = subdir
self.isodir = isodir
self.starList = []
self.age = age
self.feh = feh
self.afe = afe
self.y = y
self.name = f"feh_{feh}_afe_{afe}_age_{age}_y_{y}"
self.distance = 0
self.coeff = []
self.g = []
self.br = []
class fakeStarObj:
def __init__(self,g_mag,b_mag,r_mag):
#Declare instance variables
self.g_mag = g_mag
self.b_mag = b_mag
self.r_mag = r_mag
self.b_r = self.b_mag-self.r_mag
self.b_g = self.b_mag-self.g_mag
self.g_r = self.g_mag-self.r_mag
self.score = 0
class mistStar:
def __init__(self,properties):
#Declare instance variables
        for prop,val in properties:
            #Replace infinities with a large finite value, then attach each column as an attribute
            if "inf" in str(val):
                val = 50
            setattr(self, prop, val)
class condensedPoint:
def __init__(self,b_r,g_mag,weight):
self.b_r = b_r
self.g_mag = g_mag
self.weight = weight
class vosaPoint:
def __init__(self,filterID,wavelength,obs_flux,obs_error,flux,flux_error,excess):
self.filterID = filterID
self.wavelength = wavelength
self.obs_flux = obs_flux
self.obs_error = obs_error
self.flux = flux
self.flux_error = flux_error
self.excess = excess
class cataloguedCluster():
def __init__(self,name,clType,pmra_min,pmra_max,pmdec_min,pmdec_max,par_min,par_max,cltpx,cltpy,noise_cutoff):
#Catalogued properties
self.name = str(name)
self.clType = str(clType)
self.pmra_min = float(pmra_min)
self.pmra_max = float(pmra_max)
self.pmdec_min = float(pmdec_min)
self.pmdec_max = float(pmdec_max)
self.par_min = float(par_min)
self.par_max = float(par_max)
self.cltpx = float(cltpx)
self.cltpy = float(cltpy)
self.noise_cutoff = float(noise_cutoff)
class Datum:
from matplotlib import colors as mcolors
colorin = mcolors.to_rgba("red")
colorout = mcolors.to_rgba("blue")
def __init__(self, x, y, include=False):
self.x = x
self.y = y
if include:
self.color = self.colorin
else:
self.color = self.colorout
class LassoManager:
def __init__(self, ax, data, cluster):
from matplotlib.collections import RegularPolyCollection
self.axes = ax
self.canvas = ax.figure.canvas
self.data = data
self.cluster = cluster
self.Nxy = len(data)
facecolors = [d.color for d in data]
self.xys = [(d.x, d.y) for d in data]
self.collection = RegularPolyCollection(
6, sizes=(5,),
facecolors=facecolors,
offsets=self.xys,
transOffset=ax.transData)
ax.add_collection(self.collection)
self.cid = self.canvas.mpl_connect('button_press_event', self.on_press)
def callback(self, verts):
from matplotlib import path
global coords
global clusters
cluster = clusters[self.cluster.name]
facecolors = self.collection.get_facecolors()
p = path.Path(verts)
ind = p.contains_points(self.xys)
cluster.binaries = []
for i in range(len(self.xys)):
if ind[i]:
facecolors[i] = Datum.colorin
star = cluster.filtered[[a.b_r for a in cluster.filtered].index(self.xys[i][0])]
cluster.binaries.append(star)
else:
facecolors[i] = Datum.colorout
self.canvas.draw_idle()
self.canvas.widgetlock.release(self.lasso)
del self.lasso
def on_press(self, event):
from matplotlib.widgets import Lasso
if self.canvas.widgetlock.locked():
return
if event.inaxes is None:
return
self.lasso = Lasso(event.inaxes,
(event.xdata, event.ydata),
self.callback)
# acquire a lock on the widget drawing
self.canvas.widgetlock(self.lasso)
def clusterCatalogue(types='all'):
import numpy as np
import pandas as pd
global data
global catalogue
global cataIn
data = pd.read_csv("catalogue.csv",sep=',',dtype=str)
data = data.to_numpy(dtype=str)
cata = []
for row in data:
cata.append(cataloguedCluster(*row))
if types == 'all':
catalogue = cata
cataIn = True
return
def readClusters(cList=["M67"],basedir="clusters/",smRad=0.35):
#Imports
import numpy as np
import pandas as pd
global clusterList
global clusters
global stars
global clIn
global catalogue
try:
if clIn and len(clusterList) > 0:
for clname in cList:
if clname in clusters:
unloadClusters([clname])
except:
clusterList=[]
#Check the cluster catalogue to load the catalogued properties
if not cataIn:
clusterCatalogue()
#Loop through clusters
for clname in cList:
#Create cluster objects
cluster = clusterObj(name=clname,basedir=basedir)
reference = None
for cl in catalogue:
if str(cl.name) == str(clname):
reference = cl
print(f"Catalogue match for {clname} found")
break
if reference == None:
print(f"Catalogue match for {clname} was not found, please create one")
continue
#Filter all of the methods out of the properties list
properties = [a for a in dir(reference) if not a.startswith('_')]
print(properties)
#exec(f"print(reference.{properties[1]})")
#print(properties)
#Now we have a list of all the attributes assigned to the catalogue (the self.variables)
for p in properties:
prop = getattr(reference,p)
#print(prop)
exec(f"cluster.{p} = prop")
try:
if prop <= -98:
print(f"{clname} does not have a specified catalogue value for {p}")
except:
continue
# if cluster.name == 'NGC752' or cluster.name == 'NGC188':
# cluster.brightThreshold=18
# if "M67" in clname:
# cluster.type = "open"
# if "M35" in clname:
# cluster.type = "open"
# if "NGC188" in clname:
# cluster.type = "open"
# if "NGC752" in clname:
# cluster.type = "open"
# if "IC4651" in clname:
# cluster.type = "open"
# if "NGC2451" in clname:
# cluster.type = "open"
# if "AlphaPer" in clname:
# cluster.type = "open"
# if "M12" in clname:
# cluster.type = "globular"
# if "M3" in clname:
# cluster.type = "globular"
# if "M5" in clname:
# cluster.type = "globular"
# if "M15" in clname:
# cluster.type = "globular"
# if "M53" in clname:
# cluster.type = "globular"
# if "NGC6426" in clname:
# cluster.type = "globular"
# if "NGC6934" in clname:
# cluster.type = "globular"
"""
#Generate wide-field star list
starlist = np.genfromtxt(cluster.dataPath+"narrow.csv", delimiter=",", skip_header=1, usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17))
starlist = preFilter(starlist)
for s in starlist:
star = starObj(s[0],s[1],s[2],s[3],s[4],s[5],s[6],s[7],s[8],s[9],s[10],s[11],s[12],s[13],s[14],s[15],s[16],s[17])
cluster.unfilteredNarrow.append(star)
"""
#Generate narrow-field star list
starlist = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
stars = pd.read_csv(cluster.dataPath+"wide.csv",sep=',',dtype=str)
starlist = starlist.to_numpy(dtype=str)
#starlist = np.genfromtxt(cluster.dataPath+"wide.csv", delimiter=",", skip_header=1)
print(f"{clname} initial length: {len(starlist)}")
starlist = preFilter(starlist)
print(f"{clname} post-prefiltered length: {len(starlist)}")
ramean = np.mean([float(x) for x in starlist[:,1]])
decmean = np.mean([float(x) for x in starlist[:,3]])
for s in starlist:
star = starObj(*s)
cluster.unfilteredWide.append(star)
if np.less_equal(star.g_mag,cluster.brightThreshold):
cluster.unfilteredBright.append(star)
# if np.less_equal(np.sqrt(((star.ra-ramean)*np.cos(np.pi/180*star.dec))**2+(star.dec-decmean)**2),smRad):
# cluster.unfilteredNarrow.append(star)
clusterList.append(cluster)
calcStats(cluster,mode='narrow')
if not 'YSO' in clname:
rmOutliers()
clIn = True
toDict()
def pad(string, pads):
spl = string.split(',')
return '\n'.join([','.join(spl[i:i+pads]) for i in range(0,len(spl),pads)])
def readIso(basedir='isochrones/',subdir='MIST_raw/'):
    #Important note: the ages are rounded to a few decimal places in the Gyr range.
    #As a result, a few dozen isochrones in the kyr range are overwritten because they all
    #round to the same value. I found this to be an issue worth overlooking given that a
    #cluster of that age hasn't been identified yet.
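    #For example (illustrative): log10(age) values of 4.1 and 4.2 (about 12.6 kyr and 15.8 kyr)
    #both give round(10**log_age/1e9, 3) == 0.0, so their generated names collide and one overwrites the other.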
#Imports
import os
import re
global isochrone_headers
global isoList
global isoIn
path = basedir + subdir
isoList = []
for fn in os.listdir(path):
#Read in file
main = open(path+fn).read()
main = main.split("\n")
#Relevant variables from headers
N_iso = int(main[7].split("=")[1])
index = 13
varList = re.sub("\s+", ",", main[5].strip()).split(",")
afe = varList[4]
feh = varList[3]
y = varList[1]
z = varList[2]
v_vcrit = varList[5]
#Column labels
#Replace any number of spaces with a single comma, then replace a few problematic phrases and split the list by commas
isochrone_headers = re.sub("\s+", ",", main[12].replace("2MASS","TwoMASS").replace("[Fe/H]","feh").strip()).split(",")[1:]
for idx in range(0,N_iso):
N_stars = int(re.sub("\s+", "," , main[index-3].split("=")[1]).split(",")[1])
#print(f"Iso = {idx} N_stars = {N_stars}")
#Populate a single isochrone
stars = []
for i in range(index,index+N_stars):
#Send the header and values to the mistStar object
#print(f"i = {i}")
values = [float(a) for a in re.sub("\s+", "," , main[i].strip()).split(",")]
properties = zip(isochrone_headers,values)
stars.append(mistStar(properties))
#Create the isochrone from the list of stars
age = round(10**values[1]/1e9,3)
iso = isochroneObj(age,feh,afe,y)
iso.starList = stars
iso.br = [star.Gaia_BP_EDR3-star.Gaia_RP_EDR3 for star in stars]
iso.g = [star.Gaia_G_EDR3 for star in stars]
isoList.append(iso)
index += N_stars + 5
isoIn = True
toDict()
def checkIsoDupes():
global isochrones
global isoList
names = []
for iso in isoList:
if iso.name in names:
print(iso.name)
else:
names.append(iso.name)
def processIso(basedir='isochrones/',subdir='raw/'):
#Imports
import os
import re
path = basedir + subdir
for fn in os.listdir(path):
main = open(path+fn).read()
part = main.split('\n\n\n')
part[0] = part[0].split('#----------------------------------------------------')[3].split('\n',1)[1]
for a in range(len(part)):
temp = part[a].split('#AGE=')[1].split(' EEPS=')[0]
age = temp.strip()
out = part[a].split('\n',2)[2]
out = re.sub("\s+", ",", out.strip())
out = pad(out,8)
filename = f"{basedir}processed/"+fn.split('.')[0]+'/'+age+".csv"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename,"w") as f:
f.write(out)
def readIsochrones(basedir='isochrones/',subdir='processed/'):
#Imports
import os
import numpy as np
global isoList
global isoIn
isoList=[]
for folder in os.listdir(basedir+subdir):
for fn in os.listdir(basedir+subdir+folder):
#Get the age and metallicities of the isochrones
ageStr = fn.split('.csv')[0]
fehStr = folder.split('feh')[1].split('afe')[0]
afeStr = folder.split('afe')[1].split('y')[0]
if 'y' in folder:
yStr = folder.split('y')[1]
else:
yStr = '0'
feh = float(fehStr[1]+fehStr[2])/10
afe = float(afeStr[1])/10
age = float(ageStr)
y = int(yStr)
if fehStr[0] == 'm':
feh = feh*-1
if afeStr[0] == 'm':
afe = afe*-1
#Debug
#print(f"folder:{folder} fn:{fn} fehStr:{fehStr} feh:{feh} afeStr:{afeStr} afe:{afe} ageStr:{ageStr} age:{age}")
#Create isochone object
iso = isochroneObj(age=age,feh=feh,afe=afe,y=y,basedir=basedir,subdir=subdir,isodir=folder+'/')
isoArr = np.genfromtxt(basedir+subdir+folder+"/"+fn, delimiter=",")
for s in isoArr:
star = fakeStarObj(s[5],s[6],s[7])
iso.starList.append(star)
iso.br.append(s[6]-s[7])
iso.g.append(s[5])
isoList.append(iso)
isoIn = True
toDict()
def preFilter(starList):
#Imports
import numpy as np
final = []
    #Columns to be checked for NaN values. If a NaN is present in any of these columns, the entry (star) is discarded from the "unfiltered" list
#2-12 is the astrometry
#42,45,48 are the g,bp,rp magnitudes
#50-52 are the color indices
cols = list(range(2,13))+[42]+[45]+[48]+list(range(50,53))
#Filters out NaN values except for the last two columns
for n,s in enumerate(starList):
dump = False
for c in cols:
if np.isnan(float(s[c])):
dump = True
if not dump:
final.append(starList[n])
#Reshapes array
final = np.array(final)
return final
def rmOutliers():
#Imports
global clusterList
import numpy as np
for cluster in clusterList:
if cluster.clType.lower() == "globular":
scale = 4
else:
scale = 1.5
#Variables
pmthreshold = 5
pmpthreshold = 50
parthreshold = 5
posthreshold = 5
toRemove=[]
#print(cluster.mean_pmra,cluster.mean_pmdec,cluster.stdev_pmra,cluster.stdev_pmdec)
#print(len(cluster.unfilteredWide))
#Classifies outliers
for star in cluster.unfilteredWide:
if cluster.name == "NGC188":
if star.ra > 100:
toRemove.append(star)
#print(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),star.pmra,star.pmdec)
if np.greater(np.sqrt(((star.pmra-cluster.mean_pmra)*np.cos(np.pi/180*star.pmdec))**2+(star.pmdec-cluster.mean_pmdec)**2),pmthreshold) or np.greater(np.sqrt(((star.ra-cluster.mean_ra)*np.cos(np.pi/180*star.dec))**2+(star.dec-cluster.mean_dec)**2),posthreshold) or np.greater(abs(star.par),parthreshold):
#if np.greater(np.sqrt((star.pmra-cluster.mean_pmra)**2+(star.pmdec-cluster.mean_pmdec)**2),threshold):
toRemove.append(star)
#Removes the outliers from the array
for rm in toRemove:
cluster.unfilteredWide.remove(rm)
try:
cluster.unfilteredNarrow.remove(rm)
except ValueError:
pass
#print(len(cluster.unfilteredWide))
def calcStats(cluster,mode='filtered'):
#Imports
import numpy as np
#Reads in all the values for a cluster
par=[]
par_err=[]
ra=[]
dec=[]
pmra=[]
pmdec=[]
gmag = []
br = []
# a_g=[]
# e_bp_rp=[]
loopList=[]
checkLoaded([cluster])
if type(cluster) == str:
cluster = clusters[cluster]
if mode == 'bright':
loopList = cluster.filteredBright
elif mode == 'narrow':
loopList = cluster.unfilteredNarrow
elif mode == 'filtered':
loopList = cluster.filtered
for star in loopList:
par.append(star.par)
par_err.append(star.par_err)
pmra.append(star.pmra)
pmdec.append(star.pmdec)
ra.append(star.ra)
dec.append(star.dec)
gmag.append(star.g_mag)
br.append(star.b_r)
# if not np.isnan(star.a_g) and not star.a_g == 0:
# a_g.append(star.a_g)
# if not np.isnan(star.e_bp_rp) and not star.e_bp_rp == 0:
# e_bp_rp.append(star.e_bp_rp)
#Calculate the statistics
cluster.mean_par = np.mean(par[:])
cluster.mean_ra = np.mean(ra[:])
cluster.mean_dec = np.mean(dec[:])
cluster.stdev_ra = np.std(ra[:])
cluster.stdev_dec = np.std(dec[:])
cluster.stdev_par = np.std(par[:])
cluster.mean_pmra = np.mean(pmra[:])
cluster.stdev_pmra = np.std(pmra[:])
cluster.mean_pmdec = np.mean(pmdec[:])
cluster.stdev_pmdec = np.std(pmdec[:])
# cluster.mean_a_g = np.mean(a_g[:])
# cluster.stdev_a_g = np.std(a_g[:])
# cluster.mean_e_bp_rp = np.mean(e_bp_rp[:])
# cluster.stdev_e_bp_rp = np.std(e_bp_rp[:])
cluster.mean_par_over_ra = np.mean([x/y for x,y in zip(par,ra)])
cluster.stdev_par_over_ra = np.std([x/y for x,y in zip(par,ra)])
cluster.mean_par_err = np.mean(par_err[:])
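    #Distance modulus: m - M = 5*log10(d) - 5 with d in parsecs, where d = 1000/parallax[mas]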
cluster.dist_mod = 5*np.log10(1000/cluster.mean_par)-5
for star in loopList:
star.radDist = np.sqrt((star.ra-cluster.mean_ra)**2+(star.dec-cluster.mean_dec)**2)
star.normRadDist = np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-cluster.mean_ra*np.cos(cluster.mean_dec*np.pi/180))**2+(star.dec-cluster.mean_dec)**2)
def saveClusters(cList):
#Imports
import dill
saveResults(cList)
#Creates a pickle file with all of the saved instances
for cl in cList:
cluster = clusters[cl]
#print(cluster.name,id(cluster))
with open(f"{cluster.dataPath}filtered.pk1", 'wb') as output:
dill.dump(cluster, output)
def saveIsochrones():
#Imports
import dill
global clusterList
#Creates a pickle file with all of the saved instances
for iso in isoList:
with open(f"{iso.basedir}pickled/{iso.name}.pk1", 'wb') as output:
dill.dump(iso, output)
def loadClusters(clusterNames=["M67"],basedir='clusters/'):
#Imports
import dill
global clusterList
global clusters
global clIn
for clusterName in clusterNames:
if clusterName in clusters:
unloadClusters([clusterName])
#Reads in instances from the saved pickle file
with open(f"{basedir}{clusterName}/data/filtered.pk1",'rb') as input:
cluster = dill.load(input)
clusterList.append(cluster)
clIn = True
toDict()
def loadIsochrones(basedir='isochrones/'):
#Imports
import dill
import os
global isoList
global isoIn
isoList=[]
for fn in os.listdir(basedir+"pickled/"):
#Reads in instances from the saved pickle file
with open(f"{basedir}pickled/{fn}",'rb') as input:
iso = dill.load(input)
isoList.append(iso)
isoIn = True
toDict()
def unloadClusters(cList=['all']):
#Imports
global clusterList
global clusters
if 'all' in cList:
cList = [cluster.name for cluster in clusterList]
for cl in cList:
cluster = clusters[cl]
clusterList.remove(cluster)
clusters.pop(cl)
del cluster
def dataProcess(cList,load=False,fit=True,unload=True,plotting=True,member=True,save=True,close=True):
    #This method is largely intended for re-processing a bulk batch of clusters that have already been processed,
    #meaning they already have condensed point lists or their fitting quality is already known
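    #For example (illustrative): dataProcess(["M67","NGC188"], load=True, plotting=False) re-fits two
    #previously processed clusters from their pickled data without regenerating the standard plots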
#Imports
import matplotlib.pyplot as plt
global clusterList
global clusters
global closePlots
if not isoIn:
loadIsochrones()
loadList = ["M15","M12","M39","M46","M67","NGC188","NGC2355","NGC2158","IC4651","NGC6791","NGC2360","NGC2204"]
for cl in cList:
if cl in loadList:
condensing = "load"
else:
condensing = "auto"
if load:
loadClusters([cl])
else:
readClusters([cl])
turboFilter([cl])
if close:
plt.close('all')
if fit:
turboFit([cl],condensing=condensing)
if plotting:
plot([cl],['pos','pm','cmd','quiver','iso'])
if close:
plt.close('all')
if member:
proxyMatch([cl])
boundedStats([cl],saveCl=False,unloadCl=False)
membership(cl,mode='filtered')
membership(cl,mode='bounded',N=75)
plt.close('all')
if save:
saveClusters([cl])
saveResults([cl])
if unload:
unloadClusters([cl])
def turboFilter(cl=["all"]):
#Imports
global clusterList
cList = checkLoaded(cl)
for clus in cList:
cluster = clusters[clus]
cluster.filteredBright,cluster.brightmag = pmFilter(cluster.unfilteredBright,cluster.name)
print(f"==========================={cluster.name}===========================")
print(f"bright unf/pm fil: {len(cluster.unfilteredBright)} / {len(cluster.filteredBright)}")
calcStats(cluster,mode='bright')
distFilter(cluster)
print(f"dist(all): {len(cluster.distFiltered)}")
cluster.filtered,cluster.mag = pmFilter(cluster.distFiltered,cluster.name)
#Manual filtering of extraneous points
cluster.filtered,cluster.mag = manualFilter(cluster)
print(f"pm(all): {len(cluster.filtered)}")
customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
magnitude = cutNoise(cluster)
print(f"noise cutoff: mag {magnitude} length {len(cluster.filtered)}")
customPlot('b_r','g_mag',cluster.name,'filtered',iso=True,square=False,color='astro_sigma5d')
"""
for i in range(10):
print(f"{cluster.filtered[i].b_r} {cluster.mag[i,0]}")
"""
calcStats(cluster,mode='filtered')
setFlag()
def manualFilter(cluster):
    #This exists to remove points that may or may not belong to the cluster but are preventing the fit from converging
if "M35" in cluster.name:
filtered = [star for star in cluster.filtered if star.g_mag > 9 or star.b_r < 1]
return filtered,magList(filtered)
else:
return cluster.filtered,cluster.mag
def magList(filtered):
    import numpy as np
    mag = np.empty((0,2))
    for star in filtered:
        mag = np.r_[mag,[[star.b_r,star.g_mag]]]
    return mag
def pmFilter(starList,name):
#Imports
import numpy as np
filtered = []
mag = np.empty((0,2))
cluster = clusters[name]
assert cluster.name == name
#Apply an elliptical filter to the proper motion space
pmra_width = (cluster.pmra_max-cluster.pmra_min)/2
pmdec_width = (cluster.pmdec_max-cluster.pmdec_min)/2
pmra_center = cluster.pmra_min+pmra_width
pmdec_center = cluster.pmdec_min+pmdec_width
print(pmra_center,pmdec_center)
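    #Keep stars inside the ellipse ((pmra-center_ra)/a)^2 + ((pmdec-center_dec)/b)^2 <= 1,
    #where a and b are the half-widths of the catalogued proper motion box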
for star in starList:
if (star.pmra-pmra_center)**2/pmra_width**2 + (star.pmdec-pmdec_center)**2/pmdec_width**2 <= 1:
filtered.append(star)
mag = np.r_[mag,[[star.b_r,star.g_mag]]]
assert len(filtered) > 1
print(len(filtered))
return filtered,mag
def distFilter(cluster):
#Imports
import numpy as np
if cluster.par_min == 0 or cluster.par_max == 0:
threshold = 1.5*cluster.mean_par
print(f"{cluster.name} filtered using mean parallax")
for star in cluster.unfilteredWide:
if not np.greater(np.abs(star.par-cluster.mean_par),threshold*cluster.stdev_par):
cluster.distFiltered.append(star)
else:
print(f"{cluster.name} filtered using min & max parallax values")
for star in cluster.unfilteredWide:
if star.par > cluster.par_min and star.par < cluster.par_max:
cluster.distFiltered.append(star)
def cutNoise(cluster):
#Imports
import numpy as np
stars = sorted(cluster.filtered,key=lambda x: x.g_mag)
new = []
newMag = np.empty((0,2))
if cluster.noise_cutoff <= -98:
threshold = 1
print(f"{cluster.name} noise cutoff undefined, using default")
else:
threshold = cluster.noise_cutoff
bad = 0
badCut = 5
for i,s in enumerate(stars):
if s.astro_sigma5d > threshold:
bad += 1
if bad >= badCut:
break
else:
new.append(s)
newMag = np.r_[newMag,[[s.b_r,s.g_mag]]]
cluster.filtered = new
cluster.mag = newMag
return s.g_mag
def turboFit(cl=["all"],condensing='auto',weighting='pos',tp="catalogue",minScore=0.001):
#Typical use cases are auto, pos, catalogue --OR-- manual, equal, catalogue
#Imports
import time
global clusterList
cList = checkLoaded(cl)
print("=========================Fitting=========================")
t0 = time.time()
status = condense(cList,condensing,weighting,tp,minScore)
if status == "Suspended":
return
for cluster in cList:
redFitting(cluster,minScore,weighting)
t1 = time.time()
print(f"Total {cluster.name} fit runtime: {t1-t0} seconds")
def redFitting(cluster,minScore,weighting):
#Imports
import numpy as np
import math
from sys import stdout
from time import sleep
global clusterList
if type(cluster) == str:
cluster = clusters[cluster]
cluster.iso = []
redMin = 0
redMax = 0.7
step = 0.05
redList = [round(x,2) for x in np.arange(redMin,redMax+step,step)]
for reddening in redList:
stdout.write(f"\rCurrent reddening value for {cluster.name}: {reddening:.2f} / ({redList[0]:.2f}->{redList[-1]:.2f})")
shapeFit(cluster,reddening,minScore,weighting)
stdout.flush()
sleep(0.1)
    #Scores are stored as strings in the numpy array, so cast to float for a numeric sort
    cluster.iso = sorted(cluster.iso,key=lambda x: float(x[1]))
best = float(cluster.iso[0][2])
print(f"\nCoarse-step reddening for {cluster.name}: {best}")
subMin = best - 0.05
subMax = best + 0.05
substep = 0.01
if subMin < 0:
subMin = 0
subList = [round(x,2) for x in np.arange(subMin,subMax+substep,substep) if not round(x,2) in redList and round(x,2) > subMin and round(x,2) < subMax]
for reddening in subList:
stdout.write(f"\rCurrent fine-step reddening value for {cluster.name}: {reddening:.2f} / ({subList[0]:.2f}->{subList[-1]:.2f})")
shapeFit(cluster,reddening,minScore,weighting)
stdout.flush()
sleep(0.1)
    cluster.iso = sorted(cluster.iso,key=lambda x: float(x[1]))
cluster.reddening = float(cluster.iso[0][2])
cluster.fit_age = float(isochrones[cluster.iso[0][0]].age)
cluster.fit_feh = float(isochrones[cluster.iso[0][0]].feh)
cluster.fit_afe = float(isochrones[cluster.iso[0][0]].afe)
cluster.fit_y = float(isochrones[cluster.iso[0][0]].y)
#Unrelated properties but I needed somewhere to assign them
setattr(cluster,'meanDist',1000/cluster.mean_par)
meanL = np.mean([a.l*np.pi/180 for a in cluster.filtered])
galDist = 8000 #pc
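    #Law of cosines in the Galactic plane: d_gc^2 = d^2 + R0^2 - 2*d*R0*cos(l), with R0 = galDist (assumed 8 kpc)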
gd = cluster.meanDist**2 + galDist**2 - 2*cluster.meanDist*galDist*np.cos(meanL)
setattr(cluster,'meanGalacticDist',gd**0.5)
print(f"\nReddening for {cluster.name}: {best}")
def shapeFit(cluster,reddening,minScore,weighting):
#Imports
import numpy as np
import shapely.geometry as geom
global isoList
conversion = 2.1
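    #2.1 is taken as the approximate extinction ratio A_G / E(BP-RP) (assumed value), so each isochrone is
    #shifted by `reddening` in color and by dist_mod + 2.1*reddening in G; the score is the sum of squared,
    #weight-scaled point-to-line distances, floored at minScore per point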
isoFitList = np.empty((0,3))
for iso in isoList:
isoLine = geom.LineString(tuple(zip([x+reddening for x in iso.br],[x+cluster.dist_mod+conversion*reddening for x in iso.g])))
dist = []
for star in cluster.condensed:
starPt = geom.Point(star.b_r,star.g_mag)
#print(starPt.distance(isoLine))
pointDist = np.abs(starPt.distance(isoLine))*star.weight
if pointDist < minScore*star.weight:
pointDist = minScore*star.weight
dist.append(pointDist**2)
isoScore = np.sum(dist[:])
#print(isoScore,dist)
#print(list(geom.shape(isoLine).coords))
isoFitList = np.r_[isoFitList,[[iso.name,float(isoScore),float(reddening)]]]
#compareInstances(iso,cluster.iso[-1][0])
#print(isoScore)
cluster.iso.extend(isoFitList)
#best = cluster.iso[1][0]
#specificPlot(cluster.name,best.name,reddening)
#print(f"\nFirst point of best fit: {best.br[0]+reddening},{best.g[0]+conversion*reddening+cluster.dist_mod}")
def onclick(x,y,fig,ax,cluster,minScore,weighting,newList):
def func(event):
import matplotlib.pyplot as plt
global coords
ix, iy = event.xdata, event.ydata
if str(event.button) == "MouseButton.RIGHT":
for i,(cx,cy) in enumerate(coords):
if abs(ix-cx) <= 0.075 and abs(iy-cy) <= 0.25:
coords.pop(i)
ax.clear()
ax.scatter(x,y,s=0.5,color='dimgray')
ax.invert_yaxis()
ax.scatter([a[0] for a in coords],[a[1] for a in coords],c='red',s=10)
plt.gcf().canvas.draw_idle()
if str(event.button) == "MouseButton.LEFT":
coords.append((ix, iy))
ax.scatter(ix,iy,c='red',s=10)
plt.gcf().canvas.draw_idle()
if str(event.button) == "MouseButton.MIDDLE":
fig.canvas.mpl_disconnect(cid)
plt.close(fig)
updateCondensed(cluster,minScore,weighting,newList)
if len(coords) >= 100:
fig.canvas.mpl_disconnect(cid)
plt.close(fig)
updateCondensed(cluster,minScore,weighting,newList)
return
return func
def updateCondensed(cluster,minScore,weighting,newList):
#Imports
import numpy as np
global coords
condensed = []
for point in coords:
if cluster.clType.lower() == "globular" or weighting.lower() == "equal":
weight = 1
else:
#Automatic weighting scheme currently unsupported for manual condensed point definition,
#but the framework is here to be able to insert it without having to worry about it being
#passed around from function to function
weight = 1
condensed.append(condensedPoint(point[0],point[1],weight))
if cluster.reddening == 0:
cluster.condensed0 = condensed
cluster.condensed = condensed
np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
redFitting(cluster,minScore,weighting)
if len(newList) > 0:
turboFit(newList,'manual',weighting,'catalogue',minScore)
return
def find_nearest(array, value):
#Imports
import numpy as np
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def testCluster(name='feh_0.00_afe_0.00_age_0.141_y_0.2703'):
#Imports
import numpy as np
global clusterList
global clIn
iso = isochrones[name]
test = clusterObj('test')
filtered = [starObj('fake',0,1,0,1,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,a.Gaia_G_EDR3,0,0,0,0,0,0,0,a.Gaia_BP_EDR3-a.Gaia_RP_EDR3,0,0,0,0,0,0,0,0,0,0,0) for a in iso.starList]
test.filtered = filtered
mag = np.empty((0,2))
for star in test.filtered:
mag = np.r_[mag,[[star.b_r,star.g_mag]]]
test.mag = mag
if not 'test' in clusters:
clusterList.append(test)
else:
idx = clusterList.index(clusters['test'])
clusterList.pop(idx)
clusterList.append(test)
clIn = True
toDict()
def condense(cList,condensing,weighting,tp,minScore=0.001):
#Imports
import numpy as np
global isoList
global mag
for cluster in cList:
if type(cluster) == str:
cluster = clusters[cluster]
cList[cList.index(cluster.name)] = cluster
#Creates mag arrays to be used in place of the filtered star objects
mag = cluster.mag[:,:]
        #Sort by G magnitude so the brightness slices below run from brightest to dimmest
        mag = mag[mag[:,1].argsort()]
gmag = list(mag[:,1])
gmin = mag[0,1]
gmax = mag[-1,1]
div = 50
seg = (gmax-gmin)/div
minpoints = 1
#The array that will become the condensed points list
condensed = np.empty((0,3))
turnPoints = []
if condensing.lower() == "load":
global pts
pts = np.genfromtxt(f"{cluster.dataPath}condensed.csv",delimiter=',')
condensed = []
for point in pts:
                #Missing alternate weighting schemes, but they can be implemented *here*
condensed.append(condensedPoint(point[0],point[1],1))
cluster.condensed = condensed
cluster.condensed0 = condensed
continue
#Manual point definition
if condensing.lower() == "manual":
import matplotlib.pyplot as plt
global cid
global coords
coords = []
if len(cList) == 1:
newList = []
else:
newList = cList[cList.index(cluster)+1:]
x,y = mag[:,0],mag[:,1]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x,y,s=0.25,color='dimgray')
ax.invert_yaxis()
hook = onclick(x,y,fig,ax,cluster,minScore,weighting,newList)
cid = fig.canvas.mpl_connect('button_press_event', hook)
return "Suspended"
#Vertically stacked slices in brightness
for i in range(div):
sliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
#print(np.array(sliced).shape)
            #Skip foreseen problems with empty arrays
if len(sliced) < minpoints:
continue
condensed = np.r_[condensed,[[np.median(sliced[:,0]),np.median(sliced[:,1]),0]]]
condensed = condensed[::-1]
#Uses defined turning points in the cluster catalogue
if tp.lower() == "catalogue":
if cluster.cltpx <= -98 and cluster.cltpy <= -98:
                tp = "auto"
#If no turning point is found, or auto is specified, then this section of code
#attempts to find the turning point through steep gradient changes in the main sequence
if tp.lower() == "auto":
#Criteria for the line that forms the basis of the gradient change method
start = 4
end = 11
theta_crit = 5
#Creates a slope-intercept fit for the lower main sequence
basex = [a[0] for a in condensed[start:end]]
basey = [a[1] for a in condensed[start:end]]
base = np.polyfit(basex,basey,1)
#Travels up the main sequence
for i,point in enumerate(condensed):
if i == start:
continue
#Creates a fit line between the start point and the current point
x = [point[0],condensed[start,0]]
y = [point[1],condensed[start,1]]
lin = np.polyfit(x,y,1)
#Calculates an angle between the new line and the lower main sequence
point[2] = 180/np.pi*np.arctan(abs( (base[0]-lin[0])/(1+base[0]*lin[0]) ))
#If the angle between the two lines is large enough, the point is considered
#to be a candidate turning point, and is appended to the list of candidates
if point[2] > theta_crit and i > end:
turnPoints.append(point)
#Analysis plot showing the theta value for each condensed point
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(condensed[:,0],condensed[:,1],c=condensed[:,2])
plt.set_cmap('brg')
plt.gca().invert_yaxis()
clb = plt.colorbar()
clb.ax.set_title("Theta")
plt.savefig(f'condensed_{cluster.name}')
#If no automatic turning point is found, ends the method here
if len(turnPoints) == 0:
print("No turning point identified for {cluster.name}")
return
else:
#Identifies the proper turning point as a 5% color offset of the dimmest turning point candidate
turnPoints = sorted(turnPoints,key=lambda x: x[1])
tp = turnPoints[-1]
tp[0] = tp[0] - 0.05*np.abs(tp[0])
cluster.turnPoint = tp
#Stores the condensed point list
cl = []
for point in condensed:
cl.append(condensedPoint(point[0],point[1],point[2]))
cluster.condensedInit = cl
# [ B-R , G , Theta ]
print(f"{cluster.name} Turning Point: {cluster.turnPoint}")
#Assuming the undefined catch for manual would be caught the first time around
        if type(tp) == str and tp.lower() == "catalogue":
cluster.turnPoint = [cluster.cltpx,cluster.cltpy]
if cluster.clType.lower() == "open":
#Recalc with the turnPoint limit enforced - Ignore blue stragglers
condensed = np.empty((0,3))
condensed_giant = np.empty((0,3))
yList = []
#Vertically stacked slices in brightness
for i in range(div):
rawSliced = mag[gmag.index(find_nearest(gmag,gmin+i*seg)):gmag.index(find_nearest(gmag,gmin+(i+1)*seg))]
sliced = np.empty((0,2))
sliced_giant = np.empty((0,2))
for point in rawSliced:
#print(point)
if point[0] >= cluster.turnPoint[0]:
sliced = np.r_[sliced,[[point[0],point[1]]]]
else:
sliced_giant = np.r_[sliced_giant,[[point[0],point[1]]]]
                #Skip foreseen problems with empty arrays
if len(sliced) > 0:
x = np.median(sliced[:,0])
y = np.median(sliced[:,1])
yList.append(y)
condensed = np.r_[condensed,[[x,y,1]]]
if len(sliced_giant) > 3:
xg = np.median(sliced_giant[:,0])
yg = np.median(sliced_giant[:,1])
condensed_giant = np.r_[condensed_giant,[[xg,yg,1]]]
#New turning point found from the reduced data set
newTP = find_nearest(yList,cluster.turnPoint[1])
index = 0
for i,point in enumerate(condensed):
if newTP == point[1]:
index = i
#print(f"{point} found to be TP")
break
assert not index == 0
#Binary star list
tpcut = index + 3
xset = condensed[tpcut:-1,0]
yset = condensed[tpcut:-1,1]
#print(cluster.name,yset)
fit = np.polyfit(xset,yset,1)
#Distance from the main sequence linear fit
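            #Perpendicular distance from the line y = fit[0]*x + fit[1]: |y0 - m*x0 - b| / sqrt(m^2 + 1)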
for star in cluster.filtered:
x0 = star.b_r
y0 = star.g_mag
dist = abs( y0 - fit[0]*x0 - fit[1] ) / np.sqrt(fit[0]**2 + 1)
star.distance_MS = dist
if dist > 0.05 and y0 < fit[0]*x0+fit[1] and x0 > xset[0] and y0 > condensed[index,1]:
cluster.binaries.append(star)
star.binary = 1
else:
star.binary = 0
#Fit weight parameters
N = len(condensed)
beta = -2
index = index - 7
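            #Gaussian-like positional weighting (beta = -2): weight peaks near the shifted turnoff index
            #and falls off with index distance along the condensed sequence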
for i,point in enumerate(condensed):
#point[2] = 5/(1+np.abs(index-i))
if weighting.lower() == 'pos':
point[2] = np.exp(beta*((i-index)/N)**2)
# if cluster.type == "globular":
# condensed = np.vstack((condensed,condensed_giant))
condensed = condensed[::-1]
cl = []
coords = []
for point in condensed:
cl.append(condensedPoint(point[0],point[1],point[2]))
coords.append((point[0],point[1]))
np.savetxt(f"{cluster.dataPath}condensed.csv",coords,delimiter=',')
if cluster.reddening == 0:
cluster.condensed0 = cl
cluster.condensed = cl
# def checkLoaded(cList):
# needsLoading = []
# loaded = []
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# else:
# loaded.append(cl)
# return loaded,needsLoading()
def toDict():
#Imports
global clusterList
global clusters
global isoList
global isochrones
global resultList
global results
global clIn
global isoIn
global resultsIn
if clIn:
clName = []
for cluster in clusterList:
clName.append(cluster.name)
clusters = dict(zip(clName,clusterList))
if isoIn:
isoName = []
for iso in isoList:
isoName.append(iso.name)
isochrones = dict(zip(isoName,isoList))
if resultsIn:
resName=[]
for res in resultList:
resName.append(res.name)
results = dict(zip(resName,resultList))
def plot(cList=['all'],modes=['pos','pm','cmd','quiver','iso'],closePlots=False):
#Imports
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import numpy as np
import os
global clusterList
cList = checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
if not os.path.isdir(f"{cluster.imgPath}/png"):
os.mkdir(f"{cluster.imgPath}/png")
#Position plots
if 'pos' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
ra=[star.ra for star in cluster.filtered]
dec=[star.dec for star in cluster.filtered]
unfnormra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide]
normra=[star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered]
#Unfiltered position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='dimgray')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered.png",dpi=500)
#Filtered position plot
plt.figure(f"{cluster.name}_ra_dec_filtered")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(ra[:],dec[:],s=0.5,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered.png",dpi=500)
#Position overlay
plt.figure(f"{cluster.name}_ra_dec_overlay")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(ra[:],dec[:],s=1,c='midnightblue')
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay.png",dpi=500)
#Normalized
#NormRA = RA*cos(DEC)
#Unfiltered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='dimgray')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_normalized.png",dpi=500)
#Filtered normalized position plot
plt.figure(f"{cluster.name}_ra_dec_filtered_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Filtered Normalized")
plt.scatter(normra[:],dec[:],s=0.5,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_filtered_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_filtered_normalized.png",dpi=500)
#Position overlay normalized
plt.figure(f"{cluster.name}_ra_dec_overlay_normalized")
plt.xlabel('RA*cos(DEC) (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Overlay Normalized")
plt.scatter(unfnormra[:],unfdec[:],s=0.5,c='lightgray')
plt.scatter(normra[:],dec[:],s=1,c='midnightblue')
#plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_overlay_normalized.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_overlay_normalized.png",dpi=500)
#Proper motion plots
if 'pm' in modes:
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
pmra=[star.pmra for star in cluster.filtered]
pmdec=[star.pmdec for star in cluster.filtered]
unfpara=[star.par for star in cluster.unfilteredWide]
para=[star.par for star in cluster.filtered]
x0 = cluster.pmra_min
x1 = cluster.pmra_max
y0 = cluster.pmdec_min
y1 = cluster.pmdec_max
width = x1-x0
scale = 5
subscale = 2
xmin = x0-scale*width
xmax = x1+scale*width
ymin = y0-scale*width
ymax = y1+scale*width
sxmin = x0-subscale*width
sxmax = x1+subscale*width
symin = y0-subscale*width
symax = y1+subscale*width
#Unfiltered proper motion plot
plt.figure(f"{cluster.name}_pm_unfiltered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_unfiltered_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_unfiltered_closeup.png",dpi=500)
#Filtered proper motion plot
plt.figure(f"{cluster.name}_pm_filtered")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Filtered")
plt.scatter(pmra[:],pmdec[:],s=0.5,c='midnightblue')
# plt.xlim([xmin,xmax])
# plt.ylim([ymin,ymax])
plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_filtered.png",dpi=500)
#Proper motion overlay
plt.figure(f"{cluster.name}_pm_overlay")
plt.xlabel(r'PMRA ($mas*yr^{-1}$)')
plt.ylabel(r'PMDEC ($mas*yr^{-1}$)')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unfpmra[:],unfpmdec[:],s=0.5,c='lightgray')
plt.scatter(pmra[:],pmdec[:],s=1,c='midnightblue')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay.png",dpi=500)
plt.xlim([sxmin,sxmax])
plt.ylim([symin,symax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_overlay_closeup.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_overlay_closeup.png",dpi=500)
#Unfiltered PM/Parallax
plt.figure(f"{cluster.name}_pm_over_parallax_unfiltered")
plt.xlabel('PMRA / Parallax')
plt.ylabel('PMDEC / Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a/b for a,b in zip(unfpmra,unfpara)],[a/b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_over_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_over_parallax_unfiltered.png",dpi=500)
#Unfiltered PM*Parallax
plt.figure(f"{cluster.name}_pm_times_parallax_unfiltered")
plt.xlabel('PMRA * Parallax')
plt.ylabel('PMDEC * Parallax')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter([a*b for a,b in zip(unfpmra,unfpara)],[a*b for a,b in zip(unfpmdec,unfpara)],s=0.5,c='dimgray')
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
# plt.axis("square")
plt.savefig(f"{cluster.imgPath}{cluster.name}_pm_times_parallax_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_pm_times_parallax_unfiltered.png",dpi=500)
#CMD plots
if 'cmd' in modes:
unfgmag=[star.g_mag for star in cluster.unfilteredWide]
unf_b_r=[star.b_r for star in cluster.unfilteredWide]
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
bright_b_r = [x.b_r for x in cluster.filteredBright]
bright_gmag = [x.g_mag for x in cluster.filteredBright]
par_b_r = [x.b_r for x in cluster.distFiltered]
par_gmag = [x.g_mag for x in cluster.distFiltered]
#Reddening Correction
plt.figure(f"{cluster.name}_reddening_CMD")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('G Mag')
plt.title(f"{cluster.name} Reddening = {cluster.reddening:.2f}")
plt.scatter(b_r[:],gmag[:],s=0.5,c='dimgray',label='Observed')
plt.arrow(b_r[int(len(b_r)/2)]-cluster.reddening,gmag[int(len(gmag)/2)]-2.1*cluster.reddening,cluster.reddening,2.1*cluster.reddening,color='red')
plt.scatter([s-cluster.reddening for s in b_r[:]],[s-2.1*cluster.reddening for s in gmag[:]],s=1,c='midnightblue',label='Corrected')
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_reddening_CMD.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_reddening_CMD.png",dpi=500)
#Unfiltered CMD plot
plt.figure(f"{cluster.name}_CMD_unfiltered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Unfiltered")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_unfiltered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_unfiltered.png",dpi=500)
#Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax & Proper Motion Filtered")
plt.scatter(b_r[:],gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_filtered.png",dpi=500)
#CMD overlay
plt.figure(f"{cluster.name}_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Overlay")
plt.scatter(unf_b_r[:],unfgmag[:],s=0.5,c='dimgray')
plt.scatter(b_r[:],gmag[:],s=1,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_overlay.png",dpi=500)
#Condensed CMD overlay
plt.figure(f"{cluster.name}_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Condensed Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_condensed_CMD_overlay.png",dpi=500)
#Weighted CMD overlay
plt.figure(f"{cluster.name}_weighted_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Weighted Overlay")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening for s in gmag],s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.set_cmap('brg')
clb = plt.colorbar()
clb.ax.set_title("Weight")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_weighted_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_weighted_CMD_overlay.png",dpi=500)
#Initial Condensed CMD overlay
plt.figure(f"{cluster.name}_initial_condensed_CMD_overlay")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Initial Condensed Overlay")
plt.scatter(b_r,gmag,s=0.5,c='dimgray',label='Data')
plt.scatter([s.b_r for s in cluster.condensedInit],[s.g_mag for s in cluster.condensedInit],s=5,c='red',label='Proxy Points')
try:
plt.axvline(x=cluster.turnPoint[0] - cluster.reddening,linestyle='--',color='midnightblue',linewidth=0.8,label='95% of Turning Point')
except:
print(f"No turning point found for {cluster.name}")
plt.legend()
plt.savefig(f"{cluster.imgPath}{cluster.name}_initial_condensed_CMD_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_initial_condensed_CMD_overlay.png",dpi=500)
#Brightness-PM Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_bright_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Bright-Only Proper Motion Filtered")
plt.scatter(bright_b_r[:],bright_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_bright_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_bright_filtered.png",dpi=500)
#Parallax Filtered CMD plot
plt.figure(f"{cluster.name}_CMD_parallax_filtered")
plt.gca().invert_yaxis()
plt.xlabel('BP-RP')
plt.ylabel('Apparent G Mag')
plt.title(f"{cluster.name} Parallax Filtered")
plt.scatter(par_b_r[:],par_gmag[:],s=0.5,c='midnightblue')
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_parallax_filtered.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_parallax_filtered.png",dpi=500)
if 'quiver' in modes:
unfra=[star.ra for star in cluster.unfilteredWide]
unfdec=[star.dec for star in cluster.unfilteredWide]
unfpmra=[star.pmra for star in cluster.unfilteredWide]
unfpmdec=[star.pmdec for star in cluster.unfilteredWide]
x0 = min([s.ra for s in cluster.filtered])
x1 = max([s.ra for s in cluster.filtered])
y0 = min([s.dec for s in cluster.filtered])
y1 = max([s.dec for s in cluster.filtered])
width = x1-x0
scale = 0.25
xmin = x0+scale*width
xmax = x1-scale*width
ymin = y0+scale*width
ymax = y1-scale*width
#Unfiltered position quiver plot
plt.figure(f"{cluster.name}_ra_dec_unfiltered_quiver")
plt.xlabel('RA (Deg)')
plt.ylabel('DEC (Deg)')
plt.title(f"{cluster.name} Unfiltered")
ax = plt.gca()
ax.quiver(unfra[:],unfdec[:],unfpmra[:],unfpmdec[:],color='midnightblue',width=0.003,scale=400,scale_units='width')
plt.axis("square")
plt.gcf().set_size_inches(10,10)
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver.png",dpi=500)
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.savefig(f"{cluster.imgPath}{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_ra_dec_unfiltered_pm_quiver_zoom.png",dpi=500)
#Isochrone plots
if 'iso' in modes:
gmag=[star.g_mag for star in cluster.filtered]
b_r=[star.b_r for star in cluster.filtered]
isochrone = isochrones[cluster.iso[0][0]]
#Isochrone best fit
plt.figure(f"{cluster.name}_Iso_best")
plt.gca().invert_yaxis()
plt.xlabel('Dereddened BP-RP')
plt.ylabel('Corrected Absolute G Mag')
plt.title(f"{cluster.name} Isochrone Best Fit")
plt.scatter([s - cluster.reddening for s in b_r],[s - 2.1*cluster.reddening-cluster.dist_mod for s in gmag],s=0.5,c='dimgray',label='Cluster')
isoLabels = isochrone.name.split('_')
isoLabel = r"$[\frac{Fe}{H}]$" + "=" + isoLabels[1] + "\n" \
+ r"$[\frac{\alpha}{Fe}]$" + "=" + isoLabels[3] + "\n" \
+ r"$[Y]$" + "=" + isoLabels[7] + "\n" \
+ "Age" + "=" + isoLabels[5] + " Gyr"
plt.plot(isochrone.br,isochrone.g,c='midnightblue',label=isoLabel)
plt.scatter([s.b_r - cluster.reddening for s in cluster.condensed],[s.g_mag - 2.1*cluster.reddening-cluster.dist_mod for s in cluster.condensed],s=5,c='red',label='Cluster Proxy')
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
h,l = plt.gca().get_legend_handles_labels()
h.insert(0,extra)
l.insert(0,f"Reddening: {cluster.reddening}")
plt.legend(h,l)
plt.savefig(f"{cluster.imgPath}{cluster.name}_CMD_Iso_BestFit.pdf")
plt.savefig(f"{cluster.imgPath}png/{cluster.name}_CMD_Iso_BestFit.png",dpi=500)
#Membership plots
if 'membership' in modes:
proxyMatch([cl])
boundedStats([cl],saveCl=False,unloadCl=False)
membership(cl,mode='filtered')
membership(cl,mode='bounded',N=50)
#3D Position plots
if '3D' in modes:
A = [a.ra * np.pi/180 for a in cluster.filtered]
B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
C = [1/(1000*c.par) for c in cluster.filtered]
x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
z = [c*np.sin(b) for b,c in zip(B,C)]
r = [np.sqrt(a**2+b**2) for a,b in zip(x,y)]
theta = [np.arctan(b/a) for a,b in zip(x,y)]
plt.figure(f"{cluster.name}_3D_Position")
ax = plt.axes(projection='3d')
ax.scatter3D(x,y,z)
ax.scatter(0,0,0,color='red')
scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
if closePlots:
plt.close('all')
# def Plot3D(cList):
# #Imports
# import matplotlib.pyplot as plt
# import numpy as np
# global clusterList
# needsLoading=[]
# plt.figure(f"3D_Position_Ensemble")
# ax = plt.axes(projection='3d')
# for cl in cList:
# if not cl in clusters:
# needsLoading.append(cl)
# if not len(needsLoading) == 0:
# loadClusters(needsLoading)
# for cl in cList:
# cluster = clusters[cl]
# A = [a.ra * np.pi/180 for a in cluster.filtered]
# B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
# C = [1/(0.001*c.par) for c in cluster.filtered]
# #Flatten radially
# C = [np.mean(C)]*len(C)
# x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
# y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
# z = [c*np.sin(b) for b,c in zip(B,C)]
# #Force Cluster to origin
# # x = [a-np.mean(x) for a in x]
# # y = [a-np.mean(y) for a in y]
# # z = [a-np.mean(z) for a in z]
# ax.scatter3D(x,y,z,label=cluster.name)
# scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
# ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
# #ax.scatter(0,0,0,color='black')
# plt.legend()
def yso_lookup():
#Imports
from astroquery.simbad import Simbad
import numpy as np
import os
import re
global names
global sect
global results
global ra
global dec
main = open("Excess Examples/YSO_object_list.dat").read()
main = main.split("\n")[:-1]
#Get the names of all of the objects identified
names = []
ra = []
dec = []
validNames = []
for row in main:
sect = re.split('\s+',row)
if sect[0] == '':
sect = sect[1:]
if sect[2] == 'none':
continue
name = sect[2]
blacklist = ['A','Ab','AB','ABC','B','AaB']
for entry in sect[3:]:
if '.' in entry or entry in blacklist:
break
name = name + " " + entry
names.append(name)
#Perform a SIMBAD query for the identified objects
results = []
for name in names:
result = Simbad.query_object(name)
if not type(result) == type(None):
results.append(result)
validNames.append(name.replace(' ',''))
ra1 = str(result.columns['RA']).split('\n')[-1]
ra1 = re.split('\s+',ra1)
if '' in ra1:
ra.append('---')
else:
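                #Convert sexagesimal RA (hours, minutes, seconds) to decimal degrees: 15 deg per hour, 1/4 deg per minute, 1/240 deg per second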
ra.append(str(round(float(ra1[0])*15+float(ra1[1])/4+float(ra1[2])/240,5)))
dec1 = str(result.columns['DEC']).split('\n')[-1]
dec1 = re.split('\s+',dec1)
if '' in dec1:
dec.append('---')
else:
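                #Convert sexagesimal Dec (degrees, arcminutes, arcseconds) to decimal degrees; the arcminute and arcsecond terms are simply added, so this assumes a positive declination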
dec.append(str(round(float(dec1[0])+float(dec1[1])/60+float(dec1[2])/3600,5)))
#Create a text file in the VOSA readable format
VOSAdata = []
gaiadata = []
for i in range(len(validNames)):
line1 = f"{validNames[i]} {ra[i]} {dec[i]} --- --- --- --- --- --- ---"
line2 = f"{ra[i]} {dec[i]}"
VOSAdata.append(line1)
if '-' in line2:
continue
gaiadata.append(line2)
np.savetxt("Excess Examples/yso_vosa_output.txt",VOSAdata,fmt="%s")
np.savetxt("Excess Examples/yso_gaia_output.txt",gaiadata,fmt="%s")
def exportVOSA(cl):
#Imports
import numpy as np
if not cl in clusters:
loadClusters([cl])
cluster = clusters[cl]
#objname RA DEC DIS Av Filter Flux Error PntOpts ObjOpts
data = []
for star in cluster.filtered:
name = star.name.replace(" ","")
line = f"{name} {star.ra} {star.dec} {1000/star.par} --- --- --- --- --- ---"
data.append(line)
np.savetxt(f"{cluster.dataPath}{cluster.name}_VOSA.txt",data,fmt="%s")
def readSED(cList=['all'],printMissing=False):
#imports
import numpy as np
import re
import os
cList = checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
objPath = cluster.dataPath + "vosa_results/objects/"
names = []
for star in cluster.filtered:
flat = star.name.replace(" ","").replace("DR2","").replace("EDR3","").replace("DR3","")
names.append(flat)
star.flatName = flat
cluster.stars = dict(zip(names,cluster.filtered))
idx = 0
newStars = dict()
#Each star in a cluster has its own folder, and each folder contains several data sets
for folder in os.listdir(objPath):
fileName = folder.replace("DR2","").replace("EDR3","").replace("DR3","")
#Weed out VOSA stars not in current filtered members list
if not fileName in cluster.stars:
if printMissing:
print(f"{fileName} is missing from filtered list, skipping it...")
continue
main = open(objPath+folder+"/sed/"+folder+".sed.dat").read()
main = main.split("\n")
data = main[10:-1]
#Create a list of measurement object pointers to attach to the stars later
measurements = []
#Convert every line of the data set into a vosaPoint object
for row in data:
sect = re.split('\s+',row)[1:-1]
measurements.append(vosaPoint(str(sect[0]),float(sect[1]),float(sect[2]),float(sect[3]),float(sect[4]),float(sect[5]),float(sect[6])))
cluster.stars[fileName].vosaPoints = measurements
#Weed out cluster.stars members who do not have a vosa table
newStars[fileName] = cluster.stars[fileName]
idx += 1
cluster.stars = newStars
def checkBinary(cl):
import numpy as np
import matplotlib.pyplot as plt
checkLoaded([cl])
cluster = clusters[cl]
global lman
data = [Datum(star.b_r,star.g_mag) for star in cluster.filtered]
# ax = plt.axes(xlim=(cluster.min_b_r-0.25,cluster.max_b_r+0.25), ylim=(cluster.min_g_mag-1,cluster.max_g_mag+1),autoscale_on=False)
ax = plt.axes(xlim=(0, 2.5), ylim=(8, 20), autoscale_on=False)
ax.invert_yaxis()
ax.set_title('Lasso points using left mouse button')
lman = LassoManager(ax, data,cluster)
plt.show()
def vosaBinaries(cl):
#Imports
import numpy as np
import matplotlib.pyplot as plt
import os
checkLoaded([cl])
cluster = clusters[cl]
if not os.path.isdir(f"{cluster.imgPath}vosaBinaries/"):
os.mkdir(f"{cluster.imgPath}vosaBinaries/")
for star in cluster.stars.values():
if not star.binary == 1:
                #Skip non-binary stars (per-star processing for flagged binaries is not implemented below)
                continue
def excessIR(cl,plot=True):
#Imports
import numpy as np
import matplotlib.pyplot as plt
import os
checkLoaded([cl])
cluster = clusters[cl]
if not os.path.isdir(f"{cluster.imgPath}excessIR/"):
os.mkdir(f"{cluster.imgPath}excessIR/")
for star in cluster.stars.values():
excess = False
for vp in star.vosaPoints:
if vp.excess > 0:
excess = True
if excess:
#print(f"{star.name} has {len(star.vosaPoints)} VOSA points")
star.hasExcess = 1
if plot:
plt.figure(f'{cluster.name} - {star.name}')
plt.title(f'{cluster.name} : {star.name}')
ax = plt.gca()
ax.set_yscale('log')
ax.set_xscale('log')
plt.ylabel(r'Flux ($ergs^{-1}cm^{-2}\AA^{-1}$)')
plt.xlabel(r'Wavelength ($\AA$)')
plt.scatter([a.wavelength for a in star.vosaPoints],[a.flux for a in star.vosaPoints])
plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.pdf")
plt.savefig(f"{cluster.imgPath}excessIR/{star.name}.png",dpi=500)
def proxyMatch(cList,plot=False):
#Imports
import matplotlib.pyplot as plt
import numpy as np
checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
iso = isochrones[cluster.iso[0][0]]
isoPoints = []
for pt in iso.starList:
isoPoints.append(pt)
# if pt.Gaia_G_EDR3+cluster.dist_mod > cluster.turnPoint[1]:
# isoPoints.append(pt)
for star in cluster.filtered:
minDist = 0.2
smallestDist = 10
vertCutoff = 1
minPoint = None
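            #minDist is the maximum allowed color (BP-RP) offset and vertCutoff the maximum allowed magnitude offset
            #between a star and its proxy isochrone point; smallestDist just tracks the closest rejected match for reporting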
for point in isoPoints:
dist = abs(point.Gaia_BP_EDR3-point.Gaia_RP_EDR3-star.b_r+cluster.reddening)
if dist < minDist:
if abs(point.Gaia_G_EDR3+cluster.dist_mod - star.g_mag + 2.1*cluster.reddening) < vertCutoff:
minDist = dist
minPoint = point
elif dist < smallestDist:
smallestDist = dist
try:
assert minDist < 0.2
except:
print(f"[{cluster.name}] Star too distant from isochrone to make a good proxy: BP-RP: {star.b_r} | G: {star.g_mag} | Dist: {smallestDist}")
star.proxyMass = 0
star.proxyLogTemp = 0
star.proxyFeH = 0
star.proxyLogAge = 0
star.proxy = None
continue
#print(minDist)
star.proxyMass = minPoint.star_mass
star.proxyLogTemp = minPoint.log_Teff
star.proxyFeH = minPoint.feh
star.proxyLogAge = minPoint.log10_isochrone_age_yr
star.proxy = minPoint
cluster.massLoaded = True
cluster.meanProxyMass = np.mean([a.proxyMass for a in cluster.filtered])
cluster.totalProxyMass = np.sum([a.proxyMass for a in cluster.filtered])
cluster.min_g_mag = min([a.g_mag for a in cluster.filtered])
cluster.max_g_mag = max([a.g_mag for a in cluster.filtered])
cluster.min_b_r = min([a.b_r for a in cluster.filtered])
cluster.max_b_r = max([a.b_r for a in cluster.filtered])
# if plot:
# plt.figure(f"{cluster.name}_proxy_fit")
def variableHistogram(cl,var):
#Imports
import numpy as np
import matplotlib.pyplot as plt
checkLoaded([cl])
cluster = clusters[cl]
plt.figure()
plt.title(f"{cluster.name} Histogram of {var}")
plt.xlabel(f"{var}")
plt.ylabel("Count")
plt.hist([eval(f"a.{var}") for a in cluster.filtered],bins='auto')
def varHist2D(cl,var1,var2,color='default',listType='filtered'):
#Imports
import numpy as np
import matplotlib.pyplot as plt
checkLoaded([cl])
#Check allowed entries
    allowedTypes = ['filtered','unfilteredWide','unfilteredBright','filteredBright','binaries']
if not listType in allowedTypes:
print(f"{listType} is not a valid list type, defaulting to filtered")
listType = "filtered"
cluster = clusters[cl]
plt.figure(figsize=(8,8))
#Axis size and spacing
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
ax_scatter = plt.axes(rect_scatter)
ax_scatter.tick_params(direction='in', top=True, right=True)
ax_histx = plt.axes(rect_histx)
ax_histx.tick_params(direction='in', labelbottom=False)
ax_histy = plt.axes(rect_histy)
ax_histy.tick_params(direction='in', labelleft=False)
x = [eval(f"a.{var1}") for a in eval(f"cluster.{listType}")]
y = [eval(f"a.{var2}") for a in eval(f"cluster.{listType}")]
if color == 'default':
ax_scatter.scatter(x, y, s=5)
else:
colorMap = plt.get_cmap('coolwarm')#.reversed()
ax_scatter.scatter(x, y, s=5, c=[eval(f"a.{color}") for a in eval(f"cluster.{listType}")], cmap = colorMap)
# clb = plt.colorbar(ax_scatter)
# clb.ax.set_title(f"{color}")
ax_histx.hist(x,bins='auto')
ax_histy.hist(y,bins='auto',orientation='horizontal')
ax_histx.set_title(f"Histogram of {listType} {cluster.name} in {var1} and {var2}")
ax_scatter.set_xlabel(f"{var1}")
ax_scatter.set_ylabel(f"{var2}")
def Plot3D(cList=['all'],showEarth=True,flatten=True):
#Imports
import plotly.express as px
import plotly.io as pio
import numpy as np
global clusterList
pio.renderers.default='browser'
fig = px.scatter_3d()
if showEarth:
fig.add_scatter3d(x=[0],y=[0],z=[0],marker=dict(color='lightblue'),name="Earth")
cList = checkLoaded(cList)
big = []
for cl in cList:
cluster = clusters[cl]
A = [a.ra * np.pi/180 for a in cluster.filtered]
B = [abs(b.dec) * np.pi/180 for b in cluster.filtered]
C = [1/(0.001*c.par) for c in cluster.filtered]
#Flatten radially
if flatten:
C = [np.mean(C)]*len(C)
x = [c*np.cos(b)*np.cos(a) for a,b,c in zip(A,B,C)]
y = [c*np.cos(b)*np.sin(a) for a,b,c in zip(A,B,C)]
z = [c*np.sin(b) for b,c in zip(B,C)]
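        #Spherical-to-Cartesian conversion: x = d*cos(dec)*cos(ra), y = d*cos(dec)*sin(ra), z = d*sin(dec),
        #with d = 1000/parallax[mas] in parsecs (the abs() on Dec above mirrors southern clusters into the northern hemisphere)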
#Force Cluster to origin
# x = [a-np.mean(x) for a in x]
# y = [a-np.mean(y) for a in y]
# z = [a-np.mean(z) for a in z]
fig.add_scatter3d(x=x,y=y,z=z,name=cl,mode="markers",marker=dict(size=2))
big.append(np.amax(x))
big.append(np.amax(y))
big.append(np.amax(z))
#fig.layout.scene = dict(aspectmode="manual",aspectratio=dict(x=1,y=1,z=1))
#fig.update_layout(scene=dict(aspectmode="cube",xaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),yaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)]),zaxis=dict(showbackground=False,range=[-1*np.amax(big),np.amax(big)])))
fig.update_layout(scene=dict(aspectmode="cube",xaxis=dict(showbackground=False),yaxis=dict(showbackground=False),zaxis=dict(showbackground=False,visible=False)))
fig.show()
def specificPlot(cl,iso,reddening,score):
#Imports
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import os
checkLoaded([cl])
cluster = clusters[f"{cl}"]
isochrone = isochrones[f"{iso}"]
#These are displayed on the plot
# score = 0
reddening = float(reddening)
#Directory for saving plot outputs
if not os.path.isdir("SpecificPlots/pdf/"):
os.makedirs("SpecificPlots/pdf/")
if not os.path.isdir("SpecificPlots/png/"):
os.makedirs("SpecificPlots/png/")
# #Find the score of the associated isochrone
# for chrone in cluster.iso:
# if chrone[0] == iso and chrone[2] == reddening:
# score = chrone[1]
# break
#Plots the CMD and the isochrone, with all of the points adjusted to reddening, extinction, and distance modulus
plt.figure()
plt.gca().invert_yaxis()
plt.xlabel('B-R')
plt.ylabel('G Mag')
plt.title(f"{cl} {iso}")
plt.scatter([s.b_r for s in cluster.filtered],[s.g_mag for s in cluster.filtered],s=0.05,c='dimgray',label='Cluster')
plt.plot([x + reddening for x in isochrone.br],[x+cluster.dist_mod+2.1*reddening for x in isochrone.g],c='midnightblue',label=f"Score: {float(score):.7f}")
plt.scatter([s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed],s=5,c=[s.weight for s in cluster.condensed],label='Cluster Proxy')
#Colors the points by their fitting weight
plt.set_cmap('brg')
clb = plt.colorbar()
clb.ax.set_title("Weight")
#Label for the reddening
extra = Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0)
h,l = plt.gca().get_legend_handles_labels()
h.insert(0,extra)
l.insert(0,f"Reddening: {reddening}")
plt.legend(h,l)
#Save figure output to disk
plt.savefig(f"SpecificPlots/pdf/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.pdf")
plt.savefig(f"SpecificPlots/png/Requested_Plot_{cl}_{iso}_Reddening_{reddening}.png",dpi=500)
def plotRange(cl,a,b):
global clusters
checkLoaded([cl])
#Plots the top fitting isochrones over the range a to b for a given cluster
#Does this by calling the specificPlot() method for each isochrone over the range
for isochrone in clusters[f"{cl}"].iso[a:b]:
specificPlot(cl,isochrones[isochrone[0]].name,isochrone[2],isochrone[1])
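#Example (illustrative): plotRange('M67',0,5) would call specificPlot() for the five best-scoring isochrone fits stored for M67,
#assuming that cluster's data and isochrone fits have already been generated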
def getIsoScore(cl,iso,red,output=True):
#Return the score for a given cluster's isochrone fit
for i in cl.iso:
if i[0] == iso.name and float(i[2]) == red:
return i[1]
if output:
print(f"No score found for {cl.name} | {iso.name} | {red}")
return 0
def onkey(x,y,cx,cy,fig,ax,cluster,iso,reddening):
global curIso
global curReddening
curIso = iso
curReddening = reddening
def func(event):
import matplotlib.patches as patches
global curIso
global curReddening
global isochrones
key = str(event.key)
#print(key)
ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
age_index = ageSorted.index(curIso)
feh_index = fehSorted.index(curIso)
#Move up or down in the desired variable space, with wrap-around at the ends of the lists
if key == "w":
#Increase metallicity
try:
curIso = fehSorted[feh_index+1]
feh_index = feh_index+1
except:
curIso = fehSorted[0]
feh_index = 0
if key == "s":
#Decrease metallicity
curIso = fehSorted[feh_index-1]
feh_index = feh_index-1
if feh_index < 0:
feh_index = len(fehSorted)+feh_index
if key == "a":
            #Decrease age
curIso = ageSorted[age_index-1]
age_index = age_index-1
if age_index < 0:
age_index = len(ageSorted)+age_index
if key == "d":
            #Increase age
try:
curIso = ageSorted[age_index+1]
age_index = age_index+1
except:
curIso = ageSorted[0]
age_index = 0
if key == "q":
            #Decrease reddening
curReddening = round(curReddening-0.01,2)
if key == "e":
            #Increase reddening
curReddening = round(curReddening+0.01,2)
if key == "r":
#Reset to originally requested isochrone
curIso = iso
ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == curIso.feh]
fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == curIso.age]
age_index = ageSorted.index(curIso)
feh_index = fehSorted.index(curIso)
if key == " ":
#Print currently highlighted isochrone to console
score = getIsoScore(cluster,curIso,curReddening)
fig.savefig(f"Jamboree Images/frames/{curIso.name}.png",dpi=500)
print(f"{curIso.name} | {curReddening} | {score}")
score = getIsoScore(cluster,curIso,curReddening,output=False)
#Replots everything with the updated isochrone
ax.clear()
ax.scatter(x,y,s=0.25,color='dimgray')
ax.scatter(cx,cy,s=4,color='red')
ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+curReddening for a in curIso.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*curReddening for a in curIso.starList],color='darkblue')
ax.set_title(f"{curIso.name}\n {curReddening}\n {score}")
ax.set_xlabel("Apparent BP-RP")
ax.set_ylabel("Apparent G Mag")
ax.invert_yaxis()
#Progress bar indicators for the interactive plot
        #Sets the dimensions of the boxes
x0,x1 = ax.get_xlim()
y0,y1 = ax.get_ylim()
margin = 0.01
width = 0.05 * (x1-x0)
height = 0.6 * (y1-y0)
xmargin = margin * (x1-x0)
ymargin = margin * (y1-y0)
#The two main progress bars
rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
#rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
ax.add_patch(rect1)
ax.add_patch(rect2)
#ax.add_patch(rect3)
#The segments filling up the progress bars
n = len(ageSorted)
#Adds cells bottom to top
for i in range(n):
offset = i*height/n
alpha = 0.25
if i == age_index:
color = 'red'
else:
color = 'black'
#Age progress bar
ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
n = len(fehSorted)
for i in range(n):
offset = i*height/n
alpha = 0.25
if i == feh_index:
color = 'red'
else:
color = 'black'
#Metallicity progress bar
ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
fig.canvas.draw_idle()
return func
def interactivePlot(cl,iso=0,reddening="auto"):
#Imports
import matplotlib.pyplot as plt
import matplotlib.patches as patches
global clusters
global isochrones
global kid
checkLoaded([cl])
cluster = clusters[f"{cl}"]
#Select the starting isochrone based on user input
if type(iso) == str:
isochrone = isochrones[f"{iso}"]
elif type(iso) == int:
assert iso >= 0
isochrone = isochrones[cluster.iso[iso][0]]
else:
print("Invalid declaration of 'iso'")
return
name = isochrone.name
#Get the reddening if not manually defined
if reddening == "auto":
reddening = cluster.reddening
assert type(reddening) == float or type(reddening) == int
score = getIsoScore(cluster,isochrone,reddening)
# #Sorted and secondary-sorted isochrone lists
# ageSorted = sorted(isoList,key=lambda x: (x.age,x.feh))
# fehSorted = sorted(isoList,key=lambda x: (x.feh,x.age))
ageSorted = [a for a in sorted(isoList,key=lambda x: float(x.age)) if a.feh == isochrone.feh]
fehSorted = [a for a in sorted(isoList,key=lambda x: float(x.feh)) if a.age == isochrone.age]
age_index = ageSorted.index(isochrone)
feh_index = fehSorted.index(isochrone)
#Coordinate lists to plot in addition to the isochrones
x,y = cluster.mag[:,0],cluster.mag[:,1]
cx,cy = [s.b_r for s in cluster.condensed],[s.g_mag for s in cluster.condensed]
#Systematically remove some of the conflicting default keymaps in Pyplot
letters = ['w','s','a','d','q','e','r']
for letter in letters:
#Finds all keymap references in the rcParams
for param in [key for key in plt.rcParams if key.startswith("keymap") ]:
try:
plt.rcParams[param].remove(letter)
except:
continue
#Initialize the plot that will be updated every time
fig = plt.figure(f"Interactive plot of {cl}")
ax = fig.add_subplot(111)
ax.scatter(x,y,s=0.25,color='dimgray')
ax.scatter(cx,cy,s=4,color='red')
ax.plot([a.Gaia_BP_EDR3-a.Gaia_RP_EDR3+reddening for a in isochrone.starList],[a.Gaia_G_EDR3+cluster.dist_mod+2.1*reddening for a in isochrone.starList],color='darkblue')
ax.set_title(f"{name}\n {reddening}\n {score}")
ax.set_xlabel("Apparent BP-RP")
ax.set_ylabel("Apparent G Mag")
ax.invert_yaxis()
x0,x1 = ax.get_xlim()
y0,y1 = ax.get_ylim()
margin = 0.01
width = 0.05 * (x1-x0)
height = 0.6 * (y1-y0)
xmargin = margin * (x1-x0)
ymargin = margin * (y1-y0)
rect1 = patches.Rectangle((x1-width-xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
rect2 = patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
#rect3 = patches.Rectangle((x1-3*width-3*xmargin,y0+ymargin),width,height,linewidth=1,edgecolor='black',facecolor='none',alpha=0.5)
ax.add_patch(rect1)
ax.add_patch(rect2)
#ax.add_patch(rect3)
n = len(ageSorted)
#Adds cells bottom to top
for i in range(n):
offset = i*height/n
alpha = 0.25
if i == age_index:
color = 'red'
else:
color = 'black'
ax.add_patch(patches.Rectangle((x1-2*width-2*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
n = len(fehSorted)
for i in range(n):
offset = i*height/n
alpha = 0.25
if i == feh_index:
color = 'red'
else:
color = 'black'
ax.add_patch(patches.Rectangle((x1-1*width-1*xmargin,y0+ymargin+offset),width,height/n,linewidth=0.01,edgecolor='black',facecolor=color,alpha=alpha))
#Launch the key_press listener
hook = onkey(x,y,cx,cy,fig,ax,cluster,isochrone,reddening)
kid = fig.canvas.mpl_connect('key_press_event',hook)
def printList(cList,varList):
cList = checkLoaded(cList)
for cl in cList:
cluster = clusters[cl]
for a in varList:
clStr = f"[{cl}] {a} ="
exec(f"print(clStr,cluster.{a})")
def statRange(cl,a,b):
import numpy as np
global clusters
checkLoaded([cl])
if not isoIn:
loadIsochrones()
ages = []
fehs = []
ys = []
reds = []
#Computes the mean age, metallicity, and reddening for the top fitting isochrones over the range a to b for a given cluster
#For example, a=0, b=10 will average the top 10 isochrone fits
for isochrone in clusters[cl].iso[a:b]:
iso = isochrones[isochrone[0]]
print(f"{iso.name} Reddening:{isochrone[2]}")
ages.append(float(iso.age))
fehs.append(float(iso.feh))
ys.append(float(iso.y))
reds.append(float(isochrone[2]))
print(f"[{cl}] Mean age= {np.mean(ages)} Mean feh= {np.mean(fehs)} Mean y= {np.mean(ys)} Mean Reddening= {np.mean(reds)}")
def setFlag():
#Imports
    global clusterList
#Goes back and sets membership flags for all of the clusters loaded in memory to ensure that this tag can be used later
#This takes place automatically after running turboFilter()
#Example use case for this variable is in the customPlot() method
for cluster in clusterList:
for star in cluster.filtered:
for unfStar in cluster.unfilteredWide:
if star == unfStar:
unfStar.member = 1
def customPlot(var1,var2,clname,mode='filtered',iso=False,square=True,color='default',title='default',close=False,save=True):
#Imports
import matplotlib.pyplot as plt
global closePlots
#Load the cluster if it isn't yet
checkLoaded([clname])
cluster = clusters[f"{clname}"]
#Set the list of stars to be used for the given cluster
#Using a mode not specified will return a referenced before assignment error
if mode == 'filtered':
starlist = cluster.filtered
elif mode == 'unfiltered':
starlist = cluster.unfilteredWide
elif mode == 'bright_filtered':
starlist = cluster.filteredBright
elif mode == 'dist_filtered':
starlist = cluster.distFiltered
elif mode == 'bright_unfiltered':
starlist = cluster.unfilteredBright
elif mode == 'duo':
starlist = cluster.unfilteredWide
starlistF = cluster.filtered
elif mode == 'binary':
starlist = cluster.binaries
elif mode == 'duoBinary':
starlist = cluster.filtered
starlistF = cluster.binaries
elif mode == 'duoBright':
starlist = cluster.unfilteredBright
starlistF = cluster.filteredBright
elif mode == 'duoDist':
starlist = cluster.distFiltered
starlistF = cluster.filtered
elif mode == 'condensed':
starlist = cluster.condensed
elif mode == 'duoCondensed':
starlist = cluster.filtered
starlistF = cluster.condensed
elif mode == 'bounded':
starlist = cluster.bounded
elif mode == 'duoBounded':
starlist = cluster.filtered
starlistF = cluster.bounded
else:
print("No preset star list configuration found with that alias")
return
#Basic plot features with axis labels and a title
plt.figure()
if title == 'default':
plt.title(f"{clname} {mode} | {var1} vs {var2} | {color} color")
else:
plt.title(f"{title}")
plt.xlabel(f"{var1}".upper())
plt.ylabel(f"{var2}".upper())
#Plots differently depending on the mode
#The color tag can be used to add distinction of a third variable while limited to two axes
#If unspecified, filtered starlist with midnight blue coloring will be the result
if iso:
plt.gca().invert_yaxis()
if 'duo' in mode:
#plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=[0.1+a.member*1.4 for a in starlist],c=[list(('lightgray',eval('z.par')))[z.member] for z in starlist])
plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=2,c='gray')
if color == 'default':
plt.scatter([eval(f"x.{var1}") for x in starlistF],[eval(f"y.{var2}") for y in starlistF],s=2.5,c='red')
else:
plt.scatter([eval(f"x.{var1}") for x in starlistF],[eval(f"y.{var2}") for y in starlistF],s=2.5,c=[eval(f"z.{color}") for z in starlistF])
plt.set_cmap('brg')
clb = plt.colorbar()
clb.ax.set_title(f"{color}")
else:
if color == 'default':
plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=1,c='midnightblue')
else:
plt.scatter([eval(f"x.{var1}") for x in starlist],[eval(f"y.{var2}") for y in starlist],s=2,c=[eval(f"z.{color}") for z in starlist])
plt.set_cmap('cool')
clb = plt.colorbar()
clb.ax.set_title(f"{color}")
#By default, squares the axes to avoid misinformation from stretched axes
#Turn this off and iso to true for a color magnitude diagram
if square:
plt.axis("square")
if save:
plt.savefig(f"SpecificPlots/pdf/{clname}_{mode}_{var1}_{var2}.pdf")
plt.savefig(f"SpecificPlots/png/{clname}_{mode}_{var1}_{var2}.png",dpi=500)
if close or closePlots:
plt.close()
if save:
print(f"Custom Plot {clname}_{mode}_{var1}_{var2} saved and closed")
else:
print(f"Custom Plot {clname}_{mode}_{var1}_{var2} closed")
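#Illustrative customPlot() calls (assumes the named cluster can be loaded and that the SpecificPlots output directories exist):
# customPlot('ra','dec','M67',mode='duo',color='par') #sky positions with members colored by parallax
# customPlot('b_r','g_mag','M67',mode='filtered',iso=True,square=False) #a basic color-magnitude diagram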
def splitMS(clname='M67',slope=3,offset=12.2):
#Imports
import numpy as np
import matplotlib.pyplot as plt
checkLoaded([clname])
cluster = clusters[clname]
xlist = [s.b_r for s in cluster.filtered]
ylist = [s.g_mag for s in cluster.filtered]
x = np.linspace(1,2,100)
    #Create a diagram showing the lower edge and upper edge of the main sequence, which in theory are separated by 0.75mag
    #(an unresolved equal-mass binary is twice as luminous as a single star, so it sits 2.5*log10(2) ~ 0.753 mag above the single-star sequence)
plt.figure()
plt.title('Main and Binary Sequences')
plt.xlabel('B-R')
plt.ylabel('Apparent G Mag')
plt.scatter(xlist,ylist,s=0.5,label='Filtered Star Data')
plt.plot(x,[slope*a + offset for a in x],color='r',label='Main Sequence')
plt.plot(x,[slope*a + offset - 0.75 for a in x],'--',color='r',label='MS shifted 0.75 mag')
plt.xlim(0.6,2.2)
plt.ylim(13,19)
plt.legend()
plt.gca().invert_yaxis()
plt.savefig(f"SpecificPlots/png/{clname}_MS_Spread.png",dpi=500)
plt.savefig(f"SpecificPlots/pdf/{clname}_MS_Spread.pdf")
def kingProfile(r,K,R):
return K*(1+r**2/R**2)**(-1)
def kingError(r,K,R,dK,dR):
import numpy as np
dfdK = (1+r**2/R**2)**(-1)
dfdR = 2*K*r**2*R*(r**2+R**2)**(-2)
return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)
def densityProfile(r,K,R):
import numpy as np
#The exponential that is fit for the membership profile
#R is a characteristic radius, typically negative but the absolute value is used for comparison
#K is a scalar constant
return K*np.exp(-1*r/R)
def densityError(r,K,R,dK,dR):
import numpy as np
dfdK = abs(np.exp(-1*r/R))
dfdR = abs(K*r/(R**2)*np.exp(-1*r/R))
return np.sqrt((dfdK*dK)**2 + (dfdR*dR)**2)
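#kingError and densityError above propagate the curve_fit uncertainties to the model value using standard
#first-order error propagation: sigma_f = sqrt((df/dK * dK)**2 + (df/dR * dR)**2), where dK and dR are the
#square roots of the diagonal elements of the covariance matrix returned by scipy.optimize.curve_fit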
def toIntensity(mag):
msun = -26.74 #apparent magnitude
    Isun = 1360 #W/m^2 (solar constant)
return Isun*10**( 0.4*(msun-mag) )
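#toIntensity converts an apparent magnitude to a flux via I = Isun*10**(0.4*(msun - mag)), so toIntensity(-26.74)
#returns the solar constant (~1360 W/m^2) and every +2.5 mag corresponds to a factor of 10 drop in intensity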
def membership(clname='M67',N=100,mode='filtered',numPercentileBins=5,percentile=0.2,delta=5,normalize=True):
#Imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import scipy.optimize as so
import scipy.stats as st
import math
global volume
checkLoaded([clname])
cluster = clusters[clname]
mode = mode.lower()
#Default mode is filtered, but unfiltered data can be processed
if "filtered" in mode:
starList = cluster.filtered
elif "bounded" in mode:
starList = cluster.bounded
else:
starList = cluster.unfilteredWide
#Load mass estimates from isochrone fitting
if not cluster.massLoaded:
proxyMatch([cluster.name])
assert cluster.massLoaded
assert len(starList) > 0
#Assign x and y lists based on normalization or not
if normalize:
starX = [a.ra*np.cos(a.dec*np.pi/180) for a in starList]
starY = [a.dec for a in starList]
mode = mode + "_normalized"
else:
starX = [a.ra for a in starList]
starY = [a.dec for a in starList]
#Determine bounds of the field of view (post-filtering)
xmax = max(starX)
ymax = max(starY)
x0 = np.mean(starX)
y0 = np.mean(starY)
newN = N
#Determine radius of the field of view
rx = xmax-x0
ry = ymax-y0
#r = np.mean([rx,ry])
radiusFOV = ry
#Using the mean ra and dec radius caused problems with clusters
#like NGC188, which are close to the celestial pole and have
#a very stretched mapping to the RA DEC space
ringBins = list(np.linspace(0,radiusFOV,N))
#The bins are divided up such that 50% of the bins are located in the inner 25% of the cluster radius
#The remaining 50% of the bins are divided from 25% to 100% of the radius
rings = list(np.linspace(0,radiusFOV/4,math.ceil(N/2)))
ring2 = list(np.linspace(radiusFOV/4,radiusFOV,math.floor(N/2)+1))
ring2 = ring2[1:-1]
rings.extend(ring2)
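    #For example, with N=100 and radiusFOV=1 deg this places 50 ring radii between 0 and 0.25 deg and the remainder between 0.25 and 1 deg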
x=rings[:-1]
# for i in range(0,len(rings[:-1])):
# x.append((rings[i+1]+rings[i])/2)
counts = list(np.zeros(N-1,dtype=int))
masses = list(np.zeros(N-1,dtype=int))
rads=[]
for star in starList:
#Radial distance from the mean RA and Dec of the cluster
if normalize:
rads.append(np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2))
else:
rads.append(np.sqrt((star.ra-x0)**2+(star.dec-y0)**2))
#Find the nearest ring to the star
r = find_nearest(rings, rads[-1])
i = rings.index(r)
#Check bounds
if i < len(counts):
#If outside last ring, add to that count
if r > rads[-1]:
counts[i-1] += 1
masses [i-1] += star.proxyMass
else:
counts[i] += 1
masses [i] += star.proxyMass
#Worth noting here that the way that this is set up, the rings don't actually mark the bounds of the bins but rather the midpoints.
#There is no check to see if you are exterior or interior to the nearest ring, but rather what ring you are nearest to,
#so the rings mark the midpoints of their bins not the boundaries
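    #e.g. with ring radii [0,1,2] a star at r=1.4 is counted toward the ring at 1 (its nearest ring), not binned strictly between 1 and 2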
#Histogram of the counts in each radial bin
plt.figure(f"{clname}_membership_{mode}")
plt.hist(rads,bins=ringBins)
plt.xlabel("Radius (deg)")
plt.ylabel("Number of Stars")
plt.title(f"{clname} Membership")
plt.savefig(f"{cluster.imgPath}{clname}_membership_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_membership_{mode}.png",dpi=500)
#Calculates the volume of each region bounded by two concentric rings and the number density of the stars counted in those regions
volume = []
for i in range(0,len(rings[:-1])):
volume.append(np.pi*(rings[i+1]**2-rings[i]**2))
numDensity = [a/b for a,b in zip(counts,volume)]
massDensity = [a/b for a,b in zip(masses,volume)]
error_num = [np.sqrt(a)/b for a,b in zip(counts,volume)]
error_mass = [np.sqrt(a)/b for a,b in zip(masses,volume)]
for i in range(0,len(error_num)):
if error_num[i] < 0.1:
error_num[i] = 0.1
#Cut out the inner 5% because overbinning in the center of a circle doesn't help
x = x[math.ceil(N/20):-1]
counts = counts[math.ceil(N/20):-1]
numDensity = numDensity[math.ceil(N/20):-1]
massDensity = massDensity[math.ceil(N/20):-1]
error_num = error_num[math.ceil(N/20):-1]
error_mass = error_mass[math.ceil(N/20):-1]
#Further filter the data based on outliers, either extremely low density or extremely big jumps in density from bin to bin
i = 0
numSmall = 0
numGrad = 0
while i < len(x)-1:
if numDensity[i] < 0.5 or numDensity[i] < numDensity[i+1]/delta or massDensity[i] < 0.1:
x.pop(i)
counts.pop(i)
numDensity.pop(i)
massDensity.pop(i)
error_num.pop(i)
error_mass.pop(i)
numSmall += 1
newN -= 1
elif abs(numDensity[i]) > abs(numDensity[i+1])*delta:# or abs(numDensity[i]) < abs(numDensity[i-1])/3:
x.pop(i)
counts.pop(i)
numDensity.pop(i)
massDensity.pop(i)
error_num.pop(i)
error_mass.pop(i)
numGrad += 1
newN -= 1
else:
i += 1
if numDensity[-1] < 0.01 or massDensity[-1] < 0.01:
x.pop(-1)
counts.pop(-1)
numDensity.pop(-1)
massDensity.pop(-1)
error_num.pop(-1)
error_mass.pop(-1)
numSmall += 1
newN -= 1
print(f"[{cluster.name}] Removed {numSmall} points with too small of a density and {numGrad} points with too extreme of a delta")
#========= Number Density =========
#Number density vs radial bin plot
plt.figure(f"{clname}_density_{mode}")
plt.errorbar(x,numDensity,yerr=error_num,ls='None')
plt.scatter(x,numDensity)
plt.xlabel("Radius (deg)")
plt.ylabel(r"Surface Number Density ($deg^{-2}$)")
plt.title(f"{clname} {mode.capitalize()} Number Density".replace("_normalized",' Normalized'))
    #Fit a King-like profile to the number density using the kingProfile function defined above
    #Initial guess for the fit parameters [K,R]
    p0=[5000,0.1]
#print([b/a for a,b in zip(numDensity,error_num)])
fit,var = so.curve_fit(kingProfile,x,numDensity,p0,maxfev=1000)
#Std. Dev. from variance
err = np.sqrt(var[1][1])
err_coeff = np.sqrt(var[0][0])
scale = np.abs(fit[1]*3600/206265)/(cluster.mean_par/1000)
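    #The line above converts the angular scale radius to a physical one via the small-angle relation
    #R[pc] = theta[rad]*d[pc] = (theta[deg]*3600/206265)/(parallax[arcsec]), with parallax = mean_par[mas]/1000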
#scaleVar = (3600/206265)*(err/(cluster.mean_par/1000) ) + 2*fit[1]/(cluster.mean_par_err/1000)
scaleVar = np.abs(scale*np.sqrt((var[1][1]/fit[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
#Scale radius from count in parsecs
setattr(cluster,f"scaleRad_{mode}",scale)
setattr(cluster,f"scaleRad_err_{mode}",scaleVar)
#Scale radius from count in degrees
setattr(cluster,f"scaleAngle_{mode}",abs(fit[1]))
setattr(cluster,f"scaleAngle_err_{mode}",err)
setattr(cluster,f"numDensity_coeff_{mode}",fit[0])
setattr(cluster,f"numDensity_coeff_err_{mode}",err_coeff)
#Plot the curve fit
numLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
+ fr"K={fit[0]:.3f} $\pm$ {err_coeff:.3f}" + "\n"
+ fr"$\rho$={np.abs(fit[1]):.3f}$\degree$ $\pm$ {err:.3f}$\degree$"+ "\n"
+ fr"R={scale:.3f}pc $\pm$ {scaleVar:.3f}pc" )
plt.plot(x,[kingProfile(a,*fit) for a in x],color='red',label=numLabel)
plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_{mode}.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_density_filtered")
plt.title(f"{clname} Overlaid Number Density")
plt.errorbar(x,numDensity,yerr=error_num,ls='None',color='midnightblue')
plt.scatter(x,numDensity,color='midnightblue')
plt.plot(x,[kingProfile(a,*fit) for a in x],color='darkred',label=numLabel)
plt.fill_between(x,[kingProfile(a,*fit)-kingError(a,fit[0],fit[1],err_coeff,err) for a in x],[kingProfile(a,*fit)+kingError(a,fit[0],fit[1],err_coeff,err) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.yscale('linear')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_overlay.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_numDensity_log_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_numDensity_log_overlay.png",dpi=500)
#========= Mass Density =========
#Mass density vs radial bin plot
plt.figure(f"{clname}_mass_density_{mode}")
plt.errorbar(x,massDensity,yerr=error_mass,ls='None')
plt.scatter(x,massDensity)
plt.xlabel("Radius (deg)")
plt.ylabel(r"Surface Mass Density ($M_{\odot}*deg^{-2}$)")
plt.title(f"{clname} {mode.capitalize()} Mass Density".replace("_normalized",' Normalized'))
    #Fit a King-like profile to the mass density using the kingProfile function defined above
fit_mass,var_mass = so.curve_fit(kingProfile,x,massDensity,p0,maxfev=1000)
#Std. Dev. from variance
    err_mass = np.sqrt(var_mass[1][1])
    err_mass_coeff = np.sqrt(var_mass[0][0])
scale_mass = np.abs(fit_mass[1]*3600/206265)/(cluster.mean_par/1000)
#scaleVar_mass = (3600/206265)*(err_mass/(cluster.mean_par/1000) ) + 2*fit_mass[1]/(cluster.mean_par_err/1000)
scaleVar_mass = np.abs(scale_mass*np.sqrt((var_mass[1][1]/fit_mass[1])**2 + (cluster.mean_par_err/cluster.mean_par)**2))
#Scale radius from mass in parsecs
setattr(cluster,f"scaleRad_mass_{mode}",scale_mass)
setattr(cluster,f"scaleRad_mass_err_{mode}",scaleVar_mass)
#Scale radius from mass in degrees
setattr(cluster,f"scaleAngle_mass_{mode}",abs(fit_mass[1]))
setattr(cluster,f"scaleAngle_mass_err_{mode}",err_mass)
setattr(cluster,f"massDensity_coeff_{mode}",fit_mass[0])
setattr(cluster,f"massDensity_coeff_err_{mode}",err_mass_coeff)
#Plot the curve fit
massLabel = ( f"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"
+ fr"K={fit_mass[0]:.3f} $\pm$ {err_mass_coeff:.3f}" + "\n"
+ fr"$\rho$={np.abs(fit_mass[1]):.3f}$\degree$ $\pm$ {err_mass:.3f}$\degree$"+ "\n"
+ fr"R={scale_mass:.3f}pc $\pm$ {scaleVar_mass:.3f}pc" )
plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='red',label=massLabel)
plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],label=r'$1\sigma$',edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_{mode}.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mass_density_filtered")
plt.title(f"{clname} Overlaid Mass Density")
plt.errorbar(x,massDensity,yerr=error_mass,ls='None',color='midnightblue')
plt.scatter(x,massDensity,color='midnightblue')
plt.plot(x,[kingProfile(a,*fit_mass) for a in x],color='darkred',label=massLabel)
plt.fill_between(x,[kingProfile(a,*fit_mass)-kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],[kingProfile(a,*fit_mass)+kingError(a,fit_mass[0],fit_mass[1],err_mass_coeff,err_mass) for a in x],edgecolor='none',alpha=0.8,facecolor='salmon')
plt.legend(fontsize=8,loc='upper right')
plt.yscale('linear')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_overlay.png",dpi=500)
plt.yscale('log')
plt.savefig(f"{cluster.imgPath}{clname}_massDensity_log_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massDensity_log_overlay.png",dpi=500)
#========= Average Mass =========
averageMass = [a/b for a,b in zip(massDensity,numDensity)]
xDist = [np.abs(a*3600/206265)/(cluster.mean_par/1000) for a in x]
#Average Mass plot
plt.figure(f"{clname}_average_mass_{mode}")
plt.scatter(xDist,averageMass,label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
plt.xlabel("Distance from Center (pc)")
plt.ylabel(r"Average Stellar Mass ($M_{\odot}$)")
plt.title(f"{clname} {mode.capitalize()} Average Mass".replace("_normalized",' Normalized'))
#Split average mass data into numPercentileBins number of bins
if "filtered" in mode:
cluster.pMin = xDist[0]
cluster.pMax = xDist[-1]
pBins = np.linspace(cluster.pMin,cluster.pMax,numPercentileBins+1)
xBins = []
for i in range(len(pBins)-1):
xBins.append((pBins[i]+pBins[i+1])/2)
pBins = np.delete(pBins,0)
pBins = np.delete(pBins,-1)
for b in pBins:
plt.axvline(x=b,color='black',linestyle='--')
binned = []
for n in range(numPercentileBins):
binned.append([])
#Assign the average mass data points to the bins
for i in range(len(xDist)):
#Finds the nearest xBin to each x value and sorts the corresponding averageMass into that bin
val = find_nearest(xBins,xDist[i])
idx = xBins.index(val)
binned[idx].append(averageMass[i])
#Creates arrays that are numPercentileBins long that store the standard and quantile means of the points in those bins
quantileMean = []
binMean = []
meanBins = []
for b in binned:
if len(b) == 0:
continue
binSorted = sorted(b)
#Finds the index of the lower percentile marker (ex. 20%)
lower = binSorted.index(find_nearest(binSorted, np.quantile(b,percentile)))
#Finds the index of the upper percentile marker (ex. 80%)
upper = binSorted.index(find_nearest(binSorted, np.quantile(b,1-percentile)))
#Means between lower and upper percentile markers
quantileMean.append(np.mean(binSorted[lower:upper+1]))
#Standard Mean
binMean.append(np.mean(b))
#Bins
meanBins.append(xBins[binned.index(b)])
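    #With the default percentile=0.2 the quantile mean is taken over the central ~60% of each bin (20th to 80th percentile), trimming outliers from both ends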
try:
fit, var = so.curve_fit(kingProfile,xDist,[kingProfile(a,*fit_mass)/kingProfile(a,*fit) for a in x])
residual_coeff, residual_scaleAngle = fit[0],fit[1]
except:
print(f"Unable to fit the residuals for {cluster.name}")
residual_coeff, residual_scaleAngle = -99, -99
massFit = st.linregress(meanBins,quantileMean)
fitslope, intercept, rval, pval, fitslope_err, intercept_err = massFit.slope, massFit.intercept, massFit.rvalue, massFit.pvalue, massFit.stderr, massFit.intercept_stderr
residual_scaleRad = np.abs(residual_scaleAngle*3600/206265)/(cluster.mean_par/1000)
setattr(cluster,f"residual_coeff_{mode}",residual_coeff)
setattr(cluster,f"residual_scaleAngle_{mode}",residual_scaleAngle)
setattr(cluster,f"residual_scaleRad_{mode}",residual_scaleRad)
setattr(cluster,f"mass_slope_{mode}",fitslope)
setattr(cluster,f"mass_slope_err_{mode}",fitslope_err)
setattr(cluster,f"mass_intercept_{mode}",intercept)
setattr(cluster,f"mass_intercept_err_{mode}",intercept_err)
    setattr(cluster,f"mass_fit_r2_{mode}",rval**2)
    #Label for the linear fit to the binned average mass
    fitLabel = ( fr"Slope = {fitslope:.3f} $\pm$ {fitslope_err:.3f}" + "\n"
               + fr"Intercept = {intercept:.3f} $\pm$ {intercept_err:.3f}" + "\n"
               + fr"$r^2$ = {rval**2:.3f} ({mode.capitalize()})".replace("_normalized",' Normalized'))
#Plot the quantile and standard means on the existing average mass plot
plt.scatter(meanBins,quantileMean,color='red',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='red',label=fitLabel)
#plt.scatter(meanBins,binMean,color='dimgray',label=f'{mode.capitalize()} Standard Mean')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_averageMass_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_average_mass_filtered")
plt.title(f"{clname} Overlaid Average Mass")
plt.scatter(xDist,averageMass,color='midnightblue',label=fr"N={newN} ({mode.capitalize()})".replace("_normalized",' Normalized')+"\n"+f"{numPercentileBins} Percentile Bins")
plt.plot(xDist,[fitslope*a+intercept for a in xDist],color='darkred',label=fitLabel)
plt.scatter(meanBins,quantileMean,color='darkred',label=f'Interquartile Mean ({mode.capitalize()})'.replace("_normalized",' Normalized'))
#plt.scatter(meanBins,binMean,color='black',label=f'{mode.capitalize()} Standard Mean')
plt.legend(fontsize=8,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_averageMass_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_averageMass_overlay.png",dpi=500)
#========= Radius Plot =========
plt.figure(f"{clname}_characteristic_radius_{mode}")
if normalize:
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (Deg)")
else:
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (Deg)")
pltRad = abs(getattr(cluster,f"scaleAngle_{mode}"))
outline1 = Circle([x0,y0],1*pltRad,color='red',fill=False,ls='--',label=fr"$\rho$={1*pltRad:0.3f}$\degree$",alpha=0.7)
outline2 = Circle([x0,y0],5*pltRad,color='red',fill=False,ls='--',label=fr"5$\rho$={5*pltRad:0.3f}$\degree$",alpha=0.7)
#outline3 = Circle([x0,y0],10*abs(getattr(cluster,f"scaleAngle_{mode}")),color='red',fill=False,ls='--',label=fr"10$\rho$={3*abs(fit[1]):0.3f}$\degree$",alpha=0.7)
plt.gca().add_patch(outline1)
plt.gca().add_patch(outline2)
#plt.gca().add_patch(outline3)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} {mode.capitalize()} Characteristic Radius".replace("_normalized",' Normalized'))
plt.gcf().set_size_inches(8,8)
plt.savefig(f"{cluster.imgPath}{clname}_radialMembership_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_radialMembership_{mode}.png",dpi=500)
if "M67" in clname and "filtered" in mode:
plt.figure(f"{clname}_rings_{mode}")
if normalize:
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (Deg)")
else:
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (Deg)")
for i in range(0,len(rings)):
outline = Circle([x0,y0],rings[i],color='red',fill=False)
plt.gca().add_patch(outline)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} Radial Bins")
plt.gcf().set_size_inches(8,8)
plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_{mode}.pdf".replace("_filtered",''))
plt.savefig(f"SpecificPlots/png/{clname}_radialBins_{mode}.png".replace("_filtered",''),dpi=500)
plt.xlim(x0-0.15,x0+0.15)
plt.ylim(y0-0.15,y0+0.15)
plt.savefig(f"SpecificPlots/pdf/{clname}_radialBins_center_{mode}.pdf".replace("_filtered",''))
plt.savefig(f"SpecificPlots/png/{clname}_radialBins_center_{mode}.png".replace("_filtered",''),dpi=500)
#========= Stars by Mass =========
massList = []
innerMassList = []
for star in starList:
massList.append(star.proxyMass)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMassList.append(star.proxyMass)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMassList.append(star.proxyMass)
mBins = np.arange(min(massList),max(massList)+0.1,0.1)
inBins = np.arange(min(innerMassList),max(innerMassList)+0.1,0.1)
plt.figure(f"{clname}_mass_frequency_{mode}")
plt.xlabel(r"Stellar Mass ($M_{\odot}$)")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Mass Frequency".replace("_normalized",' Normalized'))
plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerMassList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mass_frequency_filtered")
plt.title(f"{clname} Overlaid Mass Frequency")
plt.hist(massList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerMassList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_massFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_massFrequency_overlay.png",dpi=500)
#========= Stars by Magnitude =========
magList = []
innerMagList = []
for star in starList:
magList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerMagList.append(star.g_mag-2.1*cluster.reddening-cluster.dist_mod)
mBins = np.arange(min(magList),max(magList)+0.1,0.1)
inBins = np.arange(min(innerMagList),max(innerMagList)+0.1,0.1)
plt.figure(f"{clname}_mag_frequency_{mode}")
plt.xlabel(r"Absolute G Mag")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Absolute Magnitude Frequency".replace("_normalized",' Normalized'))
plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerMagList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_mag_frequency_filtered")
plt.title(f"{clname} Overlaid Absolute Magnitude Frequency")
plt.hist(magList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerMagList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_magFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_magFrequency_overlay.png",dpi=500)
#========= Stars by Color =========
colorList = []
innerColorList = []
for star in starList:
colorList.append(star.b_r-cluster.reddening)
if normalize:
if np.sqrt((star.ra*np.cos(star.dec*np.pi/180)-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerColorList.append(star.b_r-cluster.reddening)
else:
if np.sqrt((star.ra-x0)**2+(star.dec-y0)**2) <= getattr(cluster,f"scaleAngle_{mode}"):
innerColorList.append(star.b_r-cluster.reddening)
mBins = np.arange(min(colorList),max(colorList)+0.1,0.1)
inBins = np.arange(min(innerColorList),max(innerColorList)+0.1,0.1)
plt.figure(f"{clname}_color_frequency_{mode}")
plt.xlabel(r"Dereddened BP-RP")
plt.ylabel("Number of Stars")
plt.title(f"{clname} {mode.capitalize()} Dereddened Color Index Frequency".replace("_normalized",' Normalized'))
plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'))
plt.hist(innerColorList,bins=inBins,color='midnightblue',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_{mode}.png",dpi=500)
#Double plot for bounded regions
if "bounded" in mode:
plt.figure(f"{clname}_color_frequency_filtered")
plt.title(f"{clname} Overlaid Dereddened Color Index Frequency")
plt.hist(colorList,bins=mBins,label=f"Total {mode.capitalize()}".replace("_normalized",' Normalized'),color='red')
plt.hist(innerColorList,bins=inBins,color='darkred',label=f'Inside Core Radius ({mode.capitalize()})'.replace("_normalized",' Normalized'))
plt.legend(fontsize=10,loc='upper right')
plt.savefig(f"{cluster.imgPath}{clname}_colorFrequency_overlay.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_colorFrequency_overlay.png",dpi=500)
#========= Other Radii =========
massSum = np.sum([star.proxyMass for star in starList])
intensitySum = np.sum([toIntensity(star.g_mag) for star in starList])
curMassSum = 0
curIntSum = 0
massFound = False
intFound = False
if normalize:
setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
setattr(cluster,f"medianAngle_{mode}",np.median([star.normRadDist for star in starList]))
radialStarList = sorted(starList,key=lambda x: x.normRadDist)
for star in radialStarList:
curMassSum += star.proxyMass
curIntSum += toIntensity(star.g_mag)
if curMassSum > massSum/2 and not massFound:
setattr(cluster,f"halfMassRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfMassAngle_{mode}",star.normRadDist)
massFound = True
if curIntSum > intensitySum/2 and not intFound:
setattr(cluster,f"halfLightRad_{mode}",np.abs(star.normRadDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfLightAngle_{mode}",star.normRadDist)
intFound = True
if massFound and intFound:
break
plt.figure(f"{clname}_other_radii_{mode}")
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra*np.cos(star.dec*np.pi/180) for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA*cos(Dec) (deg)")
else:
setattr(cluster,f"medianRad_{mode}",np.median([np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000) for star in starList]))
setattr(cluster,f"medianAngle_{mode}",np.median([star.radDist for star in starList]))
radialStarList = sorted(starList,key=lambda x: x.radDist)
for star in radialStarList:
curMassSum += star.proxyMass
curIntSum += toIntensity(star.g_mag)
if curMassSum > massSum/2 and not massFound:
setattr(cluster,f"halfMassRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfMassAngle_{mode}",star.radDist)
massFound = True
if curIntSum > intensitySum/2 and not intFound:
setattr(cluster,f"halfLightRad_{mode}",np.abs(star.radDist*3600/206265)/(cluster.mean_par/1000))
setattr(cluster,f"halfLightAngle_{mode}",star.radDist)
intFound = True
if massFound and intFound:
break
plt.figure(f"{clname}_other_radii_{mode}")
plt.scatter([star.ra for star in cluster.unfilteredWide],[star.dec for star in cluster.unfilteredWide],s=1,c='lightgray',label='Unfiltered')
plt.scatter([star.ra for star in cluster.filtered],[star.dec for star in cluster.filtered],s=2,c='midnightblue',label='Filtered')
plt.xlabel("RA (deg)")
medRad = getattr(cluster,f"medianRad_{mode}")
medAngle = getattr(cluster,f"medianAngle_{mode}")
mRad = getattr(cluster,f"halfMassRad_{mode}")
mAngle = getattr(cluster,f"halfMassAngle_{mode}")
lRad = getattr(cluster,f"halfLightRad_{mode}")
lAngle = getattr(cluster,f"halfLightAngle_{mode}")
print(medAngle)
outline1 = Circle([x0,y0],medAngle,color='red',fill=False,ls='--',label=fr"Median Star Distance = {medAngle:.3f}$\degree$, {medRad:.3f}pc",alpha=1)
outline2 = Circle([x0,y0],mAngle,color='darkgreen',fill=False,ls='--',label=fr"Half Mass Radius = {mAngle:.3f}$\degree$, {mRad:.3f}pc",alpha=1)
outline3 = Circle([x0,y0],lAngle,color='purple',fill=False,ls='--',label=fr"Half Light Radius = {lAngle:.3f}$\degree$, {lRad:.3f}pc",alpha=1)
plt.gca().add_patch(outline1)
plt.gca().add_patch(outline2)
plt.gca().add_patch(outline3)
plt.legend(fontsize=10,loc='upper right')
plt.axis('square')
plt.ylabel("DEC (Deg)")
plt.title(f"{clname} {mode.capitalize()} Various Radii".replace("_normalized",' Normalized'))
plt.gcf().set_size_inches(8,8)
plt.savefig(f"{cluster.imgPath}{clname}_otherRadii_{mode}.pdf")
plt.savefig(f"{cluster.imgPath}png/{clname}_otherRadii_{mode}.png",dpi=500)
def checkLoaded(cList):
if 'all' in cList:
cList = [c.name for c in clusterList]
else:
for cl in cList:
if not cl in clusters:
loadClusters([cl])
return cList
def saveResults(cList,outdir="results"):
#Imports
import numpy as np
import dill
import os
global clusters
global clusterList
checkLoaded(cList)
#Check and create the relevant directory paths to save/load the results
if not os.path.isdir(f"{outdir}/"):
os.mkdir(f"{outdir}/")
if not os.path.isdir(f"{outdir}/pickled/"):
os.mkdir(f"{outdir}/pickled/")
#Save results for each loaded cluster (previously this ran only when the pickled directory already existed)
if os.path.isdir(f"{outdir}/pickled/"):
for cl in cList:
cluster = clusters[cl]
#Creates a "result cluster" object from the cluster, effectively just stripping away lists
rCl = resultClusterObj(cluster)
#Pickle the result cluster object
with open(f"{outdir}/pickled/{cluster.name}.pk1", 'wb') as output:
dill.dump(rCl, output)
#Store variables into an array to be printed as csv
properties = [a for a in dir(rCl) if not a.startswith('_')]
res = [getattr(rCl,p) for p in properties]
#Stack into an array of 2 rows with variable names and values
fin = np.vstack((properties,res))
np.savetxt(f"{outdir}/{cluster.name}.csv",fin,delimiter=',',fmt='%s')
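#Minimal sketch (not used by the pipeline) of reading one of the two-row CSVs written
#above back into a {property: value} dictionary; the file name here is hypothetical.
def _exampleReadResultCSV(path="results/NGC2301.csv"):
    import numpy as np
    rows = np.genfromtxt(path, delimiter=',', dtype=str)
    #Row 0 holds the property names, row 1 the corresponding values (all as strings)
    return dict(zip(rows[0], rows[1]))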
def loadResults(filter="None",indir="results"):
#Imports
import numpy as np
import dill
import os
global resultList
global resultsIn
assert os.path.isdir(f"{indir}/")
resultList = []
for fn in os.listdir(indir+"/pickled/"):
#Reads in instances from the saved pickle file
with open(f"{indir}/pickled/{fn}",'rb') as input:
res = dill.load(input)
resultList.append(res)
resultsIn = True
toDict()
def refreshProperties(cList=['all']):
import numpy as np
global catalogue
global clusterList
global clusters
clusterCatalogue()
cList = checkLoaded(cList)
#Loop through the loaded cluster objects
for cluster in [clusters[c] for c in cList]:
reference = None
for cl in catalogue:
if str(cl.name) == str(cluster.name):
reference = cl
print(f"Catalogue match for {cluster.name} found")
break
if reference == None:
print(f"Catalogue match for {cluster.name} was not found, please create one")
continue
#Filter all of the methods out of the properties list
properties = [a for a in dir(reference) if not a.startswith('_')]
#print(properties)
#exec(f"print(reference.{properties[1]})")
#print(properties)
#Now we have a list of all the attributes assigned to the catalogue (the self.variables)
for p in properties:
prop = getattr(reference,p)
#print(prop)
exec(f"cluster.{p} = prop")
try:
if prop <= -98:
print(f"{cluster.name} does not have a specified catalogue value for {p}")
except:
continue
#Additional properties that may be useful
for star in cluster.filtered:
star.normRA = star.pmra*np.cos(star.dec*np.pi/180)
print(f"{cluster.name} properties refreshed from catalogue")
def statPlot(statX,statY,population="open",color="default",square=True,invertY=False,logX=False,logY=False,pointLabels=True,linFit=False,directory='default'):
#Create plots of stat X vs stat Y across a population of clusters, similar to customPlot()
#Can be set to use a custom list of clusters, or all clusters of a given type
#
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import linregress
global clusters
global clusterList
global catalogue
global resultsIn
global resultList
if not resultsIn:
loadResults()
#Filter out incorrect inputs
if type(population) == str:
population = population.lower()
try:
assert population == "open" or population == "globular"
except:
print("Specified population type not recognized")
else:
try:
assert type(population) == list
assert type(population[0]) == str
except:
print("Population type given is not valid, must be either a list of cluster name strings or a single string \'open\' or \'closed\'")
return
try:
assert len(population) > 1
except:
print("Population statistic plots cannot be made with fewer than 2 clusters given")
return
#Load cluster information from cList
#This is going to involve using the resultCluster object to read data from each cluster folder in the cList
cList = []
banList = ['NGC2204']
if type(population) == str:
for res in resultList:
if res.clType.lower() == population and not res.name in banList:
cList.append(res)
else:
for res in resultList:
if res.name in population:
cList.append(res)
if statX.lower() == "b_r" and statY.lower() == "g_mag":
#Corrected CMD overlay
NUM_COLORS = len(cList)
cm = plt.get_cmap('nipy_spectral')
plt.figure("uncorrected")
plt.title("Cluster Overlay")
plt.xlabel("Observed B-R")
plt.ylabel("Apparent G Mag")
plt.gca().invert_yaxis()
plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
plt.figure("unshifted")
plt.title("Corrected Cluster Overlay")
plt.xlabel("Dereddened B-R")
plt.ylabel("Absolute G Mag")
plt.gca().invert_yaxis()
plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
plt.figure("shifted")
plt.title("Corrected Cluster Overlay - Offset")
plt.xlabel("Dereddened B-R")
plt.ylabel("Absolute G Mag")
plt.gca().invert_yaxis()
plt.gca().set_prop_cycle('color', [cm(1.025*i/NUM_COLORS) for i in range(NUM_COLORS)])
index = 0
offset = 2.5
for cluster in cList:
try:
path = cluster.dataPath
except:
path = f"clusters/{cluster.name}/data/"
condensed = np.genfromtxt(f"{path}condensed.csv",delimiter=",")
cluster.condensed = condensed
#Adjust by cluster.reddening and cluster.dist_mod
x1 = [a[0] for a in condensed]
y1 = [a[1] for a in condensed]
x2 = [a[0]-cluster.reddening for a in condensed]
y2 = [a[1]-2.1*cluster.reddening-cluster.dist_mod for a in condensed]
x3 = [a[0]-cluster.reddening for a in condensed]
y3 = [a[1]-2.1*cluster.reddening-cluster.dist_mod+index*offset for a in condensed]
index += 1
plt.figure("uncorrected")
plt.scatter(x1,y1,label=f"{cluster.name}")
plt.figure("unshifted")
plt.axvline(x=1.6,ymax=0.5,color='black',linestyle='--')
plt.axhline(y=4,xmin=0.59,color='black',linestyle='--')
plt.scatter(x2,y2,label=f"{cluster.name}")
plt.figure("shifted")
plt.scatter(x3,y3,label=f"{cluster.name}")
plt.axvline(x=1.6,color='black',linestyle='--')
# if 'NGC2301' in cluster.name:
# for a,b in zip(x2,y2):
# print(f"{a},{b}")
plt.figure("uncorrected")
plt.legend(fontsize=10,loc='upper right')
plt.gcf().set_size_inches(8,6)
plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_apparent.pdf")
plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_apparent.png",dpi=500)
plt.figure("unshifted")
plt.legend(fontsize=10,loc='upper right')
plt.gcf().set_size_inches(8,6)
plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_absolute.pdf")
plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_absolute.png",dpi=500)
plt.figure("shifted")
plt.legend(fontsize=10,loc='upper right')
plt.gcf().set_size_inches(8,6)
plt.savefig(f"results/plots/pdf/{population}_clusters_stacked_cmd_shifted.pdf")
plt.savefig(f"results/plots/png/{population}_clusters_stacked_cmd_shifted.png",dpi=500)
else:
x = [getattr(a, statX) for a in cList]
y = [getattr(a, statY) for a in cList]
plt.figure()
plt.xlabel(f"{statX}")
plt.ylabel(f"{statY}")
if pointLabels:
for cluster in cList:
plt.scatter(getattr(cluster, statX),getattr(cluster, statY),label=cluster.name)
plt.legend(fontsize="small")
else:
plt.scatter(x,y)
if linFit:
reg = linregress(x,y)
plt.plot(x,[reg[0]*a+reg[1] for a in x])
plt.savefig(f"SpecificPlots/pdf/{population}_{statX}_{statY}.pdf")
plt.savefig(f"SpecificPlots/png/{population}_{statX}_{statY}.png",dpi=500)
return
def ageMassFit(t,m0,k):
import numpy as np
return 1 + m0*np.exp(-1*k*t)
def extinctionLaw(d,M0):
import numpy as np
return M0 -2.5*np.log10(1/(4*np.pi*d**2))
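#Minimal sketch of how the two model functions above are fed to scipy's curve_fit
#(the same pattern used in resultPlots below); the data here are synthetic and this
#helper is illustrative only, it is not called anywhere in the pipeline.
def _exampleAgeMassFit():
    import numpy as np
    from scipy.optimize import curve_fit
    ages = np.linspace(0.5, 8, 20)                      #hypothetical ages in Gyr
    masses = ageMassFit(ages, 8, 1) + np.random.normal(0, 0.05, ages.size)
    fit, cov = curve_fit(ageMassFit, ages, masses, p0=[8, 1], maxfev=1000)
    #fit[0] ~ m0 and fit[1] ~ k; the diagonal of cov gives the parameter variances
    return fit, np.sqrt(np.diag(cov))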
def resultPlots():
#Imports
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import linregress
from scipy.optimize import curve_fit
global clusters
global clusterList
global catalogue
global resultsIn
global resultList
if not resultsIn:
loadResults()
#Select open clusters from resultList
banList = ['NGC2204']
cList = []
for res in resultList:
if res.clType.lower() == "open" and not res.name in banList:
cList.append(res)
#Filtered mass versus age
fname = "mass_vs_age_filtered"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters")
plt.xlabel("Fit Age (Gyr)")
plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
plt.scatter([c.fit_age for c in cList],[c.meanProxyMass for c in cList])
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Bounded mass versus age
fname = "mass_vs_age_bounded"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
plt.xlabel("Fit Age (Gyr)")
plt.ylabel(r"Mean Cluster Member Mass ($M_{\odot}$)")
x,y = [c.fit_age for c in cList],[c.meanBoundedProxyMass for c in cList]
plt.scatter(x,y)
fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
xr = list(np.linspace(min(x),max(x),101))
fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{var[0][0]:.3f}, \pm{var[1][1]:.3f}$"
plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
plt.legend()
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Mass intercept versus age
fname = "mass_intercept_vs_age_bounded"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
plt.xlabel("Fit Age (Gyr)")
plt.ylabel(r"Mean Stellar Mass in Core ($M_{\odot}$)")
x,y = [c.fit_age for c in cList],[c.mass_intercept_bounded for c in cList]
plt.scatter(x,y)
fit,var = curve_fit(ageMassFit,x,y,p0=[8,1],maxfev=1000)
xr = list(np.linspace(min(x),max(x),101))
fitLabel = fr"$y = 1+{fit[0]:.3f}e^{{-{fit[1]:.3f}t}}$" + "\n" + fr"Uncertainties = $\pm{var[0][0]:.3f}, \pm{var[1][1]:.3f}$"
plt.plot(xr,[ageMassFit(a,fit[0],fit[1]) for a in xr],label=fitLabel)
plt.legend()
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Mass slope versus age
fname = "mass_slop_vs_age_bounded"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
plt.xlabel("Fit Age (Gyr)")
plt.ylabel(r"IQM Stellar Mass Dropoff ($\frac{M_{\odot}}{pc}$)")
x,y = [c.fit_age for c in cList],[c.mass_slope_bounded for c in cList]
plt.scatter(x,y)
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Magnitude versus distance (Extinction law)
fname = "mag_vs_dist_bounded"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
plt.xlabel("Cluster Distance from Earth (pc)")
plt.ylabel(r"Mean Apparent G Magnitude")
x,y = [c.meanDist for c in cList],[c.mean_bounded_g_mag for c in cList]
plt.scatter(x,y)
fit,var = curve_fit(extinctionLaw,x,y,maxfev=1000)
xr = list(np.linspace(min(x),max(x),101))
plt.plot(xr,[extinctionLaw(a,fit[0]) for a in xr],label="Inverse Square Law \n" + fr" $M_0 = {fit[0]:.3f} \pm {var[0][0]:.3f}$")
plt.gca().invert_yaxis()
plt.legend()
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Bounded fraction versus distance
fname = "bounded_fraction_vs_dist"
plt.figure(fname)
plt.title(f"{len(cList)} Open Clusters - BR-RP Limit Enforced")
plt.xlabel("Cluster Distance from Earth (pc)")
plt.ylabel("Fraction Unaffected by BP-RP Limit")
x,y = [c.meanDist for c in cList],[c.fractionBounded for c in cList]
plt.scatter(x,y)
plt.savefig(f"results/plots/pdf/{fname}.pdf")
plt.savefig(f"results/plots/png/{fname}.png",dpi=500)
#Radii
plt.figure()
plt.scatter([c.meanGalacticDist for c in cList],[c.halfLightRad_bounded/c.medianRad_bounded for c in cList])
def boundedStats(cList,xmax=1.6,saveCl=True,unloadCl=True):
import numpy as np
global clusters
global subList
for cl in cList:
checkLoaded([cl])
cluster = clusters[cl]
subList = [star for star in cluster.filtered if not (star.b_r-cluster.reddening > xmax and star.g_mag > cluster.cltpy)]
cluster.bounded = subList
#Windowed properties (over the xmin to xmax range)
cluster.meanBoundedProxyMass = np.mean([a.proxyMass for a in subList])
cluster.totalBoundedProxyMass = np.sum([a.proxyMass for a in subList])
cluster.numBounded = len(subList)
cluster.fractionBounded = len(subList)/len(cluster.filtered)
cluster.mean_bounded_b_r = np.mean([a.b_r for a in subList])
cluster.mean_bounded_g_mag = np.mean([a.g_mag for a in subList])
if saveCl:
saveClusters([cl])
saveResults([cl])
if unloadCl:
unloadClusters([cl])
def tryFits(fitVar='fit_age'):
from scipy.stats import linregress
global resultsIn
global resultList
global props
global r2
if not resultsIn:
loadResults()
cList = []
for res in resultList:
if res.clType.lower() == "open":
cList.append(res)
if 'all' in fitVar:
#List of plottable variables
props = dir(cList[0])
props = [a for a in props if not '__' in a]
propList = [a for a in props if type(getattr(cList[0],a)) == float]
propList.remove('turnPoint')
r2 = []
for pr in propList:
#List of plottable variables
props = dir(cList[0])
props = [a for a in props if not '__' in a]
props = [a for a in props if type(getattr(cList[0],a)) == float]
props.remove('turnPoint')
props.remove(pr)
for prop in props:
x = [getattr(a, pr) for a in cList]
y = [getattr(a, prop) for a in cList]
reg = linregress(x,y)
r2.append((pr,prop,reg[2]**2))
r2 = sorted(r2,key = lambda x: x[2],reverse=True)
print("Top 100 r^2 values:")
for r in r2[:200]:
print(f"{r[0]} | {r[1]} | {r[2]}")
else:
#List of plottable variables
props = dir(cList[0])
props = [a for a in props if not '__' in a]
props = [a for a in props if type(getattr(cList[0],a)) == float]
props.remove('turnPoint')
props.remove(fitVar)
r2 = []
for prop in props:
x = [getattr(a, fitVar) for a in cList]
y = [getattr(a, prop) for a in cList]
reg = linregress(x,y)
r2.append((prop,reg[2]**2))
r2 = sorted(r2,key = lambda x: x[1],reverse=True)
print("Top 20 r^2 values:")
for r in r2[:20]:
print(f"{r[0]} | {r[1]}")
def prelimPlot(cl):
import matplotlib.pyplot as plt
cluster = clusters[cl]
plt.scatter([a.ra for a in cluster.unfilteredWide],[a.dec for a in cluster.unfilteredWide],s=0.1)
plt.figure()
plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1)
# plt.figure()
# plt.scatter([a.pmra for a in cluster.unfilteredWide],[a.pmdec for a in cluster.unfilteredWide],s=0.1,c=[a.par for a in cluster.unfilteredWide])
# plt.set_cmap('cool')
# clb = plt.colorbar()
plt.figure()
plt.scatter([a.b_r for a in cluster.unfilteredWide],[a.g_mag for a in cluster.unfilteredWide],s=0.1)
plt.gca().invert_yaxis()
# plt.figure()
# plt.scatter([a.par for a in cluster.unfilteredWide],[a.par for a in cluster.unfilteredWide],s=0.1,c=[(a.pmra**2 + a.pmdec**2)**0.5 for a in cluster.unfilteredWide])
# plt.set_cmap('cool') | setattr(cluster,f"mass_fit_p_{mode}",pval)
fitLabel = ( fr"Slope = {fitslope:.3f} $\pm$ {fitslope_err:.3f}" + "\n" |
0004_auto_20200221_1956.py | # Generated by Django 3.0.3 on 2020-02-21 19:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('letters', '0003_auto_20200110_0200'),
]
operations = [
migrations.AlterField(
model_name='letter',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_created_by', to=settings.AUTH_USER_MODEL, verbose_name='Created by'),
),
migrations.AlterField(
model_name='letter',
name='created_on',
field=models.DateTimeField(auto_now_add=True, verbose_name='Date of creation'),
),
migrations.AlterField(
model_name='letter',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='letter_modified_by', to=settings.AUTH_USER_MODEL, verbose_name='Modified by'),
),
migrations.AlterField(
model_name='letter',
name='modified_on',
field=models.DateTimeField(auto_now=True, verbose_name='Date of the modification'),
),
] |
|
lib.rs | //! Contains the ingredients needed to create wrappers over tokio AsyncRead/AsyncWrite items
//! to automatically reconnect upon failures. This is done so that a user can use them without worrying
//! that their application logic will terminate simply due to an event like a temporary network failure.
//!
//! This crate will try to provide commonly used io items, for example, the [StubbornTcpStream](StubbornTcpStream).
//! If you need to create your own, you simply need to implement the [UnderlyingIo](crate::tokio::UnderlyingIo) trait.
//! Once implemented, you can construct it easily by creating a [StubbornIo](crate::tokio::StubbornIo) type as seen below.
//!
//! *This crate requires at least version 1.39 of the Rust compiler.*
//!
//! ### Motivations
//! This crate was created because I was working on a service that needed to fetch data from a remote server
//! via a tokio TcpConnection. It normally worked perfectly (as does all of my code ☺), but every time the
//! remote server had a restart or turnaround, my application logic would stop working.
//! **stubborn-io** was born because I did not want to complicate my service's logic with TcpStream
//! reconnect and disconnect handling code. With stubborn-io, I can keep the service exactly the same,
//! knowing that the StubbornTcpStream's sensible defaults will perform reconnects in a way to keep my service running.
//! Once I realized that the implementation could apply to all IO items and not just TcpStream, I made it customizable as
//! seen below.
//!
//! ## Example on how a Stubborn IO item might be created
//! ``` ignore
//! use std::io;
//! use std::future::Future;
//! use std::path::PathBuf;
//! use std::pin::Pin;
//! use stubborn_io::tokio::{StubbornIo, UnderlyingIo};
//! use tokio::fs::File;
//!
//! impl UnderlyingIo<PathBuf> for File {
//! // Establishes an io connection.
//! // Additionally, this will be used when reconnect tries are attempted.
//! fn establish(path: PathBuf) -> Pin<Box<dyn Future<Output = io::Result<Self>> + Send>> {
//! Box::pin(async move {
//! // In this case, we are trying to "connect" a file that
//! // should exist on the system
//! Ok(File::open(path).await?)
//! })
//! }
//! }
//!
//! // Because StubbornIo implements deref, you are able to invoke
//! // the original methods on the File struct.
//! type HomemadeStubbornFile = StubbornIo<File, PathBuf>;
//! let path = PathBuf::from("./foo/bar.txt");
//!
//! let stubborn_file = HomemadeStubbornFile::connect(&path).await?;
//! // ... application logic here
//! ```
pub mod config;
// in the future, there may be a mod for synchronous regular io too, which is why
// the async items are deliberately placed under the tokio module
pub mod tokio;
#[doc(inline)]
pub use self::config::ReconnectOptions;
#[doc(inline)]
pub use self::tokio::{StubbornTcpStream, StubbornTlsStream};
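// A rough usage sketch for the ready-made re-export above (a hypothetical snippet,
// kept as a comment rather than a compiled test; `connect` mirrors the trait-based
// example in the module docs):
//
// use stubborn_io::StubbornTcpStream;
//
// async fn run() -> std::io::Result<()> {
//     let tcp_stream = StubbornTcpStream::connect("localhost:8080").await?;
//     // Deref lets this be used much like a plain tokio TcpStream.
//     Ok(())
// }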
// needed because the above doc example can't compile due to the fact that a consumer of this crate
// does not own the struct for tokio::fs::File.
#[test]
fn test_compilation_for_doc_example() {
use self::tokio::{StubbornIo, UnderlyingIo};
use ::tokio::fs::File;
use std::future::Future;
use std::io;
use std::path::PathBuf;
use std::pin::Pin;
impl UnderlyingIo<PathBuf> for File {
// Implementing the creation function that will be used to establish an io connection.
fn establish(path: PathBuf) -> Pin<Box<dyn Future<Output = io::Result<Self>> + Send>> {
| }
type HomemadeStubbornFile = StubbornIo<File, PathBuf>;
let _ = HomemadeStubbornFile::connect(PathBuf::from("foo"));
}
| Box::pin(async move { Ok(File::open(path).await?) })
}
|
coordinator.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The coordinator runs the majority of the Go build system.
//
// It is responsible for finding build work and executing it,
// reporting the results to build.golang.org for public display.
//
// For an overview of the Go build system, see the README at
// the root of the x/build repo.
package main // import "golang.org/x/build/cmd/coordinator"
import (
"bytes"
"context"
"crypto/rand"
"crypto/sha1"
"crypto/tls"
"encoding/csv"
"errors"
"flag"
"fmt"
"html"
"html/template"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
_ "net/http/pprof"
"net/url"
"os"
"path"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"go4.org/syncutil"
"grpc.go4.org"
"cloud.google.com/go/storage"
"golang.org/x/build"
"golang.org/x/build/autocertcache"
"golang.org/x/build/buildenv"
"golang.org/x/build/buildlet"
"golang.org/x/build/cmd/coordinator/spanlog"
"golang.org/x/build/dashboard"
"golang.org/x/build/gerrit"
"golang.org/x/build/internal/buildgo"
"golang.org/x/build/internal/sourcecache"
"golang.org/x/build/livelog"
"golang.org/x/build/maintner/maintnerd/apipb"
"golang.org/x/build/types"
"golang.org/x/crypto/acme/autocert"
perfstorage "golang.org/x/perf/storage"
"golang.org/x/time/rate"
)
const (
subrepoPrefix = "golang.org/x/"
// eventDone is a build event name meaning the build was
// completed (either successfully or with remote errors).
// Notably, it is NOT included for network/communication
// errors.
eventDone = "done"
// eventSkipBuildMissingDep is a build event name meaning
// the builder type is not applicable to the commit being
// tested because the commit lacks a necessary dependency
// in its git history.
eventSkipBuildMissingDep = "skipped_build_missing_dep"
)
var (
processStartTime = time.Now()
processID = "P" + randHex(9)
)
var Version string // set by linker -X
// devPause is a debug option to pause for 5 minutes after the build
// finishes before destroying buildlets.
const devPause = false
// stagingTryWork is a debug option to enable or disable running
// trybot work in staging.
//
// If enabled, only open CLs containing "DO NOT SUBMIT" and "STAGING"
// in their commit message (in addition to being marked Run-TryBot+1)
// will be run.
const stagingTryWork = true
var (
masterKeyFile = flag.String("masterkey", "", "Path to builder master key. Else fetched using GCE project attribute 'builder-master-key'.")
mode = flag.String("mode", "", "Valid modes are 'dev', 'prod', or '' for auto-detect. dev means localhost development, not to be confused with staging on go-dashboard-dev, which is still the 'prod' mode.")
buildEnvName = flag.String("env", "", "The build environment configuration to use. Not required if running on GCE.")
devEnableGCE = flag.Bool("dev_gce", false, "Whether or not to enable the GCE pool when in dev mode. The pool is enabled by default in prod mode.")
shouldRunBench = flag.Bool("run_bench", false, "Whether or not to run benchmarks on trybot commits. Override by GCE project attribute 'farmer-run-bench'.")
perfServer = flag.String("perf_server", "", "Upload benchmark results to `server`. Overrides buildenv default for testing.")
)
// LOCK ORDER:
// statusMu, buildStatus.mu, trySet.mu
// (Other locks, such as the remoteBuildlet mutex should
// not be used along with other locks)
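// An illustrative nesting that respects the order above (a sketch, not a quote from
// the code below): acquire statusMu before a buildStatus's mu, e.g.
//
//	statusMu.Lock()
//	st.mu.Lock()
//	// ... touch the shared maps and the individual build's fields ...
//	st.mu.Unlock()
//	statusMu.Unlock()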
var (
statusMu sync.Mutex // guards the following four structures; see LOCK ORDER comment above
status = map[buildgo.BuilderRev]*buildStatus{}
statusDone []*buildStatus // finished recently, capped to maxStatusDone
tries = map[tryKey]*trySet{} // trybot builds
tryList []tryKey
)
var (
tryBuilders []dashboard.BuildConfig // for testing the go repo
subTryBuilders []dashboard.BuildConfig // for testing sub-repos
)
var maintnerClient apipb.MaintnerServiceClient
func initTryBuilders() {
for _, name := range dashboard.TrybotBuilderNames() {
conf := dashboard.Builders[name]
tryBuilders = append(tryBuilders, conf)
if conf.BuildSubrepos() {
subTryBuilders = append(subTryBuilders, conf)
}
}
}
const (
maxStatusDone = 30
// vmDeleteTimeout and podDeleteTimeout are how long we wait before deleting a VM or pod.
// In practice this need only be as long as the slowest
// builder (plan9 currently), because on startup this program
// already deletes all buildlets it doesn't know about
// (i.e. ones from a previous instance of the coordinator).
vmDeleteTimeout = 45 * time.Minute
podDeleteTimeout = 45 * time.Minute
)
// Fake keys signed by a fake CA.
// These are used in localhost dev mode. (Not to be confused with the
// staging "dev" instance under GCE project "go-dashboard-dev")
var testFiles = map[string]string{
"farmer-cert.pem": build.DevCoordinatorCA,
"farmer-key.pem": build.DevCoordinatorKey,
}
func listenAndServeTLS() {
addr := ":443"
if *mode == "dev" {
addr = "localhost:8119"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
log.Fatalf("net.Listen(%s): %v", addr, err)
}
serveTLS(ln)
}
func serveTLS(ln net.Listener) {
config := &tls.Config{
NextProtos: []string{"http/1.1"},
}
if bucket := buildEnv.AutoCertCacheBucket; bucket != "" {
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: func(_ context.Context, host string) error {
if !strings.HasSuffix(host, ".golang.org") {
return fmt.Errorf("bogus host %q", host)
}
return nil
},
Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, bucket),
}
config.GetCertificate = m.GetCertificate
} else {
certPEM, err := readGCSFile("farmer-cert.pem")
if err != nil {
log.Printf("cannot load TLS cert, skipping https: %v", err)
return
}
keyPEM, err := readGCSFile("farmer-key.pem")
if err != nil {
log.Printf("cannot load TLS key, skipping https: %v", err)
return
}
cert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
log.Printf("bad TLS cert: %v", err)
return
}
config.Certificates = []tls.Certificate{cert}
}
server := &http.Server{
Addr: ln.Addr().String(),
Handler: httpRouter{},
}
tlsLn := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
log.Printf("Coordinator serving on: %v", tlsLn.Addr())
if err := server.Serve(tlsLn); err != nil {
log.Fatalf("serve https: %v", err)
}
}
type tcpKeepAliveListener struct {
*net.TCPListener
}
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
type loggerFunc func(event string, optText ...string)
func (fn loggerFunc) LogEventTime(event string, optText ...string) {
fn(event, optText...)
}
func (fn loggerFunc) CreateSpan(event string, optText ...string) spanlog.Span {
return createSpan(fn, event, optText...)
}
func main() {
flag.Parse()
if Version == "" && *mode == "dev" {
Version = "dev"
}
log.Printf("coordinator version %q starting", Version)
err := initGCE()
if err != nil {
if *mode == "" {
*mode = "dev"
}
log.Printf("VM support disabled due to error initializing GCE: %v", err)
} else {
if *mode == "" {
*mode = "prod"
}
}
// TODO(evanbrown): disable kubePool if init fails
err = initKube()
if err != nil {
kubeErr = err
log.Printf("Kube support disabled due to error initializing Kubernetes: %v", err)
}
go updateInstanceRecord()
switch *mode {
case "dev", "prod":
log.Printf("Running in %s mode", *mode)
default:
log.Fatalf("Unknown mode: %q", *mode)
}
cc, err := grpc.NewClient(http.DefaultClient, "https://maintner.golang.org")
if err != nil {
log.Fatal(err)
}
maintnerClient = apipb.NewMaintnerServiceClient(cc)
http.HandleFunc("/", handleStatus)
http.HandleFunc("/debug/goroutines", handleDebugGoroutines)
http.HandleFunc("/debug/watcher/", handleDebugWatcher)
http.HandleFunc("/builders", handleBuilders)
http.HandleFunc("/temporarylogs", handleLogs)
http.HandleFunc("/reverse", handleReverse)
http.HandleFunc("/style.css", handleStyleCSS)
http.HandleFunc("/try", handleTryStatus)
http.HandleFunc("/status/reverse.json", reversePool.ServeReverseStatusJSON)
http.Handle("/buildlet/create", requireBuildletProxyAuth(http.HandlerFunc(handleBuildletCreate)))
http.Handle("/buildlet/list", requireBuildletProxyAuth(http.HandlerFunc(handleBuildletList)))
go func() {
if *mode == "dev" {
return
}
err := http.ListenAndServe(":80", httpRouter{})
if err != nil {
log.Fatalf("http.ListenAndServe:80: %v", err)
}
}()
workc := make(chan buildgo.BuilderRev)
if *mode == "dev" {
// TODO(crawshaw): do more in dev mode
gcePool.SetEnabled(*devEnableGCE)
http.HandleFunc("/dosomework/", handleDoSomeWork(workc))
} else {
go gcePool.cleanUpOldVMs()
if kubeErr == nil {
go kubePool.cleanUpOldPodsLoop(context.Background())
}
if inStaging {
dashboard.Builders = stagingClusterBuilders()
}
initTryBuilders()
go findWorkLoop(workc)
go findTryWorkLoop()
go reportMetrics(context.Background())
// TODO(cmang): gccgo will need its own findWorkLoop
}
go listenAndServeTLS()
go listenAndServeSSH() // ssh proxy to remote buildlets; remote.go
ticker := time.NewTicker(1 * time.Minute)
for {
select {
case work := <-workc:
if !mayBuildRev(work) {
if inStaging {
if _, ok := dashboard.Builders[work.Name]; ok && logCantBuildStaging.Allow() {
log.Printf("may not build %v; skipping", work)
}
}
continue
}
st, err := newBuild(work)
if err != nil {
log.Printf("Bad build work params %v: %v", work, err)
} else {
st.start()
}
case <-ticker.C:
if numCurrentBuilds() == 0 && time.Now().After(processStartTime.Add(10*time.Minute)) {
// TODO: halt the whole machine to kill the VM or something
}
}
}
}
// watcherProxy is the proxy which forwards from
// https://farmer.golang.org/ to the gitmirror kubernetes service (git
// cache+sync).
// This is used for /debug/watcher/<reponame> status pages, which are
// served at the same URL paths for both the farmer.golang.org host
// and the internal backend. (The name "watcher" is old; it's now called
// "gitmirror" but the URL path remains for now.)
var watcherProxy *httputil.ReverseProxy
func init() {
u, err := url.Parse("http://gitmirror/") // unused hostname
if err != nil {
log.Fatal(err)
}
watcherProxy = httputil.NewSingleHostReverseProxy(u)
watcherProxy.Transport = &http.Transport{
IdleConnTimeout: 30 * time.Second,
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
return goKubeClient.DialServicePort(ctx, "gitmirror", "")
},
}
}
func handleDebugWatcher(w http.ResponseWriter, r *http.Request) {
watcherProxy.ServeHTTP(w, r)
}
func stagingClusterBuilders() map[string]dashboard.BuildConfig {
m := map[string]dashboard.BuildConfig{}
for _, name := range []string{
"linux-arm",
"linux-arm-arm5spacemonkey",
"linux-amd64",
"linux-386-387",
"windows-386-gce",
} {
if c, ok := dashboard.Builders[name]; ok {
m[name] = c
} else {
panic(fmt.Sprintf("unknown builder %q", name))
}
}
// Also permit all the reverse buildlets:
for name, bc := range dashboard.Builders {
if bc.IsReverse() {
m[name] = bc
}
}
return m
}
func numCurrentBuilds() int {
statusMu.Lock()
defer statusMu.Unlock()
return len(status)
}
func numCurrentBuildsOfType(typ string) (n int) {
statusMu.Lock()
defer statusMu.Unlock()
for rev := range status {
if rev.Name == typ {
n++
}
}
return
}
func isBuilding(work buildgo.BuilderRev) bool {
statusMu.Lock()
defer statusMu.Unlock()
_, building := status[work]
return building
}
var (
logUnknownBuilder = rate.NewLimiter(rate.Every(5*time.Second), 2)
logCantBuildStaging = rate.NewLimiter(rate.Every(1*time.Second), 2)
)
// mayBuildRev reports whether the build type & revision should be started.
// It returns true if it's not already building, and if a reverse buildlet is
// required, if an appropriate machine is registered.
func mayBuildRev(rev buildgo.BuilderRev) bool {
if isBuilding(rev) {
return false
}
if buildEnv.MaxBuilds > 0 && numCurrentBuilds() >= buildEnv.MaxBuilds {
return false
}
buildConf, ok := dashboard.Builders[rev.Name]
if !ok {
if logUnknownBuilder.Allow() {
log.Printf("unknown builder %q", rev.Name)
}
return false
}
if buildConf.MaxAtOnce > 0 && numCurrentBuildsOfType(rev.Name) >= buildConf.MaxAtOnce {
return false
}
if buildConf.IsReverse() && !reversePool.CanBuild(buildConf.HostType) {
return false
}
if buildConf.IsKube() && kubeErr != nil {
return false
}
return true
}
func setStatus(work buildgo.BuilderRev, st *buildStatus) {
statusMu.Lock()
defer statusMu.Unlock()
// TODO: panic if status[work] already exists. audit all callers.
// For instance, what if a trybot is running, and then the CL is merged
// and the findWork goroutine picks it up and it has the same commit,
// because it didn't need to be rebased in Gerrit's cherrypick?
// Could we then have two running with the same key?
status[work] = st
}
func markDone(work buildgo.BuilderRev) {
statusMu.Lock()
defer statusMu.Unlock()
st, ok := status[work]
if !ok {
return
}
delete(status, work)
if len(statusDone) == maxStatusDone {
copy(statusDone, statusDone[1:])
statusDone = statusDone[:len(statusDone)-1]
}
statusDone = append(statusDone, st)
}
// statusPtrStr disambiguates which status to return if there are
// multiple in the history (e.g. recent failures where the build
// didn't finish for reasons outside of all.bash failing)
func getStatus(work buildgo.BuilderRev, statusPtrStr string) *buildStatus {
statusMu.Lock()
defer statusMu.Unlock()
match := func(st *buildStatus) bool {
return statusPtrStr == "" || fmt.Sprintf("%p", st) == statusPtrStr
}
if st, ok := status[work]; ok && match(st) {
return st
}
for _, st := range statusDone {
if st.BuilderRev == work && match(st) {
return st
}
}
for k, ts := range tries {
if k.Commit == work.Rev {
ts.mu.Lock()
for _, st := range ts.builds {
if st.BuilderRev == work && match(st) {
ts.mu.Unlock()
return st
}
}
ts.mu.Unlock()
}
}
return nil
}
type byAge []*buildStatus
func (s byAge) Len() int { return len(s) }
func (s byAge) Less(i, j int) bool { return s[i].startTime.Before(s[j].startTime) }
func (s byAge) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
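// byAge satisfies sort.Interface, so a caller can order a copy of the statuses
// oldest-first with, e.g., sort.Sort(byAge(statusesCopy)) (illustrative name).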
func handleTryStatus(w http.ResponseWriter, r *http.Request) {
ts := trySetOfCommitPrefix(r.FormValue("commit"))
if ts == nil {
http.Error(w, "TryBot result not found (already done, invalid, or not yet discovered from Gerrit). Check Gerrit for results.", http.StatusNotFound)
return
}
ts.mu.Lock()
tss := ts.trySetState.clone()
ts.mu.Unlock()
w.Header().Set("Content-Type", "text/html; charset=utf-8")
fmt.Fprintf(w, "<html><head><title>trybot status</title></head><body>[<a href='/'>overall status</a>] > %s\n", ts.ChangeID)
fmt.Fprintf(w, "<h1>trybot status</h1>")
fmt.Fprintf(w, "Change-ID: <a href='https://go-review.googlesource.com/#/q/%s'>%s</a><br>\n", ts.ChangeID, ts.ChangeID)
fmt.Fprintf(w, "Commit: <a href='https://go-review.googlesource.com/#/q/%s'>%s</a><br>\n", ts.Commit, ts.Commit)
fmt.Fprintf(w, "<p>Builds remain: %d</p>\n", tss.remain)
fmt.Fprintf(w, "<p>Builds failed: %v</p>\n", tss.failed)
fmt.Fprintf(w, "<p>Builds</p><table cellpadding=5 border=1>\n")
for _, bs := range tss.builds {
status := "<i>(running)</i>"
bs.mu.Lock()
if !bs.done.IsZero() {
if bs.succeeded {
status = "pass"
} else {
status = "<b>FAIL</b>"
}
}
bs.mu.Unlock()
fmt.Fprintf(w, "<tr valign=top><td align=left>%s</td><td align=center>%s</td><td><pre>%s</pre></td></tr>\n",
bs.Name,
status,
bs.HTMLStatusLine())
}
fmt.Fprintf(w, "</table></body></html>")
}
func trySetOfCommitPrefix(commitPrefix string) *trySet {
if commitPrefix == "" {
return nil
}
statusMu.Lock()
defer statusMu.Unlock()
for k, ts := range tries {
if strings.HasPrefix(k.Commit, commitPrefix) {
return ts
}
}
return nil
}
func handleLogs(w http.ResponseWriter, r *http.Request) {
br := buildgo.BuilderRev{
Name: r.FormValue("name"),
Rev: r.FormValue("rev"),
SubName: r.FormValue("subName"), // may be empty
SubRev: r.FormValue("subRev"), // may be empty
}
st := getStatus(br, r.FormValue("st"))
if st == nil {
http.NotFound(w, r)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
writeStatusHeader(w, st)
nostream := r.FormValue("nostream") != ""
if nostream || !st.isRunning() {
if nostream {
fmt.Fprintf(w, "\n\n(live streaming disabled; reload manually to see status)\n")
}
w.Write(st.output.Bytes())
return
}
if !st.hasEvent("make_and_test") && !st.hasEvent("make_cross_compile_kube") {
fmt.Fprintf(w, "\n\n(buildlet still starting; no live streaming. reload manually to see status)\n")
return
}
w.(http.Flusher).Flush()
output := st.output.Reader()
go func() {
<-w.(http.CloseNotifier).CloseNotify()
output.Close()
}()
buf := make([]byte, 65536)
for {
n, err := output.Read(buf)
if _, err2 := w.Write(buf[:n]); err2 != nil {
return
}
w.(http.Flusher).Flush()
if err != nil {
break
}
}
}
func handleDebugGoroutines(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
buf := make([]byte, 1<<20)
buf = buf[:runtime.Stack(buf, true)]
w.Write(buf)
}
func writeStatusHeader(w http.ResponseWriter, st *buildStatus) {
st.mu.Lock()
defer st.mu.Unlock()
fmt.Fprintf(w, " builder: %s\n", st.Name)
fmt.Fprintf(w, " rev: %s\n", st.Rev)
workaroundFlush(w)
fmt.Fprintf(w, " buildlet: %s\n", st.bc)
fmt.Fprintf(w, " started: %v\n", st.startTime)
done := !st.done.IsZero()
if done {
fmt.Fprintf(w, " ended: %v\n", st.done)
fmt.Fprintf(w, " success: %v\n", st.succeeded)
} else {
fmt.Fprintf(w, " status: still running\n")
}
if len(st.events) > 0 {
io.WriteString(w, "\nEvents:\n")
st.writeEventsLocked(w, false)
}
io.WriteString(w, "\nBuild log:\n")
workaroundFlush(w)
}
// workaroundFlush is an unnecessary flush to work around a bug in Chrome.
// See https://code.google.com/p/chromium/issues/detail?id=2016 for the details.
// In summary: a couple of unnecessary chunk flushes bypass the content-type
// sniffing that would otherwise happen, even if you set nosniff as we do
// in func handleLogs.
func workaroundFlush(w http.ResponseWriter) {
w.(http.Flusher).Flush()
}
// findWorkLoop polls https://build.golang.org/?mode=json looking for new work
// for the main dashboard. It does not support gccgo.
// TODO(bradfitz): it also currently does not support subrepos.
func findWorkLoop(work chan<- buildgo.BuilderRev) {
// Useful for debugging a single run:
if inStaging && false {
//work <- buildgo.BuilderRev{name: "linux-arm", rev: "c9778ec302b2e0e0d6027e1e0fca892e428d9657", subName: "tools", subRev: "ac303766f5f240c1796eeea3dc9bf34f1261aa35"}
const debugArm = false
if debugArm {
for !reversePool.CanBuild("host-linux-arm") {
log.Printf("waiting for ARM to register.")
time.Sleep(time.Second)
}
log.Printf("ARM machine(s) registered.")
work <- buildgo.BuilderRev{Name: "linux-arm", Rev: "3129c67db76bc8ee13a1edc38a6c25f9eddcbc6c"}
} else {
work <- buildgo.BuilderRev{Name: "windows-amd64-2008", Rev: "3129c67db76bc8ee13a1edc38a6c25f9eddcbc6c"}
work <- buildgo.BuilderRev{Name: "windows-386-gce", Rev: "3129c67db76bc8ee13a1edc38a6c25f9eddcbc6c"}
}
// Still run findWork but ignore what it does.
ignore := make(chan buildgo.BuilderRev)
go func() {
for range ignore {
}
}()
work = ignore
}
ticker := time.NewTicker(15 * time.Second)
for {
if err := findWork(work); err != nil {
log.Printf("failed to find new work: %v", err)
}
<-ticker.C
}
}
func findWork(work chan<- buildgo.BuilderRev) error {
var bs types.BuildStatus
if err := dash("GET", "", url.Values{"mode": {"json"}}, nil, &bs); err != nil {
return err
}
knownToDashboard := map[string]bool{} // keys are builder
for _, b := range bs.Builders {
knownToDashboard[b] = true
}
var goRevisions []string // revisions of repo "go", branch "master" revisions
seenSubrepo := make(map[string]bool)
for _, br := range bs.Revisions {
if br.Repo == "grpc-review" {
// Skip the grpc repo. It's only for reviews
// for now (using LetsUseGerrit).
continue
}
awaitSnapshot := false
if br.Repo == "go" {
if br.Branch == "master" {
goRevisions = append(goRevisions, br.Revision)
}
} else {
// If this is the first time we've seen this sub-repo
// in this loop, then br.GoRevision is the go repo
// HEAD. To save resources, we only build subrepos
// against HEAD once we have a snapshot.
// The next time we see this sub-repo in this loop, the
// GoRevision is one of the release branches, for which
// we may not have a snapshot (if the release was made
// a long time before this builder came up), so skip
// the snapshot check.
awaitSnapshot = !seenSubrepo[br.Repo]
seenSubrepo[br.Repo] = true
}
if len(br.Results) != len(bs.Builders) {
return errors.New("bogus JSON response from dashboard: results is too long.")
}
for i, res := range br.Results {
if res != "" {
// It's either "ok" or a failure URL.
continue
}
builder := bs.Builders[i]
if skipBranchForBuilder(br.Repo, br.Branch, builder) {
continue
}
builderInfo, ok := dashboard.Builders[builder]
if !ok || builderInfo.TryOnly {
// Not managed by the coordinator, or a trybot-only one.
continue
}
if br.Repo != "go" && !builderInfo.BuildSubrepos() {
// This builder can't build subrepos; skip.
continue
}
var rev buildgo.BuilderRev
if br.Repo == "go" {
rev = buildgo.BuilderRev{
Name: bs.Builders[i],
Rev: br.Revision,
}
} else {
rev = buildgo.BuilderRev{
Name: bs.Builders[i],
Rev: br.GoRevision,
SubName: br.Repo,
SubRev: br.Revision,
}
if awaitSnapshot && !rev.SnapshotExists(context.TODO(), buildEnv) {
continue
}
}
if skipBuild(rev) {
continue
}
if !isBuilding(rev) {
work <- rev
}
}
}
// And to bootstrap new builders, see if we have any builders
// that the dashboard doesn't know about.
for b, builderInfo := range dashboard.Builders {
if builderInfo.TryOnly || knownToDashboard[b] {
continue
}
if skipBranchForBuilder("go", "master", b) {
continue
}
for _, rev := range goRevisions {
br := buildgo.BuilderRev{Name: b, Rev: rev}
if !skipBuild(br) && !isBuilding(br) {
work <- br
}
}
}
return nil
}
// findTryWorkLoop is a goroutine which loops periodically and queries
// Gerrit for TryBot work.
func findTryWorkLoop() {
if errTryDeps != nil {
return
}
ticker := time.NewTicker(1 * time.Second)
for {
if err := findTryWork(); err != nil {
log.Printf("failed to find trybot work: %v", err)
}
<-ticker.C
}
}
func findTryWork() error {
if inStaging && !stagingTryWork {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // should be milliseconds
defer cancel()
tryRes, err := maintnerClient.GoFindTryWork(ctx, &apipb.GoFindTryWorkRequest{ForStaging: inStaging})
if err != nil {
return err
}
now := time.Now()
statusMu.Lock()
defer statusMu.Unlock()
tryList = tryList[:0]
for _, work := range tryRes.Waiting {
if work.ChangeId == "" || work.Commit == "" {
log.Printf("Warning: skipping incomplete %#v", work)
continue
}
if work.Project == "build" || work.Project == "grpc-review" {
// Skip trybot request in build repo.
// Also skip grpc-review, which is only for reviews for now.
continue
}
key := tryWorkItemKey(work)
tryList = append(tryList, key)
if ts, ok := tries[key]; ok {
// already in progress
ts.wantedAsOf = now
continue
} else {
ts := newTrySet(work)
ts.wantedAsOf = now
tries[key] = ts
}
}
for k, ts := range tries {
if ts.wantedAsOf != now {
delete(tries, k)
go ts.cancelBuilds()
}
}
return nil
}
type tryKey struct {
Project string // "go", "net", etc
Branch string // master
ChangeID string // I1a27695838409259d1586a0adfa9f92bccf7ceba
Commit string // ecf3dffc81dc21408fb02159af352651882a8383
}
// ChangeTriple returns the Gerrit (project, branch, change-ID) triple
// uniquely identifying this change. Several Gerrit APIs require this
// form if there are multiple changes with the same Change-ID.
func (k *tryKey) ChangeTriple() string {
return fmt.Sprintf("%s~%s~%s", k.Project, k.Branch, k.ChangeID)
}
// trySet is the state of a set of builds of different
// configurations, all for the same (Change-ID, Commit) pair. The
// sets which are still wanted (not already submitted or canceled) are
// stored in the global 'tries' map.
type trySet struct {
// immutable
tryKey
tryID string // "T" + 9 random hex
// wantedAsOf is guarded by statusMu and is used by
// findTryWork. It records the last time this tryKey was still
// wanted.
wantedAsOf time.Time
// mu guards state and errMsg
// See LOCK ORDER comment above.
mu sync.Mutex
trySetState
errMsg bytes.Buffer
}
type trySetState struct {
remain int
failed []string // build names
builds []*buildStatus
benchResults []string // builder names
}
func (ts trySetState) clone() trySetState {
return trySetState{
remain: ts.remain,
failed: append([]string(nil), ts.failed...),
builds: append([]*buildStatus(nil), ts.builds...),
benchResults: append([]string(nil), ts.benchResults...),
}
}
var errHeadUnknown = errors.New("Cannot create trybot set without a known Go head (transient error)")
func tryWorkItemKey(work *apipb.GerritTryWorkItem) tryKey {
return tryKey{
Project: work.Project,
Branch: work.Branch,
ChangeID: work.ChangeId,
Commit: work.Commit,
}
}
// newTrySet creates a new trySet group of builders for a given
// work item, the (Project, Branch, Change-ID, Commit) tuple.
// It also starts goroutines for each build.
//
// Must hold statusMu.
func newTrySet(work *apipb.GerritTryWorkItem) *trySet {
builders := tryBuilders
key := tryWorkItemKey(work)
if key.Project != "go" {
builders = subTryBuilders
}
log.Printf("Starting new trybot set for %v", key)
ts := &trySet{
tryKey: key,
tryID: "T" + randHex(9),
trySetState: trySetState{
remain: len(builders),
builds: make([]*buildStatus, len(builders)),
},
}
// For now, for subrepos, we only support building one repo.
// TODO: Issue 17626: test subrepos against Go master and past two
// releases. But to save resources, we'll probably only want
// to do that for linux-amd64 (the Kubernetes cheap builder) to
// not blow up usage? Or maybe it doesn't matter.
var goRev string
if len(work.GoCommit) > 0 {
goRev = work.GoCommit[0]
}
go ts.notifyStarting()
for i, bconf := range builders {
brev := tryKeyToBuilderRev(bconf.Name, key, goRev)
bs, err := newBuild(brev)
if err != nil {
log.Printf("can't create build for %q: %v", brev, err)
continue
}
bs.trySet = ts
status[brev] = bs
ts.builds[i] = bs
go bs.start() // acquires statusMu itself, so in a goroutine
go ts.awaitTryBuild(i, bconf, bs, brev)
}
return ts
}
// Note: called in some paths where statusMu is held; do not make RPCs.
func tryKeyToBuilderRev(builder string, key tryKey, goRev string) buildgo.BuilderRev {
// This function is called from within newTrySet, holding statusMu, so it must not make RPCs.
if key.Project == "go" {
return buildgo.BuilderRev{
Name: builder,
Rev: key.Commit,
}
}
return buildgo.BuilderRev{
Name: builder,
Rev: goRev,
SubName: key.Project,
SubRev: key.Commit,
}
}
// state returns a copy of the trySet's state.
func (ts *trySet) state() trySetState {
ts.mu.Lock()
defer ts.mu.Unlock()
return ts.trySetState.clone()
}
// notifyStarting runs in its own goroutine and posts to Gerrit that
// the trybots have started on the user's CL with a link of where to watch.
func (ts *trySet) notifyStarting() {
msg := "TryBots beginning. Status page: https://farmer.golang.org/try?commit=" + ts.Commit[:8]
ctx := context.Background()
if ci, err := gerritClient.GetChangeDetail(ctx, ts.ChangeTriple()); err == nil {
if len(ci.Messages) == 0 {
log.Printf("No Gerrit comments retrieved on %v", ts.ChangeTriple())
}
for _, cmi := range ci.Messages {
if strings.Contains(cmi.Message, msg) {
// Dup. Don't spam.
return
}
}
} else {
log.Printf("Error getting Gerrit comments on %s: %v", ts.ChangeTriple(), err)
}
// Ignore error. This isn't critical.
gerritClient.SetReview(ctx, ts.ChangeTriple(), ts.Commit, gerrit.ReviewInput{Message: msg})
}
// awaitTryBuild runs in its own goroutine and waits for a build in a
// trySet to complete.
//
// If the build fails without getting to the end, it sleeps and
// reschedules it, as long as it's still wanted.
func (ts *trySet) awaitTryBuild(idx int, bconf dashboard.BuildConfig, bs *buildStatus, brev buildgo.BuilderRev) {
for {
WaitCh:
for {
timeout := time.NewTimer(10 * time.Minute)
select {
case <-bs.ctx.Done():
timeout.Stop()
break WaitCh
case <-timeout.C:
if !ts.wanted() {
// Build was canceled.
return
}
}
}
if bs.hasEvent(eventDone) || bs.hasEvent(eventSkipBuildMissingDep) {
ts.noteBuildComplete(bconf, bs)
return
}
// TODO(bradfitz): rethink this logic. we should only
// start a new build if the old one appears dead or
// hung.
// Sleep a bit and retry.
time.Sleep(30 * time.Second)
if !ts.wanted() {
return
}
bs, _ = newBuild(brev)
bs.trySet = ts
go bs.start()
ts.mu.Lock()
ts.builds[idx] = bs
ts.mu.Unlock()
}
}
// wanted reports whether this trySet is still active.
//
// If the commit has been submitted, or change abandoned, or the
// checkbox unchecked, wanted returns false.
func (ts *trySet) wanted() bool {
statusMu.Lock()
defer statusMu.Unlock()
_, ok := tries[ts.tryKey]
return ok
}
// cancelBuilds run in its own goroutine and cancels this trySet's
// currently-active builds because they're no longer wanted.
func (ts *trySet) cancelBuilds() {
// TODO(bradfitz): implement
}
func (ts *trySet) noteBuildComplete(bconf dashboard.BuildConfig, bs *buildStatus) {
bs.mu.Lock()
succeeded := bs.succeeded
var buildLog string
if !succeeded {
buildLog = bs.output.String()
}
hasBenchResults := bs.hasBenchResults
bs.mu.Unlock()
ts.mu.Lock()
if hasBenchResults {
ts.benchResults = append(ts.benchResults, bs.Name)
}
ts.remain--
remain := ts.remain
if !succeeded {
ts.failed = append(ts.failed, bconf.Name)
}
numFail := len(ts.failed)
benchResults := append([]string(nil), ts.benchResults...)
ts.mu.Unlock()
if !succeeded {
s1 := sha1.New()
io.WriteString(s1, buildLog)
objName := fmt.Sprintf("%s/%s_%x.log", bs.Rev[:8], bs.Name, s1.Sum(nil)[:4])
wr, failLogURL := newFailureLogBlob(objName)
if _, err := io.WriteString(wr, buildLog); err != nil {
log.Printf("Failed to write to GCS: %v", err)
return
}
if err := wr.Close(); err != nil {
log.Printf("Failed to write to GCS: %v", err)
return
}
bs.mu.Lock()
bs.failURL = failLogURL
bs.mu.Unlock()
ts.mu.Lock()
fmt.Fprintf(&ts.errMsg, "Failed on %s: %s\n", bs.Name, failLogURL)
ts.mu.Unlock()
if numFail == 1 && remain > 0 {
if err := gerritClient.SetReview(context.Background(), ts.ChangeTriple(), ts.Commit, gerrit.ReviewInput{
Message: fmt.Sprintf(
"Build is still in progress...\n"+
"This change failed on %s:\n"+
"See %s\n\n"+
"Consult https://build.golang.org/ to see whether it's a new failure. Other builds still in progress; subsequent failure notices suppressed until final report.",
bs.Name, failLogURL),
}); err != nil {
log.Printf("Failed to call Gerrit: %v", err)
return
}
}
}
if remain == 0 {
score, msg := 1, "TryBots are happy."
if numFail > 0 {
ts.mu.Lock()
errMsg := ts.errMsg.String()
ts.mu.Unlock()
score, msg = -1, fmt.Sprintf("%d of %d TryBots failed:\n%s\nConsult https://build.golang.org/ to see whether they are new failures.",
numFail, len(ts.builds), errMsg)
}
if len(benchResults) > 0 {
// TODO: restore this functionality
// msg += fmt.Sprintf("\nBenchmark results are available at:\nhttps://perf.golang.org/search?q=cl:%d+try:%s", ts.ci.ChangeNumber, ts.tryID)
}
if err := gerritClient.SetReview(context.Background(), ts.ChangeTriple(), ts.Commit, gerrit.ReviewInput{
Message: msg,
Labels: map[string]int{
"TryBot-Result": score,
},
}); err != nil {
log.Printf("Failed to call Gerrit: %v", err)
return
}
}
}
func skipBuild(br buildgo.BuilderRev) bool {
if br.Name == "freebsd-arm-paulzhol" {
// This was a fragile little machine with limited memory.
// Only run a few of the core subrepos for now while
// we figure out what's killing it.
switch br.SubName {
case "", "sys", "net":
return false
}
return true
}
switch br.SubName {
case "build", // has external deps
"exp", // always broken, depends on mobile which is broken
"mobile", // always broken (gl, etc). doesn't compile.
"term", // no code yet in repo: "warning: "golang.org/x/term/..." matched no packages"
"oauth2": // has external deps
return true
case "perf":
if br.Name == "linux-amd64-nocgo" {
// The "perf" repo requires sqlite, which
// requires cgo. Skip the no-cgo builder.
return true
}
case "net":
if br.Name == "darwin-amd64-10_8" || br.Name == "darwin-386-10_8" {
// One of the tests seems to panic the kernel
// and kill our buildlet which goes in a loop.
return true
}
}
return false
}
type eventTimeLogger interface {
LogEventTime(event string, optText ...string)
}
// logger is the logging interface used within the coordinator.
// It can both log a message at a point in time, as well
// as log a span (something having a start and end time, as well as
// a final success status).
type logger interface {
eventTimeLogger // point in time
spanlog.Logger // action spanning time
}
// buildletTimeoutOpt is a context.Value key for BuildletPool.GetBuildlet.
type buildletTimeoutOpt struct{} // context Value key; value is time.Duration
// highPriorityOpt is a context.Value key for BuildletPool.GetBuildlet.
// If its value is true, that means the caller should be prioritized.
type highPriorityOpt struct{} // value is bool
type BuildletPool interface {
// GetBuildlet returns a new buildlet client.
//
// The hostType is the key into the dashboard.Hosts
// map (such as "host-linux-kubestd"), NOT the builder type
// ("linux-386").
//
// Users of GetBuildlet must both call Client.Close when done
// with the client as well as cancel the provided Context.
//
// The ctx may have context values of type buildletTimeoutOpt
// and highPriorityOpt.
GetBuildlet(ctx context.Context, hostType string, lg logger) (*buildlet.Client, error)
String() string // TODO(bradfitz): more status stuff
}
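// Illustrative caller sketch (hypothetical; the option plumbing below is an assumption
// based on the context-key comments above, not a quote from this file):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	ctx = context.WithValue(ctx, buildletTimeoutOpt{}, 10*time.Minute)
//	bc, err := pool.GetBuildlet(ctx, "host-linux-kubestd", lg)
//	if err == nil {
//		defer bc.Close()
//		// ... use the buildlet ...
//	}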
// GetBuildlets creates up to n buildlets and sends them on the returned channel
// before closing the channel.
func GetBuildlets(ctx context.Context, pool BuildletPool, n int, hostType string, lg logger) <-chan *buildlet.Client {
ch := make(chan *buildlet.Client) // NOT buffered
var wg sync.WaitGroup
wg.Add(n)
for i := 0; i < n; i++ {
go func(i int) {
defer wg.Done()
sp := lg.CreateSpan("get_helper", fmt.Sprintf("helper %d/%d", i+1, n))
bc, err := pool.GetBuildlet(ctx, hostType, lg)
sp.Done(err)
if err != nil {
if err != context.Canceled {
log.Printf("failed to get a %s buildlet: %v", hostType, err)
}
return
}
lg.LogEventTime("empty_helper_ready", bc.Name())
select {
case ch <- bc:
case <-ctx.Done():
lg.LogEventTime("helper_killed_before_use", bc.Name())
bc.Close()
return
}
}(i)
}
go func() {
wg.Wait()
close(ch)
}()
return ch
}
var testPoolHook func(dashboard.BuildConfig) BuildletPool
func poolForConf(conf dashboard.BuildConfig) BuildletPool {
if testPoolHook != nil {
return testPoolHook(conf)
}
switch {
case conf.IsGCE():
return gcePool
case conf.IsKube():
return kubePool // Kubernetes
case conf.IsReverse():
return reversePool
default:
panic(fmt.Sprintf("no buildlet pool for builder type %q", conf.Name))
}
}
func newBuild(rev buildgo.BuilderRev) (*buildStatus, error) {
// Note: can't acquire statusMu in newBuild, as this is called
// from findTryWork -> newTrySet, which holds statusMu.
conf, ok := dashboard.Builders[rev.Name]
if !ok {
return nil, fmt.Errorf("unknown builder type %q", rev.Name)
}
ctx, cancel := context.WithCancel(context.Background())
return &buildStatus{
buildID: "B" + randHex(9),
BuilderRev: rev,
conf: conf,
startTime: time.Now(),
ctx: ctx,
cancel: cancel,
}, nil
}
// start starts the build in a new goroutine.
// The buildStatus's context is closed when the build is complete,
// successfully or not.
func (st *buildStatus) start() {
setStatus(st.BuilderRev, st)
go func() {
err := st.build()
if err == errSkipBuildDueToDeps {
st.setDone(true)
} else {
if err != nil {
fmt.Fprintf(st, "\n\nError: %v\n", err)
log.Println(st.BuilderRev, "failed:", err)
}
st.setDone(err == nil)
putBuildRecord(st.buildRecord())
}
markDone(st.BuilderRev)
}()
}
func (st *buildStatus) buildletPool() BuildletPool {
return poolForConf(st.conf)
}
// parentRev returns the parent of this build's commit (but only if this build comes from a trySet).
func (st *buildStatus) parentRev() (pbr buildgo.BuilderRev, err error) {
err = errors.New("TODO: query maintner")
return
}
func (st *buildStatus) expectedMakeBashDuration() time.Duration {
// TODO: base this on historical measurements, instead of statically configured.
// TODO: move this to dashboard/builders.go? But once we base this on historical
// measurements, it'll need GCE services (bigtable/bigquery?), so it's probably
// better in this file.
goos, goarch := st.conf.GOOS(), st.conf.GOARCH()
if goos == "linux" {
if goarch == "arm" {
return 4 * time.Minute
}
return 45 * time.Second
}
return 60 * time.Second
}
func (st *buildStatus) expectedBuildletStartDuration() time.Duration {
// TODO: move this to dashboard/builders.go? But once we base this on historical
// measurements, it'll need GCE services (bigtable/bigquery?), so it's probably
// better in this file.
pool := st.buildletPool()
switch pool.(type) {
case *gceBuildletPool:
return time.Minute
case *reverseBuildletPool:
goos, arch := st.conf.GOOS(), st.conf.GOARCH()
if goos == "darwin" {
if arch == "arm" || arch == "arm64" {
// iOS; the buildlet is either already idle or it isn't available at all.
return 0
}
if arch == "amd64" || arch == "386" {
return 0 // TODO: remove this once we're using VMware
// return 1 * time.Minute // VMware boot of hermetic OS X
}
}
if goos == "linux" && arch == "arm" {
// Scaleway. Ready or not.
return 0
}
}
return 0
}
// getHelpersReadySoon waits a bit (as a function of the build
// configuration) and starts getting the buildlets for test sharding
// ready, such that they're ready when make.bash is done. But we don't
// want to start too early, lest we waste idle resources during make.bash.
func (st *buildStatus) getHelpersReadySoon() {
if st.IsSubrepo() || st.conf.NumTestHelpers(st.isTry()) == 0 || st.conf.IsReverse() {
return
}
time.AfterFunc(st.expectedMakeBashDuration()-st.expectedBuildletStartDuration(),
func() {
st.LogEventTime("starting_helpers")
st.getHelpers() // and ignore the result.
})
}
// getHelpers returns a channel of buildlet test helpers, with an item
// sent as they become available. The channel is closed at the end.
func (st *buildStatus) getHelpers() <-chan *buildlet.Client {
st.onceInitHelpers.Do(st.onceInitHelpersFunc)
return st.helpers
}
func (st *buildStatus) onceInitHelpersFunc() {
pool := st.buildletPool()
st.helpers = GetBuildlets(st.ctx, pool, st.conf.NumTestHelpers(st.isTry()), st.conf.HostType, st)
}
// useSnapshot reports whether this type of build can use a snapshot of
// the make.bash output (anything that can SplitMakeRun qualifies) and
// whether that snapshot actually exists.
func (st *buildStatus) useSnapshot() bool {
if st.conf.SkipSnapshot {
return false
}
st.mu.Lock()
defer st.mu.Unlock()
if st.useSnapshotMemo != nil {
return *st.useSnapshotMemo
}
b := st.conf.SplitMakeRun() && st.BuilderRev.SnapshotExists(context.TODO(), buildEnv)
st.useSnapshotMemo = &b
return b
}
func (st *buildStatus) forceSnapshotUsage() {
st.mu.Lock()
defer st.mu.Unlock()
truth := true
st.useSnapshotMemo = &truth
}
func (st *buildStatus) getCrossCompileConfig() *crossCompileConfig {
if kubeErr != nil {
return nil
}
config := crossCompileConfigs[st.Name]
if config == nil {
return nil
}
if config.AlwaysCrossCompile {
return config
}
if inStaging || st.isTry() {
return config
}
return nil
}
func (st *buildStatus) checkDep(ctx context.Context, dep string) (have bool, err error) {
span := st.CreateSpan("ask_maintner_has_ancestor")
defer func() { span.Done(err) }()
tries := 0
for {
tries++
res, err := maintnerClient.HasAncestor(ctx, &apipb.HasAncestorRequest{
Commit: st.Rev,
Ancestor: dep,
})
if err != nil {
if tries == 3 {
span.Done(err)
return false, err
}
time.Sleep(1 * time.Second)
continue
}
if res.UnknownCommit {
select {
case <-ctx.Done():
return false, ctx.Err()
case <-time.After(1 * time.Second):
}
continue
}
return res.HasAncestor, nil
}
}
var errSkipBuildDueToDeps = errors.New("build was skipped due to missing deps")
func (st *buildStatus) build() error {
if deps := st.conf.GoDeps; len(deps) > 0 {
ctx, cancel := context.WithTimeout(st.ctx, 30*time.Second)
defer cancel()
for _, dep := range deps {
has, err := st.checkDep(ctx, dep)
if err != nil {
fmt.Fprintf(st, "Error checking whether commit %s includes ancestor %s: %v\n", st.Rev, dep, err)
return err
}
if !has {
st.LogEventTime(eventSkipBuildMissingDep)
fmt.Fprintf(st, "skipping build; commit %s lacks ancestor %s\n", st.Rev, dep)
return errSkipBuildDueToDeps
}
}
cancel()
}
putBuildRecord(st.buildRecord())
sp := st.CreateSpan("checking_for_snapshot")
if inStaging {
err := storageClient.Bucket(buildEnv.SnapBucket).Object(st.SnapshotObjectName()).Delete(context.Background())
st.LogEventTime("deleted_snapshot", fmt.Sprint(err))
}
snapshotExists := st.useSnapshot()
sp.Done(nil)
if config := st.getCrossCompileConfig(); !snapshotExists && config != nil {
if err := st.crossCompileMakeAndSnapshot(config); err != nil {
return err
}
st.forceSnapshotUsage()
}
sp = st.CreateSpan("get_buildlet")
pool := st.buildletPool()
bc, err := pool.GetBuildlet(st.ctx, st.conf.HostType, st)
sp.Done(err)
if err != nil {
err = fmt.Errorf("failed to get a buildlet: %v", err)
go st.reportErr(err)
return err
}
atomic.StoreInt32(&st.hasBuildlet, 1)
defer bc.Close()
st.mu.Lock()
st.bc = bc
st.mu.Unlock()
st.LogEventTime("using_buildlet", bc.IPPort())
if st.useSnapshot() {
sp := st.CreateSpan("write_snapshot_tar")
if err := bc.PutTarFromURL(st.SnapshotURL(buildEnv), "go"); err != nil {
return sp.Done(fmt.Errorf("failed to put snapshot to buildlet: %v", err))
}
sp.Done(nil)
} else {
// Write the Go source and bootstrap tool chain in parallel.
var grp syncutil.Group
grp.Go(st.writeGoSource)
grp.Go(st.writeBootstrapToolchain)
if err := grp.Err(); err != nil {
return err
}
}
execStartTime := time.Now()
fmt.Fprintf(st, "%s at %v", st.Name, st.Rev)
if st.IsSubrepo() {
fmt.Fprintf(st, " building %v at %v", st.SubName, st.SubRev)
}
fmt.Fprint(st, "\n\n")
makeTest := st.CreateSpan("make_and_test") // warning: magic event named used by handleLogs
var remoteErr error
if st.conf.SplitMakeRun() {
remoteErr, err = st.runAllSharded()
} else {
remoteErr, err = st.runAllLegacy()
}
makeTest.Done(err)
// bc (aka st.bc) may be invalid past this point, so let's
// close it to make sure we don't accidentally use it.
bc.Close()
doneMsg := "all tests passed"
if remoteErr != nil {
doneMsg = "with test failures"
} else if err != nil {
doneMsg = "comm error: " + err.Error()
}
if err != nil {
// Return the error *before* we create the magic
// "done" event. (which the try coordinator looks for)
return err
}
st.LogEventTime(eventDone, doneMsg)
if devPause {
st.LogEventTime("DEV_MAIN_SLEEP")
time.Sleep(5 * time.Minute)
}
if st.trySet == nil {
var buildLog string
if remoteErr != nil {
buildLog = st.logs()
// If we just have the line-or-so little
// banner at top, that means we didn't get any
// interesting output from the remote side, so
// include the remoteErr text. Otherwise
// assume that remoteErr is redundant with the
// buildlog text itself.
if strings.Count(buildLog, "\n") < 10 {
buildLog += "\n" + remoteErr.Error()
}
}
if err := recordResult(st.BuilderRev, remoteErr == nil, buildLog, time.Since(execStartTime)); err != nil {
if remoteErr != nil {
return fmt.Errorf("Remote error was %q but failed to report it to the dashboard: %v", remoteErr, err)
}
return fmt.Errorf("Build succeeded but failed to report it to the dashboard: %v", err)
}
}
if remoteErr != nil {
return remoteErr
}
return nil
}
func (st *buildStatus) isTry() bool { return st.trySet != nil }
func (st *buildStatus) buildRecord() *types.BuildRecord {
rec := &types.BuildRecord{
ID: st.buildID,
ProcessID: processID,
StartTime: st.startTime,
IsTry: st.isTry(),
GoRev: st.Rev,
Rev: st.SubRevOrGoRev(),
Repo: st.RepoOrGo(),
Builder: st.Name,
OS: st.conf.GOOS(),
Arch: st.conf.GOARCH(),
}
st.mu.Lock()
defer st.mu.Unlock()
// TODO: buildlet instance name
if !st.done.IsZero() {
rec.EndTime = st.done
rec.FailureURL = st.failURL
rec.Seconds = rec.EndTime.Sub(rec.StartTime).Seconds()
if st.succeeded {
rec.Result = "ok"
} else {
rec.Result = "fail"
}
}
return rec
}
func (st *buildStatus) spanRecord(sp *span, err error) *types.SpanRecord {
rec := &types.SpanRecord{
BuildID: st.buildID,
IsTry: st.isTry(),
GoRev: st.Rev,
Rev: st.SubRevOrGoRev(),
Repo: st.RepoOrGo(),
Builder: st.Name,
OS: st.conf.GOOS(),
Arch: st.conf.GOARCH(),
Event: sp.event,
Detail: sp.optText,
StartTime: sp.start,
EndTime: sp.end,
Seconds: sp.end.Sub(sp.start).Seconds(),
}
if err != nil {
rec.Error = err.Error()
}
return rec
}
// shouldBench returns whether we should attempt to run benchmarks
func (st *buildStatus) shouldBench() bool {
if !*shouldRunBench {
return false
}
return st.isTry() && !st.IsSubrepo() && st.conf.RunBench
}
// goBuilder returns a GoBuilder for this buildStatus.
func (st *buildStatus) goBuilder() buildgo.GoBuilder {
return buildgo.GoBuilder{
Logger: st,
BuilderRev: st.BuilderRev,
Conf: st.conf,
Goroot: "go",
}
}
// runAllSharded runs make.bash and then shards the test execution.
// remoteErr and err are as described at the top of this file.
//
// After runAllSharded returns, the caller must assume that st.bc
// might be invalid (It's possible that only one of the helper
// buildlets survived).
func (st *buildStatus) runAllSharded() (remoteErr, err error) {
st.getHelpersReadySoon()
if !st.useSnapshot() {
remoteErr, err = st.goBuilder().RunMake(st.bc, st)
if err != nil {
return nil, err
}
if remoteErr != nil {
return fmt.Errorf("build failed: %v", remoteErr), nil
}
}
if st.conf.StopAfterMake {
return nil, nil
}
if err := st.doSnapshot(st.bc); err != nil {
return nil, err
}
if st.IsSubrepo() {
remoteErr, err = st.runSubrepoTests()
} else {
remoteErr, err = st.runTests(st.getHelpers())
}
if err != nil {
return nil, fmt.Errorf("runTests: %v", err)
}
if remoteErr != nil {
return fmt.Errorf("tests failed: %v", remoteErr), nil
}
return nil, nil
}
type crossCompileConfig struct {
Buildlet string
CCForTarget string
GOARM string
AlwaysCrossCompile bool
}
var crossCompileConfigs = map[string]*crossCompileConfig{
"linux-arm": {
Buildlet: "host-linux-armhf-cross",
CCForTarget: "arm-linux-gnueabihf-gcc",
GOARM: "7",
AlwaysCrossCompile: false,
},
"linux-arm-arm5spacemonkey": {
Buildlet: "host-linux-armel-cross",
CCForTarget: "arm-linux-gnueabi-gcc",
GOARM: "5",
AlwaysCrossCompile: true,
},
}
func (st *buildStatus) crossCompileMakeAndSnapshot(config *crossCompileConfig) (err error) {
// TODO: currently we ditch this buildlet when we're done with
// the make.bash & snapshot. For extra speed later, we could
// keep it around and use it to "go test -c" each stdlib
// package's tests, and push the binary to each ARM helper
// machine. That might be too little gain for the complexity,
// though, or slower once we ship everything around.
ctx, cancel := context.WithCancel(st.ctx)
defer cancel()
sp := st.CreateSpan("get_buildlet_cross")
kubeBC, err := kubePool.GetBuildlet(ctx, config.Buildlet, st)
sp.Done(err)
if err != nil {
err = fmt.Errorf("cross-compile and snapshot: failed to get a buildlet: %v", err)
go st.reportErr(err)
return err
}
defer kubeBC.Close()
if err := st.writeGoSourceTo(kubeBC); err != nil {
return err
}
makeSpan := st.CreateSpan("make_cross_compile_kube")
defer func() { makeSpan.Done(err) }()
goos, goarch := st.conf.GOOS(), st.conf.GOARCH()
remoteErr, err := kubeBC.Exec("/bin/bash", buildlet.ExecOpts{
SystemLevel: true,
Args: []string{
"-c",
"cd $WORKDIR/go/src && " +
"./make.bash && " +
"cd .. && " +
"mv bin/*_*/* bin && " +
"rmdir bin/*_* && " +
"rm -rf pkg/linux_amd64 pkg/tool/linux_amd64 pkg/bootstrap pkg/obj",
},
Output: st,
ExtraEnv: []string{
"GOROOT_BOOTSTRAP=/go1.4",
"CGO_ENABLED=1",
"CC_FOR_TARGET=" + config.CCForTarget,
"GOOS=" + goos,
"GOARCH=" + goarch,
"GOARM=" + config.GOARM, // harmless if GOARCH != "arm"
},
Debug: true,
})
if err != nil {
return err
}
if remoteErr != nil {
// Add the "done" event if make.bash fails, otherwise
// try builders will loop forever:
st.LogEventTime(eventDone, fmt.Sprintf("make.bash failed: %v", remoteErr))
return fmt.Errorf("remote error: %v", remoteErr)
}
if err := st.doSnapshot(kubeBC); err != nil {
return err
}
return nil
}
// runAllLegacy executes all.bash (or .bat, or whatever) in the traditional way.
// remoteErr and err are as described at the top of this file.
//
// TODO(bradfitz,adg): delete this function when all builders
// can split make & run (and then delete the SplitMakeRun method)
func (st *buildStatus) runAllLegacy() (remoteErr, err error) {
allScript := st.conf.AllScript()
sp := st.CreateSpan("legacy_all_path", allScript)
remoteErr, err = st.bc.Exec(path.Join("go", allScript), buildlet.ExecOpts{
Output: st,
ExtraEnv: st.conf.Env(),
Debug: true,
Args: st.conf.AllScriptArgs(),
})
if err != nil {
sp.Done(err)
return nil, err
}
if remoteErr != nil {
sp.Done(err)
return fmt.Errorf("all script failed: %v", remoteErr), nil
}
sp.Done(nil)
return nil, nil
}
func (st *buildStatus) doSnapshot(bc *buildlet.Client) error {
// If we're using a pre-built snapshot, don't make another.
if st.useSnapshot() {
return nil
}
if st.conf.SkipSnapshot {
return nil
}
if err := st.cleanForSnapshot(bc); err != nil {
return fmt.Errorf("cleanForSnapshot: %v", err)
}
if err := st.writeSnapshot(bc); err != nil {
return fmt.Errorf("writeSnapshot: %v", err)
}
return nil
}
func (st *buildStatus) writeGoSource() error {
return st.writeGoSourceTo(st.bc)
}
func (st *buildStatus) writeGoSourceTo(bc *buildlet.Client) error {
// Write the VERSION file.
sp := st.CreateSpan("write_version_tar")
if err := bc.PutTar(buildgo.VersionTgz(st.Rev), "go"); err != nil {
return sp.Done(fmt.Errorf("writing VERSION tgz: %v", err))
}
srcTar, err := sourcecache.GetSourceTgz(st, "go", st.Rev)
if err != nil {
return err
}
sp = st.CreateSpan("write_go_src_tar")
if err := bc.PutTar(srcTar, "go"); err != nil {
return sp.Done(fmt.Errorf("writing tarball from Gerrit: %v", err))
}
return sp.Done(nil)
}
func (st *buildStatus) writeBootstrapToolchain() error {
u := st.conf.GoBootstrapURL(buildEnv)
if u == "" {
return nil
}
const bootstrapDir = "go1.4" // might be newer; name is the default
sp := st.CreateSpan("write_go_bootstrap_tar")
return sp.Done(st.bc.PutTarFromURL(u, bootstrapDir))
}
func (st *buildStatus) cleanForSnapshot(bc *buildlet.Client) error {
sp := st.CreateSpan("clean_for_snapshot")
return sp.Done(bc.RemoveAll(
"go/doc/gopher",
"go/pkg/bootstrap",
))
}
func (st *buildStatus) writeSnapshot(bc *buildlet.Client) (err error) {
sp := st.CreateSpan("write_snapshot_to_gcs")
defer func() { sp.Done(err) }()
// This should happen in 15 seconds or so, but I saw timeouts
// a couple times at 1 minute. Some buildlets might be far
// away on the network, so be more lenient. The timeout mostly
// is here to prevent infinite hangs.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
tsp := st.CreateSpan("fetch_snapshot_reader_from_buildlet")
tgz, err := bc.GetTar(ctx, "go")
tsp.Done(err)
if err != nil {
return err
}
defer tgz.Close()
wr := storageClient.Bucket(buildEnv.SnapBucket).Object(st.SnapshotObjectName()).NewWriter(ctx)
wr.ContentType = "application/octet-stream"
wr.ACL = append(wr.ACL, storage.ACLRule{Entity: storage.AllUsers, Role: storage.RoleReader})
if _, err := io.Copy(wr, tgz); err != nil {
st.logf("failed to write snapshot to GCS: %v", err)
wr.CloseWithError(err)
return err
}
return wr.Close()
}
// reportErr reports an error to Stackdriver.
func (st *buildStatus) reportErr(err error) {
if errorsClient == nil {
// errorsClient is nil in dev environments.
return
}
var noRequest *http.Request
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
err = fmt.Errorf("buildID: %v, name: %s, hostType: %s, error: %v", st.buildID, st.conf.Name, st.conf.HostType, err)
errorsClient.Report(ctx, noRequest, err)
}
func (st *buildStatus) distTestList() (names []string, remoteErr, err error) {
workDir, err := st.bc.WorkDir()
if err != nil {
err = fmt.Errorf("distTestList, WorkDir: %v", err)
return
}
goroot := st.conf.FilePathJoin(workDir, "go")
args := []string{"tool", "dist", "test", "--no-rebuild", "--list"}
if st.conf.IsRace() {
args = append(args, "--race")
}
if st.conf.CompileOnly {
args = append(args, "--compile-only")
}
var buf bytes.Buffer
remoteErr, err = st.bc.Exec(path.Join("go", "bin", "go"), buildlet.ExecOpts{
Output: &buf,
ExtraEnv: append(st.conf.Env(), "GOROOT="+goroot),
OnStartExec: func() { st.LogEventTime("discovering_tests") },
Path: []string{"$WORKDIR/go/bin", "$PATH"},
Args: args,
})
if remoteErr != nil {
remoteErr = fmt.Errorf("Remote error: %v, %s", remoteErr, buf.Bytes())
err = nil
return
}
if err != nil {
err = fmt.Errorf("Exec error: %v, %s", err, buf.Bytes())
return
}
for _, test := range strings.Fields(buf.String()) {
if st.shouldSkipTest(test) {
continue
}
names = append(names, test)
}
return names, nil, nil
}
// shouldSkipTest reports whether this test should be skipped. We
// only do this for slow builders running redundant tests. (That is,
// tests which have identical behavior across different ports)
func (st *buildStatus) shouldSkipTest(testName string) bool {
if inStaging && st.Name == "linux-arm" && false {
if strings.HasPrefix(testName, "go_test:") && testName < "go_test:runtime" {
return true
}
}
switch testName {
case "vet/all":
// Old vetall test name, before the sharding in CL 37572.
return true
case "api":
return st.isTry() && st.Name != "linux-amd64"
}
return false
}
func (st *buildStatus) newTestSet(names []string, benchmarks []*buildgo.BenchmarkItem) *testSet {
set := &testSet{
st: st,
}
for _, name := range names {
set.items = append(set.items, &testItem{
set: set,
name: name,
duration: testDuration(st.BuilderRev.Name, name),
take: make(chan token, 1),
done: make(chan token),
})
}
for _, bench := range benchmarks {
name := "bench:" + bench.Name()
set.items = append(set.items, &testItem{
set: set,
name: name,
bench: bench,
duration: testDuration(st.BuilderRev.Name, name),
take: make(chan token, 1),
done: make(chan token),
})
}
return set
}
func partitionGoTests(builderName string, tests []string) (sets [][]string) {
var srcTests []string
var cmdTests []string
for _, name := range tests {
if strings.HasPrefix(name, "go_test:cmd/") {
cmdTests = append(cmdTests, name)
} else if strings.HasPrefix(name, "go_test:") {
srcTests = append(srcTests, name)
}
}
sort.Strings(srcTests)
sort.Strings(cmdTests)
goTests := append(srcTests, cmdTests...)
const sizeThres = 10 * time.Second
var curSet []string
var curDur time.Duration
flush := func() {
if len(curSet) > 0 {
sets = append(sets, curSet)
curSet = nil
curDur = 0
}
}
for _, testName := range goTests {
d := testDuration(builderName, testName)
if curDur+d > sizeThres {
flush() // no-op if empty
}
curSet = append(curSet, testName)
curDur += d
}
flush()
return
}
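// Illustrative sketch only: with the fixedTestDuration values below,
// partitionGoTests flushes a chunk once the running total would exceed the
// 10s threshold, so these four stdlib tests (1.30s+1.50s+2.02s, then +6.71s)
// come back as [["go_test:archive/tar" "go_test:bytes" "go_test:fmt"],
// ["go_test:net"]]. The builder name is arbitrary; only the test names
// affect the duration lookup.
func examplePartitionGoTests() [][]string {
	return partitionGoTests("linux-amd64", []string{
		"go_test:archive/tar", "go_test:bytes", "go_test:fmt", "go_test:net",
	})
}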
func secondsToDuration(sec float64) time.Duration {
return time.Duration(float64(sec) * float64(time.Second))
}
type testDurationMap map[string]map[string]time.Duration // builder name => test name => avg
var (
testDurations atomic.Value // of testDurationMap
testDurationsMu sync.Mutex // held while updating testDurations
)
func getTestDurations() testDurationMap {
if m, ok := testDurations.Load().(testDurationMap); ok {
return m
}
testDurationsMu.Lock()
defer testDurationsMu.Unlock()
if m, ok := testDurations.Load().(testDurationMap); ok {
return m
}
updateTestDurationsLocked()
return testDurations.Load().(testDurationMap)
}
func updateTestDurations() {
testDurationsMu.Lock()
defer testDurationsMu.Unlock()
updateTestDurationsLocked()
}
func updateTestDurationsLocked() {
defer time.AfterFunc(1*time.Hour, updateTestDurations)
m := loadTestDurations()
testDurations.Store(m)
}
// The csv file on cloud storage looks like:
// Builder,Event,MedianSeconds,count
// linux-arm-arm5,run_test:runtime:cpu124,334.49922194,10
// linux-arm,run_test:runtime:cpu124,284.609130993,26
// linux-arm-arm5,run_test:go_test:cmd/compile/internal/gc,260.0241916,12
// linux-arm,run_test:go_test:cmd/compile/internal/gc,224.425924681,26
// solaris-amd64-smartosbuildlet,run_test:test:2_5,199.653975717,9
// solaris-amd64-smartosbuildlet,run_test:test:1_5,169.89733442,9
// solaris-amd64-smartosbuildlet,run_test:test:3_5,163.770453839,9
// solaris-amd64-smartosbuildlet,run_test:test:0_5,158.250119402,9
// openbsd-386-gce58,run_test:runtime:cpu124,146.494229388,12
func loadTestDurations() (m testDurationMap) {
m = make(testDurationMap)
r, err := storageClient.Bucket(buildEnv.BuildletBucket).Object("test-durations.csv").NewReader(context.Background())
if err != nil {
log.Printf("loading test durations object from GCS: %v", err)
return
}
defer r.Close()
recs, err := csv.NewReader(r).ReadAll()
if err != nil {
log.Printf("reading test durations CSV: %v", err)
return
}
for _, rec := range recs {
if len(rec) < 3 || rec[0] == "Builder" {
continue
}
builder, testName, secondsStr := rec[0], rec[1], rec[2]
secs, err := strconv.ParseFloat(secondsStr, 64)
if err != nil {
log.Printf("unexpected seconds value in test durations CSV: %v", err)
continue
}
mm := m[builder]
if mm == nil {
mm = make(map[string]time.Duration)
m[builder] = mm
}
mm[testName] = secondsToDuration(secs)
}
return
}
var minGoTestSpeed = (func() time.Duration {
var min Seconds
for name, secs := range fixedTestDuration {
if !strings.HasPrefix(name, "go_test:") {
continue
}
if min == 0 || secs < min {
min = secs
}
}
return min.Duration()
})()
type Seconds float64
func (s Seconds) Duration() time.Duration {
return time.Duration(float64(s) * float64(time.Second))
}
// Fixed test durations in seconds, measured on Linux/amd64 (once on
// 2015-05-28), each test run by itself. There seems to be a 0.6s+ overhead
// from the go tool which goes away if they're combined.
var fixedTestDuration = map[string]Seconds{
"go_test:archive/tar": 1.30,
"go_test:archive/zip": 1.68,
"go_test:bufio": 1.61,
"go_test:bytes": 1.50,
"go_test:compress/bzip2": 0.82,
"go_test:compress/flate": 1.73,
"go_test:compress/gzip": 0.82,
"go_test:compress/lzw": 0.86,
"go_test:compress/zlib": 1.78,
"go_test:container/heap": 0.69,
"go_test:container/list": 0.72,
"go_test:container/ring": 0.64,
"go_test:crypto/aes": 0.79,
"go_test:crypto/cipher": 0.96,
"go_test:crypto/des": 0.96,
"go_test:crypto/dsa": 0.75,
"go_test:crypto/ecdsa": 0.86,
"go_test:crypto/elliptic": 1.06,
"go_test:crypto/hmac": 0.67,
"go_test:crypto/md5": 0.77,
"go_test:crypto/rand": 0.89,
"go_test:crypto/rc4": 0.71,
"go_test:crypto/rsa": 1.17,
"go_test:crypto/sha1": 0.75,
"go_test:crypto/sha256": 0.68,
"go_test:crypto/sha512": 0.67,
"go_test:crypto/subtle": 0.56,
"go_test:crypto/tls": 3.29,
"go_test:crypto/x509": 2.81,
"go_test:database/sql": 1.75,
"go_test:database/sql/driver": 0.64,
"go_test:debug/dwarf": 0.77,
"go_test:debug/elf": 1.41,
"go_test:debug/gosym": 1.45,
"go_test:debug/macho": 0.97,
"go_test:debug/pe": 0.79,
"go_test:debug/plan9obj": 0.73,
"go_test:encoding/ascii85": 0.64,
"go_test:encoding/asn1": 1.16,
"go_test:encoding/base32": 0.79,
"go_test:encoding/base64": 0.82,
"go_test:encoding/binary": 0.96,
"go_test:encoding/csv": 0.67,
"go_test:encoding/gob": 2.70,
"go_test:encoding/hex": 0.66,
"go_test:encoding/json": 2.20,
"test:errors": 0.54,
"go_test:expvar": 1.36,
"go_test:flag": 0.92,
"go_test:fmt": 2.02,
"go_test:go/ast": 1.44,
"go_test:go/build": 1.42,
"go_test:go/constant": 0.92,
"go_test:go/doc": 1.51,
"go_test:go/format": 0.73,
"go_test:go/internal/gcimporter": 1.30,
"go_test:go/parser": 1.30,
"go_test:go/printer": 1.61,
"go_test:go/scanner": 0.89,
"go_test:go/token": 0.92,
"go_test:go/types": 5.24,
"go_test:hash/adler32": 0.62,
"go_test:hash/crc32": 0.68,
"go_test:hash/crc64": 0.55,
"go_test:hash/fnv": 0.66,
"go_test:html": 0.74,
"go_test:html/template": 1.93,
"go_test:image": 1.42,
"go_test:image/color": 0.77,
"go_test:image/draw": 1.32,
"go_test:image/gif": 1.15,
"go_test:image/jpeg": 1.32,
"go_test:image/png": 1.23,
"go_test:index/suffixarray": 0.79,
"go_test:internal/singleflight": 0.66,
"go_test:io": 0.97,
"go_test:io/ioutil": 0.73,
"go_test:log": 0.72,
"go_test:log/syslog": 2.93,
"go_test:math": 1.59,
"go_test:math/big": 3.75,
"go_test:math/cmplx": 0.81,
"go_test:math/rand": 1.15,
"go_test:mime": 1.01,
"go_test:mime/multipart": 1.51,
"go_test:mime/quotedprintable": 0.95,
"go_test:net": 6.71,
"go_test:net/http": 9.41,
"go_test:net/http/cgi": 2.00,
"go_test:net/http/cookiejar": 1.51,
"go_test:net/http/fcgi": 1.43,
"go_test:net/http/httptest": 1.36,
"go_test:net/http/httputil": 1.54,
"go_test:net/http/internal": 0.68,
"go_test:net/internal/socktest": 0.58,
"go_test:net/mail": 0.92,
"go_test:net/rpc": 1.95,
"go_test:net/rpc/jsonrpc": 1.50,
"go_test:net/smtp": 1.43,
"go_test:net/textproto": 1.01,
"go_test:net/url": 1.45,
"go_test:os": 1.88,
"go_test:os/exec": 2.13,
"go_test:os/signal": 4.22,
"go_test:os/user": 0.93,
"go_test:path": 0.68,
"go_test:path/filepath": 1.14,
"go_test:reflect": 3.42,
"go_test:regexp": 1.65,
"go_test:regexp/syntax": 1.40,
"go_test:runtime": 21.02,
"go_test:runtime/debug": 0.79,
"go_test:runtime/pprof": 8.01,
"go_test:sort": 0.96,
"go_test:strconv": 1.60,
"go_test:strings": 1.51,
"go_test:sync": 1.05,
"go_test:sync/atomic": 1.13,
"go_test:syscall": 1.69,
"go_test:testing": 3.70,
"go_test:testing/quick": 0.74,
"go_test:text/scanner": 0.79,
"go_test:text/tabwriter": 0.71,
"go_test:text/template": 1.65,
"go_test:text/template/parse": 1.25,
"go_test:time": 4.20,
"go_test:unicode": 0.68,
"go_test:unicode/utf16": 0.77,
"go_test:unicode/utf8": 0.71,
"go_test:cmd/addr2line": 1.73,
"go_test:cmd/api": 1.33,
"go_test:cmd/asm/internal/asm": 1.24,
"go_test:cmd/asm/internal/lex": 0.91,
"go_test:cmd/compile/internal/big": 5.26,
"go_test:cmd/cover": 3.32,
"go_test:cmd/fix": 1.26,
"go_test:cmd/go": 36,
"go_test:cmd/gofmt": 1.06,
"go_test:cmd/internal/goobj": 0.65,
"go_test:cmd/internal/obj": 1.16,
"go_test:cmd/internal/obj/x86": 1.04,
"go_test:cmd/internal/rsc.io/arm/armasm": 1.92,
"go_test:cmd/internal/rsc.io/x86/x86asm": 2.22,
"go_test:cmd/newlink": 1.48,
"go_test:cmd/nm": 1.84,
"go_test:cmd/objdump": 3.60,
"go_test:cmd/pack": 2.64,
"go_test:cmd/pprof/internal/profile": 1.29,
"go_test:cmd/compile/internal/gc": 18,
"gp_test:cmd/compile/internal/ssa": 8,
"runtime:cpu124": 44.78,
"sync_cpu": 1.01,
"cgo_stdio": 1.53,
"cgo_life": 1.56,
"cgo_test": 45.60,
"race": 42.55,
"testgodefs": 2.37,
"testso": 2.72,
"testcarchive": 11.11,
"testcshared": 15.80,
"testshared": 7.13,
"testasan": 2.56,
"cgo_errors": 7.03,
"testsigfwd": 2.74,
"doc_progs": 5.38,
"wiki": 3.56,
"shootout": 11.34,
"bench_go1": 3.72,
"test:0_5": 10,
"test:1_5": 10,
"test:2_5": 10,
"test:3_5": 10,
"test:4_5": 10,
"codewalk": 2.42,
"api": 7.38,
"go_test_bench:compress/bzip2": 3.059513602,
"go_test_bench:image/jpeg": 3.143345345,
"go_test_bench:encoding/hex": 3.182452293,
"go_test_bench:expvar": 3.490162906,
"go_test_bench:crypto/cipher": 3.609317114,
"go_test_bench:compress/lzw": 3.628982201,
"go_test_bench:database/sql": 3.693163398,
"go_test_bench:math/rand": 3.807438591,
"go_test_bench:bufio": 3.882166683,
"go_test_bench:context": 4.038173785,
"go_test_bench:hash/crc32": 4.107135055,
"go_test_bench:unicode/utf8": 4.205641826,
"go_test_bench:regexp/syntax": 4.587359311,
"go_test_bench:sort": 4.660599666,
"go_test_bench:math/cmplx": 5.311264213,
"go_test_bench:encoding/gob": 5.326788419,
"go_test_bench:reflect": 5.777081055,
"go_test_bench:image/png": 6.12439885,
"go_test_bench:html/template": 6.765132418,
"go_test_bench:fmt": 7.476528843,
"go_test_bench:sync": 7.526458261,
"go_test_bench:archive/zip": 7.782424696,
"go_test_bench:regexp": 8.428459563,
"go_test_bench:image/draw": 8.666510786,
"go_test_bench:strings": 10.836201759,
"go_test_bench:time": 10.952476479,
"go_test_bench:image/gif": 11.373276098,
"go_test_bench:encoding/json": 11.547950173,
"go_test_bench:crypto/tls": 11.548834754,
"go_test_bench:strconv": 12.819669296,
"go_test_bench:math": 13.7889302,
"go_test_bench:net": 14.845086695,
"go_test_bench:net/http": 15.288519219,
"go_test_bench:bytes": 15.809308703,
"go_test_bench:index/suffixarray": 23.69239388,
"go_test_bench:compress/flate": 26.906228664,
"go_test_bench:math/big": 28.82127674,
}
// testDuration predicts how long the dist test 'testName' will take.
// It's only a scheduling guess.
func testDuration(builderName, testName string) time.Duration {
if false { // disabled for now. never tested. TODO: test, enable.
durs := getTestDurations()
bdur := durs[builderName]
if d, ok := bdur[testName]; ok {
return d
}
}
if secs, ok := fixedTestDuration[testName]; ok {
return secs.Duration()
}
if strings.HasPrefix(testName, "bench:") {
// Assume benchmarks are roughly 20 seconds per run.
return 2 * 5 * 20 * time.Second
}
return minGoTestSpeed * 2
}
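// Illustrative sketch only: the three fallbacks of testDuration, using one
// name from fixedTestDuration, a hypothetical benchmark name, and an unknown
// test name.
func exampleTestDuration() []time.Duration {
	return []time.Duration{
		testDuration("linux-amd64", "go_test:runtime"),  // from the fixed table: 21.02s
		testDuration("linux-amd64", "bench:example"),    // benchmark guess: 2*5*20 seconds
		testDuration("linux-amd64", "go_test:not/real"), // fallback: minGoTestSpeed * 2
	}
}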
func (st *buildStatus) runSubrepoTests() (remoteErr, err error) {
st.LogEventTime("fetching_subrepo", st.SubName)
workDir, err := st.bc.WorkDir()
if err != nil {
err = fmt.Errorf("error discovering workdir for helper %s: %v", st.bc.IPPort(), err)
return nil, err
}
goroot := st.conf.FilePathJoin(workDir, "go")
gopath := st.conf.FilePathJoin(workDir, "gopath")
fetched := map[string]bool{}
toFetch := []string{st.SubName}
// fetch checks out the provided sub-repo to the buildlet's workspace.
fetch := func(repo, rev string) error {
fetched[repo] = true
return buildgo.FetchSubrepo(st, st.bc, repo, rev)
}
// findDeps uses 'go list' on the checked out repo to find its
// dependencies, and adds any not-yet-fetched deps to toFetch.
findDeps := func(repo string) (rErr, err error) {
repoPath := subrepoPrefix + repo
var buf bytes.Buffer
rErr, err = st.bc.Exec(path.Join("go", "bin", "go"), buildlet.ExecOpts{
Output: &buf,
ExtraEnv: append(st.conf.Env(), "GOROOT="+goroot, "GOPATH="+gopath),
Path: []string{"$WORKDIR/go/bin", "$PATH"},
Args: []string{"list", "-f", `{{range .Deps}}{{printf "%v\n" .}}{{end}}`, repoPath + "/..."},
})
if err != nil {
return nil, fmt.Errorf("exec go list on buildlet: %v", err)
}
if rErr != nil {
fmt.Fprintf(st, "go list error:\n%s", &buf)
return rErr, nil
}
for _, p := range strings.Fields(buf.String()) {
if !strings.HasPrefix(p, subrepoPrefix) || strings.HasPrefix(p, repoPath) {
continue
}
repo = strings.TrimPrefix(p, subrepoPrefix)
if i := strings.Index(repo, "/"); i >= 0 {
repo = repo[:i]
}
if !fetched[repo] {
toFetch = append(toFetch, repo)
}
}
return nil, nil
}
// Recursively fetch the repo and its dependencies.
// Dependencies are always fetched at master, which isn't
// great but the dashboard data model doesn't track
// sub-repo dependencies. TODO(adg): fix this somehow??
for i := 0; i < len(toFetch); i++ {
repo := toFetch[i]
if fetched[repo] {
continue
}
// Fetch the HEAD revision by default.
rev, err := getRepoHead(repo)
if err != nil {
return nil, err
}
if rev == "" {
rev = "master" // should happen rarely; ok if it does.
}
// For the repo under test, choose that specific revision.
if i == 0 {
rev = st.SubRev
}
if err := fetch(repo, rev); err != nil {
return nil, err
}
if rErr, err := findDeps(repo); err != nil {
return nil, err
} else if rErr != nil {
// An issue with the package may cause "go list" to
// fail and this is a legitimate build error.
return rErr, nil
}
}
sp := st.CreateSpan("running_subrepo_tests", st.SubName)
defer func() { sp.Done(err) }()
return st.bc.Exec(path.Join("go", "bin", "go"), buildlet.ExecOpts{
Output: st,
// TODO(adg): remove vendor experiment variable after Go 1.6
ExtraEnv: append(st.conf.Env(),
"GOROOT="+goroot,
"GOPATH="+gopath,
"GO15VENDOREXPERIMENT=1"),
Path: []string{"$WORKDIR/go/bin", "$PATH"},
Args: []string{"test", "-short", subrepoPrefix + st.SubName + "/..."},
})
}
// affectedPkgs returns the name of every package affected by this commit.
// The returned list may contain duplicates and is unsorted.
// It is safe to call this on a nil trySet.
func (ts *trySet) affectedPkgs() (pkgs []string) {
// TODO(quentin): Support non-try commits by asking maintnerd for the affected files.
if ts == nil {
return
}
// TODO(bradfitz): query maintner for this. Old logic with a *gerrit.ChangeInfo was:
/*
rev := ts.ci.Revisions[ts.ci.CurrentRevision]
for p := range rev.Files {
if strings.HasPrefix(p, "src/") {
pkg := path.Dir(p[len("src/"):])
if pkg != "" {
pkgs = append(pkgs, pkg)
}
}
}
*/
return
}
// runTests is only called for builders which support a split make/run
// (should be everything, at least soon). Currently (2015-05-27) iOS,
// Android, and NaCl do not.
//
// After runTests completes, the caller must assume that st.bc might be invalid
// (It's possible that only one of the helper buildlets survived).
func (st *buildStatus) runTests(helpers <-chan *buildlet.Client) (remoteErr, err error) {
testNames, remoteErr, err := st.distTestList()
if remoteErr != nil {
return fmt.Errorf("distTestList remote: %v", remoteErr), nil
}
if err != nil {
return nil, fmt.Errorf("distTestList exec: %v", err)
}
var benches []*buildgo.BenchmarkItem
if st.shouldBench() {
sp := st.CreateSpan("enumerate_benchmarks")
rev, err := getRepoHead("benchmarks")
if err != nil {
return nil, err
}
if rev == "" {
rev = "master" // should happen rarely; ok if it does.
}
b, err := st.goBuilder().EnumerateBenchmarks(st.bc, rev, st.trySet.affectedPkgs())
sp.Done(err)
if err == nil {
benches = b
}
}
set := st.newTestSet(testNames, benches)
st.LogEventTime("starting_tests", fmt.Sprintf("%d tests", len(set.items)))
startTime := time.Now()
workDir, err := st.bc.WorkDir()
if err != nil {
return nil, fmt.Errorf("error discovering workdir for main buildlet, %s: %v", st.bc.Name(), err)
}
mainBuildletGoroot := st.conf.FilePathJoin(workDir, "go")
// We use our original buildlet to run the tests in order, to
// make the streaming somewhat smooth and not incredibly
// lumpy. The rest of the buildlets run the largest tests
// first (critical path scheduling).
// The buildletActivity WaitGroup is used to track when all
// the buildlets are dead or done.
var buildletActivity sync.WaitGroup
buildletActivity.Add(2) // one per goroutine below (main + helper launcher goroutine)
go func() {
defer buildletActivity.Done() // for the per-goroutine Add(2) above
for !st.bc.IsBroken() {
tis, ok := set.testsToRunInOrder()
if !ok {
select {
case <-st.ctx.Done():
return
case <-time.After(5 * time.Second):
}
continue
}
st.runTestsOnBuildlet(st.bc, tis, mainBuildletGoroot)
}
st.LogEventTime("main_buildlet_broken", st.bc.Name())
}()
go func() {
defer buildletActivity.Done() // for the per-goroutine Add(2) above
for helper := range helpers {
buildletActivity.Add(1)
go func(bc *buildlet.Client) {
defer buildletActivity.Done() // for the per-helper Add(1) above
defer st.LogEventTime("closed_helper", bc.Name())
defer bc.Close()
if devPause {
defer time.Sleep(5 * time.Minute)
defer st.LogEventTime("DEV_HELPER_SLEEP", bc.Name())
}
st.LogEventTime("got_empty_test_helper", bc.String())
if err := bc.PutTarFromURL(st.SnapshotURL(buildEnv), "go"); err != nil {
log.Printf("failed to extract snapshot for helper %s: %v", bc.Name(), err)
return
}
workDir, err := bc.WorkDir()
if err != nil {
log.Printf("error discovering workdir for helper %s: %v", bc.Name(), err)
return
}
st.LogEventTime("test_helper_set_up", bc.Name())
goroot := st.conf.FilePathJoin(workDir, "go")
for !bc.IsBroken() {
tis, ok := set.testsToRunBiggestFirst()
if !ok {
st.LogEventTime("no_new_tests_remain", bc.Name())
return
}
st.runTestsOnBuildlet(bc, tis, goroot)
}
st.LogEventTime("test_helper_is_broken", bc.Name())
}(helper)
}
}()
// Convert a sync.WaitGroup into a channel.
// Aside: https://groups.google.com/forum/#!topic/golang-dev/7fjGWuImu5k
buildletsGone := make(chan struct{})
go func() {
buildletActivity.Wait()
close(buildletsGone)
}()
benchFiles := st.benchFiles()
var lastBanner string
var serialDuration time.Duration
for _, ti := range set.items {
AwaitDone:
for {
timer := time.NewTimer(30 * time.Second)
select {
case <-ti.done: // wait for success
timer.Stop()
break AwaitDone
case <-timer.C:
st.LogEventTime("still_waiting_on_test", ti.name)
case <-buildletsGone:
set.cancelAll()
return nil, fmt.Errorf("dist test failed: all buildlets had network errors or timeouts, yet tests remain")
}
}
if ti.bench != nil {
for i, s := range ti.bench.Output {
if i < len(benchFiles) {
benchFiles[i].out.WriteString(s)
}
}
}
serialDuration += ti.execDuration
if len(ti.output) > 0 {
banner, out := parseOutputAndBanner(ti.output)
if banner != lastBanner {
lastBanner = banner
fmt.Fprintf(st, "\n##### %s\n", banner)
}
if inStaging {
out = bytes.TrimSuffix(out, nl)
st.Write(out)
fmt.Fprintf(st, " (shard %s; par=%d)\n", ti.shardIPPort, ti.groupSize)
} else {
st.Write(out)
}
}
if ti.remoteErr != nil {
set.cancelAll()
return fmt.Errorf("dist test failed: %s: %v", ti.name, ti.remoteErr), nil
}
}
elapsed := time.Since(startTime)
var msg string
if st.conf.NumTestHelpers(st.isTry()) > 0 {
msg = fmt.Sprintf("took %v; aggregate %v; saved %v", elapsed, serialDuration, serialDuration-elapsed)
} else {
msg = fmt.Sprintf("took %v", elapsed)
}
st.LogEventTime("tests_complete", msg)
fmt.Fprintf(st, "\nAll tests passed.\n")
for _, f := range benchFiles {
if f.out.Len() > 0 {
st.hasBenchResults = true
}
}
if st.hasBenchResults {
sp := st.CreateSpan("upload_bench_results")
sp.Done(st.uploadBenchResults(st.ctx, benchFiles))
}
return nil, nil
}
func (st *buildStatus) uploadBenchResults(ctx context.Context, files []*benchFile) error {
s := *perfServer
if s == "" {
s = buildEnv.PerfDataURL
}
client := &perfstorage.Client{BaseURL: s, HTTPClient: oAuthHTTPClient}
u := client.NewUpload(ctx)
for _, b := range files {
w, err := u.CreateFile(b.name)
if err != nil {
u.Abort()
return err
}
if _, err := b.out.WriteTo(w); err != nil {
u.Abort()
return err
}
}
status, err := u.Commit()
if err != nil {
return err
}
st.LogEventTime("bench_upload", status.UploadID)
return nil
}
// TODO: what is a bench file?
type benchFile struct {
name string
out bytes.Buffer
}
func (st *buildStatus) benchFiles() []*benchFile {
if !st.shouldBench() {
return nil
}
// TODO: re-enable benchmarking. Or do it outside of the coordinator, if we end up
// making the coordinator into just a gomote proxy + scheduler.
// Old logic was:
/*
// We know rev and rev.Commit.Parents[0] exist because BenchmarkItem.buildParent has checked.
rev := st.trySet.ci.Revisions[st.trySet.ci.CurrentRevision]
ps := rev.PatchSetNumber
benchFiles := []*benchFile{
{name: "orig.txt"},
{name: fmt.Sprintf("ps%d.txt", ps)},
}
fmt.Fprintf(&benchFiles[0].out, "cl: %d\nps: %d\ntry: %s\nbuildlet: %s\nbranch: %s\nrepo: https://go.googlesource.com/%s\n",
st.trySet.ci.ChangeNumber, ps, st.trySet.tryID,
st.Name, st.trySet.ci.Branch, st.trySet.ci.Project,
)
if inStaging {
benchFiles[0].out.WriteString("staging: true\n")
}
benchFiles[1].out.Write(benchFiles[0].out.Bytes())
fmt.Fprintf(&benchFiles[0].out, "commit: %s\n", rev.Commit.Parents[0].CommitID)
fmt.Fprintf(&benchFiles[1].out, "commit: %s\n", st.BuilderRev.Rev)
return benchFiles
*/
return nil
}
const (
banner = "XXXBANNERXXX:" // flag passed to dist
bannerPrefix = "\n" + banner // with the newline added by dist
)
var bannerPrefixBytes = []byte(bannerPrefix)
func parseOutputAndBanner(b []byte) (banner string, out []byte) {
if bytes.HasPrefix(b, bannerPrefixBytes) {
b = b[len(bannerPrefixBytes):]
nl := bytes.IndexByte(b, '\n')
if nl != -1 {
banner = string(b[:nl])
b = b[nl+1:]
}
}
return banner, b
}
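// Illustrative sketch only: dist prefixes each section of its output with the
// banner flag defined above, so a buffer like "\nXXXBANNERXXX:os/exec\nok\n"
// parses into banner "os/exec" with "ok\n" left as the remaining output.
func exampleParseOutputAndBanner() (string, []byte) {
	return parseOutputAndBanner([]byte(bannerPrefix + "os/exec\nok\n"))
}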
// maxTestExecErrors is the number of test execution failures at which
// we give up and stop trying and instead permanently fail the test.
// Note that this is not related to whether the test failed remotely,
// but whether we were unable to start or complete watching it run.
// (A communication error)
const maxTestExecErrors = 3
func execTimeout(testNames []string) time.Duration {
// TODO(bradfitz): something smarter probably.
return 20 * time.Minute
}
// runTestsOnBuildlet runs tis on bc, using the optional goroot environment variable.
func (st *buildStatus) runTestsOnBuildlet(bc *buildlet.Client, tis []*testItem, goroot string) {
names := make([]string, len(tis))
for i, ti := range tis {
names[i] = ti.name
if i > 0 && (!strings.HasPrefix(ti.name, "go_test:") || !strings.HasPrefix(names[0], "go_test:")) {
panic("only go_test:* tests may be merged")
}
}
var spanName string
var detail string
if len(names) == 1 {
spanName = "run_test:" + names[0]
detail = bc.Name()
} else {
spanName = "run_tests_multi"
detail = fmt.Sprintf("%s: %v", bc.Name(), names)
}
sp := st.CreateSpan(spanName, detail)
args := []string{"tool", "dist", "test", "--no-rebuild", "--banner=" + banner}
if st.conf.IsRace() {
args = append(args, "--race")
}
if st.conf.CompileOnly {
args = append(args, "--compile-only")
}
args = append(args, names...)
var buf bytes.Buffer
t0 := time.Now()
timeout := execTimeout(names)
var remoteErr, err error
if ti := tis[0]; ti.bench != nil {
pbr, perr := st.parentRev()
// TODO(quentin): Error if parent commit could not be determined?
if perr == nil {
remoteErr, err = ti.bench.Run(buildEnv, st, st.conf, bc, &buf, []buildgo.BuilderRev{st.BuilderRev, pbr})
}
} else {
remoteErr, err = bc.Exec(path.Join("go", "bin", "go"), buildlet.ExecOpts{
// We set Dir to "." instead of the default ("go/bin") so when the dist tests
// try to run os/exec.Command("go", "test", ...), the LookPath of "go" doesn't
// return "./go.exe" (which exists in the current directory: "go/bin") and then
// fail when dist tries to run the binary in dir "$GOROOT/src", since
// "$GOROOT/src" + "./go.exe" doesn't exist. Perhaps LookPath should return
// an absolute path.
Dir: ".",
Output: &buf, // see "maybe stream lines" TODO below
ExtraEnv: append(st.conf.Env(), "GOROOT="+goroot),
Timeout: timeout,
Path: []string{"$WORKDIR/go/bin", "$PATH"},
Args: args,
})
}
execDuration := time.Since(t0)
sp.Done(err)
if err != nil {
bc.MarkBroken() // prevents reuse
for _, ti := range tis {
ti.numFail++
st.logf("Execution error running %s on %s: %v (numFails = %d)", ti.name, bc, err, ti.numFail)
if err == buildlet.ErrTimeout {
ti.failf("Test %q ran over %v limit (%v)", ti.name, timeout, execDuration)
} else if ti.numFail >= maxTestExecErrors {
ti.failf("Failed to schedule %q test after %d tries.\n", ti.name, maxTestExecErrors)
} else {
ti.retry()
}
}
return
}
out := buf.Bytes()
out = bytes.Replace(out, []byte("\nALL TESTS PASSED (some were excluded)\n"), nil, 1)
out = bytes.Replace(out, []byte("\nALL TESTS PASSED\n"), nil, 1)
for _, ti := range tis {
ti.output = out
ti.remoteErr = remoteErr
ti.execDuration = execDuration
ti.groupSize = len(tis)
ti.shardIPPort = bc.IPPort()
close(ti.done)
// After the first one, make the rest succeed with no output.
// TODO: maybe stream lines (set Output to a line-reading
// Writer instead of &buf). for now we just wait for them in
// ~10 second batches. Doesn't look as smooth on the output,
// though.
out = nil
remoteErr = nil
execDuration = 0
}
}
type testSet struct {
st *buildStatus
items []*testItem
mu sync.Mutex
inOrder [][]*testItem
biggestFirst [][]*testItem
}
// cancelAll cancels all pending tests.
func (s *testSet) cancelAll() {
for _, ti := range s.items {
ti.tryTake() // ignore return value
}
}
func (s *testSet) testsToRunInOrder() (chunk []*testItem, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.inOrder == nil {
s.initInOrder()
}
return s.testsFromSlice(s.inOrder)
}
func (s *testSet) testsToRunBiggestFirst() (chunk []*testItem, ok bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.biggestFirst == nil {
s.initBiggestFirst()
}
return s.testsFromSlice(s.biggestFirst)
}
func (s *testSet) testsFromSlice(chunkList [][]*testItem) (chunk []*testItem, ok bool) {
for _, candChunk := range chunkList {
for _, ti := range candChunk {
if ti.tryTake() {
chunk = append(chunk, ti)
}
}
if len(chunk) > 0 {
return chunk, true
}
}
return nil, false
}
func (s *testSet) initInOrder() {
names := make([]string, len(s.items))
namedItem := map[string]*testItem{}
for i, ti := range s.items {
names[i] = ti.name
namedItem[ti.name] = ti
}
// First do the go_test:* ones. partitionGoTests
// only returns those, which are the ones we merge together.
stdSets := partitionGoTests(s.st.BuilderRev.Name, names)
for _, set := range stdSets {
tis := make([]*testItem, len(set))
for i, name := range set {
tis[i] = namedItem[name]
}
s.inOrder = append(s.inOrder, tis)
}
// Then do the misc tests, which are always by themselves.
// (No benefit to merging them)
for _, ti := range s.items {
if !strings.HasPrefix(ti.name, "go_test:") {
s.inOrder = append(s.inOrder, []*testItem{ti})
}
}
}
func (s *testSet) initBiggestFirst() {
items := append([]*testItem(nil), s.items...)
sort.Sort(sort.Reverse(byTestDuration(items)))
for _, item := range items {
s.biggestFirst = append(s.biggestFirst, []*testItem{item})
}
}
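// Illustrative sketch only: initBiggestFirst implements the critical-path
// scheduling mentioned in runTests by handing out the longest tests first.
// With hypothetical durations of 3s, 9s and 1s, the first chunk returned is
// the single 9s item.
func exampleBiggestFirst() []*testItem {
	s := &testSet{items: []*testItem{
		{name: "go_test:flag", duration: 3 * time.Second, take: make(chan token, 1), done: make(chan token)},
		{name: "go_test:net", duration: 9 * time.Second, take: make(chan token, 1), done: make(chan token)},
		{name: "go_test:path", duration: 1 * time.Second, take: make(chan token, 1), done: make(chan token)},
	}}
	chunk, _ := s.testsToRunBiggestFirst()
	return chunk // contains only the "go_test:net" item
}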
type testItem struct {
set *testSet
name string // "go_test:sort"
duration time.Duration // optional approximate size
bench *buildgo.BenchmarkItem // If populated, this is a benchmark instead of a regular test.
take chan token // buffered size 1: sending takes ownership of rest of fields:
done chan token // closed when done; guards output & failed
numFail int // how many times it's failed to execute
// groupSize is the number of tests which were run together
// along with this one with "go tool dist test".
// This is 1 for non-std/cmd tests, and usually >1 for std/cmd tests.
groupSize int
shardIPPort string // buildlet's IPPort, for debugging
// the following are only set for the first item in a group:
output []byte
remoteErr error // real test failure (not a communications failure)
execDuration time.Duration // actual time
}
func (ti *testItem) tryTake() bool {
select {
case ti.take <- token{}:
return true
default:
return false
}
}
func (ti *testItem) isDone() bool {
select {
case <-ti.done:
return true
default:
return false
}
}
// retry reschedules the test to run again, if a machine died before
// or during execution, so its results aren't yet known.
// The caller must own the 'take' semaphore.
func (ti *testItem) retry() {
// release it to make it available for somebody else to try later:
<-ti.take
}
func (ti *testItem) failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
ti.output = []byte(msg)
ti.remoteErr = errors.New(msg)
close(ti.done)
}
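// Illustrative sketch only: the ownership protocol around a testItem. A worker
// first wins the take semaphore with tryTake, then either records a result
// (failf, like normal completion, closes the done channel) or calls retry to
// hand the test back. The failure text here is purely for illustration.
func exampleTestItemProtocol() bool {
	ti := &testItem{take: make(chan token, 1), done: make(chan token)}
	if ti.tryTake() {
		ti.failf("illustrative failure: %v", errors.New("example"))
	}
	return ti.isDone() // true: failf closed the done channel
}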
type byTestDuration []*testItem
func (s byTestDuration) Len() int { return len(s) }
func (s byTestDuration) Less(i, j int) bool { return s[i].duration < s[j].duration }
func (s byTestDuration) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
type eventAndTime struct {
t time.Time
evt string
text string
}
// buildStatus is the status of a build.
type buildStatus struct {
// Immutable:
buildgo.BuilderRev
buildID string // "B" + 9 random hex
conf dashboard.BuildConfig
startTime time.Time // actually time of newBuild (~same thing); TODO(bradfitz): rename this createTime
trySet *trySet // or nil
onceInitHelpers sync.Once // guards call of onceInitHelpersFunc
helpers <-chan *buildlet.Client
ctx context.Context // used to start the build
cancel context.CancelFunc // used to cancel context; for use by setDone only
hasBuildlet int32 // atomic: non-zero if this build has a buildlet; for status.go.
hasBenchResults bool // set by runTests, may only be used when build() returns.
mu sync.Mutex // guards following
failURL string // if non-empty, permanent URL of failure
bc *buildlet.Client // nil initially, until pool returns one
done time.Time // finished running
succeeded bool // set when done
output livelog.Buffer // stdout and stderr
startedPinging bool // started pinging the go dashboard
events []eventAndTime
useSnapshotMemo *bool // if non-nil, memoized result of useSnapshot
}
func (st *buildStatus) setDone(succeeded bool) {
st.mu.Lock()
defer st.mu.Unlock()
st.succeeded = succeeded
st.done = time.Now()
st.output.Close()
st.cancel()
}
func (st *buildStatus) isRunning() bool {
st.mu.Lock()
defer st.mu.Unlock()
return st.isRunningLocked()
}
func (st *buildStatus) isRunningLocked() bool { return st.done.IsZero() }
func (st *buildStatus) logf(format string, args ...interface{}) {
log.Printf("[build %s %s]: %s", st.Name, st.Rev, fmt.Sprintf(format, args...))
}
// span is an event covering a region of time.
// A span ultimately ends in an error or success, and will eventually
// be visualized and logged.
type span struct {
event string // event name like "get_foo" or "write_bar"
optText string // optional details for event
start time.Time
end time.Time
el eventTimeLogger // where we log to at the end; TODO: this will change
}
func createSpan(el eventTimeLogger, event string, optText ...string) *span {
if len(optText) > 1 {
panic("usage")
}
start := time.Now()
var opt string
if len(optText) > 0 {
opt = optText[0]
}
el.LogEventTime(event, opt)
return &span{
el: el,
event: event,
start: start,
optText: opt,
}
}
// Done ends a span.
// It is legal to call Done multiple times. Only the first call
// logs.
// Done always returns its input argument.
func (s *span) Done(err error) error {
if !s.end.IsZero() {
return err
}
t1 := time.Now()
s.end = t1
td := t1.Sub(s.start)
var text bytes.Buffer
fmt.Fprintf(&text, "after %s", friendlyDuration(td))
if err != nil {
fmt.Fprintf(&text, "; err=%v", err)
}
if s.optText != "" {
fmt.Fprintf(&text, "; %v", s.optText)
}
if st, ok := s.el.(*buildStatus); ok {
putSpanRecord(st.spanRecord(s, err))
}
s.el.LogEventTime("finish_"+s.event, text.String())
return err
}
func (st *buildStatus) CreateSpan(event string, optText ...string) spanlog.Span {
return createSpan(st, event, optText...)
}
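// Illustrative sketch only: the span idiom used throughout this file. Create a
// span around a unit of work, then pass the resulting error (possibly nil) to
// Done exactly once; later Done calls are harmless no-ops. The event name here
// is hypothetical.
func exampleSpan(st *buildStatus) error {
	sp := st.CreateSpan("example_step", "optional detail")
	var err error // stand-in for the real work whose outcome we record
	return sp.Done(err)
}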
func (st *buildStatus) LogEventTime(event string, optText ...string) {
if len(optText) > 1 {
panic("usage")
}
if inStaging {
st.logf("%s %v", event, optText)
}
st.mu.Lock()
defer st.mu.Unlock()
switch event {
case "finish_get_buildlet", "create_gce_buildlet":
if !st.startedPinging {
st.startedPinging = true
go st.pingDashboard()
}
}
var text string
if len(optText) > 0 {
text = optText[0]
}
st.events = append(st.events, eventAndTime{
t: time.Now(),
evt: event,
text: text,
})
}
func (st *buildStatus) hasEvent(event string) bool {
st.mu.Lock()
defer st.mu.Unlock()
for _, e := range st.events {
if e.evt == event {
return true
}
}
return false
}
// HTMLStatusLine returns the HTML to show within the <pre> block on
// the main page's list of active builds.
func (st *buildStatus) HTMLStatusLine() template.HTML { return st.htmlStatusLine(true) }
func (st *buildStatus) HTMLStatusLine_done() template.HTML { return st.htmlStatusLine(false) }
func (st *buildStatus) htmlStatusLine(full bool) template.HTML {
st.mu.Lock()
defer st.mu.Unlock()
urlPrefix := "https://go-review.googlesource.com/#/q/"
var buf bytes.Buffer
fmt.Fprintf(&buf, "<a href='https://github.com/golang/go/wiki/DashboardBuilders'>%s</a> rev <a href='%s%s'>%s</a>",
st.Name, urlPrefix, st.Rev, st.Rev[:8])
if st.IsSubrepo() {
fmt.Fprintf(&buf, " (sub-repo %s rev <a href='%s%s'>%s</a>)",
st.SubName, urlPrefix, st.SubRev, st.SubRev[:8])
}
if ts := st.trySet; ts != nil {
fmt.Fprintf(&buf, " (<a href='/try?commit=%v'>trybot set</a> for <a href='https://go-review.googlesource.com/#/q/%s'>%s</a>)",
ts.Commit[:8],
ts.ChangeTriple(), ts.ChangeID[:8])
}
var state string
if st.done.IsZero() {
state = "running"
} else if st.succeeded {
state = "succeeded"
} else {
state = "<font color='#700000'>failed</font>"
}
if full {
fmt.Fprintf(&buf, "; <a href='%s'>%s</a>; %s", st.logsURLLocked(), state, html.EscapeString(st.bc.String()))
} else {
fmt.Fprintf(&buf, "; <a href='%s'>%s</a>", st.logsURLLocked(), state)
}
t := st.done
if t.IsZero() {
t = st.startTime
}
fmt.Fprintf(&buf, ", %v ago", time.Since(t))
if full {
buf.WriteByte('\n')
st.writeEventsLocked(&buf, true)
}
return template.HTML(buf.String())
}
func (st *buildStatus) logsURLLocked() string {
var urlPrefix string
if buildEnv == buildenv.Production {
urlPrefix = "https://farmer.golang.org"
} else {
urlPrefix = "http://" + buildEnv.StaticIP
}
if *mode == "dev" {
urlPrefix = "https://localhost:8119"
}
u := fmt.Sprintf("%v/temporarylogs?name=%s&rev=%s&st=%p", urlPrefix, st.Name, st.Rev, st)
if st.IsSubrepo() {
u += fmt.Sprintf("&subName=%v&subRev=%v", st.SubName, st.SubRev)
}
return u
}
// st.mu must be held.
func (st *buildStatus) writeEventsLocked(w io.Writer, htmlMode bool) {
var lastT time.Time
for _, evt := range st.events {
lastT = evt.t
e := evt.evt
text := evt.text
if htmlMode {
if e == "running_exec" {
e = fmt.Sprintf("<a href='%s'>%s</a>", st.logsURLLocked(), e)
}
e = "<b>" + e + "</b>"
text = "<i>" + html.EscapeString(text) + "</i>"
}
fmt.Fprintf(w, " %v %s %s\n", evt.t.Format(time.RFC3339), e, text)
}
if st.isRunningLocked() {
fmt.Fprintf(w, " %7s (now)\n", fmt.Sprintf("+%0.1fs", time.Since(lastT).Seconds()))
}
}
func (st *buildStatus) logs() string {
return st.output.String()
}
func (st *buildStatus) Write(p []byte) (n int, err error) {
return st.output.Write(p)
}
func useGitMirror() bool |
var nl = []byte("\n")
// getRepoHead returns the commit hash of the latest master HEAD
// for the given repo ("go", "tools", "sys", etc).
func getRepoHead(repo string) (string, error) {
// This gRPC call should only take a couple milliseconds, but set some timeout
// to catch network problems. 5 seconds is overkill.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
res, err := maintnerClient.GetRef(ctx, &apipb.GetRefRequest{
GerritServer: "go.googlesource.com",
GerritProject: repo,
Ref: "refs/heads/master",
})
if err != nil {
return "", fmt.Errorf("looking up ref for %q: %v", repo, err)
}
if res.Value == "" {
return "", fmt.Errorf("no master ref found for %q", repo)
}
return res.Value, nil
}
// newFailureLogBlob creates a new object to record a public failure log.
// The objName should be a Google Cloud Storage object name.
// When developing on localhost, the WriteCloser may be of a different type.
func newFailureLogBlob(objName string) (obj io.WriteCloser, url_ string) {
if *mode == "dev" {
// TODO(bradfitz): write to disk or something, or
// something testable. Maybe memory.
return struct {
io.Writer
io.Closer
}{
os.Stderr,
ioutil.NopCloser(nil),
}, "devmode://fail-log/" + objName
}
if storageClient == nil {
panic("nil storageClient in newFailureBlob")
}
bucket := buildEnv.LogBucket
wr := storageClient.Bucket(bucket).Object(objName).NewWriter(context.Background())
wr.ContentType = "text/plain; charset=utf-8"
wr.ACL = append(wr.ACL, storage.ACLRule{
Entity: storage.AllUsers,
Role: storage.RoleReader,
})
return wr, fmt.Sprintf("https://storage.googleapis.com/%s/%s", bucket, objName)
}
func randHex(n int) string {
buf := make([]byte, n/2+1)
if _, err := rand.Read(buf); err != nil {
log.Fatalf("randHex: %v", err)
}
return fmt.Sprintf("%x", buf)[:n]
}
func skipBranchForBuilder(repo, branch, builder string) bool {
if strings.HasPrefix(builder, "darwin-") {
switch builder {
case "darwin-amd64-10_8", "darwin-amd64-10_10", "darwin-amd64-10_11",
"darwin-386-10_8", "darwin-386-10_10", "darwin-386-10_11":
// OS X before Sierra can build any branch.
// (We've never had a 10.9 builder.)
default:
// Sierra or after, however, requires Go 1.7 or newer:
switch branch {
case "release-branch.go1.6",
"release-branch.go1.5",
"release-branch.go1.4",
"release-branch.go1.3",
"release-branch.go1.2",
"release-branch.go1.1",
"release-branch.go1":
return true
}
}
}
return false
}
| {
return *mode != "dev"
} |
package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gnutls(AutotoolsPackage):
"""GnuTLS is a secure communications library implementing the SSL, TLS
and DTLS protocols and technologies around them. It provides a simple C
language application programming interface (API) to access the secure
communications protocols as well as APIs to parse and write X.509, PKCS
#12, OpenPGP and other required structures. It is aimed to be portable
and efficient with focus on security and interoperability."""
homepage = "http://www.gnutls.org"
url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v3.5/gnutls-3.5.19.tar.xz"
version('3.5.19', sha256='1936eb64f03aaefd6eb16cef0567457777618573826b94d03376bb6a4afadc44')
version('3.5.13', '4fd41ad86572933c2379b4cc321a0959')
version('3.5.10', '336c03a71ba90184ffd0388075dde504')
version('3.5.9', '0ab25eb6a1509345dd085bc21a387951')
version('3.3.9', 'ff61b77e39d09f1140ab5a9cf52c58b6')
variant('zlib', default=True, description='Enable zlib compression support')
# Note that version 3.3.9 of gnutls doesn't support nettle 3.0.
depends_on('nettle@:2.9', when='@3.3.9')
depends_on('nettle', when='@3.5:')
depends_on('libidn2@:2.0.99', when='@:3.5.99')
depends_on('zlib', when='+zlib')
depends_on('gettext')
depends_on('pkgconfig', type='build')
build_directory = 'spack-build'
def url_for_version(self, version):
url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v{0}/gnutls-{1}.tar.xz"
return url.format(version.up_to(2), version)
def | (self):
spec = self.spec
args = [
'--enable-static',
]
if spec.satisfies('@3.5:'):
# use shipped libraries, might be turned into variants
args.append('--with-included-libtasn1')
args.append('--with-included-unistring')
            args.append('--without-p11-kit')  # p11-kit: ...
if '+zlib' in spec:
args.append('--with-zlib')
else:
args.append('--without-zlib')
if self.run_tests:
args.extend([
'--enable-tests',
'--enable-valgrind-tests',
'--enable-full-test-suite',
])
else:
args.extend([
'--disable-tests',
'--disable-valgrind-tests',
'--disable-full-test-suite',
])
return args
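# Usage sketch (assumed commands, not part of the original package file): the
# +zlib variant above toggles --with-zlib / --without-zlib in configure_args,
# for example:
#   spack install gnutls@3.5.19 +zlib
#   spack install gnutls@3.5.19 ~zlib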
metadata.rs
use cargo::core::Workspace;
use cargo::ops::{output_metadata, OutputMetadataOptions, ExportInfo};
use cargo::util::important_paths::find_root_manifest_for_wd;
use cargo::util::{CliResult, Config};
#[derive(RustcDecodable)]
pub struct Options {
flag_color: Option<String>,
flag_features: Vec<String>,
flag_all_features: bool,
flag_format_version: u32,
flag_manifest_path: Option<String>,
flag_no_default_features: bool,
flag_no_deps: bool,
flag_quiet: Option<bool>,
flag_verbose: u32,
flag_frozen: bool,
flag_locked: bool,
}
pub const USAGE: &'static str = "
Output the resolved dependencies of a project, the concrete used versions
including overrides, in machine-readable format.
Usage:
cargo metadata [options]
Options:
-h, --help Print this message
--features FEATURES Space-separated list of features
--all-features Build all available features
--no-default-features Do not include the `default` feature
--no-deps Output information only about the root package
and don't fetch dependencies.
--manifest-path PATH Path to the manifest
--format-version VERSION Format version [default: 1]
Valid values: 1
-v, --verbose ... Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
--frozen Require Cargo.lock and cache are up to date
--locked Require Cargo.lock is up to date
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<ExportInfo>> {
config.configure(options.flag_verbose,
options.flag_quiet,
&options.flag_color,
options.flag_frozen,
options.flag_locked)?;
let manifest = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?;
let options = OutputMetadataOptions {
features: options.flag_features,
all_features: options.flag_all_features,
no_default_features: options.flag_no_default_features,
no_deps: options.flag_no_deps,
version: options.flag_format_version,
};
    let ws = Workspace::new(&manifest, config)?;
    let result = output_metadata(&ws, &options)?;
Ok(Some(result))
}
routers.go
/*
* NudmUEAU
*
* UDM UE Authentication Service
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package UEAuthentication
import (
"github.com/sirupsen/logrus"
"free5gc/src/udm/logger"
"net/http"
"strings"
"github.com/gin-gonic/gin"
)
var HttpLog *logrus.Entry
func init() {
HttpLog = logger.HttpLog
}
// Route is the information for every URI.
type Route struct {
// Name is the name of this Route.
Name string
// Method is the string for the HTTP method. ex) GET, POST etc..
Method string
// Pattern is the pattern of the URI.
Pattern string
// HandlerFunc is the handler function of this route.
HandlerFunc gin.HandlerFunc
}
// Routes is the list of the generated Route.
type Routes []Route
// NewRouter returns a new router.
func NewRouter() *gin.Engine {
router := gin.Default()
AddService(router)
return router
}
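// Usage sketch (assumed, not part of the generated code): build the engine and
// serve the UDM UEAU API; the port below is purely illustrative.
//
//	router := NewRouter()
//	_ = router.Run(":8080")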
func genAuthDataHandlerFunc(c *gin.Context) {
c.Params = append(c.Params, gin.Param{Key: "supiOrSuci", Value: c.Param("supi")})
if strings.ToUpper("Post") == c.Request.Method {
GenerateAuthData(c)
return
}
c.String(http.StatusNotFound, "404 page not found")
}
func AddService(engine *gin.Engine) *gin.RouterGroup {
group := engine.Group("/nudm-ueau/v1")
for _, route := range routes {
switch route.Method {
case "GET":
group.GET(route.Pattern, route.HandlerFunc)
case "POST":
group.POST(route.Pattern, route.HandlerFunc)
case "PUT":
group.PUT(route.Pattern, route.HandlerFunc)
case "DELETE":
group.DELETE(route.Pattern, route.HandlerFunc)
case "PATCH":
group.PATCH(route.Pattern, route.HandlerFunc)
}
}
var genAuthDataPath = "/:supi/security-information/generate-auth-data"
group.Any(genAuthDataPath, genAuthDataHandlerFunc)
return group
}
// Index is the index handler.
func Index(c *gin.Context) {
c.String(http.StatusOK, "Hello World!")
}
var routes = Routes{
{
"Index",
"GET",
"/",
Index,
},
{
"ConfirmAuth",
strings.ToUpper("Post"),
"/:supi/auth-events",
ConfirmAuth,
},
}
var specialRoutes = Routes{
{
"GenerateAuthData",
strings.ToUpper("Post"),
"/:supiOrSuci/security-information/generate-auth-data",
GenerateAuthData,
},
}
Train_YOLO.py
"""
MODIFIED FROM keras-yolo3 PACKAGE, https://github.com/qqwweee/keras-yolo3
Retrain the YOLO model for your own dataset.
10-26-20 MODIFIED by bertelschmitt to use new repo name if changed to something else than "TrainYourOwnYOLO"
10-31-20 UPDATED by bertelschmitt to reflect TrainYourOwnYOLO versions as of 10-31-20
"""
import os
import sys
import argparse
import warnings
def get_parent_dir(n=1):
"""returns the n-th parent dicrectory of the current
working directory"""
current_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(n):
current_path = os.path.dirname(current_path)
return current_path
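# Illustration (assumed layout): if this script sits in /repo/2_Training/Train_YOLO.py,
# get_parent_dir(0) returns /repo/2_Training and get_parent_dir(1) returns /repo.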
src_path = os.path.join(get_parent_dir(0), "src")
sys.path.append(src_path)
utils_path = os.path.join(get_parent_dir(1), "Utils")
sys.path.append(utils_path)
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import (
TensorBoard,
ModelCheckpoint,
ReduceLROnPlateau,
EarlyStopping,
)
from keras_yolo3.yolo3.model import (
preprocess_true_boxes,
yolo_body,
tiny_yolo_body,
yolo_loss,
)
from keras_yolo3.yolo3.utils import get_random_data
from PIL import Image
from time import time
import tensorflow.compat.v1 as tf
import pickle
from Train_Utils import (
get_classes,
get_anchors,
create_model,
create_tiny_model,
data_generator,
data_generator_wrapper,
ChangeToOtherMachine,
)
keras_path = os.path.join(src_path, "keras_yolo3")
Data_Folder = os.path.join(get_parent_dir(1), "Data")
Image_Folder = os.path.join(Data_Folder, "Source_Images", "Training_Images")
VoTT_Folder = os.path.join(Image_Folder, "vott-csv-export")
YOLO_filename = os.path.join(VoTT_Folder, "data_train.txt")
Model_Folder = os.path.join(Data_Folder, "Model_Weights")
YOLO_classname = os.path.join(Model_Folder, "data_classes.txt")
log_dir = Model_Folder
anchors_path = os.path.join(keras_path, "model_data", "yolo_anchors.txt")
weights_path = os.path.join(keras_path, "yolo.h5")
#10-26-20 get name of current repo, which should be the directory one down from ours
current_repo = get_parent_dir(1).rsplit('/', 1)[1]
FLAGS = None
if __name__ == "__main__":
# Delete all default flags
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
"""
Command line options
"""
parser.add_argument(
"--annotation_file",
type=str,
default=YOLO_filename,
help="Path to annotation file for Yolo. Default is " + YOLO_filename,
)
parser.add_argument(
"--classes_file",
type=str,
default=YOLO_classname,
help="Path to YOLO classnames. Default is " + YOLO_classname,
)
parser.add_argument(
"--log_dir",
type=str,
default=log_dir,
help="Folder to save training logs and trained weights to. Default is "
+ log_dir,
)
parser.add_argument(
"--anchors_path",
type=str,
default=anchors_path,
help="Path to YOLO anchors. Default is " + anchors_path,
)
parser.add_argument(
"--weights_path",
type=str,
default=weights_path,
help="Path to pre-trained YOLO weights. Default is " + weights_path,
)
parser.add_argument(
"--val_split",
type=float,
default=0.1,
help="Percentage of training set to be used for validation. Default is 10%.",
)
parser.add_argument(
"--is_tiny",
default=False,
action="store_true",
help="Use the tiny Yolo version for better performance and less accuracy. Default is False.",
)
parser.add_argument(
"--random_seed",
type=float,
default=None,
help="Random seed value to make script deterministic. Default is 'None', i.e. non-deterministic.",
)
parser.add_argument(
"--epochs",
type=int,
default=51,
help="Number of epochs for training last layers and number of epochs for fine-tuning layers. Default is 51.",
)
parser.add_argument(
"--warnings",
default=False,
action="store_true",
help="Display warning messages. Default is False.",
)
FLAGS = parser.parse_args()
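    # Example invocation (illustrative values, not from the original script):
    #   python Train_YOLO.py --epochs 102 --val_split 0.1 --is_tiny
    # Flags that are omitted fall back to the defaults declared above.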
if not FLAGS.warnings:
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
warnings.filterwarnings("ignore")
#Backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS
# Get WandB integration if setup
try:
import wandb
from wandb.integration.keras import WandbCallback # type: ignore
wandb.ensure_configured()
if wandb.api.api_key is None:
_has_wandb = False
wandb.termwarn(
"W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable."
)
else:
_has_wandb = False if os.getenv("WANDB_DISABLED") else True
except (ImportError, AttributeError):
_has_wandb = False
np.random.seed(FLAGS.random_seed)
log_dir = FLAGS.log_dir
class_names = get_classes(FLAGS.classes_file)
num_classes = len(class_names)
if FLAGS.is_tiny and FLAGS.weights_path == weights_path:
weights_path = os.path.join(os.path.dirname(FLAGS.weights_path), "yolo-tiny.h5")
if FLAGS.is_tiny and FLAGS.anchors_path == anchors_path:
anchors_path = os.path.join(
os.path.dirname(FLAGS.anchors_path), "yolo-tiny_anchors.txt"
)
anchors = get_anchors(anchors_path)
input_shape = (416, 416) # multiple of 32, height, width
epoch1, epoch2 = FLAGS.epochs, FLAGS.epochs
is_tiny_version = len(anchors) == 6 # default setting
if FLAGS.is_tiny:
model = create_tiny_model(
input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path
)
else:
model = create_model(
input_shape, anchors, num_classes, freeze_body=2, weights_path=weights_path
) # make sure you know what you freeze
log_dir_time = os.path.join(log_dir, "{}".format(int(time())))
logging = TensorBoard(log_dir=log_dir_time)
checkpoint = ModelCheckpoint(
os.path.join(log_dir, "checkpoint.h5"),
monitor="val_loss",
        save_weights_only=True,
        save_best_only=True,
        period=5,
    )
    reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(
monitor="val_loss", min_delta=0, patience=10, verbose=1
)
val_split = FLAGS.val_split
with open(FLAGS.annotation_file) as f:
lines = f.readlines()
# This step makes sure that the path names correspond to the local machine
# This is important if annotation and training are done on different machines (e.g. training on AWS)
# 10-26-20 Changed by bertelschmitt to call with current_repo
lines = ChangeToOtherMachine(lines, remote_machine="", repo=current_repo)
np.random.shuffle(lines)
num_val = int(len(lines) * val_split)
num_train = len(lines) - num_val
# From here on down, all backported w/o change 10/31/20 from TrainYourOwnYOLO version as of 10/31/20 by BS
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a decent model.
frozen_callbacks = [logging, checkpoint]
if _has_wandb:
wandb.init(
project="TrainYourOwnYOLO", config=vars(FLAGS), sync_tensorboard=False
)
wandb_callback = WandbCallback(save_model=False)
frozen_callbacks.append(wandb_callback)
model.compile(
optimizer=Adam(lr=1e-3),
loss={
# use custom yolo_loss Lambda layer.
"yolo_loss": lambda y_true, y_pred: y_pred
},
)
batch_size = 32
print(
"Train on {} samples, val on {} samples, with batch size {}.".format(
num_train, num_val, batch_size
)
)
history = model.fit_generator(
data_generator_wrapper(
lines[:num_train], batch_size, input_shape, anchors, num_classes
),
steps_per_epoch=max(1, num_train // batch_size),
validation_data=data_generator_wrapper(
lines[num_train:], batch_size, input_shape, anchors, num_classes
),
validation_steps=max(1, num_val // batch_size),
epochs=epoch1,
initial_epoch=0,
callbacks=frozen_callbacks,
)
model.save_weights(os.path.join(log_dir, "trained_weights_stage_1.h5"))
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is unsatisfactory.
full_callbacks = [logging, checkpoint, reduce_lr, early_stopping]
if _has_wandb:
full_callbacks.append(wandb_callback)
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(
optimizer=Adam(lr=1e-4), loss={"yolo_loss": lambda y_true, y_pred: y_pred}
) # recompile to apply the change
print("Unfreeze all layers.")
batch_size = 4 # note that more GPU memory is required after unfreezing the body
print(
"Train on {} samples, val on {} samples, with batch size {}.".format(
num_train, num_val, batch_size
)
)
history = model.fit_generator(
data_generator_wrapper(
lines[:num_train], batch_size, input_shape, anchors, num_classes
),
steps_per_epoch=max(1, num_train // batch_size),
validation_data=data_generator_wrapper(
lines[num_train:], batch_size, input_shape, anchors, num_classes
),
validation_steps=max(1, num_val // batch_size),
epochs=epoch1 + epoch2,
initial_epoch=epoch1,
callbacks=full_callbacks,
)
    model.save_weights(os.path.join(log_dir, "trained_weights_final.h5"))
addon_validator_test.go
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package addon
import (
"fmt"
"reflect"
"strings"
"testing"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic/fake"
addonmgrv1alpha1 "github.com/keikoproj/addon-manager/api/v1alpha1"
)
var dynClient = fake.NewSimpleDynamicClient(runtime.NewScheme())
func TestNewAddonValidator(t *testing.T) {
	var cache = NewAddonVersionCacheClient()
	var addon = &addonmgrv1alpha1.Addon{}
	type args struct {
		addon *addonmgrv1alpha1.Addon
	}
	tests := []struct {
		name string
		args args
		want *addonValidator
	}{
		{name: "test-valid", args: args{addon: addon}, want: &addonValidator{cache: cache, addon: addon, dynClient: dynClient}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := NewAddonValidator(tt.args.addon, cache, dynClient); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewAddonValidator() = %v, want %v", got, tt.want)
			}
		})
	}
}
func Test_addonValidator_Validate_Workflow_Template(t *testing.T) {
var cache = NewAddonVersionCacheClient()
type fields struct {
addon *addonmgrv1alpha1.Addon
}
tests := []struct {
name string
fields fields
want bool
wantErr bool
}{
{name: "workflow-template-valid", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Install: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
spec:
entrypoint: entry
serviceAccountName: addon-manager-workflow-installer-sa
templates:
- name: entry
steps:
- - name: prereq-resources
template: submit
arguments:
artifacts:
- name: doc
path: /tmp/doc
raw:
data: |
apiVersion: v1
kind: Namespace
metadata:
name: "{{workflow.parameters.namespace}}"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: event-router-sa
namespace: "{{workflow.parameters.namespace}}"
---
apiVersion: v1
data:
config.json: |-
{
"sink": "stdout"
}
kind: ConfigMap
metadata:
name: event-router-cm
namespace: "{{workflow.parameters.namespace}}"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: event-router-cr
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: event-router-crb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: event-router-cr
subjects:
- kind: ServiceAccount
name: event-router-sa
namespace: "{{workflow.parameters.namespace}}"
- name: submit
inputs:
artifacts:
- name: doc
path: /tmp/doc
container:
image: expert360/kubectl-awscli:v1.11.2
command: [sh, -c]
args: ["kubectl apply -f /tmp/doc"]
`,
},
},
},
}}, want: true, wantErr: false},
{name: "workflow-template-valid-empty-template", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Install: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: "",
},
},
},
}}, want: true, wantErr: false},
{name: "workflow-template-invalid-kind", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Prereqs: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: `
apiVersion: argoproj.io/v1alpha1
kind: Workflowz
spec:
entrypoint: entry
serviceAccountName: addon-manager-workflow-installer-sa
`,
},
},
},
}}, want: false, wantErr: true},
{name: "workflow-template-invalid-missing-spec", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Delete: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
`,
},
},
},
}}, want: false, wantErr: true},
{name: "workflow-invalid-missing-namespace", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Data: map[string]addonmgrv1alpha1.FlexString{
"foo": "difval",
},
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Delete: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
spec:
entrypoint: entry
serviceAccountName: addon-manager-workflow-installer-sa
templates:
- name: entry
steps:
- - name: prereq-resources
template: submit
arguments:
parameters:
- name: foo
value: bar
`,
},
},
},
}}, want: false, wantErr: true},
{name: "workflow-invalid-overlapping-params", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
Data: map[string]addonmgrv1alpha1.FlexString{
"foo": "difval",
},
},
Lifecycle: addonmgrv1alpha1.LifecycleWorkflowSpec{
Delete: addonmgrv1alpha1.WorkflowType{
NamePrefix: "test",
Role: "arn:12345",
Template: `
apiVersion: argoproj.io/v1alpha1
kind: Workflow
spec:
entrypoint: entry
serviceAccountName: addon-manager-workflow-installer-sa
arguments:
parameters:
- name: foo
value: bar
templates:
- name: entry
steps:
- - name: prereq-resources
template: submit
`,
},
},
},
}}, want: false, wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
av := &addonValidator{
addon: tt.fields.addon,
cache: cache,
dynClient: dynClient,
}
got, err := av.Validate()
if (err != nil) != tt.wantErr {
t.Errorf("addonValidator.Validate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("addonValidator.Validate() = %v, want %v", got, tt.want)
}
})
}
}
func Test_addonValidator_Validate_Fail_NameLength(t *testing.T) {
var cache = NewAddonVersionCacheClient()
type fields struct {
addon *addonmgrv1alpha1.Addon
}
tests := []struct {
name string
fields fields
want bool
wantErr bool
}{
// TODO: Add test cases.
{name: "addon-fails-with-name-too-long", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: false, wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
av := &addonValidator{
addon: tt.fields.addon,
cache: cache,
dynClient: dynClient,
}
got, err := av.Validate()
if (err != nil) != tt.wantErr {
t.Errorf("addonValidator.Validate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("addonValidator.Validate() = %v, want %v", got, tt.want)
}
})
}
}
func Test_addonValidator_Validate_NoDeps(t *testing.T) {
var cache = NewAddonVersionCacheClient()
type fields struct {
addon *addonmgrv1alpha1.Addon
}
tests := []struct {
name string
fields fields
want bool
wantErr bool
}{
// TODO: Add test cases.
{name: "addon-validates-no-dependencies", fields: fields{addon: &addonmgrv1alpha1.Addon{Spec: addonmgrv1alpha1.AddonSpec{Params: addonmgrv1alpha1.AddonParams{Namespace: "addon-test-ns"}}}}, want: true, wantErr: false},
{name: "addon-fails-with-uninstalled-dependencies", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: false, wantErr: true},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
av := &addonValidator{
addon: tt.fields.addon,
cache: cache,
dynClient: dynClient,
}
got, err := av.Validate()
if (err != nil) != tt.wantErr {
t.Errorf("addonValidator.Validate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("addonValidator.Validate() = %v, want %v", got, tt.want)
}
})
}
}
func Test_addonValidator_Validate_With_Installed_Deps(t *testing.T) {
// Pre-cache installed dependencies
var (
cache = NewAddonVersionCacheClient()
versionA = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/A",
PkgVersion: "v1.0.0",
PkgDeps: map[string]string{
"core/C": "v1.2.0",
},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
}
versionB = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/B",
PkgVersion: "v1.0.0",
PkgDeps: map[string]string{
"core/C": "v1.2.0",
},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
}
versionC = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/C",
PkgVersion: "v1.2.0",
},
PkgPhase: addonmgrv1alpha1.Succeeded,
}
versionD = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/D",
PkgVersion: "v1.3.0",
},
PkgPhase: addonmgrv1alpha1.Pending,
}
versionE = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/E",
PkgVersion: "v1.2.0",
},
PkgPhase: addonmgrv1alpha1.Failed,
}
versionF = Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/F",
PkgVersion: "v1.2.0",
PkgDeps: map[string]string{
"core/E": "v1.2.0",
},
},
}
)
cache.AddVersion(versionA)
cache.AddVersion(versionB)
cache.AddVersion(versionC)
cache.AddVersion(versionD)
cache.AddVersion(versionE)
cache.AddVersion(versionF)
type fields struct {
addon *addonmgrv1alpha1.Addon
}
tests := []struct {
name string
fields fields
want bool
wantErr bool
errStartsWith string
}{
{name: "addon-validates-dependencies", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: true, wantErr: false},
{name: "addon-fails-with-uninstalled-dependencies", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.1",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: false, wantErr: true, errStartsWith: "required dependency is not installed: unable to resolve required dependency"},
{name: "addon-throws-error-for-dependencies-in-pending-state", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/D": "v1.3.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: false, wantErr: true, errStartsWith: ErrDepPending},
{name: "addon-fails-with-failed-dependencies", fields: fields{addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/F": "v1.2.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
}}, want: false, wantErr: true, errStartsWith: ErrDepNotInstalled},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
av := &addonValidator{
addon: tt.fields.addon,
cache: cache,
dynClient: dynClient,
}
got, err := av.Validate()
if tt.wantErr && err == nil {
t.Errorf("addonValidator.Validate() Errors want = %t, got = nil", tt.wantErr)
}
if tt.wantErr && err != nil {
if !strings.HasPrefix(err.Error(), tt.errStartsWith) {
t.Errorf("addonValidator.Validate() Error Message want = %q, got = %q", tt.errStartsWith, err)
}
}
if got != tt.want {
t.Errorf("addonValidator.Validate() Output want = %v, got = %v", tt.want, got)
}
})
}
}
func Test_addonValidator_validateDependencies(t *testing.T) {
var cache = NewAddonVersionCacheClient()
type fields struct {
addon *addonmgrv1alpha1.Addon
}
tests := []struct {
name string
fields fields
wantErr bool
}{
// TODO: Add test cases.
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
av := &addonValidator{
addon: tt.fields.addon,
cache: cache,
dynClient: dynClient,
}
if err := av.validateDependencies(); (err != nil) != tt.wantErr {
t.Errorf("addonValidator.validateDependencies() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
func Test_resolveDependencies(t *testing.T) {
g := gomega.NewGomegaWithT(t)
cached := NewAddonVersionCacheClient()
// Add core/A
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/A",
PkgVersion: "1.0.3",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Pending,
})
// Add core/B
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/B",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Pending,
})
// Add core/C
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/C",
PkgVersion: "1.0.1",
},
PkgPhase: addonmgrv1alpha1.Succeeded,
})
av := &addonValidator{
addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
},
cache: cached,
dynClient: dynClient,
}
var visited = make(map[string]*Version)
g.Expect(av.resolveDependencies(&Version{
PackageSpec: av.addon.GetPackageSpec(),
PkgPhase: addonmgrv1alpha1.Pending,
}, visited, 0)).Should(gomega.BeNil(), "Should validate")
}
func Test_resolveDependencies_Fail(t *testing.T) {
g := gomega.NewGomegaWithT(t)
cached := NewAddonVersionCacheClient()
// Add core/A
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/A",
PkgVersion: "1.0.3",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Pending,
})
// Add core/B
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/B",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Pending,
})
// Add core/C
cached.AddVersion(Version{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/C",
PkgVersion: "1.0.1",
PkgDeps: map[string]string{
"core/A": "*", // Invalid cyclic dependency
},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
})
av := &addonValidator{
addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
},
cache: cached,
dynClient: dynClient,
}
var visited = make(map[string]*Version)
g.Expect(av.resolveDependencies(&Version{
PackageSpec: av.addon.GetPackageSpec(),
PkgPhase: addonmgrv1alpha1.Pending,
}, visited, 0)).ShouldNot(gomega.Succeed(), "Should not validate")
}
func Test_validateDuplicate_Fail(t *testing.T) {
g := gomega.NewGomegaWithT(t)
cached := NewAddonVersionCacheClient()
// Add core/A
cached.AddVersion(Version{
Name: "core-a",
Namespace: "default",
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/A",
PkgVersion: "1.0.3",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
})
// Add core/B
cached.AddVersion(Version{
Name: "core-b",
Namespace: "default",
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "core/B",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/C": "*",
},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
})
// Add test-addon-1
cached.AddVersion(Version{
Name: "test-addon-1",
Namespace: "default",
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{},
},
PkgPhase: addonmgrv1alpha1.Succeeded,
})
av := &addonValidator{
addon: &addonmgrv1alpha1.Addon{
ObjectMeta: metav1.ObjectMeta{Name: "test-addon-2", Namespace: "default"},
Spec: addonmgrv1alpha1.AddonSpec{
PackageSpec: addonmgrv1alpha1.PackageSpec{
PkgType: addonmgrv1alpha1.CompositePkg,
// Duplicate package name and version
PkgName: "test/addon-1",
PkgVersion: "1.0.0",
PkgDeps: map[string]string{
"core/A": "*",
"core/B": "v1.0.0",
},
},
Params: addonmgrv1alpha1.AddonParams{
Namespace: "addon-test-ns",
},
},
},
cache: cached,
dynClient: dynClient,
}
err := av.validateDuplicate(&Version{
Name: av.addon.Name,
Namespace: av.addon.Namespace,
PackageSpec: av.addon.GetPackageSpec(),
PkgPhase: addonmgrv1alpha1.Pending,
})
errMsg := fmt.Sprintf("package version %s:%s already exists and cannot be installed as a duplicate", av.addon.Spec.PkgName, av.addon.Spec.PkgVersion)
g.Expect(err).Should(gomega.HaveOccurred(), "Should not validate")
g.Expect(err).Should(gomega.MatchError(errMsg))
}
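// Running these tests (package path assumed, not stated in this file):
//   go test ./pkg/addon/... -run 'Validator|resolveDependencies|validateDuplicate'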
DescribeRPSDKRequest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudauth.endpoint import endpoint_data
class DescribeRPSDKRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cloudauth', '2019-03-07', 'DescribeRPSDK','cloudauth')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang)
def get_TaskId(self):
return self.get_query_params().get('TaskId')
	def set_TaskId(self,TaskId):
		self.add_query_param('TaskId',TaskId)
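# Usage sketch (assumed client setup; credentials and region are placeholders):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = DescribeRPSDKRequest()
#   request.set_Lang('en')
#   response = client.do_action_with_exception(request)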
|
cloudtasks-gen.go
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
// Package cloudtasks provides access to the Cloud Tasks API.
//
// This package is DEPRECATED. Use package cloud.google.com/go/cloudtasks/apiv2beta2 instead.
//
// For product documentation, see: https://cloud.google.com/tasks/
//
// Creating a client
//
// Usage example:
//
// import "google.golang.org/api/cloudtasks/v2"
// ...
// ctx := context.Background()
// cloudtasksService, err := cloudtasks.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// cloudtasksService, err := cloudtasks.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// cloudtasksService, err := cloudtasks.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package cloudtasks // import "google.golang.org/api/cloudtasks/v2"
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
googleapi "google.golang.org/api/googleapi"
gensupport "google.golang.org/api/internal/gensupport"
option "google.golang.org/api/option"
internaloption "google.golang.org/api/option/internaloption"
htransport "google.golang.org/api/transport/http"
)
// Always reference these packages, just in case the auto-generated code
// below doesn't.
var _ = bytes.NewBuffer
var _ = strconv.Itoa
var _ = fmt.Sprintf
var _ = json.NewDecoder
var _ = io.Copy
var _ = url.Parse
var _ = gensupport.MarshalJSON
var _ = googleapi.Version
var _ = errors.New
var _ = strings.Replace
var _ = context.Canceled
var _ = internaloption.WithDefaultEndpoint
const apiId = "cloudtasks:v2"
const apiName = "cloudtasks"
const apiVersion = "v2"
const basePath = "https://cloudtasks.googleapis.com/"
const mtlsBasePath = "https://cloudtasks.mtls.googleapis.com/"
// OAuth2 scopes used by this API.
const (
// View and manage your data across Google Cloud Platform services
CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
)
// NewService creates a new Service.
func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) {
scopesOption := option.WithScopes(
"https://www.googleapis.com/auth/cloud-platform",
)
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
s, err := New(client)
if err != nil {
return nil, err
}
if endpoint != "" {
s.BasePath = endpoint
}
return s, nil
}
// New creates a new Service. It uses the provided http.Client for requests.
//
// Deprecated: please use NewService instead.
// To provide a custom HTTP client, use option.WithHTTPClient.
// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.
func New(client *http.Client) (*Service, error) {
if client == nil {
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
s.Projects = NewProjectsService(s)
return s, nil
}
type Service struct {
client *http.Client
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
Projects *ProjectsService
}
func (s *Service) userAgent() string {
if s.UserAgent == "" {
return googleapi.UserAgent
}
return googleapi.UserAgent + " " + s.UserAgent
}
func NewProjectsService(s *Service) *ProjectsService {
rs := &ProjectsService{s: s}
rs.Locations = NewProjectsLocationsService(s)
return rs
}
type ProjectsService struct {
s *Service
Locations *ProjectsLocationsService
}
func NewProjectsLocationsService(s *Service) *ProjectsLocationsService {
rs := &ProjectsLocationsService{s: s}
rs.Queues = NewProjectsLocationsQueuesService(s)
return rs
}
type ProjectsLocationsService struct {
s *Service
Queues *ProjectsLocationsQueuesService
}
func NewProjectsLocationsQueuesService(s *Service) *ProjectsLocationsQueuesService {
rs := &ProjectsLocationsQueuesService{s: s}
rs.Tasks = NewProjectsLocationsQueuesTasksService(s)
return rs
}
type ProjectsLocationsQueuesService struct {
s *Service
Tasks *ProjectsLocationsQueuesTasksService
}
func NewProjectsLocationsQueuesTasksService(s *Service) *ProjectsLocationsQueuesTasksService {
	rs := &ProjectsLocationsQueuesTasksService{s: s}
	return rs
}
type ProjectsLocationsQueuesTasksService struct {
s *Service
}
// AppEngineHttpRequest: App Engine HTTP request. The message defines
// the HTTP request that is sent to an App Engine app when the task is
// dispatched. Using AppEngineHttpRequest requires
// [`appengine.applications.get`](https://cloud.google.com/appengine/docs
// /admin-api/access-control) Google IAM permission for the project and
// the following scope: `https://www.googleapis.com/auth/cloud-platform`
// The task will be delivered to the App Engine app which belongs to the
// same project as the queue. For more information, see [How Requests
// are
// Routed](https://cloud.google.com/appengine/docs/standard/python/how-re
// quests-are-routed) and how routing is affected by [dispatch
// files](https://cloud.google.com/appengine/docs/python/config/dispatchr
// ef). Traffic is encrypted during transport and never leaves Google
// datacenters. Because this traffic is carried over a communication
// mechanism internal to Google, you cannot explicitly set the protocol
// (for example, HTTP or HTTPS). The request to the handler, however,
// will appear to have used the HTTP protocol. The AppEngineRouting used
// to construct the URL that the task is delivered to can be set at the
// queue-level or task-level: * If app_engine_routing_override is set on
// the queue, this value is used for all tasks in the queue, no matter
// what the setting is for the task-level app_engine_routing. The `url`
// that the task will be sent to is: * `url =` host `+` relative_uri
// Tasks can be dispatched to secure app handlers, unsecure app
// handlers, and URIs restricted with [`login:
// admin`](https://cloud.google.com/appengine/docs/standard/python/config
// /appref). Because tasks are not run as any user, they cannot be
// dispatched to URIs restricted with [`login:
// required`](https://cloud.google.com/appengine/docs/standard/python/con
// fig/appref) Task dispatches also do not follow redirects. The task
// attempt has succeeded if the app's request handler returns an HTTP
// response code in the range [`200` - `299`]. The task attempt has
// failed if the app's handler returns a non-2xx response code or Cloud
// Tasks does not receive response before the deadline. Failed tasks
// will be retried according to the retry configuration. `503` (Service
// Unavailable) is considered an App Engine system error instead of an
// application error and will cause Cloud Tasks' traffic congestion
// control to temporarily throttle the queue's dispatches. Unlike other
// types of task targets, a `429` (Too Many Requests) response from an
// app handler does not cause traffic congestion control to throttle the
// queue.
type AppEngineHttpRequest struct {
// AppEngineRouting: Task-level setting for App Engine routing. * If
// app_engine_routing_override is set on the queue, this value is used
// for all tasks in the queue, no matter what the setting is for the
// task-level app_engine_routing.
AppEngineRouting *AppEngineRouting `json:"appEngineRouting,omitempty"`
// Body: HTTP request body. A request body is allowed only if the HTTP
// method is POST or PUT. It is an error to set a body on a task with an
// incompatible HttpMethod.
Body string `json:"body,omitempty"`
// Headers: HTTP request headers. This map contains the header field
// names and values. Headers can be set when the task is created.
// Repeated headers are not supported but a header value can contain
// commas. Cloud Tasks sets some headers to default values: *
// `User-Agent`: By default, this header is "AppEngine-Google;
// (+http://code.google.com/appengine)". This header can be modified,
// but Cloud Tasks will append "AppEngine-Google;
// (+http://code.google.com/appengine)" to the modified `User-Agent`.
// If the task has a body, Cloud Tasks sets the following headers: *
// `Content-Type`: By default, the `Content-Type` header is set to
// "application/octet-stream". The default can be overridden by
// explicitly setting `Content-Type` to a particular media type when the
// task is created. For example, `Content-Type` can be set to
// "application/json". * `Content-Length`: This is computed by Cloud
// Tasks. This value is output only. It cannot be changed. The headers
// below cannot be set or overridden: * `Host` * `X-Google-*` *
// `X-AppEngine-*` In addition, Cloud Tasks sets some headers when the
// task is dispatched, such as headers containing information about the
// task; see [request
// headers](https://cloud.google.com/tasks/docs/creating-appengine-handle
// rs#reading_request_headers). These headers are set only when the task
// is dispatched, so they are not visible when the task is returned in a
// Cloud Tasks response. Although there is no specific limit for the
// maximum number of headers or the size, there is a limit on the
// maximum size of the Task. For more information, see the CreateTask
// documentation.
Headers map[string]string `json:"headers,omitempty"`
// HttpMethod: The HTTP method to use for the request. The default is
// POST. The app's request handler for the task's target URL must be
// able to handle HTTP requests with this http_method, otherwise the
// task attempt fails with error code 405 (Method Not Allowed). See
// [Writing a push task request
// handler](https://cloud.google.com/appengine/docs/java/taskqueue/push/c
// reating-handlers#writing_a_push_task_request_handler) and the App
// Engine documentation for your runtime on [How Requests are
// Handled](https://cloud.google.com/appengine/docs/standard/python3/how-
// requests-are-handled).
//
// Possible values:
// "HTTP_METHOD_UNSPECIFIED" - HTTP method unspecified
// "POST" - HTTP POST
// "GET" - HTTP GET
// "HEAD" - HTTP HEAD
// "PUT" - HTTP PUT
// "DELETE" - HTTP DELETE
// "PATCH" - HTTP PATCH
// "OPTIONS" - HTTP OPTIONS
HttpMethod string `json:"httpMethod,omitempty"`
// RelativeUri: The relative URI. The relative URI must begin with "/"
// and must be a valid HTTP relative URI. It can contain a path and
// query string arguments. If the relative URI is empty, then the root
// path "/" will be used. No spaces are allowed, and the maximum length
// allowed is 2083 characters.
RelativeUri string `json:"relativeUri,omitempty"`
// ForceSendFields is a list of field names (e.g. "AppEngineRouting") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineRouting") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *AppEngineHttpRequest) MarshalJSON() ([]byte, error) {
type NoMethod AppEngineHttpRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
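// Construction sketch (illustrative values only, not taken from this package's
// docs): a POST task routed to the default service; Body carries base64-encoded
// bytes, as required for JSON byte fields.
//
//	req := &AppEngineHttpRequest{
//		HttpMethod:  "POST",
//		RelativeUri: "/worker/process",
//		Headers:     map[string]string{"Content-Type": "application/json"},
//		Body:        base64.StdEncoding.EncodeToString([]byte(`{"id": 123}`)),
//	}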
// AppEngineRouting: App Engine Routing. Defines routing characteristics
// specific to App Engine - service, version, and instance. For more
// information about services, versions, and instances see [An Overview
// of App
// Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-
// app-engine), [Microservices Architecture on Google App
// Engine](https://cloud.google.com/appengine/docs/python/microservices-o
// n-app-engine), [App Engine Standard request
// routing](https://cloud.google.com/appengine/docs/standard/python/how-r
// equests-are-routed), and [App Engine Flex request
// routing](https://cloud.google.com/appengine/docs/flexible/python/how-r
// equests-are-routed). Using AppEngineRouting requires
// [`appengine.applications.get`](https://cloud.google.com/appengine/docs
// /admin-api/access-control) Google IAM permission for the project and
// the following scope: `https://www.googleapis.com/auth/cloud-platform`
type AppEngineRouting struct {
// Host: Output only. The host that the task is sent to. The host is
// constructed from the domain name of the app associated with the
// queue's project ID (for example .appspot.com), and the service,
// version, and instance. Tasks which were created using the App Engine
// SDK might have a custom domain name. For more information, see [How
// Requests are
// Routed](https://cloud.google.com/appengine/docs/standard/python/how-re
// quests-are-routed).
Host string `json:"host,omitempty"`
// Instance: App instance. By default, the task is sent to an instance
// which is available when the task is attempted. Requests can only be
// sent to a specific instance if [manual scaling is used in App Engine
// Standard](https://cloud.google.com/appengine/docs/python/an-overview-o
// f-app-engine?hl=en_US#scaling_types_and_instance_classes). App Engine
// Flex does not support instances. For more information, see [App
// Engine Standard request
// routing](https://cloud.google.com/appengine/docs/standard/python/how-r
// equests-are-routed) and [App Engine Flex request
// routing](https://cloud.google.com/appengine/docs/flexible/python/how-r
// equests-are-routed).
Instance string `json:"instance,omitempty"`
// Service: App service. By default, the task is sent to the service
// which is the default service when the task is attempted. For some
// queues or tasks which were created using the App Engine Task Queue
// API, host is not parsable into service, version, and instance. For
// example, some tasks which were created using the App Engine SDK use a
// custom domain name; custom domains are not parsed by Cloud Tasks. If
// host is not parsable, then service, version, and instance are the
// empty string.
Service string `json:"service,omitempty"`
// Version: App version. By default, the task is sent to the version
// which is the default version when the task is attempted. For some
// queues or tasks which were created using the App Engine Task Queue
// API, host is not parsable into service, version, and instance. For
// example, some tasks which were created using the App Engine SDK use a
// custom domain name; custom domains are not parsed by Cloud Tasks. If
// host is not parsable, then service, version, and instance are the
// empty string.
Version string `json:"version,omitempty"`
// ForceSendFields is a list of field names (e.g. "Host") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Host") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *AppEngineRouting) MarshalJSON() ([]byte, error) {
type NoMethod AppEngineRouting
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Attempt: The status of a task attempt.
type Attempt struct {
// DispatchTime: Output only. The time that this attempt was dispatched.
// `dispatch_time` will be truncated to the nearest microsecond.
DispatchTime string `json:"dispatchTime,omitempty"`
// ResponseStatus: Output only. The response from the worker for this
// attempt. If `response_time` is unset, then the task has not been
// attempted or is currently running and the `response_status` field is
// meaningless.
ResponseStatus *Status `json:"responseStatus,omitempty"`
// ResponseTime: Output only. The time that this attempt response was
// received. `response_time` will be truncated to the nearest
// microsecond.
ResponseTime string `json:"responseTime,omitempty"`
// ScheduleTime: Output only. The time that this attempt was scheduled.
// `schedule_time` will be truncated to the nearest microsecond.
ScheduleTime string `json:"scheduleTime,omitempty"`
// ForceSendFields is a list of field names (e.g. "DispatchTime") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DispatchTime") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Attempt) MarshalJSON() ([]byte, error) {
type NoMethod Attempt
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Binding: Associates `members` with a `role`.
type Binding struct {
// Condition: The condition that is associated with this binding. If the
// condition evaluates to `true`, then this binding applies to the
// current request. If the condition evaluates to `false`, then this
// binding does not apply to the current request. However, a different
// role binding might grant the same role to one or more of the members
// in this binding. To learn which resources support conditions in their
// IAM policies, see the [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-p
// olicies).
Condition *Expr `json:"condition,omitempty"`
// Members: Specifies the identities requesting access for a Cloud
// Platform resource. `members` can have the following values: *
// `allUsers`: A special identifier that represents anyone who is on the
// internet; with or without a Google account. *
// `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account. *
// `user:{emailid}`: An email address that represents a specific Google
// account. For example, `[email protected]` . *
// `serviceAccount:{emailid}`: An email address that represents a
// service account. For example,
// `[email protected]`. * `group:{emailid}`: An
// email address that represents a Google group. For example,
// `[email protected]`. * `deleted:user:{emailid}?uid={uniqueid}`: An
// email address (plus unique identifier) representing a user that has
// been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered
// user retains the role in the binding. *
// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
// (plus unique identifier) representing a service account that has been
// recently deleted. For example,
// `[email protected]?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains
// the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`:
// An email address (plus unique identifier) representing a Google group
// that has been recently deleted. For example,
// `[email protected]?uid=123456789012345678901`. If the group is
// recovered, this value reverts to `group:{emailid}` and the recovered
// group retains the role in the binding. * `domain:{domain}`: The G
// Suite domain (primary) that represents all the users of that domain.
// For example, `google.com` or `example.com`.
Members []string `json:"members,omitempty"`
// Role: Role that is assigned to `members`. For example,
// `roles/viewer`, `roles/editor`, or `roles/owner`.
Role string `json:"role,omitempty"`
// ForceSendFields is a list of field names (e.g. "Condition") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Condition") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Binding) MarshalJSON() ([]byte, error) {
type NoMethod Binding
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
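// Example literal (illustrative identities, not from this package): grant
// roles/viewer to a user and a service account.
//
//	b := &Binding{
//		Role:    "roles/viewer",
//		Members: []string{"user:[email protected]", "serviceAccount:[email protected]"},
//	}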
// CreateTaskRequest: Request message for CreateTask.
type CreateTaskRequest struct {
// ResponseView: The response_view specifies which subset of the Task
// will be returned. By default response_view is BASIC; not all
// information is retrieved by default because some data, such as
// payloads, might be desirable to return only when needed because of
// its large size or because of the sensitivity of data that it
// contains. Authorization for FULL requires `cloudtasks.tasks.fullView`
// [Google IAM](https://cloud.google.com/iam/) permission on the Task
// resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
ResponseView string `json:"responseView,omitempty"`
// Task: Required. The task to add. Task names have the following
// format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_
// ID`. The user can optionally specify a task name. If a name is not
// specified then the system will generate a random unique task id,
// which will be set in the task returned in the response. If
// schedule_time is not set or is in the past then Cloud Tasks will set
// it to the current time. Task De-duplication: Explicitly specifying a
// task ID enables task de-duplication. If a task's ID is identical to
// that of an existing task or a task that was deleted or executed
// recently then the call will fail with ALREADY_EXISTS. If the task's
// queue was created using Cloud Tasks, then another task with the same
// name can't be created for ~1hour after the original task was deleted
// or executed. If the task's queue was created using queue.yaml or
// queue.xml, then another task with the same name can't be created for
// ~9 days after the original task was deleted or executed. Because there

// is an extra lookup cost to identify duplicate task names, these
// CreateTask calls have significantly increased latency. Using hashed
// strings for the task id or for the prefix of the task id is
// recommended. Choosing task ids that are sequential or have sequential
// prefixes, for example using a timestamp, causes an increase in
// latency and error rates in all task commands. The infrastructure
// relies on an approximately uniform distribution of task ids to store
// and serve tasks efficiently.
Task *Task `json:"task,omitempty"`
// ForceSendFields is a list of field names (e.g. "ResponseView") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ResponseView") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *CreateTaskRequest) MarshalJSON() ([]byte, error) {
type NoMethod CreateTaskRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
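// exampleCreateTaskRequest is an illustrative sketch (not generated code)
// showing the shape of a CreateTaskRequest for an HTTP task. The URL is a
// placeholder, not a default; leaving Task.Name empty lets the service
// generate a unique task id, avoiding the de-duplication latency described
// above.
func exampleCreateTaskRequest() *CreateTaskRequest {
	return &CreateTaskRequest{
		ResponseView: "BASIC",
		Task: &Task{
			HttpRequest: &HttpRequest{
				HttpMethod: "POST",
				Url:        "https://example.com/task-handler",
			},
		},
	}
}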
// Empty: A generic empty message that you can re-use to avoid defining
// duplicated empty messages in your APIs. A typical example is to use
// it as the request or the response type of an API method. For
// instance: service Foo { rpc Bar(google.protobuf.Empty) returns
// (google.protobuf.Empty); } The JSON representation for `Empty` is
// empty JSON object `{}`.
type Empty struct {
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
}
// Expr: Represents a textual expression in the Common Expression
// Language (CEL) syntax. CEL is a C-like expression language. The
// syntax and semantics of CEL are documented at
// https://github.com/google/cel-spec. Example (Comparison): title:
// "Summary size limit" description: "Determines if a summary is less
// than 100 chars" expression: "document.summary.size() < 100" Example
// (Equality): title: "Requestor is owner" description: "Determines if
// requestor is the document owner" expression: "document.owner ==
// request.auth.claims.email" Example (Logic): title: "Public documents"
// description: "Determine whether the document should be publicly
// visible" expression: "document.type != 'private' && document.type !=
// 'internal'" Example (Data Manipulation): title: "Notification string"
// description: "Create a notification string with a timestamp."
// expression: "'New message received at ' +
// string(document.create_time)" The exact variables and functions that
// may be referenced within an expression are determined by the service
// that evaluates it. See the service documentation for additional
// information.
type Expr struct {
// Description: Optional. Description of the expression. This is a
// longer text which describes the expression, e.g. when hovered over it
// in a UI.
Description string `json:"description,omitempty"`
// Expression: Textual representation of an expression in Common
// Expression Language syntax.
Expression string `json:"expression,omitempty"`
// Location: Optional. String indicating the location of the expression
// for error reporting, e.g. a file name and a position in the file.
Location string `json:"location,omitempty"`
// Title: Optional. Title for the expression, i.e. a short string
// describing its purpose. This can be used, e.g., in UIs which allow
// entering the expression.
Title string `json:"title,omitempty"`
// ForceSendFields is a list of field names (e.g. "Description") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Description") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Expr) MarshalJSON() ([]byte, error) {
type NoMethod Expr
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
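// exampleExpr is an illustrative sketch (not generated code) that builds the
// "Summary size limit" comparison from the Expr documentation above as a Go
// literal; the CEL expression string itself is evaluated by the service that
// consumes it, not by this client.
func exampleExpr() *Expr {
	return &Expr{
		Title:       "Summary size limit",
		Description: "Determines if a summary is less than 100 chars",
		Expression:  "document.summary.size() < 100",
	}
}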
// GetIamPolicyRequest: Request message for `GetIamPolicy` method.
type GetIamPolicyRequest struct {
// Options: OPTIONAL: A `GetPolicyOptions` object for specifying options
// to `GetIamPolicy`.
Options *GetPolicyOptions `json:"options,omitempty"`
// ForceSendFields is a list of field names (e.g. "Options") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Options") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *GetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod GetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// GetPolicyOptions: Encapsulates settings provided to GetIamPolicy.
type GetPolicyOptions struct {
// RequestedPolicyVersion: Optional. The policy format version to be
// returned. Valid values are 0, 1, and 3. Requests specifying an
// invalid value will be rejected. Requests for policies with any
// conditional bindings must specify version 3. Policies without any
// conditional bindings may specify any valid value or leave the field
// unset. To learn which resources support conditions in their IAM
// policies, see the [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-p
// olicies).
RequestedPolicyVersion int64 `json:"requestedPolicyVersion,omitempty"`
// ForceSendFields is a list of field names (e.g.
// "RequestedPolicyVersion") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "RequestedPolicyVersion")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *GetPolicyOptions) MarshalJSON() ([]byte, error) {
type NoMethod GetPolicyOptions
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
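// exampleGetIamPolicyRequest is an illustrative sketch (not generated code).
// Because policies that contain conditional bindings must be requested with
// format version 3 to be returned in full, the sketch sets
// RequestedPolicyVersion to 3.
func exampleGetIamPolicyRequest() *GetIamPolicyRequest {
	return &GetIamPolicyRequest{
		Options: &GetPolicyOptions{RequestedPolicyVersion: 3},
	}
}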
// HttpRequest: HTTP request. The task will be pushed to the worker as
// an HTTP request. If the worker or the redirected worker acknowledges
// the task by returning a successful HTTP response code ([`200` -
// `299`]), the task will be removed from the queue. If any other HTTP
// response code is returned or no response is received, the task will
// be retried according to the following: * User-specified throttling:
// retry configuration, rate limits, and the queue's state. * System
// throttling: To prevent the worker from overloading, Cloud Tasks may
// temporarily reduce the queue's effective rate. User-specified
// settings will not be changed. System throttling happens because: *
// Cloud Tasks backs off on all errors. Normally the backoff specified
// in rate limits will be used. But if the worker returns `429` (Too
// Many Requests), `503` (Service Unavailable), or the rate of errors is
// high, Cloud Tasks will use a higher backoff rate. The retry specified
// in the `Retry-After` HTTP response header is considered. * To prevent
// traffic spikes and to smooth sudden increases in traffic, dispatches
// ramp up slowly when the queue is newly created or idle and if large
// numbers of tasks suddenly become available to dispatch (due to spikes
// in create task rates, the queue being unpaused, or many tasks that
// are scheduled at the same time).
type HttpRequest struct {
// Body: HTTP request body. A request body is allowed only if the HTTP
// method is POST, PUT, or PATCH. It is an error to set body on a task
// with an incompatible HttpMethod.
Body string `json:"body,omitempty"`
// Headers: HTTP request headers. This map contains the header field
// names and values. Headers can be set when the task is created. These
// headers represent a subset of the headers that will accompany the
// task's HTTP request. Some HTTP request headers will be ignored or
// replaced. A partial list of headers that will be ignored or replaced
// is: * Host: This will be computed by Cloud Tasks and derived from
// HttpRequest.url. * Content-Length: This will be computed by Cloud
// Tasks. * User-Agent: This will be set to "Google-Cloud-Tasks". *
// X-Google-*: Google use only. * X-AppEngine-*: Google use only.
// `Content-Type` won't be set by Cloud Tasks. You can explicitly set
// `Content-Type` to a media type when the task is created. For example,
// `Content-Type` can be set to "application/octet-stream" or
// "application/json". Headers which can have multiple values
// (according to RFC2616) can be specified using comma-separated values.
// The size of the headers must be less than 80KB.
Headers map[string]string `json:"headers,omitempty"`
// HttpMethod: The HTTP method to use for the request. The default is
// POST.
//
// Possible values:
// "HTTP_METHOD_UNSPECIFIED" - HTTP method unspecified
// "POST" - HTTP POST
// "GET" - HTTP GET
// "HEAD" - HTTP HEAD
// "PUT" - HTTP PUT
// "DELETE" - HTTP DELETE
// "PATCH" - HTTP PATCH
// "OPTIONS" - HTTP OPTIONS
HttpMethod string `json:"httpMethod,omitempty"`
// OauthToken: If specified, an [OAuth
// token](https://developers.google.com/identity/protocols/OAuth2) will
// be generated and attached as an `Authorization` header in the HTTP
// request. This type of authorization should generally only be used
// when calling Google APIs hosted on *.googleapis.com.
OauthToken *OAuthToken `json:"oauthToken,omitempty"`
// OidcToken: If specified, an
// [OIDC](https://developers.google.com/identity/protocols/OpenIDConnect)
// token will be generated and attached as an `Authorization` header in
// the HTTP request. This type of authorization can be used for many
// scenarios, including calling Cloud Run, or endpoints where you intend
// to validate the token yourself.
OidcToken *OidcToken `json:"oidcToken,omitempty"`
// Url: Required. The full url path that the request will be sent to.
// This string must begin with either "http://" or "https://". Some
// examples are: `http://acme.com` and `https://acme.com/sales:8080`.
// Cloud Tasks will encode some characters for safety and compatibility.
// The maximum allowed URL length is 2083 characters after encoding. The
// `Location` header response from a redirect response [`300` - `399`]
// may be followed. The redirect is not counted as a separate attempt.
Url string `json:"url,omitempty"`
// ForceSendFields is a list of field names (e.g. "Body") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Body") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *HttpRequest) MarshalJSON() ([]byte, error) {
type NoMethod HttpRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
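// exampleHttpRequest is an illustrative sketch (not generated code) of an
// HTTP task target authenticated with an OIDC token. The URL, audience, and
// service account email are placeholders, not defaults; Content-Type is set
// explicitly because, as documented above, Cloud Tasks does not set it.
func exampleHttpRequest() *HttpRequest {
	return &HttpRequest{
		HttpMethod: "POST",
		Url:        "https://example.com/task-handler",
		Headers:    map[string]string{"Content-Type": "application/json"},
		OidcToken: &OidcToken{
			ServiceAccountEmail: "[email protected]",
			Audience:            "https://example.com/task-handler",
		},
	}
}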
// ListLocationsResponse: The response message for
// Locations.ListLocations.
type ListLocationsResponse struct {
// Locations: A list of locations that matches the specified filter in
// the request.
Locations []*Location `json:"locations,omitempty"`
// NextPageToken: The standard List next-page token.
NextPageToken string `json:"nextPageToken,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Locations") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Locations") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListLocationsResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListLocationsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// ListQueuesResponse: Response message for ListQueues.
type ListQueuesResponse struct {
// NextPageToken: A token to retrieve next page of results. To return
// the next page of results, call ListQueues with this value as the
// page_token. If the next_page_token is empty, there are no more
// results. The page token is valid for only 2 hours.
NextPageToken string `json:"nextPageToken,omitempty"`
// Queues: The list of queues.
Queues []*Queue `json:"queues,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListQueuesResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListQueuesResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
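// collectQueues is an illustrative pagination sketch (not generated code).
// The fetch argument is a hypothetical stand-in for whatever issues the
// ListQueues call; the sketch only demonstrates how NextPageToken threads
// one page into the next and that an empty token ends the listing.
func collectQueues(fetch func(pageToken string) (*ListQueuesResponse, error)) ([]*Queue, error) {
	var all []*Queue
	token := ""
	for {
		resp, err := fetch(token)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Queues...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}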
// ListTasksResponse: Response message for listing tasks using
// ListTasks.
type ListTasksResponse struct {
// NextPageToken: A token to retrieve next page of results. To return
// the next page of results, call ListTasks with this value as the
// page_token. If the next_page_token is empty, there are no more
// results.
NextPageToken string `json:"nextPageToken,omitempty"`
// Tasks: The list of tasks.
Tasks []*Task `json:"tasks,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "NextPageToken") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "NextPageToken") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *ListTasksResponse) MarshalJSON() ([]byte, error) {
type NoMethod ListTasksResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Location: A resource that represents Google Cloud Platform location.
type Location struct {
// DisplayName: The friendly name for this location, typically a nearby
// city name. For example, "Tokyo".
DisplayName string `json:"displayName,omitempty"`
// Labels: Cross-service attributes for the location. For example
// {"cloud.googleapis.com/region": "us-east1"}
Labels map[string]string `json:"labels,omitempty"`
// LocationId: The canonical id for this location. For example:
// "us-east1".
LocationId string `json:"locationId,omitempty"`
// Metadata: Service-specific metadata. For example the available
// capacity at the given location.
Metadata googleapi.RawMessage `json:"metadata,omitempty"`
// Name: Resource name for the location, which may vary between
// implementations. For example:
// "projects/example-project/locations/us-east1"
Name string `json:"name,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "DisplayName") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "DisplayName") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Location) MarshalJSON() ([]byte, error) {
type NoMethod Location
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OAuthToken: Contains information needed for generating an [OAuth
// token](https://developers.google.com/identity/protocols/OAuth2). This
// type of authorization should generally only be used when calling
// Google APIs hosted on *.googleapis.com.
type OAuthToken struct {
// Scope: OAuth scope to be used for generating OAuth access token. If
// not specified, "https://www.googleapis.com/auth/cloud-platform" will
// be used.
Scope string `json:"scope,omitempty"`
// ServiceAccountEmail: [Service account
// email](https://cloud.google.com/iam/docs/service-accounts) to be used
// for generating OAuth token. The service account must be within the
// same project as the queue. The caller must have
// iam.serviceAccounts.actAs permission for the service account.
ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"`
// ForceSendFields is a list of field names (e.g. "Scope") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Scope") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OAuthToken) MarshalJSON() ([]byte, error) {
type NoMethod OAuthToken
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// OidcToken: Contains information needed for generating an [OpenID
// Connect
// token](https://developers.google.com/identity/protocols/OpenIDConnect)
// . This type of authorization can be used for many scenarios,
// including calling Cloud Run, or endpoints where you intend to
// validate the token yourself.
type OidcToken struct {
// Audience: Audience to be used when generating OIDC token. If not
// specified, the URI specified in target will be used.
Audience string `json:"audience,omitempty"`
// ServiceAccountEmail: [Service account
// email](https://cloud.google.com/iam/docs/service-accounts) to be used
// for generating OIDC token. The service account must be within the
// same project as the queue. The caller must have
// iam.serviceAccounts.actAs permission for the service account.
ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"`
// ForceSendFields is a list of field names (e.g. "Audience") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Audience") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *OidcToken) MarshalJSON() ([]byte, error) {
type NoMethod OidcToken
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// PauseQueueRequest: Request message for PauseQueue.
type PauseQueueRequest struct {
}
// Policy: An Identity and Access Management (IAM) policy, which
// specifies access controls for Google Cloud resources. A `Policy` is a
// collection of `bindings`. A `binding` binds one or more `members` to
// a single `role`. Members can be user accounts, service accounts,
// Google groups, and domains (such as G Suite). A `role` is a named
// list of permissions; each `role` can be an IAM predefined role or a
// user-created custom role. For some types of Google Cloud resources, a
// `binding` can also specify a `condition`, which is a logical
// expression that allows access to a resource only if the expression
// evaluates to `true`. A condition can add constraints based on
// attributes of the request, the resource, or both. To learn which
// resources support conditions in their IAM policies, see the [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-p
// olicies). **JSON example:** { "bindings": [ { "role":
// "roles/resourcemanager.organizationAdmin", "members": [
// "user:[email protected]", "group:[email protected]",
// "domain:google.com",
// "serviceAccount:[email protected]" ] }, {
// "role": "roles/resourcemanager.organizationViewer", "members": [
// "user:[email protected]" ], "condition": { "title": "expirable access",
// "description": "Does not grant access after Sep 2020", "expression":
// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ],
// "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: -
// members: - user:[email protected] - group:[email protected] -
// domain:google.com -
// serviceAccount:[email protected] role:
// roles/resourcemanager.organizationAdmin - members: -
// user:[email protected] role: roles/resourcemanager.organizationViewer
// condition: title: expirable access description: Does not grant access
// after Sep 2020 expression: request.time <
// timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version:
// 3 For a description of IAM and its features, see the [IAM
// documentation](https://cloud.google.com/iam/docs/).
type Policy struct {
// Bindings: Associates a list of `members` to a `role`. Optionally, may
// specify a `condition` that determines how and when the `bindings` are
// applied. Each of the `bindings` must contain at least one member.
Bindings []*Binding `json:"bindings,omitempty"`
// Etag: `etag` is used for optimistic concurrency control as a way to
// help prevent simultaneous updates of a policy from overwriting each
// other. It is strongly suggested that systems make use of the `etag`
// in the read-modify-write cycle to perform policy updates in order to
// avoid race conditions: An `etag` is returned in the response to
// `getIamPolicy`, and systems are expected to put that etag in the
// request to `setIamPolicy` to ensure that their change will be applied
// to the same version of the policy. **Important:** If you use IAM
// Conditions, you must include the `etag` field whenever you call
// `setIamPolicy`. If you omit this field, then IAM allows you to
// overwrite a version `3` policy with a version `1` policy, and all of
// the conditions in the version `3` policy are lost.
Etag string `json:"etag,omitempty"`
// Version: Specifies the format of the policy. Valid values are `0`,
// `1`, and `3`. Requests that specify an invalid value are rejected.
// Any operation that affects conditional role bindings must specify
// version `3`. This requirement applies to the following operations: *
// Getting a policy that includes a conditional role binding * Adding a
// conditional role binding to a policy * Changing a conditional role
// binding in a policy * Removing any role binding, with or without a
// condition, from a policy that includes conditions **Important:** If
// you use IAM Conditions, you must include the `etag` field whenever
// you call `setIamPolicy`. If you omit this field, then IAM allows you
// to overwrite a version `3` policy with a version `1` policy, and all
// of the conditions in the version `3` policy are lost. If a policy
// does not include any conditions, operations on that policy may
// specify any valid version or leave the field unset. To learn which
// resources support conditions in their IAM policies, see the [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-p
// olicies).
Version int64 `json:"version,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Bindings") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Bindings") to include in
// API requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Policy) MarshalJSON() ([]byte, error) {
type NoMethod Policy
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
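// examplePolicy is an illustrative sketch (not generated code) that mirrors
// the JSON example in the Policy documentation above: one unconditional
// binding plus one conditional binding, with Version set to 3 because a
// condition is present. In a real read-modify-write cycle the Etag would be
// copied from a prior getIamPolicy response rather than hard-coded.
func examplePolicy() *Policy {
	return &Policy{
		Version: 3,
		Bindings: []*Binding{
			{
				Role: "roles/resourcemanager.organizationAdmin",
				Members: []string{
					"user:[email protected]",
					"group:[email protected]",
					"domain:google.com",
					"serviceAccount:[email protected]",
				},
			},
			{
				Role:    "roles/resourcemanager.organizationViewer",
				Members: []string{"user:[email protected]"},
				Condition: &Expr{
					Title:       "expirable access",
					Description: "Does not grant access after Sep 2020",
					Expression:  "request.time < timestamp('2020-10-01T00:00:00.000Z')",
				},
			},
		},
		Etag: "BwWWja0YfJA=",
	}
}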
// PurgeQueueRequest: Request message for PurgeQueue.
type PurgeQueueRequest struct {
}
// Queue: A queue is a container of related tasks. Queues are configured
// to manage how those tasks are dispatched. Configurable properties
// include rate limits, retry options, queue types, and others.
type Queue struct {
// AppEngineRoutingOverride: Overrides for task-level
// app_engine_routing. These settings apply only to App Engine tasks in
// this queue. Http tasks are not affected. If set,
// `app_engine_routing_override` is used for all App Engine tasks in the
// queue, no matter what the setting is for the task-level
// app_engine_routing.
AppEngineRoutingOverride *AppEngineRouting `json:"appEngineRoutingOverride,omitempty"`
// Name: Caller-specified and required in CreateQueue, after which it
// becomes output only. The queue name. The queue name must have the
// following format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` *
// `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens
// (-), colons (:), or periods (.). For more information, see
// [Identifying
// projects](https://cloud.google.com/resource-manager/docs/creating-mana
// ging-projects#identifying_projects) * `LOCATION_ID` is the canonical
// ID for the queue's location. The list of available locations can be
// obtained by calling ListLocations. For more information, see
// https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain
// letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum
// length is 100 characters.
Name string `json:"name,omitempty"`
// PurgeTime: Output only. The last time this queue was purged. All
// tasks that were created before this time were purged. A queue can be
// purged using PurgeQueue, the [App Engine Task Queue SDK, or the Cloud
// Console](https://cloud.google.com/appengine/docs/standard/python/taskq
// ueue/push/deleting-tasks-and-queues#purging_all_tasks_from_a_queue).
// Purge time will be truncated to the nearest microsecond. Purge time
// will be unset if the queue has never been purged.
PurgeTime string `json:"purgeTime,omitempty"`
// RateLimits: Rate limits for task dispatches. rate_limits and
// retry_config are related because they both control task attempts.
// However they control task attempts in different ways: * rate_limits
// controls the total rate of dispatches from a queue (i.e. all traffic
// dispatched from the queue, regardless of whether the dispatch is from
// a first attempt or a retry). * retry_config controls what happens to a
// particular task after its first attempt fails. That is,
// retry_config controls task retries (the second attempt, third
// attempt, etc). The queue's actual dispatch rate is the result of: *
// Number of tasks in the queue * User-specified throttling:
// rate_limits, retry_config, and the queue's state. * System throttling
// due to `429` (Too Many Requests) or `503` (Service Unavailable)
// responses from the worker, high error rates, or to smooth sudden
// large traffic spikes.
RateLimits *RateLimits `json:"rateLimits,omitempty"`
// RetryConfig: Settings that determine the retry behavior. * For tasks
// created using Cloud Tasks: the queue-level retry settings apply to
// all tasks in the queue that were created using Cloud Tasks. Retry
// settings cannot be set on individual tasks. * For tasks created using
// the App Engine SDK: the queue-level retry settings apply to all tasks
// in the queue which do not have retry settings explicitly set on the
// task and were created by the App Engine SDK. See [App Engine
// documentation](https://cloud.google.com/appengine/docs/standard/python
// /taskqueue/push/retrying-tasks).
RetryConfig *RetryConfig `json:"retryConfig,omitempty"`
// StackdriverLoggingConfig: Configuration options for writing logs to
// [Stackdriver Logging](https://cloud.google.com/logging/docs/). If
// this field is unset, then no logs are written.
StackdriverLoggingConfig *StackdriverLoggingConfig `json:"stackdriverLoggingConfig,omitempty"`
// State: Output only. The state of the queue. `state` can only be
// changed by calling PauseQueue, ResumeQueue, or uploading
// [queue.yaml/xml](https://cloud.google.com/appengine/docs/python/config
// /queueref). UpdateQueue cannot be used to change `state`.
//
// Possible values:
// "STATE_UNSPECIFIED" - Unspecified state.
// "RUNNING" - The queue is running. Tasks can be dispatched. If the
// queue was created using Cloud Tasks and the queue has had no activity
// (method calls or task dispatches) for 30 days, the queue may take a
// few minutes to re-activate. Some method calls may return NOT_FOUND
// and tasks may not be dispatched for a few minutes until the queue has
// been re-activated.
// "PAUSED" - Tasks are paused by the user. If the queue is paused
// then Cloud Tasks will stop delivering tasks from it, but more tasks
// can still be added to it by the user.
// "DISABLED" - The queue is disabled. A queue becomes `DISABLED` when
// [queue.yaml](https://cloud.google.com/appengine/docs/python/config/que
// ueref) or
// [queue.xml](https://cloud.google.com/appengine/docs/standard/java/conf
// ig/queueref) is uploaded which does not contain the queue. You cannot
// directly disable a queue. When a queue is disabled, tasks can still
// be added to a queue but the tasks are not dispatched. To permanently
// delete this queue and all of its tasks, call DeleteQueue.
State string `json:"state,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g.
// "AppEngineRoutingOverride") to unconditionally include in API
// requests. By default, fields with empty values are omitted from API
// requests. However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineRoutingOverride")
// to include in API requests with the JSON null value. By default,
// fields with empty values are omitted from API requests. However, any
// field with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Queue) MarshalJSON() ([]byte, error) {
type NoMethod Queue
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
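// exampleQueue is an illustrative sketch (not generated code) showing how
// the queue-level knobs documented above fit together. The queue name and
// numeric values are placeholders, not service defaults; State, PurgeTime,
// and RateLimits.MaxBurstSize are output only and therefore omitted.
func exampleQueue() *Queue {
	return &Queue{
		Name: "projects/my-project/locations/us-central1/queues/my-queue",
		RateLimits: &RateLimits{
			MaxDispatchesPerSecond:  10,
			MaxConcurrentDispatches: 100,
		},
		RetryConfig: &RetryConfig{
			MaxAttempts: 5,
			MinBackoff:  "10s",
			MaxBackoff:  "300s",
		},
		StackdriverLoggingConfig: &StackdriverLoggingConfig{SamplingRatio: 0.1},
	}
}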
// RateLimits: Rate limits. This message determines the maximum rate
// that tasks can be dispatched by a queue, regardless of whether the
// dispatch is a first task attempt or a retry. Note: The debugging
// command, RunTask, will run a task even if the queue has reached its
// RateLimits.
type RateLimits struct {
// MaxBurstSize: Output only. The max burst size. Max burst size limits
// how fast tasks in queue are processed when many tasks are in the
// queue and the rate is high. This field allows the queue to have a
// high rate so processing starts shortly after a task is enqueued, but
// still limits resource usage when many tasks are enqueued in a short
// period of time. The [token
// bucket](https://wikipedia.org/wiki/Token_Bucket) algorithm is used to
// control the rate of task dispatches. Each queue has a token bucket
// that holds tokens, up to the maximum specified by `max_burst_size`.
// Each time a task is dispatched, a token is removed from the bucket.
// Tasks will be dispatched until the queue's bucket runs out of tokens.
// The bucket will be continuously refilled with new tokens based on
// max_dispatches_per_second. Cloud Tasks will pick the value of
// `max_burst_size` based on the value of max_dispatches_per_second. For
// queues that were created or updated using `queue.yaml/xml`,
// `max_burst_size` is equal to
// [bucket_size](https://cloud.google.com/appengine/docs/standard/python/
// config/queueref#bucket_size). Since `max_burst_size` is output only,
// if UpdateQueue is called on a queue created by `queue.yaml/xml`,
// `max_burst_size` will be reset based on the value of
// max_dispatches_per_second, regardless of whether
// max_dispatches_per_second is updated.
MaxBurstSize int64 `json:"maxBurstSize,omitempty"`
// MaxConcurrentDispatches: The maximum number of concurrent tasks that
// Cloud Tasks allows to be dispatched for this queue. After this
// threshold has been reached, Cloud Tasks stops dispatching tasks until
// the number of concurrent requests decreases. If unspecified when the
// queue is created, Cloud Tasks will pick the default. The maximum
// allowed value is 5,000. This field has the same meaning as
// [max_concurrent_requests in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#max_concurrent_requests).
MaxConcurrentDispatches int64 `json:"maxConcurrentDispatches,omitempty"`
// MaxDispatchesPerSecond: The maximum rate at which tasks are
// dispatched from this queue. If unspecified when the queue is created,
// Cloud Tasks will pick the default. * The maximum allowed value is
// 500. This field has the same meaning as [rate in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#rate).
MaxDispatchesPerSecond float64 `json:"maxDispatchesPerSecond,omitempty"`
// ForceSendFields is a list of field names (e.g. "MaxBurstSize") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MaxBurstSize") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RateLimits) MarshalJSON() ([]byte, error) {
type NoMethod RateLimits
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *RateLimits) UnmarshalJSON(data []byte) error {
type NoMethod RateLimits
var s1 struct {
MaxDispatchesPerSecond gensupport.JSONFloat64 `json:"maxDispatchesPerSecond"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.MaxDispatchesPerSecond = float64(s1.MaxDispatchesPerSecond)
return nil
}
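// tokenBucketSketch is a hypothetical helper (not generated code) that
// models the token-bucket behaviour described for MaxBurstSize above: the
// bucket starts full, each dispatch consumes one token, and tokens are
// refilled at maxDispatchesPerSecond up to the bucket size. It reports how
// many of the pending tasks could be dispatched over the given number of
// whole seconds; it is a simplification for illustration, not the service's
// actual scheduler.
func tokenBucketSketch(maxBurstSize int, maxDispatchesPerSecond float64, seconds, pending int) int {
	tokens := float64(maxBurstSize)
	dispatched := 0
	for s := 0; s < seconds; s++ {
		// Dispatch as many pending tasks as there are whole tokens.
		for pending > 0 && tokens >= 1 {
			tokens--
			pending--
			dispatched++
		}
		// Refill, capped at the bucket size.
		tokens += maxDispatchesPerSecond
		if tokens > float64(maxBurstSize) {
			tokens = float64(maxBurstSize)
		}
	}
	return dispatched
}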
// ResumeQueueRequest: Request message for ResumeQueue.
type ResumeQueueRequest struct {
}
// RetryConfig: Retry config. These settings determine when a failed
// task attempt is retried.
type RetryConfig struct {
// MaxAttempts: Number of attempts per task. Cloud Tasks will attempt
// the task `max_attempts` times (that is, if the first attempt fails,
// then there will be `max_attempts - 1` retries). Must be >= -1. If
// unspecified when the queue is created, Cloud Tasks will pick the
// default. -1 indicates unlimited attempts. This field has the same
// meaning as [task_retry_limit in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#retry_parameters).
MaxAttempts int64 `json:"maxAttempts,omitempty"`
// MaxBackoff: A task will be scheduled for retry between min_backoff
// and max_backoff duration after it fails, if the queue's RetryConfig
// specifies that the task should be retried. If unspecified when the
// queue is created, Cloud Tasks will pick the default. `max_backoff`
// will be truncated to the nearest second. This field has the same
// meaning as [max_backoff_seconds in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#retry_parameters).
MaxBackoff string `json:"maxBackoff,omitempty"`
// MaxDoublings: The time between retries will double `max_doublings`
// times. A task's retry interval starts at min_backoff, then doubles
// `max_doublings` times, then increases linearly, and finally retries
// at intervals of max_backoff up to max_attempts times. For example, if
// min_backoff is 10s, max_backoff is 300s, and `max_doublings` is 3,
// then the task will first be retried in 10s. The retry interval will
// double three times, and then increase linearly by 2^3 * 10s. Finally,
// the task will retry at intervals of max_backoff until the task has
// been attempted max_attempts times. Thus, the requests will retry at
// 10s, 20s, 40s, 80s, 160s, 240s, 300s, 300s, .... If unspecified when
// the queue is created, Cloud Tasks will pick the default. This field
// has the same meaning as [max_doublings in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#retry_parameters).
MaxDoublings int64 `json:"maxDoublings,omitempty"`
// MaxRetryDuration: If positive, `max_retry_duration` specifies the
// time limit for retrying a failed task, measured from when the task
// was first attempted. Once `max_retry_duration` time has passed *and*
// the task has been attempted max_attempts times, no further attempts
// will be made and the task will be deleted. If zero, then the task age
// is unlimited. If unspecified when the queue is created, Cloud Tasks
// will pick the default. `max_retry_duration` will be truncated to the
// nearest second. This field has the same meaning as [task_age_limit in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#retry_parameters).
MaxRetryDuration string `json:"maxRetryDuration,omitempty"`
// MinBackoff: A task will be scheduled for retry between min_backoff
// and max_backoff duration after it fails, if the queue's RetryConfig
// specifies that the task should be retried. If unspecified when the
// queue is created, Cloud Tasks will pick the default. `min_backoff`
// will be truncated to the nearest second. This field has the same
// meaning as [min_backoff_seconds in
// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/pytho
// n/config/queueref#retry_parameters).
MinBackoff string `json:"minBackoff,omitempty"`
// ForceSendFields is a list of field names (e.g. "MaxAttempts") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "MaxAttempts") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RetryConfig) MarshalJSON() ([]byte, error) {
type NoMethod RetryConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
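// retryScheduleSketch is an illustrative helper (not generated code) that
// reproduces the backoff sequence described in the MaxDoublings
// documentation above: the interval starts at minBackoff seconds, doubles
// maxDoublings times, then grows linearly by minBackoff * 2^maxDoublings,
// and is finally capped at maxBackoff. With the documented example values,
// retryScheduleSketch(10, 300, 3, 8) yields 10, 20, 40, 80, 160, 240, 300,
// 300.
func retryScheduleSketch(minBackoff, maxBackoff float64, maxDoublings, retries int) []float64 {
	// Linear step used once the doubling phase is exhausted.
	linearStep := minBackoff
	for i := 0; i < maxDoublings; i++ {
		linearStep *= 2
	}
	intervals := make([]float64, 0, retries)
	interval := minBackoff
	for i := 0; i < retries; i++ {
		capped := interval
		if capped > maxBackoff {
			capped = maxBackoff
		}
		intervals = append(intervals, capped)
		if i < maxDoublings {
			interval *= 2
		} else {
			interval += linearStep
		}
	}
	return intervals
}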
// RunTaskRequest: Request message for forcing a task to run now using
// RunTask.
type RunTaskRequest struct {
// ResponseView: The response_view specifies which subset of the Task
// will be returned. By default response_view is BASIC; not all
// information is retrieved by default because some data, such as
// payloads, might be desirable to return only when needed because of
// its large size or because of the sensitivity of data that it
// contains. Authorization for FULL requires `cloudtasks.tasks.fullView`
// [Google IAM](https://cloud.google.com/iam/) permission on the Task
// resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
ResponseView string `json:"responseView,omitempty"`
// ForceSendFields is a list of field names (e.g. "ResponseView") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "ResponseView") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *RunTaskRequest) MarshalJSON() ([]byte, error) {
type NoMethod RunTaskRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// SetIamPolicyRequest: Request message for `SetIamPolicy` method.
type SetIamPolicyRequest struct {
// Policy: REQUIRED: The complete policy to be applied to the
// `resource`. The size of the policy is limited to a few 10s of KB. An
// empty policy is a valid policy but certain Cloud Platform services
// (such as Projects) might reject them.
Policy *Policy `json:"policy,omitempty"`
// ForceSendFields is a list of field names (e.g. "Policy") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Policy") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) {
type NoMethod SetIamPolicyRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// StackdriverLoggingConfig: Configuration options for writing logs to
// [Stackdriver Logging](https://cloud.google.com/logging/docs/).
type StackdriverLoggingConfig struct {
// SamplingRatio: Specifies the fraction of operations to write to
// [Stackdriver Logging](https://cloud.google.com/logging/docs/). This
// field may contain any value between 0.0 and 1.0, inclusive. 0.0 is
// the default and means that no operations are logged.
SamplingRatio float64 `json:"samplingRatio,omitempty"`
// ForceSendFields is a list of field names (e.g. "SamplingRatio") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "SamplingRatio") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *StackdriverLoggingConfig) MarshalJSON() ([]byte, error) {
type NoMethod StackdriverLoggingConfig
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
func (s *StackdriverLoggingConfig) UnmarshalJSON(data []byte) error {
type NoMethod StackdriverLoggingConfig
var s1 struct {
SamplingRatio gensupport.JSONFloat64 `json:"samplingRatio"`
*NoMethod
}
s1.NoMethod = (*NoMethod)(s)
if err := json.Unmarshal(data, &s1); err != nil {
return err
}
s.SamplingRatio = float64(s1.SamplingRatio)
return nil
}
// Status: The `Status` type defines a logical error model that is
// suitable for different programming environments, including REST APIs
// and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each
// `Status` message contains three pieces of data: error code, error
// message, and error details. You can find out more about this error
// model and how to work with it in the [API Design
// Guide](https://cloud.google.com/apis/design/errors).
type Status struct {
// Code: The status code, which should be an enum value of
// google.rpc.Code.
Code int64 `json:"code,omitempty"`
// Details: A list of messages that carry the error details. There is a
// common set of message types for APIs to use.
Details []googleapi.RawMessage `json:"details,omitempty"`
// Message: A developer-facing error message, which should be in
// English. Any user-facing error message should be localized and sent
// in the google.rpc.Status.details field, or localized by the client.
Message string `json:"message,omitempty"`
// ForceSendFields is a list of field names (e.g. "Code") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Code") to include in API
// requests with the JSON null value. By default, fields with empty
// values are omitted from API requests. However, any field with an
// empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *Status) MarshalJSON() ([]byte, error) {
type NoMethod Status
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// Task: A unit of scheduled work.
type Task struct {
// AppEngineHttpRequest: HTTP request that is sent to the App Engine app
// handler. An App Engine task is a task that has AppEngineHttpRequest
// set.
AppEngineHttpRequest *AppEngineHttpRequest `json:"appEngineHttpRequest,omitempty"`
// CreateTime: Output only. The time that the task was created.
// `create_time` will be truncated to the nearest second.
CreateTime string `json:"createTime,omitempty"`
// DispatchCount: Output only. The number of attempts dispatched. This
// count includes attempts which have been dispatched but haven't
// received a response.
DispatchCount int64 `json:"dispatchCount,omitempty"`
// DispatchDeadline: The deadline for requests sent to the worker. If
// the worker does not respond by this deadline then the request is
// cancelled and the attempt is marked as a `DEADLINE_EXCEEDED` failure.
// Cloud Tasks will retry the task according to the RetryConfig. Note
// that when the request is cancelled, Cloud Tasks will stop listening
// for the response, but whether the worker stops processing depends on
// the worker. For example, if the worker is stuck, it may not react to
// cancelled requests. The default and maximum values depend on the type
// of request: * For HTTP tasks, the default is 10 minutes. The deadline
// must be in the interval [15 seconds, 30 minutes]. * For App Engine
// tasks, 0 indicates that the request has the default deadline. The
// default deadline depends on the [scaling
// type](https://cloud.google.com/appengine/docs/standard/go/how-instance
// s-are-managed#instance_scaling) of the service: 10 minutes for
// standard apps with automatic scaling, 24 hours for standard apps with
// manual and basic scaling, and 60 minutes for flex apps. If the
// request deadline is set, it must be in the interval [15 seconds, 24
// hours 15 seconds]. Regardless of the task's `dispatch_deadline`, the
// app handler will not run for longer than the service's timeout.
// We recommend setting the `dispatch_deadline` to at most a few seconds
// more than the app handler's timeout. For more information see
// [Timeouts](https://cloud.google.com/tasks/docs/creating-appengine-hand
// lers#timeouts). `dispatch_deadline` will be truncated to the nearest
// millisecond. The deadline is an approximate deadline.
DispatchDeadline string `json:"dispatchDeadline,omitempty"`
// FirstAttempt: Output only. The status of the task's first attempt.
// Only dispatch_time will be set. The other Attempt information is not
// retained by Cloud Tasks.
FirstAttempt *Attempt `json:"firstAttempt,omitempty"`
// HttpRequest: HTTP request that is sent to the worker. An HTTP task is
// a task that has HttpRequest set.
HttpRequest *HttpRequest `json:"httpRequest,omitempty"`
// LastAttempt: Output only. The status of the task's last attempt.
LastAttempt *Attempt `json:"lastAttempt,omitempty"`
// Name: Optionally caller-specified in CreateTask. The task name. The
// task name must have the following format:
// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_
// ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
// hyphens (-), colons (:), or periods (.). For more information, see
// [Identifying
// projects](https://cloud.google.com/resource-manager/docs/creating-mana
// ging-projects#identifying_projects) * `LOCATION_ID` is the canonical
// ID for the task's location. The list of available locations can be
// obtained by calling ListLocations. For more information, see
// https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain
// letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum
// length is 100 characters. * `TASK_ID` can contain only letters
// ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The
// maximum length is 500 characters.
Name string `json:"name,omitempty"`
// ResponseCount: Output only. The number of attempts which have
// received a response.
ResponseCount int64 `json:"responseCount,omitempty"`
// ScheduleTime: The time when the task is scheduled to be attempted or
// retried. `schedule_time` will be truncated to the nearest
// microsecond.
ScheduleTime string `json:"scheduleTime,omitempty"`
// View: Output only. The view specifies which subset of the Task has
// been returned.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
View string `json:"view,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g.
// "AppEngineHttpRequest") to unconditionally include in API requests.
// By default, fields with empty values are omitted from API requests.
// However, any non-pointer, non-interface field appearing in
// ForceSendFields will be sent to the server regardless of whether the
// field is empty or not. This may be used to include empty fields in
// Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "AppEngineHttpRequest") to
// include in API requests with the JSON null value. By default, fields
// with empty values are omitted from API requests. However, any field
// with an empty value appearing in NullFields will be sent to the
// server as null. It is an error if a field in this list has a
// non-empty value. This may be used to include null fields in Patch
// requests.
NullFields []string `json:"-"`
}
func (s *Task) MarshalJSON() ([]byte, error) {
type NoMethod Task
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
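// exampleNewHTTPTask is an illustrative sketch (not generated code) showing
// how a Task value targeting an HTTP worker can be assembled. The HttpRequest
// field names used here (HttpMethod, Url) are assumed from the v2 types
// defined earlier in this file; the task name and deadline are placeholders.
func exampleNewHTTPTask() *Task {
	return &Task{
		// Optional caller-specified name; if omitted, the service assigns one.
		// Format: projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID
		Name: "projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task",
		HttpRequest: &HttpRequest{
			HttpMethod: "POST",
			Url:        "https://example.com/task_handler",
		},
		// For HTTP tasks the deadline must fall in [15 seconds, 30 minutes];
		// the default is 10 minutes. "900s" here is purely illustrative.
		DispatchDeadline: "900s",
	}
}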
// TestIamPermissionsRequest: Request message for `TestIamPermissions`
// method.
type TestIamPermissionsRequest struct {
// Permissions: The set of permissions to check for the `resource`.
// Permissions with wildcards (such as '*' or 'storage.*') are not
// allowed. For more information see [IAM
// Overview](https://cloud.google.com/iam/docs/overview#permissions).
Permissions []string `json:"permissions,omitempty"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsRequest
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// TestIamPermissionsResponse: Response message for `TestIamPermissions`
// method.
type TestIamPermissionsResponse struct {
// Permissions: A subset of `TestPermissionsRequest.permissions` that
// the caller is allowed.
Permissions []string `json:"permissions,omitempty"`
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
// ForceSendFields is a list of field names (e.g. "Permissions") to
// unconditionally include in API requests. By default, fields with
// empty values are omitted from API requests. However, any non-pointer,
// non-interface field appearing in ForceSendFields will be sent to the
// server regardless of whether the field is empty or not. This may be
// used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
// NullFields is a list of field names (e.g. "Permissions") to include
// in API requests with the JSON null value. By default, fields with
// empty values are omitted from API requests. However, any field with
// an empty value appearing in NullFields will be sent to the server as
// null. It is an error if a field in this list has a non-empty value.
// This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
type NoMethod TestIamPermissionsResponse
raw := NoMethod(*s)
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
// method id "cloudtasks.projects.locations.get":
type ProjectsLocationsGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets information about a location.
func (r *ProjectsLocationsService) Get(name string) *ProjectsLocationsGetCall {
c := &ProjectsLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsGetCall) Context(ctx context.Context) *ProjectsLocationsGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.get" call.
// Exactly one of *Location or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Location.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ProjectsLocationsGetCall) Do(opts ...googleapi.CallOption) (*Location, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Location{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets information about a location.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Resource name for the location.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Location"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
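// exampleGetLocation is an illustrative sketch (not generated code) showing a
// single location lookup. The project and location IDs are placeholders, and
// Fields restricts the response to the resource name only.
func exampleGetLocation(ctx context.Context, r *ProjectsLocationsService) (*Location, error) {
	return r.Get("projects/my-project/locations/us-central1").
		Fields("name").
		Context(ctx).
		Do()
}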
// method id "cloudtasks.projects.locations.list":
type ProjectsLocationsListCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists information about the supported locations for this
// service.
func (r *ProjectsLocationsService) List(name string) *ProjectsLocationsListCall {
c := &ProjectsLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Filter sets the optional parameter "filter": The standard list
// filter.
func (c *ProjectsLocationsListCall) Filter(filter string) *ProjectsLocationsListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": The standard list
// page size.
func (c *ProjectsLocationsListCall) PageSize(pageSize int64) *ProjectsLocationsListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": The standard list
// page token.
func (c *ProjectsLocationsListCall) PageToken(pageToken string) *ProjectsLocationsListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsListCall) Fields(s ...googleapi.Field) *ProjectsLocationsListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsListCall) IfNoneMatch(entityTag string) *ProjectsLocationsListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsListCall) Context(ctx context.Context) *ProjectsLocationsListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}/locations")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.list" call.
// Exactly one of *ListLocationsResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListLocationsResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsListCall) Do(opts ...googleapi.CallOption) (*ListLocationsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListLocationsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists information about the supported locations for this service.",
// "flatPath": "v2/projects/{projectsId}/locations",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.list",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "filter": {
// "description": "The standard list filter.",
// "location": "query",
// "type": "string"
// },
// "name": {
// "description": "The resource that owns the locations collection, if applicable.",
// "location": "path",
// "pattern": "^projects/[^/]+$",
// "required": true,
// "type": "string"
// },
// "pageSize": {
// "description": "The standard list page size.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "The standard list page token.",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}/locations",
// "response": {
// "$ref": "ListLocationsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsListCall) Pages(ctx context.Context, f func(*ListLocationsResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
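// exampleListLocations is an illustrative sketch (not generated code) showing
// how Pages walks every page of results until NextPageToken is empty. The
// Locations and Name fields are assumed from the generated
// ListLocationsResponse and Location types defined earlier in this file.
func exampleListLocations(ctx context.Context, r *ProjectsLocationsService) error {
	return r.List("projects/my-project").PageSize(100).Pages(ctx, func(page *ListLocationsResponse) error {
		for _, loc := range page.Locations {
			// Print each location's resource name as it is discovered.
			fmt.Println(loc.Name)
		}
		return nil
	})
}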
// method id "cloudtasks.projects.locations.queues.create":
type ProjectsLocationsQueuesCreateCall struct {
s *Service
parent string
queue *Queue
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a queue. Queues created with this method allow tasks
// to live for a maximum of 31 days. After a task is 31 days old, the
// task will be deleted regardless of whether it was dispatched or not.
// WARNING: Using this method may have unintended side effects if you
// are using an App Engine `queue.yaml` or `queue.xml` file to manage
// your queues. Read [Overview of Queue Management and
// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before
// using this method.
func (r *ProjectsLocationsQueuesService) Create(parent string, queue *Queue) *ProjectsLocationsQueuesCreateCall {
c := &ProjectsLocationsQueuesCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.queue = queue
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesCreateCall) Context(ctx context.Context) *ProjectsLocationsQueuesCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.queue)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/queues")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.create" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesCreateCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a queue. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The location name in which the queue will be created. For example: `projects/PROJECT_ID/locations/LOCATION_ID` The list of allowed locations can be obtained by calling Cloud Tasks' implementation of ListLocations.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/queues",
// "request": {
// "$ref": "Queue"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
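// exampleCreateQueue is an illustrative sketch (not generated code) showing
// queue creation under a location. Only the queue name (a placeholder here)
// is set; the service applies defaults for the remaining settings.
func exampleCreateQueue(ctx context.Context, r *ProjectsLocationsQueuesService) (*Queue, error) {
	q := &Queue{
		Name: "projects/my-project/locations/us-central1/queues/my-queue",
	}
	return r.Create("projects/my-project/locations/us-central1", q).Context(ctx).Do()
}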
// method id "cloudtasks.projects.locations.queues.delete":
type ProjectsLocationsQueuesDeleteCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a queue. This command will delete the queue even if
// it has tasks in it. Note: If you delete a queue, a queue with the
// same name can't be created for 7 days. WARNING: Using this method may
// have unintended side effects if you are using an App Engine
// `queue.yaml` or `queue.xml` file to manage your queues. Read
// [Overview of Queue Management and
// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before
// using this method.
func (r *ProjectsLocationsQueuesService) Delete(name string) *ProjectsLocationsQueuesDeleteCall {
c := &ProjectsLocationsQueuesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesDeleteCall) Context(ctx context.Context) *ProjectsLocationsQueuesDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a queue. This command will delete the queue even if it has tasks in it. Note: If you delete a queue, a queue with the same name can't be created for 7 days. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "DELETE",
// "id": "cloudtasks.projects.locations.queues.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
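// exampleDeleteQueue is an illustrative sketch (not generated code) showing
// queue deletion with structured error handling. Non-2xx responses surface as
// *googleapi.Error, so the HTTP status code is available after a type
// assertion; treating 404 as success is just a choice made for this sketch.
func exampleDeleteQueue(ctx context.Context, r *ProjectsLocationsQueuesService) error {
	_, err := r.Delete("projects/my-project/locations/us-central1/queues/my-queue").Context(ctx).Do()
	if gErr, ok := err.(*googleapi.Error); ok && gErr.Code == http.StatusNotFound {
		// The queue is already gone; nothing left to delete.
		return nil
	}
	return err
}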
// method id "cloudtasks.projects.locations.queues.get":
type ProjectsLocationsQueuesGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a queue.
func (r *ProjectsLocationsQueuesService) Get(name string) *ProjectsLocationsQueuesGetCall {
c := &ProjectsLocationsQueuesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesGetCall) Context(ctx context.Context) *ProjectsLocationsQueuesGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.get" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesGetCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a queue.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The resource name of the queue. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
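// exampleGetQueueIfChanged is an illustrative sketch (not generated code) of a
// conditional read. If the queue has not changed since the supplied ETag was
// recorded, Do returns an error that googleapi.IsNotModified reports as true,
// and the caller can keep using its cached copy. The etag value and queue name
// are placeholders.
func exampleGetQueueIfChanged(ctx context.Context, r *ProjectsLocationsQueuesService, etag string, cached *Queue) (*Queue, error) {
	q, err := r.Get("projects/my-project/locations/us-central1/queues/my-queue").
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		// Nothing changed server-side; reuse the cached queue.
		return cached, nil
	}
	return q, err
}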
// method id "cloudtasks.projects.locations.queues.getIamPolicy":
type ProjectsLocationsQueuesGetIamPolicyCall struct {
s *Service
resource string
getiampolicyrequest *GetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// GetIamPolicy: Gets the access control policy for a Queue. Returns an
// empty policy if the resource exists and does not have a policy set.
// Authorization requires the following [Google
// IAM](https://cloud.google.com/iam) permission on the specified
// resource parent: * `cloudtasks.queues.getIamPolicy`
func (r *ProjectsLocationsQueuesService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsLocationsQueuesGetIamPolicyCall {
c := &ProjectsLocationsQueuesGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.getiampolicyrequest = getiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesGetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsQueuesGetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:getIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.getIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets the access control policy for a Queue. Returns an empty policy if the resource exists and does not have a policy set. Authorization requires the following [Google IAM](https://cloud.google.com/iam) permission on the specified resource parent: * `cloudtasks.queues.getIamPolicy`",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:getIamPolicy",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.getIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:getIamPolicy",
// "request": {
// "$ref": "GetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
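// exampleGetQueuePolicy is an illustrative sketch (not generated code) showing
// how to read the IAM policy attached to a queue. An empty GetIamPolicyRequest
// is sufficient for a basic read in this sketch; the resource name is a
// placeholder.
func exampleGetQueuePolicy(ctx context.Context, r *ProjectsLocationsQueuesService) (*Policy, error) {
	return r.GetIamPolicy(
		"projects/my-project/locations/us-central1/queues/my-queue",
		&GetIamPolicyRequest{},
	).Context(ctx).Do()
}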
// method id "cloudtasks.projects.locations.queues.list":
type ProjectsLocationsQueuesListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists queues. Queues are returned in lexicographical order.
func (r *ProjectsLocationsQueuesService) List(parent string) *ProjectsLocationsQueuesListCall {
c := &ProjectsLocationsQueuesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// Filter sets the optional parameter "filter": `filter` can be used to
// specify a subset of queues. Any Queue field can be used as a filter
// and several operators are supported. For example: `<=, <, >=, >, !=,
// =, :`. The filter syntax is the same as described in [Stackdriver's
// Advanced Logs
// Filters](https://cloud.google.com/logging/docs/view/advanced_filters).
// Sample filter "state: PAUSED". Note that using filters might cause
// fewer queues than the requested page_size to be returned.
func (c *ProjectsLocationsQueuesListCall) Filter(filter string) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("filter", filter)
return c
}
// PageSize sets the optional parameter "pageSize": Requested page size.
// The maximum page size is 9800. If unspecified, the page size will be
// the maximum. Fewer queues than requested might be returned, even if
// more queues exist; use the next_page_token in the response to
// determine if more queues exist.
func (c *ProjectsLocationsQueuesListCall) PageSize(pageSize int64) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": A token
// identifying the page of results to return. To request the first page
// results, page_token must be empty. To request the next page of
// results, page_token must be the value of next_page_token returned
// from the previous call to ListQueues method. It is an error to switch
// the value of the filter while iterating through pages.
func (c *ProjectsLocationsQueuesListCall) PageToken(pageToken string) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesListCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesListCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesListCall) Context(ctx context.Context) *ProjectsLocationsQueuesListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/queues")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.list" call.
// Exactly one of *ListQueuesResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListQueuesResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesListCall) Do(opts ...googleapi.CallOption) (*ListQueuesResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListQueuesResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists queues. Queues are returned in lexicographical order.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "filter": {
// "description": "`filter` can be used to specify a subset of queues. Any Queue field can be used as a filter and several operators as supported. For example: `\u003c=, \u003c, \u003e=, \u003e, !=, =, :`. The filter syntax is the same as described in [Stackdriver's Advanced Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). Sample filter \"state: PAUSED\". Note that using filters might cause fewer queues than the requested page_size to be returned.",
// "location": "query",
// "type": "string"
// },
// "pageSize": {
// "description": "Requested page size. The maximum page size is 9800. If unspecified, the page size will be the maximum. Fewer queues than requested might be returned, even if more queues exist; use the next_page_token in the response to determine if more queues exist.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "A token identifying the page of results to return. To request the first page results, page_token must be empty. To request the next page of results, page_token must be the value of next_page_token returned from the previous call to ListQueues method. It is an error to switch the value of the filter while iterating through pages.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The location name. For example: `projects/PROJECT_ID/locations/LOCATION_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/queues",
// "response": {
// "$ref": "ListQueuesResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsQueuesListCall) Pages(ctx context.Context, f func(*ListQueuesResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
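// exampleListPausedQueues is an illustrative sketch (not generated code)
// showing Filter, PageSize, and Pages combined to walk only the paused queues
// in a location. The Queues and Name fields are assumed from the generated
// ListQueuesResponse and Queue types in this file; note that filtering may
// return fewer queues per page than the requested page size.
func exampleListPausedQueues(ctx context.Context, r *ProjectsLocationsQueuesService) error {
	return r.List("projects/my-project/locations/us-central1").
		Filter("state: PAUSED").
		PageSize(50).
		Pages(ctx, func(page *ListQueuesResponse) error {
			for _, q := range page.Queues {
				fmt.Println(q.Name)
			}
			return nil
		})
}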
// method id "cloudtasks.projects.locations.queues.patch":
type ProjectsLocationsQueuesPatchCall struct {
s *Service
name string
queue *Queue
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Patch: Updates a queue. This method creates the queue if it does not
// exist and updates the queue if it does exist. Queues created with
// this method allow tasks to live for a maximum of 31 days. After a
// task is 31 days old, the task will be deleted regardless of whether
// it was dispatched or not. WARNING: Using this method may have
// unintended side effects if you are using an App Engine `queue.yaml`
// or `queue.xml` file to manage your queues. Read [Overview of Queue
// Management and
// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before
// using this method.
func (r *ProjectsLocationsQueuesService) Patch(name string, queue *Queue) *ProjectsLocationsQueuesPatchCall {
c := &ProjectsLocationsQueuesPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.queue = queue
return c
}
// UpdateMask sets the optional parameter "updateMask": A mask used to
// specify which fields of the queue are being updated. If empty, then
// all fields will be updated.
func (c *ProjectsLocationsQueuesPatchCall) UpdateMask(updateMask string) *ProjectsLocationsQueuesPatchCall {
c.urlParams_.Set("updateMask", updateMask)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPatchCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPatchCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPatchCall) Context(ctx context.Context) *ProjectsLocationsQueuesPatchCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPatchCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.queue)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("PATCH", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.patch" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPatchCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Updates a queue. This method creates the queue if it does not exist and updates the queue if it does exist. Queues created with this method allow tasks to live for a maximum of 31 days. After a task is 31 days old, the task will be deleted regardless of whether it was dispatched or not. WARNING: Using this method may have unintended side effects if you are using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. Read [Overview of Queue Management and queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using this method.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}",
// "httpMethod": "PATCH",
// "id": "cloudtasks.projects.locations.queues.patch",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Caller-specified and required in CreateQueue, after which it becomes output only. The queue name. The queue name must have the following format: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons (:), or periods (.). For more information, see [Identifying projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) * `LOCATION_ID` is the canonical ID for the queue's location. The list of available locations can be obtained by calling ListLocations. For more information, see https://cloud.google.com/about/locations/. * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or hyphens (-). The maximum length is 100 characters.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// },
// "updateMask": {
// "description": "A mask used to specify which fields of the queue are being updated. If empty, then all fields will be updated.",
// "format": "google-fieldmask",
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "request": {
// "$ref": "Queue"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
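// exampleThrottleQueue is an illustrative sketch (not generated code) showing
// a partial update. The RateLimits type and its MaxDispatchesPerSecond field
// are assumptions about the generated Queue surface; the point being shown is
// that UpdateMask limits the patch to the listed paths, leaving every other
// queue setting untouched.
func exampleThrottleQueue(ctx context.Context, r *ProjectsLocationsQueuesService) (*Queue, error) {
	q := &Queue{
		RateLimits: &RateLimits{MaxDispatchesPerSecond: 5},
	}
	return r.Patch("projects/my-project/locations/us-central1/queues/my-queue", q).
		UpdateMask("rateLimits.maxDispatchesPerSecond").
		Context(ctx).
		Do()
}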
// method id "cloudtasks.projects.locations.queues.pause":
type ProjectsLocationsQueuesPauseCall struct {
s *Service
name string
pausequeuerequest *PauseQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Pause: Pauses the queue. If a queue is paused then the system will
// stop dispatching tasks until the queue is resumed via ResumeQueue.
// Tasks can still be added when the queue is paused. A queue is paused
// if its state is PAUSED.
func (r *ProjectsLocationsQueuesService) Pause(name string, pausequeuerequest *PauseQueueRequest) *ProjectsLocationsQueuesPauseCall {
c := &ProjectsLocationsQueuesPauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.pausequeuerequest = pausequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPauseCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPauseCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPauseCall) Context(ctx context.Context) *ProjectsLocationsQueuesPauseCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPauseCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPauseCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.pausequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:pause")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.pause" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPauseCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Pauses the queue. If a queue is paused then the system will stop dispatching tasks until the queue is resumed via ResumeQueue. Tasks can still be added when the queue is paused. A queue is paused if its state is PAUSED.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:pause",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.pause",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:pause",
// "request": {
// "$ref": "PauseQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
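// examplePauseQueue is an illustrative sketch (not generated code) showing how
// to pause dispatch on a queue. An empty PauseQueueRequest is passed here;
// tasks can still be added while the queue is paused, and dispatch resumes
// only after a ResumeQueue call.
func examplePauseQueue(ctx context.Context, r *ProjectsLocationsQueuesService) (*Queue, error) {
	return r.Pause(
		"projects/my-project/locations/us-central1/queues/my-queue",
		&PauseQueueRequest{},
	).Context(ctx).Do()
}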
// method id "cloudtasks.projects.locations.queues.purge":
type ProjectsLocationsQueuesPurgeCall struct {
s *Service
name string
purgequeuerequest *PurgeQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Purge: Purges a queue by deleting all of its tasks. All tasks created
// before this method is called are permanently deleted. Purge
// operations can take up to one minute to take effect. Tasks might be
// dispatched before the purge takes effect. A purge is irreversible.
func (r *ProjectsLocationsQueuesService) Purge(name string, purgequeuerequest *PurgeQueueRequest) *ProjectsLocationsQueuesPurgeCall {
c := &ProjectsLocationsQueuesPurgeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.purgequeuerequest = purgequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesPurgeCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesPurgeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesPurgeCall) Context(ctx context.Context) *ProjectsLocationsQueuesPurgeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesPurgeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesPurgeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.purgequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:purge")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.purge" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesPurgeCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Purges a queue by deleting all of its tasks. All tasks created before this method is called are permanently deleted. Purge operations can take up to one minute to take effect. Tasks might be dispatched before the purge takes effect. A purge is irreversible.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:purge",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.purge",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:purge",
// "request": {
// "$ref": "PurgeQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
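// Illustrative usage sketch: purging every task in a queue. Assumes a *Service
// value (svc) and context (ctx) built as in the pause example above; the queue
// name is a placeholder. A purge is irreversible and can take up to a minute.
//
//	_, err := svc.Projects.Locations.Queues.
//		Purge("projects/my-project/locations/us-central1/queues/my-queue",
//			&cloudtasks.PurgeQueueRequest{}).
//		Context(ctx).
//		Do()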
// method id "cloudtasks.projects.locations.queues.resume":
type ProjectsLocationsQueuesResumeCall struct {
s *Service
name string
resumequeuerequest *ResumeQueueRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Resume: Resumes a queue. This method resumes a queue after it has been
// PAUSED or DISABLED. The state of a queue is stored in the queue's
// state; after calling this method it will be set to RUNNING. WARNING:
// Resuming many high-QPS queues at the same time can lead to target
// overloading. If you are resuming high-QPS queues, follow the 500/50/5
// pattern described in [Managing Cloud Tasks Scaling
// Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
func (r *ProjectsLocationsQueuesService) Resume(name string, resumequeuerequest *ResumeQueueRequest) *ProjectsLocationsQueuesResumeCall {
c := &ProjectsLocationsQueuesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.resumequeuerequest = resumequeuerequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesResumeCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesResumeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesResumeCall) Context(ctx context.Context) *ProjectsLocationsQueuesResumeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesResumeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesResumeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.resumequeuerequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:resume")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.resume" call.
// Exactly one of *Queue or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Queue.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesResumeCall) Do(opts ...googleapi.CallOption) (*Queue, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Queue{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Resume a queue. This method resumes a queue after it has been PAUSED or DISABLED. The state of a queue is stored in the queue's state; after calling this method it will be set to RUNNING. WARNING: Resuming many high-QPS queues at the same time can lead to target overloading. If you are resuming high-QPS queues, follow the 500/50/5 pattern described in [Managing Cloud Tasks Scaling Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:resume",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.resume",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:resume",
// "request": {
// "$ref": "ResumeQueueRequest"
// },
// "response": {
// "$ref": "Queue"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
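// Illustrative usage sketch: resuming a paused queue. Assumes svc and ctx as in
// the earlier examples. When resuming many high-QPS queues, stagger the calls
// (the 500/50/5 ramp-up) instead of issuing them all at once.
//
//	q, err := svc.Projects.Locations.Queues.
//		Resume("projects/my-project/locations/us-central1/queues/my-queue",
//			&cloudtasks.ResumeQueueRequest{}).
//		Context(ctx).
//		Do()
//	if err == nil {
//		log.Printf("queue %s is now %s", q.Name, q.State)
//	}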
// method id "cloudtasks.projects.locations.queues.setIamPolicy":
type ProjectsLocationsQueuesSetIamPolicyCall struct {
s *Service
resource string
setiampolicyrequest *SetIamPolicyRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// SetIamPolicy: Sets the access control policy for a Queue. Replaces
// any existing policy. Note: The Cloud Console does not check
// queue-level IAM permissions yet. Project-level permissions are
// required to use the Cloud Console. Authorization requires the
// following [Google IAM](https://cloud.google.com/iam) permission on
// the specified resource parent: * `cloudtasks.queues.setIamPolicy`
func (r *ProjectsLocationsQueuesService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsLocationsQueuesSetIamPolicyCall {
c := &ProjectsLocationsQueuesSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.setiampolicyrequest = setiampolicyrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesSetIamPolicyCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Context(ctx context.Context) *ProjectsLocationsQueuesSetIamPolicyCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:setIamPolicy")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.setIamPolicy" call.
// Exactly one of *Policy or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Policy.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Policy{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Sets the access control policy for a Queue. Replaces any existing policy. Note: The Cloud Console does not check queue-level IAM permissions yet. Project-level permissions are required to use the Cloud Console. Authorization requires the following [Google IAM](https://cloud.google.com/iam) permission on the specified resource parent: * `cloudtasks.queues.setIamPolicy`",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:setIamPolicy",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.setIamPolicy",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:setIamPolicy",
// "request": {
// "$ref": "SetIamPolicyRequest"
// },
// "response": {
// "$ref": "Policy"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
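// Illustrative usage sketch: granting a service account permission to enqueue
// tasks on a single queue. The role and member strings are assumptions for the
// example; note that this call replaces the queue's existing policy wholesale.
//
//	req := &cloudtasks.SetIamPolicyRequest{
//		Policy: &cloudtasks.Policy{
//			Bindings: []*cloudtasks.Binding{{
//				Role:    "roles/cloudtasks.enqueuer",
//				Members: []string{"serviceAccount:enqueuer@my-project.iam.gserviceaccount.com"},
//			}},
//		},
//	}
//	_, err := svc.Projects.Locations.Queues.
//		SetIamPolicy("projects/my-project/locations/us-central1/queues/my-queue", req).
//		Context(ctx).
//		Do()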
// method id "cloudtasks.projects.locations.queues.testIamPermissions":
type ProjectsLocationsQueuesTestIamPermissionsCall struct {
s *Service
resource string
testiampermissionsrequest *TestIamPermissionsRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// TestIamPermissions: Returns permissions that a caller has on a Queue.
// If the resource does not exist, this will return an empty set of
// permissions, not a NOT_FOUND error. Note: This operation is designed
// to be used for building permission-aware UIs and command-line tools,
// not for authorization checking. This operation may "fail open"
// without warning.
func (r *ProjectsLocationsQueuesService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsLocationsQueuesTestIamPermissionsCall {
c := &ProjectsLocationsQueuesTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.resource = resource
c.testiampermissionsrequest = testiampermissionsrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTestIamPermissionsCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Context(ctx context.Context) *ProjectsLocationsQueuesTestIamPermissionsCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+resource}:testIamPermissions")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"resource": c.resource,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.testIamPermissions" call.
// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
// Any non-2xx status code is an error. Response headers are in either
// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &TestIamPermissionsResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Returns permissions that a caller has on a Queue. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may \"fail open\" without warning.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}:testIamPermissions",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.testIamPermissions",
// "parameterOrder": [
// "resource"
// ],
// "parameters": {
// "resource": {
// "description": "REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+resource}:testIamPermissions",
// "request": {
// "$ref": "TestIamPermissionsRequest"
// },
// "response": {
// "$ref": "TestIamPermissionsResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
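// Illustrative usage sketch: checking which of a set of permissions the caller
// holds on a queue, for example to enable or disable UI actions. The permission
// names are assumptions for the example.
//
//	resp, err := svc.Projects.Locations.Queues.
//		TestIamPermissions("projects/my-project/locations/us-central1/queues/my-queue",
//			&cloudtasks.TestIamPermissionsRequest{
//				Permissions: []string{"cloudtasks.tasks.create", "cloudtasks.tasks.delete"},
//			}).
//		Context(ctx).
//		Do()
//	// resp.Permissions then holds the subset the caller actually has.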
// method id "cloudtasks.projects.locations.queues.tasks.create":
type ProjectsLocationsQueuesTasksCreateCall struct {
s *Service
parent string
createtaskrequest *CreateTaskRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Create: Creates a task and adds it to a queue. Tasks cannot be
// updated after creation; there is no UpdateTask command. The maximum
// task size is 100KB.
func (r *ProjectsLocationsQueuesTasksService) Create(parent string, createtaskrequest *CreateTaskRequest) *ProjectsLocationsQueuesTasksCreateCall {
c := &ProjectsLocationsQueuesTasksCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
c.createtaskrequest = createtaskrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksCreateCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksCreateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksCreateCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksCreateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksCreateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.createtaskrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/tasks")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.create" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksCreateCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Creates a task and adds it to a queue. Tasks cannot be updated after creation; there is no UpdateTask command. * The maximum task size is 100KB.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.tasks.create",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "parent": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` The queue must already exist.",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+parent}/tasks",
// "request": {
// "$ref": "CreateTaskRequest"
// },
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
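// Illustrative usage sketch: creating an HTTP-target task on an existing queue.
// The worker URL and field values are assumptions for the example; keep the
// request under the documented task size limit.
//
//	task, err := svc.Projects.Locations.Queues.Tasks.
//		Create("projects/my-project/locations/us-central1/queues/my-queue",
//			&cloudtasks.CreateTaskRequest{
//				Task: &cloudtasks.Task{
//					HttpRequest: &cloudtasks.HttpRequest{
//						HttpMethod: "POST",
//						Url:        "https://example.com/worker",
//					},
//				},
//			}).
//		Context(ctx).
//		Do()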
// method id "cloudtasks.projects.locations.queues.tasks.delete":
type ProjectsLocationsQueuesTasksDeleteCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Delete: Deletes a task. A task can be deleted if it is scheduled or
// dispatched. A task cannot be deleted if it has executed successfully
// or permanently failed.
func (r *ProjectsLocationsQueuesTasksService) Delete(name string) *ProjectsLocationsQueuesTasksDeleteCall {
c := &ProjectsLocationsQueuesTasksDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksDeleteCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksDeleteCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("DELETE", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.delete" call.
// Exactly one of *Empty or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Empty.ServerResponse.Header or (if a response was returned at all)
// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
// check whether the returned error was because http.StatusNotModified
// was returned.
func (c *ProjectsLocationsQueuesTasksDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Empty{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Deletes a task. A task can be deleted if it is scheduled or dispatched. A task cannot be deleted if it has executed successfully or permanently failed.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}",
// "httpMethod": "DELETE",
// "id": "cloudtasks.projects.locations.queues.tasks.delete",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Empty"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
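// Illustrative usage sketch: deleting a scheduled task by name. The task name is
// a placeholder; the call fails for tasks that have already executed
// successfully or permanently failed.
//
//	_, err := svc.Projects.Locations.Queues.Tasks.
//		Delete("projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task").
//		Context(ctx).
//		Do()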
// method id "cloudtasks.projects.locations.queues.tasks.get":
type ProjectsLocationsQueuesTasksGetCall struct {
s *Service
name string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// Get: Gets a task.
func (r *ProjectsLocationsQueuesTasksService) Get(name string) *ProjectsLocationsQueuesTasksGetCall {
c := &ProjectsLocationsQueuesTasksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
return c
}
// ResponseView sets the optional parameter "responseView": The
// response_view specifies which subset of the Task will be returned. By
// default response_view is BASIC; not all information is retrieved by
// default because some data, such as payloads, might be desirable to
// return only when needed because of its large size or because of the
// sensitivity of data that it contains. Authorization for FULL requires
// `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Task resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
func (c *ProjectsLocationsQueuesTasksGetCall) ResponseView(responseView string) *ProjectsLocationsQueuesTasksGetCall {
c.urlParams_.Set("responseView", responseView)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksGetCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesTasksGetCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesTasksGetCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksGetCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.get" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksGetCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Gets a task.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.tasks.get",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// },
// "responseView": {
// "description": "The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Unspecified. Defaults to BASIC.",
// "The basic view omits fields which can be large or can contain sensitive data. This view does not include the body in AppEngineHttpRequest. Bodies are desirable to return only when needed, because they can be large and because of the sensitivity of the data that you choose to store in it.",
// "All information is returned. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Queue resource."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+name}",
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
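// Illustrative usage sketch: fetching a single task with the FULL view so the
// request body is included (this requires the cloudtasks.tasks.fullView
// permission). The task name is a placeholder.
//
//	task, err := svc.Projects.Locations.Queues.Tasks.
//		Get("projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task").
//		ResponseView("FULL").
//		Context(ctx).
//		Do()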
// method id "cloudtasks.projects.locations.queues.tasks.list":
type ProjectsLocationsQueuesTasksListCall struct {
s *Service
parent string
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
header_ http.Header
}
// List: Lists the tasks in a queue. By default, only the BASIC view is
// retrieved due to performance considerations; response_view controls
// the subset of information which is returned. The tasks may be
// returned in any order. The ordering may change at any time.
func (r *ProjectsLocationsQueuesTasksService) List(parent string) *ProjectsLocationsQueuesTasksListCall {
c := &ProjectsLocationsQueuesTasksListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.parent = parent
return c
}
// PageSize sets the optional parameter "pageSize": Maximum page size.
// Fewer tasks than requested might be returned, even if more tasks
// exist; use next_page_token in the response to determine if more tasks
// exist. The maximum page size is 1000. If unspecified, the page size
// will be the maximum.
func (c *ProjectsLocationsQueuesTasksListCall) PageSize(pageSize int64) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
// PageToken sets the optional parameter "pageToken": A token
// identifying the page of results to return. To request the first page
// results, page_token must be empty. To request the next page of
// results, page_token must be the value of next_page_token returned
// from the previous call to ListTasks method. The page token is valid
// for only 2 hours.
func (c *ProjectsLocationsQueuesTasksListCall) PageToken(pageToken string) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
// ResponseView sets the optional parameter "responseView": The
// response_view specifies which subset of the Task will be returned. By
// default response_view is BASIC; not all information is retrieved by
// default because some data, such as payloads, might be desirable to
// return only when needed because of its large size or because of the
// sensitivity of data that it contains. Authorization for FULL requires
// `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Task resource.
//
// Possible values:
// "VIEW_UNSPECIFIED" - Unspecified. Defaults to BASIC.
// "BASIC" - The basic view omits fields which can be large or can
// contain sensitive data. This view does not include the body in
// AppEngineHttpRequest. Bodies are desirable to return only when
// needed, because they can be large and because of the sensitivity of
// the data that you choose to store in it.
// "FULL" - All information is returned. Authorization for FULL
// requires `cloudtasks.tasks.fullView` [Google
// IAM](https://cloud.google.com/iam/) permission on the Queue resource.
func (c *ProjectsLocationsQueuesTasksListCall) ResponseView(responseView string) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("responseView", responseView)
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksListCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ProjectsLocationsQueuesTasksListCall) IfNoneMatch(entityTag string) *ProjectsLocationsQueuesTasksListCall {
c.ifNoneMatch_ = entityTag
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksListCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
}
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+parent}/tasks")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("GET", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"parent": c.parent,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.list" call.
// Exactly one of *ListTasksResponse or error will be non-nil. Any
// non-2xx status code is an error. Response headers are in either
// *ListTasksResponse.ServerResponse.Header or (if a response was
// returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
func (c *ProjectsLocationsQueuesTasksListCall) Do(opts ...googleapi.CallOption) (*ListTasksResponse, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &ListTasksResponse{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Lists the tasks in a queue. By default, only the BASIC view is retrieved due to performance considerations; response_view controls the subset of information which is returned. The tasks may be returned in any order. The ordering may change at any time.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks",
// "httpMethod": "GET",
// "id": "cloudtasks.projects.locations.queues.tasks.list",
// "parameterOrder": [
// "parent"
// ],
// "parameters": {
// "pageSize": {
// "description": "Maximum page size. Fewer tasks than requested might be returned, even if more tasks exist; use next_page_token in the response to determine if more tasks exist. The maximum page size is 1000. If unspecified, the page size will be the maximum.",
// "format": "int32",
// "location": "query",
// "type": "integer"
// },
// "pageToken": {
// "description": "A token identifying the page of results to return. To request the first page results, page_token must be empty. To request the next page of results, page_token must be the value of next_page_token returned from the previous call to ListTasks method. The page token is valid for only 2 hours.",
// "location": "query",
// "type": "string"
// },
// "parent": {
// "description": "Required. The queue name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+$",
// "required": true,
// "type": "string"
// },
// "responseView": {
// "description": "The response_view specifies which subset of the Task will be returned. By default response_view is BASIC; not all information is retrieved by default because some data, such as payloads, might be desirable to return only when needed because of its large size or because of the sensitivity of data that it contains. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Task resource.",
// "enum": [
// "VIEW_UNSPECIFIED",
// "BASIC",
// "FULL"
// ],
// "enumDescriptions": [
// "Unspecified. Defaults to BASIC.",
// "The basic view omits fields which can be large or can contain sensitive data. This view does not include the body in AppEngineHttpRequest. Bodies are desirable to return only when needed, because they can be large and because of the sensitivity of the data that you choose to store in it.",
// "All information is returned. Authorization for FULL requires `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/) permission on the Queue resource."
// ],
// "location": "query",
// "type": "string"
// }
// },
// "path": "v2/{+parent}/tasks",
// "response": {
// "$ref": "ListTasksResponse"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
func (c *ProjectsLocationsQueuesTasksListCall) Pages(ctx context.Context, f func(*ListTasksResponse) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
x, err := c.Do()
if err != nil {
return err
}
if err := f(x); err != nil {
return err
}
if x.NextPageToken == "" {
return nil
}
c.PageToken(x.NextPageToken)
}
}
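// Illustrative usage sketch: walking every task in a queue with the Pages helper
// above, which follows next_page_token automatically. Assumes svc and ctx as in
// the earlier examples; the page size is an arbitrary choice.
//
//	err := svc.Projects.Locations.Queues.Tasks.
//		List("projects/my-project/locations/us-central1/queues/my-queue").
//		PageSize(100).
//		Pages(ctx, func(page *cloudtasks.ListTasksResponse) error {
//			for _, t := range page.Tasks {
//				fmt.Println(t.Name, t.ScheduleTime)
//			}
//			return nil
//		})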
// method id "cloudtasks.projects.locations.queues.tasks.run":
type ProjectsLocationsQueuesTasksRunCall struct {
s *Service
name string
runtaskrequest *RunTaskRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
header_ http.Header
}
// Run: Forces a task to run now. When this method is called, Cloud
// Tasks will dispatch the task, even if the task is already running,
// the queue has reached its RateLimits or is PAUSED. This command is
// meant to be used for manual debugging. For example, RunTask can be
// used to retry a failed task after a fix has been made or to manually
// force a task to be dispatched now. The dispatched task is returned.
// That is, the task that is returned contains the status after the task
// is dispatched but before the task is received by its target. If Cloud
// Tasks receives a successful response from the task's target, then the
// task will be deleted; otherwise the task's schedule_time will be
// reset to the time that RunTask was called plus the retry delay
// specified in the queue's RetryConfig. RunTask returns NOT_FOUND when
// it is called on a task that has already succeeded or permanently
// failed.
func (r *ProjectsLocationsQueuesTasksService) Run(name string, runtaskrequest *RunTaskRequest) *ProjectsLocationsQueuesTasksRunCall {
c := &ProjectsLocationsQueuesTasksRunCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
c.runtaskrequest = runtaskrequest
return c
}
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *ProjectsLocationsQueuesTasksRunCall) Fields(s ...googleapi.Field) *ProjectsLocationsQueuesTasksRunCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ProjectsLocationsQueuesTasksRunCall) Context(ctx context.Context) *ProjectsLocationsQueuesTasksRunCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ProjectsLocationsQueuesTasksRunCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
func (c *ProjectsLocationsQueuesTasksRunCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124")
for k, v := range c.header_ {
reqHeaders[k] = v
}
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.runtaskrequest)
if err != nil {
return nil, err
}
reqHeaders.Set("Content-Type", "application/json")
c.urlParams_.Set("alt", alt)
c.urlParams_.Set("prettyPrint", "false")
urls := googleapi.ResolveRelative(c.s.BasePath, "v2/{+name}:run")
urls += "?" + c.urlParams_.Encode()
req, err := http.NewRequest("POST", urls, body)
if err != nil {
return nil, err
}
req.Header = reqHeaders
googleapi.Expand(req.URL, map[string]string{
"name": c.name,
})
return gensupport.SendRequest(c.ctx_, c.s.client, req)
}
// Do executes the "cloudtasks.projects.locations.queues.tasks.run" call.
// Exactly one of *Task or error will be non-nil. Any non-2xx status
// code is an error. Response headers are in either
// *Task.ServerResponse.Header or (if a response was returned at all) in
// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check
// whether the returned error was because http.StatusNotModified was
// returned.
func (c *ProjectsLocationsQueuesTasksRunCall) Do(opts ...googleapi.CallOption) (*Task, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
if res.Body != nil {
res.Body.Close()
}
return nil, &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
}
}
if err != nil {
return nil, err
}
defer googleapi.CloseBody(res)
if err := googleapi.CheckResponse(res); err != nil {
return nil, err
}
ret := &Task{
ServerResponse: googleapi.ServerResponse{
Header: res.Header,
HTTPStatusCode: res.StatusCode,
},
}
target := &ret
if err := gensupport.DecodeResponse(target, res); err != nil {
return nil, err
}
return ret, nil
// {
// "description": "Forces a task to run now. When this method is called, Cloud Tasks will dispatch the task, even if the task is already running, the queue has reached its RateLimits or is PAUSED. This command is meant to be used for manual debugging. For example, RunTask can be used to retry a failed task after a fix has been made or to manually force a task to be dispatched now. The dispatched task is returned. That is, the task that is returned contains the status after the task is dispatched but before the task is received by its target. If Cloud Tasks receives a successful response from the task's target, then the task will be deleted; otherwise the task's schedule_time will be reset to the time that RunTask was called plus the retry delay specified in the queue's RetryConfig. RunTask returns NOT_FOUND when it is called on a task that has already succeeded or permanently failed.",
// "flatPath": "v2/projects/{projectsId}/locations/{locationsId}/queues/{queuesId}/tasks/{tasksId}:run",
// "httpMethod": "POST",
// "id": "cloudtasks.projects.locations.queues.tasks.run",
// "parameterOrder": [
// "name"
// ],
// "parameters": {
// "name": {
// "description": "Required. The task name. For example: `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`",
// "location": "path",
// "pattern": "^projects/[^/]+/locations/[^/]+/queues/[^/]+/tasks/[^/]+$",
// "required": true,
// "type": "string"
// }
// },
// "path": "v2/{+name}:run",
// "request": {
// "$ref": "RunTaskRequest"
// },
// "response": {
// "$ref": "Task"
// },
// "scopes": [
// "https://www.googleapis.com/auth/cloud-platform"
// ]
// }
}
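// Illustrative usage sketch: forcing an existing task to dispatch immediately,
// for example to retry it by hand after a fix. The task name is a placeholder.
//
//	task, err := svc.Projects.Locations.Queues.Tasks.
//		Run("projects/my-project/locations/us-central1/queues/my-queue/tasks/my-task",
//			&cloudtasks.RunTaskRequest{}).
//		Context(ctx).
//		Do()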
func NewProjectsLocationsQueuesTasksService(s *Service) *ProjectsLocationsQueuesTasksService {
	rs := &ProjectsLocationsQueuesTasksService{s: s}
	return rs
}
api_op_ListOriginationNumbers.go

// Code generated by smithy-go-codegen DO NOT EDIT.
package sns
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sns/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Lists the calling account's dedicated origination numbers and their metadata.
// For more information about origination numbers, see Origination numbers
// (https://docs.aws.amazon.com/sns/latest/dg/channels-sms-originating-identities-origination-numbers.html)
// in the Amazon SNS Developer Guide.
func (c *Client) ListOriginationNumbers(ctx context.Context, params *ListOriginationNumbersInput, optFns ...func(*Options)) (*ListOriginationNumbersOutput, error) {
if params == nil {
params = &ListOriginationNumbersInput{}
}
result, metadata, err := c.invokeOperation(ctx, "ListOriginationNumbers", params, optFns, c.addOperationListOriginationNumbersMiddlewares)
if err != nil {
return nil, err
}
out := result.(*ListOriginationNumbersOutput)
out.ResultMetadata = metadata
return out, nil
}
type ListOriginationNumbersInput struct {
// The maximum number of origination numbers to return.
MaxResults *int32
	// Token returned by the previous ListOriginationNumbers request.
NextToken *string
}
type ListOriginationNumbersOutput struct {
// A NextToken string is returned when you call the ListOriginationNumbers
// operation if additional pages of records are available.
NextToken *string
// A list of the calling account's verified and pending origination numbers.
PhoneNumbers []types.PhoneNumberInformation
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func (c *Client) addOperationListOriginationNumbersMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsquery_serializeOpListOriginationNumbers{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsquery_deserializeOpListOriginationNumbers{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListOriginationNumbers(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
// ListOriginationNumbersAPIClient is a client that implements the
// ListOriginationNumbers operation.
type ListOriginationNumbersAPIClient interface {
ListOriginationNumbers(context.Context, *ListOriginationNumbersInput, ...func(*Options)) (*ListOriginationNumbersOutput, error)
}
var _ ListOriginationNumbersAPIClient = (*Client)(nil)
// ListOriginationNumbersPaginatorOptions is the paginator options for
// ListOriginationNumbers
type ListOriginationNumbersPaginatorOptions struct {
// The maximum number of origination numbers to return.
Limit int32
// Set to true if pagination should stop if the service returns a pagination token
// that matches the most recent token provided to the service.
StopOnDuplicateToken bool
}
// ListOriginationNumbersPaginator is a paginator for ListOriginationNumbers
type ListOriginationNumbersPaginator struct {
options ListOriginationNumbersPaginatorOptions
client ListOriginationNumbersAPIClient
params *ListOriginationNumbersInput
nextToken *string
firstPage bool
}
// NewListOriginationNumbersPaginator returns a new ListOriginationNumbersPaginator
func NewListOriginationNumbersPaginator(client ListOriginationNumbersAPIClient, params *ListOriginationNumbersInput, optFns ...func(*ListOriginationNumbersPaginatorOptions)) *ListOriginationNumbersPaginator {
if params == nil {
params = &ListOriginationNumbersInput{}
}
options := ListOriginationNumbersPaginatorOptions{}
if params.MaxResults != nil {
options.Limit = *params.MaxResults
}
for _, fn := range optFns {
fn(&options)
}
return &ListOriginationNumbersPaginator{
options: options,
client: client,
params: params,
firstPage: true,
}
}
// HasMorePages returns a boolean indicating whether more pages are available
func (p *ListOriginationNumbersPaginator) HasMorePages() bool {
return p.firstPage || p.nextToken != nil
}
// NextPage retrieves the next ListOriginationNumbers page.
func (p *ListOriginationNumbersPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListOriginationNumbersOutput, error) {
if !p.HasMorePages() {
return nil, fmt.Errorf("no more pages available")
}
params := *p.params
params.NextToken = p.nextToken
var limit *int32
if p.options.Limit > 0 {
limit = &p.options.Limit
}
params.MaxResults = limit
result, err := p.client.ListOriginationNumbers(ctx, ¶ms, optFns...)
if err != nil {
return nil, err
}
p.firstPage = false
prevToken := p.nextToken
p.nextToken = result.NextToken
if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken {
p.nextToken = nil
}
return result, nil
}
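// Usage sketch (illustrative, not part of the generated client): drive the paginator
// above with HasMorePages/NextPage. The concrete client value, context handling, and
// how the returned pages are consumed are assumptions; the output's fields are defined
// elsewhere in this package.
//
//	paginator := NewListOriginationNumbersPaginator(client, &ListOriginationNumbersInput{})
//	for paginator.HasMorePages() {
//		page, err := paginator.NextPage(context.TODO())
//		if err != nil {
//			return err
//		}
//		_ = page // inspect the page's origination numbers here
//	}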
func | (region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "sns",
OperationName: "ListOriginationNumbers",
}
}
| newServiceMetadataMiddleware_opListOriginationNumbers |
manageddatabasevulnerabilityassessments.go | package sql
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// ManagedDatabaseVulnerabilityAssessmentsClient is the client for the Azure SQL Database management API, which provides a
// RESTful set of web services that interact with Azure SQL Database services to manage your databases. The API enables you
// to create, retrieve, update, and delete databases.
type ManagedDatabaseVulnerabilityAssessmentsClient struct {
BaseClient
}
// NewManagedDatabaseVulnerabilityAssessmentsClient creates an instance of the
// ManagedDatabaseVulnerabilityAssessmentsClient client.
func | (subscriptionID string) ManagedDatabaseVulnerabilityAssessmentsClient {
return NewManagedDatabaseVulnerabilityAssessmentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewManagedDatabaseVulnerabilityAssessmentsClientWithBaseURI creates an instance of the
// ManagedDatabaseVulnerabilityAssessmentsClient client using a custom endpoint. Use this when interacting with an
// Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
func NewManagedDatabaseVulnerabilityAssessmentsClientWithBaseURI(baseURI string, subscriptionID string) ManagedDatabaseVulnerabilityAssessmentsClient {
return ManagedDatabaseVulnerabilityAssessmentsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CreateOrUpdate creates or updates the database's vulnerability assessment.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database for which the vulnerability assessment is defined.
// parameters - the requested resource.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters DatabaseVulnerabilityAssessment) (result DatabaseVulnerabilityAssessment, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseVulnerabilityAssessmentsClient.CreateOrUpdate")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, managedInstanceName, databaseName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "CreateOrUpdate", nil, "Failure preparing request")
return
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "CreateOrUpdate", resp, "Failure sending request")
return
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "CreateOrUpdate", resp, "Failure responding to request")
return
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string, parameters DatabaseVulnerabilityAssessment) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vulnerabilityAssessmentName": autorest.Encode("path", "default"),
}
const APIVersion = "2017-10-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) CreateOrUpdateResponder(resp *http.Response) (result DatabaseVulnerabilityAssessment, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
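// Usage sketch (illustrative only): a caller typically constructs the client with a
// subscription ID and invokes CreateOrUpdate. The subscription, resource names, and the
// Authorizer assignment below are assumptions (the autorest.Client embedded via
// BaseClient carries the Authorizer); DatabaseVulnerabilityAssessment's fields are
// defined elsewhere and are left empty here.
//
//	client := NewManagedDatabaseVulnerabilityAssessmentsClient("<subscription-id>")
//	client.Authorizer = authorizer // obtained from an auth helper of your choice
//	result, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myManagedInstance", "myDatabase", DatabaseVulnerabilityAssessment{})
//	if err != nil {
//		// handle error
//	}
//	_ = result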
// Delete removes the database's vulnerability assessment.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database for which the vulnerability assessment is defined.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) Delete(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (result autorest.Response, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseVulnerabilityAssessmentsClient.Delete")
defer func() {
sc := -1
if result.Response != nil {
sc = result.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DeletePreparer(ctx, resourceGroupName, managedInstanceName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Delete", nil, "Failure preparing request")
return
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Delete", resp, "Failure sending request")
return
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Delete", resp, "Failure responding to request")
return
}
return
}
// DeletePreparer prepares the Delete request.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vulnerabilityAssessmentName": autorest.Encode("path", "default"),
}
const APIVersion = "2017-10-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) DeleteSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the database's vulnerability assessment.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database for which the vulnerability assessment is defined.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) Get(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (result DatabaseVulnerabilityAssessment, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseVulnerabilityAssessmentsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, managedInstanceName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "Get", resp, "Failure responding to request")
return
}
return
}
// GetPreparer prepares the Get request.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) GetPreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"vulnerabilityAssessmentName": autorest.Encode("path", "default"),
}
const APIVersion = "2017-10-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) GetResponder(resp *http.Response) (result DatabaseVulnerabilityAssessment, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListByDatabase lists the vulnerability assessments of a managed database.
// Parameters:
// resourceGroupName - the name of the resource group that contains the resource. You can obtain this value
// from the Azure Resource Manager API or the portal.
// managedInstanceName - the name of the managed instance.
// databaseName - the name of the database for which the vulnerability assessment is defined.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) ListByDatabase(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (result DatabaseVulnerabilityAssessmentListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseVulnerabilityAssessmentsClient.ListByDatabase")
defer func() {
sc := -1
if result.dvalr.Response.Response != nil {
sc = result.dvalr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listByDatabaseNextResults
req, err := client.ListByDatabasePreparer(ctx, resourceGroupName, managedInstanceName, databaseName)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "ListByDatabase", nil, "Failure preparing request")
return
}
resp, err := client.ListByDatabaseSender(req)
if err != nil {
result.dvalr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "ListByDatabase", resp, "Failure sending request")
return
}
result.dvalr, err = client.ListByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "ListByDatabase", resp, "Failure responding to request")
return
}
if result.dvalr.hasNextLink() && result.dvalr.IsEmpty() {
err = result.NextWithContext(ctx)
}
return
}
// ListByDatabasePreparer prepares the ListByDatabase request.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) ListByDatabasePreparer(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"databaseName": autorest.Encode("path", databaseName),
"managedInstanceName": autorest.Encode("path", managedInstanceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-10-01-preview"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/vulnerabilityAssessments", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListByDatabaseSender sends the ListByDatabase request. The method will close the
// http.Response Body if it receives an error.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) ListByDatabaseSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListByDatabaseResponder handles the response to the ListByDatabase request. The method always
// closes the http.Response Body.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) ListByDatabaseResponder(resp *http.Response) (result DatabaseVulnerabilityAssessmentListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listByDatabaseNextResults retrieves the next set of results, if any.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) listByDatabaseNextResults(ctx context.Context, lastResults DatabaseVulnerabilityAssessmentListResult) (result DatabaseVulnerabilityAssessmentListResult, err error) {
req, err := lastResults.databaseVulnerabilityAssessmentListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "listByDatabaseNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListByDatabaseSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "listByDatabaseNextResults", resp, "Failure sending next results request")
}
result, err = client.ListByDatabaseResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "sql.ManagedDatabaseVulnerabilityAssessmentsClient", "listByDatabaseNextResults", resp, "Failure responding to next results request")
return
}
return
}
// ListByDatabaseComplete enumerates all values, automatically crossing page boundaries as required.
func (client ManagedDatabaseVulnerabilityAssessmentsClient) ListByDatabaseComplete(ctx context.Context, resourceGroupName string, managedInstanceName string, databaseName string) (result DatabaseVulnerabilityAssessmentListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ManagedDatabaseVulnerabilityAssessmentsClient.ListByDatabase")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.ListByDatabase(ctx, resourceGroupName, managedInstanceName, databaseName)
return
}
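// Usage sketch (illustrative only): ListByDatabaseComplete returns an iterator that
// crosses page boundaries transparently. NotDone, Value, and NextWithContext are the
// usual methods on autorest-generated iterators; they are defined with the models in
// this package, not in this file, so treat their exact signatures as an assumption.
//
//	iter, err := client.ListByDatabaseComplete(ctx, "myResourceGroup", "myManagedInstance", "myDatabase")
//	if err != nil {
//		// handle error
//	}
//	for iter.NotDone() {
//		assessment := iter.Value()
//		_ = assessment
//		if err := iter.NextWithContext(ctx); err != nil {
//			break
//		}
//	}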
| NewManagedDatabaseVulnerabilityAssessmentsClient |
setItem.go | package main
import (
"context"
"encoding/json"
"time"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/lestrrat-go/strftime"
utils "github.com/seike460/utakata/src"
)
func handleRequest(ctx context.Context, request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
f, _ := strftime.New("%Y%m%d%H%M%S")
id := f.FormatString(time.Now())
svc := dynamodb.New(session.New())
// Convert the JSON body into a map
byt := []byte(request.Body)
var bodyMap map[string]interface{}
if err := json.Unmarshal(byt, &bodyMap); err != nil {
panic(err)
}
input := &dynamodb.PutItemInput{
Item: map[string]*dynamodb.AttributeValue{
"id": {
N: aws.String(id),
},
"name": {
S: aws.String(bodyMap["name"].(string)),
},
"dateTime": {
S: aws.String(bodyMap["dateTime"].(string)),
},
},
TableName: aws.String("tasks"),
}
_, err := svc.PutItem(input)
if err != nil {
util | n events.APIGatewayProxyResponse{Body: request.Body, StatusCode: 200}, nil
}
func main() {
lambda.Start(handleRequest)
}
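// Invocation sketch (illustrative, not part of the Lambda): handleRequest expects an
// API Gateway proxy request whose JSON body carries "name" and "dateTime" string
// fields, which it writes to the "tasks" DynamoDB table. The example values are
// assumptions, and running this for real requires AWS credentials.
//
//	req := events.APIGatewayProxyRequest{Body: `{"name": "buy milk", "dateTime": "2021-04-01 12:00:00"}`}
//	resp, err := handleRequest(context.Background(), req)
//	_, _ = resp, err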
| s.AwsErrorPrint(err)
}
retur |
util.py | # -*- encoding: utf-8 -*-
"""
Flask Boilerplate
Author: AppSeed.us - App Generator
"""
from flask import json
from app import app, db
from .common import *
# build a JSON response
def response(data):
return app.response_class(response=json.dumps(data),
status=200,
mimetype='application/json')
def g_db_commit():
db.session.commit()
def g_db_add(obj):
|
def g_db_del(obj):
if obj:
db.session.delete(obj)
| if obj:
db.session.add(obj) |
errors.ts |
export function isError(value: any): value is Error {
return (value as Error).errorMessage !== undefined;
} | export interface Error {
errorMessage: string;
} |
|
__init__.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator. |
from ._device_update import DeviceUpdate
__all__ = ['DeviceUpdate'] | # Changes may cause incorrect behavior and will be lost if the code is regenerated.
# -------------------------------------------------------------------------- |
requests.js | function Vote(amount, requestid) {
if (typeof amount == 'undefined') {
amount = parseInt($('#amount').raw().value);
}
if (amount == 0) {
amount = 20 * 1024 * 1024;
}
var index;
var votecount;
if (!requestid) {
requestid = $('#requestid').raw().value;
votecount = $('#votecount').raw();
index = false;
} else {
votecount = $('#vote_count_' + requestid).raw();
bounty = $('#bounty_' + requestid).raw();
index = true;
}
if (amount > 20 * 1024 * 1024) {
upload = $('#current_uploaded').raw().value;
download = $('#current_downloaded').raw().value;
rr = $('#current_rr').raw().value;
if (amount > 0.3 * (upload - rr * download)) {
if (!confirm('This vote is more than 30% of your buffer. Please confirm that you wish to place this large of a vote.')) {
return false;
}
}
}
ajax.get('requests.php?action=takevote&id=' + requestid + '&auth=' + authkey + '&amount=' + amount, function (response) {
if (response == 'bankrupt') {
error_message("You do not have sufficient upload credit to add " + get_size(amount) + " to this request");
return;
} else if (response == 'missing') {
error_message("Cannot find this request");
return;
} else if (response == 'filled') {
error_message("This request has already been filled");
return;
} else if (response == 'success') {
votecount.innerHTML = (parseInt(votecount.innerHTML)) + 1;
}
if ($('#total_bounty').results() > 0) {
totalBounty = parseInt($('#total_bounty').raw().value);
totalBounty += (amount * (1 - $('#request_tax').raw().value));
var requestTax = $('#request_tax').raw().value;
$('#total_bounty').raw().value = totalBounty;
$('#formatted_bounty').raw().innerHTML = get_size(totalBounty);
if (requestTax > 0) {
save_message("Your vote of " + get_size(amount) + ", adding a " + get_size(amount * (1 - $('#request_tax').raw().value)) + " bounty, has been added");
} else {
save_message("Your vote of " + get_size(amount) + " has been added");
}
$('#button').raw().disabled = true;
} else {
save_message("Your vote of " + get_size(amount) + " has been added");
}
});
}
function Calculate() {
var mul = (($('#unit').raw().options[$('#unit').raw().selectedIndex].value == 'mb') ? (1024*1024) : (1024*1024*1024));
var amt = Math.floor($('#amount_box').raw().value * mul);
if (amt > $('#current_uploaded').raw().value) {
$('#new_uploaded').raw().innerHTML = "You can't afford that request!";
$('#new_bounty').raw().innerHTML = "0.00 MB";
$('#bounty_after_tax').raw().innerHTML = "0.00 MB";
$('#button').raw().disabled = true;
} else if (isNaN($('#amount_box').raw().value)
|| (window.location.search.indexOf('action=new') != -1 && $('#amount_box').raw().value * mul < 100 * 1024 * 1024)
|| (window.location.search.indexOf('action=view') != -1 && $('#amount_box').raw().value * mul < 20 * 1024 * 1024)) {
$('#new_uploaded').raw().innerHTML = get_size(($('#current_uploaded').raw().value));
$('#new_bounty').raw().innerHTML = "0.00 MB";
$('#bounty_after_tax').raw().innerHTML = "0.00 MB";
$('#button').raw().disabled = true;
} else {
$('#button').raw().disabled = false;
$('#amount').raw().value = amt;
$('#new_uploaded').raw().innerHTML = get_size(($('#current_uploaded').raw().value) - amt);
$('#new_ratio').raw().innerHTML = ratio($('#current_uploaded').raw().value - amt, $('#current_downloaded').raw().value);
$('#new_bounty').raw().innerHTML = get_size(mul * $('#amount_box').raw().value);
$('#bounty_after_tax').raw().innerHTML = get_size(mul * (1 - $('#request_tax').raw().value) * $('#amount_box').raw().value);
}
}
function AddArtistField() {
var ArtistCount = document.getElementsByName("artists[]").length;
if (ArtistCount >= 200) {
return;
}
var ArtistField = document.createElement("input");
ArtistField.type = "text";
ArtistField.id = "artist_" + ArtistCount;
ArtistField.name = "artists[]";
ArtistField.size = 45;
ArtistField.onblur = CheckVA;
var ImportanceField = document.createElement("select");
ImportanceField.id = "importance";
ImportanceField.name = "importance[]";
ImportanceField.options[0] = new Option("Main", "1");
ImportanceField.options[1] = new Option("Guest", "2");
ImportanceField.options[2] = new Option("Composer", "4");
ImportanceField.options[3] = new Option("Conductor", "5");
ImportanceField.options[4] = new Option("DJ / Compiler", "6");
ImportanceField.options[5] = new Option("Remixer", "3");
ImportanceField.options[6] = new Option("Producer", "7");
var x = $('#artistfields').raw();
x.appendChild(document.createElement("br"));
x.appendChild(ArtistField);
x.appendChild(document.createTextNode('\n'));
x.appendChild(ImportanceField);
if ($("#artist_0").data("gazelle-autocomplete")) {
$(ArtistField).live('focus', function() {
$(ArtistField).autocomplete({
serviceUrl : 'artist.php?action=autocomplete'
});
});
}
}
function CheckVA () {
var ArtistCount = document.getElementsByName("artists[]").length;
var shown = false;
for (var i = 0; i < ArtistCount; i++) {
var artistId = "#artist_" + i;
if ($(artistId).raw().value.toLowerCase().trim().match(/^(va|various(\sa|a)rtis(t|ts)|various)$/)) {
$('#vawarning').gshow();
shown = true;
break;
}
}
if (!shown) {
$('#vawarning').ghide();
}
}
function RemoveArtistField() {
var ArtistCount = document.getElementsByName("artists[]").length;
if (ArtistCount === 1) {
return;
}
var x = $('#artistfields').raw();
while (x.lastChild.tagName !== "INPUT") {
x.removeChild(x.lastChild);
}
x.removeChild(x.lastChild);
x.removeChild(x.lastChild); //Remove trailing new line.
ArtistCount--;
}
function Categories() {
var cat = $('#categories').raw().options[$('#categories').raw().selectedIndex].value;
if (cat == "Music") {
$('#artist_tr').gshow();
$('#releasetypes_tr').gshow();
$('#formats_tr').gshow();
$('#bitrates_tr').gshow();
$('#media_tr').gshow();
ToggleLogCue();
$('#year_tr').gshow();
$('#cataloguenumber_tr').gshow();
} else if (cat == "Audiobooks" || cat == "Comedy") {
$('#year_tr').gshow();
$('#artist_tr').ghide();
$('#releasetypes_tr').ghide();
$('#formats_tr').ghide();
$('#bitrates_tr').ghide();
$('#media_tr').ghide();
$('#logcue_tr').ghide();
$('#cataloguenumber_tr').ghide();
} else {
$('#artist_tr').ghide();
$('#releasetypes_tr').ghide();
$('#formats_tr').ghide();
$('#bitrates_tr').ghide();
$('#media_tr').ghide();
$('#logcue_tr').ghide();
$('#year_tr').ghide();
$('#cataloguenumber_tr').ghide();
}
}
function add_tag() {
if ($('#tags').raw().value == "") {
$('#tags').raw().value = $('#genre_tags').raw().options[$('#genre_tags').raw().selectedIndex].value;
} else if ($('#genre_tags').raw().options[$('#genre_tags').raw().selectedIndex].value == "---") {
} else {
$('#tags').raw().value = $('#tags').raw().value + ", " + $('#genre_tags').raw().options[$('#genre_tags').raw().selectedIndex].value;
}
}
function T | id, disable) {
var arr = document.getElementsByName(id + '[]');
var master = $('#toggle_' + id).raw().checked;
for (var x in arr) {
arr[x].checked = master;
if (disable == 1) {
arr[x].disabled = master;
}
}
if (id == "formats") {
ToggleLogCue();
}
}
function ToggleLogCue() {
var formats = document.getElementsByName('formats[]');
var flac = false;
if (formats[1].checked) {
flac = true;
}
if (flac) {
$('#logcue_tr').gshow();
} else {
$('#logcue_tr').ghide();
}
ToggleLogScore();
}
function ToggleLogScore() {
if ($('#needlog').raw().checked) {
$('#minlogscore_span').gshow();
} else {
$('#minlogscore_span').ghide();
}
}
document.addEventListener('DOMContentLoaded', function() {
document.getElementById('amount_box').addEventListener('input', function() {
Calculate();
});
Calculate();
});
| oggle( |
tags.go | package aws
import (
"fmt"
"log"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elbv2"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
// tagsSchema returns the schema to use for tags.
//
func tagsSchema() *schema.Schema {
return &schema.Schema{
Type: schema.TypeMap,
Optional: true,
}
}
func tagsSchemaComputed() *schema.Schema {
return &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Computed: true,
}
}
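// Illustrative sketch (not used by the provider): resource definitions reference these
// helpers for their "tags" attribute. The attribute set below is hypothetical; only the
// use of tagsSchema() is the point.
func exampleResourceSchema() map[string]*schema.Schema {
	return map[string]*schema.Schema{
		"name": {Type: schema.TypeString, Required: true},
		"tags": tagsSchema(),
	}
}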
func setElbV2Tags(conn *elbv2.ELBV2, d *schema.ResourceData) error {
if d.HasChange("tags") {
oraw, nraw := d.GetChange("tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffElbV2Tags(tagsFromMapELBv2(o), tagsFromMapELBv2(n))
// Set tags
if len(remove) > 0 {
var tagKeys []*string
for _, tag := range remove {
tagKeys = append(tagKeys, tag.Key)
}
log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id())
_, err := conn.RemoveTags(&elbv2.RemoveTagsInput{
ResourceArns: []*string{aws.String(d.Id())},
TagKeys: tagKeys,
})
if err != nil {
return err
}
}
if len(create) > 0 {
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.AddTags(&elbv2.AddTagsInput{
ResourceArns: []*string{aws.String(d.Id())},
Tags: create,
})
if err != nil {
return err
}
}
}
return nil
}
func setVolumeTags(conn *ec2.EC2, d *schema.ResourceData) error {
if d.HasChange("volume_tags") {
oraw, nraw := d.GetChange("volume_tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffTags(tagsFromMap(o), tagsFromMap(n))
volumeIds, err := getAwsInstanceVolumeIds(conn, d)
if err != nil {
return err
}
if len(remove) > 0 {
err := resource.Retry(2*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Removing volume tags: %#v from %s", remove, d.Id())
_, err := conn.DeleteTags(&ec2.DeleteTagsInput{
Resources: volumeIds,
Tags: remove,
})
if err != nil {
ec2err, ok := err.(awserr.Error)
if ok && strings.Contains(ec2err.Code(), ".NotFound") {
return resource.RetryableError(err) // retry
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
// Retry without time bounds for EC2 throttling
if isResourceTimeoutError(err) {
log.Printf("[DEBUG] Removing volume tags: %#v from %s", remove, d.Id())
_, err := conn.DeleteTags(&ec2.DeleteTagsInput{
Resources: volumeIds,
Tags: remove,
})
if err != nil {
return err
}
} else {
return err
}
}
}
if len(create) > 0 {
err := resource.Retry(2*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Creating vol tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: volumeIds,
Tags: create,
})
if err != nil |
return nil
})
if err != nil {
// Retry without time bounds for EC2 throttling
if isResourceTimeoutError(err) {
log.Printf("[DEBUG] Creating vol tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: volumeIds,
Tags: create,
})
if err != nil {
return err
}
} else {
return err
}
}
}
}
return nil
}
// setTags is a helper to set the tags for a resource. It expects the
// tags field to be named "tags"
func setTags(conn *ec2.EC2, d *schema.ResourceData) error {
if d.HasChange("tags") {
oraw, nraw := d.GetChange("tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffTags(tagsFromMap(o), tagsFromMap(n))
// Set tags
if len(remove) > 0 {
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id())
_, err := conn.DeleteTags(&ec2.DeleteTagsInput{
Resources: []*string{aws.String(d.Id())},
Tags: remove,
})
if err != nil {
ec2err, ok := err.(awserr.Error)
if ok && strings.Contains(ec2err.Code(), ".NotFound") {
return resource.RetryableError(err) // retry
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
// Retry without time bounds for EC2 throttling
if isResourceTimeoutError(err) {
log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id())
_, err := conn.DeleteTags(&ec2.DeleteTagsInput{
Resources: []*string{aws.String(d.Id())},
Tags: remove,
})
if err != nil {
return err
}
} else {
return err
}
}
}
if len(create) > 0 {
err := resource.Retry(5*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: []*string{aws.String(d.Id())},
Tags: create,
})
if err != nil {
ec2err, ok := err.(awserr.Error)
if ok && strings.Contains(ec2err.Code(), ".NotFound") {
return resource.RetryableError(err) // retry
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
// Retry without time bounds for EC2 throttling
if isResourceTimeoutError(err) {
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.CreateTags(&ec2.CreateTagsInput{
Resources: []*string{aws.String(d.Id())},
Tags: create,
})
if err != nil {
return err
}
} else {
return err
}
}
}
}
return nil
}
// diffTags takes our tags locally and the ones remotely and returns
// the set of tags that must be created, and the set of tags that must
// be destroyed.
func diffTags(oldTags, newTags []*ec2.Tag) ([]*ec2.Tag, []*ec2.Tag) {
// First, we're creating everything we have
create := make(map[string]interface{})
for _, t := range newTags {
create[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
}
// Build the list of what to remove
var remove []*ec2.Tag
for _, t := range oldTags {
old, ok := create[aws.StringValue(t.Key)]
if !ok || old != aws.StringValue(t.Value) {
remove = append(remove, t)
} else if ok {
// already present so remove from new
delete(create, aws.StringValue(t.Key))
}
}
return tagsFromMap(create), remove
}
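// Worked example (illustrative, not called by the provider): with old tags
// {Name: "web", Env: "dev"} and new tags {Name: "web", Env: "prod"}, diffTags reports
// Env=prod in the create set and Env=dev in the remove set; the unchanged Name tag is
// dropped from the create set.
func exampleDiffTags() (create []*ec2.Tag, remove []*ec2.Tag) {
	oldTags := tagsFromMap(map[string]interface{}{"Name": "web", "Env": "dev"})
	newTags := tagsFromMap(map[string]interface{}{"Name": "web", "Env": "prod"})
	return diffTags(oldTags, newTags)
}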
// tagsFromMap returns the tags for the given map of data.
func tagsFromMap(m map[string]interface{}) []*ec2.Tag {
result := make([]*ec2.Tag, 0, len(m))
for k, v := range m {
t := &ec2.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
}
if !tagIgnored(t) {
result = append(result, t)
}
}
return result
}
// tagsToMap turns the list of tags into a map.
func tagsToMap(ts []*ec2.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
if !tagIgnored(t) {
result[*t.Key] = *t.Value
}
}
return result
}
func diffElbV2Tags(oldTags, newTags []*elbv2.Tag) ([]*elbv2.Tag, []*elbv2.Tag) {
// First, we're creating everything we have
create := make(map[string]interface{})
for _, t := range newTags {
create[*t.Key] = *t.Value
}
// Build the list of what to remove
var remove []*elbv2.Tag
for _, t := range oldTags {
old, ok := create[*t.Key]
if !ok || old != *t.Value {
// Delete it!
remove = append(remove, t)
}
}
return tagsFromMapELBv2(create), remove
}
// tagsToMapELBv2 turns the list of tags into a map.
func tagsToMapELBv2(ts []*elbv2.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
if !tagIgnoredELBv2(t) {
result[*t.Key] = *t.Value
}
}
return result
}
// tagsFromMapELBv2 returns the tags for the given map of data.
func tagsFromMapELBv2(m map[string]interface{}) []*elbv2.Tag {
var result []*elbv2.Tag
for k, v := range m {
t := &elbv2.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
}
if !tagIgnoredELBv2(t) {
result = append(result, t)
}
}
return result
}
// tagIgnored compares a tag against a list of strings and checks if it should
// be ignored or not
func tagIgnored(t *ec2.Tag) bool {
filter := []string{"^aws:"}
for _, v := range filter {
log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
if r, _ := regexp.MatchString(v, *t.Key); r == true {
log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
return true
}
}
return false
}
// and for ELBv2 as well
func tagIgnoredELBv2(t *elbv2.Tag) bool {
filter := []string{"^aws:"}
for _, v := range filter {
log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key)
if r, _ := regexp.MatchString(v, *t.Key); r == true {
log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value)
return true
}
}
return false
}
// tagsToMapDynamoDb turns the list of tags into a map for dynamoDB
func tagsToMapDynamoDb(ts []*dynamodb.Tag) map[string]string {
result := make(map[string]string)
for _, t := range ts {
result[*t.Key] = *t.Value
}
return result
}
// tagsFromMapDynamoDb returns the tags for a given map
func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag {
result := make([]*dynamodb.Tag, 0, len(m))
for k, v := range m {
t := &dynamodb.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
}
result = append(result, t)
}
return result
}
// setTagsDynamoDb is a helper to set the tags for a dynamoDB resource
// This is needed because dynamodb requires a completely different set and delete
// method from the ec2 tag resource handling. Also the `UntagResource` method
// for dynamoDB only requires a list of tag keys, instead of the full map of keys.
func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error {
arn := d.Get("arn").(string)
oraw, nraw := d.GetChange("tags")
o := oraw.(map[string]interface{})
n := nraw.(map[string]interface{})
create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n))
// Set tags
if len(remove) > 0 {
err := resource.Retry(2*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id())
_, err := conn.UntagResource(&dynamodb.UntagResourceInput{
ResourceArn: aws.String(arn),
TagKeys: remove,
})
if err != nil {
if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return err
}
}
if len(create) > 0 {
err := resource.Retry(2*time.Minute, func() *resource.RetryError {
log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id())
_, err := conn.TagResource(&dynamodb.TagResourceInput{
ResourceArn: aws.String(arn),
Tags: create,
})
if err != nil {
if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// diffTagsDynamoDb takes the local set of dynamodb tags and the ones found remotely,
// and returns the set of tags that must be created along with the list of tag keys
// that must be destroyed.
func diffTagsDynamoDb(oldTags, newTags []*dynamodb.Tag) ([]*dynamodb.Tag, []*string) {
create := make(map[string]interface{})
for _, t := range newTags {
create[*t.Key] = *t.Value
}
var remove []*string
for _, t := range oldTags {
// Verify the old tag is not a tag we're currently attempting to create
old, ok := create[*t.Key]
if !ok || old != *t.Value {
remove = append(remove, t.Key)
}
}
return tagsFromMapDynamoDb(create), remove
}
// tagsMapToHash returns a stable hash value for a raw tags map.
// The returned value may be negative (i.e. not suitable for a 'Set' function).
func tagsMapToHash(tags map[string]interface{}) int {
total := 0
for k, v := range tags {
total = total ^ hashcode.String(fmt.Sprintf("%s-%s", k, v))
}
return total
}
// tagsMapToRaw converts a tags map to its "raw" type.
func tagsMapToRaw(m map[string]string) map[string]interface{} {
raw := make(map[string]interface{})
for k, v := range m {
raw[k] = v
}
return raw
}
| {
ec2err, ok := err.(awserr.Error)
if ok && strings.Contains(ec2err.Code(), ".NotFound") {
return resource.RetryableError(err) // retry
}
return resource.NonRetryableError(err)
} |
vmi_test.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package watch
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/client-go/tools/cache"
k8sv1 "k8s.io/api/core/v1"
"github.com/golang/mock/gomock"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache/testing"
"k8s.io/client-go/tools/record"
"fmt"
"github.com/onsi/ginkgo/extensions/table"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/testing"
"k8s.io/apimachinery/pkg/types"
"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/log"
"kubevirt.io/kubevirt/pkg/testutils"
"kubevirt.io/kubevirt/pkg/virt-controller/services"
)
var _ = Describe("VirtualMachineInstance watcher", func() {
log.Log.SetIOWriter(GinkgoWriter)
var ctrl *gomock.Controller
var vmiInterface *kubecli.MockVirtualMachineInstanceInterface
var vmiSource *framework.FakeControllerSource
var podSource *framework.FakeControllerSource
var vmiInformer cache.SharedIndexInformer
var podInformer cache.SharedIndexInformer
var stop chan struct{}
var controller *VMIController
var recorder *record.FakeRecorder
var mockQueue *testutils.MockWorkQueue
var podFeeder *testutils.PodFeeder
var virtClient *kubecli.MockKubevirtClient
var kubeClient *fake.Clientset
var configMapInformer cache.SharedIndexInformer
shouldExpectPodCreation := func(uid types.UID) {
// Expect pod creation
kubeClient.Fake.PrependReactor("create", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
update, ok := action.(testing.CreateAction)
Expect(ok).To(BeTrue())
Expect(update.GetObject().(*k8sv1.Pod).Annotations[v1.OwnedByAnnotation]).To(Equal("virt-controller"))
Expect(update.GetObject().(*k8sv1.Pod).Annotations[v1.CreatedByAnnotation]).To(Equal(string(uid)))
return true, update.GetObject(), nil
})
}
shouldExpectPodDeletion := func(pod *k8sv1.Pod) {
// Expect pod creation
kubeClient.Fake.PrependReactor("delete", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
update, ok := action.(testing.DeleteAction)
Expect(ok).To(BeTrue())
Expect(pod.Namespace).To(Equal(update.GetNamespace()))
Expect(pod.Name).To(Equal(update.GetName()))
return true, nil, nil
})
}
shouldExpectVirtualMachineHandover := func(vmi *v1.VirtualMachineInstance) {
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Phase).To(Equal(v1.Scheduled))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
Expect(arg.(*v1.VirtualMachineInstance).Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
}).Return(vmi, nil)
}
shouldExpectPodHandover := func() {
kubeClient.Fake.PrependReactor("update", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
update, ok := action.(testing.UpdateAction)
Expect(ok).To(BeTrue())
Expect(update.GetObject().(*k8sv1.Pod).Annotations[v1.OwnedByAnnotation]).To(Equal("virt-handler"))
return true, update.GetObject(), nil
})
}
ignorePodUpdates := func() {
kubeClient.Fake.PrependReactor("update", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
update, _ := action.(testing.UpdateAction)
return true, update.GetObject(), nil
})
}
shouldExpectVirtualMachineSchedulingState := func(vmi *v1.VirtualMachineInstance) {
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Phase).To(Equal(v1.Scheduling))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
Expect(arg.(*v1.VirtualMachineInstance).Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
}).Return(vmi, nil)
}
shouldExpectVirtualMachineFailedState := func(vmi *v1.VirtualMachineInstance) {
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Phase).To(Equal(v1.Failed))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
Expect(arg.(*v1.VirtualMachineInstance).Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
}).Return(vmi, nil)
}
syncCaches := func(stop chan struct{}) {
go vmiInformer.Run(stop)
go podInformer.Run(stop)
Expect(cache.WaitForCacheSync(stop, vmiInformer.HasSynced, podInformer.HasSynced)).To(BeTrue())
}
BeforeEach(func() {
stop = make(chan struct{})
ctrl = gomock.NewController(GinkgoT())
virtClient = kubecli.NewMockKubevirtClient(ctrl)
vmiInterface = kubecli.NewMockVirtualMachineInstanceInterface(ctrl)
vmiInformer, vmiSource = testutils.NewFakeInformerFor(&v1.VirtualMachineInstance{})
podInformer, podSource = testutils.NewFakeInformerFor(&k8sv1.Pod{})
recorder = record.NewFakeRecorder(100)
configMapInformer, _ = testutils.NewFakeInformerFor(&k8sv1.Pod{})
controller = NewVMIController(services.NewTemplateService("a", "b", "c", configMapInformer.GetStore()), vmiInformer, podInformer, recorder, virtClient, configMapInformer)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
controller.Queue = mockQueue
podFeeder = testutils.NewPodFeeder(mockQueue, podSource)
// Set up mock client
virtClient.EXPECT().VirtualMachineInstance(k8sv1.NamespaceDefault).Return(vmiInterface).AnyTimes()
kubeClient = fake.NewSimpleClientset()
virtClient.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
// Make sure that all unexpected calls to kubeClient will fail
kubeClient.Fake.PrependReactor("*", "*", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
Expect(action).To(BeNil())
return true, nil, nil
})
syncCaches(stop)
})
AfterEach(func() {
close(stop)
// Ensure that we add checks for expected events to every test
Expect(recorder.Events).To(BeEmpty())
ctrl.Finish()
})
addVirtualMachine := func(vmi *v1.VirtualMachineInstance) {
mockQueue.ExpectAdds(1)
vmiSource.Add(vmi)
mockQueue.Wait()
}
Context("On valid VirtualMachineInstance given", func() {
It("should create a corresponding Pod on VirtualMachineInstance creation", func() {
vmi := NewPendingVirtualMachine("testvmi")
addVirtualMachine(vmi)
shouldExpectPodCreation(vmi.UID)
controller.Execute()
testutils.ExpectEvent(recorder, SuccessfulCreatePodReason)
})
table.DescribeTable("should delete the corresponding Pod on VirtualMachineInstance deletion with vmi", func(phase v1.VirtualMachineInstancePhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = phase
vmi.DeletionTimestamp = now()
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectPodDeletion(pod)
if vmi.IsUnprocessed() {
shouldExpectVirtualMachineSchedulingState(vmi)
}
controller.Execute()
testutils.ExpectEvent(recorder, SuccessfulDeletePodReason)
},
table.Entry("in running state", v1.Running),
table.Entry("in unset state", v1.VmPhaseUnset),
table.Entry("in pending state", v1.Pending),
table.Entry("in succeeded state", v1.Succeeded),
table.Entry("in failed state", v1.Failed),
table.Entry("in scheduled state", v1.Scheduled),
table.Entry("in scheduling state", v1.Scheduling),
)
It("should not try to delete a pod again, which is already marked for deletion and go to failed state, when in sheduling state", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
vmi.DeletionTimestamp = now()
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectPodDeletion(pod)
controller.Execute()
testutils.ExpectEvent(recorder, SuccessfulDeletePodReason)
modifiedPod := pod.DeepCopy()
modifiedPod.DeletionTimestamp = now()
podFeeder.Modify(modifiedPod)
shouldExpectVirtualMachineFailedState(vmi)
controller.Execute()
})
table.DescribeTable("should not delete the corresponding Pod if the vmi is in", func(phase v1.VirtualMachineInstancePhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = phase
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
addVirtualMachine(vmi)
podFeeder.Add(pod)
controller.Execute()
},
table.Entry("succeeded state", v1.Failed),
table.Entry("failed state", v1.Succeeded),
)
It("should do nothing if the vmi is in final state", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Failed
vmi.Finalizers = []string{}
addVirtualMachine(vmi)
controller.Execute()
})
It("should set an error condition if creating the pod fails", func() {
vmi := NewPendingVirtualMachine("testvmi")
addVirtualMachine(vmi)
kubeClient.Fake.PrependReactor("create", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("random error")
})
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal("FailedCreate"))
}).Return(vmi, nil)
controller.Execute()
testutils.ExpectEvent(recorder, FailedCreatePodReason)
})
It("should remove the error condition if the sync finally succeeds", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Conditions = []v1.VirtualMachineInstanceCondition{{Type: v1.VirtualMachineInstanceSynchronized}}
addVirtualMachine(vmi)
// Expect pod creation
kubeClient.Fake.PrependReactor("create", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
update, ok := action.(testing.CreateAction)
Expect(ok).To(BeTrue())
Expect(update.GetObject().(*k8sv1.Pod).Annotations[v1.OwnedByAnnotation]).To(Equal("virt-controller"))
Expect(update.GetObject().(*k8sv1.Pod).Annotations[v1.CreatedByAnnotation]).To(Equal(string(vmi.UID)))
return true, update.GetObject(), nil
})
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
}).Return(vmi, nil)
controller.Execute()
testutils.ExpectEvent(recorder, SuccessfulCreatePodReason)
})
table.DescribeTable("should move the vmi to scheduling state if a pod exists", func(phase k8sv1.PodPhase, isReady bool) {
vmi := NewPendingVirtualMachine("testvmi")
pod := NewPodForVirtualMachine(vmi, phase)
pod.Status.ContainerStatuses[0].Ready = isReady
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectVirtualMachineSchedulingState(vmi)
if phase == k8sv1.PodRunning && isReady {
shouldExpectPodHandover()
}
controller.Execute()
if phase == k8sv1.PodRunning && isReady {
testutils.ExpectEvent(recorder, SuccessfulHandOverPodReason)
}
},
table.Entry(", not ready and in running state", k8sv1.PodRunning, false),
table.Entry(", not ready and in unknown state", k8sv1.PodUnknown, false),
table.Entry(", not ready and in succeeded state", k8sv1.PodSucceeded, false),
table.Entry(", not ready and in failed state", k8sv1.PodFailed, false),
table.Entry(", not ready and in pending state", k8sv1.PodPending, false),
table.Entry(", ready and in running state", k8sv1.PodRunning, true),
table.Entry(", ready and in unknown state", k8sv1.PodUnknown, true),
table.Entry(", ready and in succeeded state", k8sv1.PodSucceeded, true),
table.Entry(", ready and in failed state", k8sv1.PodFailed, true),
table.Entry(", ready and in pending state", k8sv1.PodPending, true),
)
Context("when pod failed to schedule", func() {
It("should set scheduling pod condition on the VirtualMachineInstance", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
pod := NewPodForVirtualMachine(vmi, k8sv1.PodPending)
pod.Status.Conditions = append(pod.Status.Conditions, k8sv1.PodCondition{
Message: "Insufficient memory",
Reason: "Unschedulable",
Status: k8sv1.ConditionFalse,
Type: k8sv1.PodScheduled,
})
addVirtualMachine(vmi)
podFeeder.Add(pod)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Phase).To(Equal(v1.Scheduling))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).NotTo(BeEmpty())
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Message).To(Equal("Insufficient memory"))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal("Unschedulable"))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Status).To(Equal(k8sv1.ConditionFalse))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Type).To(Equal(v1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled)))
Expect(arg.(*v1.VirtualMachineInstance).Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
}).Return(vmi, nil)
controller.Execute()
})
})
Context("when Pod recovers from scheduling issues", func() {
table.DescribeTable("it should remove scheduling pod condition from the VirtualMachineInstance if the pod", func(owner string, podPhase k8sv1.PodPhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
pod := NewPodForVirtualMachine(vmi, podPhase)
pod.Annotations[v1.OwnedByAnnotation] = owner
vmi.Status.Conditions = append(vmi.Status.Conditions, v1.VirtualMachineInstanceCondition{
Message: "Insufficient memory",
Reason: "Unschedulable",
Status: k8sv1.ConditionFalse,
Type: v1.VirtualMachineInstanceConditionType(k8sv1.PodScheduled),
})
addVirtualMachine(vmi)
podFeeder.Add(pod)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
}).Return(vmi, nil)
ignorePodUpdates()
controller.Execute()
testutils.IgnoreEvents(recorder)
},
table.Entry("is owned by virt-handler and is running", "virt-handler", k8sv1.PodRunning),
table.Entry("is owned by virt-controller and is running", "virt-controller", k8sv1.PodRunning),
table.Entry("is owned by virt-controller and is pending", "virt-controller", k8sv1.PodPending),
)
})
It("should move the vmi to failed state if the pod disappears and the vmi is in scheduling state", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
addVirtualMachine(vmi)
shouldExpectVirtualMachineFailedState(vmi)
controller.Execute() |
addVirtualMachine(vmi)
shouldExpectVirtualMachineFailedState(vmi)
controller.Execute()
})
It("should hand over pod to virt-handler if pod is ready and running", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectPodHandover()
controller.Execute()
testutils.ExpectEvent(recorder, SuccessfulHandOverPodReason)
})
It("should set an error condition if deleting the virtual machine pod fails", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.DeletionTimestamp = now()
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
// Expect pod delete
kubeClient.Fake.PrependReactor("delete", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("random error")
})
addVirtualMachine(vmi)
podFeeder.Add(pod)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal(FailedDeletePodReason))
}).Return(vmi, nil)
controller.Execute()
testutils.ExpectEvent(recorder, FailedDeletePodReason)
})
It("should set an error condition if handing over the pod to virt-handler fails", func() {
vmi := NewPendingVirtualMachine("testvmi")
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
// Expect pod hand over
kubeClient.Fake.PrependReactor("update", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("random error")
})
addVirtualMachine(vmi)
podFeeder.Add(pod)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal(FailedHandOverPodReason))
}).Return(vmi, nil)
controller.Execute()
testutils.ExpectEvent(recorder, FailedHandOverPodReason)
})
It("should set an error condition if creating the virtual machine pod fails", func() {
vmi := NewPendingVirtualMachine("testvmi")
// Expect pod creation
kubeClient.Fake.PrependReactor("create", "pods", func(action testing.Action) (handled bool, obj runtime.Object, err error) {
return true, nil, fmt.Errorf("random error")
})
addVirtualMachine(vmi)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions[0].Reason).To(Equal(FailedCreatePodReason))
}).Return(vmi, nil)
controller.Execute()
testutils.ExpectEvent(recorder, FailedCreatePodReason)
})
It("should update the virtual machine to scheduled if pod is ready, runnning and handed over to virt-handler", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
pod := NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
pod.Annotations[v1.OwnedByAnnotation] = "virt-handler"
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectVirtualMachineHandover(vmi)
controller.Execute()
})
It("should update the virtual machine to scheduled if pod is ready, triggered by pod change", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
pod := NewPodForVirtualMachine(vmi, k8sv1.PodPending)
addVirtualMachine(vmi)
podFeeder.Add(pod)
controller.Execute()
pod = NewPodForVirtualMachine(vmi, k8sv1.PodRunning)
pod.Annotations[v1.OwnedByAnnotation] = "virt-handler"
podFeeder.Modify(pod)
shouldExpectVirtualMachineHandover(vmi)
controller.Execute()
})
It("should update the virtual machine to failed if pod was not ready, triggered by pod delete", func() {
vmi := NewPendingVirtualMachine("testvmi")
pod := NewPodForVirtualMachine(vmi, k8sv1.PodPending)
vmi.Status.Phase = v1.Scheduling
addVirtualMachine(vmi)
podFeeder.Add(pod)
controller.Execute()
podFeeder.Delete(pod)
shouldExpectVirtualMachineFailedState(vmi)
controller.Execute()
})
table.DescribeTable("should remove the finalizer if no pod is present and the vmi is in ", func(phase v1.VirtualMachineInstancePhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = phase
Expect(vmi.Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
addVirtualMachine(vmi)
vmiInterface.EXPECT().Update(gomock.Any()).Do(func(arg interface{}) {
Expect(arg.(*v1.VirtualMachineInstance).Status.Phase).To(Equal(phase))
Expect(arg.(*v1.VirtualMachineInstance).Status.Conditions).To(BeEmpty())
Expect(arg.(*v1.VirtualMachineInstance).Finalizers).ToNot(ContainElement(v1.VirtualMachineInstanceFinalizer))
}).Return(vmi, nil)
controller.Execute()
},
table.Entry("failed state", v1.Succeeded),
table.Entry("succeeded state", v1.Failed),
)
table.DescribeTable("should do nothing if pod is handed to virt-handler", func(phase k8sv1.PodPhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduled
pod := NewPodForVirtualMachine(vmi, phase)
pod.Annotations[v1.OwnedByAnnotation] = "virt-handler"
addVirtualMachine(vmi)
podFeeder.Add(pod)
controller.Execute()
},
table.Entry("and in running state", k8sv1.PodRunning),
table.Entry("and in unknown state", k8sv1.PodUnknown),
table.Entry("and in succeeded state", k8sv1.PodSucceeded),
table.Entry("and in failed state", k8sv1.PodFailed),
table.Entry("and in pending state", k8sv1.PodPending),
)
It("should do nothing if the vmi is handed over to virt-handler and the pod disappears", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduled
addVirtualMachine(vmi)
controller.Execute()
})
table.DescribeTable("should move the vmi to failed if pod is not handed over", func(phase k8sv1.PodPhase) {
vmi := NewPendingVirtualMachine("testvmi")
vmi.Status.Phase = v1.Scheduling
Expect(vmi.Finalizers).To(ContainElement(v1.VirtualMachineInstanceFinalizer))
pod := NewPodForVirtualMachine(vmi, phase)
addVirtualMachine(vmi)
podFeeder.Add(pod)
shouldExpectVirtualMachineFailedState(vmi)
controller.Execute()
},
table.Entry("and in succeeded state", k8sv1.PodSucceeded),
table.Entry("and in failed state", k8sv1.PodFailed),
)
})
})
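// NewPendingVirtualMachine returns a minimal VirtualMachineInstance fixture in the
// Pending phase with the initialized annotation set, as used by the tests above.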
func NewPendingVirtualMachine(name string) *v1.VirtualMachineInstance {
vmi := v1.NewMinimalVMI(name)
vmi.UID = "1234"
vmi.Status.Phase = v1.Pending
addInitializedAnnotation(vmi)
return vmi
}
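// NewPodForVirtualMachine returns a virt-launcher pod fixture for the given VMI in the
// given phase, annotated as created by the VMI and owned by virt-controller, with a
// single ready container status.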
func NewPodForVirtualMachine(vmi *v1.VirtualMachineInstance, phase k8sv1.PodPhase) *k8sv1.Pod {
return &k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: vmi.Namespace,
Labels: map[string]string{
v1.AppLabel: "virt-launcher",
v1.DomainLabel: vmi.Name,
},
Annotations: map[string]string{
v1.CreatedByAnnotation: string(vmi.UID),
v1.OwnedByAnnotation: "virt-controller",
},
},
Status: k8sv1.PodStatus{
Phase: phase,
ContainerStatuses: []k8sv1.ContainerStatus{
{Ready: true},
},
},
}
}
func now() *metav1.Time {
now := metav1.Now()
return &now
} | })
It("should move the vmi to failed state if the vmi is pending, no pod exists yet and gets deleted", func() {
vmi := NewPendingVirtualMachine("testvmi")
vmi.DeletionTimestamp = now() |
test_meetup.py | """Module for Testing the Meetup Endpoint."""
import json
# Local Import
from .basecase import TestBaseCase as base
class TestMeetup(base): |
def setUp(self):
base.setUp(self)
def test_create_meetup(self):
"""Testing Creation of a Meetup."""
response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type,
)
response_data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertEqual(response_data["message"], "Meetup was created successfully.")
def test_fetching_all_meetups(self):
"""Testing Fetching of all meetups."""
post_response = self.client.post(
"/api/v1/meetups",
data=json.dumps(self.meetup_payload),
content_type=self.content_type
)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(
post_response_data["message"], "Meetup was created successfully."
)
response = self.client.get("/api/v1/meetups/upcoming", content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_fetch_single_meetup(self):
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
# Fetching the single meetup that was just created.
response = self.client.get('/api/v1/meetups/{}'.format(post_response_data["data"]["id"]), content_type=self.content_type)
self.assertEqual(response.status_code, 200)
def test_rsvp_to_meetup(self):
"""Test RSVPing to a meetup."""
"""Test fetching a single meetup."""
post_response = self.client.post('/api/v1/meetups', data=json.dumps(self.meetup_payload), content_type=self.content_type)
post_response_data = json.loads(post_response.data.decode())
self.assertEqual(post_response.status_code, 201)
self.assertEqual(post_response_data["message"], "Meetup was created successfully.")
# Posting RSVP.
response = self.client.post('/api/v1/meetups/{}/rsvps'.format(post_response_data["data"]["id"]), data=json.dumps(self.rsvp_payload), content_type=self.content_type)
self.assertEqual(response.status_code, 201) | """Testing the Meetup Endpoints with valid input.""" |
nestini.py | """ This code parses INI files in a nested manner. """
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright 2016"
__license__ = "Apache License 2.0"
try:
from collections import OrderedDict as dict
except ImportError:
pass
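# Illustrative mapping (the section and option names below are hypothetical, not part
# of this module): an INI file such as
#
#   [server]
#   db.port = 5432
#
#   [server.hosts.0]
#   name = primary
#
# would be parsed by NestedIniParser into
#
#   {"server": {"db": {"port": "5432"}, "hosts": [{"name": "primary"}]}}
#
# Dotted name parts become nested dicts, integer parts build lists, and leaf values
# stay as the strings returned by the underlying parser.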
class NestedIniParser(object):
def __init__(self, parser):
""" Initialize the nexted INI parser. """
self.parser = parser
def parse(self):
""" Parse the INI data and return the results. """
results = dict()
parser = self.parser
for section in parser.sections():
target = self._get_target(results, section)
for option in parser.options(section):
self._set_value(target, option, parser.get(section, option))
return results
def _isint(self, value):
""" Is a value an integer. """
try:
int(value)
return True
except ValueError:
return False
def _get_target(self, results, section):
""" Find out where we should put items. """
parts = section.split(".")
target = results
count = len(parts)
for pos in range(count):
# What kind is it:
part = parts[pos]
if self._isint(part):
# Item before us should be a list
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if value == len(target):
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target.append(dict())
else:
target.append([])
target = target[value]
else:
# Item before us should be a dict
if not isinstance(target, dict):
raise ValueError("Must be a dict")
value = part
if not value in target:
if (pos == count - 1) or not self._isint(parts[pos + 1]):
target[value] = dict()
else:
target[value] = []
| target = target[value]
if not isinstance(target, dict):
raise ValueError("Final result must be a dict.")
return target
def _set_value(self, target, name, data):
""" Set a value by parsing simlar to above. """
parts = name.split(".")
count = len(parts)
for pos in range(count):
# What kind is it
part = parts[pos]
if self._isint(part):
# Item before us should be a list
if not isinstance(target, list):
raise ValueError("Must be a list")
value = int(part)
if value < 0 or value > len(target):
raise ValueError("Invalid index.")
if pos == count - 1:
if value == len(target):
target.append(data)
else:
target[value] = data
return
else:
if value == len(target):
if self._isint(parts[pos + 1]):
target.append([])
else:
target.append(dict())
target = target[value]
else:
# Item before us should be a dict
if not isinstance(target, dict):
raise ValueError("Must be a dict.")
value = part
if pos == count - 1:
target[value] = data
return
else:
if not value in target:
if self._isint(parts[pos + 1]):
target[value] = []
else:
target[value] = dict()
target = target[value] | |
_app.tsx | import "../../styles/sass/globals.scss";
import BankContext from "../context/BankContext";
function | ({ Component, pageProps }) {
return (
<BankContext.Provider
value={{
name: process.env.NEXT_PUBLIC_BANK_NAME,
code: process.env.NEXT_PUBLIC_BANK_CODE,
get cssCode() {
return `bank${this.code}`;
}
}}
>
<Component {...pageProps} />
</BankContext.Provider>
);
}
export default MyApp;
| MyApp |
LexRuntimeServiceClient.ts | import {
EndpointsInputConfig,
EndpointsResolvedConfig,
RegionInputConfig,
RegionResolvedConfig,
resolveEndpointsConfig,
resolveRegionConfig,
} from "@aws-sdk/config-resolver";
import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length";
import {
getHostHeaderPlugin,
HostHeaderInputConfig,
HostHeaderResolvedConfig,
resolveHostHeaderConfig,
} from "@aws-sdk/middleware-host-header";
import { getLoggerPlugin } from "@aws-sdk/middleware-logger";
import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry";
import {
AwsAuthInputConfig,
AwsAuthResolvedConfig,
getAwsAuthPlugin,
resolveAwsAuthConfig,
} from "@aws-sdk/middleware-signing";
import {
getUserAgentPlugin,
resolveUserAgentConfig,
UserAgentInputConfig,
UserAgentResolvedConfig,
} from "@aws-sdk/middleware-user-agent";
import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http";
import {
Client as __Client,
SmithyConfiguration as __SmithyConfiguration,
SmithyResolvedConfiguration as __SmithyResolvedConfiguration,
} from "@aws-sdk/smithy-client";
import {
Credentials as __Credentials,
Decoder as __Decoder,
Encoder as __Encoder,
Hash as __Hash,
HashConstructor as __HashConstructor,
HttpHandlerOptions as __HttpHandlerOptions,
Logger as __Logger,
Provider as __Provider,
Provider,
RegionInfoProvider,
StreamCollector as __StreamCollector,
UrlParser as __UrlParser,
UserAgent as __UserAgent,
} from "@aws-sdk/types";
import { DeleteSessionCommandInput, DeleteSessionCommandOutput } from "./commands/DeleteSessionCommand";
import { GetSessionCommandInput, GetSessionCommandOutput } from "./commands/GetSessionCommand";
import { PostContentCommandInput, PostContentCommandOutput } from "./commands/PostContentCommand";
import { PostTextCommandInput, PostTextCommandOutput } from "./commands/PostTextCommand";
import { PutSessionCommandInput, PutSessionCommandOutput } from "./commands/PutSessionCommand";
import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig";
export type ServiceInputTypes =
| DeleteSessionCommandInput
| GetSessionCommandInput
| PostContentCommandInput
| PostTextCommandInput
| PutSessionCommandInput;
export type ServiceOutputTypes =
| DeleteSessionCommandOutput
| GetSessionCommandOutput
| PostContentCommandOutput
| PostTextCommandOutput
| PutSessionCommandOutput;
export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> {
/**
* The HTTP handler to use. Fetch in browser and Https in Nodejs.
*/
requestHandler?: __HttpHandler;
/**
* A constructor for a class implementing the {@link __Hash} interface
* that computes the SHA-256 HMAC or checksum of a string or binary buffer.
* @internal
*/
sha256?: __HashConstructor;
/**
* The function that will be used to convert strings into HTTP endpoints.
* @internal
*/
urlParser?: __UrlParser;
/**
* A function that can calculate the length of a request body.
* @internal
*/
bodyLengthChecker?: (body: any) => number | undefined;
/**
* A function that converts a stream into an array of bytes.
* @internal
*/
streamCollector?: __StreamCollector;
/**
* The function that will be used to convert a base64-encoded string to a byte array.
* @internal
*/
base64Decoder?: __Decoder;
/**
* The function that will be used to convert binary data to a base64-encoded string.
* @internal
*/
base64Encoder?: __Encoder;
/**
* The function that will be used to convert a UTF8-encoded string to a byte array.
* @internal
*/
utf8Decoder?: __Decoder;
/**
* The function that will be used to convert binary data to a UTF-8 encoded string.
* @internal
*/
utf8Encoder?: __Encoder;
/**
* The runtime environment.
* @internal
*/
runtime?: string;
/**
* Disable dynamically changing the endpoint of the client based on the hostPrefix
* trait of an operation.
*/
disableHostPrefix?: boolean;
/**
* Value for how many times a request will be made at most in case of retry.
*/
maxAttempts?: number | __Provider<number>;
/**
* Specifies which retry algorithm to use.
*/
retryMode?: string | __Provider<string>;
/**
* Optional logger for logging debug/info/warn/error.
*/
logger?: __Logger;
| useDualstackEndpoint?: boolean | __Provider<boolean>;
/**
* Enables FIPS compatible endpoints.
*/
useFipsEndpoint?: boolean | __Provider<boolean>;
/**
* Unique service identifier.
* @internal
*/
serviceId?: string;
/**
* The AWS region to which this client will send requests
*/
region?: string | __Provider<string>;
/**
* Default credentials provider; Not available in browser runtime.
* @internal
*/
credentialDefaultProvider?: (input: any) => __Provider<__Credentials>;
/**
* Fetch related hostname, signing name or signing region with given region.
* @internal
*/
regionInfoProvider?: RegionInfoProvider;
/**
* The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header
* @internal
*/
defaultUserAgentProvider?: Provider<__UserAgent>;
}
type LexRuntimeServiceClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> &
ClientDefaults &
RegionInputConfig &
EndpointsInputConfig &
RetryInputConfig &
HostHeaderInputConfig &
AwsAuthInputConfig &
UserAgentInputConfig;
/**
* The configuration interface of LexRuntimeServiceClient class constructor that sets the region, credentials and other options.
*/
export interface LexRuntimeServiceClientConfig extends LexRuntimeServiceClientConfigType {}
type LexRuntimeServiceClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> &
Required<ClientDefaults> &
RegionResolvedConfig &
EndpointsResolvedConfig &
RetryResolvedConfig &
HostHeaderResolvedConfig &
AwsAuthResolvedConfig &
UserAgentResolvedConfig;
/**
* The resolved configuration interface of LexRuntimeServiceClient class. This is resolved and normalized from the {@link LexRuntimeServiceClientConfig | constructor configuration interface}.
*/
export interface LexRuntimeServiceClientResolvedConfig extends LexRuntimeServiceClientResolvedConfigType {}
/**
* <p>Amazon Lex provides both build and runtime endpoints. Each endpoint
* provides a set of operations (API). Your conversational bot uses the
* runtime API to understand user utterances (user input text or voice). For
* example, suppose a user says "I want pizza", your bot sends this input to
* Amazon Lex using the runtime API. Amazon Lex recognizes that the user
* request is for the OrderPizza intent (one of the intents defined in the
* bot). Then Amazon Lex engages in user conversation on behalf of the bot to
* elicit required information (slot values, such as pizza size and crust
* type), and then performs fulfillment activity (that you configured when
* you created the bot). You use the build-time API to create and manage your
* Amazon Lex bot. For a list of build-time operations, see the build-time
* API.</p>
*/
export class LexRuntimeServiceClient extends __Client<
__HttpHandlerOptions,
ServiceInputTypes,
ServiceOutputTypes,
LexRuntimeServiceClientResolvedConfig
> {
/**
* The resolved configuration of LexRuntimeServiceClient class. This is resolved and normalized from the {@link LexRuntimeServiceClientConfig | constructor configuration interface}.
*/
readonly config: LexRuntimeServiceClientResolvedConfig;
constructor(configuration: LexRuntimeServiceClientConfig) {
const _config_0 = __getRuntimeConfig(configuration);
const _config_1 = resolveRegionConfig(_config_0);
const _config_2 = resolveEndpointsConfig(_config_1);
const _config_3 = resolveRetryConfig(_config_2);
const _config_4 = resolveHostHeaderConfig(_config_3);
const _config_5 = resolveAwsAuthConfig(_config_4);
const _config_6 = resolveUserAgentConfig(_config_5);
super(_config_6);
this.config = _config_6;
this.middlewareStack.use(getRetryPlugin(this.config));
this.middlewareStack.use(getContentLengthPlugin(this.config));
this.middlewareStack.use(getHostHeaderPlugin(this.config));
this.middlewareStack.use(getLoggerPlugin(this.config));
this.middlewareStack.use(getAwsAuthPlugin(this.config));
this.middlewareStack.use(getUserAgentPlugin(this.config));
}
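// Illustrative usage sketch (not part of this file; the region, bot name, alias and
// user id below are assumptions):
//
//   const client = new LexRuntimeServiceClient({ region: "us-east-1" });
//   const output = await client.send(
//     new PostTextCommand({
//       botName: "OrderPizza",
//       botAlias: "$LATEST",
//       userId: "user-1234",
//       inputText: "I want pizza",
//     })
//   );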
/**
* Destroy underlying resources, like sockets. It's usually not necessary to do this.
* However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed.
* Otherwise, sockets might stay open for quite a long time before the server terminates them.
*/
destroy(): void {
super.destroy();
}
} | /**
* Enables IPv6/IPv4 dualstack endpoint.
*/ |
setup.py | # -*- coding: UTF-8 -*-
#setup.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Peter Vágner, Joseph Lee
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import os
import copy
import gettext
gettext.install("nvda", unicode=True)
from distutils.core import setup
import py2exe as py2exeModule
from glob import glob
import fnmatch
from versionInfo import *
from py2exe import build_exe
import wx
import imp
MAIN_MANIFEST_EXTRA = r"""
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.dll">
<comClass
description="HtBrailleDriver Class"
clsid="{209445BA-92ED-4AB2-83EC-F24ACEE77EE0}"
threadingModel="Apartment"
progid="HtBrailleDriverServer.HtBrailleDriver"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
</file>
<file name="brailleDisplayDrivers\handyTech\HtBrailleDriverServer.tlb">
<typelib tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}"
version="1.0"
helpdir="" />
</file>
<comInterfaceExternalProxyStub
name="IHtBrailleDriverSink"
iid="{EF551F82-1C7E-421F-963D-D9D03548785A}"
proxyStubClsid32="{00020420-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<comInterfaceExternalProxyStub
name="IHtBrailleDriver"
iid="{43A71F9B-58EE-42D4-B58E-0F9FBA28D995}"
proxyStubClsid32="{00020424-0000-0000-C000-000000000046}"
baseInterface="{00000000-0000-0000-C000-000000000046}"
tlbid="{33257EFB-336F-4680-B94E-F5013BA6B9B3}" />
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows Vista -->
<supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
<!-- Windows 7 -->
<supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
<!-- Windows 8 -->
<supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
<!-- Windows 8.1 -->
<supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
"""
def getModuleExtention(thisModType):
for ext,mode,modType in imp.get_suffixes():
if modType==thisModType:
return ext
raise ValueError("unknown mod type %s"%thisModType)
# py2exe's idea of whether a dll is a system dll appears to be wrong sometimes, so monkey patch it.
origIsSystemDLL = build_exe.isSystemDLL
def isSystemDLL(pathname):
dll = os.path.basename(pathname).lower()
if dll in ("msvcp71.dll", "msvcp90.dll", "gdiplus.dll","mfc71.dll", "mfc90.dll"):
# These dlls don't exist on many systems, so make sure they're included.
return 0
elif dll.startswith("api-ms-win-") or dll in ("powrprof.dll", "mpr.dll", "crypt32.dll"):
# These are definitely system dlls available on all systems and must be excluded.
# Including them can cause serious problems when a binary build is run on a different version of Windows.
return 1
return origIsSystemDLL(pathname)
build_exe.isSystemDLL = isSystemDLL
class py2exe(build_exe.py2exe):
"""Overridden py2exe command to:
* Add a command line option --enable-uiAccess to enable uiAccess for the main executable
* Add extra info to the manifest
* Don't copy w9xpopen, as NVDA will never run on Win9x
"""
user_options = build_exe.py2exe.user_options + [
("enable-uiAccess", "u", "enable uiAccess for the main executable"),
]
def initialize_options(self):
build_exe.py2exe.initialize_options(self)
self.enable_uiAccess = False
def copy_w9xpopen(self, modules, dlls):
pass
def run(self):
d |
def build_manifest(self, target, template):
mfest, rid = build_exe.py2exe.build_manifest(self, target, template)
if getattr(target, "script", "").endswith(".pyw"):
# This is one of the main application executables.
mfest = mfest[:mfest.rindex("</assembly>")]
mfest += MAIN_MANIFEST_EXTRA + "</assembly>"
return mfest, rid
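# getLocaleDataFiles collects NVDA's own gettext catalogs plus the matching wxWidgets
# wxstd.mo for each locale (falling back to the base language), along with per-locale
# dictionaries and gesture maps.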
def getLocaleDataFiles():
wxDir=wx.__path__[0]
localeMoFiles=set()
for f in glob("locale/*/LC_MESSAGES"):
localeMoFiles.add((f, (os.path.join(f,"nvda.mo"),)))
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
lang=os.path.split(os.path.split(f)[0])[1]
if '_' in lang:
lang=lang.split('_')[0]
f=os.path.join('locale',lang,'lc_messages')
wxMoFile=os.path.join(wxDir,f,"wxstd.mo")
if os.path.isfile(wxMoFile):
localeMoFiles.add((f,(wxMoFile,)))
localeDicFiles=[(os.path.dirname(f), (f,)) for f in glob("locale/*/*.dic")]
NVDALocaleGestureMaps=[(os.path.dirname(f), (f,)) for f in glob("locale/*/gestures.ini")]
return list(localeMoFiles)+localeDicFiles+NVDALocaleGestureMaps
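# getRecursiveDataFiles walks the source directory tree and builds py2exe data_files
# rules mirroring it under dest, skipping files that match any pattern in excludes and
# any hidden directories.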
def getRecursiveDataFiles(dest,source,excludes=()):
rulesList=[]
rulesList.append((dest,
[f for f in glob("%s/*"%source) if not any(fnmatch.fnmatch(f,exclude) for exclude in excludes) and os.path.isfile(f)]))
[rulesList.extend(getRecursiveDataFiles(os.path.join(dest,dirName),os.path.join(source,dirName),excludes=excludes)) for dirName in os.listdir(source) if os.path.isdir(os.path.join(source,dirName)) and not dirName.startswith('.')]
return rulesList
compiledModExtention = getModuleExtention(imp.PY_COMPILED)
sourceModExtention = getModuleExtention(imp.PY_SOURCE)
setup(
name = name,
version=version,
description=description,
url=url,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Win32 (MS Windows)',
'Topic :: Adaptive Technologies',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
],
cmdclass={"py2exe": py2exe},
windows=[
{
"script":"nvda.pyw",
"dest_base":"nvda_noUIAccess",
"uac_info": ("asInvoker", False),
"icon_resources":[(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description":"NVDA application",
"product_version":version,
"copyright":copyright,
"company_name":publisher,
},
# The nvda_uiAccess target will be added at runtime if required.
{
"script": "nvda_slave.pyw",
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": name,
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
{
"script": "nvda_eoaProxy.pyw",
# uiAccess will be enabled at runtime if appropriate.
"uac_info": ("asInvoker", False),
"icon_resources": [(1,"images/nvda.ico")],
"version":"%s.%s.%s.%s"%(version_year,version_major,version_minor,version_build),
"description": "NVDA Ease of Access proxy",
"product_version": version,
"copyright": copyright,
"company_name": publisher,
},
],
options = {"py2exe": {
"bundle_files": 3,
"excludes": ["Tkinter",
"serial.loopback_connection", "serial.rfc2217", "serial.serialcli", "serial.serialjava", "serial.serialposix", "serial.socket_connection"],
"packages": ["NVDAObjects","virtualBuffers","appModules","comInterfaces","brailleDisplayDrivers","synthDrivers"],
"includes": [
"nvdaBuiltin",
# #3368: bisect was implicitly included with Python 2.7.3, but isn't with 2.7.5.
"bisect",
# Also, the previous service executable used win32api, which some add-ons use for various purposes.
"win32api",
# #8628: include an import module for validate, which older add-ons import directly.
# Since configobj 5.1.0, validate is a part of the configobj package
# and should be imported as configobj.validate instead
"validate",
],
}},
data_files=[
(".",glob("*.dll")+glob("*.manifest")+["builtin.dic"]),
("documentation", ['../copying.txt', '../contributors.txt']),
("lib/%s"%version, glob("lib/*.dll")),
("lib64/%s"%version, glob("lib64/*.dll") + glob("lib64/*.exe")),
("waves", glob("waves/*.wav")),
("images", glob("images/*.ico")),
("louis/tables",glob("louis/tables/*")),
("COMRegistrationFixes", glob("COMRegistrationFixes/*.reg")),
(".", ['message.html' ])
] + (
getLocaleDataFiles()
+ getRecursiveDataFiles("synthDrivers", "synthDrivers",
excludes=("*%s" % sourceModExtention, "*%s" % compiledModExtention, "*.exp", "*.lib", "*.pdb"))
+ getRecursiveDataFiles("brailleDisplayDrivers", "brailleDisplayDrivers", excludes=("*%s"%sourceModExtention,"*%s"%compiledModExtention))
+ getRecursiveDataFiles('documentation', '../user_docs', excludes=('*.t2t', '*.t2tconf', '*/developerGuide.*'))
),
)
| ist = self.distribution
if self.enable_uiAccess:
# Add a target for nvda_uiAccess, using nvda_noUIAccess as a base.
target = copy.deepcopy(dist.windows[0])
target["dest_base"] = "nvda_uiAccess"
target["uac_info"] = (target["uac_info"][0], True)
dist.windows.insert(1, target)
# nvda_eoaProxy should have uiAccess.
target = dist.windows[3]
target["uac_info"] = (target["uac_info"][0], True)
build_exe.py2exe.run(self)
|
raw.rs | // Unless explicitly stated otherwise all files in this repository are licensed under the
// MIT/Apache-2.0 License, at your convenience
//
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2020 Datadog, Inc.
//
use alloc::alloc::Layout;
use core::cell::UnsafeCell;
use core::future::Future;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop};
use core::pin::Pin;
use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};
use crate::task::header::Header;
use crate::task::state::*;
use crate::task::utils::{abort, abort_on_panic, extend};
use crate::task::Task;
/// The vtable for a task.
pub(crate) struct TaskVTable {
/// Schedules the task.
pub(crate) schedule: unsafe fn(*const ()),
/// Drops the future inside the task.
pub(crate) drop_future: unsafe fn(*const ()),
/// Returns a pointer to the output stored after completion.
pub(crate) get_output: unsafe fn(*const ()) -> *const (),
/// Drops the task.
pub(crate) drop_task: unsafe fn(ptr: *const ()),
/// Destroys the task.
pub(crate) destroy: unsafe fn(*const ()),
/// Runs the task.
pub(crate) run: unsafe fn(*const ()) -> bool,
/// Creates a new waker associated with the task.
pub(crate) clone_waker: unsafe fn(ptr: *const ()) -> RawWaker,
}
/// Memory layout of a task.
///
/// This struct contains the following information:
///
/// 1. How to allocate and deallocate the task.
/// 2. How to access the fields inside the task.
#[derive(Clone, Copy)]
pub(crate) struct TaskLayout {
/// Memory layout of the whole task.
pub(crate) layout: Layout,
/// Offset into the task at which the tag is stored.
pub(crate) offset_t: usize,
/// Offset into the task at which the schedule function is stored.
pub(crate) offset_s: usize,
/// Offset into the task at which the future is stored.
pub(crate) offset_f: usize,
/// Offset into the task at which the output is stored.
pub(crate) offset_r: usize,
}
/// Raw pointers to the fields inside a task.
pub(crate) struct RawTask<F, R, S, T> {
/// The task header.
pub(crate) header: *const Header,
/// The schedule function.
pub(crate) schedule: *const S,
/// The tag inside the task.
pub(crate) tag: *mut T,
/// The future.
pub(crate) future: *mut F,
/// The output of the future.
pub(crate) output: *mut R,
}
impl<F, R, S, T> Copy for RawTask<F, R, S, T> {}
impl<F, R, S, T> Clone for RawTask<F, R, S, T> {
fn clone(&self) -> Self {
*self
}
}
impl<F, R, S, T> RawTask<F, R, S, T>
where
F: Future<Output = R> + 'static,
S: Fn(Task<T>) + 'static,
{
const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
Self::clone_waker,
Self::wake,
Self::wake_by_ref,
Self::drop_waker,
);
/// Allocates a task with the given `future` and `schedule` function.
///
/// It is assumed that initially only the `Task` reference and the `JoinHandle` exist.
pub(crate) fn allocate(future: F, schedule: S, tag: T) -> NonNull<()> {
// Compute the layout of the task for allocation. Abort if the computation fails.
let task_layout = abort_on_panic(|| Self::task_layout());
unsafe {
// Allocate enough space for the entire task.
let raw_task = match NonNull::new(alloc::alloc::alloc(task_layout.layout) as *mut ()) {
None => abort(),
Some(p) => p,
};
let raw = Self::from_ptr(raw_task.as_ptr());
// Write the header as the first field of the task.
(raw.header as *mut Header).write(Header {
state: AtomicUsize::new(SCHEDULED | HANDLE | REFERENCE),
awaiter: UnsafeCell::new(None),
vtable: &TaskVTable {
schedule: Self::schedule,
drop_future: Self::drop_future,
get_output: Self::get_output,
drop_task: Self::drop_task,
destroy: Self::destroy,
run: Self::run,
clone_waker: Self::clone_waker,
},
});
// Write the tag as the second field of the task.
(raw.tag as *mut T).write(tag);
// Write the schedule function as the third field of the task.
(raw.schedule as *mut S).write(schedule);
// Write the future as the fourth field of the task.
raw.future.write(future);
raw_task
}
}
/// Creates a `RawTask` from a raw task pointer.
#[inline]
pub(crate) fn from_ptr(ptr: *const ()) -> Self {
let task_layout = Self::task_layout();
let p = ptr as *const u8;
unsafe {
Self {
header: p as *const Header,
tag: p.add(task_layout.offset_t) as *mut T,
schedule: p.add(task_layout.offset_s) as *const S,
future: p.add(task_layout.offset_f) as *mut F,
output: p.add(task_layout.offset_r) as *mut R,
}
}
}
/// Returns the memory layout for a task.
#[inline]
fn task_layout() -> TaskLayout {
// Compute the layouts for `Header`, `T`, `S`, `F`, and `R`.
let layout_header = Layout::new::<Header>();
let layout_t = Layout::new::<T>();
let layout_s = Layout::new::<S>();
let layout_f = Layout::new::<F>();
let layout_r = Layout::new::<R>();
// Compute the layout for `union { F, R }`.
let size_union = layout_f.size().max(layout_r.size());
let align_union = layout_f.align().max(layout_r.align());
let layout_union = unsafe { Layout::from_size_align_unchecked(size_union, align_union) };
// Compute the layout for `Header` followed by `T`, then `S`, and finally `union { F, R }`.
let layout = layout_header;
let (layout, offset_t) = extend(layout, layout_t);
let (layout, offset_s) = extend(layout, layout_s);
let (layout, offset_union) = extend(layout, layout_union);
let offset_f = offset_union;
let offset_r = offset_union;
TaskLayout {
layout,
offset_t,
offset_s,
offset_f,
offset_r,
}
}
/// Wakes a waker.
unsafe fn wake(ptr: *const ()) {
// This is just an optimization. If the schedule function has captured variables, then
// we'll do less reference counting if we wake the waker by reference and then drop it.
if mem::size_of::<S>() > 0 {
Self::wake_by_ref(ptr);
Self::drop_waker(ptr);
return;
}
let raw = Self::from_ptr(ptr);
let mut state = (*raw.header).state.load(Ordering::Acquire);
loop {
// If the task is completed or closed, it can't be woken up.
if state & (COMPLETED | CLOSED) != 0 {
// Drop the waker.
Self::drop_waker(ptr);
break;
}
// If the task is already scheduled, we just need to synchronize with the thread that
// will run the task by "publishing" our current view of the memory.
if state & SCHEDULED != 0 {
// Update the state without actually modifying it.
match (*raw.header).state.compare_exchange_weak(
state,
state,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
// Drop the waker.
Self::drop_waker(ptr);
break;
}
Err(s) => state = s,
}
} else {
// Mark the task as scheduled.
match (*raw.header).state.compare_exchange_weak(
state,
state | SCHEDULED,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
// If the task is not yet scheduled and isn't currently running, now is the
// time to schedule it.
if state & RUNNING == 0 {
// Schedule the task.
Self::schedule(ptr);
} else {
// Drop the waker.
Self::drop_waker(ptr);
}
break;
}
Err(s) => state = s,
}
}
}
}
/// Wakes a waker by reference.
unsafe fn wake_by_ref(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
let mut state = (*raw.header).state.load(Ordering::Acquire);
loop {
// If the task is completed or closed, it can't be woken up.
if state & (COMPLETED | CLOSED) != 0 {
break;
}
// If the task is already scheduled, we just need to synchronize with the thread that
// will run the task by "publishing" our current view of the memory.
if state & SCHEDULED != 0 {
// Update the state without actually modifying it.
match (*raw.header).state.compare_exchange_weak(
state,
state,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break,
Err(s) => state = s,
}
} else {
// If the task is not running, we can schedule right away.
let new = if state & RUNNING == 0 {
(state | SCHEDULED) + REFERENCE
} else {
state | SCHEDULED
};
// Mark the task as scheduled.
match (*raw.header).state.compare_exchange_weak(
state,
new,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
// If the task is not running, now is the time to schedule.
if state & RUNNING == 0 {
// If the reference count overflowed, abort.
if state > isize::max_value() as usize {
abort();
}
// Schedule the task. There is no need to call `Self::schedule(ptr)`
// because the schedule function cannot be destroyed while the waker is
// still alive.
let task = Task {
raw_task: NonNull::new_unchecked(ptr as *mut ()),
_marker: PhantomData,
};
(*raw.schedule)(task);
}
break;
}
Err(s) => state = s,
}
}
}
}
/// Clones a waker.
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
let raw = Self::from_ptr(ptr);
// Increment the reference count. With any kind of reference-counted data structure,
// relaxed ordering is appropriate when incrementing the counter.
let state = (*raw.header).state.fetch_add(REFERENCE, Ordering::Relaxed);
// If the reference count overflowed, abort.
if state > isize::max_value() as usize {
abort();
}
RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
}
/// Drops a waker.
///
/// This function will decrement the reference count. If it drops down to zero, the associated
/// join handle has been dropped too, and the task has not been completed, then it will get
/// scheduled one more time so that its future gets dropped by the executor.
#[inline]
unsafe fn drop_waker(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
// Decrement the reference count.
let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;
// If this was the last reference to the task and the `JoinHandle` has been dropped too,
// then we need to decide how to destroy the task.
if new & !(REFERENCE - 1) == 0 && new & HANDLE == 0 {
if new & (COMPLETED | CLOSED) == 0 {
// If the task was not completed nor closed, close it and schedule one more time so
// that its future gets dropped by the executor.
(*raw.header)
.state
.store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release);
Self::schedule(ptr);
} else { | // Otherwise, destroy the task right away.
Self::destroy(ptr);
}
}
}
/// Drops a task.
///
/// This function will decrement the reference count. If it drops down to zero and the
/// associated join handle has been dropped too, then the task gets destroyed.
#[inline]
unsafe fn drop_task(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
// Decrement the reference count.
let new = (*raw.header).state.fetch_sub(REFERENCE, Ordering::AcqRel) - REFERENCE;
// If this was the last reference to the task and the `JoinHandle` has been dropped too,
// then destroy the task.
if new & !(REFERENCE - 1) == 0 && new & HANDLE == 0 {
Self::destroy(ptr);
}
}
/// Schedules a task for running.
///
/// This function doesn't modify the state of the task. It only passes the task reference to
/// its schedule function.
unsafe fn schedule(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
// If the schedule function has captured variables, create a temporary waker that prevents
// the task from getting deallocated while the function is being invoked.
let _waker;
if mem::size_of::<S>() > 0 {
_waker = Waker::from_raw(Self::clone_waker(ptr));
}
let task = Task {
raw_task: NonNull::new_unchecked(ptr as *mut ()),
_marker: PhantomData,
};
(*raw.schedule)(task);
}
/// Drops the future inside a task.
#[inline]
unsafe fn drop_future(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
// We need a safeguard against panics because the destructor can panic.
abort_on_panic(|| {
raw.future.drop_in_place();
})
}
/// Returns a pointer to the output inside a task.
unsafe fn get_output(ptr: *const ()) -> *const () {
let raw = Self::from_ptr(ptr);
raw.output as *const ()
}
/// Cleans up task's resources and deallocates it.
///
/// The schedule function and the tag will be dropped, and the task will then get deallocated.
/// The task must be closed before this function is called.
#[inline]
unsafe fn destroy(ptr: *const ()) {
let raw = Self::from_ptr(ptr);
let task_layout = Self::task_layout();
// We need a safeguard against panics because destructors can panic.
abort_on_panic(|| {
// Drop the schedule function.
(raw.schedule as *mut S).drop_in_place();
// Drop the tag.
(raw.tag as *mut T).drop_in_place();
});
// Finally, deallocate the memory reserved by the task.
alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout);
}
/// Runs a task.
///
/// If polling its future panics, the task will be closed and the panic will be propagated into
/// the caller.
unsafe fn run(ptr: *const ()) -> bool {
let raw = Self::from_ptr(ptr);
// Create a context from the raw task pointer and the vtable inside its header.
let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)));
let cx = &mut Context::from_waker(&waker);
let mut state = (*raw.header).state.load(Ordering::Acquire);
// Update the task's state before polling its future.
loop {
// If the task has already been closed, drop the task reference and return.
if state & CLOSED != 0 {
// Drop the future.
Self::drop_future(ptr);
// Mark the task as unscheduled.
let state = (*raw.header).state.fetch_and(!SCHEDULED, Ordering::AcqRel);
// Notify the awaiter that the future has been dropped.
if state & AWAITER != 0 {
(*raw.header).notify(None);
}
// Drop the task reference.
Self::drop_task(ptr);
return false;
}
// Mark the task as unscheduled and running.
match (*raw.header).state.compare_exchange_weak(
state,
(state & !SCHEDULED) | RUNNING,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
// Update the state because we're continuing with polling the future.
state = (state & !SCHEDULED) | RUNNING;
break;
}
Err(s) => state = s,
}
}
// Poll the inner future, but surround it with a guard that closes the task in case polling
// panics.
let guard = Guard(raw);
let poll = <F as Future>::poll(Pin::new_unchecked(&mut *raw.future), cx);
mem::forget(guard);
match poll {
Poll::Ready(out) => {
// Replace the future with its output.
Self::drop_future(ptr);
raw.output.write(out);
// A place where the output will be stored in case it needs to be dropped.
let mut output = None;
// The task is now completed.
loop {
// If the handle is dropped, we'll need to close it and drop the output.
let new = if state & HANDLE == 0 {
(state & !RUNNING & !SCHEDULED) | COMPLETED | CLOSED
} else {
(state & !RUNNING & !SCHEDULED) | COMPLETED
};
// Mark the task as not running and completed.
match (*raw.header).state.compare_exchange_weak(
state,
new,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
// If the handle is dropped or if the task was closed while running,
// now it's time to drop the output.
if state & HANDLE == 0 || state & CLOSED != 0 {
// Read the output.
output = Some(raw.output.read());
}
// Notify the awaiter that the task has been completed.
if state & AWAITER != 0 {
(*raw.header).notify(None);
}
// Drop the task reference.
Self::drop_task(ptr);
break;
}
Err(s) => state = s,
}
}
// Drop the output if it was taken out of the task.
drop(output);
}
Poll::Pending => {
let mut future_dropped = false;
// The task is still not completed.
loop {
// If the task was closed while running, we'll need to unschedule in case it
// was woken up and then destroy it.
let new = if state & CLOSED != 0 {
state & !RUNNING & !SCHEDULED
} else {
state & !RUNNING
};
if state & CLOSED != 0 && !future_dropped {
// The thread that closed the task didn't drop the future because it was
// running so now it's our responsibility to do so.
Self::drop_future(ptr);
future_dropped = true;
}
// Mark the task as not running.
match (*raw.header).state.compare_exchange_weak(
state,
new,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(state) => {
// If the task was closed while running, we need to notify the awaiter.
// If the task was woken up while running, we need to schedule it.
// Otherwise, we just drop the task reference.
if state & CLOSED != 0 {
// Notify the awaiter that the future has been dropped.
if state & AWAITER != 0 {
(*raw.header).notify(None);
}
// Drop the task reference.
Self::drop_task(ptr);
} else if state & SCHEDULED != 0 {
// The thread that woke the task up didn't reschedule it because
// it was running so now it's our responsibility to do so.
Self::schedule(ptr);
return true;
} else {
// Drop the task reference.
Self::drop_task(ptr);
}
break;
}
Err(s) => state = s,
}
}
}
}
return false;
/// A guard that closes the task if polling its future panics.
struct Guard<F, R, S, T>(RawTask<F, R, S, T>)
where
F: Future<Output = R> + 'static,
S: Fn(Task<T>) + 'static;
impl<F, R, S, T> Drop for Guard<F, R, S, T>
where
F: Future<Output = R> + 'static,
S: Fn(Task<T>) + 'static,
{
fn drop(&mut self) {
let raw = self.0;
let ptr = raw.header as *const ();
unsafe {
let mut state = (*raw.header).state.load(Ordering::Acquire);
loop {
// If the task was closed while running, then unschedule it, drop its
// future, and drop the task reference.
if state & CLOSED != 0 {
// The thread that closed the task didn't drop the future because it
// was running so now it's our responsibility to do so.
RawTask::<F, R, S, T>::drop_future(ptr);
// Mark the task as not running and not scheduled.
(*raw.header)
.state
.fetch_and(!RUNNING & !SCHEDULED, Ordering::AcqRel);
// Notify the awaiter that the future has been dropped.
if state & AWAITER != 0 {
(*raw.header).notify(None);
}
// Drop the task reference.
RawTask::<F, R, S, T>::drop_task(ptr);
break;
}
// Mark the task as not running, not scheduled, and closed.
match (*raw.header).state.compare_exchange_weak(
state,
(state & !RUNNING & !SCHEDULED) | CLOSED,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(state) => {
// Drop the future because the task is now closed.
RawTask::<F, R, S, T>::drop_future(ptr);
// Notify the awaiter that the future has been dropped.
if state & AWAITER != 0 {
(*raw.header).notify(None);
}
// Drop the task reference.
RawTask::<F, R, S, T>::drop_task(ptr);
break;
}
Err(s) => state = s,
}
}
}
}
}
}
} | |
grpc_asyncio.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatastoreAdminGrpcTransport
class DatastoreAdminGrpcAsyncIOTransport(DatastoreAdminTransport):
"""gRPC AsyncIO backend transport for DatastoreAdmin.
Google Cloud Datastore Admin API
The Datastore Admin API provides several admin services for
Cloud Datastore.
-----------------------------------------------------------------------------
## Concepts
Project, namespace, kind, and entity as defined in the Google
Cloud Datastore API.
Operation: An Operation represents work being performed in the
background.
EntityFilter: Allows specifying a subset of entities in a
project. This is specified as a combination of kinds and
namespaces (either or both of which may be all).
-----------------------------------------------------------------------------
## Services
# Export/Import
The Export/Import service provides the ability to copy all or a
subset of entities to/from Google Cloud Storage.
Exported data may be imported into Cloud Datastore for any
Google Cloud Platform project. It is not restricted to the
export source project. It is possible to export from one project
and then import into another.
Exported data can also be loaded into Google BigQuery for
analysis.
Exports and imports are performed asynchronously. An Operation
resource is created for each export/import. The state (including
any errors encountered) of the export/import may be queried via
the Operation resource.
# Index
The index service manages Cloud Datastore composite indexes.
Index creation and deletion are performed asynchronously. An
Operation resource is created for each such asynchronous
operation. The state of the operation (including any errors
encountered) may be queried via the Operation resource.
# Operation
The Operations collection provides a record of actions performed
for the specified project (including any operations in
progress). Operations are not created directly but through calls
on other collections or resources.
An operation that is not yet done may be cancelled. The request
to cancel is asynchronous and the operation may continue to run
for some time after the request to cancel is made.
An operation that is done may be deleted so that it is no longer
listed as part of the Operation collection.
ListOperations returns all pending operations, but not completed
operations.
Operations are created by service DatastoreAdmin,
but are accessed via service google.longrunning.Operations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file, | quota_project_id=quota_project_id,
)
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
client_info=client_info,
)
self._stubs = {}
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__["operations_client"]
@property
def export_entities(
self,
) -> Callable[
[datastore_admin.ExportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the export entities method over gRPC.
Exports a copy of all or a subset of entities from
Google Cloud Datastore to another storage system, such
as Google Cloud Storage. Recent updates to entities may
not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed
via the Operation resource that is created. The output
of an export may only be used once the associated
operation is done. If an export operation is cancelled
before completion it may leave partial data behind in
Google Cloud Storage.
Returns:
Callable[[~.ExportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_entities" not in self._stubs:
self._stubs["export_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ExportEntities",
request_serializer=datastore_admin.ExportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["export_entities"]
@property
def import_entities(
self,
) -> Callable[
[datastore_admin.ImportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the import entities method over gRPC.
Imports entities into Google Cloud Datastore.
Existing entities with the same key are overwritten. The
import occurs in the background and its progress can be
monitored and managed via the Operation resource that is
created. If an ImportEntities operation is cancelled, it
is possible that a subset of the data has already been
imported to Cloud Datastore.
Returns:
Callable[[~.ImportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_entities" not in self._stubs:
self._stubs["import_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ImportEntities",
request_serializer=datastore_admin.ImportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["import_entities"]
@property
def get_index(
self,
) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:
r"""Return a callable for the get index method over gRPC.
Gets an index.
Returns:
Callable[[~.GetIndexRequest],
Awaitable[~.Index]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index" not in self._stubs:
self._stubs["get_index"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/GetIndex",
request_serializer=datastore_admin.GetIndexRequest.serialize,
response_deserializer=index.Index.deserialize,
)
return self._stubs["get_index"]
@property
def list_indexes(
self,
) -> Callable[
[datastore_admin.ListIndexesRequest],
Awaitable[datastore_admin.ListIndexesResponse],
]:
r"""Return a callable for the list indexes method over gRPC.
Lists the indexes that match the specified filters.
Datastore uses an eventually consistent query to fetch
the list of indexes and may occasionally return stale
results.
Returns:
Callable[[~.ListIndexesRequest],
Awaitable[~.ListIndexesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_indexes" not in self._stubs:
self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ListIndexes",
request_serializer=datastore_admin.ListIndexesRequest.serialize,
response_deserializer=datastore_admin.ListIndexesResponse.deserialize,
)
return self._stubs["list_indexes"]
__all__ = ("DatastoreAdminGrpcAsyncIOTransport",) | ssl_credentials=ssl_channel_credentials,
scopes=scopes or self.AUTH_SCOPES, |
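
The transport above creates each gRPC stub lazily and memoizes it in `self._stubs`, so repeated property accesses reuse a single stub per RPC (the same pattern the cached `grpc_channel` and `operations_client` properties follow). A minimal, dependency-free sketch of that caching idiom; the class and channel here are illustrative stand-ins, not part of the generated client:

```python
class LazyStubTransport:
    """Toy illustration of the stub-caching pattern used by the transport above."""

    def __init__(self, channel):
        self._channel = channel   # anything exposing a `call(path, request)` method
        self._stubs = {}          # RPC name -> callable, created on first access

    @property
    def export_entities(self):
        # Build the callable once, then serve it from the cache on later accesses.
        if "export_entities" not in self._stubs:
            self._stubs["export_entities"] = lambda request: self._channel.call(
                "/google.datastore.admin.v1.DatastoreAdmin/ExportEntities", request
            )
        return self._stubs["export_entities"]
```
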
mock.rs | //! Mocking Context for testing contracts
use crate::error::HostError;
use crate::storage::Storage;
use alloc::vec::Vec;
use core::cell::RefCell;
/// `MockStorage` mocks the storage for testing purposes.
pub struct MockStorage {
storage: RefCell<Vec<u8>>,
}
impl MockStorage {
/// instantiates a new mock
pub fn new(size: usize) -> Self {
let storage = RefCell::new(alloc::vec![0; size].to_vec());
Self { storage }
}
}
impl Storage for MockStorage {
fn swrite(&self, offset: u32, data: &[u8]) -> Result<(), HostError> {
if offset as usize + data.len() > self.storage.borrow().len() {
return Err(HostError { code: 1 });
} | }
Ok(())
}
fn sread(&self, offset: u32, length: u32) -> Result<Vec<u8>, HostError> {
if (offset + length) as usize > self.storage.borrow().len() {
return Err(HostError { code: 1 });
}
let c = &self.storage.borrow()[offset as usize..(offset + length) as usize];
Ok(c.into())
}
}
/// mocks the storage
pub fn mock_storage(storage_size: usize) -> MockStorage {
MockStorage::new(storage_size)
} | for (i, d) in data.iter().enumerate() {
self.storage.borrow_mut()[i + offset as usize] = *d; |
scraper.py | import requests
import urllib.request
import time
import xlwt
from bs4 import BeautifulSoup
def | (worksheet):
#Add Style for the Headers
style_text_wrap_font_bold_black_color = xlwt.easyxf('align:wrap on; font: bold on, color-index black')
col_width = 128*30
worksheet.write(0, 0, "BREED", style_text_wrap_font_bold_black_color)
worksheet.write(0, 1, "HEIGHT", style_text_wrap_font_bold_black_color)
worksheet.write(0, 2, "WEIGHT", style_text_wrap_font_bold_black_color)
worksheet.write(0, 3, "LIFE EXPECTANCY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 4, "CHARACTERISTICS", style_text_wrap_font_bold_black_color)
worksheet.write(0, 5, "GROOMING FREQUENCY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 6, "SHEDDING LEVEL", style_text_wrap_font_bold_black_color)
worksheet.write(0, 7, "ENERGY LEVEL", style_text_wrap_font_bold_black_color)
worksheet.write(0, 8, "TRAINABILITY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 9, "TEMPERAMENT/DEMEANOR", style_text_wrap_font_bold_black_color)
worksheet.col(0).width = col_width
worksheet.col(1).width = col_width
worksheet.col(2).width = col_width
worksheet.col(3).width = col_width
worksheet.col(4).width = col_width
worksheet.col(5).width = col_width
worksheet.col(6).width = col_width
worksheet.col(7).width = col_width
worksheet.col(8).width = col_width
worksheet.col(9).width = col_width
def insertDataInSheet(worksheet, currentDogCounter, dog):
breed = dog.find("div", {"id": "page-title"}).select('h1')[0].text.strip()
print(str(currentDogCounter) + " " + breed)
attributeList = dog.find("ul", {"class": "attribute-list"})
try:
characteristics = attributeList.find_all("li")[0].find("span", {"class": "attribute-list__description"}).string
except IndexError:
characteristics = "NA"
except AttributeError:
characteristics = "NA"
try:
height = attributeList.find_all("li")[2].find("span", {"class": "attribute-list__description"}).string
except IndexError:
height = "NA"
except AttributeError:
height = "NA"
try:
weight = attributeList.find_all("li")[3].find("span", {"class": "attribute-list__description"}).string
except IndexError:
weight = "NA"
except AttributeError:
weight = "NA"
try:
        lifeExpectancy = attributeList.find_all("li")[4].find("span", {"class": "attribute-list__description"}).string
    except IndexError:
        lifeExpectancy = "NA"
    except AttributeError:
        lifeExpectancy = "NA"
groomingTab = dog.find("div", {"id": "panel-GROOMING"})
try:
groomingFrequency = groomingTab.find_all("div", {"class": "graph-section__inner"})[0].find("div", {"class": "bar-graph__text"}).string
except IndexError:
groomingFrequency = "NA"
except AttributeError:
groomingFrequency = "NA"
try:
shedding = groomingTab.find_all("div", {"class": "graph-section__inner"})[1].find("div", {"class": "bar-graph__text"}).string
except IndexError:
shedding = "NA"
except AttributeError:
shedding = "NA"
energyTab = dog.find("div", {"id": "panel-EXERCISE"})
try:
energyLevel = energyTab.find_all("div", {"class": "graph-section__inner"})[0].find("div", {"class": "bar-graph__text"}).string
except IndexError:
energyLevel = "DOUBLE CHECK"
except AttributeError:
energyLevel = "NA"
trainingTab = dog.find("div", {"id": "panel-TRAINING"})
try:
trainability = trainingTab.find_all("div", {"class": "graph-section__inner"})[0].find("div", {"class": "bar-graph__text"}).string
except IndexError:
trainability = "DOUBLE CHECK"
except AttributeError:
trainability = "NA"
try:
temperament = trainingTab.find_all("div", {"class": "graph-section__inner"})[1].find("div", {"class": "bar-graph__text"}).string
except IndexError:
temperament = "DOUBLE CHECK"
except AttributeError:
temperament = "NA"
worksheet.write(currentDogCounter, 0, breed)
worksheet.write(currentDogCounter, 1, height)
worksheet.write(currentDogCounter, 2, weight)
    worksheet.write(currentDogCounter, 3, lifeExpectancy)
worksheet.write(currentDogCounter, 4, characteristics)
worksheet.write(currentDogCounter, 5, groomingFrequency)
worksheet.write(currentDogCounter, 6, shedding)
worksheet.write(currentDogCounter, 7, energyLevel)
worksheet.write(currentDogCounter, 8, trainability)
worksheet.write(currentDogCounter, 9, temperament)
#Set Up the Excel File
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet("Dogs")
excel_file_path = "./Dog Options.xls"
addHeadersToSheet(worksheet)
currentDogCounter = 1
for i in range(24):
url = "https://www.akc.org/dog-breeds/page/" + str(i + 1)
response = requests.get(url)
soup = BeautifulSoup(response.text, "lxml")
topDiv = soup.find("div", {"class": "contents-grid-group"})
secondDiv = topDiv.find("div")
dogChoices = secondDiv.find_all("div", {"class": "grid-col"})
for dog in dogChoices:
href = dog.find("a").get("href")
nextResponse = requests.get(href)
dog = BeautifulSoup(nextResponse.text, "lxml")
insertDataInSheet(worksheet, currentDogCounter, dog)
currentDogCounter += 1
workbook.save(excel_file_path)
| addHeadersToSheet |
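
Every attribute lookup in `insertDataInSheet` above is wrapped in the same try/except pair that falls back to "NA". A small helper along the lines of the hypothetical `safe_text` below would remove that repetition; this is a refactoring sketch, not code from the original scraper:

```python
def safe_text(lookup, default="NA"):
    """Run a scraping lookup callable, returning `default` when the element is missing."""
    try:
        return lookup()
    except (IndexError, AttributeError):
        return default

# Example use inside insertDataInSheet:
# height = safe_text(lambda: attributeList.find_all("li")[2]
#                            .find("span", {"class": "attribute-list__description"}).string)
```
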
test_namedtuple.py | """Tests for the namedtuple implementation in collections_overlay.py."""
import textwrap
from pytype import file_utils
from pytype.overlays import collections_overlay
from pytype.pytd import escape
from pytype.pytd import pytd_utils
from pytype.tests import test_base
class NamedtupleTests(test_base.TargetIndependentTest):
"""Tests for collections.namedtuple."""
def _namedtuple_ast(self, name, fields):
return collections_overlay.namedtuple_ast(name, fields, self.python_version)
def _namedtuple_def(self, suffix="", **kws):
"""Generate the expected pyi for a simple namedtuple definition.
Args:
suffix: Optionally, extra text to append to the pyi.
**kws: Must contain exactly one argument of the form
alias=(name, [<fields>]). For example, to generate a definition for
X = namedtuple("_X", "y z"), the method call should be
_namedtuple_def(X=("_X", ["y", "z"])).
Returns:
The expected pyi for the namedtuple instance.
"""
(alias, (name, fields)), = kws.items() # pylint: disable=unbalanced-tuple-unpacking
name = escape.pack_namedtuple(name, fields)
suffix += textwrap.dedent("""
collections = ... # type: module
{alias} = {name}""").format(alias=alias, name=name)
return pytd_utils.Print(self._namedtuple_ast(name, fields)) + "\n" + suffix
def test_basic_namedtuple(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", ["y", "z"])
a = X(y=1, z=2)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["y", "z"]), suffix="a = ... # type: X"))
def test_no_fields(self):
ty = self.Infer("""
import collections
F = collections.namedtuple("F", [])
a = F()
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("F", []), suffix="a = ... # type: F"))
def test_str_args(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "a b c")
b = S(1, 2, 3)
""", deep=False)
self.assertTypesMatchPytd(ty, self._namedtuple_def(
S=("S", ["a", "b", "c"]), suffix="b = ... # type: S"))
def test_str_args2(self):
self.Check("""
import collections
collections.namedtuple("_", "a,b,c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a, b, c")
""")
self.Check("""
import collections
collections.namedtuple("_", "a ,b")
""")
def test_bad_fieldnames(self):
self.InferWithErrors("""
import collections
collections.namedtuple("_", ["abc", "def", "ghi"]) # invalid-namedtuple-arg
collections.namedtuple("_", "_") # invalid-namedtuple-arg
collections.namedtuple("_", "a, 1") # invalid-namedtuple-arg
collections.namedtuple("_", "a, !") # invalid-namedtuple-arg
collections.namedtuple("_", "a, b, c, a") # invalid-namedtuple-arg
collections.namedtuple("1", "") # invalid-namedtuple-arg
""")
def test_rename(self):
ty = self.Infer("""
import collections
S = collections.namedtuple("S", "abc def ghi abc", rename=True)
""", deep=False)
self.assertTypesMatchPytd(
ty, self._namedtuple_def(S=("S", ["abc", "_1", "ghi", "_3"])))
def test_bad_initialize(self):
self.InferWithErrors("""
from collections import namedtuple
X = namedtuple("X", "y z")
a = X(1) # missing-parameter
b = X(y = 2) # missing-parameter
c = X(w = 3) # wrong-keyword-args
d = X(y = "hello", z = 4j) # works
""")
def test_class_name(self):
ty = self.Infer(
"""
import collections
F = collections.namedtuple("S", ['a', 'b', 'c'])
""")
self.assertTypesMatchPytd(
ty, self._namedtuple_def(F=("S", ["a", "b", "c"])))
def test_constructors(self):
self.Check("""
import collections
X = collections.namedtuple("X", "a b c")
g = X(1, 2, 3)
i = X._make((7, 8, 9))
j = X._make((10, 11, 12), tuple.__new__, len)
""")
def test_instance_types(self):
ty = self.Infer(
"""
import collections
X = collections.namedtuple("X", "a b c")
a = X._make((1, 2, 3))
""")
self.assertTypesMatchPytd(ty, self._namedtuple_def(
X=("X", ["a", "b", "c"]), suffix="a = ... # type: X"))
def test_instantiate_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple('X', [('y', str), ('z', int)])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X() # missing-parameter[e1]
foo.X(0, "") # wrong-arg-types[e2]
foo.X(z="", y=0) # wrong-arg-types[e3]
foo.X("", 0)
foo.X(y="", z=0)
""", pythonpath=[d.path])
self.assertErrorRegexes(
errors, {"e1": r"y", "e2": r"str.*int", "e3": r"str.*int"})
def test_use_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [])): ...
""")
_, errors = self.InferWithErrors("""
import foo
foo.X()._replace()
foo.X().nonsense # attribute-error[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"nonsense.*X"})
def test_subclass_pyi_namedtuple(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class X(NamedTuple("X", [("y", int)])): ...
""")
self.Check("""
import foo
class Y(foo.X):
def __new__(cls):
return super(Y, cls).__new__(cls, 0)
Y()
""", pythonpath=[d.path])
def test_varargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
args = None # type: list
X(*args)
""")
def test_kwargs(self):
self.Check("""
import collections
X = collections.namedtuple("X", [])
kwargs = None # type: dict
X(**kwargs)
""")
def test_name_conflict(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("_", [])
Y = collections.namedtuple("_", [])
Z = collections.namedtuple("_", "a")
""", deep=False)
name_x = escape.pack_namedtuple("_", [])
name_z = escape.pack_namedtuple("_", ["a"])
ast_x = self._namedtuple_ast(name_x, [])
ast_z = self._namedtuple_ast(name_z, ["a"])
ast = pytd_utils.Concat(ast_x, ast_z)
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
X = {name_x}
Y = {name_x}
Z = {name_z}""").format(name_x=name_x, name_z=name_z)
self.assertTypesMatchPytd(ty, expected)
def test_subclass(self):
ty = self.Infer("""
import collections
class X(collections.namedtuple("X", [])):
def __new__(cls, _):
return super(X, cls).__new__(cls)
""")
name = escape.pack_namedtuple("X", [])
ast = self._namedtuple_ast(name, [])
expected = pytd_utils.Print(ast) + textwrap.dedent("""
collections = ... # type: module
_TX = TypeVar("_TX", bound=X)
class X({name}):
def __new__(cls: Type[_TX], _) -> _TX: ...""").format(name=name)
self.assertTypesMatchPytd(ty, expected)
def test_subclass_replace(self):
|
def test_subclass_make(self):
ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y._make([1])
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y")
def test_unpacking(self):
with file_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import NamedTuple
X = NamedTuple("X", [('a', str), ('b', int)])
""")
ty = self.Infer("""
import foo
v = None # type: foo.X
a, b = v
""", deep=False, pythonpath=[d.path])
self.assertTypesMatchPytd(ty, """
foo = ... # type: module
v = ... # type: foo.namedtuple_X_0
a = ... # type: str
b = ... # type: int
""")
test_base.main(globals(), __name__ == "__main__")
| ty = self.Infer("""
import collections
X = collections.namedtuple("X", "a")
class Y(X): pass
z = Y(1)._replace(a=2)
""")
self.assertEqual(pytd_utils.Print(ty.Lookup("z")), "z: Y") |
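
The two subclass tests above (`test_subclass_replace`, `test_subclass_make`) expect `_replace` and `_make` to preserve the subclass type instead of decaying to the base namedtuple, which matches CPython's behaviour; a quick standard-library check:

```python
import collections

X = collections.namedtuple("X", "a")

class Y(X):
    pass

z_make = Y._make([1])            # classmethod: constructs a Y, not a bare X
z_replace = Y(1)._replace(a=2)   # _replace re-dispatches through _make, keeping the type

assert type(z_make) is Y
assert type(z_replace) is Y
```
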
frame_byte_cnt0_3.rs | #[doc = "Register `frame_byte_cnt0_3` reader"]
pub struct R(crate::R<FRAME_BYTE_CNT0_3_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<FRAME_BYTE_CNT0_3_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<FRAME_BYTE_CNT0_3_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<FRAME_BYTE_CNT0_3_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `frame_byte_cnt0_3` writer"]
pub struct W(crate::W<FRAME_BYTE_CNT0_3_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<FRAME_BYTE_CNT0_3_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<FRAME_BYTE_CNT0_3_SPEC>> for W {
#[inline(always)] | fn from(writer: crate::W<FRAME_BYTE_CNT0_3_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `frame_byte_cnt_0_3` reader - "]
pub struct FRAME_BYTE_CNT_0_3_R(crate::FieldReader<u32, u32>);
impl FRAME_BYTE_CNT_0_3_R {
#[inline(always)]
pub(crate) fn new(bits: u32) -> Self {
FRAME_BYTE_CNT_0_3_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for FRAME_BYTE_CNT_0_3_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `frame_byte_cnt_0_3` writer - "]
pub struct FRAME_BYTE_CNT_0_3_W<'a> {
w: &'a mut W,
}
impl<'a> FRAME_BYTE_CNT_0_3_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = value;
self.w
}
}
impl R {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn frame_byte_cnt_0_3(&self) -> FRAME_BYTE_CNT_0_3_R {
FRAME_BYTE_CNT_0_3_R::new(self.bits)
}
}
impl W {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn frame_byte_cnt_0_3(&mut self) -> FRAME_BYTE_CNT_0_3_W {
FRAME_BYTE_CNT_0_3_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "frame_byte_cnt0_3.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [frame_byte_cnt0_3](index.html) module"]
pub struct FRAME_BYTE_CNT0_3_SPEC;
impl crate::RegisterSpec for FRAME_BYTE_CNT0_3_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [frame_byte_cnt0_3::R](R) reader structure"]
impl crate::Readable for FRAME_BYTE_CNT0_3_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [frame_byte_cnt0_3::W](W) writer structure"]
impl crate::Writable for FRAME_BYTE_CNT0_3_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets frame_byte_cnt0_3 to value 0"]
impl crate::Resettable for FRAME_BYTE_CNT0_3_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | |
__init__.py | from ..de import Provider as AddressProvider
class Provider(AddressProvider):
|
street_name_formats = (
'{{first_name}}-{{last_name}}-{{street_suffix_long}}',
'{{last_name}}{{street_suffix_short}}',
)
street_address_formats = ('{{street_name}} {{building_number}}', )
address_formats = ('{{street_address}}\n{{postcode}} {{city}}', )
building_number_formats = ('###', '##', '#', '#/#')
street_suffixes_long = (
'Gasse', 'Platz', 'Ring', 'Straße', 'Weg', 'Allee',
)
street_suffixes_short = (
'gasse', 'platz', 'ring', 'straße', 'str.', 'weg', 'allee',
)
postcode_formats = ('#####', )
cities = (
'Aachen', 'Ahaus', 'Altentreptow', 'Altötting', 'Amberg', 'Angermünde',
'Anklam', 'Ansbach', 'Apolda', 'Arnstadt', 'Artern', 'Aschaffenburg',
'Aue', 'Auerbach', 'Augsburg', 'Aurich', 'Backnang', 'Bad Brückenau',
'Bad Freienwalde', 'Bad Kissingen', 'Bad Kreuznach', 'Bad Langensalza',
'Bad Liebenwerda', 'Bad Mergentheim', 'Badalzungen', 'Badibling',
'Badoberan', 'Bamberg', 'Bautzen', 'Bayreuth', 'Beeskow', 'Beilngries',
'Belzig', 'Berchtesgaden', 'Bergzabern', 'Berlin', 'Bernburg',
'Bersenbrück', 'Biedenkopf', 'Bischofswerda', 'Bitterfeld', 'Bogen',
'Borken', 'Borna', 'Brand', 'Brandenburg', 'Bremen', 'Bremervörde',
'Brilon', 'Bruchsal', 'Burg', 'Burgdorf', 'Burglengenfeld',
'Böblingen', 'Büsingenm Hochrhein', 'Bützow', 'Calau', 'Calw', 'Celle',
'Chemnitz', 'Cloppenburg', 'Coburg', 'Cottbus', 'Crailsheim',
'Cuxhaven', 'Dachau', 'Darmstadt', 'Deggendorf', 'Delitzsch', 'Demmin',
'Dessau', 'Dieburg', 'Diepholz', 'Dinkelsbühl', 'Dinslaken',
'Donaueschingen', 'Dresden', 'Duderstadt', 'Döbeln', 'Düren',
'Ebermannstadt', 'Ebern', 'Ebersberg', 'Eberswalde', 'Eckernförde',
'Eggenfelden', 'Eichstätt', 'Eichstätt', 'Eilenburg', 'Einbeck',
'Eisenach', 'Eisenberg', 'Eisenhüttenstadt', 'Eisleben', 'Emmendingen',
'Erbisdorf', 'Erding', 'Erfurt', 'Erkelenz', 'Euskirchen', 'Eutin',
'Fallingbostel', 'Feuchtwangen', 'Finsterwalde', 'Flöha', 'Forchheim',
'Forst', 'Freising', 'Freital', 'Freudenstadt', 'Fulda',
'Fürstenfeldbruck', 'Fürstenwalde', 'Füssen', 'Gadebusch',
'Gardelegen', 'Garmisch-Partenkirchen', 'Geithain', 'Geldern',
'Gelnhausen', 'Genthin', 'Gera', 'Germersheim', 'Gerolzhofen',
'Gießen', 'Gifhorn', 'Goslar', 'Gotha', 'Grafenau', 'Gransee',
'Greifswald', 'Greiz', 'Grevenbroich', 'Grevesmühlen',
'Griesbach Rottal', 'Grimma', 'Grimmen', 'Groß-Gerau', 'Großenhain',
'Gräfenhainichen', 'Guben', 'Gunzenhausen', 'Göppingen', 'Görlitz',
'Göttingen', 'Günzburg', 'Güstrow', 'Gütersloh', 'Hagenow',
'Hainichen', 'Halberstadt', 'Haldensleben', 'Hamburg', 'Hammelburg',
'Hannover', 'Hannoversch Münden', 'Hansestadttralsund', 'Havelberg',
'Hechingen', 'Heiligenstadt', 'Heinsberg', 'Helmstedt', 'Herford',
'Hersbruck', 'Herzberg', 'Hettstedt', 'Hildburghausen', 'Hildesheim',
'Hofgeismar', 'Hohenmölsen', 'Hohenstein-Ernstthal', 'Holzminden',
'Hoyerswerda', 'Husum', 'Höxter', 'Hünfeld', 'Illertissen', 'Ilmenau',
'Ingolstadt', 'Iserlohn', 'Jena', 'Jessen', 'Jülich', 'Jüterbog',
'Kaiserslautern', 'Kamenz', 'Karlsruhe', 'Kassel', 'Kehl', 'Kelheim',
'Kemnath', 'Kitzingen', 'Kleve', 'Klötze', 'Koblenz', 'Konstanz',
'Kronach', 'Kulmbach', 'Kusel', 'Kyritz', 'Königs Wusterhausen',
'Kötzting', 'Leipziger Land', 'Lemgo', 'Lichtenfels', 'Lippstadt',
'Lobenstein', 'Luckau', 'Luckenwalde', 'Ludwigsburg', 'Ludwigslust',
'Lörrach', 'Lübben', 'Lübeck', 'Lübz', 'Lüdenscheid', 'Lüdinghausen',
'Lüneburg', 'Magdeburg', 'Main-Höchst', 'Mainburg', 'Malchin',
'Mallersdorf', 'Marienberg', 'Marktheidenfeld', 'Mayen', 'Meiningen',
'Meißen', 'Melle', 'Mellrichstadt', 'Melsungen', 'Meppen', 'Merseburg',
'Mettmann', 'Miesbach', 'Miltenberg', 'Mittweida', 'Moers', 'Monschau',
'Mühldorfm Inn', 'Mühlhausen', 'München', 'Nabburg', 'Naila', 'Nauen',
'Neu-Ulm', 'Neubrandenburg', 'Neunburg vorm Wald', 'Neuruppin',
'Neuss', 'Neustadtm Rübenberge', 'Neustadtner Waldnaab', 'Neustrelitz',
'Niesky', 'Norden', 'Nordhausen', 'Northeim', 'Nördlingen',
'Nürtingen', 'Oberviechtach', 'Ochsenfurt', 'Olpe', 'Oranienburg',
'Oschatz', 'Osterburg', 'Osterodem Harz', 'Paderborn', 'Parchim',
'Parsberg', 'Pasewalk', 'Passau', 'Pegnitz', 'Peine', 'Perleberg',
'Pfaffenhofenner Ilm', 'Pinneberg', 'Pirmasens', 'Plauen', 'Potsdam',
'Prenzlau', 'Pritzwalk', 'Pößneck', 'Quedlinburg', 'Querfurt',
'Rastatt', 'Rathenow', 'Ravensburg', 'Recklinghausen', 'Regen',
'Regensburg', 'Rehau', 'Reutlingen', 'Ribnitz-Damgarten', 'Riesa',
'Rochlitz', 'Rockenhausen', 'Roding', 'Rosenheim', 'Rostock', 'Roth',
'Rothenburg oberauber', 'Rottweil', 'Rudolstadt', 'Saarbrücken',
'Saarlouis', 'Sangerhausen', 'Sankt Goar', 'Sankt Goarshausen',
'Saulgau', 'Scheinfeld', 'Schleiz', 'Schlüchtern', 'Schmölln',
'Schongau', 'Schrobenhausen', 'Schwabmünchen', 'Schwandorf',
'Schwarzenberg', 'Schweinfurt', 'Schwerin', 'Schwäbisch Gmünd',
'Schwäbisch Hall', 'Sebnitz', 'Seelow', 'Senftenberg', 'Siegen',
'Sigmaringen', 'Soest', 'Soltau', 'Soltau', 'Sondershausen',
'Sonneberg', 'Spremberg', 'Stade', 'Stade', 'Stadtroda',
'Stadtsteinach', 'Staffelstein', 'Starnberg', 'Staßfurt', 'Steinfurt',
'Stendal', 'Sternberg', 'Stollberg', 'Strasburg', 'Strausberg',
'Stuttgart', 'Suhl', 'Sulzbach-Rosenberg', 'Säckingen', 'Sömmerda',
'Tecklenburg', 'Teterow', 'Tirschenreuth', 'Torgau', 'Tuttlingen',
'Tübingen', 'Ueckermünde', 'Uelzen', 'Uffenheim', 'Vechta',
'Viechtach', 'Viersen', 'Vilsbiburg', 'Vohenstrauß', 'Waldmünchen',
'Wanzleben', 'Waren', 'Warendorf', 'Weimar', 'Weißenfels',
'Weißwasser', 'Werdau', 'Wernigerode', 'Wertingen', 'Wesel', 'Wetzlar',
'Wiedenbrück', 'Wismar', 'Wittenberg', 'Wittmund', 'Wittstock',
'Witzenhausen', 'Wolfach', 'Wolfenbüttel', 'Wolfratshausen', 'Wolgast',
'Wolmirstedt', 'Worbis', 'Wunsiedel', 'Wurzen', 'Zerbst', 'Zeulenroda',
'Zossen', 'Zschopau',
)
states = (
'Baden-Württemberg', 'Bayern', 'Berlin', 'Brandenburg', 'Bremen',
'Hamburg', 'Hessen', 'Mecklenburg-Vorpommern', 'Niedersachsen',
'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Sachsen',
'Sachsen-Anhalt', 'Schleswig-Holstein', 'Thüringen',
)
def street_suffix_short(self):
return self.random_element(self.street_suffixes_short)
def street_suffix_long(self):
return self.random_element(self.street_suffixes_long)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
def city_with_postcode(self):
pattern = self.random_element(self.city_with_postcode_formats)
return self.generator.parse(pattern) | city_formats = ('{{city_name}}', )
city_with_postcode_formats = ('{{postcode}} {{city}}', ) |
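
Assuming this file is the `de_DE` address provider of the standard `faker` package (the `from ..de import ...` line and the city list point that way), the formats above are exercised like this (output is random; the commented values are only examples):

```python
from faker import Faker

fake = Faker("de_DE")
print(fake.street_name())         # e.g. "Müller-Schmidt-Allee" or "Beckerweg"
print(fake.city_with_postcode())  # e.g. "01945 Chemnitz"
print(fake.address())             # street address, then "<postcode> <city>" on the next line
```
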
models.rs | #![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[doc = "Represents an instance of an Analysis Services resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AnalysisServicesServer {
#[serde(flatten)]
pub resource: Resource,
#[doc = "Properties of Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AnalysisServicesServerProperties>,
}
impl AnalysisServicesServer {
pub fn new(resource: Resource) -> Self {
Self {
resource,
properties: None,
}
}
}
#[doc = "An object that represents a set of mutable Analysis Services resource properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AnalysisServicesServerMutableProperties {
#[doc = "An array of administrator user identities."]
#[serde(rename = "asAdministrators", default, skip_serializing_if = "Option::is_none")]
pub as_administrators: Option<ServerAdministrators>,
#[doc = "The SAS container URI to the backup container."]
#[serde(rename = "backupBlobContainerUri", default, skip_serializing_if = "Option::is_none")]
pub backup_blob_container_uri: Option<String>,
#[doc = "The gateway details."]
#[serde(rename = "gatewayDetails", default, skip_serializing_if = "Option::is_none")]
pub gateway_details: Option<GatewayDetails>,
#[doc = "An array of firewall rules."]
#[serde(rename = "ipV4FirewallSettings", default, skip_serializing_if = "Option::is_none")]
pub ip_v4_firewall_settings: Option<IPv4FirewallSettings>,
#[doc = "How the read-write server's participation in the query pool is controlled.<br/>It can have the following values: <ul><li>readOnly - indicates that the read-write server is intended not to participate in query operations</li><li>all - indicates that the read-write server can participate in query operations</li></ul>Specifying readOnly when capacity is 1 results in error."]
#[serde(rename = "querypoolConnectionMode", default, skip_serializing_if = "Option::is_none")]
pub querypool_connection_mode: Option<analysis_services_server_mutable_properties::QuerypoolConnectionMode>,
#[doc = "The managed mode of the server (0 = not managed, 1 = managed)."]
#[serde(rename = "managedMode", default, skip_serializing_if = "Option::is_none")]
pub managed_mode: Option<analysis_services_server_mutable_properties::ManagedMode>,
#[doc = "The server monitor mode for AS server"]
#[serde(rename = "serverMonitorMode", default, skip_serializing_if = "Option::is_none")]
pub server_monitor_mode: Option<analysis_services_server_mutable_properties::ServerMonitorMode>,
}
impl AnalysisServicesServerMutableProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod analysis_services_server_mutable_properties {
use super::*;
#[doc = "How the read-write server's participation in the query pool is controlled.<br/>It can have the following values: <ul><li>readOnly - indicates that the read-write server is intended not to participate in query operations</li><li>all - indicates that the read-write server can participate in query operations</li></ul>Specifying readOnly when capacity is 1 results in error."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum QuerypoolConnectionMode {
All,
ReadOnly,
}
impl Default for QuerypoolConnectionMode {
fn default() -> Self {
Self::All
}
}
#[doc = "The managed mode of the server (0 = not managed, 1 = managed)."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ManagedMode {}
#[doc = "The server monitor mode for AS server"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServerMonitorMode {}
}
#[doc = "Properties of Analysis Services resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AnalysisServicesServerProperties {
#[serde(flatten)]
pub analysis_services_server_mutable_properties: AnalysisServicesServerMutableProperties,
#[doc = "The current state of Analysis Services resource. The state is to indicate more states outside of resource provisioning."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<analysis_services_server_properties::State>,
#[doc = "The current deployment state of Analysis Services resource. The provisioningState is to indicate states for resource provisioning."]
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<analysis_services_server_properties::ProvisioningState>,
#[doc = "The full name of the Analysis Services resource."]
#[serde(rename = "serverFullName", default, skip_serializing_if = "Option::is_none")]
pub server_full_name: Option<String>,
#[doc = "Represents the SKU name and Azure pricing tier for Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<ResourceSku>,
}
impl AnalysisServicesServerProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod analysis_services_server_properties {
use super::*;
#[doc = "The current state of Analysis Services resource. The state is to indicate more states outside of resource provisioning."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Deleting,
Succeeded,
Failed,
Paused,
Suspended,
Provisioning,
Updating,
Suspending,
Pausing,
Resuming,
Preparing,
Scaling,
}
#[doc = "The current deployment state of Analysis Services resource. The provisioningState is to indicate states for resource provisioning."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Deleting,
Succeeded,
Failed,
Paused,
Suspended,
Provisioning,
Updating,
Suspending,
Pausing,
Resuming,
Preparing,
Scaling,
}
}
#[doc = "Provision request specification"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AnalysisServicesServerUpdateParameters {
#[doc = "Represents the SKU name and Azure pricing tier for Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<ResourceSku>,
#[doc = "Key-value pairs of additional provisioning properties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[doc = "An object that represents a set of mutable Analysis Services resource properties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AnalysisServicesServerMutableProperties>,
}
impl AnalysisServicesServerUpdateParameters {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An array of Analysis Services resources."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AnalysisServicesServers {
#[doc = "An array of Analysis Services resources."]
pub value: Vec<AnalysisServicesServer>,
}
impl AnalysisServicesServers {
pub fn new(value: Vec<AnalysisServicesServer>) -> Self {
Self { value }
}
}
#[doc = "Details of server name request body."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CheckServerNameAvailabilityParameters {
#[doc = "Name for checking availability."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The resource type of azure analysis services."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
impl CheckServerNameAvailabilityParameters {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The checking result of server name availability."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CheckServerNameAvailabilityResult {
#[doc = "Indicator of available of the server name."]
#[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")]
pub name_available: Option<bool>,
#[doc = "The reason of unavailability."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reason: Option<String>,
#[doc = "The detailed message of the request unavailability."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl CheckServerNameAvailabilityResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The resource management error additional info."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorAdditionalInfo {
#[doc = "The additional info type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "The additional info."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub info: Option<serde_json::Value>,
}
impl ErrorAdditionalInfo {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The error detail."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorDetail {
#[doc = "The error code."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[doc = "The error message."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[doc = "The error target."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[doc = "The error details."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
#[doc = "The error additional info."]
#[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
pub additional_info: Vec<ErrorAdditionalInfo>,
}
impl ErrorDetail {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Describes the format of Error response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorResponse {
#[doc = "The error object"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<error_response::Error>,
}
impl ErrorResponse {
pub fn new() -> Self {
Self::default()
}
}
pub mod error_response {
use super::*;
#[doc = "The error object"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Error {
#[doc = "Error code"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[doc = "Error message indicating why the operation failed."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[doc = "The error details."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
}
impl Error {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "The gateway details."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct GatewayDetails {
#[doc = "Gateway resource to be associated with the server."]
#[serde(rename = "gatewayResourceId", default, skip_serializing_if = "Option::is_none")]
pub gateway_resource_id: Option<String>,
#[doc = "Gateway object id from in the DMTS cluster for the gateway resource."]
#[serde(rename = "gatewayObjectId", default, skip_serializing_if = "Option::is_none")]
pub gateway_object_id: Option<String>,
#[doc = "Uri of the DMTS cluster."]
#[serde(rename = "dmtsClusterUri", default, skip_serializing_if = "Option::is_none")]
pub dmts_cluster_uri: Option<String>,
}
impl GatewayDetails {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Detail of gateway errors."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct GatewayError {
#[doc = "Error code of list gateway."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[doc = "Error message of list gateway."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl GatewayError {
pub fn new() -> Self |
}
#[doc = "Status of gateway is error."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct GatewayListStatusError {
#[doc = "Detail of gateway errors."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<GatewayError>,
}
impl GatewayListStatusError {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Status of gateway is live."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct GatewayListStatusLive {
#[doc = "Live message of list gateway. Status: 0 - Live"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<gateway_list_status_live::Status>,
}
impl GatewayListStatusLive {
pub fn new() -> Self {
Self::default()
}
}
pub mod gateway_list_status_live {
use super::*;
#[doc = "Live message of list gateway. Status: 0 - Live"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {}
}
#[doc = "The detail of firewall rule."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IPv4FirewallRule {
#[doc = "The rule name."]
#[serde(rename = "firewallRuleName", default, skip_serializing_if = "Option::is_none")]
pub firewall_rule_name: Option<String>,
#[doc = "The start range of IPv4."]
#[serde(rename = "rangeStart", default, skip_serializing_if = "Option::is_none")]
pub range_start: Option<String>,
#[doc = "The end range of IPv4."]
#[serde(rename = "rangeEnd", default, skip_serializing_if = "Option::is_none")]
pub range_end: Option<String>,
}
impl IPv4FirewallRule {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An array of firewall rules."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IPv4FirewallSettings {
#[doc = "An array of firewall rules."]
#[serde(rename = "firewallRules", default, skip_serializing_if = "Vec::is_empty")]
pub firewall_rules: Vec<IPv4FirewallRule>,
#[doc = "The indicator of enabling PBI service."]
#[serde(rename = "enablePowerBIService", default, skip_serializing_if = "Option::is_none")]
pub enable_power_bi_service: Option<bool>,
}
impl IPv4FirewallSettings {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A Consumption REST API operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Operation {
#[doc = "Operation name: {provider}/{resource}/{operation}."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The object that represents the operation."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
#[doc = "The origin"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
}
impl Operation {
pub fn new() -> Self {
Self::default()
}
}
pub mod operation {
use super::*;
#[doc = "The object that represents the operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Display {
#[doc = "Service provider: Microsoft.Consumption."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[doc = "Resource on which the operation is performed: UsageDetail, etc."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[doc = "Operation type: Read, write, delete, etc."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[doc = "Description of the operation object."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
impl Display {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "Result of listing consumption operations. It contains a list of operations and a URL link to get the next set of results."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationListResult {
#[doc = "List of analysis services operations supported by the Microsoft.AnalysisServices resource provider."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
#[doc = "URL to get the next set of operation list results if there are any."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl OperationListResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The status of operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationStatus {
#[doc = "The operation Id."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "The operation name."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The start time of the operation."]
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[doc = "The end time of the operation."]
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<String>,
#[doc = "The status of the operation."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
#[doc = "Describes the format of Error response."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponse>,
}
impl OperationStatus {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Represents an instance of an Analysis Services resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[doc = "An identifier that represents the Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "The name of the Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The type of the Analysis Services resource."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "Location of the Analysis Services resource."]
pub location: String,
#[doc = "Represents the SKU name and Azure pricing tier for Analysis Services resource."]
pub sku: ResourceSku,
#[doc = "Key-value pairs of additional resource provisioning properties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
impl Resource {
pub fn new(location: String, sku: ResourceSku) -> Self {
Self {
id: None,
name: None,
type_: None,
location,
sku,
tags: None,
}
}
}
#[doc = "Represents the SKU name and Azure pricing tier for Analysis Services resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSku {
#[doc = "Name of the SKU level."]
pub name: String,
#[doc = "The name of the Azure pricing tier to which the SKU applies."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<resource_sku::Tier>,
#[doc = "The number of instances in the read only query pool."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<i32>,
}
impl ResourceSku {
pub fn new(name: String) -> Self {
Self {
name,
tier: None,
capacity: None,
}
}
}
pub mod resource_sku {
use super::*;
#[doc = "The name of the Azure pricing tier to which the SKU applies."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Tier {
Development,
Basic,
Standard,
}
}
#[doc = "An array of administrator user identities."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ServerAdministrators {
#[doc = "An array of administrator user identities."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub members: Vec<String>,
}
impl ServerAdministrators {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An object that represents SKU details for existing resources."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SkuDetailsForExistingResource {
#[doc = "Represents the SKU name and Azure pricing tier for Analysis Services resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<ResourceSku>,
}
impl SkuDetailsForExistingResource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An object that represents enumerating SKUs for existing resources."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SkuEnumerationForExistingResourceResult {
#[doc = "The collection of available SKUs for existing resources."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<SkuDetailsForExistingResource>,
}
impl SkuEnumerationForExistingResourceResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An object that represents enumerating SKUs for new resources."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SkuEnumerationForNewResourceResult {
#[doc = "The collection of available SKUs for new resources."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ResourceSku>,
}
impl SkuEnumerationForNewResourceResult {
pub fn new() -> Self {
Self::default()
}
}
| {
Self::default()
} |
spawner.rs | use bevy::prelude::*;
use rand::prelude::*;
use crate::enemy::*;
#[derive(Debug, PartialEq)]
pub struct SpawnData {
wave: i32,
min_enemies: usize,
max_enemies: usize,
time_passed: f32,
interval: f32,
enemy_count: usize,
}
impl Default for SpawnData {
fn default() -> SpawnData {
SpawnData {
wave: 0,
min_enemies: 1,
max_enemies: 3,
time_passed: 0.,
interval: 6.,
enemy_count: 0,
}
}
} |
pub fn spawn_startup (
mut commands: Commands,
) {
commands.insert_resource(SpawnData::default());
}
pub fn spawn_enemies_constant(
time: Res<Time>,
mut spawn_data: ResMut<SpawnData>,
mut commands: Commands,
) {
spawn_data.time_passed += time.delta_seconds();
if spawn_data.time_passed < spawn_data.interval {
return;
}
if spawn_data.enemy_count > 90 {
return;
}
let mut rng = rand::thread_rng();
let max = rng.gen_range(spawn_data.min_enemies..spawn_data.max_enemies);
for _ in 0..max {
let x = rng.gen_range(-320.0..320.0);
let y = rng.gen_range(-240.0..240.0);
enemy_spawn(&mut commands, Transform {
translation: Vec3::new(x, y, 1.),
..Default::default()
});
spawn_data.enemy_count += 1;
}
spawn_data.time_passed = 0.;
spawn_data.wave += 1;
spawn_data.min_enemies = (spawn_data.wave / 4) as usize + 1;
spawn_data.max_enemies = (spawn_data.wave / 2) as usize + 3;
}
pub fn enemy_removed (
query: RemovedComponents<Enemy>,
mut spawn_data: ResMut<SpawnData>,
) {
for _ in query.iter() {
if spawn_data.enemy_count > 1 {
spawn_data.enemy_count -= 1;
}
}
} | |
hypervisors.py | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _
from nova import servicegroup
ALIAS = "os-hypervisors"
authorize = extensions.os_compute_authorizer(ALIAS)
class HypervisorsController(wsgi.Controller):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self):
self.host_api = compute.HostAPI()
self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, service, detail, servers=None,
**kwargs):
alive = self.servicegroup_api.service_is_up(service)
hyp_dict = {
'id': hypervisor.id,
'hypervisor_hostname': hypervisor.hypervisor_hostname,
'state': 'up' if alive else 'down',
'status': ('disabled' if service.disabled
else 'enabled'),
}
if detail and not servers:
for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least',
'host_ip'):
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': service.id,
'host': hypervisor.host,
'disabled_reason': service.disabled_reason,
}
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
@extensions.expected_errors(())
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
False)
for hyp in compute_nodes])
@extensions.expected_errors(())
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
req.cache_db_compute_nodes(compute_nodes)
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host(
context, hyp.host),
True)
for hyp in compute_nodes])
@extensions.expected_errors(404)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
service = self.host_api.service_get_by_compute_host(
context, hyp.host)
return dict(hypervisor=self._view_hypervisor(hyp, service, True))
@extensions.expected_errors((404, 501))
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
req.cache_db_compute_node(hyp)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp.host
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
service = self.host_api.service_get_by_compute_host(context, host)
return dict(hypervisor=self._view_hypervisor(hyp, service, False,
uptime=uptime))
@extensions.expected_errors(404)
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(
hyp,
self.host_api.service_get_by_compute_host( | else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@extensions.expected_errors(404)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node.host)
service = self.host_api.service_get_by_compute_host(
context, compute_node.host)
hyp = self._view_hypervisor(compute_node, service, False,
instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
@extensions.expected_errors(())
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.V3APIExtensionBase):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = ALIAS
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(ALIAS,
HypervisorsController(),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
def get_controller_extensions(self):
return [] | context, hyp.host),
False)
for hyp in hypervisors]) |
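
For reference, `_view_hypervisor` shapes every response in this controller: a summary entry (used by `index` and `search`) carries only identity and liveness, while `detail=True` adds the capacity counters and the nested `service` block. A hand-written sketch of the two shapes, with made-up values:

```python
summary_view = {
    "id": 1,
    "hypervisor_hostname": "compute-01",
    "state": "up",         # derived from servicegroup liveness
    "status": "enabled",   # derived from service.disabled
}

detail_view = dict(
    summary_view,
    vcpus=32, memory_mb=131072, local_gb=2048,
    vcpus_used=8, memory_mb_used=16384, local_gb_used=512,
    hypervisor_type="QEMU", hypervisor_version=2012000,
    free_ram_mb=114688, free_disk_gb=1536, current_workload=0,
    running_vms=4, cpu_info="{}", disk_available_least=1500,
    host_ip="10.0.0.11",
    service={"id": 7, "host": "compute-01", "disabled_reason": None},
)
```
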
client.go | package redis
import (
"context"
"errors"
"sync"
"time"
"github.com/cenkalti/backoff"
"github.com/go-redis/redis/v8"
"go.uber.org/zap"
"github.com/geometry-labs/icon-logs/config"
)
type Client struct {
client *redis.Client
pubsub *redis.PubSub
}
var redisClient *Client
var redisClientOnce sync.Once
func GetRedisClient() *Client {
redisClientOnce.Do(func() {
addr := config.Config.RedisHost + ":" + config.Config.RedisPort
retryOperation := func() error {
redisClient = new(Client)
// Init connection
if config.Config.RedisSentinelClientMode == false {
// Use default client
redisClient.client = redis.NewClient(&redis.Options{
Addr: addr,
Password: config.Config.RedisPassword,
DB: 0,
})
} else {
// Use sentinel client
redisClient.client = redis.NewFailoverClient(&redis.FailoverOptions{
MasterName: config.Config.RedisSentinelClientMasterName,
SentinelAddrs: []string{addr},
})
}
if redisClient.client == nil {
zap.S().Warn("RedisClient: Unable to create to redis client")
return errors.New("RedisClient: Unable to create to redis client")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Test connection
_, err := redisClient.client.Ping(ctx).Result()
if err != nil |
// Init pubsub
redisClient.pubsub = redisClient.client.Subscribe(ctx, config.Config.RedisChannel)
// Test pubsub
_, err = redisClient.pubsub.Receive(ctx)
if err != nil {
zap.S().Warn("RedisClient: Unable to create pubsub channel")
return err
}
return nil
}
err := backoff.Retry(retryOperation, backoff.NewConstantBackOff(time.Second*3))
if err != nil {
zap.S().Fatal("RedisClient: Could not connect to redis service ERROR=", err.Error())
}
})
return redisClient
}
| {
zap.S().Warn("RedisClient: Unable to connect to redis", err.Error())
return err
} |
customer.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { CustomerRoutingModule } from './customer-routing.module';
import { CustomerComponent } from './customer.component';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { HttpClientModule, HTTP_INTERCEPTORS } from '@angular/common/http';
import { MyInterceptor } from '../Utility/Service';
import { ReverseString } from '../Utility/Pipes';
import { IndianCurrency } from '../Utility/Currency.Pipes';
@NgModule({
declarations: [CustomerComponent, ReverseString, IndianCurrency],
imports: [
CommonModule,
CustomerRoutingModule, FormsModule, ReactiveFormsModule, HttpClientModule
],
providers: [// For service
{ provide: HTTP_INTERCEPTORS, useClass: MyInterceptor, multi: true }]
})
export class | { }
| CustomerModule |
index.js | // Copyright (c) Jupyter Development Team.
// Distributed under the terms of the Modified BSD License.
export * from './blueprint'; | export * from './icon';
export * from './utils';
//# sourceMappingURL=index.js.map |
|
urls.py | from django.urls import include, path
from rest_framework import routers
from profiles_api import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('', include(router.urls)),
path('login/', views.UserLoginApiView.as_view()),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')), | ] |
|
complexplanet.rs | extern crate noise;
use noise::{utils::*, *};
/// This example demonstrates how to use the noise-rs library to generate
/// terrain elevations for a complex planetary surface.
///
/// The terrain elevations are generated by a collection of over a hundred
/// noise functions in a hierarchy of groups and subgroups. Each group and
/// subgroup outputs a single output value that originates from a caching
/// module (`noise::modules::Cache`). Each group and subgroup can be thought of
/// as a single complex noise function that can be used as a source function for
/// other noise functions. The caching module was chosen as the source of the
/// output value to prevent costly recalculations by each group and subgroup
/// requesting an output value from it.
///
/// The following is a list of module groups and subgroups that build the
/// planet's terrain:
///
/// 1. Group (continent definition)
/// * Subgroup (base continent definition)
/// * Subgroup (continent definition)
/// 2. Group (terrain type definition)
/// * Subgroup (terrain type definition)
/// 3. Group (mountainous terrain)
/// * Subgroup (mountain base definition)
/// * Subgroup (high mountainous terrain)
/// * Subgroup (low mountainous terrain)
/// * Subgroup (mountainous terrain)
/// 4. Group (hilly terrain)
/// * Subgroup (hilly terrain)
/// 5. Group (plains terrain)
/// * Subgroup (plains terrain)
/// 6. Group (badlands terrain)
/// * Subgroup (badlands sand)
/// * Subgroup (badlands cliffs)
/// * Subgroup (badlands terrain)
/// 7. Group (river positions)
/// * Subgroup (river positions)
/// 8. Group (scaled mountainous terrain)
/// * Subgroup (scaled mountainous terrain)
/// 9. Group (scaled hilly terrain)
/// * Subgroup (scaled hilly terrain)
/// 10. Group (scaled plains terrain)
/// * Subgroup (scaled plains terrain)
/// 11. Group (scaled badlands terrain)
/// * Subgroup (scaled badlands terrain)
/// 12. Group (final planet)
/// * Subgroup (continental shelf)
/// * Subgroup (base continent elevation)
/// * Subgroup (continents with plains)
/// * Subgroup (continent with hills)
/// * Subgroup (continents with mountains)
/// * Subgroup (continents with badlands)
/// * Subgroup (continents with rivers)
/// * Subgroup (unscaled final planet)
/// * Subgroup (final planet)
///
/// A description for each group and subgroup can be found above the source
/// code for that group and subgroup.
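// A minimal sketch (not part of the original example), assuming the same
// `noise` crate API used below, of the cache-per-subgroup pattern described
// above: build a source function, wrap it in `Cache`, and hand out references
// to it so its output value is computed once and reused by several consumers.
//
//     let base = Fbm::new().with_seed(0).with_frequency(1.0).with_octaves(4);
//     let cached = Cache::new(base); // the subgroup's single output
//     let flat = Constant::new(0.0);
//     let _combined = Min::new(&cached, &flat); // reuses the cached output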
#[allow(non_snake_case)]
fn | () {
/// Planet seed. Change this to generate a different planet.
const CURRENT_SEED: u32 = 0;
/// Frequency of the planet's continents. Higher frequency produces
/// smaller, more numerous continents. This value is measured in radians.
const CONTINENT_FREQUENCY: f64 = 1.0;
/// Lacunarity of the planet's continents. Changing this value produces
/// slightly different continents. For the best results, this value should
/// be random, but close to 2.0.
const CONTINENT_LACUNARITY: f64 = 2.208984375;
/// Lacunarity of the planet's mountains. Changing the value produces
/// slightly different mountains. For the best results, this value should
/// be random, but close to 2.0.
const MOUNTAIN_LACUNARITY: f64 = 2.142578125;
/// Lacunarity of the planet's hills. Changing this value produces
/// slightly different hills. For the best results, this value should be
/// random, but close to 2.0.
const HILLS_LACUNARITY: f64 = 2.162109375;
/// Lacunarity of the planet's plains. Changing this value produces
/// slightly different plains. For the best results, this value should be
/// random, but close to 2.0.
const PLAINS_LACUNARITY: f64 = 2.314453125;
/// Lacunarity of the planet's badlands. Changing this value produces
/// slightly different badlands. For the best results, this value should
/// be random, but close to 2.0.
const BADLANDS_LACUNARITY: f64 = 2.212890625;
/// Specifies the "twistiness" of the mountains.
const MOUNTAINS_TWIST: f64 = 1.0;
/// Specifies the "twistiness" of the hills.
const HILLS_TWIST: f64 = 1.0;
/// Specifies the "twistiness" of the badlands.
const BADLANDS_TWIST: f64 = 1.0;
/// Specifies the planet's sea level. This value must be between -1.0
/// (minimum planet elevation) and +1.0 (maximum planet elevation).
const SEA_LEVEL: f64 = 0.0;
/// Specifies the level on the planet in which continental shelves appear.
/// This value must be between -1.0 (minimum planet elevation) and +1.0
/// (maximum planet elevation), and must be less than `SEA_LEVEL`.
const SHELF_LEVEL: f64 = -0.375;
/// Determines the amount of mountainous terrain that appears on the
/// planet. Values range from 0.0 (no mountains) to 1.0 (all terrain is
/// covered in mountains). Mountains terrain will overlap hilly terrain.
/// Because the badlands terrain may overlap parts of the mountainous
/// terrain, setting `MOUNTAINS_AMOUNT` to 1.0 may not completely cover the
/// terrain in mountains.
const MOUNTAINS_AMOUNT: f64 = 0.5;
/// Determines the amount of hilly terrain that appears on the planet.
/// Values range from 0.0 (no hills) to 1.0 (all terrain is covered in
/// hills). This value must be less than `MOUNTAINS_AMOUNT`. Because the
/// mountains terrain will overlap parts of the hilly terrain, and the
/// badlands terrain may overlap parts of the hilly terrain, setting
/// `HILLS_AMOUNT` to 1.0 may not completely cover the terrain in hills.
const HILLS_AMOUNT: f64 = (1.0 + MOUNTAINS_AMOUNT) / 2.0;
/// Determines the amount of badlands terrain that covers the planet.
/// Values range from 0.0 (no badlands) to 1.0 (all terrain is covered in
/// badlands). Badlands terrain will overlap any other type of terrain.
const BADLANDS_AMOUNT: f64 = 0.3125;
/// Offset to apply to the terrain type definition. Low values (< 1.0)
/// cause the rough areas to appear only at high elevations. High values
/// (> 2.0) cause the rough areas to appear at any elevation. The
/// percentage of rough areas on the planet are independent of this value.
const TERRAIN_OFFSET: f64 = 1.0;
/// Specifies the amount of "glaciation" on the mountains. This value
/// should be close to 1.0 and greater than 1.0.
const MOUNTAIN_GLACIATION: f64 = 1.375;
/// Scaling to apply to the base continent elevations, in planetary
/// elevation units.
const CONTINENT_HEIGHT_SCALE: f64 = (1.0 - SEA_LEVEL) / 4.0;
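// (With SEA_LEVEL = 0.0, this works out to 0.25 planetary elevation units.)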
/// Maximum depth of the rivers, in planetary elevation units.
const RIVER_DEPTH: f64 = 0.0234375;
// ////////////////////////////////////////////////////////////////////////
// Function group: continent definition
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: base continent definition (7 noise functions)
//
// This subgroup roughly defines the positions and base elevations of the
// planet's continents.
//
// The "base elevation" is the elevation of the terrain before any terrain
// features (mountains, hills, etc.) are placed on that terrain.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Continent module]: This FBM module generates the continents. This
// noise function has a high number of octaves so that detail is visible at
// high zoom levels.
let baseContinentDef_fb0 = Fbm::new()
.with_seed(CURRENT_SEED)
.with_frequency(CONTINENT_FREQUENCY)
.with_persistence(0.5)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(14);
// debug::render_noise_module("complexplanet_images/00_0_baseContinentDef_fb0\
// .png",
// &baseContinentDef_fb0,
// 1024,
// 1024,
// 100);
// 2: [Continent-with-ranges module]: Next, a curve module modifies the
// output value from the continent module so that very high values appear
// near sea level. This defines the positions of the mountain ranges.
let baseContinentDef_cu = Curve::new(&baseContinentDef_fb0)
.add_control_point(-2.0000 + SEA_LEVEL, -1.625 + SEA_LEVEL)
.add_control_point(-1.0000 + SEA_LEVEL, -1.375 + SEA_LEVEL)
.add_control_point(0.0000 + SEA_LEVEL, -0.375 + SEA_LEVEL)
.add_control_point(0.0625 + SEA_LEVEL, 0.125 + SEA_LEVEL)
.add_control_point(0.1250 + SEA_LEVEL, 0.250 + SEA_LEVEL)
.add_control_point(0.2500 + SEA_LEVEL, 1.000 + SEA_LEVEL)
.add_control_point(0.5000 + SEA_LEVEL, 0.250 + SEA_LEVEL)
.add_control_point(0.7500 + SEA_LEVEL, 0.250 + SEA_LEVEL)
.add_control_point(1.0000 + SEA_LEVEL, 0.500 + SEA_LEVEL)
.add_control_point(2.0000 + SEA_LEVEL, 0.500 + SEA_LEVEL);
// debug::render_noise_module("complexplanet_images/00_1_baseContinentDef_cu\
// .png",
// &baseContinentDef_cu,
// 1024,
// 1024,
// 100);
// 3: [Carver module]: This higher-frequency Fbm module will be
// used by subsequent noise functions to carve out chunks from the
// mountain ranges within the continent-with-ranges module so that the
// mountain ranges will not be completely impassable.
let baseContinentDef_fb1 = Fbm::new()
.with_seed(CURRENT_SEED + 1)
.with_frequency(CONTINENT_FREQUENCY * 4.34375)
.with_persistence(0.5)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(11);
// debug::render_noise_module("complexplanet_images/00_2_baseContinentDef_fb1\
// .png",
// &baseContinentDef_fb1,
// 1024,
// 1024,
// 100);
// 4: [Scaled-carver module]: This scale/bias module scales the output
// value from the carver module such that it is usually near 1.0. This
// is required for step 5.
let baseContinentDef_sb = ScaleBias::new(&baseContinentDef_fb1)
.with_scale(0.375)
.with_bias(0.625);
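// (ScaleBias outputs input * scale + bias, so a roughly [-1.0, 1.0] input
// lands in about [0.25, 1.0] here.)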
// debug::render_noise_module("complexplanet_images/00_3_baseContinentDef_sb\
// .png",
// &baseContinentDef_sb,
// 1024,
// 1024,
// 100);
// 5: [Carved-continent module]: This minimum-value module carves out
// chunks from the continent-with-ranges module. It does this by ensuring
// that only the minimum of the output values from the scaled-carver
// module and the continent-with-ranges module contributes to the output
// value of this subgroup. Most of the time, the minimum value module will
// select the output value from the continent-with-ranges module since the
// output value from the scaled-carver is usually near 1.0. Occasionally,
// the output from the scaled-carver module will be less than the output
// value from the continent-with-ranges module, so in this case, the output
// value from the scaled-carver module is selected.
let baseContinentDef_mi = Min::new(&baseContinentDef_sb, &baseContinentDef_cu);
// debug::render_noise_module("complexplanet_images/00_4_baseContinentDef_mi\
// .png",
// &baseContinentDef_mi,
// 1024,
// 1024,
// 100);
// 6: [Clamped-continent module]: Finally, a clamp module modifies the
// carved continent module to ensure that the output value of this subgroup
// is between -1.0 and 1.0.
let baseContinentDef_cl = Clamp::new(&baseContinentDef_mi).with_bounds(-1.0, 1.0);
// 7: [Base-continent-definition subgroup]: Caches the output value from
// the clamped-continent module.
let baseContinentDef = Cache::new(baseContinentDef_cl);
// debug::render_noise_module("complexplanet_images/00_5_baseContinentDef.png",
// &baseContinentDef,
// 1024,
// 1024,
// 100);
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: continent definition (5 noise functions)
//
// This subgroup warps the output value from the base-continent-definition
// subgroup, producing more realistic terrain.
//
// Warping the base continent definition produces lumpier terrain with
// cliffs and rifts.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Coarse-turbulence module]: This turbulence module warps the output
// value from the base-continent-definition subgroup, adding some coarse
// detail to it.
let continentDef_tu0 = Turbulence::new(&baseContinentDef)
.with_seed(CURRENT_SEED + 10)
.with_frequency(CONTINENT_FREQUENCY * 15.25)
.with_power(CONTINENT_FREQUENCY / 113.75)
.with_roughness(13);
// debug::render_noise_module("complexplanet_images/01_0_continentDef_tu0.png",
// &continentDef_tu0,
// 1024,
// 1024,
// 1000);
// 2: [Intermediate-turbulence module]: This turbulence module warps the
// output value from the coarse-turbulence module. This turbulence has a
// higher frequency, but lower power, than the coarse-turbulence module,
// adding some intermediate detail to it.
let continentDef_tu1 = Turbulence::new(continentDef_tu0)
.with_seed(CURRENT_SEED + 11)
.with_frequency(CONTINENT_FREQUENCY * 47.25)
.with_power(CONTINENT_FREQUENCY / 433.75)
.with_roughness(12);
// debug::render_noise_module("complexplanet_images/01_1_continentDef_tu1.png",
// &continentDef_tu1,
// 1024,
// 1024,
// 1000);
// 3: [Warped-base-continent-definition module]: This turbulence module
// warps the output value from the intermediate-turbulence module. This
// turbulence has a higher frequency, but lower power, than the
// intermediate-turbulence module, adding some fine detail to it.
let continentDef_tu2 = Turbulence::new(continentDef_tu1)
.with_seed(CURRENT_SEED + 12)
.with_frequency(CONTINENT_FREQUENCY * 95.25)
.with_power(CONTINENT_FREQUENCY / 1019.75)
.with_roughness(11);
// debug::render_noise_module("complexplanet_images/01_2_continentDef_tu2.png",
// &continentDef_tu2,
// 1024,
// 1024,
// 1000);
// 4: [Select-turbulence module]: At this stage, the turbulence is applied
// to the entire base-continent-definition subgroup, producing some very
// rugged, unrealistic coastlines. This selector module selects the
// output values from the (unwarped) base-continent-definition subgroup
// and the warped-base-continent-definition module, based on the output
// value from the (unwarped) base-continent-definition subgroup. The
// selection boundary is near sea level and has a relatively smooth
// transition. In effect, only the higher areas of the base-continent-
// definition subgroup become warped; the underwater and coastal areas
// remain unaffected.
let continentDef_se = Select::new(&baseContinentDef, &continentDef_tu2, &baseContinentDef)
.with_bounds(SEA_LEVEL - 0.0375, SEA_LEVEL + 1000.0375)
.with_falloff(0.0625);
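// (Here, Select returns the second source where the control value, taken from
// the third argument, falls within the bounds, and the first source elsewhere;
// the transition is smoothed across the falloff range.)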
// debug::render_noise_module("complexplanet_images/01_3_continentDef_se.png",
// &continentDef_se,
// 1024,
// 1024,
// 1000);
// 5: [Continent-definition group]: Caches the output value from the
// clamped-continent module. This is the output value for the entire
// continent-definition group.
let continentDef = Cache::new(continentDef_se);
// debug::render_noise_module("complexplanet_images/01_4_continentDef.png",
// &continentDef,
// 1024,
// 1024,
// 1000);
// ////////////////////////////////////////////////////////////////////////
// Function group: terrain type definition
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: terrain type definition (3 noise functions)
//
// This subgroup defines the positions of the terrain types on the planet.
//
// Terrain types include, in order of increasing roughness, plains, hills,
// and mountains.
//
// This subgroup's output value is based on the output value from the
// continent-definition group. Rougher terrain mainly appears at higher
// elevations.
//
// -1.0 represents the smoothest terrain types (plains and underwater) and
// +1.0 represents the roughest terrain types (mountains).
//
// 1: [Warped-continent module]: This turbulence module slightly warps the
// output value from the continent-definition group. This prevents the
// rougher terrain from appearing exclusively at higher elevations. Rough
// areas may now appear in the ocean, creating rocky islands and
// fjords.
let terrainTypeDef_tu = Turbulence::new(&continentDef)
.with_seed(CURRENT_SEED + 20)
.with_frequency(CONTINENT_FREQUENCY * 18.125)
.with_power(CONTINENT_FREQUENCY / 20.59375 * TERRAIN_OFFSET)
.with_roughness(3);
// 2: [Roughness-probability-shift module]: This terracing module sharpens
// the edges of the warped-continent module near sea level and lowers the
// slope towards the higher-elevation areas. This shrinks the areas in
// which the rough terrain appears, increasing the "rarity" of rough
// terrain.
let terrainTypeDef_te = Terrace::new(&terrainTypeDef_tu)
.add_control_point(-1.00)
.add_control_point(SHELF_LEVEL + SEA_LEVEL / 2.0)
.add_control_point(1.00);
// 3: [Terrain-type-definition group]: Caches the output value from the
// roughness-probability-shift module. This is the output value for the
// entire terrain-type-definition group.
let terrainTypeDef = Cache::new(terrainTypeDef_te);
// /////////////////////////////////////////////////////////////////////////
// Function group: mountainous terrain
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: mountain base definition (9 noise functions)
//
// This subgroup generates the base-mountain elevations. Other subgroups
// will add the ridges and low areas to the base elevations.
//
// -1.0 represents low mountainous terrain and +1.0 represents high
// mountainous terrain.
//
// 1: [Mountain-ridge module]: This ridged-multifractal-noise function
// generates the mountain ridges.
let mountainBaseDef_rm0 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 30)
.with_frequency(1723.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(4);
// 2: [Scaled-mountain-ridge module]: Next, a scale/bias module scales the
// output value from the mountain-ridge module so that its ridges are not
// too high. The reason for this is that another subgroup adds actual
// mountainous terrain to these ridges.
let mountainBaseDef_sb0 = ScaleBias::new(&mountainBaseDef_rm0)
.with_scale(0.5)
.with_bias(0.375);
// 3: [River-valley module]: This ridged-multifractal-noise function
// generates the river valleys. It has a much lower frequency than the
// mountain-ridge module so that more mountain ridges will appear outside
// of the valleys. Note that this noise function generates ridged-multifractal
// noise using only one octave; this information will be important in the
// next step.
let mountainBaseDef_rm1 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 31)
.with_frequency(367.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(1);
// 4: [Scaled-river-valley module]: Next, a scale/bias module applies a
// scaling factor of -2.0 to the output value from the river-valley module.
// This stretches the possible elevation values because one-octave ridged-
// multifractal noise has a lower range of output values than multiple-
// octave ridged-multifractal noise. The negative scaling factor inverts
// the range of the output value, turning the ridges from the river-valley
// module into valleys.
let mountainBaseDef_sb1 = ScaleBias::new(&mountainBaseDef_rm1)
.with_scale(-2.0)
.with_bias(-0.5);
// 5: [Low-flat module]: This low constant value is used by step 6.
let mountainBaseDef_co = Constant::new(-1.0);
// 6: [Mountains-and-valleys module]: This blender module merges the scaled-
// mountain-ridge module and the scaled-river-valley module together. It
// causes the low-lying areas of the terrain to become smooth, and causes
// the high-lying areas of the terrain to contain ridges. To do this, it
// uses the scaled-river-valley module as the control module, causing the
// low-flat module to appear in the lower areas and causing the scaled-
// mountain-ridge module to appear in the higher areas.
let mountainBaseDef_bl = Blend::new(
&mountainBaseDef_co,
&mountainBaseDef_sb0,
&mountainBaseDef_sb1,
);
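// (Blend interpolates between its first two sources, using the third source
// as the interpolation weight.)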
// 7: [Coarse-turbulence module]: This turbulence module warps the output
// value from the mountain-and-valleys module, adding some coarse detail to
// it.
let mountainBaseDef_tu0 = Turbulence::new(mountainBaseDef_bl)
.with_seed(CURRENT_SEED + 32)
.with_frequency(1337.0)
.with_power(1.0 / 6730.0 * MOUNTAINS_TWIST)
.with_roughness(4);
// 8: [Warped-mountains-and-valleys module]: This turbulence module warps
// the output value from the coarse-turbulence module. This turbulence has
// a higher frequency, but lower power, than the coarse-turbulence module,
// adding some fine detail to it.
let mountainBaseDef_tu1 = Turbulence::new(mountainBaseDef_tu0)
.with_seed(CURRENT_SEED + 33)
.with_frequency(21221.0)
.with_power(1.0 / 120157.0 * MOUNTAINS_TWIST)
.with_roughness(6);
// 9: [Mountain-base-definition subgroup]: Caches the output value from the
// warped-mountains-and-valleys module.
let mountainBaseDef = Cache::new(mountainBaseDef_tu1);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: high mountainous terrain (5 noise functions)
//
// This subgroup generates the mountainous terrain that appears at high
// elevations within the mountain ridges.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Mountain-basis-0 module]: This ridged-multifractal-noise function,
// along with the mountain-basis-1 module, generates the individual
// mountains.
let mountainousHigh_rm0 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 40)
.with_frequency(2371.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(3);
// 2: [Mountain-basis-1 module]: This ridged-multifractal-noise function,
// along with the mountain-basis-0 module, generates the individual
// mountains.
let mountainousHigh_rm1 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 41)
.with_frequency(2341.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(3);
// 3: [High-mountains module]: Next, a maximum-value module causes more
// mountains to appear at the expense of valleys. It does this by ensuring
// that only the maximum of the output values from the two ridged-
// multifractal-noise functions contribute to the output value of this
// subgroup.
let mountainousHigh_ma = Max::new(&mountainousHigh_rm0, &mountainousHigh_rm1);
// 4: [Warped-high-mountains module]: This turbulence module warps the
// output value from the high-mountains module, adding some detail to it.
let mountainousHigh_tu = Turbulence::new(mountainousHigh_ma)
.with_seed(CURRENT_SEED + 42)
.with_frequency(31511.0)
.with_power(1.0 / 180371.0 * MOUNTAINS_TWIST)
.with_roughness(4);
// 5: [High-mountainous-terrain subgroup]: Caches the output value from the
// warped-high-mountains module.
let mountainousHigh = Cache::new(mountainousHigh_tu);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: low mountainous terrain (4 noise functions)
//
// This subgroup generates the mountainous terrain that appears at low
// elevations within the river valleys.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Lowland-basis-0 module]: This ridged-multifractal-noise function,
// along with the lowland-basis-1 module, produces the low mountainous
// terrain.
let mountainousLow_rm0 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 50)
.with_frequency(1381.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(8);
// 2: [Lowland-basis-1 module]: This ridged-multifractal-noise function,
// along with the lowland-basis-0 module, produces the low mountainous
// terrain.
let mountainousLow_rm1 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 51)
.with_frequency(1427.0)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(8);
// 3: [Low-mountainous-terrain module]: This multiplication module combines
// the output values from the two ridged-multifractal-noise functions. This
// causes the following to appear in the resulting terrain:
// - Cracks appear when two negative output values are multiplied together.
// - Flat areas appear when a positive and a negative output value are
// multiplied together.
// - Ridges appear when two positive output values are multiplied together.
let mountainousLow_mu = Multiply::new(&mountainousLow_rm0, &mountainousLow_rm1);
// 4: [Low-mountainous-terrain subgroup]: Caches the output value from the
// low-mountainous-terrain module.
let mountainousLow = Cache::new(mountainousLow_mu);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: mountainous terrain (7 noise functions)
//
// This subgroup generates the final mountainous terrain by combining the
// high-mountainous-terrain subgroup with the low-mountainous-terrain
// subgroup.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Scaled-low-mountainous-terrain module]: First, this scale/bias module
// scales the output value from the low-mountainous-terrain subgroup to a very
// low value and biases it towards -1.0. This results in the low mountainous
// areas becoming more-or-less flat with little variation. This will also
// result in the low mountainous areas appearing at the lowest elevations in
// this subgroup.
let mountainousTerrain_sb0 = ScaleBias::new(&mountainousLow)
.with_scale(0.03125)
.with_bias(-0.96875);
// 2: [Scaled-high-mountainous-terrain module]: Next, this scale/bias module
// scales the output value from the high-mountainous-terrain subgroup to 1/4
// of its initial value and biases it so that its output value is usually
// positive.
let mountainousTerrain_sb1 = ScaleBias::new(&mountainousHigh)
.with_scale(0.25)
.with_bias(0.25);
// 3: [Added-high-mountainous-terrain module]: This addition module adds the
// output value from the scaled-high-mountainous-terrain module to the
// output value from the mountain-base-definition subgroup. Mountains now
// appear all over the terrain.
let mountainousTerrain_ad = Add::new(&mountainousTerrain_sb1, &mountainBaseDef);
// 4: [Combined-mountainous-terrain module]: Note that at this point, the
// entire terrain is covered in high mountainous terrain, even at the low
// elevations. To make sure the mountains only appear at the higher
// elevations, this selector module causes low mountainous terrain to appear
// at the low elevations (within the valleys) and the high mountainous
// terrain to appear at the high elevations (within the ridges). To do this,
// this noise function selects the output value from the added-high-
// mountainous-terrain module if the output value from the mountain-base-
// definition subgroup is higher than a set amount. Otherwise, this noise
// module selects the output value from the scaled-low-mountainous-terrain
// module.
let mountainousTerrain_se = Select::new(
&mountainousTerrain_sb0,
&mountainousTerrain_ad,
&mountainBaseDef,
)
.with_bounds(-0.5, 999.5)
.with_falloff(0.5);
// 5: [Scaled-mountainous-terrain-module]: This scale/bias module slightly
// reduces the range of the output value from the combined-mountainous-
// terrain module, decreasing the heights of the mountain peaks.
let mountainousTerrain_sb2 = ScaleBias::new(&mountainousTerrain_se)
.with_scale(0.8)
.with_bias(0.0);
// 6: [Glaciated-mountainous-terrain-module]: This exponential-curve module
// applies an exponential curve to the output value from the scaled-
// mountainous-terrain module. This causes the slope of the mountains to
// smoothly increase towards higher elevations, as if a glacier ground out
// those mountains. This exponential-curve module expects the input value
// to range from -1.0 to +1.0.
let mountainousTerrain_ex =
Exponent::new(&mountainousTerrain_sb2).with_exponent(MOUNTAIN_GLACIATION);
let mountainousTerrain = Cache::new(mountainousTerrain_ex);
// ////////////////////////////////////////////////////////////////////////
// Function group: hilly terrain
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: hilly terrain (11 noise functions)
//
// This subgroup generates the hilly terrain.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Hills module]: This billow-noise function generates the hills.
let hillyTerrain_bi = Billow::new()
.with_seed(CURRENT_SEED + 60)
.with_frequency(1663.0)
.with_persistence(0.5)
.with_lacunarity(HILLS_LACUNARITY)
.with_octaves(6);
// 2: [Scaled-hills module]: Next, a scale/bias module scales the output
// value from the hills module so that its hilltops are not too high. The
// reason for this is that these hills are eventually added to the river
// valleys (see below).
let hillyTerrain_sb0 = ScaleBias::new(&hillyTerrain_bi)
.with_scale(0.5)
.with_bias(0.5);
// 3: [River-valley module]: This ridged-multifractal-noise function generates
// the river valleys. It has a much lower frequency so that more hills will
// appear in between the valleys. Note that this noise function generates
// ridged-multifractal noise using only one octave; this information will be
// important in the next step.
let hillyTerrain_rm = RidgedMulti::new()
.with_seed(CURRENT_SEED + 61)
.with_frequency(367.5)
.with_lacunarity(HILLS_LACUNARITY)
.with_octaves(1);
// 4: [Scaled-river-valley module]: Next, a scale/bias module applies a
// scaling factor of -2.0 to the output value from the river-valley module.
// This stretches the possible elevation values because one-octave ridged-
// multifractal noise has a lower range of output values than multiple-
// octave ridged-multifractal noise. The negative scaling factor inverts
// the range of the output value, turning the ridges from the river-valley
// module into valleys.
let hillyTerrain_sb1 = ScaleBias::new(&hillyTerrain_rm)
.with_scale(-2.0)
.with_bias(-1.0);
// 5: [Low-flat module]: This low constant value is used by step 6.
let hillyTerrain_co = Constant::new(-1.0);
// 6: [Mountains-and-valleys module]: This blender module merges the scaled-
// hills module and the scaled-river-valley module together. It causes the
// low-lying areas of the terrain to become smooth, and causes the high-
// lying areas of the terrain to contain hills. To do this, it uses the
// scaled-hills module as the control module, causing the low-flat module to
// appear in the lower areas and causing the scaled-river-valley module to
// appear in the higher areas.
let hillyTerrain_bl = Blend::new(&hillyTerrain_co, &hillyTerrain_sb1, &hillyTerrain_sb0);
// 7: [Scaled-hills-and-valleys module]: This scale/bias module slightly
// reduces the range of the output value from the hills-and-valleys
// module, decreasing the heights of the hilltops.
let hillyTerrain_sb2 = ScaleBias::new(&hillyTerrain_bl)
.with_scale(0.75)
.with_bias(-0.25);
// 8: [Increased-slope-hilly-terrain module]: To increase the hill slopes
// at higher elevations, this exponential-curve module applies an
// exponential curve to the output value from the scaled-hills-and-valleys
// module. This exponential-curve module expects the input value to range
// from -1.0 to 1.0.
let hillyTerrain_ex = Exponent::new(&hillyTerrain_sb2).with_exponent(1.375);
// 9: [Coarse-turbulence module]: This turbulence module warps the output
// value from the increased-slope-hilly-terrain module, adding some
// coarse detail to it.
let hillyTerrain_tu0 = Turbulence::new(hillyTerrain_ex)
.with_seed(CURRENT_SEED + 62)
.with_frequency(1531.0)
.with_power(1.0 / 16921.0 * HILLS_TWIST)
.with_roughness(4);
// 10: [Warped-hilly-terrain module]: This turbulence module warps the
// output value from the coarse-turbulence module. This turbulence has a
// higher frequency, but lower power, than the coarse-turbulence module,
// adding some fine detail to it.
let hillyTerrain_tu1 = Turbulence::new(hillyTerrain_tu0)
.with_seed(CURRENT_SEED + 63)
.with_frequency(21617.0)
.with_power(1.0 / 117529.0 * HILLS_TWIST)
.with_roughness(6);
// 11: [Hilly-terrain group]: Caches the output value from the warped-hilly-
// terrain module. This is the output value for the entire hilly-terrain
// group.
let hillyTerrain = Cache::new(hillyTerrain_tu1);
// ////////////////////////////////////////////////////////////////////////
// Function group: plains terrain
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: plains terrain (7 noise functions)
//
// This subgroup generates the plains terrain.
//
// Because this subgroup will eventually be flattened considerably, the
// types and combinations of noise functions that generate the plains are not
// really that important; they only need to "look" interesting.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Plains-basis-0 module]: This billow-noise function, along with the
// plains-basis-1 module, produces the plains.
let plainsTerrain_bi0 = Billow::new()
.with_seed(CURRENT_SEED + 70)
.with_frequency(1097.5)
.with_persistence(0.5)
.with_lacunarity(PLAINS_LACUNARITY)
.with_octaves(8);
// 2: [Positive-plains-basis-0 module]: This scale/bias module makes the
// output value from the plains-basis-0 module positive since this output
// value will be multiplied together with the positive-plains-basis-1
// module.
let plainsTerrain_sb0 = ScaleBias::new(&plainsTerrain_bi0)
.with_scale(0.5)
.with_bias(0.5);
// 3: [Plains-basis-1 module]: This billow-noise function, along with the
// plains-basis-0 module, produces the plains.
let plainsTerrain_bi1 = Billow::new()
.with_seed(CURRENT_SEED + 71)
.with_frequency(1097.5)
.with_persistence(0.5)
.with_lacunarity(PLAINS_LACUNARITY)
.with_octaves(8);
// 4: [Positive-plains-basis-1 module]: This scale/bias module makes the
// output value from the plains-basis-1 module positive since this output
// value will be multiplied together with the positive-plains-basis-0
// module.
let plainsTerrain_sb1 = ScaleBias::new(&plainsTerrain_bi1)
.with_scale(0.5)
.with_bias(0.5);
// 5: [Combined-plains-basis module]: This multiplication module combines
// the two plains basis modules together.
let plainsTerrain_mu = Multiply::new(&plainsTerrain_sb0, &plainsTerrain_sb1);
// 6: [Rescaled-plains-basis module]: This scale/bias module maps the output
// value that ranges from 0.0 to 1.0 back to a value that ranges from
// -1.0 to +1.0.
let plainsTerrain_sb2 = ScaleBias::new(&plainsTerrain_mu)
.with_scale(2.0)
.with_bias(-1.0);
// 7: [Plains-terrain group]: Caches the output value from the rescaled-
// plains-basis module. This is the output value for the entire plains-
// terrain group.
let plainsTerrain = Cache::new(plainsTerrain_sb2);
// ////////////////////////////////////////////////////////////////////////
// Function group: badlands terrain
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: badlands sand (6 noise functions)
//
// This subgroup generates the sandy terrain for the badlands.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Sand-dunes module]: This ridged-multifractal-noise function generates
// sand dunes. This ridged-multifractal noise is generated with a single
// octave, which makes very smooth dunes.
let badlandsSand_rm = RidgedMulti::new()
.with_seed(CURRENT_SEED + 80)
.with_frequency(6163.5)
.with_lacunarity(BADLANDS_LACUNARITY)
.with_octaves(1);
// 2: [Scaled-sand-dunes module]: This scale/bias module shrinks the dune
// heights by a small amount. This is necessary so that the subsequent
// noise functions in this subgroup can add some detail to the dunes.
let badlandsSand_sb0 = ScaleBias::new(&badlandsSand_rm)
.with_scale(0.875)
.with_bias(0.0);
// 3: [Dune-detail module]: This noise function uses Voronoi polygons to
// generate the detail to add to the dunes. By enabling the distance
// algorithm, small polygonal pits are generated; the edges of the pits
// are joined to the edges of nearby pits.
let badlandsSand_wo = Worley::new(CURRENT_SEED + 81)
.with_frequency(16183.25)
.with_return_type(ReturnType::Distance);
// 4: [Scaled-dune-detail module]: This scale/bias module shrinks the dune
// details by a large amount. This is necessary so that the subsequent
// noise functions in this subgroup can add this detail to the sand-dunes
// module.
let badlandsSand_sb1 = ScaleBias::new(&badlandsSand_wo)
.with_scale(0.25)
.with_bias(0.25);
// 5: [Dunes-with-detail module]: This addition module combines the scaled-
// sand-dunes module with the scaled-dune-detail module.
let badlandsSand_ad = Add::new(&badlandsSand_sb0, &badlandsSand_sb1);
// 6: [Badlands-sand subgroup]: Caches the output value from the dunes-with-
// detail module.
let badlandsSand = Cache::new(badlandsSand_ad);
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: badlands cliffs (7 noise functions)
//
// This subgroup generates the cliffs for the badlands.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Cliff-basis module]: This Perlin-noise function generates some coherent
// noise that will be used to generate the cliffs.
let badlandsCliffs_fb = Fbm::new()
.with_seed(CURRENT_SEED + 90)
.with_frequency(CONTINENT_FREQUENCY * 839.0)
.with_persistence(0.5)
.with_lacunarity(BADLANDS_LACUNARITY)
.with_octaves(6);
// 2: [Cliff-shaping module]: Next, this curve module applies a curve to
// the output value from the cliff-basis module. This curve is initially
// very shallow, but then its slope increases sharply. At the highest
// elevations, the curve becomes very flat again. This produces the
// stereotypical Utah-style desert cliffs.
let badlandsCliffs_cu = Curve::new(&badlandsCliffs_fb)
.add_control_point(-2.000, -2.000)
.add_control_point(-1.000, -1.000)
.add_control_point(-0.000, -0.750)
.add_control_point(0.500, -0.250)
.add_control_point(0.625, 0.875)
.add_control_point(0.750, 1.000)
.add_control_point(2.000, 1.250);
// 3: [Clamped-cliffs module]: This clamping module makes the tops of the
// cliffs very flat by clamping the output value from the cliff-shaping
// module.
let badlandsCliffs_cl = Clamp::new(&badlandsCliffs_cu).with_bounds(-999.125, 0.875);
// 4: [Terraced-cliffs module]: Next, this terracing module applies some
// terraces to the clamped-cliffs module in the lower elevations before the
// sharp cliff transition.
let badlandsCliffs_te = Terrace::new(&badlandsCliffs_cl)
.add_control_point(-1.000)
.add_control_point(-0.875)
.add_control_point(-0.750)
.add_control_point(-0.500)
.add_control_point(0.000)
.add_control_point(1.000);
// 5: [Coarse-turbulence module]: This turbulence module warps the output
// value from the terraced-cliffs module, adding some coarse detail to it.
let badlandsCliffs_tu0 = Turbulence::new(badlandsCliffs_te)
.with_seed(CURRENT_SEED + 91)
.with_frequency(16111.0)
.with_power(1.0 / 141539.0 * BADLANDS_TWIST)
.with_roughness(3);
// 6: [Warped-cliffs module]: This turbulence module warps the output value
// from the coarse-turbulence module. This turbulence has a higher
// frequency, but lower power, than the coarse-turbulence module, adding
// some fine detail to it.
let badlandsCliffs_tu1 = Turbulence::new(badlandsCliffs_tu0)
.with_seed(CURRENT_SEED + 92)
.with_frequency(36107.0)
.with_power(1.0 / 211543.0 * BADLANDS_TWIST)
.with_roughness(3);
// 7: [Badlands-cliffs subgroup]: Caches the output value from the warped-
// cliffs module.
let badlandsCliffs = Cache::new(badlandsCliffs_tu1);
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: badlands terrain (3 noise functions)
//
// Generates the final badlands terrain.
//
// Using a scale/bias module, the badlands sand is flattened considerably,
// then the sand elevations are lowered to around -1.0. The maximum value
// from the flattened sand module and the cliff module contributes to the
// final elevation. This causes sand to appear at the low elevations since
// the sand is slightly higher than the cliff base.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Scaled-sand-dunes module]: This scale/bias module considerably
// flattens the output value from the badlands-sands subgroup and lowers
// this value to near -1.0.
let badlandsTerrain_sb = ScaleBias::new(&badlandsSand)
.with_scale(0.25)
.with_bias(-0.75);
// 2: [Dunes-and-cliffs module]: This maximum-value module causes the dunes
// to appear in the low areas and the cliffs to appear in the high areas.
// It does this by selecting the maximum of the output values from the
// scaled-sand-dunes module and the badlands-cliffs subgroup.
let badlandsTerrain_ma = Max::new(&badlandsCliffs, &badlandsTerrain_sb);
// 3: [Badlands-terrain group]: Caches the output value from the dunes-and-
// cliffs module. This is the output value for the entire badlands-terrain
// group.
let badlandsTerrain = Cache::new(badlandsTerrain_ma);
// debug::render_noise_module("complexplanet_images/12_2_badlandsTerrain.png",
// &badlandsTerrain,
// 1024,
// 1024,
// 1000);
// ////////////////////////////////////////////////////////////////////////
// Function group: river positions
// ////////////////////////////////////////////////////////////////////////
// ////////////////////////////////////////////////////////////////////////
// Function subgroup: river positions (7 noise functions)
//
// This subgroup generates the river positions.
//
// -1.0 represents the lowest elevations and +1.0 represents the highest
// elevations.
//
// 1: [Large-river-basis module]: This ridged-multifractal-noise function
// creates the large, deep rivers.
let riverPositions_rm0 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 100)
.with_frequency(18.75)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(1);
// 2: [Large-river-curve module]: This curve module applies a curve to the
// output value from the large-river-basis module so that the ridges become
// inverted. This creates the rivers. This curve also compresses the edge of
// the rivers, producing a sharp transition from the land to the river
// bottom.
let riverPositions_cu0 = Curve::new(&riverPositions_rm0)
.add_control_point(-2.000, 2.000)
.add_control_point(-1.000, 1.000)
.add_control_point(-0.125, 0.875)
.add_control_point(0.000, -1.000)
.add_control_point(1.000, -1.500)
.add_control_point(2.000, -2.000);
// 3: [Small-river-basis module]: This ridged-multifractal-noise function
// creates the small, shallow rivers.
let riverPositions_rm1 = RidgedMulti::new()
.with_seed(CURRENT_SEED + 101)
.with_frequency(43.25)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(1);
// 4: [Small-river-curve module]: This curve module applies a curve to the
// output value from the small-river-basis module so that the ridges become
// inverted. This creates the rivers. This curve also compresses the edge of
// the rivers, producing a sharp transition from the land to the river
// bottom.
let riverPositions_cu1 = Curve::new(&riverPositions_rm1)
.add_control_point(-2.000, 2.0000)
.add_control_point(-1.000, 1.5000)
.add_control_point(-0.125, 1.4375)
.add_control_point(0.000, 0.5000)
.add_control_point(1.000, 0.2500)
.add_control_point(2.000, 0.0000);
// 5: [Combined-rivers module]: This minimum-value module causes the small
// rivers to cut into the large rivers. It does this by selecting the
// minimum output values from the large-river-curve module and the small-
// river-curve module.
let riverPositions_mi = Min::new(&riverPositions_cu0, &riverPositions_cu1);
// 6: [Warped-rivers module]: This turbulence module warps the output value
// from the combined-rivers module, which twists the rivers. The high
// roughness produces less-smooth rivers.
let riverPositions_tu = Turbulence::new(riverPositions_mi)
.with_seed(CURRENT_SEED + 102)
.with_frequency(9.25)
.with_power(1.0 / 57.75)
.with_roughness(6);
// 7: [River-positions group]: Caches the output value from the warped-
// rivers module. This is the output value for the entire river-
// positions group.
let riverPositions = Cache::new(riverPositions_tu);
// /////////////////////////////////////////////////////////////////////////
// Function group: scaled mountainous terrain
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: scaled mountainous terrain (6 noise functions)
//
// This subgroup scales the output value from the mountainous-terrain group
// so that it can be added to the elevation defined by the continent-
// definition group.
//
// This subgroup scales the output value such that it is almost always
// positive. This is done so that a negative elevation does not get applied
// to the continent-definition group, preventing parts of that group from
// having negative terrain features "stamped" into it.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Base-scaled-mountainous-terrain module]: This scale/bias module
// scales the output value from the mountainous-terrain group so that the
// output value is measured in planetary elevation units.
let scaledMountainousTerrain_sb0 = ScaleBias::new(&mountainousTerrain)
.with_scale(0.125)
.with_bias(0.125);
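// (A [-1.0, 1.0] input maps to [0.0, 0.25] planetary elevation units here.)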
// 2: [Base-peak-modulation module]: At this stage, most mountain peaks have
// roughly the same elevation. This Fbm module generates some
// random values that will be used by subsequent noise functions to randomly
// change the elevations of the mountain peaks.
let scaledMountainousTerrain_fb = Fbm::new()
.with_seed(CURRENT_SEED + 110)
.with_frequency(14.5)
.with_persistence(0.5)
.with_lacunarity(MOUNTAIN_LACUNARITY)
.with_octaves(6);
// 3: [Peak-modulation module]: This exponential-curve module applies an
// exponential curve to the output value from the base-peak-modulation
// module. This produces a small number of high values and a much larger
// number of low values. This means there will be a few peaks with much
// higher elevations than the majority of the peaks, making the terrain
// features more varied.
let scaledMountainousTerrain_ex =
Exponent::new(&scaledMountainousTerrain_fb).with_exponent(1.25);
// 4: [Scaled-peak-modulation module]: This scale/bias module modifies the
// range of the output value from the peak-modulation module so that it can
// be used as the modulator for the peak-height-multiplier module. It is
// important that this output value is not much lower than 1.0.
let scaledMountainousTerrain_sb1 = ScaleBias::new(&scaledMountainousTerrain_ex)
.with_scale(0.25)
.with_bias(1.0);
// 5: [Peak-height-multiplier module]: This multiplier module modulates the
// heights of the mountain peaks from the base-scaled-mountainous-terrain
// module using the output value from the scaled-peak-modulation module.
let scaledMountainousTerrain_mu =
Multiply::new(&scaledMountainousTerrain_sb0, &scaledMountainousTerrain_sb1);
// 6: [Scaled-mountainous-terrain group]: Caches the output value from the
// peak-height-multiplier module. This is the output value for the
// entire scaled-mountainous-terrain group.
let scaledMountainousTerrain = Cache::new(scaledMountainousTerrain_mu);
// /////////////////////////////////////////////////////////////////////////
// Function group: scaled hilly terrain
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: scaled hilly terrain (6 noise functions)
//
// This subgroup scales the output value from the hilly-terrain group so
// that it can be added to the elevation defined by the continent-
// definition group. The scaling amount applied to the hills is one half of
// the scaling amount applied to the scaled-mountainous-terrain group.
//
// This subgroup scales the output value such that it is almost always
// positive. This is done so that negative elevations are not applied to
// the continent-definition group, preventing parts of the continent-
// definition group from having negative terrain features "stamped" into it.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Base-scaled-hilly-terrain module]: This scale/bias module scales the
// output value from the hilly-terrain group so that this output value is
// measured in planetary elevation units.
let scaledHillyTerrain_sb0 = ScaleBias::new(&hillyTerrain)
.with_scale(0.0625)
.with_bias(0.0625);
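// (A [-1.0, 1.0] input maps to [0.0, 0.125] here, half the mountain scaling.)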
// 2: [Base-hilltop-modulation module]: At this stage, most hilltops have
// roughly the same elevation. This Fbm module generates some
// random values that will be used by subsequent noise functions to
// randomly change the elevations of the hilltops.
let scaledHillyTerrain_fb = Fbm::new()
.with_seed(CURRENT_SEED + 120)
.with_frequency(13.5)
.with_persistence(0.5)
.with_lacunarity(HILLS_LACUNARITY)
.with_octaves(6);
// 3: [Hilltop-modulation module]: This exponential-curve module applies an
// exponential curve to the output value from the base-hilltop-modulation
// module. This produces a small number of high values and a much larger
// number of low values. This means there will be a few hilltops with
// much higher elevations than the majority of the hilltops, making the
// terrain features more varied.
let scaledHillyTerrain_ex = Exponent::new(&scaledHillyTerrain_fb).with_exponent(1.25);
// 4: [Scaled-hilltop-modulation module]: This scale/bias module modifies
// the range of the output value from the hilltop-modulation module so that
// it can be used as the modulator for the hilltop-height-multiplier module.
// It is important that this output value is not much lower than 1.0.
let scaledHillyTerrain_sb1 = ScaleBias::new(&scaledHillyTerrain_ex)
.with_scale(0.5)
.with_bias(1.5);
// 5: [Hilltop-height-multiplier module]: This multiplier module modulates
// the heights of the hilltops from the base-scaled-hilly-terrain module
// using the output value from the scaled-hilltop-modulation module.
let scaledHillyTerrain_mu = Multiply::new(&scaledHillyTerrain_sb0, &scaledHillyTerrain_sb1);
// 6: [Scaled-hilly-terrain group]: Caches the output value from the
// hilltop-height-multiplier module. This is the output value for the entire
// scaled-hilly-terrain group.
let scaledHillyTerrain = Cache::new(scaledHillyTerrain_mu);
// /////////////////////////////////////////////////////////////////////////
// Function group: scaled plains terrain
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: scaled plains terrain (2 noise functions)
//
// This subgroup scales the output value from the plains-terrain group so
// that it can be added to the elevations defined by the continent-
// definition group.
//
// This subgroup scales the output value such that it is almost always
// positive. This is done so that negative elevations are not applied to
// the continent-definition group, preventing parts of the continent-
// definition group from having negative terrain features "stamped" into it.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Scaled-plains-terrain module]: This scale/bias module greatly
// flattens the output value from the plains terrain. This output value
// is measured in planetary elevation units.
let scaledPlainsTerrain_sb0 = ScaleBias::new(&plainsTerrain)
.with_scale(0.00390625)
.with_bias(0.0078125);
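// (0.00390625 is 1/256 and 0.0078125 is 1/128, so a [-1.0, 1.0] input maps to
// roughly [0.0039, 0.0117] planetary elevation units.)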
// 2: [Scaled-plains-terrain group]: Caches the output value from the
// scaled-plains-terrain module. This is the output value for the entire
// scaled-plains-terrain group.
let scaledPlainsTerrain = Cache::new(scaledPlainsTerrain_sb0);
// /////////////////////////////////////////////////////////////////////////
// Function group: scaled badlands terrain
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: scaled badlands terrain (2 noise functions)
//
// This subgroup scales the output value from the badlands-terrain group so
// that it can be added to the elevations defined by the continent-
// definition group.
//
// This subgroup scales the output value such that it is almost always
// positive. This is done so that negative elevations are not applied to the
// continent-definition group, preventing parts of the continent-definition
// group from having negative terrain features "stamped" into it.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Scaled-badlands-terrain module]: This scale/bias module scales the
// output value from the badlands-terrain group so that it is measured
// in planetary elevation units.
let scaledBadlandsTerrain_sb = ScaleBias::new(&badlandsTerrain)
.with_scale(0.0625)
.with_bias(0.0625);
// 2: [Scaled-badlands-terrain group]: Caches the output value from the
// scaled-badlands-terrain module. This is the output value for the
// entire scaled-badlands-terrain group.
let scaledBadlandsTerrain = Cache::new(scaledBadlandsTerrain_sb);
// debug::render_noise_module("complexplanet_images/17_0_scaledBadlandsTerrain\
// .png",
// &scaledBadlandsTerrain,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function group: final planet
// /////////////////////////////////////////////////////////////////////////
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continental shelf (6 noise functions)
//
// This module subgroup creates the continental shelves.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Shelf-creator module]: This terracing module applies a terracing
// curve to the continent-definition group at the specified shelf level.
// This terrace becomes the continental shelf. Note that this terracing
// module also places another terrace below the continental shelf near -1.0.
// The bottom of this terrace is defined as the bottom of the ocean;
// subsequent noise functions will later add oceanic trenches to the bottom of
// the ocean.
let continentalShelf_te = Terrace::new(&continentDef)
.add_control_point(-1.0)
.add_control_point(-0.75)
.add_control_point(SHELF_LEVEL)
.add_control_point(1.0);
// debug::render_noise_module("complexplanet_images/18_0_continentalShelf_te\
// .png",
// &continentalShelf_te,
// 1024,
// 1024,
// 1000);
// 2: [Clamped-sea-bottom module]: This clamping module clamps the output
// value from the shelf-creator module so that its possible range is from
// the bottom of the ocean to sea level. This is done because this subgroup
// is only concerned about the oceans.
let continentalShelf_cl = Clamp::new(&continentalShelf_te).with_bounds(-0.75, SEA_LEVEL);
// debug::render_noise_module("complexplanet_images/18_1_continentalShelf_cl\
// .png",
// &continentalShelf_cl,
// 1024,
// 1024,
// 1000);
// 3: [Oceanic-trench-basis module]: This ridged-multifractal-noise function
// generates some coherent noise that will be used to generate the oceanic
// trenches. The ridges represent the bottom of the trenches.
let continentalShelf_rm = RidgedMulti::new()
.with_seed(CURRENT_SEED + 130)
.with_frequency(CONTINENT_FREQUENCY * 4.375)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(16);
// debug::render_noise_module("complexplanet_images/18_2_continentalShelf_rm\
// .png",
// &continentalShelf_rm,
// 1024,
// 1024,
// 1000);
// 4: [Oceanic-trench module]: This scale/bias module inverts the ridges
// from the oceanic-trench-basis-module so that the ridges become trenches.
// This noise function also reduces the depth of the trenches so that their
// depths are measured in planetary elevation units.
let continentalShelf_sb = ScaleBias::new(&continentalShelf_rm)
.with_scale(-0.125)
.with_bias(-0.125);
// debug::render_noise_module("complexplanet_images/18_3_continentalShelf_sb\
// .png",
// &continentalShelf_sb,
// 1024,
// 1024,
// 1000);
// 5: [Shelf-and-trenches module]: This addition module adds the oceanic
// trenches to the clamped-sea-bottom module.
let continentalShelf_ad = Add::new(&continentalShelf_sb, &continentalShelf_cl);
// 6: [Continental-shelf subgroup]: Caches the output value from the shelf-
// and-trenches module.
let continentalShelf = Cache::new(continentalShelf_ad);
// debug::render_noise_module("complexplanet_images/18_4_continentalShelf.png",
// &continentalShelf,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: base continent elevations (3 noise functions)
//
// This subgroup generates the base elevations for the continents, before
// terrain features are added.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Base-scaled-continent-elevations module]: This scale/bias module
// scales the output value from the continent-definition group so that it
// is measured in planetary elevation units.
let baseContinentElev_sb = ScaleBias::new(&continentDef)
.with_scale(CONTINENT_HEIGHT_SCALE)
.with_bias(0.0);
// debug::render_noise_module("complexplanet_images/19_0_baseContinentElev_sb\
// .png",
// &baseContinentElev_sb,
// 1024,
// 1024,
// 1000);
// 2: [Base-continent-with-oceans module]: This selector module applies the
// elevations of the continental shelves to the base elevations of the
// continent. It does this by selecting the output value from the
// continental-shelf subgroup if the corresponding output value from the
// continent-definition group is below the shelf level. Otherwise, it
// selects the output value from the base-scaled-continent-elevations
// module.
let baseContinentElev_se = Select::new(&baseContinentElev_sb, &continentalShelf, &continentDef)
.with_bounds(SHELF_LEVEL - 1000.0, SHELF_LEVEL)
.with_falloff(0.03125);
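// (Note: the lower bound of SHELF_LEVEL - 1000.0 is effectively "negative infinity", so the
// continental-shelf output is selected everywhere the continent-definition value lies at or
// below the shelf level. The 1001.0-based upper bounds used in later selectors play the same
// role as an effectively infinite upper bound.)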
// 3: [Base-continent-elevation subgroup]: Caches the output value from the
// base-continent-with-oceans module.
let baseContinentElev = Cache::new(baseContinentElev_se);
// debug::render_noise_module("complexplanet_images/19_1_baseContinentElev\
// .png",
// &baseContinentElev,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continents with plains (2 noise functions)
//
// This subgroup applies the scaled-plains-terrain group to the base-
// continent-elevation subgroup.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Continents-with-plains module]: This addition module adds the scaled-
// plains-terrain group to the base-continent-elevation subgroup.
let continentsWithPlains_ad = Add::new(&baseContinentElev, &scaledPlainsTerrain);
// 2: [Continents-with-plains subgroup]: Caches the output value from the
// continents-with-plains module.
let continentsWithPlains = Cache::new(continentsWithPlains_ad);
// debug::render_noise_module("complexplanet_images/20_0_continentsWithPlains\
// .png",
// &continentsWithPlains,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continents with hills (3 noise functions)
//
// This subgroup applies the scaled-hilly-terrain group to the continents-
// with-plains subgroup.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Continents-with-hills module]: This addition module adds the scaled-
// hilly-terrain group to the base-continent-elevation subgroup.
let continentsWithHills_ad = Add::new(&baseContinentElev, &scaledHillyTerrain);
// debug::render_noise_module("complexplanet_images/21_0_continentsWithHills_ad.png",
// &continentsWithHills_ad,
// 1024,
// 1024,
// 1000);
// 2: [Select-high-elevations module]: This selector module ensures that the
// hills only appear at higher elevations. It does this by selecting the
// output value from the continent-with-hills module if the corresponding
// output value from the terrain-type-definition group is above a certain
// value. Otherwise, it selects the output value from the continents-with-
// plains subgroup.
let continentsWithHills_se = Select::new(
&continentsWithPlains,
&continentsWithHills_ad,
&terrainTypeDef,
)
.with_bounds(1.0 - HILLS_AMOUNT, 1001.0 - HILLS_AMOUNT)
.with_falloff(0.25);
// 3: [Continents-with-hills subgroup]: Caches the output value from the
// select-high-elevations module.
let continentsWithHills = Cache::new(continentsWithHills_se);
// debug::render_noise_module("complexplanet_images/21_1_continentsWithHills\
// .png",
// &continentsWithHills,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continents with mountains (5 noise functions)
//
// This subgroup applies the scaled-mountainous-terrain group to the
// continents-with-hills subgroup.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Continents-and-mountains module]: This addition module adds the
// scaled-mountainous-terrain group to the base-continent-elevation
// subgroup.
let continentsWithMountains_ad0 = Add::new(&baseContinentElev, &scaledMountainousTerrain);
// debug::render_noise_module("complexplanet_images/22_0_continentsWithMountains_ad0.png",
// &continentsWithMountains_ad0,
// 1024,
// 1024,
// 1000);
// 2: [Increase-mountain-heights module]: This curve module applies a curve
// to the output value from the continent-definition group. This modified
// output value is used by a subsequent noise function to add additional
// height to the mountains based on the current continent elevation. The
// higher the continent elevation, the higher the mountains.
let continentsWithMountains_cu = Curve::new(&continentDef)
.add_control_point(-1.0, -0.0625)
.add_control_point(0.0, 0.0000)
.add_control_point(1.0 - MOUNTAINS_AMOUNT, 0.0625)
.add_control_point(1.0, 0.2500);
// debug::render_noise_module("complexplanet_images/22_1_continentsWithMountains_cu.png",
// &continentsWithMountains_cu,
// 1024,
// 1024,
// 1000);
// 3: [Add-increased-mountain-heights module]: This addition module adds the
// increased-mountain-heights module to the continents-and-mountains module.
// The highest continent elevations now have the highest mountains.
let continentsWithMountains_ad1 =
Add::new(&continentsWithMountains_ad0, &continentsWithMountains_cu);
// debug::render_noise_module("complexplanet_images/22_2_continentsWithMountains_ad1.png",
// &continentsWithMountains_ad1,
// 1024,
// 1024,
// 1000);
// 4: [Select-high-elevations module]: This selector module ensures that
// mountains only appear at higher elevations. It does this by selecting the
// output value from the continent-with-mountains module if the
// corresponding output value from the terrain-type-definition group is
// above a certain value. Otherwise, it selects the output value from the
// continents-with-hills subgroup. Note that the continents-with-hills
// subgroup also contains the plains terrain.
let continentsWithMountains_se = Select::new(
&continentsWithHills,
&continentsWithMountains_ad1,
&terrainTypeDef,
)
.with_bounds(1.0 - MOUNTAINS_AMOUNT, 1001.0 - MOUNTAINS_AMOUNT)
.with_falloff(0.25);
// 5: [Continents-with-mountains subgroup]: Caches the output value from the
// select-high-elevations module.
let continentsWithMountains = Cache::new(continentsWithMountains_se);
// debug::render_noise_module("complexplanet_images/22_3_continentsWithMountains.png",
// &continentsWithMountains,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continents with badlands (5 noise functions)
//
// This subgroup applies the scaled-badlands-terrain group to the
// continents-with-mountains subgroup.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Badlands-positions module]: This BasicMulti module generates some
// random noise, which is used by subsequent noise functions to specify the
// locations of the badlands.
let continentsWithBadlands_bm = Fbm::new()
.with_seed(CURRENT_SEED + 140)
.with_frequency(16.5)
.with_persistence(0.5)
.with_lacunarity(CONTINENT_LACUNARITY)
.with_octaves(2);
// debug::render_noise_module("complexplanet_images/23_0_continentsWithBadlands_bm.png",
// &continentsWithBadlands_bm,
// 1024,
// 1024,
// 1000);
// 2: [Continents-and-badlands module]: This addition module adds the
// scaled-badlands-terrain group to the base-continent-elevation
// subgroup.
let continentsWithBadlands_ad = Add::new(&baseContinentElev, &scaledBadlandsTerrain);
// debug::render_noise_module("complexplanet_images/23_1_continentsWithBadlands_ad.png",
// &continentsWithBadlands_ad,
// 1024,
// 1024,
// 1000);
// 3: [Select-badlands-positions module]: This selector module places
// badlands at random spots on the continents based on the BasicMulti noise
// generated by the badlands-positions module. To do this, it selects the
// output value from the continents-and-badlands module if the corresponding
// output value from the badlands-position module is greater than a
// specified value. Otherwise, this selector module selects the output value
// from the continents-with-mountains subgroup. There is also a wide
// transition between these two noise functions so that the badlands can blend
// into the rest of the terrain on the continents.
let continentsWithBadlands_se = Select::new(
&continentsWithMountains,
&continentsWithBadlands_ad,
&continentsWithBadlands_bm,
)
.with_bounds(1.0 - BADLANDS_AMOUNT, 1001.0 - BADLANDS_AMOUNT)
.with_falloff(0.25);
// debug::render_noise_module("complexplanet_images/23_2_continentsWithBadlands_se.png",
// &continentsWithBadlands_se,
// 1024,
// 1024,
// 1000);
// 4: [Apply-badlands module]: This maximum-value module causes the badlands
// to "poke out" from the rest of the terrain. It does this by ensuring
// that only the maximum of the output values from the continents-with-
// mountains subgroup and the select-badlands-positions modules contribute
// to the output value of this subgroup. One side effect of this process is
// that the badlands will not appear in mountainous terrain.
let continentsWithBadlands_ma = Max::new(&continentsWithMountains, &continentsWithBadlands_se);
// 5: [Continents-with-badlands subgroup]: Caches the output value from the
// apply-badlands module.
let continentsWithBadlands = Cache::new(continentsWithBadlands_ma);
// debug::render_noise_module("complexplanet_images/23_3_continentsWithBadlands.png",
// &continentsWithBadlands,
// 1024,
// 1024,
// 1000);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: continents with rivers (4 noise functions)
//
// This subgroup applies the river-positions group to the continents-with-
// badlands subgroup.
//
// The output value from this module subgroup is measured in planetary
// elevation units (-1.0 for the lowest underwater trenches and +1.0 for the
// highest mountain peaks.)
//
// 1: [Scaled-rivers module]: This scale/bias module scales the output value
// from the river-positions group so that it is measured in planetary
// elevation units and is negative; this is required for step 2.
let continentsWithRivers_sb = ScaleBias::new(&riverPositions)
.with_scale(RIVER_DEPTH / 2.0)
.with_bias(-RIVER_DEPTH / 2.0);
// debug::render_noise_module("complexplanet_images/24_0_continentsWithRivers_sb.png",
// &continentsWithRivers_sb,
// 1024,
// 1024,
// 1000);
// 2: [Add-rivers-to-continents module]: This addition module adds the
// rivers to the continents-with-badlands subgroup. Because the scaled-
// rivers module only outputs a negative value, the scaled-rivers module
// carves the rivers out of the terrain.
let continentsWithRivers_ad = Add::new(&continentsWithBadlands, &continentsWithRivers_sb);
// debug::render_noise_module("complexplanet_images/24_1_continentsWithRivers_ad.png",
// &continentsWithRivers_ad,
// 1024,
// 1024,
// 1000);
// 3: [Blended-rivers-to-continents module]: This selector module outputs
// deep rivers near sea level and shallower rivers in higher terrain. It
// does this by selecting the output value from the continents-with-
// badlands subgroup if the corresponding output value from the
// continents-with-badlands subgroup is far from sea level. Otherwise,
// this selector module selects the output value from the add-rivers-to-
// continents module.
let continentsWithRivers_se = Select::new(
&continentsWithBadlands,
&continentsWithRivers_ad,
&continentsWithBadlands,
)
.with_bounds(SEA_LEVEL, CONTINENT_HEIGHT_SCALE + SEA_LEVEL)
.with_falloff(CONTINENT_HEIGHT_SCALE - SEA_LEVEL);
// 4: [Continents-with-rivers subgroup]: Caches the output value from the
// blended-rivers-to-continents module.
let continentsWithRivers = Cache::new(continentsWithRivers_se);
// /////////////////////////////////////////////////////////////////////////
// Function subgroup: unscaled final planet (1 noise function)
//
// This subgroup simply caches the output value from the continent-with-
// rivers subgroup to contribute to the final output value.
//
// 1: [Unscaled-final-planet subgroup]: Caches the output value from the
// continent-with-rivers subgroup.
let unscaledFinalPlanet = Cache::new(continentsWithRivers);
// debug::render_noise_module3(
// "complexplanet_images/30_0_unscaledFinalPlanet\
// .png",
// &unscaledFinalPlanet,
// 1024,
// 1024,
// 100,
// );
//
// debug::render_noise_module3(
// "complexplanet_images/30_1_unscaledFinalPlanet\
// .png",
// &unscaledFinalPlanet,
// 2048,
// 2048,
// 1000,
// );
//
// debug::render_noise_module3(
// "complexplanet_images/30_2_unscaledFinalPlanet\
// .png",
// &unscaledFinalPlanet,
// 2048,
// 2048,
// 10000,
// );
//
// debug::render_noise_module3(
// "complexplanet_images/30_3_unscaledFinalPlanet\
// .png",
// &unscaledFinalPlanet,
// 4096,
// 4096,
// 100000,
// );
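// Write the unscaled final planet to images at three zoom levels. The x/y bounds passed to
// PlaneMapBuilder control how much of the noise field is sampled: the full (-2.0, 2.0) extent
// first, then progressively smaller windows for the 4x and 16x zoomed views below.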
let noise_map = PlaneMapBuilder::new(&unscaledFinalPlanet)
.with_size(1024, 1024)
.with_x_bounds(-2.0, 2.0)
.with_y_bounds(-2.0, 2.0)
.build();
ImageRenderer::new()
.with_gradient(ColorGradient::new().build_terrain_gradient())
.render(&noise_map)
.write_to_file("unscaledFinalPlanet.png");
let noise_map = PlaneMapBuilder::new(&unscaledFinalPlanet)
.with_size(1024, 1024)
.with_x_bounds(-0.5, 0.5)
.with_y_bounds(-0.5, 0.5)
.build();
ImageRenderer::new()
.with_gradient(ColorGradient::new().build_terrain_gradient())
.render(&noise_map)
.write_to_file("unscaledFinalPlanet_4x_zoom.png");
let noise_map = PlaneMapBuilder::new(&unscaledFinalPlanet)
.with_size(1024, 1024)
.with_x_bounds(-0.0, 0.25)
.with_y_bounds(-0.125, 0.125)
.build();
ImageRenderer::new()
.with_gradient(ColorGradient::new().build_terrain_gradient())
.render(&noise_map)
.write_to_file("unscaledFinalPlanet_16x_zoom.png");
}
| main |
LogicTypeSelector.tsx | /*
* Copyright 2021 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import "./LogicTypeSelector.css";
import * as React from "react";
import { useCallback, useContext, useMemo } from "react";
import {
ContextProps,
DataType,
DecisionTableProps,
ExpressionProps,
FunctionKind,
FunctionProps,
generateUuid,
InvocationProps,
ListProps,
LiteralExpressionProps,
LogicType,
PMMLLiteralExpressionProps,
RelationProps,
} from "../../api";
import { LiteralExpression, PMMLLiteralExpression } from "../LiteralExpression";
import { RelationExpression } from "../RelationExpression";
import { ContextExpression } from "../ContextExpression";
import { useBoxedExpressionEditorI18n } from "../../i18n";
import { PopoverMenu } from "../PopoverMenu";
import { Menu, MenuGroup, MenuItem, MenuList } from "@patternfly/react-core";
import * as _ from "lodash";
import { useContextMenuHandler } from "../../hooks";
import { BoxedExpressionGlobalContext } from "../../context";
import { DecisionTableExpression } from "../DecisionTableExpression";
import { ListExpression } from "../ListExpression";
import { InvocationExpression } from "../InvocationExpression";
import { FunctionExpression } from "../FunctionExpression";
export interface LogicTypeSelectorProps {
/** Expression properties */
selectedExpression: ExpressionProps;
/** Function to be invoked when logic type changes */
onLogicTypeUpdating: (logicType: LogicType) => void;
/** Function to be invoked when logic type is reset */
onLogicTypeResetting: () => void;
/** Function to be invoked to update expression's name and datatype */
onUpdatingNameAndDataType?: (updatedName: string, updatedDataType: DataType) => void;
/** Function to be invoked to retrieve the DOM reference to be used for selector placement */
getPlacementRef: () => HTMLDivElement;
/** True to have no header for this specific expression component, used in a recursive expression */
isHeadless?: boolean;
/** When a component is headless, it will call this function to pass its most updated expression definition */
onUpdatingRecursiveExpression?: (expression: ExpressionProps) => void;
}
export const LogicTypeSelector: React.FunctionComponent<LogicTypeSelectorProps> = ({
selectedExpression,
onLogicTypeUpdating,
onLogicTypeResetting,
onUpdatingNameAndDataType,
getPlacementRef,
isHeadless,
onUpdatingRecursiveExpression,
}) => {
const { i18n } = useBoxedExpressionEditorI18n();
const globalContext = useContext(BoxedExpressionGlobalContext);
const expression = useMemo(() => {
return {
...selectedExpression,
id: selectedExpression.id ?? generateUuid(),
isHeadless: isHeadless ?? false,
onUpdatingNameAndDataType,
onUpdatingRecursiveExpression,
};
}, [selectedExpression, isHeadless, onUpdatingNameAndDataType, onUpdatingRecursiveExpression]);
const isLogicTypeSelected = useMemo(
() => selectedExpression.logicType && selectedExpression.logicType !== LogicType.Undefined,
[selectedExpression.logicType]
);
const {
contextMenuRef,
contextMenuXPos,
contextMenuYPos,
contextMenuVisibility,
setContextMenuVisibility,
targetElement,
} = useContextMenuHandler(globalContext.boxedExpressionEditorRef?.current ?? document);
const renderExpression = useMemo(() => {
switch (expression.logicType) {
case LogicType.LiteralExpression:
return <LiteralExpression {...(expression as LiteralExpressionProps)} />;
case LogicType.PMMLLiteralExpression:
return <PMMLLiteralExpression {...(expression as PMMLLiteralExpressionProps)} />;
case LogicType.Relation:
return <RelationExpression {...(expression as RelationProps)} />;
case LogicType.Context:
return <ContextExpression {...(expression as ContextProps)} />;
case LogicType.DecisionTable:
return <DecisionTableExpression {...(expression as DecisionTableProps)} />;
case LogicType.Invocation:
return <InvocationExpression {...(expression as InvocationProps)} />;
case LogicType.List:
return <ListExpression {...(expression as ListProps)} />;
case LogicType.Function:
return <FunctionExpression {..._.defaults(expression, { functionKind: FunctionKind.Feel } as FunctionProps)} />;
default:
return expression.logicType;
}
// logicType is enough for deciding when to re-execute this function
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [expression]);
const getSelectableLogicTypes = useCallback(
() =>
Object.values(LogicType).filter(
(logicType) => !_.includes([LogicType.Undefined, LogicType.PMMLLiteralExpression], logicType)
),
[]
);
const renderLogicTypeItems = useCallback(
() =>
_.map(getSelectableLogicTypes(), (key) => (
<MenuItem key={key} itemId={key}>
{key}
</MenuItem>
)),
[getSelectableLogicTypes]
);
const getArrowPlacement = useCallback(() => getPlacementRef() as HTMLElement, [getPlacementRef]);
const getAppendToPlacement = useCallback(() => {
return globalContext.boxedExpressionEditorRef?.current ?? getArrowPlacement;
}, [getArrowPlacement, globalContext.boxedExpressionEditorRef]);
const onLogicTypeSelect = useCallback(
(event?: React.MouseEvent, itemId?: string | number) => {
const selectedLogicType = itemId as LogicType;
onLogicTypeUpdating(selectedLogicType);
},
[onLogicTypeUpdating]
);
const buildLogicSelectorMenu = useMemo( | <PopoverMenu
title={i18n.selectLogicType}
arrowPlacement={getArrowPlacement}
appendTo={getAppendToPlacement()}
className="logic-type-popover"
hasAutoWidth
body={
<Menu onSelect={onLogicTypeSelect}>
<MenuList>{renderLogicTypeItems()}</MenuList>
</Menu>
}
/>
),
[i18n.selectLogicType, getArrowPlacement, getAppendToPlacement, onLogicTypeSelect, renderLogicTypeItems]
);
const executeClearAction = useCallback(() => {
setContextMenuVisibility(false);
onLogicTypeResetting();
}, [onLogicTypeResetting, setContextMenuVisibility]);
const buildContextMenu = useMemo(
() => (
<div
className="context-menu-container no-table-context-menu"
style={{
top: contextMenuYPos,
left: contextMenuXPos,
}}
>
<Menu className="table-handler-menu">
<MenuGroup label={(expression?.logicType ?? LogicType.Undefined).toLocaleUpperCase()}>
<MenuList>
<MenuItem isDisabled={!isLogicTypeSelected} onClick={executeClearAction}>
{i18n.clear}
</MenuItem>
</MenuList>
</MenuGroup>
</Menu>
</div>
),
[contextMenuYPos, contextMenuXPos, expression.logicType, isLogicTypeSelected, executeClearAction, i18n.clear]
);
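// Open the "clear" context menu only when the expression allows clearing, the context menu is
// currently visible, and the right-click landed outside any table or on a row-remainder area.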
const shouldClearContextMenuBeOpened = useMemo(() => {
const notClickedOnTable = _.isNil((targetElement as HTMLElement)?.closest("table"));
const clickedOnTableRemainderContent = !_.isNil((targetElement as HTMLElement)?.closest(".row-remainder-content"));
const clickedOnAllowedTableSection = notClickedOnTable || clickedOnTableRemainderContent;
return !selectedExpression.noClearAction && contextMenuVisibility && clickedOnAllowedTableSection;
}, [contextMenuVisibility, selectedExpression.noClearAction, targetElement]);
const cssClasses = useMemo(() => {
const classes = [];
if (!isHeadless) {
classes.push(`${globalContext.decisionNodeId}`);
}
classes.push("logic-type-selector");
if (isLogicTypeSelected) {
classes.push("logic-type-selected");
} else {
classes.push("logic-type-not-present");
}
return classes.join(" ");
}, [globalContext.decisionNodeId, isHeadless, isLogicTypeSelected]);
return (
<div className={cssClasses} ref={contextMenuRef}>
{isLogicTypeSelected ? renderExpression : i18n.selectExpression}
{!isLogicTypeSelected && buildLogicSelectorMenu}
{shouldClearContextMenuBeOpened && buildContextMenu}
</div>
);
}; | () => ( |
safeJson.ts | import fse from 'fs-extra'
export function loadJsonSync(file, defaultVal?) {
// eslint-disable-next-line no-sync
return fse.readJsonSync(file, {throws: false}) || defaultVal
}
export function loadJson(file, defaultVal) {
return fse.readJson(file).catch(() => defaultVal)
}
| return JSON.parse(json)
} catch (err) {
return defaultVal
}
} | export function parseJson(json, defaultVal) {
try { |
index.ts | export * from './login.dto';
export * from './register.dto';
export * from './user.dto'; | ||
sw_mux_ctl_pad_gpio_b1_06.rs | #[doc = "Reader of register SW_MUX_CTL_PAD_GPIO_B1_06"]
pub type R = crate::R<u32, super::SW_MUX_CTL_PAD_GPIO_B1_06>;
#[doc = "Writer for register SW_MUX_CTL_PAD_GPIO_B1_06"]
pub type W = crate::W<u32, super::SW_MUX_CTL_PAD_GPIO_B1_06>;
#[doc = "Register SW_MUX_CTL_PAD_GPIO_B1_06 `reset()`'s with value 0x05"]
impl crate::ResetValue for super::SW_MUX_CTL_PAD_GPIO_B1_06 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0x05
}
}
#[doc = "MUX Mode Select Field.\n\nValue on reset: 5"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum MUX_MODE_A {
#[doc = "0: Select mux mode: ALT0 mux port: LCD_DATA18 of instance: lcdif"]
ALT0 = 0,
#[doc = "1: Select mux mode: ALT1 mux port: LPSPI4_SDO of instance: lpspi4"]
ALT1 = 1,
#[doc = "2: Select mux mode: ALT2 mux port: CSI_DATA13 of instance: csi"]
ALT2 = 2,
#[doc = "3: Select mux mode: ALT3 mux port: ENET_RX_EN of instance: enet"]
ALT3 = 3,
#[doc = "4: Select mux mode: ALT4 mux port: FLEXIO2_FLEXIO22 of instance: flexio2"]
ALT4 = 4,
#[doc = "5: Select mux mode: ALT5 mux port: GPIO2_IO22 of instance: gpio2"]
ALT5 = 5,
#[doc = "8: Select mux mode: ALT8 mux port: GPT1_CAPTURE2 of instance: gpt1"]
ALT8 = 8,
#[doc = "9: Select mux mode: ALT9 mux port: FLEXIO3_FLEXIO22 of instance: flexio3"]
ALT9 = 9,
}
impl From<MUX_MODE_A> for u8 {
#[inline(always)]
fn from(variant: MUX_MODE_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `MUX_MODE`"]
pub type MUX_MODE_R = crate::R<u8, MUX_MODE_A>;
impl MUX_MODE_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, MUX_MODE_A> {
use crate::Variant::*;
match self.bits {
0 => Val(MUX_MODE_A::ALT0),
1 => Val(MUX_MODE_A::ALT1),
2 => Val(MUX_MODE_A::ALT2),
3 => Val(MUX_MODE_A::ALT3),
4 => Val(MUX_MODE_A::ALT4),
5 => Val(MUX_MODE_A::ALT5),
8 => Val(MUX_MODE_A::ALT8),
9 => Val(MUX_MODE_A::ALT9),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `ALT0`"]
#[inline(always)]
pub fn is_alt0(&self) -> bool {
*self == MUX_MODE_A::ALT0
}
#[doc = "Checks if the value of the field is `ALT1`"]
#[inline(always)]
pub fn is_alt1(&self) -> bool {
*self == MUX_MODE_A::ALT1
}
#[doc = "Checks if the value of the field is `ALT2`"]
#[inline(always)]
pub fn is_alt2(&self) -> bool {
*self == MUX_MODE_A::ALT2
}
#[doc = "Checks if the value of the field is `ALT3`"]
#[inline(always)]
pub fn is_alt3(&self) -> bool {
*self == MUX_MODE_A::ALT3
}
#[doc = "Checks if the value of the field is `ALT4`"]
#[inline(always)]
pub fn is_alt4(&self) -> bool {
*self == MUX_MODE_A::ALT4
}
#[doc = "Checks if the value of the field is `ALT5`"]
#[inline(always)]
pub fn is_alt5(&self) -> bool {
*self == MUX_MODE_A::ALT5
}
#[doc = "Checks if the value of the field is `ALT8`"]
#[inline(always)]
pub fn is_alt8(&self) -> bool |
#[doc = "Checks if the value of the field is `ALT9`"]
#[inline(always)]
pub fn is_alt9(&self) -> bool {
*self == MUX_MODE_A::ALT9
}
}
#[doc = "Write proxy for field `MUX_MODE`"]
pub struct MUX_MODE_W<'a> {
w: &'a mut W,
}
impl<'a> MUX_MODE_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: MUX_MODE_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "Select mux mode: ALT0 mux port: LCD_DATA18 of instance: lcdif"]
#[inline(always)]
pub fn alt0(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT0)
}
#[doc = "Select mux mode: ALT1 mux port: LPSPI4_SDO of instance: lpspi4"]
#[inline(always)]
pub fn alt1(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT1)
}
#[doc = "Select mux mode: ALT2 mux port: CSI_DATA13 of instance: csi"]
#[inline(always)]
pub fn alt2(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT2)
}
#[doc = "Select mux mode: ALT3 mux port: ENET_RX_EN of instance: enet"]
#[inline(always)]
pub fn alt3(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT3)
}
#[doc = "Select mux mode: ALT4 mux port: FLEXIO2_FLEXIO22 of instance: flexio2"]
#[inline(always)]
pub fn alt4(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT4)
}
#[doc = "Select mux mode: ALT5 mux port: GPIO2_IO22 of instance: gpio2"]
#[inline(always)]
pub fn alt5(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT5)
}
#[doc = "Select mux mode: ALT8 mux port: GPT1_CAPTURE2 of instance: gpt1"]
#[inline(always)]
pub fn alt8(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT8)
}
#[doc = "Select mux mode: ALT9 mux port: FLEXIO3_FLEXIO22 of instance: flexio3"]
#[inline(always)]
pub fn alt9(self) -> &'a mut W {
self.variant(MUX_MODE_A::ALT9)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
#[doc = "Software Input On Field.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SION_A {
#[doc = "0: Input Path is determined by functionality"]
DISABLED = 0,
#[doc = "1: Force input path of pad GPIO_B1_06"]
ENABLED = 1,
}
impl From<SION_A> for bool {
#[inline(always)]
fn from(variant: SION_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SION`"]
pub type SION_R = crate::R<bool, SION_A>;
impl SION_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> SION_A {
match self.bits {
false => SION_A::DISABLED,
true => SION_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == SION_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == SION_A::ENABLED
}
}
#[doc = "Write proxy for field `SION`"]
pub struct SION_W<'a> {
w: &'a mut W,
}
impl<'a> SION_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SION_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Input Path is determined by functionality"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(SION_A::DISABLED)
}
#[doc = "Force input path of pad GPIO_B1_06"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(SION_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
impl R {
#[doc = "Bits 0:3 - MUX Mode Select Field."]
#[inline(always)]
pub fn mux_mode(&self) -> MUX_MODE_R {
MUX_MODE_R::new((self.bits & 0x0f) as u8)
}
#[doc = "Bit 4 - Software Input On Field."]
#[inline(always)]
pub fn sion(&self) -> SION_R {
SION_R::new(((self.bits >> 4) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:3 - MUX Mode Select Field."]
#[inline(always)]
pub fn mux_mode(&mut self) -> MUX_MODE_W {
MUX_MODE_W { w: self }
}
#[doc = "Bit 4 - Software Input On Field."]
#[inline(always)]
pub fn sion(&mut self) -> SION_W {
SION_W { w: self }
}
}
| {
*self == MUX_MODE_A::ALT8
} |
models.py | from __future__ import unicode_literals | from django.db import models
import re
import json
# import nlp
try:
import Queue as Q #python version < 3.0
except ImportError:
import queue as Q #python3.*
class wordBlock():
def __init__(self, start, end, kind):
self.start = start;
self.end = end;
self.kind = kind;
def __lt__(self,other):#operator <
return self.end < other.start
def __cmp__(self,other):
#call global(builtin) function cmp for int
return cmp(self.start,other.end)
class sentence(models.Model):
originalText = models.TextField(blank=True)
annotatedText = models.TextField(blank=True)
#alteredText = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
@classmethod
    def create(cls, originalText):
stc = sentence();
#stc.originalText = originalText.replace('\n', ' ').replace('\r', ' ').strip();
stc.originalText = ' '.join((originalText.split())).strip();
#stc.analyze();
return stc;
# def getScrubbedText(self):
# self.scrubbedText = '';
# return self.analyze();
# #we would rescrub due to frequent update of algorithm.
# if self.scrubbedText is None:
# return self.analyze();
# if self.scrubbedText == '':
# return self.analyze();
# return self.scrubbedText;
def __unicode__(self):
return self.originalText;
# def analyze(self):
# scrubbedContent = "test";#nlp.scrub(self.originalText);
# i = 0;
# str_suffix = '';
# for token in scrubbedContent:
# j = token[0].idx;
# if self.originalText[j - 1] == ' ':
# j = j - 1;
# str_suffix = ''.join((str_suffix, ' '));
# if token[1] != '':
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:j], str_suffix, "<scrub type='", token[1].lower() ,"'>"));
# str_suffix = '</scrub>';
# else:
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:j], str_suffix));
# str_suffix = '';
# i = token[0].idx;
# self.scrubbedText = "".join((self.scrubbedText, self.originalText[i:len(self.originalText)], str_suffix));
# self.save();
# #self.scrubbedText = self.scrubbedText.replace('<scrub></scrub>', ' ').strip();
# return self.scrubbedText;
class task(models.Model):
#msg_a1 = models.ForeignKey(sentence, related_name="sentence_a1")
#msg_a2 = models.ForeignKey(sentence, related_name="sentence_a2")
#msg_b1 = models.ForeignKey(sentence, related_name="sentence_b1")
#msg_b2 = models.ForeignKey(sentence, related_name="sentence_b2")
#msg_c1 = models.ForeignKey(sentence, related_name="sentence_c1")
sentences = models.TextField(default="[]")
status = models.IntegerField() #0: init 1: opened 2: answered
workers = models.TextField(default="[]")
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.id, self.status));
def getSentences(self):
ret = []
stc_ids = json.loads(self.sentences)
for stc_id in stc_ids:
ret.append(sentence.objects.get(id=stc_id))
return ret
class hit(models.Model):
mTurk_id = models.TextField()
status = models.IntegerField()
code = models.TextField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return " ".join((self.mTurk_id, self.code, self.status)); | |
reclass_equal_interval.rs | /*
This tool is part of the WhiteboxTools geospatial analysis library.
Authors: Dr. John Lindsay
Created: 06/07/2017
Last Modified: 30/01/2020
License: MIT
*/
use whitebox_raster::*;
use crate::tools::*;
use num_cpus;
use std::env;
use std::f64;
use std::io::{Error, ErrorKind};
use std::path;
use std::sync::mpsc;
use std::sync::Arc;
use std::thread;
/// This tool reclassifies the values in an input raster (`--input`) file based on an equal-interval scheme, where the
/// user must specify the reclass interval value (`--interval`), the starting value (`--start_val`), and optionally,
/// the ending value (`--end_val`). Grid cells containing values that fall outside of the range defined by the starting
/// and ending values will be assigned their original values in the output grid. If the user does not specify an ending
/// value, the tool will assign a very large positive value.
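///
/// For example (illustrative values): with `--interval=10.0` and `--start_val=0.0`, a grid cell
/// value of 27.3 falls in the class [20, 30) and is written to the output as the class's lower
/// bound, 20.0, i.e. `floor(27.3 / 10.0) * 10.0`.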
///
/// # See Also
/// `Reclass`, `ReclassFromFile`
pub struct ReclassEqualInterval {
name: String,
description: String,
toolbox: String,
parameters: Vec<ToolParameter>,
example_usage: String,
}
impl ReclassEqualInterval {
/// public constructor
pub fn new() -> ReclassEqualInterval {
let name = "ReclassEqualInterval".to_string();
let toolbox = "GIS Analysis".to_string();
let description =
"Reclassifies the values in a raster image based on equal-ranges.".to_string();
let mut parameters = vec![];
parameters.push(ToolParameter {
name: "Input File".to_owned(),
flags: vec!["-i".to_owned(), "--input".to_owned()],
description: "Input raster file.".to_owned(),
parameter_type: ParameterType::ExistingFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Output File".to_owned(),
flags: vec!["-o".to_owned(), "--output".to_owned()],
description: "Output raster file.".to_owned(),
parameter_type: ParameterType::NewFile(ParameterFileType::Raster),
default_value: None,
optional: false,
});
parameters.push(ToolParameter {
name: "Class Interval Size".to_owned(),
flags: vec!["--interval".to_owned()],
description: "Class interval size.".to_owned(),
parameter_type: ParameterType::Float,
default_value: Some("10.0".to_owned()),
optional: false,
});
parameters.push(ToolParameter {
name: "Starting Value".to_owned(),
flags: vec!["--start_val".to_owned()],
description: "Optional starting value (default is input minimum value).".to_owned(),
parameter_type: ParameterType::Float,
default_value: None,
optional: true,
});
parameters.push(ToolParameter {
name: "Ending Value".to_owned(),
flags: vec!["--end_val".to_owned()],
description: "Optional ending value (default is input maximum value).".to_owned(),
parameter_type: ParameterType::Float,
default_value: None,
optional: true,
});
let sep: String = path::MAIN_SEPARATOR.to_string();
let p = format!("{}", env::current_dir().unwrap().display());
let e = format!("{}", env::current_exe().unwrap().display());
let mut short_exe = e
.replace(&p, "")
.replace(".exe", "")
.replace(".", "")
.replace(&sep, "");
if e.contains(".exe") {
short_exe += ".exe";
}
let usage = format!(">>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i='input.tif' -o=output.tif --interval=10.0 --start_val=0.0", short_exe, name).replace("*", &sep);
ReclassEqualInterval {
name: name,
description: description,
toolbox: toolbox,
parameters: parameters,
example_usage: usage,
}
}
}
impl WhiteboxTool for ReclassEqualInterval {
fn get_source_file(&self) -> String {
String::from(file!())
}
fn | (&self) -> String {
self.name.clone()
}
fn get_tool_description(&self) -> String {
self.description.clone()
}
fn get_tool_parameters(&self) -> String {
match serde_json::to_string(&self.parameters) {
Ok(json_str) => return format!("{{\"parameters\":{}}}", json_str),
Err(err) => return format!("{:?}", err),
}
}
fn get_example_usage(&self) -> String {
self.example_usage.clone()
}
fn get_toolbox(&self) -> String {
self.toolbox.clone()
}
fn run<'a>(
&self,
args: Vec<String>,
working_directory: &'a str,
verbose: bool,
) -> Result<(), Error> {
let mut input_file = String::new();
let mut output_file = String::new();
let mut interval_size = 10.0;
let mut start_val = f64::NEG_INFINITY;
let mut end_val = f64::INFINITY;
if args.len() == 0 {
return Err(Error::new(
ErrorKind::InvalidInput,
"Tool run with no parameters.",
));
}
for i in 0..args.len() {
let mut arg = args[i].replace("\"", "");
arg = arg.replace("\'", "");
let cmd = arg.split("="); // in case an equals sign was used
let vec = cmd.collect::<Vec<&str>>();
let mut keyval = false;
if vec.len() > 1 {
keyval = true;
}
let flag_val = vec[0].to_lowercase().replace("--", "-");
if flag_val == "-i" || flag_val == "-input" {
if keyval {
input_file = vec[1].to_string();
} else {
input_file = args[i + 1].to_string();
}
} else if flag_val == "-o" || flag_val == "-output" {
if keyval {
output_file = vec[1].to_string();
} else {
output_file = args[i + 1].to_string();
}
} else if flag_val == "-interval" {
if keyval {
interval_size = vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
} else {
interval_size = args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
}
} else if flag_val == "-start_val" {
if keyval {
start_val = vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
} else {
start_val = args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
}
} else if flag_val == "-end_val" {
if keyval {
end_val = vec[1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
} else {
end_val = args[i + 1]
.to_string()
.parse::<f64>()
.expect(&format!("Error parsing {}", flag_val));
}
}
}
if verbose {
let tool_name = self.get_tool_name();
let welcome_len = format!("* Welcome to {} *", tool_name).len().max(28);
// 28 = length of the 'Powered by' by statement.
println!("{}", "*".repeat(welcome_len));
println!("* Welcome to {} {}*", tool_name, " ".repeat(welcome_len - 15 - tool_name.len()));
println!("* Powered by WhiteboxTools {}*", " ".repeat(welcome_len - 28));
println!("* www.whiteboxgeo.com {}*", " ".repeat(welcome_len - 23));
println!("{}", "*".repeat(welcome_len));
}
let sep: String = path::MAIN_SEPARATOR.to_string();
let mut progress: usize;
let mut old_progress: usize = 1;
if !input_file.contains(&sep) && !input_file.contains("/") {
input_file = format!("{}{}", working_directory, input_file);
}
if !output_file.contains(&sep) && !output_file.contains("/") {
output_file = format!("{}{}", working_directory, output_file);
}
if verbose {
println!("Reading data...")
};
let input = Arc::new(Raster::new(&input_file, "r")?);
let start = Instant::now();
let rows = input.configs.rows as isize;
let columns = input.configs.columns as isize;
let nodata = input.configs.nodata;
if start_val == f64::NEG_INFINITY {
start_val = input.configs.minimum;
}
if end_val == f64::INFINITY {
end_val = input.configs.maximum;
}
let mut num_procs = num_cpus::get() as isize;
let configs = whitebox_common::configs::get_configs()?;
let max_procs = configs.max_procs;
if max_procs > 0 && max_procs < num_procs {
num_procs = max_procs;
}
let (tx, rx) = mpsc::channel();
for tid in 0..num_procs {
let input = input.clone();
let tx = tx.clone();
thread::spawn(move || {
let mut z: f64;
for row in (0..rows).filter(|r| r % num_procs == tid) {
let mut data: Vec<f64> = vec![nodata; columns as usize];
for col in 0..columns {
z = input[(row, col)];
if z != nodata {
if z >= start_val && z <= end_val {
z = (z / interval_size).floor() * interval_size;
}
data[col as usize] = z;
}
}
tx.send((row, data)).unwrap();
}
});
}
let mut output = Raster::initialize_using_file(&output_file, &input);
for r in 0..rows {
let (row, data) = rx.recv().expect("Error receiving data from thread.");
output.set_row_data(row, data);
if verbose {
progress = (100.0_f64 * r as f64 / (rows - 1) as f64) as usize;
if progress != old_progress {
println!("Progress: {}%", progress);
old_progress = progress;
}
}
}
let elapsed_time = get_formatted_elapsed_time(start);
output.add_metadata_entry(format!(
"Created by whitebox_tools\' {} tool",
self.get_tool_name()
));
output.add_metadata_entry(format!("Input file: {}", input_file));
output.add_metadata_entry(format!("Elapsed Time (excluding I/O): {}", elapsed_time));
if verbose {
println!("Saving data...")
};
let _ = match output.write() {
Ok(_) => {
if verbose {
println!("Output file written")
}
}
Err(e) => return Err(e),
};
if verbose {
println!(
"{}",
&format!("Elapsed Time (excluding I/O): {}", elapsed_time)
);
}
Ok(())
}
}
| get_tool_name |
PodLister.go | // Code generated by mockery v0.0.0-dev. DO NOT EDIT.
package mocks
import (
labels "k8s.io/apimachinery/pkg/labels"
corev1 "k8s.io/client-go/listers/core/v1"
mock "github.com/stretchr/testify/mock"
v1 "k8s.io/api/core/v1"
)
| type PodLister struct {
mock.Mock
}
// List provides a mock function with given fields: selector
func (_m *PodLister) List(selector labels.Selector) ([]*v1.Pod, error) {
ret := _m.Called(selector)
var r0 []*v1.Pod
if rf, ok := ret.Get(0).(func(labels.Selector) []*v1.Pod); ok {
r0 = rf(selector)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*v1.Pod)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(labels.Selector) error); ok {
r1 = rf(selector)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Pods provides a mock function with given fields: namespace
func (_m *PodLister) Pods(namespace string) corev1.PodNamespaceLister {
ret := _m.Called(namespace)
var r0 corev1.PodNamespaceLister
if rf, ok := ret.Get(0).(func(string) corev1.PodNamespaceLister); ok {
r0 = rf(namespace)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(corev1.PodNamespaceLister)
}
}
return r0
} | // PodLister is an autogenerated mock type for the PodLister type |
buffer.rs | #![forbid(unsafe_code)]
use bytes::BytesMut;
/// Any type that can act as a buffer.
pub trait ToBuffer {
/// Returned the buffer.
fn get_buffer(&mut self) -> Buffer<'_>;
}
/// Buffer that can be used to write data into.
#[derive(Debug)]
pub enum Buffer<'a> {
/// A `Vec<u8>` acts as a buffer.
Vector(&'a mut Vec<u8>),
/// A byte slice acts as a buffer.
Slice(&'a mut [u8]),
/// A `BytesMut` acts as a buffer.
Bytes(&'a mut BytesMut),
}
impl ToBuffer for Vec<u8> {
#[inline(always)]
fn get_buffer(&mut self) -> Buffer<'_> {
Buffer::Vector(self)
}
}
impl ToBuffer for BytesMut {
#[inline(always)]
fn get_buffer(&mut self) -> Buffer<'_> {
Buffer::Bytes(self)
}
}
impl ToBuffer for Box<[u8]> {
#[inline(always)]
fn get_buffer(&mut self) -> Buffer<'_> {
Buffer::Slice(&mut *self)
}
}
impl<const LEN: usize> ToBuffer for [u8; LEN] {
#[inline(always)]
fn | (&mut self) -> Buffer<'_> {
Buffer::Slice(self)
}
}
| get_buffer |
kv_bench_test.go | package kv
import (
"context"
"crypto/rand"
"fmt"
"io/ioutil"
"testing"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types"
"github.com/celestiaorg/optimint/store"
)
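// The benchmark below indexes 35,000 transactions, each carrying a "transfer" event with an
// address and amount attribute, and then times repeated Search calls for a compound query
// (transfer.address = 'address_43' AND transfer.amount = 50).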
func | (b *testing.B) {
dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test")
if err != nil {
b.Errorf("failed to create temporary directory: %s", err)
}
db := store.NewDefaultKVStore(dbDir, "db", "benchmark_tx_search_test")
if err != nil {
b.Errorf("failed to create database: %s", err)
}
indexer := NewTxIndex(db)
for i := 0; i < 35000; i++ {
events := []abci.Event{
{
Type: "transfer",
Attributes: []abci.EventAttribute{
{Key: []byte("address"), Value: []byte(fmt.Sprintf("address_%d", i%100)), Index: true},
{Key: []byte("amount"), Value: []byte("50"), Index: true},
},
},
}
txBz := make([]byte, 8)
if _, err := rand.Read(txBz); err != nil {
b.Errorf("failed produce random bytes: %s", err)
}
txResult := &abci.TxResult{
Height: int64(i),
Index: 0,
Tx: types.Tx(string(txBz)),
Result: abci.ResponseDeliverTx{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
Events: events,
},
}
if err := indexer.Index(txResult); err != nil {
b.Errorf("failed to index tx: %s", err)
}
}
txQuery := query.MustParse("transfer.address = 'address_43' AND transfer.amount = 50")
b.ResetTimer()
ctx := context.Background()
for i := 0; i < b.N; i++ {
if _, err := indexer.Search(ctx, txQuery); err != nil {
b.Errorf("failed to query for txs: %s", err)
}
}
}
| BenchmarkTxSearch |
user.go | package rps
import (
"fmt"
"strconv"
"strings"
"sync"
"time"
)
// User structure.
type User struct {
userID int64
subscribed bool
hasTicket bool
isPlayer bool
lastWonAmount float64
totalWonAmount float64
leaderboardPosition uint32
playSequence string
name string
walletAddress string
lastTicketDate time.Time
joinDate time.Time
lock *sync.RWMutex
}
// NewUser creates an object of User structure.
func NewUser(
userID int64, name string,
subscribed, hasTicket, isPlayer bool,
leaderboardPosition uint32,
) User |
// GetUserID performs non-blocking get of user's id.
func (u *User) GetUserID() int64 {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.userID
}
// GetSubscribed performs non-blocking get of user's subscription status.
func (u *User) GetSubscribed() bool {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.subscribed
}
// GetHasTicket performs non-blocking get of user's ticket status.
func (u *User) GetHasTicket() bool {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.hasTicket
}
// GetIsPlayer performs non-blocking get of user's player status.
func (u *User) GetIsPlayer() bool {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.isPlayer
}
// GetLastWonAmount performs non-blocking get of user's last won amount.
func (u *User) GetLastWonAmount() float64 {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.lastWonAmount
}
// GetTotalWonAmount performs non-blocking get of user's total won amount.
func (u *User) GetTotalWonAmount() float64 {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.totalWonAmount
}
// GetLeaderboardPosition performs non-blocking get of user's leaderboard position.
func (u *User) GetLeaderboardPosition() uint32 {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.leaderboardPosition
}
// GetPlaySequence performs non-blocking get of user's play sequence.
func (u *User) GetPlaySequence() string {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.playSequence
}
// GetName performs non-blocking get of user's name.
func (u *User) GetName() string {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.name
}
// GetWalletAddress performs non-blocking get of user's wallet address.
func (u *User) GetWalletAddress() string {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.walletAddress
}
// GetLastTicketDate performs non-blocking get of user's last ticket purchase date.
func (u *User) GetLastTicketDate() time.Time {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.lastTicketDate
}
// GetJoinDate performs non-blocking get of user's join date.
func (u *User) GetJoinDate() time.Time {
(*u.lock).RLock()
defer (*u.lock).RUnlock()
return u.joinDate
}
// SetUserID performs non-blocking set of user's ID.
func (u *User) SetUserID(id int64) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.userID = id
}
// SetSubscribed performs non-blocking set of user's subscription status.
func (u *User) SetSubscribed(val bool) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.subscribed = val
}
// SetHasTicket performs non-blocking set of user's ticket status.
func (u *User) SetHasTicket(val bool) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.hasTicket = val
}
// SetIsPlayer performs non-blocking set of user's player status.
func (u *User) SetIsPlayer(val bool) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.isPlayer = val
}
// SetLastWonAmount performs non-blocking set of user's last won amount.
func (u *User) SetLastWonAmount(val float64) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.lastWonAmount = val
}
// SetTotalWonAmount performs non-blocking set of user's total won amount.
func (u *User) SetTotalWonAmount(val float64) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.totalWonAmount = val
}
// SetLeaderboardPosition performs non-blocking set of user's leaderboard position.
func (u *User) SetLeaderboardPosition(val uint32) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.leaderboardPosition = val
}
// SetPlaySequence performs non-blocking set of user's play sequence.
func (u *User) SetPlaySequence(val string) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.playSequence = val
}
// SetName performs non-blocking set of user's name.
func (u *User) SetName(val string) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.name = val
}
// SetWalletAddress performs non-blocking set of user's wallet address.
func (u *User) SetWalletAddress(val string) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.walletAddress = val
}
// SetLastTicketDate performs non-blocking set of user's last ticket purchase date.
func (u *User) SetLastTicketDate(date time.Time) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.lastTicketDate = date
}
// SetJoinDate performs non-blocking set of user's join date.
func (u *User) SetJoinDate(date time.Time) {
(*u.lock).Lock()
defer (*u.lock).Unlock()
u.joinDate = date
}
// Serialize performs serialization of the User structure.
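// The result is a single pipe-delimited, human-readable line, e.g. (illustrative values only):
//
//	UserID: 1|Subscribed: true|HasTicket: false|IsPlayer: true|LastWonAmount: 0.000000|...
//
// Deserialize below expects exactly this layout, including the RFC1123-formatted dates.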
func (u *User) Serialize() []byte {
return []byte(fmt.Sprintf("UserID: %d|Subscribed: %t|HasTicket: %t|IsPlayer: %t|"+
"LastWonAmount: %f|TotalWonAmount: %f|LeaderboardPosition: %d|PlaySequence: %s|"+
"Name: %s|WalletAddress: %s|LastTicketDate: %s|JoinDate: %s",
u.userID, u.subscribed, u.hasTicket, u.isPlayer, u.lastWonAmount, u.totalWonAmount,
u.leaderboardPosition, u.playSequence, u.name, u.walletAddress,
u.lastTicketDate.Format(time.RFC1123), u.joinDate.Format(time.RFC1123)),
)
}
// Deserialize performs deserialization of the User structure.
func Deserialize(data []byte) (User, error) {
strData := string(data)
d := strings.Split(strData, "|")
strUserID := d[0][strings.Index(d[0], " ")+1:]
userID, err := strconv.ParseInt(strUserID, 10, 64)
if err != nil {
return User{}, err
}
strSubscribed := d[1][strings.Index(d[1], " ")+1:]
subscribed, err := strconv.ParseBool(strSubscribed)
if err != nil {
return User{}, err
}
strHasTicket := d[2][strings.Index(d[2], " ")+1:]
hasTicket, err := strconv.ParseBool(strHasTicket)
if err != nil {
return User{}, err
}
strIsPlayer := d[3][strings.Index(d[3], " ")+1:]
isPlayer, err := strconv.ParseBool(strIsPlayer)
if err != nil {
return User{}, err
}
strLastWonAmount := d[4][strings.Index(d[4], " ")+1:]
lastWonAmount, err := strconv.ParseFloat(strLastWonAmount, 64)
if err != nil {
return User{}, err
}
strTotalWonAmount := d[5][strings.Index(d[5], " ")+1:]
totalWonAmount, err := strconv.ParseFloat(strTotalWonAmount, 64)
if err != nil {
return User{}, err
}
strLeaderboardPosition := d[6][strings.Index(d[6], " ")+1:]
_leaderboardPosition, err := strconv.ParseUint(strLeaderboardPosition, 10, 32)
if err != nil {
return User{}, err
}
leaderboardPosition := uint32(_leaderboardPosition)
playSequence := d[7][strings.Index(d[7], " ")+1:]
name := d[8][strings.Index(d[8], " ")+1:]
walletAddress := d[9][strings.Index(d[9], " ")+1:]
strLastTicketDate := d[10][strings.Index(d[10], " ")+1:]
lastTicketDate, err := time.Parse(time.RFC1123, strLastTicketDate)
if err != nil {
return User{}, err
}
strJoinDate := d[11][strings.Index(d[11], " ")+1:]
joinDate, err := time.Parse(time.RFC1123, strJoinDate)
if err != nil {
return User{}, err
}
lock := sync.RWMutex{}
u := User{userID, subscribed, hasTicket, isPlayer, lastWonAmount, totalWonAmount,
leaderboardPosition, playSequence, name, walletAddress, lastTicketDate, joinDate, &lock}
return u, err
}
| {
var playSequence, walletAddress string
var lastWonAmount, totalWonAmount float64
var lastTicketDate, joinDate time.Time
joinDate = time.Now()
lock := sync.RWMutex{}
u := User{userID, subscribed, hasTicket, isPlayer, lastWonAmount, totalWonAmount,
leaderboardPosition, playSequence, name, walletAddress, lastTicketDate, joinDate, &lock}
return u
} |
test_api.py | import datetime
import json
import os
from decimal import Decimal
from uuid import UUID
import pytest
import respx
from conftest import *
from shipstation.api import ShipStation
from shipstation.models import *
from shipstation.pagination import Page
@respx.mock
def test_get_carrier(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_carrier"]
response = ss.get_carrier("stamps_com")
assert request.called
assert isinstance(response, ShipStationCarrier)
assert isinstance(response.primary, bool)
assert response.name == "Stamps.com"
assert response.account_number == "example"
@respx.mock
def test_get_customer(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_customer"]
response = ss.get_customer(123456789)
assert request.called
assert isinstance(response, ShipStationCustomer)
assert isinstance(response.address_verified, bool)
assert response.address_verified is True
assert response.create_date == datetime.datetime(2017, 12, 16, 18, 49, 16, 7000)
assert response.marketplace_usernames[0].customer_id == 123456789
@respx.mock
def test_get_order(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_order"]
response = ss.get_order(123456789)
assert request.called
assert isinstance(response, ShipStationOrder)
assert isinstance(response.ship_to, ShipStationAddress)
assert isinstance(response.advanced_options, ShipStationAdvancedOptions)
assert isinstance(response.international_options, ShipStationInternationalOptions)
assert isinstance(response.insurance_options, ShipStationInsuranceOptions)
assert response.create_date == datetime.datetime(2015, 6, 30, 15, 20, 26, 723000)
@respx.mock
def test_get_product(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_product"]
response = ss.get_product(123456789)
assert request.called
assert isinstance(response, ShipStationItem)
assert response.create_date == datetime.datetime(2016, 10, 31, 7, 43, 0, 203000)
@respx.mock
def test_get_rates(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_rates"]
response = ss.get_rates(
ShipStationRateOptions(
carrier_code="stamps_com",
from_postal_code="20500",
to_postal_code="20500",
to_country="US",
weight=ShipStationWeight(units="ounces", value=12),
)
)
assert request.called
assert isinstance(response[0], ShipStationRate)
assert response[0].service_code == "usps_first_class_mail"
assert response[0].shipment_cost == Decimal("3.2")
@respx.mock
def test_get_stores(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_store"]
response = ss.get_store(12345)
assert request.called
assert isinstance(response, ShipStationStore)
assert response.store_name == "US Amazon Store"
assert response.account_name == "GHI123456789"
@respx.mock
def test_get_warehouse(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["get_warehouse"]
response = ss.get_warehouse(456789)
assert request.called
assert isinstance(response, ShipStationWarehouse)
assert isinstance(response.return_address, ShipStationAddress)
assert response.warehouse_name == "Test Company"
@respx.mock
def test_list_carriers(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_carriers"]
response = ss.list_carriers()
assert request.called
assert isinstance(response[0], ShipStationCarrier)
assert response[0].code == "stamps_com"
assert response[0].balance == Decimal("15.01")
@respx.mock
def test_list_tags(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_tags"]
response = ss.list_tags()
assert request.called
assert isinstance(response[0], ShipStationOrderTag)
assert response[0].tag_id == 12345
assert response[0].name == "Amazon Prime Order"
@respx.mock
def test_list_marketplaces(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_marketplaces"]
response = ss.list_marketplaces()
assert request.called
assert isinstance(response[0], ShipStationMarketplace)
assert response[0].name == "3dcart"
assert response[1].name == "Acumatica"
@respx.mock
def test_list_orders(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_orders"]
response = ss.list_orders()
assert request.called
# for order in response:
print("items", response[0].items)
assert isinstance(response[0], ShipStationOrder)
assert isinstance(response[0].ship_to, ShipStationAddress)
# assert isinstance(response[0].items[0], ShipStationOrderItem)
assert isinstance(response[0].advanced_options, ShipStationAdvancedOptions)
assert isinstance(response[0].weight, ShipStationWeight)
assert isinstance(
response[1].international_options, ShipStationInternationalOptions
)
assert isinstance(
response[1].international_options.customs_items[0], ShipStationCustomsItem
)
assert response[1].create_date == datetime.datetime(2015, 6, 30, 15, 20, 26, 723000)
assert response[1].shipping_amount == Decimal("0.0")
@respx.mock
def test_list_stores(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_stores"]
response = ss.list_stores(marketplace_id=2)
assert request.called
assert isinstance(response[0], ShipStationStore)
assert response[0].store_name == "Mexico Amazon Store"
assert response[1].account_name == "DEF123456789"
@respx.mock
def test_list_users(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_users"]
response = ss.list_users()
assert request.called
assert isinstance(response[0], ShipStationUser)
assert response[0].name == "Merchandising"
assert isinstance(response[1].user_id, UUID)
assert response[1].user_id == UUID("0dbc3f54-5cd4-4054-b2b5-92427e18d6cd")
@respx.mock
def test_list_warehouses(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_warehouses"]
response = ss.list_warehouses()
assert request.called
assert isinstance(response[0], ShipStationWarehouse)
assert isinstance(response[0].origin_address, ShipStationAddress)
assert response[0].origin_address.name == "Warehouse 1"
assert response[0].warehouse_id == "456789"
assert response[0].origin_address.street2 == "Unit 4"
@pytest.mark.skip
@respx.mock
def test_list_webhooks(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
|
@respx.mock
def test_list_services(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_services"]
response = ss.list_services(carrier_code="stamps_com")
assert request.called
assert isinstance(response[0], ShipStationCarrierService)
assert response[1].international is False
@respx.mock
def test_list_shipments(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_shipments"]
response = ss.list_shipments()
assert request.called
assert isinstance(response, Page)
assert isinstance(response[0], ShipStationOrder)
assert isinstance(response[0].ship_to, ShipStationAddress)
assert isinstance(response[0].advanced_options, ShipStationAdvancedOptions)
assert isinstance(response[0].weight, ShipStationWeight)
assert response[0].create_date == datetime.datetime(2015, 6, 29, 14, 29, 28, 583000)
assert response[0].shipment_cost == Decimal("2.35")
assert response[0].tracking_number == "9400111899562764298812"
@respx.mock
def test_list_packages(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_packages"]
response = ss.list_packages(carrier_code="stamps_com")
assert request.called
assert isinstance(response[0], ShipStationCarrierPackage)
assert isinstance(response[0].domestic, bool)
assert response[1].domestic is True
assert response[1].code == "flat_rate_envelope"
@respx.mock
def test_list_customers(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_customers"]
response = ss.list_customers()
assert request.called
assert isinstance(response[0], ShipStationCustomer)
assert isinstance(response[0].address_verified, bool)
assert response[0].address_verified is True
assert response[0].create_date == datetime.datetime(2017, 12, 16, 18, 49, 16, 7000)
assert response[0].marketplace_usernames[0].customer_id == 123456789
@respx.mock
def test_list_fulfillments(ss: ShipStation, mocked_api: respx.MockTransport) -> None:
request = mocked_api["list_fulfillments"]
response = ss.list_fulfillments()
assert request.called
assert isinstance(response[0], ShipStationFulfillment)
assert isinstance(response[0].ship_to, ShipStationAddress)
assert isinstance(response[0].user_id, UUID)
assert response[0].create_date == datetime.datetime(2020, 6, 19, 7, 21, 51, 773000)
assert response[1].notify_error_message is not None
"""
test_list_products is tested in test_pagination.py
"""
# def test_label():
# order = ss.get_order(481287142)
# order.weight = ShipStationWeight(units="ounces", value=64)
# order.carrier_code = "fedex"
# order.service_code = "fedex_ground"
# order.package_code = "package"
# return ss.create_label_for_order(order, True, True)
#
#
# def test_webooks():
# subscribe_to_webhook_options = {
# "target_url": "http://someexamplewebhookurl.com/neworder",
# "event": "ORDER_NOTIFY",
# "friendly_name": "pyshipstation test",
# }
# subscribed_to_webhook = ss.subscribe_to_webhook(subscribe_to_webhook_options)
# assert subscribed_to_webhook.status_code == 201
# webhook_id = subscribed_to_webhook.json()["id"]
# _list_webhooks_(webhook_id, found=True)
# ss.unsubscribe_to_webhook(webhook_id)
# _list_webhooks_(webhook_id, found=False)
#
#
# def _list_webhooks_(webhook_id=None, found=True):
# webhook_list = ss.list_webhooks()
# if found is False:
# with pytest.raises("KeyError"):
# webhook_list.json()["webhooks"]
# webhooks_list = webhook_list.json()["webhooks"]
# for webhook in webhooks_list:
# if webhook["WebHookID"] == webhook_id:
# assert webhook["WebHookID"] == webhook_id
# else:
# with pytest.raises("KeyError"):
# webhook["WebHookID"]
#
#
# def test_stores():
# marketplaces = ss.list_marketplaces()
# assert marketplaces.status_code == 200
# stores = ss.list_stores().json()
# assert len(stores) >= 1
# store_id = stores[-1].get("storeId")
# specific_store = ss.get_store(store_id)
# assert specific_store.status_code == 200
# r = ss.deactivate_store(store_id)
# assert r.status_code == 200
# r = ss.reactivate_store(store_id)
# assert r.status_code == 200
#
#
# def test_warehouses():
# warehouses = ss.list_warehouses()
# assert warehouses.status_code == 200
# warehouses_id = warehouses.json()[0].get("warehouseId")
# warehouse = ss.get_warehouse(warehouse_id)
# assert warehouse.status_code == 200
# new_warehouse = {
# "warehouse_name": "New Ship From Location",
# "origin_address": get_warehouse_address(),
# "return_address": get_warehouse_address(),
# "is_default": "false",
# }
# r = ss.create_warehouse(new_warehouse)
# assert warehouse.status_code == 200
# new_warehouse_id = r.json().get("warehouseId")
#
# # new_warehouse = ss.get_warehouse(new_warehouse_id).json()
# new_warehouse = ss.get_warehouse("2126606").json()
# new_warehouse.warehouse_name = "Updated New Ship From Location"
# ss.update_warehouse(new_warehouse)
# # ss.delete_warehouse()
#
# {
# "warehouseId": 12345,
# "warehouseName": "API Ship From Location",
# "originAddress": {
# "name": "API Warehouse",
# "company": "ShipStation",
# "street1": "2815 Exposition Blvd",
# "street2": null,
# "street3": null,
# "city": "Austin",
# "state": "TX",
# "postalCode": "78703",
# "country": "US",
# "phone": "512-555-5555",
# "residential": true,
# "addressVerified": null,
# },
# "returnAddress": {
# "name": "API Ship From Location",
# "company": "ShipStation",
# "street1": "2815 Exposition Blvd",
# "street2": null,
# "street3": null,
# "city": "Austin",
# "state": "TX",
# "postalCode": "78703",
# "country": "US",
# "phone": "512-555-5555",
# "residential": null,
# "addressVerified": null,
# },
# "createDate": "2015-07-02T08:38:31.4870000",
# "isDefault": true,
# }
#
#
# def get_warehouse_address():
# return ShipStationAddress(
# name="NM Warehouse",
# company="White Sands Co.",
# street1="4704 Arabela Dr.",
# city="Las Cruces",
# state="NM",
# postal_code="80012",
# country="US",
# phone="512-111-2222",
# residential="true",
# )
#
#
# def test_customers():
# r = ss.list_customers()
# assert r.status_code == 200
# customer_id = r.json()["customers"][0].get("customerId")
# r = ss.get_customer(customer_id)
# assert r.status_code == 200
#
#
# def test_shipments_and_fulfillments():
# r = ss.get_rates(carrier_code)
# assert r.status_code == 200
# r = ss.list_fulfillments()
# assert r.status_code == 200
# r = ss.list_shipments()
# assert r.status_code == 200
#
#
# def test_label_creation():
# pass
| request = mocked_api["list_webhooks"]
response = ss.list_webhooks()
assert request.called
# assert isinstance(response[0], ShipStationWarehouse)
# assert isinstance(response[0].origin_address, ShipStationAddress)
# assert response[0].origin_address.name == "Warehouse 1"
# assert response[0].warehouse_id == '456789'
# assert response[0].origin_address.street2 == "Unit 4" |
index.ts | import * as on_block from "./on_block";
import updateCampaignBalance from "./update_campaign_balance";
import coinConvert from "./coin_convert"; | import * as update_exchange_rate from "./update_exchange_rate";
import { createTask } from "../utils/task";
export function registerTasks() {
createTask(
"block",
async () => on_block.run([updateCampaignBalance, coinConvert]),
5 * 1000);
if (process.env.UPDATE_RATE) {
createTask(
"updateBipPrice",
update_exchange_rate.updateBipPrice,
5 * 60 * 1000);
createTask(
"updateBipPrice",
update_exchange_rate.updateCurrencyRates,
60 * 60 * 1000);
}
} | |
mock-axios.ts | /**
* TypeScript version of Axios mock for unit testing with [Jest](https://facebook.github.io/jest/).
* This file is based on https://gist.github.com/tux4/36006a1859323f779ab0
*
* @author knee-cola <[email protected]>
 * @license MIT License, http://www.opensource.org/licenses/MIT
*/
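// Usage sketch (illustrative only, not executed as part of this module). It assumes Jest
// resolves the "axios" import to this mock (for example via a manual mock in
// __mocks__/axios.ts or a moduleNameMapper entry); `fetchUsers` and "./users" below are
// hypothetical names used only to show the assert-then-respond flow:
//
//   import mockAxios from "jest-mock-axios"; // or the local path to this module
//   import { fetchUsers } from "./users";    // hypothetical code under test calling axios.get("/users")
//
//   afterEach(() => mockAxios.reset());
//
//   it("resolves the pending request synchronously", () => {
//     const thenFn = jest.fn();
//     fetchUsers().then(thenFn);
//     expect(mockAxios.get).toHaveBeenCalled();
//     mockAxios.mockResponse({ data: [{ id: 1 }] });
//     expect(thenFn).toHaveBeenCalledWith(expect.objectContaining({ data: [{ id: 1 }] }));
//   });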
import { SynchronousPromise, UnresolvedSynchronousPromise } from "synchronous-promise";
import Cancel from "./cancel/Cancel";
import CancelToken from "./cancel/CancelToken";
import {
AxiosMockQueueItem,
AxiosMockRequestCriteria,
AxiosMockType,
HttpResponse,
} from "./mock-axios-types";
/** a FIFO queue of pending request */
const _pending_requests: AxiosMockQueueItem[] = [];
const _newReq: (config?: any) => UnresolvedSynchronousPromise<any> = (config: any = {}, actualConfig: any = {}) => {
if(typeof config === 'string') {
// Allow for axios('example/url'[, config])
actualConfig.url = config;
config = actualConfig;
}
const method: string = config.method || "get";
const url: string = config.url;
const data: any = config.data;
const promise: UnresolvedSynchronousPromise<any> = SynchronousPromise.unresolved();
if(config.cancelToken) {
config.cancelToken.promise.then((cancel: any) => {
// check if promise is still waiting for an answer
if(_pending_requests.find(x => x.promise === promise)) {
MockAxios.mockError(cancel, promise)
}
})
}
_pending_requests.push({
config,
data,
method,
promise,
url
});
return promise;
};
const _helperReq = (method: string, url: string, data?: any, config?: any) => {
const conf = data && config ? config : {};
return _newReq({
...conf,
data,
method,
url,
});
};
const _helperReqNoData = (method: string, url: string, config?: any) => {
return _helperReq(method, url, {}, config)
}
const MockAxios: AxiosMockType = (jest.fn(_newReq) as unknown) as AxiosMockType;
// mocking Axios methods
MockAxios.get = jest.fn(_helperReqNoData.bind(null, "get"));
MockAxios.post = jest.fn(_helperReq.bind(null, "post"));
MockAxios.put = jest.fn(_helperReq.bind(null, "put"));
MockAxios.patch = jest.fn(_helperReq.bind(null, "patch"));
MockAxios.delete = jest.fn(_helperReqNoData.bind(null, "delete"));
MockAxios.request = jest.fn(_newReq);
MockAxios.all = jest.fn((values) => Promise.all(values));
MockAxios.head = jest.fn(_helperReqNoData.bind(null, "head"));
MockAxios.options = jest.fn(_helperReqNoData.bind(null, "options"));
MockAxios.create = jest.fn(() => MockAxios);
MockAxios.interceptors = {
request: {
use: jest.fn(),
eject: jest.fn(),
}, | use: jest.fn(),
eject: jest.fn(),
},
};
MockAxios.defaults = {
headers: {
common: [],
},
};
MockAxios.popPromise = (promise?: SynchronousPromise<any>) => {
if (promise) {
// remove the promise from pending queue
for (let ix = 0; ix < _pending_requests.length; ix++) {
const req: AxiosMockQueueItem = _pending_requests[ix];
if (req.promise === promise) {
_pending_requests.splice(ix, 1);
return req.promise;
}
}
} else {
// take the oldest promise
const req: AxiosMockQueueItem = _pending_requests.shift();
return req ? req.promise : void 0;
}
};
MockAxios.popRequest = (request?: AxiosMockQueueItem) => {
if (request) {
const ix = _pending_requests.indexOf(request);
if (ix === -1) {
return void 0;
}
_pending_requests.splice(ix, 1);
return request;
} else {
return _pending_requests.shift();
}
};
/**
 * Removes an item from the queue, based on its type
* @param queueItem
*/
const popQueueItem = (queueItem: SynchronousPromise<any> | AxiosMockQueueItem = null) => {
// first let's pretend the param is a queue item
const request: AxiosMockQueueItem = MockAxios.popRequest(
queueItem as AxiosMockQueueItem,
);
if (request) {
// IF the request was found
// > set the promise
return request.promise;
} else {
// ELSE maybe the `queueItem` is a promise (legacy mode)
return MockAxios.popPromise(queueItem as UnresolvedSynchronousPromise<any>);
}
};
MockAxios.mockResponse = (
response?: HttpResponse,
queueItem: SynchronousPromise<any> | AxiosMockQueueItem = null,
silentMode: boolean = false,
): void => {
// replacing missing data with default values
response = Object.assign(
{
config: {},
data: {},
headers: {},
status: 200,
statusText: "OK",
},
response,
);
const promise = popQueueItem(queueItem);
if (!promise && !silentMode) {
throw new Error("No request to respond to!");
} else if (!promise) {
return;
}
// resolving the Promise with the given response data
promise.resolve(response);
};
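// Usage sketch (illustrative only): by default `mockResponse` answers the oldest pending
// request; to answer a specific one, pass the queue item returned by e.g. `lastReqGet`
// or `getReqByUrl` (the "/users" URL below is a placeholder):
//
//   const req = MockAxios.getReqByUrl("/users");
//   MockAxios.mockResponse({ data: { ok: true } }, req);
//
// The partial response is merged over the defaults above (status 200, empty data/headers)
// before the queued SynchronousPromise is resolved.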
MockAxios.mockResponseFor = (
criteria: string | AxiosMockRequestCriteria,
response?: HttpResponse,
silentMode: boolean = false,
): void => {
if (typeof criteria === "string") {
criteria = {url: criteria};
}
const queueItem = MockAxios.getReqMatching(criteria);
if (!queueItem && !silentMode) {
throw new Error("No request to respond to!");
} else if (!queueItem) {
return;
}
MockAxios.mockResponse(response, queueItem, silentMode);
};
MockAxios.mockError = (
error: any = {},
queueItem: SynchronousPromise<any> | AxiosMockQueueItem = null,
silentMode: boolean = false,
) => {
const promise = popQueueItem(queueItem);
if (!promise && !silentMode) {
throw new Error("No request to respond to!");
} else if (!promise) {
return;
}
    // rejecting the Promise with the given error
promise.reject(error);
};
MockAxios.lastReqGet = () => {
return _pending_requests[_pending_requests.length - 1];
};
MockAxios.lastPromiseGet = () => {
const req = MockAxios.lastReqGet();
return req ? req.promise : void 0;
};
const _findReqByPredicate = (predicate: (item: AxiosMockQueueItem) => boolean) => {
return _pending_requests
.slice()
.reverse() // reverse cloned array to return most recent req
.find(predicate);
}
const _checkCriteria = (item: AxiosMockQueueItem, criteria: AxiosMockRequestCriteria) => {
if (criteria.method !== undefined && criteria.method.toLowerCase() !== item.method.toLowerCase()) {
return false;
}
if (criteria.url !== undefined && criteria.url !== item.url) {
return false;
}
return true;
};
MockAxios.getReqMatching = (criteria: AxiosMockRequestCriteria) => {
return _findReqByPredicate((x) => _checkCriteria(x, criteria));
};
MockAxios.getReqByUrl = (url: string) => {
return MockAxios.getReqMatching({url});
};
MockAxios.getReqByMatchUrl = (url: RegExp) => {
return _findReqByPredicate((x) => url.test(x.url));
};
MockAxios.queue = () => {
return _pending_requests;
};
MockAxios.reset = () => {
// remove all the requests
_pending_requests.splice(0, _pending_requests.length);
// resets all information stored in the mockFn.mock.calls and mockFn.mock.instances arrays
MockAxios.get.mockClear();
MockAxios.post.mockClear();
MockAxios.put.mockClear();
MockAxios.patch.mockClear();
MockAxios.delete.mockClear();
MockAxios.head.mockClear();
MockAxios.options.mockClear();
MockAxios.request.mockClear();
MockAxios.all.mockClear();
};
MockAxios.Cancel = Cancel;
MockAxios.CancelToken = CancelToken;
MockAxios.isCancel = (u): u is Cancel => {
return !!(u && u.__CANCEL__);
};
// this is a singleton object
export default MockAxios; | response: { |
serializer.rs | //! A JSON-LD serializer implementing the
//! [`Serialize RDF as JSON-LD Algorithm`].
//!
//! [`Serialize RDF as JSON-LD Algorithm`]: https://www.w3.org/TR/json-ld11-api/#serialize-rdf-as-json-ld-algorithm
use crate::config::*;
use crate::error::*;
use json::JsonValue;
use sophia::quad::stream::*;
use sophia::triple::stream::{SinkError, StreamResult};
use sophia_api::serializer::*;
mod engine;
mod rdf_object;
#[cfg(test)]
mod test;
/// A JSON-LD serializer.
pub struct JsonLdSerializer<W> {
config: JsonLdConfig,
target: W,
}
impl<W> JsonLdSerializer<W> {
/// Build a new JSON-LD serializer with the default config.
#[inline]
pub fn new(target: W) -> JsonLdSerializer<W> {
Self::new_with_config(target, JsonLdConfig::default())
}
    /// Build a new JSON-LD serializer writing to `target`, with the given config.
pub fn new_with_config(target: W, config: JsonLdConfig) -> JsonLdSerializer<W> {
JsonLdSerializer { config, target }
}
/// Borrow this serializer's configuration.
pub fn config(&self) -> &JsonLdConfig {
&self.config
}
/// Convert a quad stream into a Json object
fn convert_quads<QS>(&mut self, source: QS) -> StreamResult<JsonValue, QS::Error, JsonLdError>
where
QS: QuadSource,
{
let mut engine = engine::Engine::new_with_config(self.config.clone());
engine.process_quads(source)?;
engine.into_json().map_err(SinkError)
}
}
impl<W> QuadSerializer for JsonLdSerializer<W>
where
W: std::io::Write,
{
type Error = JsonLdError;
fn serialize_quads<QS>(&mut self, source: QS) -> StreamResult<&mut Self, QS::Error, Self::Error>
where
QS: QuadSource,
{
let result = self.convert_quads(source)?;
let json_txt = match self.config.spaces {
0 => json::stringify(result),
x => json::stringify_pretty(result, x),
};
self.target
            .write_all(json_txt.as_bytes())
.map_err(|e| SinkError(e.into()))?;
Ok(self)
}
}
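// A minimal usage sketch (illustrative only; the exact trait imports depend on the
// sophia version in use): quads from any `QuadSource` can be written to an `io::Write`
// target such as an in-memory buffer. `some_quad_source` is a placeholder for whatever
// dataset or parser output the caller already has:
//
//     use sophia_api::serializer::QuadSerializer;
//
//     let mut buf = Vec::<u8>::new();
//     JsonLdSerializer::new(&mut buf).serialize_quads(some_quad_source)?;
//     let json_ld = String::from_utf8(buf)?;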
/// A utility type alias of [`JsonLdSerializer`] with [`JsonTarget`] as its target.
///
/// [`JsonLdSerializer`]: struct.JsonLdSerializer.html
/// [`JsonTarget`]: struct.JsonTarget.html
pub type Jsonifier = JsonLdSerializer<JsonTarget>;
| /// See [`new_jsonifier`] and [`new_jsonifier_with_config`].
///
/// [`JsonLdSerializer`]: struct.JsonLdSerializer.html
/// [`new_jsonifier`]: struct.JsonLdSerializer.html#method.new_jsonifier
/// [`new_jsonifier_with_config`]: struct.JsonLdSerializer.html#method.new_jsonifier_with_config
#[derive(Clone, Debug)]
pub struct JsonTarget(JsonValue);
impl Jsonifier {
/// Create a new serializer which targets a `JsonValue`.
#[inline]
pub fn new_jsonifier() -> Self {
JsonLdSerializer::new(JsonTarget(JsonValue::Null))
}
/// Create a new serializer which targets a `JsonValue` with a custom config.
#[inline]
pub fn new_jsonifier_with_config(config: JsonLdConfig) -> Self {
JsonLdSerializer::new_with_config(JsonTarget(JsonValue::Null), config)
}
/// Get a reference to the converted JsonValue
#[inline]
pub fn as_json(&self) -> &JsonValue {
&self.target.0
}
}
impl QuadSerializer for Jsonifier {
type Error = JsonLdError;
fn serialize_quads<QS>(&mut self, source: QS) -> StreamResult<&mut Self, QS::Error, Self::Error>
where
QS: QuadSource,
{
self.target.0 = self.convert_quads(source)?;
Ok(self)
}
}
/// A utility type alias representing a [`JsonLdSerializer`] which targets a string.
///
/// [`JsonLdSerializer`]: struct.JsonLdSerializer.html
pub type JsonLdStringifier = JsonLdSerializer<Vec<u8>>;
impl JsonLdStringifier {
/// Create a new serializer which targets a string.
#[inline]
pub fn new_stringifier() -> Self {
JsonLdSerializer::new(Vec::new())
}
/// Create a new serializer which targets a string with a custom config.
#[inline]
pub fn new_stringifier_with_config(config: JsonLdConfig) -> Self {
JsonLdSerializer::new_with_config(Vec::new(), config)
}
}
impl Stringifier for JsonLdStringifier {
fn as_utf8(&self) -> &[u8] {
&self.target[..]
}
} | /// This type is just a placeholder [`JsonLdSerializer`]
/// targeting a `JsonValue`.
options-validator.spec.ts | import os from 'os';
import sinon from 'sinon';
import { LogLevel, ReportType, strykerCoreSchema, StrykerOptions } from '@stryker-mutator/api/core';
import { testInjector, factory } from '@stryker-mutator/test-helpers';
import { expect } from 'chai';
import { OptionsValidator, validateOptions, markUnknownOptions } from '../../../src/config/options-validator';
import { coreTokens } from '../../../src/di';
import { createCpuInfo } from '../../helpers/producers';
describe(OptionsValidator.name, () => {
let sut: OptionsValidator;
beforeEach(() => {
sut = testInjector.injector.provideValue(coreTokens.validationSchema, strykerCoreSchema).injectClass(OptionsValidator);
});
it('should validate an empty object', () => {
sut.validate({});
expect(testInjector.logger.fatal).not.called;
expect(testInjector.logger.error).not.called;
expect(testInjector.logger.warn).not.called;
});
it('should fill default values', () => {
const options: Record<string, unknown> = {};
sut.validate(options);
const expectedOptions: StrykerOptions = {
allowConsoleColors: true,
appendPlugins: [],
checkers: [],
cleanTempDir: true,
inPlace: false,
clearTextReporter: {
allowColor: true,
logTests: true,
maxTestsToLog: 3,
},
commandRunner: {
command: 'npm test',
},
coverageAnalysis: 'off',
dashboard: {
baseUrl: 'https://dashboard.stryker-mutator.io/api/reports',
reportType: ReportType.Full,
},
disableTypeChecks: '{test,src,lib}/**/*.{js,ts,jsx,tsx,html,vue}',
dryRunTimeoutMinutes: 5,
eventReporter: {
baseDir: 'reports/mutation/events',
},
fileLogLevel: LogLevel.Off,
jsonReporter: {
fileName: 'reports/mutation/mutation.json',
},
logLevel: LogLevel.Information,
maxConcurrentTestRunners: 9007199254740991,
maxTestRunnerReuse: 0,
mutate: [
'{src,lib}/**/!(*.+(s|S)pec|*.+(t|T)est).+(cjs|mjs|js|ts|jsx|tsx|html|vue)',
'!{src,lib}/**/__tests__/**/*.+(cjs|mjs|js|ts|jsx|tsx|html|vue)',
],
mutator: {
excludedMutations: [],
plugins: null,
},
plugins: ['@stryker-mutator/*'],
reporters: ['clear-text', 'progress', 'html'],
symlinkNodeModules: true,
tempDirName: '.stryker-tmp',
testRunner: 'command',
testRunnerNodeArgs: [],
thresholds: {
break: null,
high: 80,
low: 60,
},
timeoutFactor: 1.5,
timeoutMS: 5000,
tsconfigFile: 'tsconfig.json',
warnings: true,
};
expect(options).deep.eq(expectedOptions);
});
it('should validate the default options', () => {
actAssertValid();
});
describe('thresholds', () => {
it('should be invalid with thresholds < 0 or > 100', () => {
testInjector.options.thresholds.high = -1;
testInjector.options.thresholds.low = 101;
actValidationErrors('Config option "thresholds.high" should be >= 0, was -1.', 'Config option "thresholds.low" should be <= 100, was 101.');
});
it('should be invalid with thresholds.high null', () => {
// @ts-expect-error invalid setting
testInjector.options.thresholds.high = null;
actValidationErrors('Config option "thresholds.high" has the wrong type. It should be a number, but was a null.');
});
it('should not allow high < low', () => {
testInjector.options.thresholds.high = 20;
testInjector.options.thresholds.low = 21;
actValidationErrors('Config option "thresholds.high" should be higher than "thresholds.low".');
});
});
it('should be invalid with invalid logLevel', () => {
// @ts-expect-error invalid setting
testInjector.options.logLevel = 'thisTestPasses';
actValidationErrors(
'Config option "logLevel" should be one of the allowed values ("off", "fatal", "error", "warn", "info", "debug", "trace"), but was "thisTestPasses".'
);
});
it('should be invalid with non-numeric timeoutMS', () => {
breakConfig('timeoutMS', 'break');
actValidationErrors('Config option "timeoutMS" has the wrong type. It should be a number, but was a string.');
});
it('should be invalid with non-numeric timeoutFactor', () => {
breakConfig('timeoutFactor', 'break');
actValidationErrors('Config option "timeoutFactor" has the wrong type. It should be a number, but was a string.');
});
it('should be invalid with non-numeric dryRunTimeout', () => {
breakConfig('dryRunTimeoutMinutes', 'break');
actValidationErrors('Config option "dryRunTimeoutMinutes" has the wrong type. It should be a number, but was a string.');
});
it('should be invalid with negative numeric dryRunTimeout', () => {
breakConfig('dryRunTimeoutMinutes', -1);
actValidationErrors('Config option "dryRunTimeoutMinutes" should be >= 0, was -1.');
});
describe('plugins', () => {
it('should be invalid with non-array plugins', () => {
breakConfig('plugins', '@stryker-mutator/typescript');
actValidationErrors('Config option "plugins" has the wrong type. It should be a array, but was a string.');
});
it('should be invalid with non-string array elements', () => {
breakConfig('plugins', ['stryker-jest', 0]);
actValidationErrors('Config option "plugins[1]" has the wrong type. It should be a string, but was a number.');
});
});
describe('appendPlugins', () => {
it('should be invalid with non-array plugins', () => {
breakConfig('appendPlugins', '@stryker-mutator/typescript');
actValidationErrors('Config option "appendPlugins" has the wrong type. It should be a array, but was a string.');
});
it('should be invalid with non-string array elements', () => {
breakConfig('appendPlugins', ['stryker-jest', 0]);
actValidationErrors('Config option "appendPlugins[1]" has the wrong type. It should be a string, but was a number.');
});
});
describe('mutator', () => {
it('should be invalid with non-string mutator', () => {
// @ts-expect-error invalid setting
testInjector.options.mutator = 1;
actValidationErrors('Config option "mutator" has the wrong type. It should be a object, but was a number.');
});
it('should report a deprecation warning for "mutator.name"', () => {
testInjector.options.mutator = {
// @ts-expect-error invalid setting
name: 'javascript',
};
sut.validate(testInjector.options);
expect(testInjector.logger.warn).calledWith(
'DEPRECATED. Use of "mutator.name" is no longer needed. You can remove "mutator.name" from your configuration. Stryker now supports mutating of JavaScript and friend files out of the box.'
);
});
it('should report a deprecation warning for mutator as a string', () => {
// @ts-expect-error invalid setting
testInjector.options.mutator = 'javascript';
sut.validate(testInjector.options);
expect(testInjector.logger.warn).calledWith(
'DEPRECATED. Use of "mutator" as string is no longer needed. You can remove it from your configuration. Stryker now supports mutating of JavaScript and friend files out of the box.'
);
});
});
describe('testFramework', () => {
it('should report a deprecation warning', () => {
testInjector.options.testFramework = '';
sut.validate(testInjector.options);
expect(testInjector.logger.warn).calledWith(
'DEPRECATED. Use of "testFramework" is no longer needed. You can remove it from your configuration. Your test runner plugin now handles its own test framework integration.'
);
});
});
describe('reporters', () => {
it('should be invalid with non-array reporters', () => {
breakConfig('reporters', '@stryker-mutator/typescript');
actValidationErrors('Config option "reporters" has the wrong type. It should be a array, but was a string.');
});
it('should be invalid with non-string array elements', () => {
breakConfig('reporters', ['stryker-jest', 0]);
actValidationErrors('Config option "reporters[1]" has the wrong type. It should be a string, but was a number.');
});
});
describe('dashboard', () => {
it('should be invalid for non-string project', () => {
breakConfig('dashboard', { project: 23 });
actValidationErrors('Config option "dashboard.project" has the wrong type. It should be a string, but was a number.');
});
it('should be invalid for non-string module', () => {
breakConfig('dashboard', { module: 23 });
actValidationErrors('Config option "dashboard.module" has the wrong type. It should be a string, but was a number.');
});
it('should be invalid for non-string version', () => {
breakConfig('dashboard', { version: 23 });
actValidationErrors('Config option "dashboard.version" has the wrong type. It should be a string, but was a number.');
});
it('should be invalid for non-string baseUrl', () => {
breakConfig('dashboard', { baseUrl: 23 });
actValidationErrors('Config option "dashboard.baseUrl" has the wrong type. It should be a string, but was a number.');
});
it('should be invalid for a wrong reportType', () => {
breakConfig('dashboard', { reportType: 'empty' });
actValidationErrors('Config option "dashboard.reportType" should be one of the allowed values ("full", "mutationScore"), but was "empty".');
});
});
describe('maxConcurrentTestRunners', () => {
it('should report a deprecation warning', () => { | expect(testInjector.logger.warn).calledWith('DEPRECATED. Use of "maxConcurrentTestRunners" is deprecated. Please use "concurrency" instead.');
});
it('should not configure "concurrency" if "maxConcurrentTestRunners" is >= cpus-1', () => {
testInjector.options.maxConcurrentTestRunners = 2;
sinon.stub(os, 'cpus').returns([createCpuInfo(), createCpuInfo(), createCpuInfo()]);
sut.validate(testInjector.options);
expect(testInjector.options.concurrency).undefined;
});
it('should configure "concurrency" if "maxConcurrentTestRunners" is set with a lower value', () => {
testInjector.options.maxConcurrentTestRunners = 1;
sinon.stub(os, 'cpus').returns([createCpuInfo(), createCpuInfo(), createCpuInfo()]);
sut.validate(testInjector.options);
expect(testInjector.options.concurrency).eq(1);
});
});
it('should be invalid with non-numeric maxTestRunnerReuse', () => {
breakConfig('maxTestRunnerReuse', 'break');
actValidationErrors('Config option "maxTestRunnerReuse" has the wrong type. It should be a number, but was a string.');
});
it('should warn when testRunnerNodeArgs are combined with the "command" test runner', () => {
testInjector.options.testRunnerNodeArgs = ['--inspect-brk'];
testInjector.options.testRunner = 'command';
sut.validate(testInjector.options);
expect(testInjector.logger.warn).calledWith(
'Using "testRunnerNodeArgs" together with the "command" test runner is not supported, these arguments will be ignored. You can add your custom arguments by setting the "commandRunner.command" option.'
);
});
describe('transpilers', () => {
it('should report a deprecation warning', () => {
testInjector.options.transpilers = ['stryker-jest'];
sut.validate(testInjector.options);
expect(testInjector.logger.warn).calledWith(
'DEPRECATED. Support for "transpilers" is removed. You can now configure your own "buildCommand". For example, npm run build.'
);
});
});
it('should be invalid with invalid coverageAnalysis', () => {
breakConfig('coverageAnalysis', 'invalid');
actValidationErrors('Config option "coverageAnalysis" should be one of the allowed values ("off", "all", "perTest"), but was "invalid".');
});
function actValidationErrors(...expectedErrors: string[]) {
expect(() => sut.validate(testInjector.options)).throws();
for (const error of expectedErrors) {
expect(testInjector.logger.error).calledWith(error);
}
expect(testInjector.logger.error).callCount(expectedErrors.length);
}
function actAssertValid() {
sut.validate(testInjector.options);
expect(testInjector.logger.fatal).not.called;
expect(testInjector.logger.error).not.called;
expect(testInjector.logger.warn).not.called;
}
function breakConfig(key: keyof StrykerOptions, value: any): void {
const original = testInjector.options[key];
if (typeof original === 'object' && !Array.isArray(original)) {
testInjector.options[key] = { ...original, ...value };
} else {
testInjector.options[key] = value;
}
}
});
describe(validateOptions.name, () => {
let optionsValidatorMock: sinon.SinonStubbedInstance<OptionsValidator>;
beforeEach(() => {
optionsValidatorMock = sinon.createStubInstance(OptionsValidator);
});
it('should validate the options using given optionsValidator', () => {
const options = { foo: 'bar' };
const output = validateOptions(options, (optionsValidatorMock as unknown) as OptionsValidator);
expect(options).deep.eq(output);
expect(optionsValidatorMock.validate).calledWith(options);
});
});
describe(markUnknownOptions.name, () => {
it('should not warn when there are no unknown properties', () => {
testInjector.options.htmlReporter = {
baseDir: 'test',
};
    markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
    expect(testInjector.logger.warn).not.called;
});
it('should return the options, no matter what', () => {
testInjector.options['this key does not exist'] = 'foo';
const output = markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
expect(output).eq(testInjector.options);
});
it('should not warn when unknown properties are postfixed with "_comment"', () => {
testInjector.options.maxConcurrentTestRunners_comment = 'Recommended to use half of your cores';
markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
expect(testInjector.logger.warn).not.called;
});
it('should warn about unknown properties', () => {
testInjector.options.karma = {};
testInjector.options.jest = {};
markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
expect(testInjector.logger.warn).calledThrice;
expect(testInjector.logger.warn).calledWith('Unknown stryker config option "karma".');
expect(testInjector.logger.warn).calledWith('Unknown stryker config option "jest".');
expect(testInjector.logger.warn).calledWithMatch('Possible causes');
});
it('should not warn about unknown properties when warnings are disabled', () => {
testInjector.options.karma = {};
testInjector.options.warnings = factory.warningOptions({ unknownOptions: false });
markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
expect(testInjector.logger.warn).not.called;
});
it('should ignore options added by Stryker itself', () => {
testInjector.options.set = {};
testInjector.options.configFile = {};
testInjector.options.$schema = '';
markUnknownOptions(testInjector.options, strykerCoreSchema, testInjector.logger);
expect(testInjector.logger.warn).not.called;
});
}); | testInjector.options.maxConcurrentTestRunners = 8;
sut.validate(testInjector.options); |
json.rs | use educe::Educe;
use futures::{pin_mut, stream, AsyncReadExt, FutureExt, Stream, StreamExt};
use serde::{Deserialize, Serialize};
use serde_closure::FnMutNamed;
use serde_json::Error as InternalJsonError;
use std::{
error, fmt::{self, Debug, Display}, io::{self, Cursor}, marker::PhantomData
};
use amadeus_core::{
file::{File, Page, Partition}, into_par_stream::IntoDistributedStream, par_stream::DistributedStream, util::{DistParStream, ResultExpandIter}, Source
};
use super::{SerdeData, SerdeDeserialize};
#[derive(Educe)]
#[educe(Clone, Debug)]
pub struct Json<File, Row>
where
File: amadeus_core::file::File,
Row: SerdeData,
{
partitions: Vec<File::Partition>,
marker: PhantomData<fn() -> Row>,
}
impl<F, Row> Json<F, Row>
where
F: File,
Row: SerdeData,
{
pub async fn new(file: F) -> Result<Self, <Self as Source>::Error> {
Ok(Self {
partitions: file.partitions().await.map_err(JsonError::File)?,
marker: PhantomData,
})
}
}
type Error<P, E> = JsonError<E, <P as Partition>::Error, <<P as Partition>::Page as Page>::Error>;
#[cfg(not(nightly))]
type Output<P, Row: SerdeData, E> = std::pin::Pin<Box<dyn Stream<Item = Result<Row, Error<P, E>>>>>;
#[cfg(nightly)]
type Output<P: Partition, Row: SerdeData, E> = impl Stream<Item = Result<Row, Error<P, E>>>;
FnMutNamed! {
pub type Closure<P, Row, E> = |self|partition=> P| -> Output<P, Row, E>
where
P: Partition,
Row: SerdeData,
E: 'static
{
#[allow(clippy::let_and_return)]
let ret = async move {
Ok(stream::iter(
partition
.pages()
.await
.map_err(JsonError::Partition)?
.into_iter(),
)
.flat_map(|page| {
async move {
let mut buf = Vec::with_capacity(10 * 1024 * 1024);
let reader = Page::reader(page);
pin_mut!(reader);
let buf = PassError::new(
reader.read_to_end(&mut buf).await.map(|_| Cursor::new(buf)),
);
Ok(stream::iter(
serde_json::Deserializer::from_reader(buf).into_iter().map(
|x: Result<SerdeDeserialize<Row>, InternalJsonError>| Ok(x?.0),
),
))
}
.map(ResultExpandIter::new)
.flatten_stream()
})
.map(|row: Result<Result<Row, InternalJsonError>, Error<P, E>>| Ok(row??)))
}
.map(ResultExpandIter::new)
.flatten_stream()
.map(|row: Result<Result<Row, Error<P, E>>, Error<P, E>>| Ok(row??));
#[cfg(not(nightly))]
let ret = ret.boxed_local();
ret
}
}
impl<F, Row> Source for Json<F, Row>
where
F: File,
Row: SerdeData,
{
type Item = Row;
#[allow(clippy::type_complexity)]
type Error = JsonError<
F::Error,
<F::Partition as Partition>::Error,
<<F::Partition as Partition>::Page as Page>::Error,
>;
type ParStream = DistParStream<Self::DistStream>;
#[cfg(not(nightly))]
#[allow(clippy::type_complexity)]
type DistStream = amadeus_core::par_stream::FlatMap<
amadeus_core::into_par_stream::IterDistStream<std::vec::IntoIter<F::Partition>>,
Closure<F::Partition, Row, F::Error>,
>;
#[cfg(nightly)]
type DistStream = impl DistributedStream<Item = Result<Self::Item, Self::Error>>;
fn par_stream(self) -> Self::ParStream {
DistParStream::new(self.dist_stream())
}
#[allow(clippy::let_and_return)]
fn dist_stream(self) -> Self::DistStream {
self.partitions.into_dist_stream().flat_map(Closure::new())
}
}
mod jsonerror {
use serde::{Deserializer, Serializer};
pub(crate) fn serialize<T, S>(_t: &T, _serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
unimplemented!()
}
pub(crate) fn deserialize<'de, T, D>(_deserializer: D) -> Result<T, D::Error>
where
D: Deserializer<'de>,
{
unimplemented!()
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum JsonError<A, B, C> {
File(A),
Partition(B),
Page(C),
Json(#[serde(with = "jsonerror")] InternalJsonError),
}
impl<A, B, C> Clone for JsonError<A, B, C>
where
A: Clone,
B: Clone,
C: Clone,
{
fn clone(&self) -> Self {
match self {
Self::File(err) => Self::File(err.clone()),
Self::Partition(err) => Self::Partition(err.clone()),
Self::Page(err) => Self::Page(err.clone()),
Self::Json(err) => Self::Json(serde::de::Error::custom(err)),
}
}
}
impl<A, B, C> PartialEq for JsonError<A, B, C>
where
A: PartialEq,
B: PartialEq,
C: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::File(a), Self::File(b)) => a.eq(b),
(Self::Partition(a), Self::Partition(b)) => a.eq(b),
(Self::Page(a), Self::Page(b)) => a.eq(b),
(Self::Json(a), Self::Json(b)) => a.to_string() == b.to_string(),
_ => false,
}
}
}
impl<A, B, C> error::Error for JsonError<A, B, C>
where | C: error::Error,
{
}
impl<A, B, C> Display for JsonError<A, B, C>
where
A: Display,
B: Display,
C: Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::File(err) => Display::fmt(err, f),
Self::Partition(err) => Display::fmt(err, f),
Self::Page(err) => Display::fmt(err, f),
Self::Json(err) => Display::fmt(err, f),
}
}
}
impl<A, B, C> From<InternalJsonError> for JsonError<A, B, C> {
fn from(err: InternalJsonError) -> Self {
Self::Json(err)
}
}
struct PassError<R>(Result<R, Option<io::Error>>);
impl<R> PassError<R> {
fn new(r: Result<R, io::Error>) -> Self {
Self(r.map_err(Some))
}
}
impl<R> io::Read for PassError<R>
where
R: io::Read,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match &mut self.0 {
Ok(r) => r.read(buf),
Err(r) => Err(r.take().unwrap()),
}
}
} | A: error::Error,
B: error::Error, |
plot.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple script to plot waveforms in one or more files.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from argparse import ArgumentParser
from obspy import Stream, __version__, read
from obspy.core.util.base import ENTRY_POINTS
from obspy.core.util.misc import MatplotlibBackend
def | (argv=None):
parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
help='Waveform format.')
parser.add_argument('-o', '--outfile',
help='Output filename.')
parser.add_argument('-n', '--no-automerge', dest='automerge',
action='store_false',
help='Disable automatic merging of matching channels.')
parser.add_argument('--full', dest='full', action='store_true',
help='Disable min/max-plot, i.e. always plot every '
'single sample (Stream.plot(..., method="full"), '
'for interactive zooming).')
parser.add_argument('files', nargs='+',
help='Files to plot.')
args = parser.parse_args(argv)
if args.outfile is not None:
MatplotlibBackend.switch_backend("AGG", sloppy=False)
st = Stream()
for f in args.files:
st += read(f, format=args.format)
kwargs = {"outfile": args.outfile,
"automerge": args.automerge}
if args.full:
kwargs['method'] = "full"
st.plot(**kwargs)
if __name__ == "__main__":
main()
| main |
arithmetischeoperation_string.go | // Code generated by "stringer --type ArithmetischeOperation"; DO NOT EDIT.
package arithmetischeoperation
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[ADDITION-1]
_ = x[SUBTRAKTION-2]
_ = x[MULTIPLIKATION-3]
_ = x[DIVISION-4]
}
const _ArithmetischeOperation_name = "ADDITIONSUBTRAKTIONMULTIPLIKATIONDIVISION"
var _ArithmetischeOperation_index = [...]uint8{0, 8, 19, 33, 41}
func (i ArithmetischeOperation) String() string {
i -= 1
if i < 0 || i >= ArithmetischeOperation(len(_ArithmetischeOperation_index)-1) |
return _ArithmetischeOperation_name[_ArithmetischeOperation_index[i]:_ArithmetischeOperation_index[i+1]]
}
| {
return "ArithmetischeOperation(" + strconv.FormatInt(int64(i+1), 10) + ")"
} |
TotemModel.py | # -*- coding: utf-8 -*-
"""
Created April 2019
@author: Amon Millner
This is a module that contains a class that serves as a model
for the totem game built, which is an example of the
Model-View-Controller (MVC) framework.
"""
import pygame, copy
from pygame.locals import *
import random
class | (object):
""" Encodes a model of the game state """
def __init__(self, size=(640,480),number_of_faces=0):
self.width, self.height = size
self.level = 1
self.foundation = {}
self.direction = 'left'
self.reset_game = 0
self.won_game = 0
self.new_game = 0
self.number_of_faces = number_of_faces
self.face_index = random.randint(0, number_of_faces)
self.face = Face(self.width, self.height, self.face_index)
def addFaceToFoundation(self):
"""Puts a face in the game area on the current level where
the user pressed the space key. Future rows will check the location
of faces in the foundation to test whether a head can stack on top.
"""
if self.level > 1: #only initiates if there are faces below
#compares the x and y values of the face below to check boundaries
if (self.face.x > (self.foundation[self.level-1].x + (self.face.width//2)))\
or ((self.face.x + (self.face.width//2)) < self.foundation[self.level-1].x):
self.reset_game = 1 #sets the reset flag if out of bounds
return
self.oldface = copy.deepcopy(self.face) #puts a copy into the foundation
self.foundation[self.level] = self.oldface
self.level += 1
#picks a new face from the array of possible images
self.face_index = random.randint(0, self.number_of_faces)
def update(self):
""" Update the game state """
if self.face.x > (self.width - self.face.width):
self.direction = 'left'
elif self.face.x < 1: # checks the left wall, changes direction
self.direction = 'right'
# checks to see whether the stack is high enough to win the game
if (self.height - (self.face.height * self.level)) < self.face.height:
self.won_game = 1
else:
# calls each face's update function, to help facilitate its drawing
self.face.update(self.height - (self.face.height * self.level),
self.direction, self.level, self.face_index)
def __str__(self):
output_lines = []
# will detail each face as a string
        for key, value in self.foundation.items():
output_lines.append(str(value))
# print one item per line
return "\n".join(output_lines)
class Face(object):
""" Encodes the state of a face in the game """
def __init__(self,starting_x=0,starting_y=0,velocity=6,height=80,width=80,
face_index=0):
self.height = height
self.width = width
self.x = starting_x
self.y = starting_y - self.height
self.velocity = velocity
self.face_index = face_index
def update(self, vertLocation, direction, level, new_face_index):
""" update the state of the faces """
if direction == 'right':
self.x += (self.velocity + (level)) # adds speed as level increases
else:
self.x -= (self.velocity + (level))
self.y = vertLocation
if self.face_index != new_face_index: #sets a new face upon level ups
self.face_index = new_face_index
def __str__(self):
return "Face height=%f, width=%f, x=%f, y=%f, velocity=%f" % (self.height,
self.width,
self.x,
self.y,
self.velocity,
self.face_index)
| TotemModel |
lstm_mixed_loso.py | # CSL Paper: Dimensional speech emotion recognition from acoustic and text
# Changelog:
# 2019-09-01: initial version
# 2019-10-06: optimize MTL parameters with linear search (in progress)
# 2012-12-25: modified for ser_iemocap_loso_hfs.py
# feature is either std+mean or std+mean+silence (uncomment line 44)
import numpy as np
import pickle
import pandas as pd
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, CuDNNLSTM, Flatten, \
Embedding, Dropout, BatchNormalization, \
RNN, concatenate, Activation
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
import random as rn
import tensorflow as tf
rn.seed(123)
np.random.seed(99)
tf.set_random_seed(1234)
# load feature and labels
feat_iemocap = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')
vad_iemocap = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')
feat_improv_train = np.load('/home/s1820002/deepMLP/data/feat_hfs_gemaps_msp_train.npy')
feat_improv_test = np.load('/home/s1820002/deepMLP/data/feat_hfs_gemaps_msp_test.npy')
feat_improv = np.vstack([feat_improv_train, feat_improv_test])
list_path = '/home/s1820002/msp-improv/helper/improv_data.csv'
list_file = pd.read_csv(list_path, index_col=None)
list_sorted = list_file.sort_values(by=['wavfile'])
vad_list = [list_sorted['v'], list_sorted['a'], list_sorted['d']]
vad_improv = np.array(vad_list).T
# for LSTM input shape (batch, steps, features/channel)
feat = np.vstack([feat_iemocap, feat_improv])
vad = np.vstack([vad_iemocap, vad_improv])
feat = feat.reshape(feat.shape[0], 1, feat.shape[1])
# remove outlier, < 1, > 5
vad = np.where(vad==5.5, 5.0, vad)
vad = np.where(vad==0.5, 1.0, vad)
# standardization
scaled_feature = True
# set Dropout
do = 0.3
if scaled_feature == True:
scaler = StandardScaler()
scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))
scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])
feat = scaled_feat
else:
feat = feat
scaled_vad = True
# standardization
if scaled_vad:
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))
vad = scaled_vad
else:
vad = vad
# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics
def | (gold, pred):
gold = K.squeeze(gold, axis=-1)
pred = K.squeeze(pred, axis=-1)
gold_mean = K.mean(gold, axis=-1, keepdims=True)
pred_mean = K.mean(pred, axis=-1, keepdims=True)
covariance = (gold-gold_mean)*(pred-pred_mean)
gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)
pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)
ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())
return ccc
def ccc_loss(gold, pred):
# input (num_batches, seq_len, 1)
ccc_loss = K.constant(1.) - ccc(gold, pred)
return ccc_loss
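# For reference, the statistic computed above is the concordance correlation coefficient
#   CCC = 2*cov(gold, pred) / (var(gold) + var(pred) + (mean(gold) - mean(pred))**2)
# and the training loss is 1 - CCC, so perfect agreement (CCC = 1) yields zero loss.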
# API model; if RNN layers are used, all but the last stacked layer must set return_sequences=True
def api_model(alpha, beta, gamma):
# speech network
input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')
net_speech = BatchNormalization()(input_speech)
net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(128, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(64, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(32, return_sequences=True)(net_speech)
net_speech = CuDNNLSTM(16, return_sequences=True)(net_speech)
model_speech = Flatten()(net_speech)
#model_speech = Dropout(0.1)(net_speech)
target_names = ('v', 'a', 'd')
model_combined = [Dense(1, name=name)(model_speech) for name in target_names]
model = Model(input_speech, model_combined)
#model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])
model.compile(loss=ccc_loss,
loss_weights={'v': alpha, 'a': beta, 'd': gamma},
optimizer='adam', metrics=[ccc])
return model
#def main(alpha, beta, gamma):
model = api_model(0.1, 0.5, 0.4)
model.summary()
idx_train = np.hstack([np.arange(0, 7869), np.arange(10039, len(feat_improv_train))])
idx_test = np.hstack([np.arange(7869,10039), np.arange(10039 +
len(feat_improv_train), 18387)])
# 7869 first data of session 5 (for LOSO), 8000 for SD
earlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,
restore_best_weights=True)
hist = model.fit(feat[idx_train], vad[idx_train].T.tolist(), batch_size=200, #best:8
validation_split=0.2, epochs=180, verbose=1, shuffle=True,
callbacks=[earlystop])
metrik = model.evaluate(feat[idx_test], vad[idx_test].T.tolist())
print(metrik)
# save prediction, comment to avoid overwriting
#predict = model.predict(feat[6296:], batch_size=200)
#np.save('../data/predict_lstm_iemocap_sd',
# np.array(predict).reshape(3, 3743).T)
| ccc |
admission.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package podsecurity
import (
"context"
"errors"
"fmt"
"io"
"sync"
// install conversions for types we need to convert
_ "k8s.io/kubernetes/pkg/apis/apps/install"
_ "k8s.io/kubernetes/pkg/apis/batch/install"
_ "k8s.io/kubernetes/pkg/apis/core/install"
admissionv1 "k8s.io/api/admission/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
"k8s.io/apiserver/pkg/audit"
"k8s.io/apiserver/pkg/warning"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
podsecurityadmission "k8s.io/pod-security-admission/admission"
podsecurityconfigloader "k8s.io/pod-security-admission/admission/api/load"
podsecurityadmissionapi "k8s.io/pod-security-admission/api"
podsecuritymetrics "k8s.io/pod-security-admission/metrics"
"k8s.io/pod-security-admission/policy"
)
// PluginName is a string with the name of the plugin
const PluginName = "PodSecurity"
// Register registers a plugin
func Register(plugins *admission.Plugins) {
plugins.Register(PluginName, func(reader io.Reader) (admission.Interface, error) {
return newPlugin(reader)
})
}
// Plugin holds state for and implements the admission plugin.
type Plugin struct {
*admission.Handler
enabled bool
inspectedFeatureGates bool
client kubernetes.Interface
namespaceLister corev1listers.NamespaceLister
podLister corev1listers.PodLister
delegate *podsecurityadmission.Admission
}
var _ admission.ValidationInterface = &Plugin{}
var _ genericadmissioninit.WantsExternalKubeInformerFactory = &Plugin{}
var _ genericadmissioninit.WantsExternalKubeClientSet = &Plugin{}
// newPlugin creates a new admission plugin.
func newPlugin(reader io.Reader) (*Plugin, error) {
config, err := podsecurityconfigloader.LoadFromReader(reader)
if err != nil {
return nil, err
}
evaluator, err := policy.NewEvaluator(policy.DefaultChecks())
if err != nil {
return nil, fmt.Errorf("could not create PodSecurityRegistry: %w", err)
}
return &Plugin{
Handler: admission.NewHandler(admission.Create, admission.Update),
delegate: &podsecurityadmission.Admission{
Configuration: config,
Evaluator: evaluator,
Metrics: podsecuritymetrics.NewPrometheusRecorder(podsecurityadmissionapi.GetAPIVersion()),
PodSpecExtractor: podsecurityadmission.DefaultPodSpecExtractor{},
},
}, nil
}
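// For illustration, a minimal admission configuration accepted by the loader above looks
// roughly like the sketch below (the apiVersion depends on the cluster/library version,
// e.g. v1, v1beta1 or v1alpha1; see the pod-security-admission API for the exact schema):
//
//	apiVersion: pod-security.admission.config.k8s.io/v1
//	kind: PodSecurityConfiguration
//	defaults:
//	  enforce: "baseline"
//	  enforce-version: "latest"
//	  audit: "restricted"
//	  audit-version: "latest"
//	  warn: "restricted"
//	  warn-version: "latest"
//	exemptions:
//	  usernames: []
//	  runtimeClasses: []
//	  namespaces: []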
// SetExternalKubeInformerFactory registers an informer
func (p *Plugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactory) {
namespaceInformer := f.Core().V1().Namespaces()
p.namespaceLister = namespaceInformer.Lister()
p.podLister = f.Core().V1().Pods().Lister()
p.SetReadyFunc(namespaceInformer.Informer().HasSynced)
p.updateDelegate()
}
// SetExternalKubeClientSet sets the plugin's client
func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) {
p.client = client
p.updateDelegate()
}
func (p *Plugin) updateDelegate() {
// return early if we don't have what we need to set up the admission delegate
if p.namespaceLister == nil {
return
}
if p.podLister == nil {
return
}
if p.client == nil |
p.delegate.PodLister = podsecurityadmission.PodListerFromInformer(p.podLister)
p.delegate.NamespaceGetter = podsecurityadmission.NamespaceGetterFromListerAndClient(p.namespaceLister, p.client)
}
func (c *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
c.enabled = featureGates.Enabled(features.PodSecurity)
c.inspectedFeatureGates = true
}
// ValidateInitialization ensures all required options are set
func (p *Plugin) ValidateInitialization() error {
if !p.inspectedFeatureGates {
return fmt.Errorf("%s did not see feature gates", PluginName)
}
if err := p.delegate.CompleteConfiguration(); err != nil {
return fmt.Errorf("%s configuration error: %w", PluginName, err)
}
if err := p.delegate.ValidateConfiguration(); err != nil {
return fmt.Errorf("%s invalid: %w", PluginName, err)
}
return nil
}
var (
applicableResources = map[schema.GroupResource]bool{
corev1.Resource("pods"): true,
corev1.Resource("namespaces"): true,
}
)
func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
if !p.enabled {
return nil
}
gr := a.GetResource().GroupResource()
if !applicableResources[gr] && !p.delegate.PodSpecExtractor.HasPodSpec(gr) {
return nil
}
result := p.delegate.Validate(ctx, &lazyConvertingAttributes{Attributes: a})
for _, w := range result.Warnings {
warning.AddWarning(ctx, "", w)
}
for k, v := range result.AuditAnnotations {
audit.AddAuditAnnotation(ctx, podsecurityadmissionapi.AuditAnnotationPrefix+k, v)
}
if !result.Allowed {
// start with a generic forbidden error
retval := admission.NewForbidden(a, errors.New("Not allowed by PodSecurity")).(*apierrors.StatusError)
// use message/reason/details/code from admission library if populated
if result.Result != nil {
if len(result.Result.Message) > 0 {
retval.ErrStatus.Message = result.Result.Message
}
if len(result.Result.Reason) > 0 {
retval.ErrStatus.Reason = result.Result.Reason
}
if result.Result.Details != nil {
retval.ErrStatus.Details = result.Result.Details
}
if result.Result.Code != 0 {
retval.ErrStatus.Code = result.Result.Code
}
}
return retval
}
return nil
}
type lazyConvertingAttributes struct {
admission.Attributes
convertObjectOnce sync.Once
convertedObject runtime.Object
convertedObjectError error
convertOldObjectOnce sync.Once
convertedOldObject runtime.Object
convertedOldObjectError error
}
func (l *lazyConvertingAttributes) GetObject() (runtime.Object, error) {
l.convertObjectOnce.Do(func() {
l.convertedObject, l.convertedObjectError = convert(l.Attributes.GetObject())
})
return l.convertedObject, l.convertedObjectError
}
func (l *lazyConvertingAttributes) GetOldObject() (runtime.Object, error) {
l.convertOldObjectOnce.Do(func() {
l.convertedOldObject, l.convertedOldObjectError = convert(l.Attributes.GetOldObject())
})
return l.convertedOldObject, l.convertedOldObjectError
}
func (l *lazyConvertingAttributes) GetOperation() admissionv1.Operation {
return admissionv1.Operation(l.Attributes.GetOperation())
}
func (l *lazyConvertingAttributes) GetUserName() string {
return l.GetUserInfo().GetName()
}
func convert(in runtime.Object) (runtime.Object, error) {
var out runtime.Object
switch in.(type) {
case *core.Namespace:
out = &corev1.Namespace{}
case *core.Pod:
out = &corev1.Pod{}
case *core.ReplicationController:
out = &corev1.ReplicationController{}
case *core.PodTemplate:
out = &corev1.PodTemplate{}
case *apps.ReplicaSet:
out = &appsv1.ReplicaSet{}
case *apps.Deployment:
out = &appsv1.Deployment{}
case *apps.StatefulSet:
out = &appsv1.StatefulSet{}
case *apps.DaemonSet:
out = &appsv1.DaemonSet{}
case *batch.Job:
out = &batchv1.Job{}
case *batch.CronJob:
out = &batchv1.CronJob{}
default:
return in, fmt.Errorf("unexpected type %T", in)
}
if err := legacyscheme.Scheme.Convert(in, out, nil); err != nil {
return in, err
}
return out, nil
}
| {
return
} |
label_image_openCV_gui__041315.py | import cv2
import os
import numpy as np
import copy
WINDOW_NAME = "Label image"
WINDOW2_NAME = "Class image"
all_img = []
rootdir = './dataset'
drawing = False # true if mouse is pressed
Cnow = -1
ix,iy = -1,-1
i = 0
type_name = ".bmp"
def nothing(x):
pass
def draw_null(event,x,y,flags,param):
|
def color(Cnow):
if Cnow == 0 :
r = 0x17
g = 0xbe
b = 0xcf
elif Cnow == 1 :
r = 0xff
g = 0x7f
b = 0x0e
elif Cnow == 2 :
r = 0x2c
g = 0x77
b = 0xb4
elif Cnow == 3 :
r = 0x2c
g = 0xa0
b = 0x2c
elif Cnow == 4 :
r = 0xd6
g = 0x67
b = 0x28
elif Cnow == 5 :
r = 0x94
g = 0x67
b = 0xbd
elif Cnow == 6 :
r = 0x8c
g = 0x56
b = 0x4b
elif Cnow == 7 :
r = 0xe3
g = 0x77
b = 0xc2
elif Cnow == 8 :
r = 0x7f
g = 0x7f
b = 0x7f
elif Cnow == 9 :
r = 0xbc
g = 0xbd
b = 0x22
elif Cnow == 10 :
r = 0xff
g = 0xff
b = 0xff
elif Cnow == -1 :
print "please! select class number"
#else :
#return
# mouse callback function
def draw_circle(event,x,y,flags,param):
global ix,iy,drawing,img,img2,Cnow,r,g,b,height, width
sizeB = cv2.getTrackbarPos('Size',WINDOW_NAME)
# color of class
if Cnow == 0 :
r = 0x17
g = 0xbe
b = 0xcf
elif Cnow == 1 :
r = 0xff
g = 0x7f
b = 0x0e
elif Cnow == 2 :
r = 0x2c
g = 0x77
b = 0xb4
elif Cnow == 3 :
r = 0x2c
g = 0xa0
b = 0x2c
elif Cnow == 4 :
r = 0xd6
g = 0x67
b = 0x28
elif Cnow == 5 :
r = 0x94
g = 0x67
b = 0xbd
elif Cnow == 6 :
r = 0x8c
g = 0x56
b = 0x4b
elif Cnow == 7 :
r = 0xe3
g = 0x77
b = 0xc2
elif Cnow == 8 :
r = 0x7f
g = 0x7f
b = 0x7f
elif Cnow == 9 :
r = 0xbc
g = 0xbd
b = 0x22
elif Cnow == 10 :
r = 0xff
g = 0xff
b = 0xff
elif Cnow == -1 :
print "please! select class number"
#else :
# Cnow = Cnow
# r = "0x" + "%02x"%random.randint(0,255)
# g = 0x56
# b = 0xc2
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix,iy = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True and Cnow != -1:
cv2.circle(img,(x,y),sizeB, (r,g,b),-1)
cv2.circle(img2 ,(x,y),sizeB,Cnow,-1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if Cnow != -1:
cv2.circle(img,(x,y),sizeB,(r,g,b),-1)
cv2.circle(img2 ,(x,y),sizeB,Cnow,-1)
#print Cnow
cv2.imshow(WINDOW_NAME,img)
cv2.imshow(WINDOW2_NAME, img2 )
def draw_continue():
global img,img2
height, width = img2.shape
for i in range(height):
for j in range(width):
Cnow = img2[i,j]
if Cnow == 0 :
img[i,j] = [0x17,0xbe,0xcf]
elif Cnow == 1 :
img[i,j] = [0xff,0x7f,0x0e]
elif Cnow == 2 :
img[i,j] = [0x2c,0x77,0xb4]
elif Cnow == 3 :
img[i,j] = [0x2c,0xa0,0x2c]
elif Cnow == 4 :
img[i,j] = [0xd6,0x67,0x28]
elif Cnow == 5 :
img[i,j] = [0x94,0x67,0xbd]
elif Cnow == 6 :
img[i,j] = [0x8c,0x56,0x4b]
elif Cnow == 7 :
img[i,j] = [0xe3,0x77,0xc2]
elif Cnow == 8 :
img[i,j] = [0x7f,0x7f,0x7f]
elif Cnow == 9 :
img[i,j] = [0xbc,0xbd,0x22]
elif Cnow == 10 :
img[i,j] = [0xff,0xff,0xff]
if __name__ == '__main__':
for root,dirs,files in os.walk(rootdir):
for f in files:
#if f.endswith('jpg') or f.endswith('JPG') or f.endswith('bmp') or f.endswith('BMP') or f.endswith('png'):
all_img.append(f)
temp = all_img.pop()
img_name = os.path.basename(os.path.join(root,temp))
file_name = img_name.split(".")
if type_name in img_name :
print "---", img_name, "does not label---\n"
else:
print "--- labeling image : ", img_name
img = cv2.imread(os.path.join(root,temp),cv2.IMREAD_COLOR)
#imgclone
buffer_img = copy.copy(img)
height, width = buffer_img.shape[:2]
if os.path.isfile(os.path.join(root,file_name[0] + type_name)) == False:
img2 = np.zeros( (height,width), dtype=np.uint8)
img2[:] = [255]
print "none"
else:
img2 = cv2.imread(os.path.join(root,file_name[0] + type_name) ,cv2.IMREAD_GRAYSCALE) #img2 = cv2.imread(file_name[0] + type_name ,cv2.IMREAD_GRAYSCALE)
draw_continue()
print "have" , type(img2)
img_class = os.path.basename(os.path.join(root,temp))
print "--- labeling image : " , img_class
cv2.namedWindow(WINDOW_NAME)
cv2.namedWindow(WINDOW2_NAME)
cv2.createTrackbar('Size',WINDOW_NAME,5,20, nothing)
cv2.setMouseCallback(WINDOW_NAME,draw_circle)
cv2.setMouseCallback(WINDOW2_NAME, draw_null)
while(1):
cv2.imshow(WINDOW_NAME,img)
k = cv2.waitKey(0)
if k == ord('0'):
print "class: 0 - Oscillatoria sp."
Cnow = 0
elif k == ord('1'):
print "class: 1 - Chlorella sp."
Cnow = 1
elif k == ord('2'):
print "class: 2 - Closterium sp."
Cnow = 2
elif k == ord('3'):
print "class: 3 - Anabaena sp."
Cnow=3
elif k == ord('4'):
print "class: 4 - Melosira sp."
Cnow=4
elif k == ord('5'):
print "class: 5 - Cyclotella sp."
Cnow=5
elif k == ord('6'):
print "class: 6 - Actinastrum sp."
Cnow=6
elif k == ord('7'):
print "class: 7 - Ankistrodesmus sp."
Cnow=7
elif k == ord('8'):
print "class: 8 - Synedra sp."
Cnow=8
elif k == ord('9'):
print "class: 9 - Nitzschia sp."
Cnow=9 #case '-': cout<<"class null(256)"<<endl; Cnow=255; break;
elif k == ord('-'):
print " No class "
Cnow=10 #case '-': cout<<"class null(256)"<<endl; Cnow=255; break;
#elif k == ord('a'): # wait for ESC key to exit
# #a = raw_input('What is your class name ? (xxxx .sp)')
# x = input('What is your class name ? (xxxx .sp)')
# Cnow = Cnow + 1
# print ("Your class is " + Cnow +" - " + x)
elif k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
print "you pressed esc"
break
elif k == ord('s'): # wait for 's' key to save and exit
file_name = img_name.split(".")
for root,dirs,files in os.walk(rootdir):
cv2.imwrite(os.path.join(root, file_name[0] + type_name) , img2 )
break
cv2.destroyAllWindows()
#"%02d"%(i)+ "_"+
print "you pressed save " + file_name[0] + type_name + " ( " + img_name + " )"
break
Cnow = -1
i = i+1
| pass |
monitor.ts | import assert from 'assert'
import util from 'util'
import { Resource } from '../util/resource.js'
import { AwaitLock } from '../util/lock.js'
import { Database } from './database.js'
import { OpLog } from './log.js'
import { VM } from './vm.js'
import { ContractFraudProof, ContractFraudProofDetails } from './fraud-proofs.js'
import { IndexHistoryEntry, OpLogEntry, IndexBatchEntry, Key, keyToStr, keyToBuf } from '../types.js'
import {
CONTRACT_SOURCE_PATH,
PARTICIPANT_PATH_PREFIX,
ACK_PATH_PREFIX,
GENESIS_ACK_PATH,
InputSchema,
AckSchema
} from '../schemas.js'
import _isEqual from 'lodash.isequal'
enum MonitorState {
VALIDATING_GENESIS_SOURCE,
VALIDATING_GENESIS_INPUTS,
AWAITING_TX,
VALIDATING_TX
}
interface QueuedEffect {
effect: 'set-vm'|'add-input'|'remove-input'
value: any
}
export class ContractMonitor extends Resource {
expectedSeq = 1
expectedChanges: IndexBatchEntry[] = []
state: MonitorState = MonitorState.VALIDATING_GENESIS_SOURCE
inputs: Set<string> = new Set()
vm: VM|undefined
verifying = false
private _oplogs: Map<string, OpLog> = new Map()
private _oplogProcessedSeqs: Map<string, number> = new Map()
private _loadOplogLock = new AwaitLock()
private _historyGenerator: AsyncGenerator<IndexHistoryEntry>|undefined
private _queuedEffects: QueuedEffect[] = []
constructor (public db: Database) {
super()
}
get verifiedLength () {
return this.expectedSeq - 1
}
async _close () {
for (const oplog of this._oplogs.values()) {
await oplog.close()
}
this.vm?.close()
this._historyGenerator?.return(undefined)
this.verifying = false
}
async verify () {
assert(!this.verifying, 'Monitor already running verification')
this.reset()
this.verifying = true
for await (const entry of this.db.index.history()) {
await this.validate(entry)
}
this.verifying = false
}
watch () {
assert(!this.verifying, 'Monitor already running verification')
this.reset()
this.verifying = true
;(async () => {
this._historyGenerator = this.db.index.history({live: true})
for await (const entry of this._historyGenerator) {
try {
await this.validate(entry)
} catch (e) {
this.emit('violation', e)
return
}
}
})()
}
private reset () {
this.expectedSeq = 1
this.expectedChanges.length = 0
this.state = MonitorState.VALIDATING_GENESIS_SOURCE
this.inputs = new Set()
this._queuedEffects.length = 0
this._oplogProcessedSeqs = new Map()
}
private async transition (state: MonitorState) {
this.state = state
if (state === MonitorState.AWAITING_TX) {
await this.applyQueuedEffects()
}
}
private async validate (entry: IndexHistoryEntry) {
if (!this.verifying) return
this.assert(entry.seq === this.expectedSeq, new UnexpectedSeqError({entry, expectedSeq: this.expectedSeq}))
if (this.vm) {
this.vm.checkoutIndexAt(entry.seq)
}
switch (this.state) {
case MonitorState.VALIDATING_GENESIS_SOURCE: {
this.assert(entry.path === CONTRACT_SOURCE_PATH, new UnexpectedPathError({entry, expectedPath: CONTRACT_SOURCE_PATH}))
this.validateContractSourceChange(entry)
await this.transition(MonitorState.VALIDATING_GENESIS_INPUTS)
break
}
case MonitorState.VALIDATING_GENESIS_INPUTS: {
if (entry.path.startsWith(PARTICIPANT_PATH_PREFIX)) {
this.validateInputChange(entry)
} else if (entry.path === GENESIS_ACK_PATH) {
await this.transition(MonitorState.AWAITING_TX)
this.assert(this.inputs.size > 0, new NoGenesisInputsDeclaredError())
} else {
throw new UnexpectedPathError({entry, expectedPath: `${GENESIS_ACK_PATH} or a child of ${PARTICIPANT_PATH_PREFIX}`})
}
break
}
case MonitorState.AWAITING_TX: {
this.assert(entry.path.startsWith(ACK_PATH_PREFIX), new ChangeNotProducedByMonitorError({entry}))
this.validateAck(entry)
const ackValue = entry.value as AckSchema
const op = await this.fetchOp(ackValue.origin, ackValue.seq)
this.assert(!!op, new CannotFetchOpError({entry}))
const replayRes = await this.replayOp(ackValue, (op as OpLogEntry).value)
if ('error' in replayRes) {
this.assert(ackValue.success === false, new MonitorApplyFailedError({entry, errorMessage: replayRes.errorMessage}))
} else {
this.expectedChanges = (replayRes as IndexBatchEntry[])
}
await this.transition(MonitorState.VALIDATING_TX)
break
}
case MonitorState.VALIDATING_TX: {
const expectedChange = this.expectedChanges.shift() as IndexBatchEntry
this.assert(!entry.path.startsWith(ACK_PATH_PREFIX), new ChangeNotProducedByExecutorError({entry, expectedChange}))
this.validateChange(entry, expectedChange)
if (this.expectedChanges.length === 0){
await this.transition(MonitorState.AWAITING_TX)
}
break
}
}
this.expectedSeq++
this.emit('validated', entry)
}
private validateAck (entry: IndexHistoryEntry) {
this.assert(entry.value && typeof entry.value === 'object', new InvalidSchemaError({entry, description: 'value to be an object'}))
const ackValue = entry.value as AckSchema
this.assert(typeof ackValue.success === 'boolean', new InvalidSchemaError({entry, description: '.success to be a boolean'}))
this.assert(typeof ackValue.origin === 'string' && ackValue.origin.length === 64, new InvalidSchemaError({entry, description: '.origin to be a 64-character utf-8 string'}))
this.assert(typeof ackValue.seq === 'number', new InvalidSchemaError({entry, description: '.seq to be a number'}))
this.assert(typeof ackValue.ts === 'number', new InvalidSchemaError({entry, description: '.ts to be a number'}))
this.assert(this.inputs.has(ackValue.origin), new NonParticipantError({entry, oplogPubkey: ackValue.origin}))
this.assert(this.getNextOplogSeqToProcess(ackValue.origin) === ackValue.seq, new ProcessedOutOfOrderError({entry, oplogPubkey: ackValue.origin, expectedSeq: this.getNextOplogSeqToProcess(ackValue.origin), executedSeq: ackValue.seq}))
this.setOplogSeqProcessed(ackValue.origin, ackValue.seq)
if (ackValue.success) {
this.assert(typeof ackValue.numChanges === 'number', new InvalidSchemaError({entry, description: '.numChanges to be a number'}))
} else {
this.assert(typeof ackValue.error === 'string' || typeof ackValue.error === 'undefined', new InvalidSchemaError({entry, description: '.error to be a string or undefined'}))
}
}
getNextOplogSeqToProcess (pubkey: string) {
let lastProcessed = this._oplogProcessedSeqs.get(pubkey)
if (typeof lastProcessed === 'undefined') lastProcessed = -1
return lastProcessed + 1
}
setOplogSeqProcessed (pubkey: string, seq: number) {
this._oplogProcessedSeqs.set(pubkey, seq)
}
private validateChange (entry: IndexHistoryEntry, expectedChange: IndexBatchEntry) {
this.assert(entry.type === expectedChange.type, new ChangeMismatchError({entry, expectedChange, description: 'Change type is different.'}))
this.assert(entry.path === expectedChange.path, new ChangeMismatchError({entry, expectedChange, description: 'Change path is different.'}))
this.assert(_isEqual(entry.value, expectedChange.value), new ChangeMismatchError({entry, expectedChange, description: 'Change value is different.'}))
if (entry.path === CONTRACT_SOURCE_PATH) this.validateContractSourceChange(entry)
if (entry.path.startsWith(PARTICIPANT_PATH_PREFIX)) this.validateInputChange(entry)
}
private validateContractSourceChange (entry: IndexHistoryEntry) {
this.assert(typeof entry.value === 'string' && entry.value.length, new InvalidSchemaError({entry, description: 'a utf-8 string'}))
this._queuedEffects.push({effect: 'set-vm', value: entry.value})
}
private validateInputChange (entry: IndexHistoryEntry) {
this.assert(entry.value && typeof entry.value === 'object', new InvalidSchemaError({entry, description: 'value to be an object'}))
const inputValue = entry.value as InputSchema
this.assert(Buffer.isBuffer(inputValue.pubkey), new InvalidSchemaError({entry, description: '.pubkey to be a buffer'}))
this.assert(inputValue.pubkey?.byteLength === 32, new InvalidSchemaError({entry, description: '.pubkey to be a buffer of 32 bytes'}))
this.assert(typeof inputValue.active === 'boolean', new InvalidSchemaError({entry, description: '.active to be a boolean'}))
if (inputValue.active) {
this._queuedEffects.push({effect: 'add-input', value: keyToStr(inputValue.pubkey)})
} else {
this._queuedEffects.push({effect: 'remove-input', value: keyToStr(inputValue.pubkey)})
}
}
private async applyQueuedEffects () {
for (const effect of this._queuedEffects) {
switch (effect.effect) {
case 'set-vm': {
if (this.vm) {
await this.vm.close()
}
this.vm = new VM(this.db, effect.value)
await this.vm.open()
await this.vm.restrict()
break
}
case 'add-input':
this.inputs.add(effect.value)
break
case 'remove-input':
this.inputs.delete(effect.value)
break
}
}
this._queuedEffects.length = 0
}
private async replayOp (ack: AckSchema, opValue: any): Promise<IndexBatchEntry[]|{error: boolean, errorMessage: string}> {
const release = await this.db.lock('replayOp')
try {
assert(!!this.vm, 'Contract VM not initialized')
let applySuccess = undefined
let applyError = undefined
let batch: IndexBatchEntry[] = []
try {
const applyRes = await this.vm.contractApply(opValue, ack)
batch = this.db._mapApplyActionsToBatch(applyRes.actions)
applySuccess = true
} catch (e: any) {
applySuccess = false
applyError = e
}
if (!applySuccess) {
return {error: true, errorMessage: applyError.toString()}
}
return batch
} finally {
release()
}
}
private async fetchOplog (pubkey: Key): Promise<OpLog> {
await this._loadOplogLock.acquireAsync()
try {
const pubkeyBuf = keyToBuf(pubkey)
const pubkeyStr = keyToStr(pubkey)
let log = this.db.oplogs.find(log => log.pubkey.equals(pubkeyBuf))
if (log) return log
log = this._oplogs.get(pubkeyStr)
if (log) return log
log = new OpLog(await this.db.storage.getHypercore(pubkeyBuf))
this._oplogs.set(pubkeyStr, log)
return log
} finally {
this._loadOplogLock.release() | }
}
private async fetchOp (pubkey: Key, seq: number): Promise<OpLogEntry|undefined> {
const log = await this.fetchOplog(pubkey)
return await log.get(seq)
}
private assert (cond: any, error: VerificationError|ContractFraudProofDetails) {
if (!cond) {
if (error instanceof VerificationError) {
throw error
}
const fraudProof = new ContractFraudProof(this.db.index.latestProof, error as ContractFraudProofDetails)
throw fraudProof
}
}
}
export class VerificationError extends Error {
name: string
data: any
constructor (message: string, data?: any) {
super(message)
this.name = this.constructor.name
this.data = data
}
[util.inspect.custom] (depth: number, opts: {indentationLvl: number, stylize: Function}) {
let indent = ''
if (opts.indentationLvl) {
while (indent.length < opts.indentationLvl) indent += ' '
}
return this.constructor.name + '(\n' +
indent + ' An issue occurred during verification. This does not indicate that the contract was violated, but that verification failed to complete.\n' +
indent + ' message: ' + opts.stylize(this.message, 'string') + '\n' +
indent + ')'
}
}
export class UnexpectedSeqError extends VerificationError {
constructor ({entry, expectedSeq}: {entry: IndexHistoryEntry, expectedSeq: number}) {
super(`Unexpected message seq. Expected ${expectedSeq}, received ${entry.seq}`, {entry, expectedSeq})
}
}
export class CannotFetchOpError extends VerificationError {
constructor ({entry}: {entry: IndexHistoryEntry}) {
super(`Failed to fetch op from ${entry.value.origin} at seq ${entry.value.seq}`, {entry})
}
}
export class NoGenesisInputsDeclaredError extends ContractFraudProofDetails {
constructor () {
super(`No input oplogs declared in genesis sequence`)
}
}
export class UnexpectedPathError extends ContractFraudProofDetails {
constructor ({entry, expectedPath}: {entry: IndexHistoryEntry, expectedPath: string}) {
super(`Unexpected message path. Expected ${expectedPath}, received ${entry.path}`, {entry, expectedPath})
}
}
export class InvalidSchemaError extends ContractFraudProofDetails {
constructor ({entry, description}: {entry: IndexHistoryEntry, description: string}) {
super(`Unexpected message value. Expected ${description}`, {entry})
}
}
export class MonitorApplyFailedError extends ContractFraudProofDetails {
constructor ({entry, errorMessage}: {entry: IndexHistoryEntry, errorMessage: string}) {
super(`The monitor expected the operation to fail but the executor successfully processed it. ${errorMessage}`, {entry, errorMessage})
}
}
export class ChangeNotProducedByMonitorError extends ContractFraudProofDetails {
constructor ({entry}: {entry: IndexHistoryEntry}) {
super(`The executor produced a change which the monitor did not expect.`, {entry})
}
}
export class ChangeNotProducedByExecutorError extends ContractFraudProofDetails {
constructor ({entry, expectedChange}: {entry: IndexHistoryEntry, expectedChange: IndexBatchEntry}) {
super(`The executor did not produce a change which the monitor expected.`, {entry, expectedChange})
}
}
export class ChangeMismatchError extends ContractFraudProofDetails {
constructor ({entry, expectedChange, description}: {entry: IndexHistoryEntry, expectedChange: IndexBatchEntry, description: string}) {
super(`The executor produced a change which is different than the change expected by the monitor. ${description}`, {entry, expectedChange})
}
}
export class ProcessedOutOfOrderError extends ContractFraudProofDetails {
constructor ({entry, oplogPubkey, expectedSeq, executedSeq}: {entry: IndexHistoryEntry, oplogPubkey: string, expectedSeq: number, executedSeq: number}) {
super(`The executor processed an operation out of order. Expected to process ${expectedSeq} but actually processed ${executedSeq} for oplog ${oplogPubkey}`, {entry, oplogPubkey, expectedSeq, executedSeq})
}
}
export class NonParticipantError extends ContractFraudProofDetails {
constructor ({entry, oplogPubkey}: {entry: IndexHistoryEntry, oplogPubkey: string}) {
super(`The executor processed an operation from an oplog which is not a declared participant, oplog = ${oplogPubkey}`, {entry, oplogPubkey})
}
} | |
serial.rs | //! API for the integrated USART ports
//!
//! This only implements the usual asynchronous bidirectional 8-bit transfers.
//!
//! It's possible to use a read-only/write-only serial implementation with
//! `usartXrx`/`usartXtx`.
//!
//! # Examples
//! Echo
//! ``` no_run
//! use stm32f0xx_hal as hal;
//!
//! use crate::hal::prelude::*;
//! use crate::hal::serial::Serial;
//! use crate::hal::stm32;
//!
//! use nb::block;
//!
//! let mut p = stm32::Peripherals::take().unwrap();
//!
//! cortex_m::interrupt::free(|cs| {
//! let mut rcc = p.RCC.configure().sysclk(48.mhz()).freeze();
//!
//! let gpioa = p.GPIOA.split(&mut rcc);
//!
//! let tx = gpioa.pa9.into_alternate_af1(cs);
//! let rx = gpioa.pa10.into_alternate_af1(cs);
//!
//! let mut serial = Serial::usart1(p.USART1, (tx, rx), 115_200.bps(), &mut rcc);
//!
//! loop {
//! let received = block!(serial.read()).unwrap();
//! block!(serial.write(received)).ok();
//! }
//! });
//! ```
//!
//! Hello World
//! ``` no_run
//! use stm32f0xx_hal as hal;
//!
//! use crate::hal::prelude::*;
//! use crate::hal::serial::Serial;
//! use crate::hal::stm32;
//!
//! use nb::block;
//!
//! let mut p = stm32::Peripherals::take().unwrap();
//!
//! cortex_m::interrupt::free(|cs| {
//! let mut rcc = p.RCC.configure().sysclk(48.mhz()).freeze();
//!
//! let gpioa = p.GPIOA.split(&mut rcc);
//!
//! let tx = gpioa.pa9.into_alternate_af1(cs);
//!
//! let mut serial = Serial::usart1tx(p.USART1, tx, 115_200.bps(), &mut rcc);
//!
//! loop {
//! serial.write_str("Hello World!\r\n");
//! }
//! });
//! ```
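//!
//! A read-only counterpart is sketched below. This example is illustrative (it is not
//! part of the upstream documentation) and simply mirrors the `usart1rx` constructor
//! defined further down in this file.
//!
//! Receive only
//! ``` no_run
//! use stm32f0xx_hal as hal;
//!
//! use crate::hal::prelude::*;
//! use crate::hal::serial::Serial;
//! use crate::hal::stm32;
//!
//! use nb::block;
//!
//! let mut p = stm32::Peripherals::take().unwrap();
//!
//! cortex_m::interrupt::free(|cs| {
//! let mut rcc = p.RCC.configure().sysclk(48.mhz()).freeze();
//!
//! let gpioa = p.GPIOA.split(&mut rcc);
//!
//! let rx = gpioa.pa10.into_alternate_af1(cs);
//!
//! let mut serial = Serial::usart1rx(p.USART1, rx, 115_200.bps(), &mut rcc);
//!
//! loop {
//! // Block until a byte arrives, then discard it.
//! let _received = block!(serial.read()).unwrap();
//! }
//! });
//! ```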
use core::{
fmt::{Result, Write},
ops::Deref,
ptr,
};
use embedded_hal::prelude::*;
use crate::{gpio::*, rcc::Rcc, time::Bps};
use core::marker::PhantomData;
/// Serial error
#[derive(Debug)]
pub enum Error {
/// Framing error
Framing,
/// Noise error
Noise,
/// RX buffer overrun
Overrun,
/// Parity check error
Parity,
#[doc(hidden)]
_Extensible,
}
/// Interrupt event
pub enum Event {
/// New data has been received
Rxne,
/// New data can be sent
Txe,
/// Idle line state detected
Idle,
}
pub trait TxPin<USART> {}
pub trait RxPin<USART> {}
macro_rules! usart_pins {
($($USART:ident => {
tx => [$($tx:ty),+ $(,)*],
rx => [$($rx:ty),+ $(,)*],
})+) => {
$(
$(
impl TxPin<crate::stm32::$USART> for $tx {}
)+
$(
impl RxPin<crate::stm32::$USART> for $rx {}
)+
)+
}
}
#[cfg(any(
feature = "stm32f030",
feature = "stm32f031",
feature = "stm32f038",
feature = "stm32f042",
feature = "stm32f048",
feature = "stm32f051",
feature = "stm32f058",
feature = "stm32f070",
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart_pins! {
USART1 => {
tx => [gpioa::PA9<Alternate<AF1>>, gpiob::PB6<Alternate<AF0>>],
rx => [gpioa::PA10<Alternate<AF1>>, gpiob::PB7<Alternate<AF0>>],
}
}
#[cfg(any(
feature = "stm32f030x4",
feature = "stm32f030x6",
feature = "stm32f031",
feature = "stm32f038",
))]
usart_pins! {
USART1 => {
tx => [gpioa::PA2<Alternate<AF1>>, gpioa::PA14<Alternate<AF1>>],
rx => [gpioa::PA3<Alternate<AF1>>, gpioa::PA15<Alternate<AF1>>],
}
}
#[cfg(any(
feature = "stm32f030x8",
feature = "stm32f030xc",
feature = "stm32f042",
feature = "stm32f048",
feature = "stm32f051",
feature = "stm32f058",
feature = "stm32f070",
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart_pins! {
USART2 => {
tx => [gpioa::PA2<Alternate<AF1>>, gpioa::PA14<Alternate<AF1>>],
rx => [gpioa::PA3<Alternate<AF1>>, gpioa::PA15<Alternate<AF1>>],
}
}
#[cfg(any(
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart_pins! {
USART2 => {
tx => [gpiod::PD5<Alternate<AF0>>],
rx => [gpiod::PD6<Alternate<AF0>>],
}
}
#[cfg(any(
feature = "stm32f030xc",
feature = "stm32f070xb",
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart_pins! {
USART3 => {
// According to the datasheet PB10 is both tx and rx, but in stm32cubemx it's only tx
tx => [gpiob::PB10<Alternate<AF4>>, gpioc::PC4<Alternate<AF1>>, gpioc::PC10<Alternate<AF1>>],
rx => [gpiob::PB11<Alternate<AF4>>, gpioc::PC5<Alternate<AF1>>, gpioc::PC11<Alternate<AF1>>],
}
USART4 => {
tx => [gpioa::PA0<Alternate<AF4>>, gpioc::PC10<Alternate<AF0>>],
rx => [gpioa::PA1<Alternate<AF4>>, gpioc::PC11<Alternate<AF0>>],
}
}
#[cfg(any(
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart_pins! {
USART3 => {
tx => [gpiod::PD8<Alternate<AF0>>],
rx => [gpiod::PD9<Alternate<AF0>>],
}
}
// TODO: The ST SVD files are missing the entire PE enable register.
// Re-enable as soon as this gets fixed.
// #[cfg(any(feature = "stm32f091", feature = "stm32f098"))]
// usart_pins! {
// USART4 => {
// tx => [gpioe::PE8<Alternate<AF1>>],
// rx => [gpioe::PE9<Alternate<AF1>>],
// }
// }
#[cfg(any(feature = "stm32f030xc", feature = "stm32f091", feature = "stm32f098"))]
usart_pins! {
USART5 => {
tx => [gpioc::PC12<Alternate<AF2>>],
rx => [gpiod::PD2<Alternate<AF2>>],
}
USART6 => {
tx => [gpioa::PA4<Alternate<AF5>>, gpioc::PC0<Alternate<AF2>>],
rx => [gpioa::PA5<Alternate<AF5>>, gpioc::PC1<Alternate<AF2>>],
}
}
#[cfg(any(feature = "stm32f030xc", feature = "stm32f091"))]
usart_pins! {
USART5 => {
tx => [gpiob::PB3<Alternate<AF4>>],
rx => [gpiob::PB4<Alternate<AF4>>],
}
}
// TODO: The ST SVD files are missing the entire PE enable register.
// Re-enable as soon as this gets fixed.
#[cfg(any(feature = "stm32f091", feature = "stm32f098"))]
usart_pins! {
// USART5 => {
// tx => [gpioe::PE10<Alternate<AF1>>],
// rx => [gpioe::PE11<Alternate<AF1>>],
// }
USART6 => {
tx => [gpiof::PF9<Alternate<AF1>>],
rx => [gpiof::PF10<Alternate<AF1>>],
}
}
/// Serial abstraction
pub struct Serial<USART, TXPIN, RXPIN> {
usart: USART,
pins: (TXPIN, RXPIN),
}
// Common register
type SerialRegisterBlock = crate::stm32::usart1::RegisterBlock;
/// Serial receiver
pub struct Rx<USART> {
usart: *const SerialRegisterBlock,
_instance: PhantomData<USART>,
}
// NOTE(unsafe) Required to allow protected shared access in handlers
unsafe impl<USART> Send for Rx<USART> {}
/// Serial transmitter
pub struct Tx<USART> {
usart: *const SerialRegisterBlock,
_instance: PhantomData<USART>,
}
// NOTE(unsafe) Required to allow protected shared access in handlers
unsafe impl<USART> Send for Tx<USART> {}
macro_rules! usart {
($($USART:ident: ($usart:ident, $usarttx:ident, $usartrx:ident, $usartXen:ident, $apbenr:ident),)+) => {
$(
use crate::stm32::$USART;
impl<TXPIN, RXPIN> Serial<$USART, TXPIN, RXPIN>
where
TXPIN: TxPin<$USART>,
RXPIN: RxPin<$USART>,
{
/// Creates a new serial instance
pub fn $usart(usart: $USART, pins: (TXPIN, RXPIN), baud_rate: Bps, rcc: &mut Rcc) -> Self
{
let mut serial = Serial { usart, pins };
serial.configure(baud_rate, rcc);
// Enable transmission and receiving
serial.usart.cr1.modify(|_, w| w.te().set_bit().re().set_bit().ue().set_bit());
serial
}
}
impl<TXPIN> Serial<$USART, TXPIN, ()>
where
TXPIN: TxPin<$USART>,
{
/// Creates a new tx-only serial instance
pub fn $usarttx(usart: $USART, txpin: TXPIN, baud_rate: Bps, rcc: &mut Rcc) -> Self
{
let rxpin = ();
let mut serial = Serial { usart, pins: (txpin, rxpin) };
serial.configure(baud_rate, rcc);
// Enable transmission
serial.usart.cr1.modify(|_, w| w.te().set_bit().ue().set_bit());
serial
}
}
impl<RXPIN> Serial<$USART, (), RXPIN>
where
RXPIN: RxPin<$USART>,
{
/// Creates a new rx-only serial instance
pub fn $usartrx(usart: $USART, rxpin: RXPIN, baud_rate: Bps, rcc: &mut Rcc) -> Self
{
let txpin = ();
let mut serial = Serial { usart, pins: (txpin, rxpin) };
serial.configure(baud_rate, rcc);
// Enable receiving
serial.usart.cr1.modify(|_, w| w.re().set_bit().ue().set_bit());
serial
}
}
impl<TXPIN, RXPIN> Serial<$USART, TXPIN, RXPIN> {
fn configure(&mut self, baud_rate: Bps, rcc: &mut Rcc) {
// Enable clock for USART
rcc.regs.$apbenr.modify(|_, w| w.$usartXen().set_bit());
// Calculate correct baudrate divisor on the fly
let brr = rcc.clocks.pclk().0 / baud_rate.0;
self.usart.brr.write(|w| unsafe { w.bits(brr) });
// Reset other registers to disable advanced USART features
self.usart.cr2.reset();
self.usart.cr3.reset();
}
/// Starts listening for an interrupt event
pub fn listen(&mut self, event: Event) {
match event {
Event::Rxne => {
self.usart.cr1.modify(|_, w| w.rxneie().set_bit())
},
Event::Txe => {
self.usart.cr1.modify(|_, w| w.txeie().set_bit())
},
Event::Idle => {
self.usart.cr1.modify(|_, w| w.idleie().set_bit())
},
}
}
/// Stop listening for an interrupt event
pub fn unlisten(&mut self, event: Event) {
match event {
Event::Rxne => {
self.usart.cr1.modify(|_, w| w.rxneie().clear_bit())
},
Event::Txe => {
self.usart.cr1.modify(|_, w| w.txeie().clear_bit())
},
Event::Idle => {
self.usart.cr1.modify(|_, w| w.idleie().clear_bit())
},
}
}
}
)+
}
}
usart! {
USART1: (usart1, usart1tx, usart1rx, usart1en, apb2enr),
}
#[cfg(any(
feature = "stm32f030x8",
feature = "stm32f030xc",
feature = "stm32f042",
feature = "stm32f048",
feature = "stm32f051",
feature = "stm32f058",
feature = "stm32f070",
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart! {
USART2: (usart2, usart2tx, usart2rx,usart2en, apb1enr),
}
#[cfg(any(
feature = "stm32f030xc",
feature = "stm32f070xb",
feature = "stm32f071",
feature = "stm32f072",
feature = "stm32f078",
feature = "stm32f091",
feature = "stm32f098",
))]
usart! {
USART3: (usart3, usart3tx, usart3rx,usart3en, apb1enr),
USART4: (usart4, usart4tx, usart4rx,usart4en, apb1enr),
}
#[cfg(any(feature = "stm32f030xc", feature = "stm32f091", feature = "stm32f098"))]
usart! {
USART5: (usart5, usart5tx, usart5rx,usart5en, apb1enr),
USART6: (usart6, usart6tx, usart6rx,usart6en, apb2enr),
}
impl<USART> embedded_hal::serial::Read<u8> for Rx<USART>
where
USART: Deref<Target = SerialRegisterBlock>,
{
type Error = Error;
/// Tries to read a byte from the uart
fn read(&mut self) -> nb::Result<u8, Error> {
read(self.usart)
}
}
impl<USART, TXPIN, RXPIN> embedded_hal::serial::Read<u8> for Serial<USART, TXPIN, RXPIN>
where
USART: Deref<Target = SerialRegisterBlock>,
RXPIN: RxPin<USART>,
{
type Error = Error;
/// Tries to read a byte from the uart
fn read(&mut self) -> nb::Result<u8, Error> {
read(&*self.usart)
}
}
impl<USART> embedded_hal::serial::Write<u8> for Tx<USART>
where
USART: Deref<Target = SerialRegisterBlock>,
{
type Error = void::Void;
/// Ensures that none of the previously written words are still buffered
fn flush(&mut self) -> nb::Result<(), Self::Error> {
flush(self.usart)
}
/// Tries to write a byte to the uart
/// Fails if the transmit buffer is full
fn write(&mut self, byte: u8) -> nb::Result<(), Self::Error> {
write(self.usart, byte)
}
}
impl<USART, TXPIN, RXPIN> embedded_hal::serial::Write<u8> for Serial<USART, TXPIN, RXPIN>
where
USART: Deref<Target = SerialRegisterBlock>,
TXPIN: TxPin<USART>,
{
type Error = void::Void;
/// Ensures that none of the previously written words are still buffered
fn flush(&mut self) -> nb::Result<(), Self::Error> {
flush(&*self.usart)
}
/// Tries to write a byte to the uart
/// Fails if the transmit buffer is full
fn write(&mut self, byte: u8) -> nb::Result<(), Self::Error> {
write(&*self.usart, byte)
}
}
impl<USART, TXPIN, RXPIN> Serial<USART, TXPIN, RXPIN>
where
USART: Deref<Target = SerialRegisterBlock>,
{
/// Splits the UART peripheral into a Tx and an Rx part
/// This is required for sending/receiving
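///
/// A minimal sketch (illustrative, not from the upstream docs; assumes `serial` was
/// created with `Serial::usart1` as in the module-level echo example):
///
/// ```ignore
/// let (mut tx, mut rx) = serial.split();
/// // The halves can now be owned and used independently, e.g. from different contexts.
/// let byte = nb::block!(rx.read()).unwrap();
/// nb::block!(tx.write(byte)).ok();
/// ```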
pub fn split(self) -> (Tx<USART>, Rx<USART>)
where | (
Tx {
usart: &*self.usart,
_instance: PhantomData,
},
Rx {
usart: &*self.usart,
_instance: PhantomData,
},
)
}
pub fn release(self) -> (USART, (TXPIN, RXPIN)) {
(self.usart, self.pins)
}
}
impl<USART> Write for Tx<USART>
where
Tx<USART>: embedded_hal::serial::Write<u8>,
{
fn write_str(&mut self, s: &str) -> Result {
s.as_bytes()
.iter()
.try_for_each(|c| nb::block!(self.write(*c)))
.map_err(|_| core::fmt::Error)
}
}
impl<USART, TXPIN, RXPIN> Write for Serial<USART, TXPIN, RXPIN>
where
USART: Deref<Target = SerialRegisterBlock>,
TXPIN: TxPin<USART>,
{
fn write_str(&mut self, s: &str) -> Result {
s.as_bytes()
.iter()
.try_for_each(|c| nb::block!(self.write(*c)))
.map_err(|_| core::fmt::Error)
}
}
/// Ensures that none of the previously written words are still buffered
fn flush(usart: *const SerialRegisterBlock) -> nb::Result<(), void::Void> {
// NOTE(unsafe) atomic read with no side effects
let isr = unsafe { (*usart).isr.read() };
if isr.tc().bit_is_set() {
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
/// Tries to write a byte to the UART
/// Fails if the transmit buffer is full
fn write(usart: *const SerialRegisterBlock, byte: u8) -> nb::Result<(), void::Void> {
// NOTE(unsafe) atomic read with no side effects
let isr = unsafe { (*usart).isr.read() };
if isr.txe().bit_is_set() {
// NOTE(unsafe) atomic write to stateless register
// NOTE(write_volatile) 8-bit write that's not possible through the svd2rust API
unsafe { ptr::write_volatile(&(*usart).tdr as *const _ as *mut _, byte) }
Ok(())
} else {
Err(nb::Error::WouldBlock)
}
}
/// Tries to read a byte from the UART
fn read(usart: *const SerialRegisterBlock) -> nb::Result<u8, Error> {
// NOTE(unsafe) atomic read with no side effects
let isr = unsafe { (*usart).isr.read() };
// NOTE(unsafe) write accessor for atomic writes with no side effects
let icr = unsafe { &(*usart).icr };
let err = if isr.pe().bit_is_set() {
icr.write(|w| w.pecf().set_bit());
nb::Error::Other(Error::Parity)
} else if isr.fe().bit_is_set() {
icr.write(|w| w.fecf().set_bit());
nb::Error::Other(Error::Framing)
} else if isr.nf().bit_is_set() {
icr.write(|w| w.ncf().set_bit());
nb::Error::Other(Error::Noise)
} else if isr.ore().bit_is_set() {
icr.write(|w| w.orecf().set_bit());
nb::Error::Other(Error::Overrun)
} else if isr.rxne().bit_is_set() {
return Ok(unsafe { ptr::read_volatile(&(*usart).rdr as *const _ as *const _) });
} else {
return Err(nb::Error::WouldBlock);
};
// NOTE(unsafe) atomic write with no side effects other than clearing the errors we've just handled
unsafe {
(*usart).icr.write(|w| {
w.pecf()
.set_bit()
.fecf()
.set_bit()
.ncf()
.set_bit()
.orecf()
.set_bit()
})
};
Err(err)
} | TXPIN: TxPin<USART>,
RXPIN: RxPin<USART>,
{ |
views.py | from slick_reporting.views import SampleReportView
from .models import OrderLine
class | (SampleReportView):
report_model = OrderLine
date_field = 'date_placed' # or 'order__date_placed'
group_by = 'product'
columns = ['name', 'sku']
time_series_pattern = 'monthly'
time_series_columns = ['__total_quantity__']
| MonthlyProductSales |
ilsvrc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: ilsvrc.py
# Author: Yuxin Wu <[email protected]>
import os
import tarfile
import cv2
import numpy as np
import six
from six.moves import range
import xml.etree.ElementTree as ET
from ...utils import logger, get_rng, get_dataset_path
from ...utils.loadcaffe import get_caffe_pb
from ...utils.fs import mkdir_p, download
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['ILSVRCMeta', 'ILSVRC12']
CAFFE_ILSVRC12_URL = "http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz"
class ILSVRCMeta(object):
"""
Some metadata for ILSVRC dataset.
"""
def __init__(self, dir=None):
|
def get_synset_words_1000(self):
"""
:returns: a dict of {cls_number: cls_name}
"""
fname = os.path.join(self.dir, 'synset_words.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def get_synset_1000(self):
"""
:returns: a dict of {cls_number: synset_id}
"""
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def _download_caffe_meta(self):
fpath = download(CAFFE_ILSVRC12_URL, self.dir)
tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name):
"""
:param name: 'train' or 'val' or 'test'
:returns: list of (image filename, cls)
"""
assert name in ['train', 'val', 'test']
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname)
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
ret.append((name, int(cls)))
assert len(ret)
return ret
def get_per_pixel_mean(self, size=None):
"""
:param size: returned image size in [h, w]. Defaults to (256, 256)
:returns: per-pixel mean as an array of shape (h, w, 3) in range [0, 255]
"""
obj = self.caffepb.BlobProto()
mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
with open(mean_file, 'rb') as f:
obj.ParseFromString(f.read())
arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
arr = np.transpose(arr, [1,2,0])
if size is not None:
arr = cv2.resize(arr, size[::-1])
return arr
class ILSVRC12(RNGDataFlow):
def __init__(self, dir, name, meta_dir=None, shuffle=True,
dir_structure='original', include_bb=False):
"""
:param dir: A directory containing a subdir named `name`, where the
original ILSVRC12_`name`.tar gets decompressed.
:param name: 'train' or 'val' or 'test'
:param dir_structure: The dir structure of 'val' and 'test'.
If it is 'original', keep the original decompressed directory with a list
of image files (as below). If set to 'train', use the same
directory structure as 'train/', with class names as subdirectories.
:param include_bb: Include the bounding box. Maybe useful in training.
When `dir_structure=='original'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
test/
ILSVRC2012_test_00000001.JPEG
...
bbox/
n02134418/
n02134418_198.xml
...
...
After decompressing ILSVRC12_img_train.tar, you can use the following
command to build the above structure for `train/`:
.. code-block:: none
tar xvf ILSVRC12_img_train.tar -C train && cd train
find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'
Or:
for i in *.tar; do dir=${i%.tar}; echo $dir; mkdir -p $dir; tar xf $i -C $dir; done
"""
assert name in ['train', 'test', 'val']
self.full_dir = os.path.join(dir, name)
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
self.shuffle = shuffle
meta = ILSVRCMeta(meta_dir)
self.imglist = meta.get_image_list(name)
self.dir_structure = dir_structure
self.synset = meta.get_synset_1000()
if include_bb:
bbdir = os.path.join(dir, 'bbox') if not \
isinstance(include_bb, six.string_types) else include_bb
assert name == 'train', 'Bounding box only available for training'
self.bblist = ILSVRC12.get_training_bbox(bbdir, self.imglist)
self.include_bb = include_bb
def size(self):
return len(self.imglist)
def get_data(self):
"""
Produce original images of shape [h, w, 3(BGR)], and label,
and optionally a bbox of [xmin, ymin, xmax, ymax]
"""
idxs = np.arange(len(self.imglist))
add_label_to_fname = (self.name != 'train' and self.dir_structure != 'original')
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
if add_label_to_fname:
fname = os.path.join(self.full_dir, self.synset[label], fname)
else:
fname = os.path.join(self.full_dir, fname)
im = cv2.imread(fname.strip(), cv2.IMREAD_COLOR)
assert im is not None, fname
if im.ndim == 2:
im = np.expand_dims(im, 2).repeat(3,2)
if self.include_bb:
bb = self.bblist[k]
if bb is None:
bb = [0, 0, im.shape[1]-1, im.shape[0]-1]
yield [im, label, bb]
else:
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
size = map(int, [size[0].text, size[1].text])
box = root.find('object').find('bndbox').getchildren()
box = map(lambda x: float(x.text), box)
#box[0] /= size[0]
#box[1] /= size[1]
#box[2] /= size[0]
#box[3] /= size[1]
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
import tqdm
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except KeyboardInterrupt:
raise
except:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
if __name__ == '__main__':
meta = ILSVRCMeta()
#print(meta.get_synset_words_1000())
ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', include_bb=True,
shuffle=False)
ds.reset_state()
for k in ds.get_data():
from IPython import embed; embed()
break
| if dir is None:
dir = get_dataset_path('ilsvrc_metadata')
self.dir = dir
mkdir_p(self.dir)
self.caffepb = get_caffe_pb()
f = os.path.join(self.dir, 'synsets.txt')
if not os.path.isfile(f):
self._download_caffe_meta() |
14682.py | """
14682. Shifty Sum
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Running time: 60 ms
Solved on: September 20, 2020
"""
| res = N
for _ in range(k):
N *= 10
res += N
print(res)
if __name__ == '__main__':
main() | def main():
N, k = [int(input()) for _ in range(2)] |
user.js | import request from '@/utils/request'
export function addUser(data) {
return request({
url: 'users',
method: 'post',
params: {
isAdmin: false
},
auth: {
username: 'admin',
password: 'admin'
},
data
})
}
export function | (data) {
return request({
url: 'users',
method: 'post',
params: {
isAdmin: false
},
auth: {
username: 'admin',
password: 'admin'
},
data
})
}
| modifyUser |
test_endpoint.py | import time
from http import HTTPStatus
from typing import Dict, List, Optional, Type
import pytest
from aioauth.storage import BaseStorage
from aioauth.config import Settings
from aioauth.models import Token
from aioauth.requests import Post, Request
from aioauth.server import AuthorizationServer
from aioauth.types import ErrorType, GrantType, RequestMethod
from aioauth.utils import (
catch_errors_and_unavailability,
encode_auth_headers,
generate_token,
)
from .models import Defaults
@pytest.mark.asyncio
async def test_internal_server_error():
class EndpointClass:
available: Optional[bool] = True
def __init__(self, available: Optional[bool] = None):
if available is not None:
self.available = available
@catch_errors_and_unavailability
async def server(self, request):
raise Exception()
e = EndpointClass()
response = await e.server(Request(method=RequestMethod.POST))
assert response.status_code == HTTPStatus.BAD_REQUEST
@pytest.mark.asyncio
async def test_invalid_token(server: AuthorizationServer, defaults: Defaults):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
request_url = "https://localhost"
token = "invalid token"
post = Post(token=token)
request = Request(
user_id=user_id,
url=request_url,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert not response.content["active"]
assert response.status_code == HTTPStatus.OK
@pytest.mark.asyncio
async def | (
server: AuthorizationServer, storage: Dict[str, List], defaults: Defaults
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
settings = Settings(INSECURE_TRANSPORT=True)
token = Token(
user_id=user_id,
client_id=client_id,
expires_in=settings.TOKEN_EXPIRES_IN,
refresh_token_expires_in=settings.REFRESH_TOKEN_EXPIRES_IN,
access_token=generate_token(42),
refresh_token=generate_token(48),
issued_at=int(time.time() - settings.TOKEN_EXPIRES_IN),
scope=defaults.scope,
)
storage["tokens"].append(token)
post = Post(token=token.access_token)
request = Request(
user_id=user_id,
settings=settings,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.OK
assert not response.content["active"]
@pytest.mark.asyncio
async def test_valid_token(
server: AuthorizationServer,
storage: Dict[str, List],
defaults: Defaults,
settings: Settings,
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
token = storage["tokens"][0]
post = Post(token=token.access_token)
request = Request(
user_id=user_id,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
settings=settings,
)
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.OK
assert response.content["active"]
@pytest.mark.asyncio
async def test_introspect_revoked_token(
server: AuthorizationServer,
storage: Dict[str, List],
defaults: Defaults,
settings: Settings,
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
request_url = "https://localhost"
token = storage["tokens"][0]
post = Post(
grant_type=GrantType.TYPE_REFRESH_TOKEN,
refresh_token=token.refresh_token,
)
request = Request(
user_id=user_id,
settings=settings,
url=request_url,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_response(request)
assert response.status_code == HTTPStatus.OK
# Check that refreshed token was revoked
post = Post(token=token.access_token)
request = Request(
settings=settings,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert not response.content["active"], "The refresh_token must be revoked"
@pytest.mark.asyncio
async def test_endpoint_availability(db_class: Type[BaseStorage]):
server = AuthorizationServer(storage=db_class())
request = Request(method=RequestMethod.POST, settings=Settings(AVAILABLE=False))
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.BAD_REQUEST
assert response.content["error"] == ErrorType.TEMPORARILY_UNAVAILABLE
| test_expired_token |
traphandlers.rs | // This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
use super::trapcode::TrapCode;
use crate::vmcontext::{VMFunctionBody, VMFunctionEnvironment, VMTrampoline};
use backtrace::Backtrace;
use std::any::Any;
use std::cell::{Cell, UnsafeCell};
use std::error::Error;
use std::io;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::sync::Once;
pub use tls::TlsRestore;
cfg_if::cfg_if! {
if #[cfg(unix)] {
/// Function which may handle custom signals while processing traps.
pub type TrapHandlerFn = dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool;
} else if #[cfg(target_os = "windows")] {
/// Function which may handle custom signals while processing traps.
pub type TrapHandlerFn = dyn Fn(winapi::um::winnt::PEXCEPTION_POINTERS) -> bool;
}
}
extern "C" {
fn wasmer_register_setjmp(
jmp_buf: *mut *const u8,
callback: extern "C" fn(*mut u8),
payload: *mut u8,
) -> i32;
fn wasmer_unwind(jmp_buf: *const u8) -> !;
}
cfg_if::cfg_if! {
if #[cfg(unix)] {
static mut PREV_SIGSEGV: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
static mut PREV_SIGBUS: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
static mut PREV_SIGILL: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
static mut PREV_SIGFPE: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
unsafe fn platform_init() {
let register = |slot: &mut MaybeUninit<libc::sigaction>, signal: i32| {
let mut handler: libc::sigaction = mem::zeroed();
// The flags here are relatively careful, and they are...
//
// SA_SIGINFO gives us access to information like the program
// counter from where the fault happened.
//
// SA_ONSTACK allows us to handle signals on an alternate stack,
// so that the handler can run in response to running out of
// stack space on the main stack. Rust installs an alternate
// stack with sigaltstack, so we rely on that.
//
// SA_NODEFER allows us to reenter the signal handler if we
// crash while handling the signal, and fall through to the
// Breakpad handler by testing handlingSegFault.
handler.sa_flags = libc::SA_SIGINFO | libc::SA_NODEFER | libc::SA_ONSTACK;
handler.sa_sigaction = trap_handler as usize;
libc::sigemptyset(&mut handler.sa_mask);
if libc::sigaction(signal, &handler, slot.as_mut_ptr()) != 0 {
panic!(
"unable to install signal handler: {}",
io::Error::last_os_error(),
);
}
};
// Allow handling OOB with signals on all architectures
register(&mut PREV_SIGSEGV, libc::SIGSEGV);
// Handle `unreachable` instructions which execute `ud2` right now
register(&mut PREV_SIGILL, libc::SIGILL);
// x86 uses SIGFPE to report division by zero
if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") {
register(&mut PREV_SIGFPE, libc::SIGFPE);
}
// On ARM, handle Unaligned Accesses.
// On Darwin, guard page accesses are raised as SIGBUS.
if cfg!(target_arch = "arm") || cfg!(target_vendor = "apple") {
register(&mut PREV_SIGBUS, libc::SIGBUS);
}
}
#[cfg(target_vendor = "apple")]
unsafe fn thread_stack() -> (usize, usize) {
let this_thread = libc::pthread_self();
let stackaddr = libc::pthread_get_stackaddr_np(this_thread);
let stacksize = libc::pthread_get_stacksize_np(this_thread);
(stackaddr as usize - stacksize, stacksize)
}
#[cfg(not(target_vendor = "apple"))]
unsafe fn thread_stack() -> (usize, usize) {
let this_thread = libc::pthread_self();
let mut thread_attrs: libc::pthread_attr_t = mem::zeroed();
let mut stackaddr: *mut libc::c_void = ptr::null_mut();
let mut stacksize: libc::size_t = 0;
#[cfg(not(target_os = "freebsd"))]
let ok = libc::pthread_getattr_np(this_thread, &mut thread_attrs);
#[cfg(target_os = "freebsd")]
let ok = libc::pthread_attr_get_np(this_thread, &mut thread_attrs);
if ok == 0 {
libc::pthread_attr_getstack(&thread_attrs, &mut stackaddr, &mut stacksize);
libc::pthread_attr_destroy(&mut thread_attrs);
}
(stackaddr as usize, stacksize)
}
unsafe extern "C" fn trap_handler(
signum: libc::c_int,
siginfo: *mut libc::siginfo_t,
context: *mut libc::c_void,
) {
let previous = match signum {
libc::SIGSEGV => &PREV_SIGSEGV,
libc::SIGBUS => &PREV_SIGBUS,
libc::SIGFPE => &PREV_SIGFPE,
libc::SIGILL => &PREV_SIGILL,
_ => panic!("unknown signal: {}", signum),
};
// We try to get the Code trap associated to this signal
let maybe_signal_trap = match signum {
libc::SIGSEGV | libc::SIGBUS => {
let addr = (*siginfo).si_addr() as usize;
let (stackaddr, stacksize) = thread_stack();
// The stack and its guard page covers the
// range [stackaddr - guard pages .. stackaddr + stacksize).
// We assume the guard page is 1 page, and pages are 4KiB (or 16KiB in Apple Silicon)
if stackaddr - region::page::size() <= addr && addr < stackaddr + stacksize {
Some(TrapCode::StackOverflow)
} else {
Some(TrapCode::HeapAccessOutOfBounds)
}
}
_ => None,
};
let handled = tls::with(|info| {
// If no wasm code is executing, we don't handle this as a wasm
// trap.
let info = match info {
Some(info) => info,
None => return false,
};
// If we hit an exception while handling a previous trap, that's
// quite bad, so bail out and let the system handle this
// recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap
// handling, and reset our trap handling flag. Then we figure
// out what to do based on the result of the trap handling.
let jmp_buf = info.handle_trap(
get_pc(context),
false,
maybe_signal_trap,
|handler| handler(signum, siginfo, context),
);
// Figure out what to do based on the result of this handling of
// the trap. Note that our sentinel value of 1 means that the
// exception was handled by a custom exception handler, so we
// keep executing.
if jmp_buf.is_null() {
false
} else if jmp_buf as usize == 1 {
true
} else {
wasmer_unwind(jmp_buf)
}
});
if handled {
return;
}
// This signal is not for any compiled wasm code we expect, so we
// need to forward the signal to the next handler. If there is no
// next handler (SIG_IGN or SIG_DFL), then it's time to crash. To do
// this, we set the signal back to its original disposition and
// return. This will cause the faulting op to be re-executed which
// will crash in the normal way. If there is a next handler, call
// it. It will either crash synchronously, fix up the instruction
// so that execution can continue and return, or trigger a crash by
// returning the signal to its original disposition and returning.
let previous = &*previous.as_ptr();
if previous.sa_flags & libc::SA_SIGINFO != 0 {
mem::transmute::<
usize,
extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void),
>(previous.sa_sigaction)(signum, siginfo, context)
} else if previous.sa_sigaction == libc::SIG_DFL ||
previous.sa_sigaction == libc::SIG_IGN
{
libc::sigaction(signum, previous, ptr::null_mut());
} else {
mem::transmute::<usize, extern "C" fn(libc::c_int)>(
previous.sa_sigaction
)(signum)
}
}
unsafe fn get_pc(cx: *mut libc::c_void) -> *const u8 {
cfg_if::cfg_if! {
if #[cfg(all(target_os = "linux", target_arch = "x86_64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.gregs[libc::REG_RIP as usize] as *const u8
} else if #[cfg(all(target_os = "linux", target_arch = "x86"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.gregs[libc::REG_EIP as usize] as *const u8
} else if #[cfg(all(target_os = "android", target_arch = "x86"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.gregs[libc::REG_EIP as usize] as *const u8
} else if #[cfg(all(target_os = "linux", target_arch = "aarch64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.pc as *const u8
} else if #[cfg(all(target_os = "android", target_arch = "aarch64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.pc as *const u8
} else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
let cx = &*(cx as *const libc::ucontext_t);
(*cx.uc_mcontext).__ss.__rip as *const u8
} else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
use std::mem;
// TODO: This should be integrated into rust/libc
// Related issue: https://github.com/rust-lang/libc/issues/1977
#[allow(non_camel_case_types)]
pub struct __darwin_arm_thread_state64 {
pub __x: [u64; 29], /* General purpose registers x0-x28 */
pub __fp: u64, /* Frame pointer x29 */
pub __lr: u64, /* Link register x30 */
pub __sp: u64, /* Stack pointer x31 */
pub __pc: u64, /* Program counter */
pub __cpsr: u32, /* Current program status register */
pub __pad: u32, /* Same size for 32-bit or 64-bit clients */
}
let cx = &*(cx as *const libc::ucontext_t);
let uc_mcontext = mem::transmute::<_, *const __darwin_arm_thread_state64>(&(*cx.uc_mcontext).__ss);
(*uc_mcontext).__pc as *const u8
} else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
let cx = &*(cx as *const libc::ucontext_t);
cx.uc_mcontext.mc_rip as *const u8
} else if #[cfg(all(target_os = "freebsd", target_arch = "aarch64"))] {
#[repr(align(16))]
#[allow(non_camel_case_types)]
pub struct gpregs {
pub gp_x: [libc::register_t; 30],
pub gp_lr: libc::register_t,
pub gp_sp: libc::register_t,
pub gp_elr: libc::register_t,
pub gp_spsr: u32,
pub gp_pad: libc::c_int,
};
#[repr(align(16))]
#[allow(non_camel_case_types)]
pub struct fpregs {
pub fp_q: [u128; 32],
pub fp_sr: u32,
pub fp_cr: u32,
pub fp_flags: libc::c_int,
pub fp_pad: libc::c_int,
};
#[repr(align(16))]
#[allow(non_camel_case_types)]
pub struct mcontext_t {
pub mc_gpregs: gpregs,
pub mc_fpregs: fpregs,
pub mc_flags: libc::c_int,
pub mc_pad: libc::c_int,
pub mc_spare: [u64; 8],
};
#[repr(align(16))]
#[allow(non_camel_case_types)]
pub struct ucontext_t {
pub uc_sigmask: libc::sigset_t,
pub uc_mcontext: mcontext_t,
pub uc_link: *mut ucontext_t,
pub uc_stack: libc::stack_t,
pub uc_flags: libc::c_int,
__spare__: [libc::c_int; 4],
}
let cx = &*(cx as *const ucontext_t);
cx.uc_mcontext.mc_gpregs.gp_elr as *const u8
} else {
compile_error!("unsupported platform");
}
}
}
} else if #[cfg(target_os = "windows")] {
use winapi::um::errhandlingapi::*;
use winapi::um::winnt::*;
use winapi::um::minwinbase::*;
use winapi::vc::excpt::*;
unsafe fn platform_init() {
// our trap handler needs to go first, so that we can recover from
// wasm faults and continue execution, so pass `1` as a true value
// here.
if AddVectoredExceptionHandler(1, Some(exception_handler)).is_null() {
panic!("failed to add exception handler: {}", io::Error::last_os_error());
}
}
unsafe extern "system" fn exception_handler(
exception_info: PEXCEPTION_POINTERS
) -> LONG {
// Check the kind of exception, since we only handle a subset within
// wasm code. If anything else happens we want to defer to whatever
// the rest of the system wants to do for this exception.
let record = &*(*exception_info).ExceptionRecord;
if record.ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
record.ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION &&
record.ExceptionCode != EXCEPTION_STACK_OVERFLOW &&
record.ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO &&
record.ExceptionCode != EXCEPTION_INT_OVERFLOW
{
return EXCEPTION_CONTINUE_SEARCH;
}
// FIXME: this is what the previous C++ did to make sure that TLS
// works by the time we execute this trap handling code. This isn't
// exactly super easy to call from Rust though and it's not clear we
// necessarily need to do so. Leaving this here in case we need this
// in the future, but for now we can probably wait until we see a
// strange fault before figuring out how to reimplement this in
// Rust.
//
// if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
// return EXCEPTION_CONTINUE_SEARCH;
// }
// This is basically the same as the unix version above, only with a
// few parameters tweaked here and there.
tls::with(|info| {
let info = match info {
Some(info) => info,
None => return EXCEPTION_CONTINUE_SEARCH,
};
#[cfg(target_pointer_width = "32")]
let pc = (*(*exception_info).ContextRecord).Eip as *const u8;
#[cfg(target_pointer_width = "64")]
let pc = (*(*exception_info).ContextRecord).Rip as *const u8;
let jmp_buf = info.handle_trap(
pc,
record.ExceptionCode == EXCEPTION_STACK_OVERFLOW,
// TODO: fix the signal trap associated to memory access in Windows
None,
|handler| handler(exception_info),
);
if jmp_buf.is_null() {
EXCEPTION_CONTINUE_SEARCH
} else if jmp_buf as usize == 1 {
EXCEPTION_CONTINUE_EXECUTION
} else {
wasmer_unwind(jmp_buf)
}
})
}
}
}
/// Globally-set callback to determine whether a program counter is actually a
/// wasm trap.
///
/// This is initialized during `init_traps` below. The definition lives within
/// `wasmer` currently.
static mut IS_WASM_PC: fn(usize) -> bool = |_| false;
/// This function is required to be called before any WebAssembly is entered.
/// This will configure global state such as signal handlers to prepare the
/// process to receive wasm traps.
///
/// This function must not only be called globally once before entering
/// WebAssembly but it must also be called once-per-thread that enters
/// WebAssembly. Currently in wasmer's integration this function is called on
/// creation of a `Store`.
///
/// The `is_wasm_pc` argument is used when a trap happens to determine if a
/// program counter is the pc of an actual wasm trap or not. This is then used
/// to disambiguate faults that happen due to wasm and faults that happen due to
/// bugs in Rust or elsewhere.
pub fn init_traps(is_wasm_pc: fn(usize) -> bool) {
static INIT: Once = Once::new();
INIT.call_once(|| unsafe {
IS_WASM_PC = is_wasm_pc;
platform_init();
});
}
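// Illustrative sketch (not part of the original source): how an embedder might
// wire up trap initialization. `example_is_wasm_pc` is a hypothetical
// predicate; a real engine would consult its allocated code regions instead of
// returning a constant.
#[allow(dead_code)]
fn example_is_wasm_pc(_pc: usize) -> bool {
    // Assumption for this sketch: no compiled wasm regions are registered.
    false
}
#[allow(dead_code)]
fn example_engine_startup() {
    // Idempotent: the internal `Once` ensures handlers are installed only once.
    init_traps(example_is_wasm_pc);
}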
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not yet returned.
/// Additionally no Rust destructors may be on the stack.
/// They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::UserTrap(data)))
}
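// Illustrative sketch (assumption): a host-side helper that aborts the
// currently executing wasm with a custom error message. Per the safety
// contract above it may only run while wasm frames are on the stack.
#[allow(dead_code)]
unsafe fn example_abort_wasm(msg: &str) -> ! {
    let err: Box<dyn Error + Send + Sync> = msg.to_string().into();
    raise_user_trap(err)
}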
/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not yet returned.
/// Additionally no Rust destructors may be on the stack.
/// They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::LibTrap(trap)))
}
/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not returned. Additionally no Rust destructors may be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}
#[cfg(target_os = "windows")]
fn reset_guard_page() {
extern "C" {
fn _resetstkoflw() -> winapi::ctypes::c_int;
}
// We need to restore guard page under stack to handle future stack overflows properly.
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/resetstkoflw?view=vs-2019
if unsafe { _resetstkoflw() } == 0 {
panic!("failed to restore stack guard page");
}
}
#[cfg(not(target_os = "windows"))]
fn reset_guard_page() {}
/// Stores trace message with backtrace.
#[derive(Debug)]
pub enum Trap {
/// A user-raised trap through `raise_user_trap`.
User(Box<dyn Error + Send + Sync>),
/// A trap raised from the Wasm generated code
///
/// Note: this trap is deterministic (assuming a deterministic host implementation)
Wasm {
/// The program counter in generated code where this trap happened.
pc: usize,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
/// Optional trapcode associated to the signal that caused the trap
signal_trap: Option<TrapCode>,
},
/// A trap raised from a wasm libcall
///
/// Note: this trap is deterministic (assuming a deterministic host implementation)
Lib {
/// Code of the trap.
trap_code: TrapCode,
/// Native stack backtrace at the time the trap occurred
backtrace: Backtrace,
},
/// A trap indicating that the runtime was unable to allocate sufficient memory.
///
/// Note: this trap is nondeterministic, since it depends on the host system.
OOM {
/// Native stack backtrace at the time the OOM occurred
backtrace: Backtrace,
},
}
impl Trap {
/// Construct a new Wasm trap with the given source location and backtrace.
///
/// Internally saves a backtrace when constructed.
pub fn wasm(pc: usize, backtrace: Backtrace, signal_trap: Option<TrapCode>) -> Self {
Trap::Wasm {
pc,
backtrace,
signal_trap,
}
}
/// Construct a new Wasm trap with the given trap code.
///
/// Internally saves a backtrace when constructed.
pub fn lib(trap_code: TrapCode) -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::Lib {
trap_code,
backtrace,
}
}
/// Construct a new OOM trap with the given source location and trap code.
///
/// Internally saves a backtrace when constructed.
pub fn oom() -> Self {
let backtrace = Backtrace::new_unresolved();
Trap::OOM { backtrace }
}
}
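// Illustrative sketch (assumption): how runtime code might surface failures as
// `Trap` values using the constructors above. `TrapCode::StackOverflow` is the
// only code referenced elsewhere in this file; other codes would work the same
// way.
#[allow(dead_code)]
fn example_traps() -> (Trap, Trap) {
    // Both constructors capture an unresolved backtrace; resolution is
    // deferred until the trap is actually reported.
    (Trap::lib(TrapCode::StackOverflow), Trap::oom())
}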
/// Call the wasm function pointed to by `callee`.
///
/// * `trap_handler` - the handler consulted when a trap is raised during the call
/// * `vmctx` - the callee vmctx argument
/// * `trampoline` - the jit-generated trampoline whose ABI takes 3 values: the
/// callee vmctx, the `callee` argument below, and then the `values_vec`
/// argument.
/// * `callee` - the second argument to the `trampoline` function
/// * `values_vec` - points to a buffer which holds the incoming arguments, and to
/// which the outgoing return values will be written.
///
/// # Safety
///
/// Wildly unsafe because it calls raw function pointers and reads/writes raw
/// function pointers.
pub unsafe fn wasmer_call_trampoline(
trap_handler: &impl TrapHandler,
vmctx: VMFunctionEnvironment,
trampoline: VMTrampoline,
callee: *const VMFunctionBody,
values_vec: *mut u8,
) -> Result<(), Trap> {
catch_traps(trap_handler, || {
mem::transmute::<_, extern "C" fn(VMFunctionEnvironment, *const VMFunctionBody, *mut u8)>(
trampoline,
)(vmctx, callee, values_vec);
})
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F>(trap_handler: &dyn TrapHandler, mut closure: F) -> Result<(), Trap>
where
F: FnMut(),
{
return CallThreadState::new(trap_handler).with(|cx| {
wasmer_register_setjmp(
cx.jmp_buf.as_ptr(),
call_closure::<F>,
&mut closure as *mut F as *mut u8,
)
});
extern "C" fn call_closure<F>(payload: *mut u8)
where
F: FnMut(),
{
unsafe { (*(payload as *mut F))() }
}
}
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`, with the closure contents.
///
/// The main difference between this method and `catch_traps` is that this one
/// is able to return the results from the closure.
///
/// # Safety
///
/// Check [`catch_traps`].
pub unsafe fn catch_traps_with_result<F, R>(
trap_handler: &dyn TrapHandler,
mut closure: F,
) -> Result<R, Trap>
where
F: FnMut() -> R,
{
let mut global_results = MaybeUninit::<R>::uninit();
catch_traps(trap_handler, || {
global_results.as_mut_ptr().write(closure());
})?;
Ok(global_results.assume_init())
}
/// Temporary state stored on the stack which is registered in the `tls` module
/// below for calls into wasm.
pub struct CallThreadState<'a> {
unwind: UnsafeCell<MaybeUninit<UnwindReason>>,
jmp_buf: Cell<*const u8>,
reset_guard_page: Cell<bool>,
prev: Cell<tls::Ptr>,
trap_handler: &'a (dyn TrapHandler + 'a),
handling_trap: Cell<bool>,
}
/// A package of functionality needed by `catch_traps` to figure out what to do
/// when handling a trap.
///
/// Note that this is an `unsafe` trait at least because it's being run in the
/// context of a synchronous signal handler, so it needs to be careful to not
/// access too much state in answering these queries.
pub unsafe trait TrapHandler {
/// Converts this object into an `Any` to dynamically check its type.
fn as_any(&self) -> &dyn Any;
/// Uses `call` to call a custom signal handler, if one is specified.
///
/// Returns `true` if `call` returns true, otherwise returns `false`.
fn custom_trap_handler(&self, call: &dyn Fn(&TrapHandlerFn) -> bool) -> bool;
}
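// Illustrative sketch (assumption): a minimal handler an embedder could pass
// to `catch_traps` / `catch_traps_with_result` when it has no per-instance
// signal handling of its own.
#[allow(dead_code)]
struct NoopTrapHandler;
unsafe impl TrapHandler for NoopTrapHandler {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn custom_trap_handler(&self, _call: &dyn Fn(&TrapHandlerFn) -> bool) -> bool {
        // No custom handler is registered, so the default trap logic runs.
        false
    }
}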
enum UnwindReason {
/// A panic caused by the host
Panic(Box<dyn Any + Send>),
/// A custom error triggered by the user
UserTrap(Box<dyn Error + Send + Sync>),
/// A Trap triggered by a wasm libcall
LibTrap(Trap),
/// A trap caused by the Wasm generated code
WasmTrap {
backtrace: Backtrace,
pc: usize,
signal_trap: Option<TrapCode>,
},
}
impl<'a> CallThreadState<'a> {
#[inline]
fn new(trap_handler: &'a (dyn TrapHandler + 'a)) -> CallThreadState<'a> {
Self {
unwind: UnsafeCell::new(MaybeUninit::uninit()),
jmp_buf: Cell::new(ptr::null()),
reset_guard_page: Cell::new(false),
prev: Cell::new(ptr::null()),
trap_handler,
handling_trap: Cell::new(false),
}
}
fn with(self, closure: impl FnOnce(&CallThreadState) -> i32) -> Result<(), Trap> {
let ret = tls::set(&self, || closure(&self))?;
if ret != 0 {
return Ok(());
}
// We will only reach this path if ret == 0. And that will
// only happen if a trap did happen. As such, it's safe to
// assume that the `unwind` field is already initialized
// at this moment.
match unsafe { (*self.unwind.get()).as_ptr().read() } {
UnwindReason::UserTrap(data) => Err(Trap::User(data)),
UnwindReason::LibTrap(trap) => Err(trap),
UnwindReason::WasmTrap {
backtrace,
pc,
signal_trap,
} => Err(Trap::wasm(pc, backtrace, signal_trap)),
UnwindReason::Panic(panic) => std::panic::resume_unwind(panic),
}
}
fn unwind_with(&self, reason: UnwindReason) -> ! {
unsafe {
(*self.unwind.get()).as_mut_ptr().write(reason);
wasmer_unwind(self.jmp_buf.get());
}
}
/// Trap handler using our thread-local state.
///
/// * `pc` - the program counter the trap happened at
/// * `reset_guard_page` - whether or not to reset the guard page,
/// currently Windows specific
/// * `call_handler` - a closure used to invoke the platform-specific
/// signal handler for each instance, if available.
///
/// Attempts to handle the trap if it's a wasm trap. Returns a few
/// different things:
///
/// * null - the trap didn't look like a wasm trap and should continue as a
/// trap
/// * 1 as a pointer - the trap was handled by a custom trap handler on an
/// instance, and the trap handler should quickly return.
/// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
/// the wasm trap was successfully handled.
fn handle_trap(
&self,
pc: *const u8,
reset_guard_page: bool,
signal_trap: Option<TrapCode>,
call_handler: impl Fn(&TrapHandlerFn) -> bool,
) -> *const u8 {
// If we hit a fault while handling a previous trap, that's quite bad,
// so bail out and let the system handle this recursive segfault.
//
// Otherwise flag ourselves as handling a trap, do the trap handling,
// and reset our trap handling flag.
if self.handling_trap.replace(true) {
return ptr::null();
}
// First up see if we have a custom trap handler,
// in which case run it. If anything handles the trap then we
// return that the trap was handled.
if self.trap_handler.custom_trap_handler(&call_handler) {
return 1 as *const _;
}
// If this fault wasn't in wasm code, then it's not our problem
// except if it's a StackOverflow (see below)
if unsafe { !IS_WASM_PC(pc as _) } && signal_trap != Some(TrapCode::StackOverflow) {
return ptr::null();
}
// TODO: stack overflow can happen at any random time (e.g. in malloc()
// in memory.grow) and it's really hard to determine if the cause was
// stack overflow and if it happened in WebAssembly module.
//
// So, let's assume that any untrusted code called from WebAssembly
// doesn't trap. Then, if we have called some WebAssembly code, it
// means the trap is stack overflow.
if self.jmp_buf.get().is_null() {
self.handling_trap.set(false);
return ptr::null();
}
let backtrace = Backtrace::new_unresolved();
self.reset_guard_page.set(reset_guard_page);
unsafe {
(*self.unwind.get())
.as_mut_ptr()
.write(UnwindReason::WasmTrap {
backtrace,
signal_trap,
pc: pc as usize,
});
}
self.handling_trap.set(false);
self.jmp_buf.get()
}
}
impl<'a> Drop for CallThreadState<'a> {
fn drop(&mut self) {
if self.reset_guard_page.get() |
}
}
// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
use super::CallThreadState;
use crate::Trap;
use std::mem;
use std::ptr;
pub use raw::Ptr;
// An even *more* inner module for dealing with TLS. This actually has the
// thread local variable and has functions to access the variable.
//
// Note that this is specially done to fully encapsulate that the accessors
// for tls must not be inlined. Wasmer's async support will employ stack
// switching which can resume execution on different OS threads. This means
// that borrows of our TLS pointer must never live across accesses because
// otherwise the access may be split across two threads and cause unsafety.
//
// This also means that extra care is taken by the runtime to save/restore
// these TLS values when the runtime may have crossed threads.
mod raw {
use super::CallThreadState;
use crate::Trap;
use std::cell::Cell;
use std::ptr;
pub type Ptr = *const CallThreadState<'static>;
// The first entry here is the `Ptr` which is what's used as part of the
// public interface of this module. The second entry is a boolean which
// allows the runtime to perform per-thread initialization if necessary
// for handling traps (e.g. setting up ports on macOS and sigaltstack on
// Unix).
thread_local!(static PTR: Cell<(Ptr, bool)> = Cell::new((ptr::null(), false)));
#[inline(never)] // see module docs for why this is here
pub fn replace(val: Ptr) -> Result<Ptr, Trap> {
PTR.with(|p| {
// When a new value is configured that means that we may be
// entering WebAssembly so check to see if this thread has
// performed per-thread initialization for traps.
let (prev, mut initialized) = p.get();
if !initialized {
super::super::lazy_per_thread_init()?;
initialized = true;
}
p.set((val, initialized));
Ok(prev)
})
}
#[inline(never)] // see module docs for why this is here
pub fn get() -> Ptr {
PTR.with(|p| p.get().0)
}
}
/// Opaque state used to help control TLS state across stack switches for
/// async support.
pub struct TlsRestore(raw::Ptr);
impl TlsRestore {
/// Takes the TLS state that is currently configured and returns a
/// token that is used to replace it later.
///
/// # Safety
///
/// This is not a safe operation since it's intended to only be used
/// with stack switching found with fibers and async wasmer.
pub unsafe fn take() -> Result<TlsRestore, Trap> {
// Our tls pointer must be set at this time, and it must not be
// null. We need to restore the previous pointer since we're
// removing ourselves from the call-stack, and in the process we
// null out our own previous field for safety in case it's
// accidentally used later.
let raw = raw::get();
assert!(!raw.is_null());
let prev = (*raw).prev.replace(ptr::null());
raw::replace(prev)?;
Ok(TlsRestore(raw))
}
/// Restores a previous tls state back into this thread's TLS.
///
/// # Safety
///
/// This is unsafe because it's intended to only be used within the
/// context of stack switching within wasmer.
pub unsafe fn replace(self) -> Result<(), super::Trap> {
// We need to configure our previous TLS pointer to whatever is in
// TLS at this time, and then we set the current state to ourselves.
let prev = raw::get();
assert!((*self.0).prev.get().is_null());
(*self.0).prev.set(prev);
raw::replace(self.0)?;
Ok(())
}
}
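// Illustrative sketch (assumption): shuttling the trap TLS state across a
// stack switch. The fiber suspension between `take` and `replace` is elided
// because it belongs to the embedder's own async machinery.
#[allow(dead_code)]
unsafe fn example_stack_switch() -> Result<(), Trap> {
    let saved = TlsRestore::take()?;
    // ... suspend the current fiber here; execution may resume on a
    // different OS thread before the state is restored ...
    saved.replace()?;
    Ok(())
}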
/// Configures thread local state such that for the duration of the
/// execution of `closure` any call to `with` will yield `ptr`, unless this
/// is recursively called again.
pub fn set<R>(state: &CallThreadState<'_>, closure: impl FnOnce() -> R) -> Result<R, Trap> {
struct Reset<'a, 'b>(&'a CallThreadState<'b>);
impl Drop for Reset<'_, '_> {
#[inline]
fn drop(&mut self) {
raw::replace(self.0.prev.replace(ptr::null()))
.expect("tls should be previously initialized");
}
}
// Note that this extension of the lifetime to `'static` should be
// safe because we only ever access it below with an anonymous
// lifetime, meaning `'static` never leaks out of this module.
let ptr = unsafe { mem::transmute::<*const CallThreadState<'_>, _>(state) };
let prev = raw::replace(ptr)?;
state.prev.set(prev);
let _reset = Reset(state);
Ok(closure())
}
/// Invokes `closure` with the last state configured with `set` above, or with
/// `None` if `set` has not been called on this thread or has already returned.
pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState<'_>>) -> R) -> R {
let p = raw::get();
unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
}
}
#[cfg(not(unix))]
pub fn lazy_per_thread_init() -> Result<(), Trap> {
// Unused on Windows
Ok(())
}
/// A module for registering a custom alternate signal stack (sigaltstack).
///
/// Rust's libstd installs an alternate stack with size `SIGSTKSZ`, which is not
/// always large enough for our signal handling code. Override it by creating
/// and registering our own alternate stack that is large enough and has a guard
/// page.
#[cfg(unix)]
pub fn lazy_per_thread_init() -> Result<(), Trap> {
use std::cell::RefCell;
use std::ptr::null_mut;
thread_local! {
/// Thread-local state is lazy-initialized on the first time it's used,
/// and dropped when the thread exits.
static TLS: RefCell<Tls> = RefCell::new(Tls::None);
}
/// The size of the sigaltstack (not including the guard, which will be
/// added). Make this large enough to run our signal handlers.
const MIN_STACK_SIZE: usize = 16 * 4096;
enum Tls {
None,
Allocated {
mmap_ptr: *mut libc::c_void,
mmap_size: usize,
},
BigEnough,
}
return TLS.with(|slot| unsafe {
let mut slot = slot.borrow_mut();
match *slot {
Tls::None => {}
// already checked
_ => return Ok(()),
}
// Check to see if the existing sigaltstack, if it exists, is big
// enough. If so we don't need to allocate our own.
let mut old_stack = mem::zeroed();
let r = libc::sigaltstack(ptr::null(), &mut old_stack);
assert_eq!(r, 0, "learning about sigaltstack failed");
if old_stack.ss_flags & libc::SS_DISABLE == 0 && old_stack.ss_size >= MIN_STACK_SIZE {
*slot = Tls::BigEnough;
return Ok(());
}
// ... but failing that we need to allocate our own, so do all that
// here.
let page_size: usize = region::page::size();
let guard_size = page_size;
let alloc_size = guard_size + MIN_STACK_SIZE;
let ptr = libc::mmap(
null_mut(),
alloc_size,
libc::PROT_NONE,
libc::MAP_PRIVATE | libc::MAP_ANON,
-1,
0,
);
if ptr == libc::MAP_FAILED {
return Err(Trap::oom());
}
// Prepare the stack with readable/writable memory and then register it
// with `sigaltstack`.
let stack_ptr = (ptr as usize + guard_size) as *mut libc::c_void;
let r = libc::mprotect(
stack_ptr,
MIN_STACK_SIZE,
libc::PROT_READ | libc::PROT_WRITE,
);
assert_eq!(r, 0, "mprotect to configure memory for sigaltstack failed");
let new_stack = libc::stack_t {
ss_sp: stack_ptr,
ss_flags: 0,
ss_size: MIN_STACK_SIZE,
};
let r = libc::sigaltstack(&new_stack, ptr::null_mut());
assert_eq!(r, 0, "registering new sigaltstack failed");
*slot = Tls::Allocated {
mmap_ptr: ptr,
mmap_size: alloc_size,
};
Ok(())
});
impl Drop for Tls {
fn drop(&mut self) {
let (ptr, size) = match self {
Self::Allocated {
mmap_ptr,
mmap_size,
} => (*mmap_ptr, *mmap_size),
_ => return,
};
unsafe {
// Deallocate the stack memory.
let r = libc::munmap(ptr, size);
debug_assert_eq!(r, 0, "munmap failed during thread shutdown");
}
}
}
}
| {
reset_guard_page();
} |
request_test.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"bytes"
"context"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"strings"
"syscall"
"testing"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/scheme"
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/util/flowcontrol"
utiltesting "k8s.io/client-go/util/testing"
)
func TestNewRequestSetsAccept(t *testing.T) {
r := NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{}, Serializers{}, nil, nil, 0)
if r.headers.Get("Accept") != "" {
t.Errorf("unexpected headers: %#v", r.headers)
}
r = NewRequest(nil, "get", &url.URL{Path: "/path/"}, "", ContentConfig{ContentType: "application/other"}, Serializers{}, nil, nil, 0)
if r.headers.Get("Accept") != "application/other, */*" {
t.Errorf("unexpected headers: %#v", r.headers)
}
}
type clientFunc func(req *http.Request) (*http.Response, error)
func (f clientFunc) Do(req *http.Request) (*http.Response, error) {
return f(req)
}
func TestRequestSetsHeaders(t *testing.T) {
server := clientFunc(func(req *http.Request) (*http.Response, error) {
if req.Header.Get("Accept") != "application/other, */*" {
t.Errorf("unexpected headers: %#v", req.Header)
}
return &http.Response{
StatusCode: http.StatusForbidden,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}, nil
})
config := defaultContentConfig()
config.ContentType = "application/other"
serializers := defaultSerializers(t)
r := NewRequest(server, "get", &url.URL{Path: "/path"}, "", config, serializers, nil, nil, 0)
// Check if all "issue" methods are setting headers.
_ = r.Do()
_, _ = r.Watch()
_, _ = r.Stream()
}
func TestRequestWithErrorWontChange(t *testing.T) {
gvCopy := v1.SchemeGroupVersion
original := Request{
err: errors.New("test"),
content: ContentConfig{GroupVersion: &gvCopy},
}
r := original
changed := r.Param("foo", "bar").
AbsPath("/abs").
Prefix("test").
Suffix("testing").
Namespace("new").
Resource("foos").
Name("bars").
Body("foo").
Timeout(time.Millisecond)
if changed != &r {
t.Errorf("returned request should point to the same object")
}
if !reflect.DeepEqual(changed, &original) {
t.Errorf("expected %#v, got %#v", &original, changed)
}
}
func TestRequestPreservesBaseTrailingSlash(t *testing.T) {
r := &Request{baseURL: &url.URL{}, pathPrefix: "/path/"}
if s := r.URL().String(); s != "/path/" {
t.Errorf("trailing slash should be preserved: %s", s)
}
}
func TestRequestAbsPathPreservesTrailingSlash(t *testing.T) {
r := (&Request{baseURL: &url.URL{}}).AbsPath("/foo/")
if s := r.URL().String(); s != "/foo/" {
t.Errorf("trailing slash should be preserved: %s", s)
}
r = (&Request{baseURL: &url.URL{}}).AbsPath("/foo/")
if s := r.URL().String(); s != "/foo/" {
t.Errorf("trailing slash should be preserved: %s", s)
}
}
func TestRequestAbsPathJoins(t *testing.T) {
r := (&Request{baseURL: &url.URL{}}).AbsPath("foo/bar", "baz")
if s := r.URL().String(); s != "foo/bar/baz" {
t.Errorf("trailing slash should be preserved: %s", s)
}
}
func TestRequestSetsNamespace(t *testing.T) {
r := (&Request{
baseURL: &url.URL{
Path: "/",
},
}).Namespace("foo")
if r.namespace == "" {
t.Errorf("namespace should be set: %#v", r)
}
if s := r.URL().String(); s != "namespaces/foo" {
t.Errorf("namespace should be in path: %s", s)
}
}
func TestRequestOrdersNamespaceInPath(t *testing.T) {
r := (&Request{
baseURL: &url.URL{},
pathPrefix: "/test/",
}).Name("bar").Resource("baz").Namespace("foo")
if s := r.URL().String(); s != "/test/namespaces/foo/baz/bar" {
t.Errorf("namespace should be in order in path: %s", s)
}
}
func TestRequestOrdersSubResource(t *testing.T) {
r := (&Request{
baseURL: &url.URL{},
pathPrefix: "/test/",
}).Name("bar").Resource("baz").Namespace("foo").Suffix("test").SubResource("a", "b")
if s := r.URL().String(); s != "/test/namespaces/foo/baz/bar/a/b/test" {
t.Errorf("namespace should be in order in path: %s", s)
}
}
func TestRequestSetTwiceError(t *testing.T) {
if (&Request{}).Name("bar").Name("baz").err == nil {
t.Errorf("setting name twice should result in error")
}
if (&Request{}).Namespace("bar").Namespace("baz").err == nil {
t.Errorf("setting namespace twice should result in error")
}
if (&Request{}).Resource("bar").Resource("baz").err == nil {
t.Errorf("setting resource twice should result in error")
}
if (&Request{}).SubResource("bar").SubResource("baz").err == nil {
t.Errorf("setting subresource twice should result in error")
}
}
func TestInvalidSegments(t *testing.T) {
invalidSegments := []string{".", "..", "test/segment", "test%2bsegment"}
setters := map[string]func(string, *Request){
"namespace": func(s string, r *Request) { r.Namespace(s) },
"resource": func(s string, r *Request) { r.Resource(s) },
"name": func(s string, r *Request) { r.Name(s) },
"subresource": func(s string, r *Request) { r.SubResource(s) },
}
for _, invalidSegment := range invalidSegments {
for setterName, setter := range setters {
r := &Request{}
setter(invalidSegment, r)
if r.err == nil {
t.Errorf("%s: %s: expected error, got none", setterName, invalidSegment)
}
}
}
}
func TestRequestParam(t *testing.T) {
r := (&Request{}).Param("foo", "a")
if !reflect.DeepEqual(r.params, url.Values{"foo": []string{"a"}}) {
t.Errorf("should have set a param: %#v", r)
}
r.Param("bar", "1")
r.Param("bar", "2")
if !reflect.DeepEqual(r.params, url.Values{"foo": []string{"a"}, "bar": []string{"1", "2"}}) {
t.Errorf("should have set a param: %#v", r)
}
}
func TestRequestVersionedParams(t *testing.T) {
r := (&Request{content: ContentConfig{GroupVersion: &v1.SchemeGroupVersion}}).Param("foo", "a")
if !reflect.DeepEqual(r.params, url.Values{"foo": []string{"a"}}) {
t.Errorf("should have set a param: %#v", r)
}
r.VersionedParams(&v1.PodLogOptions{Follow: true, Container: "bar"}, scheme.ParameterCodec)
if !reflect.DeepEqual(r.params, url.Values{
"foo": []string{"a"},
"container": []string{"bar"},
"follow": []string{"true"},
}) {
t.Errorf("should have set a param: %#v", r)
}
}
func TestRequestVersionedParamsFromListOptions(t *testing.T) {
r := &Request{content: ContentConfig{GroupVersion: &v1.SchemeGroupVersion}}
r.VersionedParams(&metav1.ListOptions{ResourceVersion: "1"}, scheme.ParameterCodec)
if !reflect.DeepEqual(r.params, url.Values{
"resourceVersion": []string{"1"},
}) {
t.Errorf("should have set a param: %#v", r)
}
var timeout int64 = 10
r.VersionedParams(&metav1.ListOptions{ResourceVersion: "2", TimeoutSeconds: &timeout}, scheme.ParameterCodec)
if !reflect.DeepEqual(r.params, url.Values{
"resourceVersion": []string{"1", "2"},
"timeoutSeconds": []string{"10"},
}) {
t.Errorf("should have set a param: %#v %v", r.params, r.err)
}
}
func TestRequestURI(t *testing.T) {
r := (&Request{}).Param("foo", "a")
r.Prefix("other")
r.RequestURI("/test?foo=b&a=b&c=1&c=2")
if r.pathPrefix != "/test" {
t.Errorf("path is wrong: %#v", r)
}
if !reflect.DeepEqual(r.params, url.Values{"a": []string{"b"}, "foo": []string{"b"}, "c": []string{"1", "2"}}) {
t.Errorf("should have set a param: %#v", r)
}
}
type NotAnAPIObject struct{}
func (obj NotAnAPIObject) GroupVersionKind() *schema.GroupVersionKind { return nil }
func (obj NotAnAPIObject) SetGroupVersionKind(gvk *schema.GroupVersionKind) {}
func defaultContentConfig() ContentConfig {
gvCopy := v1.SchemeGroupVersion
return ContentConfig{
ContentType: "application/json",
GroupVersion: &gvCopy,
NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs},
}
}
func defaultSerializers(t *testing.T) Serializers {
config := defaultContentConfig()
serializers, err := createSerializers(config)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
return *serializers
}
func TestRequestBody(t *testing.T) {
// test unknown type
r := (&Request{}).Body([]string{"test"})
if r.err == nil || r.body != nil {
t.Errorf("should have set err and left body nil: %#v", r)
}
// test error set when failing to read file
f, err := ioutil.TempFile("", "test")
if err != nil {
t.Fatalf("unable to create temp file")
}
defer f.Close()
os.Remove(f.Name())
r = (&Request{}).Body(f.Name())
if r.err == nil || r.body != nil {
t.Errorf("should have set err and left body nil: %#v", r)
}
// test unencodable api object
r = (&Request{content: defaultContentConfig()}).Body(&NotAnAPIObject{})
if r.err == nil || r.body != nil {
t.Errorf("should have set err and left body nil: %#v", r)
}
}
func TestResultIntoWithErrReturnsErr(t *testing.T) |
func TestResultIntoWithNoBodyReturnsErr(t *testing.T) {
res := Result{
body: []byte{},
decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
}
if err := res.Into(&v1.Pod{}); err == nil || !strings.Contains(err.Error(), "0-length") {
t.Errorf("should have complained about 0 length body")
}
}
func TestURLTemplate(t *testing.T) {
uri, _ := url.Parse("http://localhost")
r := NewRequest(nil, "POST", uri, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0)
r.Prefix("pre1").Resource("r1").Namespace("ns").Name("nm").Param("p0", "v0")
full := r.URL()
if full.String() != "http://localhost/pre1/namespaces/ns/r1/nm?p0=v0" {
t.Errorf("unexpected initial URL: %s", full)
}
actualURL := r.finalURLTemplate()
actual := actualURL.String()
expected := "http://localhost/pre1/namespaces/%7Bnamespace%7D/r1/%7Bname%7D?p0=%7Bvalue%7D"
if actual != expected {
t.Errorf("unexpected URL template: %s %s", actual, expected)
}
if r.URL().String() != full.String() {
t.Errorf("creating URL template changed request: %s -> %s", full.String(), r.URL().String())
}
}
func TestTransformResponse(t *testing.T) {
invalid := []byte("aaaaa")
uri, _ := url.Parse("http://localhost")
testCases := []struct {
Response *http.Response
Data []byte
Created bool
Error bool
ErrFn func(err error) bool
}{
{Response: &http.Response{StatusCode: 200}, Data: []byte{}},
{Response: &http.Response{StatusCode: 201}, Data: []byte{}, Created: true},
{Response: &http.Response{StatusCode: 199}, Error: true},
{Response: &http.Response{StatusCode: 500}, Error: true},
{Response: &http.Response{StatusCode: 422}, Error: true},
{Response: &http.Response{StatusCode: 409}, Error: true},
{Response: &http.Response{StatusCode: 404}, Error: true},
{Response: &http.Response{StatusCode: 401}, Error: true},
{
Response: &http.Response{
StatusCode: 401,
Header: http.Header{"Content-Type": []string{"application/json"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Error: true,
ErrFn: func(err error) bool {
return err.Error() != "aaaaa" && apierrors.IsUnauthorized(err)
},
},
{
Response: &http.Response{
StatusCode: 401,
Header: http.Header{"Content-Type": []string{"text/any"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Error: true,
ErrFn: func(err error) bool {
return strings.Contains(err.Error(), "server has asked for the client to provide") && apierrors.IsUnauthorized(err)
},
},
{Response: &http.Response{StatusCode: 403}, Error: true},
{Response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(invalid))}, Data: invalid},
{Response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(invalid))}, Data: invalid},
}
for i, test := range testCases {
r := NewRequest(nil, "", uri, "", defaultContentConfig(), defaultSerializers(t), nil, nil, 0)
if test.Response.Body == nil {
test.Response.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
result := r.transformResponse(test.Response, &http.Request{})
response, created, err := result.body, result.statusCode == http.StatusCreated, result.err
hasErr := err != nil
if hasErr != test.Error {
t.Errorf("%d: unexpected error: %t %v", i, test.Error, err)
} else if hasErr && test.Response.StatusCode > 399 {
status, ok := err.(apierrors.APIStatus)
if !ok {
t.Errorf("%d: response should have been transformable into APIStatus: %v", i, err)
continue
}
if int(status.Status().Code) != test.Response.StatusCode {
t.Errorf("%d: status code did not match response: %#v", i, status.Status())
}
}
if test.ErrFn != nil && !test.ErrFn(err) {
t.Errorf("%d: error function did not match: %v", i, err)
}
if !(test.Data == nil && response == nil) && !apiequality.Semantic.DeepDerivative(test.Data, response) {
t.Errorf("%d: unexpected response: %#v %#v", i, test.Data, response)
}
if test.Created != created {
t.Errorf("%d: expected created %t, got %t", i, test.Created, created)
}
}
}
type renegotiator struct {
called bool
contentType string
params map[string]string
decoder runtime.Decoder
err error
}
func (r *renegotiator) invoke(contentType string, params map[string]string) (runtime.Decoder, error) {
r.called = true
r.contentType = contentType
r.params = params
return r.decoder, r.err
}
func TestTransformResponseNegotiate(t *testing.T) {
invalid := []byte("aaaaa")
uri, _ := url.Parse("http://localhost")
testCases := []struct {
Response *http.Response
Data []byte
Created bool
Error bool
ErrFn func(err error) bool
ContentType string
Called bool
ExpectContentType string
Decoder runtime.Decoder
NegotiateErr error
}{
{
ContentType: "application/json",
Response: &http.Response{
StatusCode: 401,
Header: http.Header{"Content-Type": []string{"application/json"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Error: true,
ErrFn: func(err error) bool {
return err.Error() != "aaaaa" && apierrors.IsUnauthorized(err)
},
},
{
ContentType: "application/json",
Response: &http.Response{
StatusCode: 401,
Header: http.Header{"Content-Type": []string{"application/protobuf"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
Called: true,
ExpectContentType: "application/protobuf",
Error: true,
ErrFn: func(err error) bool {
return err.Error() != "aaaaa" && apierrors.IsUnauthorized(err)
},
},
{
ContentType: "application/json",
Response: &http.Response{
StatusCode: 500,
Header: http.Header{"Content-Type": []string{"application/,others"}},
},
Decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
Error: true,
ErrFn: func(err error) bool {
return err.Error() == "Internal error occurred: mime: expected token after slash" && err.(apierrors.APIStatus).Status().Code == 500
},
},
{
// no negotiation when no content type specified
Response: &http.Response{
StatusCode: 200,
Header: http.Header{"Content-Type": []string{"text/any"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
},
{
// no negotiation when no response content type specified
ContentType: "text/any",
Response: &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
},
{
// unrecognized content type is not handled
ContentType: "application/json",
Response: &http.Response{
StatusCode: 404,
Header: http.Header{"Content-Type": []string{"application/unrecognized"}},
Body: ioutil.NopCloser(bytes.NewReader(invalid)),
},
Decoder: scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion),
NegotiateErr: fmt.Errorf("aaaa"),
Called: true,
ExpectContentType: "application/unrecognized",
Error: true,
ErrFn: func(err error) bool {
return err.Error() != "aaaaa" && apierrors.IsNotFound(err)
},
},
}
for i, test := range testCases {
serializers := defaultSerializers(t)
negotiator := &renegotiator{
decoder: test.Decoder,
err: test.NegotiateErr,
}
serializers.RenegotiatedDecoder = negotiator.invoke
contentConfig := defaultContentConfig()
contentConfig.ContentType = test.ContentType
r := NewRequest(nil, "", uri, "", contentConfig, serializers, nil, nil, 0)
if test.Response.Body == nil {
test.Response.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
result := r.transformResponse(test.Response, &http.Request{})
_, err := result.body, result.err
hasErr := err != nil
if hasErr != test.Error {
t.Errorf("%d: unexpected error: %t %v", i, test.Error, err)
continue
} else if hasErr && test.Response.StatusCode > 399 {
status, ok := err.(apierrors.APIStatus)
if !ok {
t.Errorf("%d: response should have been transformable into APIStatus: %v", i, err)
continue
}
if int(status.Status().Code) != test.Response.StatusCode {
t.Errorf("%d: status code did not match response: %#v", i, status.Status())
}
}
if test.ErrFn != nil && !test.ErrFn(err) {
t.Errorf("%d: error function did not match: %v", i, err)
}
if negotiator.called != test.Called {
t.Errorf("%d: negotiator called %t != %t", i, negotiator.called, test.Called)
}
if !test.Called {
continue
}
if negotiator.contentType != test.ExpectContentType {
t.Errorf("%d: unexpected content type: %s", i, negotiator.contentType)
}
}
}
func TestTransformUnstructuredError(t *testing.T) {
testCases := []struct {
Req *http.Request
Res *http.Response
Resource string
Name string
ErrFn func(error) bool
Transformed error
}{
{
Resource: "foo",
Name: "bar",
Req: &http.Request{
Method: "POST",
},
Res: &http.Response{
StatusCode: http.StatusConflict,
Body: ioutil.NopCloser(bytes.NewReader(nil)),
},
ErrFn: apierrors.IsAlreadyExists,
},
{
Resource: "foo",
Name: "bar",
Req: &http.Request{
Method: "PUT",
},
Res: &http.Response{
StatusCode: http.StatusConflict,
Body: ioutil.NopCloser(bytes.NewReader(nil)),
},
ErrFn: apierrors.IsConflict,
},
{
Resource: "foo",
Name: "bar",
Req: &http.Request{},
Res: &http.Response{
StatusCode: http.StatusNotFound,
Body: ioutil.NopCloser(bytes.NewReader(nil)),
},
ErrFn: apierrors.IsNotFound,
},
{
Req: &http.Request{},
Res: &http.Response{
StatusCode: http.StatusBadRequest,
Body: ioutil.NopCloser(bytes.NewReader(nil)),
},
ErrFn: apierrors.IsBadRequest,
},
{
// status in response overrides transformed result
Req: &http.Request{},
Res: &http.Response{StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"kind":"Status","apiVersion":"v1","status":"Failure","code":404}`)))},
ErrFn: apierrors.IsBadRequest,
Transformed: &apierrors.StatusError{
ErrStatus: metav1.Status{Status: metav1.StatusFailure, Code: http.StatusNotFound},
},
},
{
// successful status is ignored
Req: &http.Request{},
Res: &http.Response{StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"kind":"Status","apiVersion":"v1","status":"Success","code":404}`)))},
ErrFn: apierrors.IsBadRequest,
},
{
// empty object does not change result
Req: &http.Request{},
Res: &http.Response{StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{}`)))},
ErrFn: apierrors.IsBadRequest,
},
{
// we default apiVersion for backwards compatibility with old clients
// TODO: potentially remove in 1.7
Req: &http.Request{},
Res: &http.Response{StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"kind":"Status","status":"Failure","code":404}`)))},
ErrFn: apierrors.IsBadRequest,
Transformed: &apierrors.StatusError{
ErrStatus: metav1.Status{Status: metav1.StatusFailure, Code: http.StatusNotFound},
},
},
{
// we do not default kind
Req: &http.Request{},
Res: &http.Response{StatusCode: http.StatusBadRequest, Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"status":"Failure","code":404}`)))},
ErrFn: apierrors.IsBadRequest,
},
}
for i, testCase := range testCases {
r := &Request{
content: defaultContentConfig(),
serializers: defaultSerializers(t),
resourceName: testCase.Name,
resource: testCase.Resource,
}
result := r.transformResponse(testCase.Res, testCase.Req)
err := result.err
if !testCase.ErrFn(err) {
t.Errorf("unexpected error: %v", err)
continue
}
if !apierrors.IsUnexpectedServerError(err) {
t.Errorf("%d: unexpected error type: %v", i, err)
}
if len(testCase.Name) != 0 && !strings.Contains(err.Error(), testCase.Name) {
t.Errorf("unexpected error string: %s", err)
}
if len(testCase.Resource) != 0 && !strings.Contains(err.Error(), testCase.Resource) {
t.Errorf("unexpected error string: %s", err)
}
// verify Error() properly transforms the error
transformed := result.Error()
expect := testCase.Transformed
if expect == nil {
expect = err
}
if !reflect.DeepEqual(expect, transformed) {
t.Errorf("%d: unexpected Error(): %s", i, diff.ObjectReflectDiff(expect, transformed))
}
// verify result.Get properly transforms the error
if _, err := result.Get(); !reflect.DeepEqual(expect, err) {
t.Errorf("%d: unexpected error on Get(): %s", i, diff.ObjectReflectDiff(expect, err))
}
// verify result.Into properly handles the error
if err := result.Into(&v1.Pod{}); !reflect.DeepEqual(expect, err) {
t.Errorf("%d: unexpected error on Into(): %s", i, diff.ObjectReflectDiff(expect, err))
}
// verify result.Raw leaves the error in the untransformed state
if _, err := result.Raw(); !reflect.DeepEqual(result.err, err) {
t.Errorf("%d: unexpected error on Raw(): %s", i, diff.ObjectReflectDiff(expect, err))
}
}
}
func TestRequestWatch(t *testing.T) {
testCases := []struct {
Request *Request
Err bool
ErrFn func(error) bool
Empty bool
}{
{
Request: &Request{err: errors.New("bail")},
Err: true,
},
{
Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"},
Err: true,
},
{
Request: &Request{
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, errors.New("err")
}),
baseURL: &url.URL{},
},
Err: true,
},
{
Request: &Request{
content: defaultContentConfig(),
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusForbidden,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}, nil
}),
baseURL: &url.URL{},
},
Err: true,
ErrFn: func(err error) bool {
return apierrors.IsForbidden(err)
},
},
{
Request: &Request{
content: defaultContentConfig(),
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusUnauthorized,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}, nil
}),
baseURL: &url.URL{},
},
Err: true,
ErrFn: func(err error) bool {
return apierrors.IsUnauthorized(err)
},
},
{
Request: &Request{
content: defaultContentConfig(),
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusUnauthorized,
Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &metav1.Status{
Status: metav1.StatusFailure,
Reason: metav1.StatusReasonUnauthorized,
})))),
}, nil
}),
baseURL: &url.URL{},
},
Err: true,
ErrFn: func(err error) bool {
return apierrors.IsUnauthorized(err)
},
},
{
Request: &Request{
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, io.EOF
}),
baseURL: &url.URL{},
},
Empty: true,
},
{
Request: &Request{
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, &url.Error{Err: io.EOF}
}),
baseURL: &url.URL{},
},
Empty: true,
},
{
Request: &Request{
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, errors.New("http: can't write HTTP request on broken connection")
}),
baseURL: &url.URL{},
},
Empty: true,
},
{
Request: &Request{
serializers: defaultSerializers(t),
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, errors.New("foo: connection reset by peer")
}),
baseURL: &url.URL{},
},
Empty: true,
},
}
for i, testCase := range testCases {
t.Logf("testcase %v", testCase.Request)
testCase.Request.backoffMgr = &NoBackoff{}
watch, err := testCase.Request.Watch()
hasErr := err != nil
if hasErr != testCase.Err {
t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err)
continue
}
if testCase.ErrFn != nil && !testCase.ErrFn(err) {
t.Errorf("%d: error not valid: %v", i, err)
}
if hasErr && watch != nil {
t.Errorf("%d: watch should be nil when error is returned", i)
continue
}
if testCase.Empty {
_, ok := <-watch.ResultChan()
if ok {
t.Errorf("%d: expected the watch to be empty: %#v", i, watch)
}
}
}
}
func TestRequestStream(t *testing.T) {
testCases := []struct {
Request *Request
Err bool
ErrFn func(error) bool
}{
{
Request: &Request{err: errors.New("bail")},
Err: true,
},
{
Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"},
Err: true,
},
{
Request: &Request{
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, errors.New("err")
}),
baseURL: &url.URL{},
},
Err: true,
},
{
Request: &Request{
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusUnauthorized,
Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), &metav1.Status{
Status: metav1.StatusFailure,
Reason: metav1.StatusReasonUnauthorized,
})))),
}, nil
}),
content: defaultContentConfig(),
serializers: defaultSerializers(t),
baseURL: &url.URL{},
},
Err: true,
},
{
Request: &Request{
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusBadRequest,
Body: ioutil.NopCloser(bytes.NewReader([]byte(`{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"a container name must be specified for pod kube-dns-v20-mz5cv, choose one of: [kubedns dnsmasq healthz]","reason":"BadRequest","code":400}`))),
}, nil
}),
content: defaultContentConfig(),
serializers: defaultSerializers(t),
baseURL: &url.URL{},
},
Err: true,
ErrFn: func(err error) bool {
return err.Error() == "a container name must be specified for pod kube-dns-v20-mz5cv, choose one of: [kubedns dnsmasq healthz]"
},
},
}
for i, testCase := range testCases {
testCase.Request.backoffMgr = &NoBackoff{}
body, err := testCase.Request.Stream()
hasErr := err != nil
if hasErr != testCase.Err {
t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err)
}
if hasErr && body != nil {
t.Errorf("%d: body should be nil when error is returned", i)
}
if hasErr {
if testCase.ErrFn != nil && !testCase.ErrFn(err) {
t.Errorf("unexpected error: %v", err)
}
}
}
}
type fakeUpgradeConnection struct{}
func (c *fakeUpgradeConnection) CreateStream(headers http.Header) (httpstream.Stream, error) {
return nil, nil
}
func (c *fakeUpgradeConnection) Close() error {
return nil
}
func (c *fakeUpgradeConnection) CloseChan() <-chan bool {
return make(chan bool)
}
func (c *fakeUpgradeConnection) SetIdleTimeout(timeout time.Duration) {
}
type fakeUpgradeRoundTripper struct {
req *http.Request
conn httpstream.Connection
}
func (f *fakeUpgradeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
f.req = req
b := []byte{}
body := ioutil.NopCloser(bytes.NewReader(b))
resp := &http.Response{
StatusCode: 101,
Body: body,
}
return resp, nil
}
func (f *fakeUpgradeRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
return f.conn, nil
}
func TestRequestDo(t *testing.T) {
testCases := []struct {
Request *Request
Err bool
}{
{
Request: &Request{err: errors.New("bail")},
Err: true,
},
{
Request: &Request{baseURL: &url.URL{}, pathPrefix: "%"},
Err: true,
},
{
Request: &Request{
client: clientFunc(func(req *http.Request) (*http.Response, error) {
return nil, errors.New("err")
}),
baseURL: &url.URL{},
},
Err: true,
},
}
for i, testCase := range testCases {
testCase.Request.backoffMgr = &NoBackoff{}
body, err := testCase.Request.Do().Raw()
hasErr := err != nil
if hasErr != testCase.Err {
t.Errorf("%d: expected %t, got %t: %v", i, testCase.Err, hasErr, err)
}
if hasErr && body != nil {
t.Errorf("%d: body should be nil when error is returned", i)
}
}
}
func TestDoRequestNewWay(t *testing.T) {
reqBody := "request body"
expectedObj := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 12345,
TargetPort: intstr.FromInt(12345),
}}}}
expectedBody, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expectedObj)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(expectedBody),
T: t,
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := testRESTClient(t, testServer)
obj, err := c.Verb("POST").
Prefix("foo", "bar").
Suffix("baz").
Timeout(time.Second).
Body([]byte(reqBody)).
Do().Get()
if err != nil {
t.Errorf("Unexpected error: %v %#v", err, err)
return
}
if obj == nil {
t.Error("nil obj")
} else if !apiequality.Semantic.DeepDerivative(expectedObj, obj) {
t.Errorf("Expected: %#v, got %#v", expectedObj, obj)
}
requestURL := defaultResourcePathWithPrefix("foo/bar", "", "", "baz")
requestURL += "?timeout=1s"
fakeHandler.ValidateRequest(t, requestURL, "POST", &reqBody)
}
// This test assumes that the client implementation backs off exponentially, for an individual request.
func TestBackoffLifecycle(t *testing.T) {
count := 0
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
count++
t.Logf("Attempt %d", count)
if count == 5 || count == 9 {
w.WriteHeader(http.StatusOK)
return
} else {
w.WriteHeader(http.StatusGatewayTimeout)
return
}
}))
defer testServer.Close()
c := testRESTClient(t, testServer)
// Test backoff recovery and increase. This correlates to the constants
// which are used in the server implementation returning StatusOK above.
seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0}
request := c.Verb("POST").Prefix("backofftest").Suffix("abc")
clock := clock.FakeClock{}
request.backoffMgr = &URLBackoff{
// Use a fake backoff here to avoid flakes and speed the test up.
Backoff: flowcontrol.NewFakeBackOff(
time.Duration(1)*time.Second,
time.Duration(200)*time.Second,
&clock,
)}
for _, sec := range seconds {
thisBackoff := request.backoffMgr.CalculateBackoff(request.URL())
t.Logf("Current backoff %v", thisBackoff)
if thisBackoff != time.Duration(sec)*time.Second {
t.Errorf("Backoff is %v instead of %v", thisBackoff, sec)
}
now := clock.Now()
request.DoRaw()
elapsed := clock.Since(now)
if elapsed != thisBackoff {
t.Errorf("CalculatedBackoff not honored by clock: Expected time of %v, but got %v ", thisBackoff, elapsed)
}
}
}
type testBackoffManager struct {
sleeps []time.Duration
}
func (b *testBackoffManager) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
}
func (b *testBackoffManager) CalculateBackoff(actualUrl *url.URL) time.Duration {
return time.Duration(0)
}
func (b *testBackoffManager) Sleep(d time.Duration) {
b.sleeps = append(b.sleeps, d)
}
func TestCheckRetryClosesBody(t *testing.T) {
count := 0
ch := make(chan struct{})
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
count++
t.Logf("attempt %d", count)
if count >= 5 {
w.WriteHeader(http.StatusOK)
close(ch)
return
}
w.Header().Set("Retry-After", "1")
http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}))
defer testServer.Close()
backoffMgr := &testBackoffManager{}
expectedSleeps := []time.Duration{0, time.Second, 0, time.Second, 0, time.Second, 0, time.Second, 0}
c := testRESTClient(t, testServer)
c.createBackoffMgr = func() BackoffManager { return backoffMgr }
_, err := c.Verb("POST").
Prefix("foo", "bar").
Suffix("baz").
Timeout(time.Second).
Body([]byte(strings.Repeat("abcd", 1000))).
DoRaw()
if err != nil {
t.Fatalf("Unexpected error: %v %#v", err, err)
}
<-ch
if count != 5 {
t.Errorf("unexpected retries: %d", count)
}
if !reflect.DeepEqual(backoffMgr.sleeps, expectedSleeps) {
t.Errorf("unexpected sleeps, expected: %v, got: %v", expectedSleeps, backoffMgr.sleeps)
}
}
func TestConnectionResetByPeerIsRetried(t *testing.T) {
count := 0
backoff := &testBackoffManager{}
req := &Request{
verb: "GET",
client: clientFunc(func(req *http.Request) (*http.Response, error) {
count++
if count >= 3 {
return &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}, nil
}
return nil, &net.OpError{Err: syscall.ECONNRESET}
}),
backoffMgr: backoff,
}
// We expect two retries of "connection reset by peer" and the success.
_, err := req.Do().Raw()
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
// We have a sleep before each retry (including the initial one) and for
// every "retry-after" call - thus 5 together.
if len(backoff.sleeps) != 5 {
t.Errorf("Expected 5 retries, got: %d", len(backoff.sleeps))
}
}
func TestCheckRetryHandles429And5xx(t *testing.T) {
count := 0
ch := make(chan struct{})
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
data, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("unable to read request body: %v", err)
}
if !bytes.Equal(data, []byte(strings.Repeat("abcd", 1000))) {
t.Fatalf("retry did not send a complete body: %s", data)
}
t.Logf("attempt %d", count)
if count >= 4 {
w.WriteHeader(http.StatusOK)
close(ch)
return
}
w.Header().Set("Retry-After", "0")
w.WriteHeader([]int{http.StatusTooManyRequests, 500, 501, 504}[count])
count++
}))
defer testServer.Close()
c := testRESTClient(t, testServer)
_, err := c.Verb("POST").
Prefix("foo", "bar").
Suffix("baz").
Timeout(time.Second).
Body([]byte(strings.Repeat("abcd", 1000))).
DoRaw()
if err != nil {
t.Fatalf("Unexpected error: %v %#v", err, err)
}
<-ch
if count != 4 {
t.Errorf("unexpected retries: %d", count)
}
}
func BenchmarkCheckRetryClosesBody(b *testing.B) {
count := 0
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
count++
if count%3 == 0 {
w.WriteHeader(http.StatusOK)
return
}
w.Header().Set("Retry-After", "0")
w.WriteHeader(http.StatusTooManyRequests)
}))
defer testServer.Close()
c := testRESTClient(b, testServer)
r := c.Verb("POST").
Prefix("foo", "bar").
Suffix("baz").
Timeout(time.Second).
Body([]byte(strings.Repeat("abcd", 1000)))
for i := 0; i < b.N; i++ {
if _, err := r.DoRaw(); err != nil {
b.Fatalf("Unexpected error: %v %#v", err, err)
}
}
}
func TestDoRequestNewWayReader(t *testing.T) {
reqObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
reqBodyExpected, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), reqObj)
expectedObj := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 12345,
TargetPort: intstr.FromInt(12345),
}}}}
expectedBody, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expectedObj)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(expectedBody),
T: t,
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := testRESTClient(t, testServer)
obj, err := c.Verb("POST").
Resource("bar").
Name("baz").
Prefix("foo").
Timeout(time.Second).
Body(bytes.NewBuffer(reqBodyExpected)).
Do().Get()
if err != nil {
t.Errorf("Unexpected error: %v %#v", err, err)
return
}
if obj == nil {
t.Error("nil obj")
} else if !apiequality.Semantic.DeepDerivative(expectedObj, obj) {
t.Errorf("Expected: %#v, got %#v", expectedObj, obj)
}
tmpStr := string(reqBodyExpected)
requestURL := defaultResourcePathWithPrefix("foo", "bar", "", "baz")
requestURL += "?timeout=1s"
fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr)
}
func TestDoRequestNewWayObj(t *testing.T) {
reqObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
reqBodyExpected, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), reqObj)
expectedObj := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 12345,
TargetPort: intstr.FromInt(12345),
}}}}
expectedBody, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expectedObj)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(expectedBody),
T: t,
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := testRESTClient(t, testServer)
obj, err := c.Verb("POST").
Suffix("baz").
Name("bar").
Resource("foo").
Timeout(time.Second).
Body(reqObj).
Do().Get()
if err != nil {
t.Errorf("Unexpected error: %v %#v", err, err)
return
}
if obj == nil {
t.Error("nil obj")
} else if !apiequality.Semantic.DeepDerivative(expectedObj, obj) {
t.Errorf("Expected: %#v, got %#v", expectedObj, obj)
}
tmpStr := string(reqBodyExpected)
requestURL := defaultResourcePathWithPrefix("", "foo", "", "bar/baz")
requestURL += "?timeout=1s"
fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr)
}
func TestDoRequestNewWayFile(t *testing.T) {
reqObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
reqBodyExpected, err := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), reqObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
file, err := ioutil.TempFile("", "foo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer file.Close()
defer os.Remove(file.Name())
_, err = file.Write(reqBodyExpected)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
expectedObj := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 12345,
TargetPort: intstr.FromInt(12345),
}}}}
expectedBody, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expectedObj)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(expectedBody),
T: t,
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := testRESTClient(t, testServer)
wasCreated := true
obj, err := c.Verb("POST").
Prefix("foo/bar", "baz").
Timeout(time.Second).
Body(file.Name()).
Do().WasCreated(&wasCreated).Get()
if err != nil {
t.Errorf("Unexpected error: %v %#v", err, err)
return
}
if obj == nil {
t.Error("nil obj")
} else if !apiequality.Semantic.DeepDerivative(expectedObj, obj) {
t.Errorf("Expected: %#v, got %#v", expectedObj, obj)
}
if wasCreated {
t.Errorf("expected object was created")
}
tmpStr := string(reqBodyExpected)
requestURL := defaultResourcePathWithPrefix("foo/bar/baz", "", "", "")
requestURL += "?timeout=1s"
fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr)
}
func TestWasCreated(t *testing.T) {
reqObj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
reqBodyExpected, err := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), reqObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
expectedObj := &v1.Service{Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{
Protocol: "TCP",
Port: 12345,
TargetPort: intstr.FromInt(12345),
}}}}
expectedBody, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expectedObj)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 201,
ResponseBody: string(expectedBody),
T: t,
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := testRESTClient(t, testServer)
wasCreated := false
obj, err := c.Verb("PUT").
Prefix("foo/bar", "baz").
Timeout(time.Second).
Body(reqBodyExpected).
Do().WasCreated(&wasCreated).Get()
if err != nil {
t.Errorf("Unexpected error: %v %#v", err, err)
return
}
if obj == nil {
t.Error("nil obj")
} else if !apiequality.Semantic.DeepDerivative(expectedObj, obj) {
t.Errorf("Expected: %#v, got %#v", expectedObj, obj)
}
if !wasCreated {
t.Errorf("Expected object was created")
}
tmpStr := string(reqBodyExpected)
requestURL := defaultResourcePathWithPrefix("foo/bar/baz", "", "", "")
requestURL += "?timeout=1s"
fakeHandler.ValidateRequest(t, requestURL, "PUT", &tmpStr)
}
func TestVerbs(t *testing.T) {
c := testRESTClient(t, nil)
if r := c.Post(); r.verb != "POST" {
t.Errorf("Post verb is wrong")
}
if r := c.Put(); r.verb != "PUT" {
t.Errorf("Put verb is wrong")
}
if r := c.Get(); r.verb != "GET" {
t.Errorf("Get verb is wrong")
}
if r := c.Delete(); r.verb != "DELETE" {
t.Errorf("Delete verb is wrong")
}
}
func TestAbsPath(t *testing.T) {
for i, tc := range []struct {
configPrefix string
resourcePrefix string
absPath string
wantsAbsPath string
}{
{"/", "", "", "/"},
{"", "", "/", "/"},
{"", "", "/api", "/api"},
{"", "", "/api/", "/api/"},
{"", "", "/apis", "/apis"},
{"", "/foo", "/bar/foo", "/bar/foo"},
{"", "/api/foo/123", "/bar/foo", "/bar/foo"},
{"/p1", "", "", "/p1"},
{"/p1", "", "/", "/p1/"},
{"/p1", "", "/api", "/p1/api"},
{"/p1", "", "/apis", "/p1/apis"},
{"/p1", "/r1", "/apis", "/p1/apis"},
{"/p1", "/api/r1", "/apis", "/p1/apis"},
{"/p1/api/p2", "", "", "/p1/api/p2"},
{"/p1/api/p2", "", "/", "/p1/api/p2/"},
{"/p1/api/p2", "", "/api", "/p1/api/p2/api"},
{"/p1/api/p2", "", "/api/", "/p1/api/p2/api/"},
{"/p1/api/p2", "/r1", "/api/", "/p1/api/p2/api/"},
{"/p1/api/p2", "/api/r1", "/api/", "/p1/api/p2/api/"},
} {
u, _ := url.Parse("http://localhost:123" + tc.configPrefix)
r := NewRequest(nil, "POST", u, "", ContentConfig{GroupVersion: &schema.GroupVersion{Group: "test"}}, Serializers{}, nil, nil, 0).Prefix(tc.resourcePrefix).AbsPath(tc.absPath)
if r.pathPrefix != tc.wantsAbsPath {
t.Errorf("test case %d failed, unexpected path: %q, expected %q", i, r.pathPrefix, tc.wantsAbsPath)
}
}
}
func TestUnacceptableParamNames(t *testing.T) {
table := []struct {
name string
testVal string
expectSuccess bool
}{
// timeout is no longer "protected"
{"timeout", "42", true},
}
for _, item := range table {
c := testRESTClient(t, nil)
r := c.Get().setParam(item.name, item.testVal)
if e, a := item.expectSuccess, r.err == nil; e != a {
t.Errorf("expected %v, got %v (%v)", e, a, r.err)
}
}
}
func TestBody(t *testing.T) {
const data = "test payload"
obj := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
bodyExpected, _ := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), obj)
f, err := ioutil.TempFile("", "test_body")
if err != nil {
t.Fatalf("TempFile error: %v", err)
}
if _, err := f.WriteString(data); err != nil {
t.Fatalf("TempFile.WriteString error: %v", err)
}
f.Close()
defer os.Remove(f.Name())
var nilObject *metav1.DeleteOptions
typedObject := interface{}(nilObject)
c := testRESTClient(t, nil)
tests := []struct {
input interface{}
expected string
headers map[string]string
}{
{[]byte(data), data, nil},
{f.Name(), data, nil},
{strings.NewReader(data), data, nil},
{obj, string(bodyExpected), map[string]string{"Content-Type": "application/json"}},
{typedObject, "", nil},
}
for i, tt := range tests {
r := c.Post().Body(tt.input)
if r.err != nil {
t.Errorf("%d: r.Body(%#v) error: %v", i, tt, r.err)
continue
}
if tt.headers != nil {
for k, v := range tt.headers {
if r.headers.Get(k) != v {
t.Errorf("%d: r.headers[%q] = %q; want %q", i, k, v, v)
}
}
}
if r.body == nil {
if len(tt.expected) != 0 {
t.Errorf("%d: r.body = %q; want %q", i, r.body, tt.expected)
}
continue
}
buf := make([]byte, len(tt.expected))
if _, err := r.body.Read(buf); err != nil {
t.Errorf("%d: r.body.Read error: %v", i, err)
continue
}
body := string(buf)
if body != tt.expected {
t.Errorf("%d: r.body = %q; want %q", i, body, tt.expected)
}
}
}
func TestWatch(t *testing.T) {
var table = []struct {
t watch.EventType
obj runtime.Object
}{
{watch.Added, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "first"}}},
{watch.Modified, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "second"}}},
{watch.Deleted, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "last"}}},
}
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
if !ok {
panic("need flusher!")
}
w.Header().Set("Transfer-Encoding", "chunked")
w.WriteHeader(http.StatusOK)
flusher.Flush()
encoder := restclientwatch.NewEncoder(streaming.NewEncoder(w, scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion)), scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion))
for _, item := range table {
if err := encoder.Encode(&watch.Event{Type: item.t, Object: item.obj}); err != nil {
panic(err)
}
flusher.Flush()
}
}))
defer testServer.Close()
s := testRESTClient(t, testServer)
watching, err := s.Get().Prefix("path/to/watch/thing").Watch()
if err != nil {
t.Fatalf("Unexpected error")
}
for _, item := range table {
got, ok := <-watching.ResultChan()
if !ok {
t.Fatalf("Unexpected early close")
}
if e, a := item.t, got.Type; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
if e, a := item.obj, got.Object; !apiequality.Semantic.DeepDerivative(e, a) {
t.Errorf("Expected %v, got %v", e, a)
}
}
_, ok := <-watching.ResultChan()
if ok {
t.Fatal("Unexpected non-close")
}
}
func TestStream(t *testing.T) {
expectedBody := "expected body"
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
flusher, ok := w.(http.Flusher)
if !ok {
panic("need flusher!")
}
w.Header().Set("Transfer-Encoding", "chunked")
w.WriteHeader(http.StatusOK)
w.Write([]byte(expectedBody))
flusher.Flush()
}))
defer testServer.Close()
s := testRESTClient(t, testServer)
readCloser, err := s.Get().Prefix("path/to/stream/thing").Stream()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer readCloser.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(readCloser)
resultBody := buf.String()
if expectedBody != resultBody {
t.Errorf("Expected %s, got %s", expectedBody, resultBody)
}
}
func testRESTClient(t testing.TB, srv *httptest.Server) *RESTClient {
baseURL, _ := url.Parse("http://localhost")
if srv != nil {
var err error
baseURL, err = url.Parse(srv.URL)
if err != nil {
t.Fatalf("failed to parse test URL: %v", err)
}
}
versionedAPIPath := defaultResourcePathWithPrefix("", "", "", "")
client, err := NewRESTClient(baseURL, versionedAPIPath, defaultContentConfig(), 0, 0, nil, nil)
if err != nil {
t.Fatalf("failed to create a client: %v", err)
}
return client
}
func TestDoContext(t *testing.T) {
receivedCh := make(chan struct{})
block := make(chan struct{})
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
close(receivedCh)
<-block
w.WriteHeader(http.StatusOK)
}))
defer testServer.Close()
defer close(block)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
<-receivedCh
cancel()
}()
c := testRESTClient(t, testServer)
_, err := c.Verb("GET").
Context(ctx).
Prefix("foo").
DoRaw()
if err == nil {
t.Fatal("Expected context cancellation error")
}
}
func buildString(length int) string {
s := make([]byte, length)
for i := range s {
s[i] = 'a'
}
return string(s)
}
func TestTruncateBody(t *testing.T) {
tests := []struct {
body string
want string
level string
}{
// Anything below 8 is completely truncated
{
body: "Completely truncated below 8",
want: " [truncated 28 chars]",
level: "0",
},
// Small strings are not truncated by high levels
{
body: "Small body never gets truncated",
want: "Small body never gets truncated",
level: "10",
},
{
body: "Small body never gets truncated",
want: "Small body never gets truncated",
level: "8",
},
// Strings are truncated to 1024 if level is less than 9.
{
body: buildString(2000),
level: "8",
want: fmt.Sprintf("%s [truncated 976 chars]", buildString(1024)),
},
// Strings are truncated to 10240 if level is 9.
{
body: buildString(20000),
level: "9",
want: fmt.Sprintf("%s [truncated 9760 chars]", buildString(10240)),
},
// Strings are not truncated if level is 10 or higher
{
body: buildString(20000),
level: "10",
want: buildString(20000),
},
// Strings are not truncated if level is 10 or higher
{
body: buildString(20000),
level: "11",
want: buildString(20000),
},
}
l := flag.Lookup("v").Value.(flag.Getter).Get().(glog.Level)
for _, test := range tests {
flag.Set("v", test.level)
got := truncateBody(test.body)
if got != test.want {
t.Errorf("truncateBody(%v) = %v, want %v", test.body, got, test.want)
}
}
flag.Set("v", l.String())
}
func defaultResourcePathWithPrefix(prefix, resource, namespace, name string) string {
var path string
path = "/api/" + v1.SchemeGroupVersion.Version
if prefix != "" {
path = path + "/" + prefix
}
if namespace != "" {
path = path + "/namespaces/" + namespace
}
// Resource names are lower case.
resource = strings.ToLower(resource)
if resource != "" {
path = path + "/" + resource
}
if name != "" {
path = path + "/" + name
}
return path
}
| {
res := Result{err: errors.New("test")}
if err := res.Into(&v1.Pod{}); err != res.err {
t.Errorf("should have returned exact error from result")
}
} |
navigates_galaxy.py | """A mixin that extends a HasDriver class with Galaxy-specific utilities.
Implementer must provide a self.build_url method to target Galaxy.
"""
from __future__ import print_function
import contextlib
import random
import string
import time
from functools import partial, wraps
import requests
import yaml
from .data import NAVIGATION_DATA
from .has_driver import exception_indicates_stale_element, HasDriver
from . import sizzle
# Test case data
DEFAULT_PASSWORD = '123456'
class NullTourCallback(object):
def handle_step(self, step, step_index):
pass
def retry_call_during_transitions(f, attempts=5, sleep=.1):
previous_attempts = 0
while True:
try:
return f()
except Exception as e:
if previous_attempts > attempts:
raise
if not exception_indicates_stale_element(e):
raise
time.sleep(sleep)
previous_attempts += 1
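# Decorator form of retry_call_during_transitions: retries the wrapped call
# when a stale-element exception is raised during a page transition.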
def retry_during_transitions(f, attempts=5, sleep=.1):
@wraps(f)
def _retry(*args, **kwds):
return retry_call_during_transitions(partial(f, *args, **kwds), attempts=attempts, sleep=sleep)
return _retry
class NavigatesGalaxy(HasDriver):
default_password = DEFAULT_PASSWORD
def get(self, url=""):
full_url = self.build_url(url)
return self.driver.get(full_url)
@property
def navigation_data(self):
return NAVIGATION_DATA
def home(self):
self.get()
self.wait_for_selector_visible("#masthead")
self.wait_for_selector_visible("#current-history-panel")
def switch_to_main_panel(self):
self.driver.switch_to.frame(self.navigation_data["selectors"]["frames"]["main"])
@contextlib.contextmanager
def main_panel(self):
try:
self.switch_to_main_panel()
yield
finally:
self.driver.switch_to.default_content()
def api_get(self, endpoint, data={}, raw=False):
full_url = self.build_url("api/" + endpoint, for_selenium=False)
response = requests.get(full_url, data=data, cookies=self.selenium_to_requests_cookies())
if raw:
return response
else:
return response.json()
def get_galaxy_session(self):
for cookie in self.driver.get_cookies():
if cookie["name"] == "galaxysession":
return cookie["value"]
def selenium_to_requests_cookies(self):
return {
'galaxysession': self.get_galaxy_session()
}
def history_panel_name_element(self):
name_selector = self.test_data["historyPanel"]["selectors"]["history"]["name"]
return self.wait_for_selector(name_selector)
def current_history(self):
history = self.api_get("histories")[0]
return history
def current_history_id(self):
return self.current_history()["id"]
def current_history_contents(self):
current_history_id = self.current_history_id()
history_contents = self.api_get("histories/%s/contents" % current_history_id)
return history_contents
def latest_history_item(self):
history_contents = self.current_history_contents()
assert len(history_contents) > 0
return history_contents[-1]
def wait_for_history(self, timeout=30, assert_ok=True):
def history_becomes_terminal(driver):
current_history_id = self.current_history_id()
state = self.api_get("histories/%s" % current_history_id)["state"]
if state not in ["running", "queued", "new", "ready"]:
return state
else:
return None
final_state = self.wait(timeout).until(history_becomes_terminal)
if assert_ok:
assert final_state == "ok", final_state
return final_state
def history_panel_wait_for_hid_ok(self, hid, timeout=60):
self.history_panel_wait_for_hid_state(hid, 'ok', timeout=timeout)
def history_panel_wait_for_hid_visible(self, hid, timeout=60):
current_history_id = self.current_history_id()
def history_has_hid(driver):
contents = self.api_get("histories/%s/contents" % current_history_id)
return any([d for d in contents if d["hid"] == hid])
self.wait(timeout).until(history_has_hid)
contents = self.api_get("histories/%s/contents" % current_history_id)
history_item = [d for d in contents if d["hid"] == hid][0]
history_item_selector = "#%s-%s" % (history_item["history_content_type"], history_item["id"])
self.wait_for_selector_visible(history_item_selector)
return history_item_selector
def history_panel_wait_for_hid_hidden(self, hid, timeout=60):
current_history_id = self.current_history_id()
contents = self.api_get("histories/%s/contents" % current_history_id)
history_item = [d for d in contents if d["hid"] == hid][0]
history_item_selector = "#%s-%s" % (history_item["history_content_type"], history_item["id"])
self.wait_for_selector_absent(history_item_selector)
return history_item_selector
def history_panel_wait_for_hid_state(self, hid, state, timeout=60):
history_item_selector = self.history_panel_wait_for_hid_visible(hid, timeout=timeout)
history_item_selector_state = "%s.state-%s" % (history_item_selector, state)
try:
self.wait_for_selector_visible(history_item_selector_state)
except self.TimeoutException as e:
history_item = self.driver.find_element_by_css_selector(history_item_selector)
current_state = "UNKNOWN"
classes = history_item.get_attribute("class").split(" ")
for clazz in classes:
if clazz.startswith("state-"):
current_state = clazz[len("state-"):]
template = "Failed waiting on history item %d state to change to [%s] current state [%s]. "
message = template % (hid, state, current_state)
raise self.prepend_timeout_message(e, message)
def get_logged_in_user(self):
return self.api_get("users/current")
def is_logged_in(self):
return "email" in self.get_logged_in_user()
def _get_random_name(self, prefix=None, suffix=None, len=10):
return '%s%s%s' % (
prefix or '',
''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(len)),
suffix or '',
)
def _get_random_email(self, username=None, domain=None):
username = username or 'test'
domain = domain or 'test.test'
return self._get_random_name(prefix=username, suffix="@" + domain)
def submit_login(self, email, password=None):
if password is None:
password = self.default_password
login_info = {
'login': email,
'password': password,
}
self.click_masthead_user()
self.click_label(self.navigation_data["labels"]["masthead"]["userMenu"]["login"])
with self.main_panel():
form = self.wait_for_selector(self.navigation_data["selectors"]["loginPage"]["form"])
self.fill(form, login_info)
self.click_submit(form)
def register(self, email=None, password=None, username=None, confirm=None, assert_valid=True):
if email is None:
email = self._get_random_email()
if password is None:
password = self.default_password
if confirm is None:
confirm = password
if username is None:
username = email.split("@")[0]
self.home()
self.click_masthead_user()
self.click_label(self.navigation_data["labels"]["masthead"]["userMenu"]["register"])
with self.main_panel():
register_form_id = self.navigation_data["selectors"]["registrationPage"]["form"]
form = self.wait_for_id(register_form_id)
self.fill(form, dict(
email=email,
password=password,
username=username,
confirm=confirm
))
self.click_xpath(self.navigation_data["selectors"]["registrationPage"]["submit_xpath"])
if assert_valid:
self.home()
self.click_masthead_user()
user_email_element = self.wait_for_xpath_visible(self.navigation_data["selectors"]["masthead"]["userMenu"]["userEmail_xpath"])
text = user_email_element.text
assert email in text
assert self.get_logged_in_user()["email"] == email
# Click away to hide the masthead menu
self.click_center()
def click_center(self):
action_chains = self.action_chains()
center_element = self.driver.find_element_by_css_selector("#center")
action_chains.move_to_element(center_element).click().perform()
def perform_upload(self, test_path, ext=None, genome=None, ext_all=None, genome_all=None):
self.home()
upload_button = self.wait_for_selector_clickable(".upload-button")
upload_button.click()
if ext_all is not None:
self.wait_for_selector_visible('.upload-footer-extension')
self.select2_set_value(".upload-footer-extension", ext_all)
if genome_all is not None:
self.wait_for_selector_visible('.upload-footer-genome')
self.select2_set_value(".upload-footer-genome", genome_all)
local_upload_button = self.wait_for_selector_clickable("button#btn-local")
local_upload_button.click()
file_upload = self.wait_for_selector('input[type="file"]')
file_upload.send_keys(test_path)
if ext is not None:
self.wait_for_selector_visible('.upload-extension')
self.select2_set_value(".upload-extension", ext)
if genome is not None:
self.wait_for_selector_visible('.upload-genome')
self.select2_set_value(".upload-genome", genome)
start_button = self.wait_for_selector_clickable("button#btn-start")
start_button.click()
close_button = self.wait_for_selector_clickable("button#btn-close")
close_button.click()
def workflow_index_open(self):
self.home()
self.click_masthead_workflow()
def workflow_index_table_elements(self):
self.wait_for_selector_visible(".manage-table tbody")
table_elements = self.driver.find_elements_by_css_selector(".manage-table tbody > tr")
# drop header
return table_elements[1:]
def workflow_index_click_option(self, option_title, workflow_index=0):
table_elements = self.workflow_index_table_elements()
workflow_row = table_elements[workflow_index]
workflow_button = workflow_row.find_element_by_css_selector(".menubutton")
workflow_button.click()
menu_element = self.wait_for_selector_visible(".popmenu-wrapper .dropdown-menu")
menu_options = menu_element.find_elements_by_css_selector("li a")
found_option = False
for menu_option in menu_options:
if option_title in menu_option.text:
menu_option.click()
found_option = True
break
if not found_option:
raise AssertionError("Failed to find workflow action option with title [%s]" % option_title)
def workflow_run_submit(self):
button = self.wait_for_selector(".ui-form-header button")
button.click()
def tool_open(self, tool_id):
link_element = self.wait_for_selector('a[href$="tool_runner?tool_id=%s"]' % tool_id)
link_element.click()
def tool_parameter_div(self, expanded_parameter_id):
return self.wait_for_selector("div.ui-form-element[tour_id$='%s']" % expanded_parameter_id)
def tool_set_value(self, expanded_parameter_id, value, expected_type=None, test_data_resolver=None):
div_element = self.tool_parameter_div(expanded_parameter_id)
assert div_element
if expected_type == "data":
div_selector = "div.ui-form-element[tour_id$='%s']" % expanded_parameter_id
self.select2_set_value(div_selector, value)
else:
input_element = div_element.find_element_by_css_selector("input")
# Clear default value
input_element.clear()
input_element.send_keys(value)
def tool_execute(self):
execute_button = self.wait_for_selector("button#execute")
execute_button.click()
def click_masthead_user(self):
self.click_xpath(self.navigation_data["selectors"]["masthead"]["user"])
def click_masthead_workflow(self):
self.click_xpath(self.navigation_data["selectors"]["masthead"]["workflow"])
def click_button_new_workflow(self):
self.click_selector(self.navigation_data["selectors"]["workflows"]["new_button"])
def click_history_options(self):
history_options_button_selector = self.test_data["historyOptions"]["selectors"]["button"]
history_options_element = self.wait_for_selector(history_options_button_selector)
assert history_options_element.is_displayed()
history_options_button_icon_selector = self.test_data["historyOptions"]["selectors"]["buttonIcon"]
history_options_button_icon_element = self.wait_for_selector(history_options_button_icon_selector)
assert history_options_button_icon_element.is_displayed()
history_options_element.click()
def click_history_option(self, option_label):
# Open menu
self.click_history_options()
# Click labelled option
menu_selector = self.history_options_menu_selector()
menu_element = self.wait_for_selector(menu_selector)
menu_selection_element = menu_element.find_element_by_xpath('//ul[@id="history-options-button-menu"]/li/a[text()[contains(.,"%s")]]' % option_label)
menu_selection_element.click()
def history_options_menu_selector(self):
menu_selector = self.test_data["historyOptions"]["selectors"]["menu"]
return menu_selector
@retry_during_transitions
def history_panel_refresh_click(self):
refresh_item = self.wait_for_selector_clickable("#history-refresh-button")
refresh_item.click()
def history_panel_multi_operations_selector(self):
return self.test_data["historyPanel"]["selectors"]["history"]["multiOperationsIcon"]
def history_panel_multi_operations_show(self):
operations_selector = self.history_panel_multi_operations_selector()
operations_element = self.wait_for_selector_clickable(operations_selector)
operations_element.click()
@retry_during_transitions
def history_panel_muli_operation_select_hid(self, hid):
item_selector = self.history_panel_item_selector(hid, wait=True)
operation_radio_selector = "%s .selector" % item_selector
element = self.wait_for_selector_clickable(operation_radio_selector)
element.click()
def history_panel_multi_operation_action_selector(self):
return self.test_data["historyPanel"]["selectors"]["history"]["multiOperationsActionBtn"]
def history_panel_multi_operation_action_click(self, action):
time.sleep(5)
button_element = self.wait_for_selector_clickable(self.history_panel_multi_operation_action_selector())
button_element.click()
menu_element = self.wait_for_selector_visible(".list-action-menu.open")
action_element = menu_element.find_element_by_link_text(action)
action_element.click()
def history_panel_item_selector(self, hid, wait=False):
current_history_id = self.current_history_id()
contents = self.api_get("histories/%s/contents" % current_history_id)
try:
history_item = [d for d in contents if d["hid"] == hid][0]
except IndexError:
raise Exception("Could not find history item with hid [%s] in contents [%s]" % (hid, contents))
history_item_selector = "#%s-%s" % (history_item["history_content_type"], history_item["id"])
if wait:
self.wait_for_selector_visible(history_item_selector)
return history_item_selector
def modal_body_selector(self):
return ".modal-body"
def history_panel_item_body_selector(self, hid, wait=False):
selector = "%s %s" % (self.history_panel_item_selector(hid), self.test_data["historyPanel"]["selectors"]["hda"]["body"])
if wait:
self.wait_for_selector_visible(selector)
return selector
def hda_div_selector(self, hda_id):
return "#dataset-%s" % hda_id
def hda_body_selector(self, hda_id):
return "%s %s" % (self.hda_div_selector(hda_id), self.test_data["historyPanel"]["selectors"]["hda"]["body"])
def hda_click_primary_action_button(self, hid, button_key):
self.history_panel_click_item_title(hid=hid, wait=True)
body_selector = self.history_panel_item_body_selector(hid=hid, wait=True)
buttons_selector = body_selector + " " + self.test_data["historyPanel"]["selectors"]["hda"]["primaryActionButtons"]
self.wait_for_selector_visible(buttons_selector)
button_def = self.test_data["historyPanel"]["hdaPrimaryActionButtons"][button_key]
button_selector = button_def["selector"]
button_item = self.wait_for_selector_visible("%s %s" % (buttons_selector, button_selector))
return button_item.click()
def history_panel_click_item_title(self, **kwds):
if "hda_id" in kwds:
item_selector = self.hda_div_selector(kwds["hda_id"])
else:
item_selector = self.history_panel_item_selector(kwds["hid"])
title_selector = "%s .title" % item_selector
title_element = self.wait_for_selector(title_selector)
title_element.click()
if kwds.get("wait", False):
# Find a better way to wait for transition
time.sleep(.5)
def click_hda_title(self, hda_id, wait=False):
# TODO: Replace with calls to history_panel_click_item_title.
return self.history_panel_click_item_title(hda_id=hda_id, wait=wait)
def collection_builder_set_name(self, name):
name_element = self.wait_for_selector_visible("input.collection-name")
name_element.send_keys(name)
def collection_builder_hide_originals(self):
hide_element = self.wait_for_selector_clickable("input.hide-originals")
hide_element.click()
def collection_builder_create(self):
create_element = self.wait_for_selector_clickable("button.create-collection")
create_element.click()
def | (self):
if self.is_logged_in():
self.home()
self.click_masthead_user()
self.click_label(self.navigation_data["labels"]["masthead"]["userMenu"]["logout"])
self.click_label('go to the home page')
assert not self.is_logged_in()
def run_tour(self, path, skip_steps=[], sleep_on_steps={}, tour_callback=None):
if tour_callback is None:
tour_callback = NullTourCallback()
self.home()
with open(path, "r") as f:
tour_dict = yaml.load(f)
steps = tour_dict["steps"]
for i, step in enumerate(steps):
title = step.get("title", None)
skip = False
if skip_steps:
for skip_step in skip_steps:
if title == skip_step:
skip = True
if title in sleep_on_steps:
time.sleep(sleep_on_steps[title])
if skip:
continue
self.run_tour_step(step, i, tour_callback)
def tour_wait_for_clickable_element(self, selector):
wait = self.wait()
element = wait.until(sizzle.sizzle_selector_clickable(selector))
return element
def tour_wait_for_element_present(self, selector):
wait = self.wait()
element = wait.until(sizzle.sizzle_presence_of_selector(selector))
return element
def get_tooltip_text(self, element, sleep=0, click_away=True):
tooltip_selector = self.test_data["selectors"]["tooltipBalloon"]
self.wait_for_selector_absent(tooltip_selector)
action_chains = self.action_chains()
action_chains.move_to_element(element)
action_chains.perform()
if sleep > 0:
time.sleep(sleep)
tooltip_element = self.wait_for_selector_visible(tooltip_selector)
text = tooltip_element.text
if click_away:
self.click_center()
return text
def assert_tooltip_text(self, element, expected, sleep=0, click_away=True):
text = self.get_tooltip_text(element, sleep=sleep, click_away=click_away)
assert text == expected, "Tooltip text [%s] was not expected text [%s]." % (text, expected)
def assert_error_message(self, contains=None):
return self._assert_message("error", contains=contains)
def assert_warning_message(self, contains=None):
return self._assert_message("warning", contains=contains)
def _assert_message(self, type, contains=None):
element = self.wait_for_selector(self.test_data["selectors"]["messages"][type])
assert element, "No error message found, one expected."
if contains is not None:
assert contains in element.text
def assert_no_error_message(self):
self.assert_selector_absent(self.test_data["selectors"]["messages"]["error"])
def run_tour_step(self, step, step_index, tour_callback):
preclick = step.get("preclick", [])
for preclick_selector in preclick:
print("(Pre)Clicking %s" % preclick_selector)
element = self.tour_wait_for_clickable_element(preclick_selector)
element.click()
element_str = step.get("element", None)
if element_str is not None:
print("Waiting for element %s" % element_str)
element = self.tour_wait_for_element_present(element_str)
assert element is not None
textinsert = step.get("textinsert", None)
if textinsert is not None:
element.send_keys(textinsert)
tour_callback.handle_step(step, step_index)
postclick = step.get("postclick", [])
for postclick_selector in postclick:
print("(Post)Clicking %s" % postclick_selector)
element = self.tour_wait_for_clickable_element(postclick_selector)
element.click()
def select2_set_value(self, container_selector, value, with_click=True):
# There are two hacky ways to select things from the select2 widget -
# with_click=True: This simulates the mouse click after the suggestion contains
# only the selected value.
# with_click=False: This presses enter on the selection.
# with_click=True seems to work in all situations - the enter method
# doesn't seem to work with the tool form for some reason.
container_elem = self.wait_for_selector(container_selector)
text_element = container_elem.find_element_by_css_selector("input[type='text']")
text_element.send_keys(value)
# Wait for select2 options to load and then click to add this one.
drop_elem = self.wait_for_selector_visible("#select2-drop")
# Sleep seems to be needed - at least for send_enter.
time.sleep(.5)
if not with_click:
# Press enter to accept the highlighted suggestion.
self.send_enter(text_element)
else:
select_elem = drop_elem.find_elements_by_css_selector(".select2-result-label")[0]
action_chains = self.action_chains()
action_chains.move_to_element(select_elem).click().perform()
self.wait_for_selector_absent_or_hidden("#select2-drop")
| logout_if_needed |
querying_multiple_types.rs | //!
//! ```sh
//! cargo run --example querying_multiple_types --features="diesel/sqlite"
//! ```
extern crate diesel;
extern crate diesel_dynamic_schema;
use diesel::*;
use diesel::sql_types::{Integer, Text};
use diesel::sqlite::SqliteConnection;
use diesel_dynamic_schema::table;
fn main() {
// Create a connection; we are using a simple Sqlite memory database.
let conn = SqliteConnection::establish(":memory:").unwrap();
// Create some example data by using typical SQL statements.
sql_query("CREATE TABLE users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL)").execute(&conn).unwrap();
sql_query("INSERT INTO users (name) VALUES ('Sean'), ('Tess')").execute(&conn).unwrap();
// Use diesel-dynamic-schema to create a table and a column.
let users = table("users");
let id = users.column::<Integer, _>("id");
let name = users.column::<Text, _>("name");
// Use typical Diesel syntax to get some data.
let users = users
.select((id, name))
.load::<(i32, String)>(&conn);
// Print the results.
// The `users` are type `std::result::Result<std::vec::Vec<(i32, std::string::String)>, diesel::result::Error>`
let users = users.unwrap();
for (user_id, user_name) in users {
println!("user id:{} name:{}", user_id, user_name);
}
} | //! Example: querying multiple types
//!
//! To run this: |
|
util.js | import { isNaN } from 'lodash';
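// Coerce a string to a float; fall back to 1 if parsing fails. Non-string
// values are returned unchanged.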
export function | (value) {
if (typeof value === 'string') {
const fVal = parseFloat(value);
if (typeof fVal === 'number' && !isNaN(fVal)) {
return fVal;
}
return 1;
}
return value;
}
| toFloat |
text.rs | use std::any::Any;
use gristmill::color::Color;
use gristmill::geometry2d::*;
use super::{Widget, DrawContext, Drawable, TextMetrics, font::Font};
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub enum Align {
Start,
Middle,
End,
}
impl Align {
fn position(self, outer_pos: f32, outer_size: f32, inner_size: f32) -> f32 {
match self {
Align::Start => outer_pos,
Align::Middle => outer_pos + (outer_size / 2.) - (inner_size / 2.),
Align::End => outer_pos + outer_size - inner_size,
}
}
}
pub struct Text {
font: Font,
size: f32,
color: Color,
align: (Align, Align),
text: String,
text_changed: bool,
drawable: Option<(Drawable, TextMetrics)>,
}
impl Text {
pub fn new(text: String) -> Text {
let text_changed = !text.is_empty();
Text {
font: Font::default(),
size: 14.,
color: gristmill::color::black(),
align: (Align::Start, Align::Start),
text,
text_changed,
drawable: None,
}
}
pub fn new_empty() -> Text |
pub fn set_font(&mut self, font: Font, size: f32) {
self.font = font;
self.size = size;
self.text_changed = true;
}
pub fn set_text(&mut self, text: String) {
self.text = text;
self.text_changed = true;
}
pub fn set_color(&mut self, color: Color) {
self.color = color;
}
pub fn set_alignment(&mut self, h_align: Align, v_align: Align) {
self.align = (h_align, v_align)
}
}
impl Widget for Text {
fn as_any(&mut self) -> &mut dyn Any { self }
fn draw(&mut self, context: &mut DrawContext, rect: Rect) {
if self.text_changed {
self.drawable = Some(context.new_text_drawable(self.font, self.size, &self.text));
self.text_changed = false;
}
if let Some((drawable, metrics)) = self.drawable.as_ref() {
let x = self.align.0.position(rect.position.x as f32, rect.size.width as f32, metrics.width());
let y = match self.align.1 {
// Align baseline to container bottom.
Align::End => self.align.1.position(rect.position.y as f32, rect.size.height as f32, 0.),
// Align using the full height of the text.
_ => self.align.1.position(rect.position.y as f32, rect.size.height as f32, metrics.height()) + metrics.ascent(),
};
context.draw(drawable, Rect { position: Point::nearest(x, y), size: Size::zero() }, self.color);
}
}
} | {
Text::new(String::new())
} |
runner.py | # MIT License
#
# Copyright (c) 2016 Olivier Bachem
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Map Reduce Framework for Python (Task3, DM2016)
https://project.las.ethz.ch/task3
1. The framework first loads the python source code specified in `sourcefile`.
The source should contain a `mapper` and a `reducer` function.
The `mapper(key, value)` function takes as input a (key, value) tuple where
key is None and value is a string. It should yield (key, value) pairs.
The `reducer(key, values)` function takes as input a key and a list of values.
It should yield (key, value) pairs.
The source of a word count program looks as follows:
>>> def mapper(key, value):
>>> for i in value.split():
>>> yield i, 1
>>>
>>> def reducer(key, values):
>>> yield key, sum(values)
2. Implementation details:
- Keys produced by the mapper *must* only be strings, ints or floats.
- The (key, value) pairs produced by the mapper must be picklable by cPickle
(https://docs.python.org/2/library/pickle.html).
- The (key, value) pairs produced by the reducer must be convertible to JSON
(https://docs.python.org/2/library/json.html?).
3. The training files are then used to run the example.
4. For debugging purposes, logging to STDERR can be enabled using the `--log` or `-l` flag.
(c) 2016 Olivier Bachem
"""
from collections import defaultdict
from itertools import chain, islice, izip
import argparse
import glob
import imp
import multiprocessing
import numpy as np
import os
import random
import sys
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
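# Lazily split an iterable into successive lists of at most `size` items.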
def chunks(iterable, size=10):
|
def isolated_batch_call(f, arguments):
"""Calls the function f in a separate process and returns list of results"""
def lf(q):
result_generator = f(*arguments)
result = list(result_generator)
q.put(result)
q = multiprocessing.Queue()
p = multiprocessing.Process(target=lf, args=(q, ))
p.start()
ret = q.get()
p.join()
return ret
def mapreduce(input, mapper, reducer, batch_size=50, log=False):
"""Python function that runs a worst-case map reduce framework on the provided data
Args:
input -- list or generator of (key, value) tuple
mapper -- function that takes (key, value) pair as input and returns iterable key-value pairs
reducer -- function that takes key + list of values and outputs (key, value) pair
log -- whether log messages should be generated (default: False)
Returns list of (key, value pairs)
"""
# Set initial random seed
random.seed(0)
# Run mappers
if log: logger.info("Starting mapping phase!")
d = defaultdict(list)
for pairs_generator in chunks(input, batch_size):
pairs = np.array(pairs_generator)
if log: logger.debug(" Running mapper for '%s' key with value '%s'...", k, v)
for k2, v2 in isolated_batch_call(mapper, (None, pairs)):
# for k2, v2 in mapper(None, pairs):
if log: logger.debug(" Mapper produced (%s, %s) pair...", k2, v2)
if not isinstance(k2, (basestring, int, float)):
raise Exception("Keys must be strings, ints or floats (provided '%s')!"% k2)
d[k2].append(v2)
if log: logger.info("Finished mapping phase!")
# Random permutations of both keys and values.
keys = d.keys()
random.shuffle(keys)
for k in keys:
random.shuffle(d[k])
# Run reducers
if log: logger.info("Starting reducing phase!")
res = []
if len(keys) > 1:
raise Exception("Only one distinct key expected from mappers.")
k = keys[0]
v = np.vstack(d[k])
r = isolated_batch_call(reducer, (k, v))
if log: logger.debug(" Reducer produced %s", r)
logger.info("Finished reducing phase!")
return r
def yield_pattern(path):
"""Yield lines from each file in specified folder"""
for i in glob.iglob(path):
if os.path.isfile(i):
with open(i, "r") as fin:
for line in fin:
yield None, line
def import_from_file(f):
"""Import code from the specified file"""
mod = imp.new_module("mod")
exec f in mod.__dict__
return mod
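# Mean squared distance from each point to its nearest center
# (the k-means quantization error, averaged over the test points).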
def evaluate(points, centers):
score = 0.0
for chunk in chunks(points, 20):
batch = np.array(chunk)
score += np.square(batch[:,np.newaxis,:] - centers).sum(axis=2).min(axis=1).sum()
return score / points.shape[0]
def run(sourcestring, training_file, test_file, batch, log):
mod = import_from_file(sourcestring)
training_data = np.load(training_file)
output = mapreduce(training_data, mod.mapper, mod.reducer, batch, log)
centers = np.vstack(output)
test_data = np.load(test_file)
return evaluate(test_data, centers)
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'train_file', help='File with the training instances')
parser.add_argument(
'test_file', help='File with the test instances')
parser.add_argument(
'source_file', help='.py file with mapper and reducer function')
parser.add_argument(
'--log', '-l', help='Enable logging for debugging', action='store_true')
args = parser.parse_args()
BATCH = 3000
with open(args.source_file, "r") as fin:
source = fin.read()
print run(source, args.train_file, args.test_file, BATCH, args.log)
if __name__ == "__main__":
main()
| iterator = iter(iterable)
for first in iterator:
yield list(chain([first], islice(iterator, size - 1))) |
abs_moniker.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
child_moniker::ChildMoniker, error::MonikerError,
partial_child_moniker::PartialChildMoniker, relative_moniker::RelativeMoniker,
},
core::cmp::{self, Ord, Ordering},
itertools,
std::fmt,
};
/// An absolute moniker describes the identity of a component instance in terms of its path
/// relative to the root of the component instance tree. | /// components are referenced by encoded relative moniker so as to minimize the amount of
/// information which is disclosed about the overall structure of the component instance tree.
///
/// Display notation: "/", "/name1:1", "/name1:1/name2:2", ...
#[derive(Default, Eq, PartialEq, Debug, Clone, Hash)]
pub struct AbsoluteMoniker {
path: Vec<ChildMoniker>,
}
impl AbsoluteMoniker {
pub fn new(path: Vec<ChildMoniker>) -> AbsoluteMoniker {
AbsoluteMoniker { path }
}
fn parse(path: &Vec<&str>) -> Result<Self, MonikerError> {
let path: Result<Vec<ChildMoniker>, MonikerError> =
path.iter().map(|x| ChildMoniker::parse(x)).collect();
Ok(AbsoluteMoniker::new(path?))
}
/// Parse the given string as an absolute moniker. The string should be a '/' delimited series
/// of child monikers without any instance identifiers, e.g. "/", or "/name1/name2" or
/// "/name1:collection1".
// TODO(fxbug.dev/49968): Remove instance ID 0 assumption when removing instance IDs from
// AbsoluteMoniker/ChildMoniker (and rename to parse_str + add From<&str> impl).
pub fn parse_string_without_instances(input: &str) -> Result<Self, MonikerError> {
if input.chars().nth(0) != Some('/') {
return Err(MonikerError::invalid_moniker(input));
}
if input == "/" {
return Ok(Self::root());
}
let path = input[1..]
.split('/')
.map(PartialChildMoniker::parse)
.map(|p| p.map(|ok_p| ChildMoniker::from_partial(&ok_p, 0)))
.collect::<Result<_, MonikerError>>()?;
Ok(Self::new(path))
}
// Serializes absolute moniker into its string format, omitting instance ids.
//
// This method is the inverse of `parse_string_without_instances()`.
pub fn to_string_without_instances(&self) -> String {
format!(
"/{}",
itertools::join(
(&self.path)
.into_iter()
.map(|segment: &ChildMoniker| segment.to_partial().as_str().to_string()),
"/"
)
)
}
/// Given an absolute moniker realm `start`, and a relative moniker from `start` to an `end`
/// realm, returns the absolute moniker of the `end` realm.
///
/// If an absolute moniker cannot be computed, then a MonikerError::InvalidMoniker error is
/// returned.
///
/// Example:
///
/// a
/// / \
/// b c
/// /
/// d
///
/// Given:
/// `start` = /a/c
/// `start_to_end` (c -> d) = .\c/b/d
/// Returns:
/// /a/b/d
pub fn from_relative(
start: &AbsoluteMoniker,
start_to_end: &RelativeMoniker,
) -> Result<AbsoluteMoniker, MonikerError> {
// Verify that `start.path`'s tail is of `start_to_end.up_path`.
if start_to_end.up_path().len() > start.path.len()
|| !start_to_end.up_path().iter().eq(start
.path
.iter()
.rev()
.take(start_to_end.up_path().len()))
{
return Err(MonikerError::invalid_moniker(format!("{}", start)));
}
Ok(AbsoluteMoniker::new(
start
.path
.iter()
.take(start.path.len() - start_to_end.up_path().len()) // remove the first `start_to_end.up_path` elements from `from`
.chain(start_to_end.down_path().iter()) // append the `start_to_end.down_path` elements
.cloned()
.collect(),
))
}
pub fn path(&self) -> &Vec<ChildMoniker> {
&self.path
}
/// Indicates whether `other` is contained within the realm specified by
/// this AbsoluteMoniker.
pub fn contains_in_realm(&self, other: &AbsoluteMoniker) -> bool {
if other.path.len() < self.path.len() {
return false;
}
self.path.iter().enumerate().all(|item| *item.1 == other.path[item.0])
}
pub fn root() -> AbsoluteMoniker {
AbsoluteMoniker { path: vec![] }
}
pub fn leaf(&self) -> Option<&ChildMoniker> {
self.path.last()
}
pub fn is_root(&self) -> bool {
self.path.is_empty()
}
pub fn parent(&self) -> Option<AbsoluteMoniker> {
if self.is_root() {
None
} else {
let l = self.path.len() - 1;
Some(AbsoluteMoniker { path: self.path[..l].to_vec() })
}
}
pub fn child(&self, child: ChildMoniker) -> AbsoluteMoniker {
let mut path = self.path.clone();
path.push(child);
AbsoluteMoniker { path }
}
}
impl From<Vec<&str>> for AbsoluteMoniker {
fn from(rep: Vec<&str>) -> Self {
AbsoluteMoniker::parse(&rep)
.expect(&format!("absolute moniker failed to parse: {:?}", &rep))
}
}
impl cmp::Ord for AbsoluteMoniker {
fn cmp(&self, other: &Self) -> cmp::Ordering {
let min_size = cmp::min(self.path.len(), other.path.len());
for i in 0..min_size {
if self.path[i] < other.path[i] {
return cmp::Ordering::Less;
} else if self.path[i] > other.path[i] {
return cmp::Ordering::Greater;
}
}
if self.path.len() > other.path.len() {
return cmp::Ordering::Greater;
} else if self.path.len() < other.path.len() {
return cmp::Ordering::Less;
}
return cmp::Ordering::Equal;
}
}
impl PartialOrd for AbsoluteMoniker {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl fmt::Display for AbsoluteMoniker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.path.is_empty() {
write!(f, "/")?;
} else {
for segment in &self.path {
write!(f, "/{}", segment.as_str())?
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use {super::*, anyhow::Error};
#[test]
fn absolute_monikers() {
let root = AbsoluteMoniker::root();
assert_eq!(true, root.is_root());
assert_eq!("/", format!("{}", root));
assert_eq!(root, AbsoluteMoniker::from(vec![]));
let m = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), Some("coll".to_string()), 2),
]);
assert_eq!(false, m.is_root());
assert_eq!("/a:1/coll:b:2", format!("{}", m));
assert_eq!(m, AbsoluteMoniker::from(vec!["a:1", "coll:b:2"]));
assert_eq!(m.leaf(), Some(&ChildMoniker::from("coll:b:2")));
}
#[test]
fn absolute_moniker_parent() {
let root = AbsoluteMoniker::root();
assert_eq!(true, root.is_root());
assert_eq!(None, root.parent());
let m = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
]);
assert_eq!("/a:1/b:2", format!("{}", m));
assert_eq!("/a:1", format!("{}", m.parent().unwrap()));
assert_eq!("/", format!("{}", m.parent().unwrap().parent().unwrap()));
assert_eq!(None, m.parent().unwrap().parent().unwrap().parent());
assert_eq!(m.leaf(), Some(&ChildMoniker::from("b:2")));
}
#[test]
fn absolute_moniker_compare() {
let a = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("c".to_string(), None, 3),
]);
let a2 = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 3),
ChildMoniker::new("c".to_string(), None, 3),
]);
let b = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("b".to_string(), None, 3),
]);
let c = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("c".to_string(), None, 3),
ChildMoniker::new("d".to_string(), None, 4),
]);
let d = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("c".to_string(), None, 3),
]);
assert_eq!(Ordering::Less, a.cmp(&a2));
assert_eq!(Ordering::Greater, a2.cmp(&a));
assert_eq!(Ordering::Greater, a.cmp(&b));
assert_eq!(Ordering::Less, b.cmp(&a));
assert_eq!(Ordering::Less, a.cmp(&c));
assert_eq!(Ordering::Greater, c.cmp(&a));
assert_eq!(Ordering::Equal, a.cmp(&d));
assert_eq!(Ordering::Equal, d.cmp(&a));
assert_eq!(Ordering::Less, b.cmp(&c));
assert_eq!(Ordering::Greater, c.cmp(&b));
assert_eq!(Ordering::Less, b.cmp(&d));
assert_eq!(Ordering::Greater, d.cmp(&b));
assert_eq!(Ordering::Greater, c.cmp(&d));
assert_eq!(Ordering::Less, d.cmp(&c));
}
#[test]
fn absolute_monikers_contains_in_realm() {
let root = AbsoluteMoniker::root();
let a = AbsoluteMoniker::new(vec![ChildMoniker::new("a".to_string(), None, 1)]);
let ab = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
]);
let abc = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("c".to_string(), None, 3),
]);
let abd = AbsoluteMoniker::new(vec![
ChildMoniker::new("a".to_string(), None, 1),
ChildMoniker::new("b".to_string(), None, 2),
ChildMoniker::new("d".to_string(), None, 3),
]);
assert!(root.contains_in_realm(&root));
assert!(root.contains_in_realm(&a));
assert!(root.contains_in_realm(&ab));
assert!(root.contains_in_realm(&abc));
assert!(root.contains_in_realm(&abd));
assert!(!a.contains_in_realm(&root));
assert!(a.contains_in_realm(&a));
assert!(a.contains_in_realm(&ab));
assert!(a.contains_in_realm(&abc));
assert!(a.contains_in_realm(&abd));
assert!(!ab.contains_in_realm(&root));
assert!(!ab.contains_in_realm(&a));
assert!(ab.contains_in_realm(&ab));
assert!(ab.contains_in_realm(&abc));
assert!(ab.contains_in_realm(&abd));
assert!(!abc.contains_in_realm(&root));
assert!(abc.contains_in_realm(&abc));
assert!(!abc.contains_in_realm(&a));
assert!(!abc.contains_in_realm(&ab));
assert!(!abc.contains_in_realm(&abd));
assert!(!abc.contains_in_realm(&abd));
assert!(abd.contains_in_realm(&abd));
assert!(!abd.contains_in_realm(&a));
assert!(!abd.contains_in_realm(&ab));
assert!(!abd.contains_in_realm(&abc));
}
#[test]
fn absolute_moniker_from_string_without_instance_id() -> Result<(), Error> {
let under_test = |s| AbsoluteMoniker::parse_string_without_instances(s);
assert_eq!(under_test("/")?, AbsoluteMoniker::root());
let a = ChildMoniker::new("a".to_string(), None, 0);
let bb = ChildMoniker::new("b".to_string(), Some("b".to_string()), 0);
assert_eq!(under_test("/a")?, AbsoluteMoniker::new(vec![a.clone()]));
assert_eq!(under_test("/a/b:b")?, AbsoluteMoniker::new(vec![a.clone(), bb.clone()]));
assert_eq!(
under_test("/a/b:b/a/b:b")?,
AbsoluteMoniker::new(vec![a.clone(), bb.clone(), a.clone(), bb.clone()])
);
assert!(under_test("").is_err(), "cannot be empty");
assert!(under_test("a").is_err(), "must start with root");
assert!(under_test("a/b").is_err(), "must start with root");
assert!(under_test("//").is_err(), "path segments cannot be empty");
assert!(under_test("/a/").is_err(), "path segments cannot be empty");
assert!(under_test("/a//b").is_err(), "path segments cannot be empty");
assert!(under_test("/a:a:0").is_err(), "cannot contain instance id");
Ok(())
}
#[test]
fn absolute_moniker_to_string_without_instance_id() {
assert_eq!("/", AbsoluteMoniker::root().to_string_without_instances());
let a = ChildMoniker::new("a".to_string(), None, 0);
let bb = ChildMoniker::new("b".to_string(), Some("b".to_string()), 0);
assert_eq!("/a", AbsoluteMoniker::new(vec![a.clone()]).to_string_without_instances());
assert_eq!(
"/a/b:b",
AbsoluteMoniker::new(vec![a.clone(), bb.clone()]).to_string_without_instances()
);
assert_eq!(
"/a/b:b/a/b:b",
AbsoluteMoniker::new(vec![a.clone(), bb.clone(), a.clone(), bb.clone()])
.to_string_without_instances()
);
}
} | ///
/// A root moniker is a moniker with an empty path.
///
/// Absolute monikers are only used internally within the component manager. Externally, |
cast.rs | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
use std::fmt;
use common_arrow::arrow::compute;
use common_arrow::arrow::compute::CastOptions;
use common_datavalues::DataColumnarValue;
use common_datavalues::DataSchema;
use common_datavalues::DataType;
use common_exception::Result;
use crate::function::IFunction;
/// provide Datafuse default cast options
pub const DEFAULT_DATAFUSE_CAST_OPTIONS: CastOptions = CastOptions { safe: false };
#[derive(Clone)]
pub struct CastFunction {
/// The data type to cast to
cast_type: DataType,
}
impl CastFunction {
pub fn create(cast_type: DataType) -> Box<dyn IFunction> {
Box::new(Self { cast_type })
}
}
impl IFunction for CastFunction {
fn name(&self) -> &str {
"CastFunction"
}
fn return_type(&self, _args: &[DataType]) -> Result<DataType> {
Ok(self.cast_type.clone())
}
// TODO
fn nullable(&self, _input_schema: &DataSchema) -> Result<bool> {
Ok(false)
}
fn eval(&self, columns: &[DataColumnarValue], _input_rows: usize) -> Result<DataColumnarValue> { | compute::kernels::cast::cast_with_options(
&value,
&self.cast_type,
&DEFAULT_DATAFUSE_CAST_OPTIONS,
)?,
))
}
fn num_arguments(&self) -> usize {
1
}
}
impl fmt::Display for CastFunction {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CAST")
}
} | let value = columns[0].to_array()?;
Ok(DataColumnarValue::Array( |
metrics.go | package statuspage |
||
helloworld.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: helloworld.proto
package helloworld
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The request message containing the user's name.
type HelloRequest struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HelloRequest) Reset() { *m = HelloRequest{} }
func (m *HelloRequest) String() string { return proto.CompactTextString(m) }
func (*HelloRequest) ProtoMessage() {}
func (*HelloRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_17b8c58d586b62f2, []int{0}
}
func (m *HelloRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HelloRequest.Unmarshal(m, b)
}
func (m *HelloRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HelloRequest.Marshal(b, m, deterministic)
}
func (m *HelloRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_HelloRequest.Merge(m, src)
}
func (m *HelloRequest) XXX_Size() int {
return xxx_messageInfo_HelloRequest.Size(m)
}
func (m *HelloRequest) XXX_DiscardUnknown() {
xxx_messageInfo_HelloRequest.DiscardUnknown(m)
}
var xxx_messageInfo_HelloRequest proto.InternalMessageInfo
func (m *HelloRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
// The response message containing the greetings
type HelloReply struct {
Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *HelloReply) Reset() { *m = HelloReply{} }
func (m *HelloReply) String() string { return proto.CompactTextString(m) }
func (*HelloReply) ProtoMessage() {}
func (*HelloReply) Descriptor() ([]byte, []int) {
return fileDescriptor_17b8c58d586b62f2, []int{1}
}
func (m *HelloReply) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HelloReply.Unmarshal(m, b)
}
func (m *HelloReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HelloReply.Marshal(b, m, deterministic)
}
func (m *HelloReply) XXX_Merge(src proto.Message) {
xxx_messageInfo_HelloReply.Merge(m, src)
} | return xxx_messageInfo_HelloReply.Size(m)
}
func (m *HelloReply) XXX_DiscardUnknown() {
xxx_messageInfo_HelloReply.DiscardUnknown(m)
}
var xxx_messageInfo_HelloReply proto.InternalMessageInfo
func (m *HelloReply) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
func init() {
proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest")
proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply")
}
func init() { proto.RegisterFile("helloworld.proto", fileDescriptor_17b8c58d586b62f2) }
var fileDescriptor_17b8c58d586b62f2 = []byte{
// 166 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9,
0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88,
0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42,
0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92,
0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71,
0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a,
0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64,
0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0xc9, 0x47, 0xc9, 0xa6, 0xe7,
0xe7, 0xa4, 0xe8, 0xa7, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0xeb, 0xa7, 0x17, 0x15, 0x24,
0xeb, 0x23, 0x54, 0x27, 0xb1, 0x81, 0xbd, 0x62, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x42, 0xa1,
0xa1, 0xf0, 0xde, 0x00, 0x00, 0x00,
} | func (m *HelloReply) XXX_Size() int { |
flat-btn-group.tsx | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0 | // Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
import React, { FunctionComponent, useRef } from 'react';
import { DownOutlined } from '@ant-design/icons';
import { Menu, Dropdown, Divider } from 'antd';
import './flat-btn-group.less';
interface FlatItemProps {
children?: React.ReactNode[];
showNum?: number;
}
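// Illustrative usage sketch (not in the original file): with showNum = 3 the
// first two children render inline and the remaining ones collapse into the
// "更多" ("More") dropdown. The action names below are hypothetical.
//
//   <FlatBtnGroup showNum={3}>
//     <a>Edit</a>
//     <a>Clone</a>
//     <a>Delete</a>
//     <a>Export</a>
//   </FlatBtnGroup>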
const FlatBtnGroup: FunctionComponent<FlatItemProps> = ({ showNum = 3, children = [] }) => {
let childList: React.ReactNode[] = [];
if (showNum <= 1) {
showNum = 3;
}
if (!Array.isArray(children)) {
childList.push(children);
} else {
childList = children;
}
const validChildren = childList.filter(child => !!child).flat(Infinity);
const newList = validChildren.slice(0, showNum - 1);
const dropList = validChildren.slice(showNum - 1);
const menu = (
<Menu className="flat-menu">
{dropList.map((item: any, index) => {
return (
<Menu.Item disabled={item.props.disabled} key={index}>
{item}
</Menu.Item>
);
})}
</Menu>
);
const wrap = useRef(null);
return (
<div className="flat-btn-group">
{newList.map((btn, key) => (
<span key={`flat-btn-${key}`}>
{btn}
{(key !== showNum - 1 && !(key < showNum && key === newList.length - 1)) || dropList.length ? (
<Divider type="vertical" />
) : (
<></>
)}
</span>
))}
{dropList.length ? (
<Dropdown
overlay={menu}
className="flat-btn-group"
getPopupContainer={() => {
const dom = wrap.current;
if (dom) {
return dom;
}
return document.body;
}}
>
<a className="ant-dropdown-link">
更多
<DownOutlined />
</a>
</Dropdown>
) : (
<></>
)}
</div>
);
};
export default FlatBtnGroup; | // |
path_location_strategy.js | /**
* @fileoverview added by tsickle
* @suppress {checkTypes,extraRequire,uselessCode} checked by tsc
*/
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import { Inject, Injectable, Optional } from '@angular/core';
import { Location } from './location';
import { APP_BASE_HREF, LocationStrategy } from './location_strategy';
import { PlatformLocation } from './platform_location';
/**
* \@description
* A {\@link LocationStrategy} used to configure the {\@link Location} service to
* represent its state in the
* [path](https://en.wikipedia.org/wiki/Uniform_Resource_Locator#Syntax) of the
* browser's URL.
*
* If you're using `PathLocationStrategy`, you must provide a {\@link APP_BASE_HREF}
 * or add a base element to the document. This URL prefix will be preserved
 * when generating and recognizing URLs.
*
* For instance, if you provide an `APP_BASE_HREF` of `'/my/app'` and call
* `location.go('/foo')`, the browser's URL will become
* `example.com/my/app/foo`.
*
* Similarly, if you add `<base href='/my/app'/>` to the document and call
* `location.go('/foo')`, the browser's URL will become
* `example.com/my/app/foo`.
*
* ### Example
*
* {\@example common/location/ts/path_location_component.ts region='LocationComponent'}
*
*
*/
export class PathLocationStrategy extends LocationStrategy { | */
constructor(_platformLocation, href) {
super();
this._platformLocation = _platformLocation;
if (href == null) {
href = this._platformLocation.getBaseHrefFromDOM();
}
if (href == null) {
throw new Error(`No base href set. Please provide a value for the APP_BASE_HREF token or add a base element to the document.`);
}
this._baseHref = href;
}
/**
* @param {?} fn
* @return {?}
*/
onPopState(fn) {
this._platformLocation.onPopState(fn);
this._platformLocation.onHashChange(fn);
}
/**
* @return {?}
*/
getBaseHref() { return this._baseHref; }
/**
* @param {?} internal
* @return {?}
*/
prepareExternalUrl(internal) {
return Location.joinWithSlash(this._baseHref, internal);
}
/**
* @param {?=} includeHash
* @return {?}
*/
path(includeHash = false) {
/** @type {?} */
const pathname = this._platformLocation.pathname +
Location.normalizeQueryParams(this._platformLocation.search);
/** @type {?} */
const hash = this._platformLocation.hash;
return hash && includeHash ? `${pathname}${hash}` : pathname;
}
/**
* @param {?} state
* @param {?} title
* @param {?} url
* @param {?} queryParams
* @return {?}
*/
pushState(state, title, url, queryParams) {
/** @type {?} */
const externalUrl = this.prepareExternalUrl(url + Location.normalizeQueryParams(queryParams));
this._platformLocation.pushState(state, title, externalUrl);
}
/**
* @param {?} state
* @param {?} title
* @param {?} url
* @param {?} queryParams
* @return {?}
*/
replaceState(state, title, url, queryParams) {
/** @type {?} */
const externalUrl = this.prepareExternalUrl(url + Location.normalizeQueryParams(queryParams));
this._platformLocation.replaceState(state, title, externalUrl);
}
/**
* @return {?}
*/
forward() { this._platformLocation.forward(); }
/**
* @return {?}
*/
back() { this._platformLocation.back(); }
}
PathLocationStrategy.decorators = [
{ type: Injectable }
];
/** @nocollapse */
PathLocationStrategy.ctorParameters = () => [
{ type: PlatformLocation },
{ type: String, decorators: [{ type: Optional }, { type: Inject, args: [APP_BASE_HREF,] }] }
];
if (false) {
/** @type {?} */
PathLocationStrategy.prototype._baseHref;
/** @type {?} */
PathLocationStrategy.prototype._platformLocation;
}
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoicGF0aF9sb2NhdGlvbl9zdHJhdGVneS5qcyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uLy4uLy4uLy4uLy4uLy4uLy4uL3BhY2thZ2VzL2NvbW1vbi9zcmMvbG9jYXRpb24vcGF0aF9sb2NhdGlvbl9zdHJhdGVneS50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7OztBQVFBLE9BQU8sRUFBQyxNQUFNLEVBQUUsVUFBVSxFQUFFLFFBQVEsRUFBQyxNQUFNLGVBQWUsQ0FBQztBQUczRCxPQUFPLEVBQUMsUUFBUSxFQUFDLE1BQU0sWUFBWSxDQUFDO0FBQ3BDLE9BQU8sRUFBQyxhQUFhLEVBQUUsZ0JBQWdCLEVBQUMsTUFBTSxxQkFBcUIsQ0FBQztBQUNwRSxPQUFPLEVBQXlCLGdCQUFnQixFQUFDLE1BQU0scUJBQXFCLENBQUM7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7O0FBOEI3RSxNQUFNLDJCQUE0QixTQUFRLGdCQUFnQjs7Ozs7SUFHeEQsWUFDWSxtQkFDMkIsSUFBYTtRQUNsRCxLQUFLLEVBQUUsQ0FBQztRQUZFLHNCQUFpQixHQUFqQixpQkFBaUI7UUFJM0IsSUFBSSxJQUFJLElBQUksSUFBSSxFQUFFO1lBQ2hCLElBQUksR0FBRyxJQUFJLENBQUMsaUJBQWlCLENBQUMsa0JBQWtCLEVBQUUsQ0FBQztTQUNwRDtRQUVELElBQUksSUFBSSxJQUFJLElBQUksRUFBRTtZQUNoQixNQUFNLElBQUksS0FBSyxDQUNYLDZHQUE2RyxDQUFDLENBQUM7U0FDcEg7UUFFRCxJQUFJLENBQUMsU0FBUyxHQUFHLElBQUksQ0FBQztLQUN2Qjs7Ozs7SUFFRCxVQUFVLENBQUMsRUFBMEI7UUFDbkMsSUFBSSxDQUFDLGlCQUFpQixDQUFDLFVBQVUsQ0FBQyxFQUFFLENBQUMsQ0FBQztRQUN0QyxJQUFJLENBQUMsaUJBQWlCLENBQUMsWUFBWSxDQUFDLEVBQUUsQ0FBQyxDQUFDO0tBQ3pDOzs7O0lBRUQsV0FBVyxLQUFhLE9BQU8sSUFBSSxDQUFDLFNBQVMsQ0FBQyxFQUFFOzs7OztJQUVoRCxrQkFBa0IsQ0FBQyxRQUFnQjtRQUNqQyxPQUFPLFFBQVEsQ0FBQyxhQUFhLENBQUMsSUFBSSxDQUFDLFNBQVMsRUFBRSxRQUFRLENBQUMsQ0FBQztLQUN6RDs7Ozs7SUFFRCxJQUFJLENBQUMsY0FBdUIsS0FBSzs7UUFDL0IsTUFBTSxRQUFRLEdBQUcsSUFBSSxDQUFDLGlCQUFpQixDQUFDLFFBQVE7WUFDNUMsUUFBUSxDQUFDLG9CQUFvQixDQUFDLElBQUksQ0FBQyxpQkFBaUIsQ0FBQyxNQUFNLENBQUMsQ0FBQzs7UUFDakUsTUFBTSxJQUFJLEdBQUcsSUFBSSxDQUFDLGlCQUFpQixDQUFDLElBQUksQ0FBQztRQUN6QyxPQUFPLElBQUksSUFBSSxXQUFXLENBQUMsQ0FBQyxDQUFDLEdBQUcsUUFBUSxHQUFHLElBQUksRUFBRSxDQUFDLENBQUMsQ0FBQyxRQUFRLENBQUM7S0FDOUQ7Ozs7Ozs7O0lBRUQsU0FBUyxDQUFDLEtBQVUsRUFBRSxLQUFhLEVBQUUsR0FBVyxFQUFFLFdBQW1COztRQUNuRSxNQUFNLFdBQVcsR0FBRyxJQUFJLENBQUMsa0JBQWtCLENBQUMsR0FBRyxHQUFHLFFBQVEsQ0FBQyxvQkFBb0IsQ0FBQyxXQUFXLENBQUMsQ0FBQyxDQUFDO1FBQzlGLElBQUksQ0FBQyxpQkFBaUIsQ0FBQyxTQUFTLENBQUMsS0FBSyxFQUFFLEtBQUssRUFBRSxXQUFXLENBQUMsQ0FBQztLQUM3RDs7Ozs7Ozs7SUFFRCxZQUFZLENBQUMsS0FBVSxFQUFFLEtBQWEsRUFBRSxHQUFXLEVBQUUsV0FBbUI7O1FBQ3RFLE1BQU0sV0FBVyxHQUFHLElBQUksQ0FBQyxrQkFBa0IsQ0FBQyxHQUFHLEdBQUcsUUFBUSxDQUFDLG9CQUFvQixDQUFDLFdBQVcsQ0FBQyxDQUFDLENBQUM7UUFDOUYsSUFBSSxDQUFDLGlCQUFpQixDQUFDLFlBQVksQ0FBQyxLQUFLLEVBQUUsS0FBSyxFQUFFLFdBQVcsQ0FBQyxDQUFDO0tBQ2hFOzs7O0lBRUQsT0FBTyxLQUFXLElBQUksQ0FBQyxpQkFBaUIsQ0FBQyxPQUFPLEVBQUUsQ0FBQyxFQUFFOzs7O0lBRXJELElBQUksS0FBVyxJQUFJLENBQUMsaUJBQWlCLENBQUMsSUFBSSxFQUFFLENBQUMsRUFBRTs7O1lBbkRoRCxVQUFVOzs7O1lBN0JxQixnQkFBZ0I7eUNBbUN6QyxRQUFRLFlBQUksTUFBTSxTQUFDLGFBQWEiLCJzb3VyY2VzQ29udGVudCI6WyIvKipcbiAqIEBsaWNlbnNlXG4gKiBDb3B5cmlnaHQgR29vZ2xlIEluYy4gQWxsIFJpZ2h0cyBSZXNlcnZlZC5cbiAqXG4gKiBVc2Ugb2YgdGhpcyBzb3VyY2UgY29kZSBpcyBnb3Zlcm5lZCBieSBhbiBNSVQtc3R5bGUgbGljZW5zZSB0aGF0IGNhbiBiZVxuICogZm91bmQgaW4gdGhlIExJQ0VOU0UgZmlsZSBhdCBodHRwczovL2FuZ3VsYXIuaW8vbGljZW5zZVxuICovXG5cbmltcG9ydCB7SW5qZWN0LCBJbmplY3RhYmxlLCBPcHRpb25hbH0gZnJvbSAnQGFuZ3VsYXIvY29yZSc7XG5cblxuaW1wb3J0IHtMb2NhdGlvbn0gZnJvbSAnLi9sb2NhdGlvbic7XG5pbXBvcnQge0FQUF9CQVNFX0hSRUYsIExvY2F0aW9uU3RyYXRlZ3l9IGZyb20gJy4vbG9jYXRpb25fc3RyYXRlZ3knO1xuaW1wb3J0IHtMb2NhdGlvbkNoYW5nZUxpc3RlbmVyLCBQbGF0Zm9ybUxvY2F0aW9ufSBmcm9tICcuL3BsYXRmb3JtX2xvY2F0aW9uJztcblxuXG5cbi8qKlxuICogQGRlc2NyaXB0aW9uXG4gKiBBIHtAbGluayBMb2NhdGlvblN0cmF0ZWd5fSB1c2VkIHRvIGNvbmZpZ3VyZSB0aGUge0BsaW5rIExvY2F0aW9ufSBzZXJ2aWNlIHRvXG4gKiByZXByZXNlbnQgaXRzIHN0YXRlIGluIHRoZVxuICogW3BhdGhdKGh0dHBzOi8vZW4ud2lraXBlZGlhLm9yZy93aWtpL1Vua
WZvcm1fUmVzb3VyY2VfTG9jYXRvciNTeW50YXgpIG9mIHRoZVxuICogYnJvd3NlcidzIFVSTC5cbiAqXG4gKiBJZiB5b3UncmUgdXNpbmcgYFBhdGhMb2NhdGlvblN0cmF0ZWd5YCwgeW91IG11c3QgcHJvdmlkZSBhIHtAbGluayBBUFBfQkFTRV9IUkVGfVxuICogb3IgYWRkIGEgYmFzZSBlbGVtZW50IHRvIHRoZSBkb2N1bWVudC4gVGhpcyBVUkwgcHJlZml4IHRoYXQgd2lsbCBiZSBwcmVzZXJ2ZWRcbiAqIHdoZW4gZ2VuZXJhdGluZyBhbmQgcmVjb2duaXppbmcgVVJMcy5cbiAqXG4gKiBGb3IgaW5zdGFuY2UsIGlmIHlvdSBwcm92aWRlIGFuIGBBUFBfQkFTRV9IUkVGYCBvZiBgJy9teS9hcHAnYCBhbmQgY2FsbFxuICogYGxvY2F0aW9uLmdvKCcvZm9vJylgLCB0aGUgYnJvd3NlcidzIFVSTCB3aWxsIGJlY29tZVxuICogYGV4YW1wbGUuY29tL215L2FwcC9mb29gLlxuICpcbiAqIFNpbWlsYXJseSwgaWYgeW91IGFkZCBgPGJhc2UgaHJlZj0nL215L2FwcCcvPmAgdG8gdGhlIGRvY3VtZW50IGFuZCBjYWxsXG4gKiBgbG9jYXRpb24uZ28oJy9mb28nKWAsIHRoZSBicm93c2VyJ3MgVVJMIHdpbGwgYmVjb21lXG4gKiBgZXhhbXBsZS5jb20vbXkvYXBwL2Zvb2AuXG4gKlxuICogIyMjIEV4YW1wbGVcbiAqXG4gKiB7QGV4YW1wbGUgY29tbW9uL2xvY2F0aW9uL3RzL3BhdGhfbG9jYXRpb25fY29tcG9uZW50LnRzIHJlZ2lvbj0nTG9jYXRpb25Db21wb25lbnQnfVxuICpcbiAqXG4gKi9cbkBJbmplY3RhYmxlKClcbmV4cG9ydCBjbGFzcyBQYXRoTG9jYXRpb25TdHJhdGVneSBleHRlbmRzIExvY2F0aW9uU3RyYXRlZ3kge1xuICBwcml2YXRlIF9iYXNlSHJlZjogc3RyaW5nO1xuXG4gIGNvbnN0cnVjdG9yKFxuICAgICAgcHJpdmF0ZSBfcGxhdGZvcm1Mb2NhdGlvbjogUGxhdGZvcm1Mb2NhdGlvbixcbiAgICAgIEBPcHRpb25hbCgpIEBJbmplY3QoQVBQX0JBU0VfSFJFRikgaHJlZj86IHN0cmluZykge1xuICAgIHN1cGVyKCk7XG5cbiAgICBpZiAoaHJlZiA9PSBudWxsKSB7XG4gICAgICBocmVmID0gdGhpcy5fcGxhdGZvcm1Mb2NhdGlvbi5nZXRCYXNlSHJlZkZyb21ET00oKTtcbiAgICB9XG5cbiAgICBpZiAoaHJlZiA9PSBudWxsKSB7XG4gICAgICB0aHJvdyBuZXcgRXJyb3IoXG4gICAgICAgICAgYE5vIGJhc2UgaHJlZiBzZXQuIFBsZWFzZSBwcm92aWRlIGEgdmFsdWUgZm9yIHRoZSBBUFBfQkFTRV9IUkVGIHRva2VuIG9yIGFkZCBhIGJhc2UgZWxlbWVudCB0byB0aGUgZG9jdW1lbnQuYCk7XG4gICAgfVxuXG4gICAgdGhpcy5fYmFzZUhyZWYgPSBocmVmO1xuICB9XG5cbiAgb25Qb3BTdGF0ZShmbjogTG9jYXRpb25DaGFuZ2VMaXN0ZW5lcik6IHZvaWQge1xuICAgIHRoaXMuX3BsYXRmb3JtTG9jYXRpb24ub25Qb3BTdGF0ZShmbik7XG4gICAgdGhpcy5fcGxhdGZvcm1Mb2NhdGlvbi5vbkhhc2hDaGFuZ2UoZm4pO1xuICB9XG5cbiAgZ2V0QmFzZUhyZWYoKTogc3RyaW5nIHsgcmV0dXJuIHRoaXMuX2Jhc2VIcmVmOyB9XG5cbiAgcHJlcGFyZUV4dGVybmFsVXJsKGludGVybmFsOiBzdHJpbmcpOiBzdHJpbmcge1xuICAgIHJldHVybiBMb2NhdGlvbi5qb2luV2l0aFNsYXNoKHRoaXMuX2Jhc2VIcmVmLCBpbnRlcm5hbCk7XG4gIH1cblxuICBwYXRoKGluY2x1ZGVIYXNoOiBib29sZWFuID0gZmFsc2UpOiBzdHJpbmcge1xuICAgIGNvbnN0IHBhdGhuYW1lID0gdGhpcy5fcGxhdGZvcm1Mb2NhdGlvbi5wYXRobmFtZSArXG4gICAgICAgIExvY2F0aW9uLm5vcm1hbGl6ZVF1ZXJ5UGFyYW1zKHRoaXMuX3BsYXRmb3JtTG9jYXRpb24uc2VhcmNoKTtcbiAgICBjb25zdCBoYXNoID0gdGhpcy5fcGxhdGZvcm1Mb2NhdGlvbi5oYXNoO1xuICAgIHJldHVybiBoYXNoICYmIGluY2x1ZGVIYXNoID8gYCR7cGF0aG5hbWV9JHtoYXNofWAgOiBwYXRobmFtZTtcbiAgfVxuXG4gIHB1c2hTdGF0ZShzdGF0ZTogYW55LCB0aXRsZTogc3RyaW5nLCB1cmw6IHN0cmluZywgcXVlcnlQYXJhbXM6IHN0cmluZykge1xuICAgIGNvbnN0IGV4dGVybmFsVXJsID0gdGhpcy5wcmVwYXJlRXh0ZXJuYWxVcmwodXJsICsgTG9jYXRpb24ubm9ybWFsaXplUXVlcnlQYXJhbXMocXVlcnlQYXJhbXMpKTtcbiAgICB0aGlzLl9wbGF0Zm9ybUxvY2F0aW9uLnB1c2hTdGF0ZShzdGF0ZSwgdGl0bGUsIGV4dGVybmFsVXJsKTtcbiAgfVxuXG4gIHJlcGxhY2VTdGF0ZShzdGF0ZTogYW55LCB0aXRsZTogc3RyaW5nLCB1cmw6IHN0cmluZywgcXVlcnlQYXJhbXM6IHN0cmluZykge1xuICAgIGNvbnN0IGV4dGVybmFsVXJsID0gdGhpcy5wcmVwYXJlRXh0ZXJuYWxVcmwodXJsICsgTG9jYXRpb24ubm9ybWFsaXplUXVlcnlQYXJhbXMocXVlcnlQYXJhbXMpKTtcbiAgICB0aGlzLl9wbGF0Zm9ybUxvY2F0aW9uLnJlcGxhY2VTdGF0ZShzdGF0ZSwgdGl0bGUsIGV4dGVybmFsVXJsKTtcbiAgfVxuXG4gIGZvcndhcmQoKTogdm9pZCB7IHRoaXMuX3BsYXRmb3JtTG9jYXRpb24uZm9yd2FyZCgpOyB9XG5cbiAgYmFjaygpOiB2b2lkIHsgdGhpcy5fcGxhdGZvcm1Mb2NhdGlvbi5iYWNrKCk7IH1cbn1cbiJdfQ== | /**
* @param {?} _platformLocation
* @param {?=} href |
http.rs | //! Simple HTTP implementation which supports both async and traditional execution environments
//! with minimal dependencies. This is used as the basis for REST and RPC clients.
use chunked_transfer;
use serde_json;
use std::convert::TryFrom;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::net::ToSocketAddrs;
use std::time::Duration;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
#[cfg(feature = "tokio")]
use tokio::net::TcpStream;
#[cfg(not(feature = "tokio"))]
use std::io::BufRead;
use std::io::Read;
#[cfg(not(feature = "tokio"))]
use std::net::TcpStream;
/// Timeout for operations on TCP streams.
const TCP_STREAM_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum HTTP message header size in bytes.
const MAX_HTTP_MESSAGE_HEADER_SIZE: usize = 8192;
/// Maximum HTTP message body size in bytes. Enough for a hex-encoded block in JSON format and any
/// overhead for HTTP chunked transfer encoding.
const MAX_HTTP_MESSAGE_BODY_SIZE: usize = 2 * 4_000_000 + 32_000;
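// (Sketch of the arithmetic behind the constant, added for clarity: a
// serialized block of roughly 4,000,000 bytes hex-encodes to twice as many
// ASCII characters, and the extra 32,000 bytes leave headroom for JSON
// quoting and chunked-transfer framing.)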
/// Endpoint for interacting with an HTTP-based API.
#[derive(Debug)]
pub struct HttpEndpoint {
host: String,
port: Option<u16>,
path: String,
}
impl HttpEndpoint {
/// Creates an endpoint for the given host and default HTTP port.
pub fn for_host(host: String) -> Self {
Self {
host,
port: None,
path: String::from("/"),
}
}
/// Specifies a port to use with the endpoint.
pub fn with_port(mut self, port: u16) -> Self {
self.port = Some(port);
self
}
/// Specifies a path to use with the endpoint.
pub fn with_path(mut self, path: String) -> Self {
self.path = path;
self
}
/// Returns the endpoint host.
pub fn host(&self) -> &str {
&self.host
}
/// Returns the endpoint port.
pub fn port(&self) -> u16 {
match self.port {
None => 80,
Some(port) => port,
}
}
/// Returns the endpoint path.
pub fn path(&self) -> &str {
&self.path
}
}
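// Hedged usage sketch (not part of the original file): the builder-style
// methods chain, so an endpoint for http://example.org:8080/status could be
// assembled as follows (host, port, and path are made up for illustration):
//
//     let endpoint = HttpEndpoint::for_host("example.org".to_string())
//         .with_port(8080)
//         .with_path("/status".to_string());
//     assert_eq!(endpoint.port(), 8080);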
impl<'a> std::net::ToSocketAddrs for &'a HttpEndpoint {
type Iter = <(&'a str, u16) as std::net::ToSocketAddrs>::Iter;
fn to_socket_addrs(&self) -> std::io::Result<Self::Iter> {
(self.host(), self.port()).to_socket_addrs()
}
}
/// Client for making HTTP requests.
pub(crate) struct HttpClient {
stream: TcpStream,
}
impl HttpClient {
/// Opens a connection to an HTTP endpoint.
pub fn connect<E: ToSocketAddrs>(endpoint: E) -> std::io::Result<Self> {
let address = match endpoint.to_socket_addrs()?.next() {
None => {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "could not resolve to any addresses"));
},
Some(address) => address,
};
let stream = std::net::TcpStream::connect_timeout(&address, TCP_STREAM_TIMEOUT)?;
stream.set_read_timeout(Some(TCP_STREAM_TIMEOUT))?;
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT))?;
#[cfg(feature = "tokio")]
let stream = {
stream.set_nonblocking(true)?;
TcpStream::from_std(stream)?
};
Ok(Self { stream })
}
/// Sends a `GET` request for a resource identified by `uri` at the `host`.
///
/// Returns the response body in `F` format.
#[allow(dead_code)]
pub async fn get<F>(&mut self, uri: &str, host: &str) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let request = format!(
"GET {} HTTP/1.1\r\n\
Host: {}\r\n\
Connection: keep-alive\r\n\
\r\n", uri, host);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends a `POST` request for a resource identified by `uri` at the `host` using the given HTTP
/// authentication credentials.
///
/// The request body consists of the provided JSON `content`. Returns the response body in `F`
/// format.
#[allow(dead_code)]
pub async fn post<F>(&mut self, uri: &str, host: &str, auth: &str, content: serde_json::Value) -> std::io::Result<F>
where F: TryFrom<Vec<u8>, Error = std::io::Error> {
let content = content.to_string();
let request = format!(
"POST {} HTTP/1.1\r\n\
Host: {}\r\n\
Authorization: {}\r\n\
Connection: keep-alive\r\n\
Content-Type: application/json\r\n\
Content-Length: {}\r\n\
\r\n\
{}", uri, host, auth, content.len(), content);
let response_body = self.send_request_with_retry(&request).await?;
F::try_from(response_body)
}
/// Sends an HTTP request message and reads the response, returning its body. Attempts to
/// reconnect and retry if the connection has been closed.
async fn send_request_with_retry(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
let endpoint = self.stream.peer_addr().unwrap();
match self.send_request(request).await {
Ok(bytes) => Ok(bytes),
Err(e) => match e.kind() {
std::io::ErrorKind::ConnectionReset |
std::io::ErrorKind::ConnectionAborted |
std::io::ErrorKind::UnexpectedEof => {
// Reconnect if the connection was closed. This may happen if the server's
// keep-alive limits are reached.
*self = Self::connect(endpoint)?;
self.send_request(request).await
},
_ => Err(e),
},
}
}
/// Sends an HTTP request message and reads the response, returning its body.
async fn send_request(&mut self, request: &str) -> std::io::Result<Vec<u8>> {
self.write_request(request).await?;
self.read_response().await
}
/// Writes an HTTP request message.
async fn write_request(&mut self, request: &str) -> std::io::Result<()> {
#[cfg(feature = "tokio")]
{
self.stream.write_all(request.as_bytes()).await?;
self.stream.flush().await
}
#[cfg(not(feature = "tokio"))]
{
self.stream.write_all(request.as_bytes())?;
self.stream.flush()
}
}
/// Reads an HTTP response message.
async fn read_response(&mut self) -> std::io::Result<Vec<u8>> {
#[cfg(feature = "tokio")]
let stream = self.stream.split().0;
#[cfg(not(feature = "tokio"))]
let stream = std::io::Read::by_ref(&mut self.stream);
let limited_stream = stream.take(MAX_HTTP_MESSAGE_HEADER_SIZE as u64);
#[cfg(feature = "tokio")]
let mut reader = tokio::io::BufReader::new(limited_stream);
#[cfg(not(feature = "tokio"))]
let mut reader = std::io::BufReader::new(limited_stream);
macro_rules! read_line { () => { {
let mut line = String::new();
#[cfg(feature = "tokio")]
let bytes_read = reader.read_line(&mut line).await?;
#[cfg(not(feature = "tokio"))]
let bytes_read = reader.read_line(&mut line)?;
match bytes_read {
0 => None,
_ => {
// Remove trailing CRLF
if line.ends_with('\n') { line.pop(); if line.ends_with('\r') { line.pop(); } }
Some(line)
},
}
} } }
// Read and parse status line
let status_line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no status line"))?;
let status = HttpStatus::parse(&status_line)?;
// Read and parse relevant headers
let mut message_length = HttpMessageLength::Empty;
loop {
let line = read_line!()
.ok_or(std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "no headers"))?;
if line.is_empty() { break; }
let header = HttpHeader::parse(&line)?;
if header.has_name("Content-Length") {
let length = header.value.parse()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?;
if let HttpMessageLength::Empty = message_length {
message_length = HttpMessageLength::ContentLength(length);
}
continue;
}
if header.has_name("Transfer-Encoding") {
message_length = HttpMessageLength::TransferEncoding(header.value.into());
continue;
}
}
if !status.is_ok() {
// TODO: Handle 3xx redirection responses.
return Err(std::io::Error::new(std::io::ErrorKind::NotFound, "not found"));
}
// Read message body
let read_limit = MAX_HTTP_MESSAGE_BODY_SIZE - reader.buffer().len();
reader.get_mut().set_limit(read_limit as u64);
match message_length {
HttpMessageLength::Empty => { Ok(Vec::new()) },
HttpMessageLength::ContentLength(length) => {
if length == 0 || length > MAX_HTTP_MESSAGE_BODY_SIZE {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "out of range"))
} else {
let mut content = vec![0; length];
#[cfg(feature = "tokio")]
reader.read_exact(&mut content[..]).await?;
#[cfg(not(feature = "tokio"))]
reader.read_exact(&mut content[..])?;
Ok(content)
}
},
HttpMessageLength::TransferEncoding(coding) => {
if !coding.eq_ignore_ascii_case("chunked") {
Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "unsupported transfer coding"))
} else {
let mut content = Vec::new();
#[cfg(feature = "tokio")]
{
// Since chunked_transfer doesn't have an async interface, only use it to
// determine the size of each chunk to read.
//
// TODO: Replace with an async interface when available.
// https://github.com/frewsxcv/rust-chunked-transfer/issues/7
loop {
// Read the chunk header which contains the chunk size.
let mut chunk_header = String::new();
reader.read_line(&mut chunk_header).await?;
if chunk_header == "0\r\n" {
// Read the terminator chunk since the decoder consumes the CRLF
// immediately when this chunk is encountered.
reader.read_line(&mut chunk_header).await?;
}
// Decode the chunk header to obtain the chunk size.
let mut buffer = Vec::new();
let mut decoder = chunked_transfer::Decoder::new(chunk_header.as_bytes());
decoder.read_to_end(&mut buffer)?;
// Read the chunk body.
let chunk_size = match decoder.remaining_chunks_size() {
None => break,
Some(chunk_size) => chunk_size,
};
let chunk_offset = content.len();
content.resize(chunk_offset + chunk_size + "\r\n".len(), 0);
reader.read_exact(&mut content[chunk_offset..]).await?;
content.resize(chunk_offset + chunk_size, 0);
}
Ok(content)
}
#[cfg(not(feature = "tokio"))]
{
let mut decoder = chunked_transfer::Decoder::new(reader);
decoder.read_to_end(&mut content)?;
Ok(content)
}
}
},
}
}
}
/// HTTP response status code as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-6
struct HttpStatus<'a> {
code: &'a str,
}
impl<'a> HttpStatus<'a> {
/// Parses an HTTP status line as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.1.2
fn parse(line: &'a String) -> std::io::Result<HttpStatus<'a>> {
let mut tokens = line.splitn(3, ' ');
let http_version = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no HTTP-Version"))?;
if !http_version.eq_ignore_ascii_case("HTTP/1.1") &&
!http_version.eq_ignore_ascii_case("HTTP/1.0") {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid HTTP-Version"));
}
let code = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Status-Code"))?;
if code.len() != 3 || !code.chars().all(|c| c.is_ascii_digit()) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "invalid Status-Code"));
}
let _reason = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no Reason-Phrase"))?;
Ok(Self { code })
}
/// Returns whether the status is successful (i.e., 2xx status class).
fn is_ok(&self) -> bool {
self.code.starts_with('2')
}
}
/// HTTP response header as defined by [RFC 7231].
///
/// [RFC 7231]: https://tools.ietf.org/html/rfc7231#section-7
struct HttpHeader<'a> {
name: &'a str,
value: &'a str,
}
impl<'a> HttpHeader<'a> {
/// Parses an HTTP header field as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.2
fn parse(line: &'a String) -> std::io::Result<HttpHeader<'a>> {
let mut tokens = line.splitn(2, ':');
let name = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header name"))?;
let value = tokens.next()
.ok_or(std::io::Error::new(std::io::ErrorKind::InvalidData, "no header value"))?
.trim_start();
Ok(Self { name, value })
}
/// Returns whether the header field has the given name.
fn has_name(&self, name: &str) -> bool {
self.name.eq_ignore_ascii_case(name)
}
}
/// HTTP message body length as defined by [RFC 7230].
///
/// [RFC 7230]: https://tools.ietf.org/html/rfc7230#section-3.3.3
enum HttpMessageLength {
Empty,
ContentLength(usize),
TransferEncoding(String),
}
/// An HTTP response body in binary format.
pub struct BinaryResponse(pub Vec<u8>);
/// An HTTP response body in JSON format.
pub struct JsonResponse(pub serde_json::Value);
/// Interprets bytes from an HTTP response body as binary data.
impl TryFrom<Vec<u8>> for BinaryResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(BinaryResponse(bytes))
}
}
/// Interprets bytes from an HTTP response body as a JSON value.
impl TryFrom<Vec<u8>> for JsonResponse {
type Error = std::io::Error;
fn try_from(bytes: Vec<u8>) -> std::io::Result<Self> {
Ok(JsonResponse(serde_json::from_slice(&bytes)?))
}
}
#[cfg(test)]
mod endpoint_tests {
use super::HttpEndpoint;
#[test]
fn with_default_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 80);
}
#[test]
fn with_custom_port() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_port(8080);
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.port(), 8080);
}
#[test]
fn with_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into()).with_path("/path".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/path");
}
#[test]
fn without_uri_path() {
let endpoint = HttpEndpoint::for_host("foo.com".into());
assert_eq!(endpoint.host(), "foo.com");
assert_eq!(endpoint.path(), "/");
}
#[test]
fn convert_to_socket_addrs() |
}
#[cfg(test)]
pub(crate) mod client_tests {
use super::*;
use std::io::BufRead;
use std::io::Write;
/// Server for handling HTTP client requests with a stock response.
pub struct HttpServer {
address: std::net::SocketAddr,
handler: std::thread::JoinHandle<()>,
shutdown: std::sync::Arc<std::sync::atomic::AtomicBool>,
}
/// Body of HTTP response messages.
pub enum MessageBody<T: ToString> {
Empty,
Content(T),
ChunkedContent(T),
}
impl HttpServer {
pub fn responding_with_ok<T: ToString>(body: MessageBody<T>) -> Self {
let response = match body {
MessageBody::Empty => "HTTP/1.1 200 OK\r\n\r\n".to_string(),
MessageBody::Content(body) => {
let body = body.to_string();
format!(
"HTTP/1.1 200 OK\r\n\
Content-Length: {}\r\n\
\r\n\
{}", body.len(), body)
},
MessageBody::ChunkedContent(body) => {
                    let mut chunked_body = Vec::new();
{
use chunked_transfer::Encoder;
                        let mut encoder = Encoder::with_chunks_size(&mut chunked_body, 8);
encoder.write_all(body.to_string().as_bytes()).unwrap();
}
format!(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
                        {}", String::from_utf8(chunked_body).unwrap())
},
};
HttpServer::responding_with(response)
}
pub fn responding_with_not_found() -> Self {
let response = "HTTP/1.1 404 Not Found\r\n\r\n".to_string();
HttpServer::responding_with(response)
}
fn responding_with(response: String) -> Self {
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
let address = listener.local_addr().unwrap();
let shutdown = std::sync::Arc::new(std::sync::atomic::AtomicBool::new(false));
let shutdown_signaled = std::sync::Arc::clone(&shutdown);
let handler = std::thread::spawn(move || {
for stream in listener.incoming() {
let mut stream = stream.unwrap();
stream.set_write_timeout(Some(TCP_STREAM_TIMEOUT)).unwrap();
let lines_read = std::io::BufReader::new(&stream)
.lines()
.take_while(|line| !line.as_ref().unwrap().is_empty())
.count();
if lines_read == 0 { continue; }
for chunk in response.as_bytes().chunks(16) {
if shutdown_signaled.load(std::sync::atomic::Ordering::SeqCst) {
return;
} else {
if let Err(_) = stream.write(chunk) { break; }
if let Err(_) = stream.flush() { break; }
}
}
}
});
Self { address, handler, shutdown }
}
fn shutdown(self) {
self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst);
self.handler.join().unwrap();
}
pub fn endpoint(&self) -> HttpEndpoint {
HttpEndpoint::for_host(self.address.ip().to_string()).with_port(self.address.port())
}
}
#[test]
fn connect_to_unresolvable_host() {
match HttpClient::connect(("example.invalid", 80)) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::Other),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_no_socket_address() {
match HttpClient::connect(&vec![][..]) {
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput),
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn connect_with_unknown_server() {
match HttpClient::connect(("::", 80)) {
#[cfg(target_os = "windows")]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::AddrNotAvailable),
#[cfg(not(target_os = "windows"))]
Err(e) => assert_eq!(e.kind(), std::io::ErrorKind::ConnectionRefused),
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn connect_with_valid_endpoint() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
match HttpClient::connect(&server.endpoint()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => {},
}
}
#[tokio::test]
async fn read_empty_message() {
let server = HttpServer::responding_with("".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no status line");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_incomplete_message() {
let server = HttpServer::responding_with("HTTP/1.1 200 OK".to_string());
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_headers() {
let response = format!(
"HTTP/1.1 302 Found\r\n\
Location: {}\r\n\
\r\n", "Z".repeat(MAX_HTTP_MESSAGE_HEADER_SIZE));
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::UnexpectedEof);
assert_eq!(e.get_ref().unwrap().to_string(), "no headers");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_too_large_message_body() {
let body = "Z".repeat(MAX_HTTP_MESSAGE_BODY_SIZE + 1);
let server = HttpServer::responding_with_ok::<String>(MessageBody::Content(body));
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
assert_eq!(e.get_ref().unwrap().to_string(), "out of range");
},
Ok(_) => panic!("Expected error"),
}
server.shutdown();
}
#[tokio::test]
async fn read_message_with_unsupported_transfer_coding() {
let response = String::from(
"HTTP/1.1 200 OK\r\n\
Transfer-Encoding: gzip\r\n\
\r\n\
foobar");
let server = HttpServer::responding_with(response);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => {
assert_eq!(e.kind(), std::io::ErrorKind::InvalidInput);
assert_eq!(e.get_ref().unwrap().to_string(), "unsupported transfer coding");
},
Ok(_) => panic!("Expected error"),
}
}
#[tokio::test]
async fn read_empty_message_body() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[tokio::test]
async fn read_message_body_with_length() {
let body = "foo bar baz qux".repeat(32);
let content = MessageBody::Content(body.clone());
let server = HttpServer::responding_with_ok::<String>(content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn read_chunked_message_body() {
let body = "foo bar baz qux".repeat(32);
let chunked_content = MessageBody::ChunkedContent(body.clone());
let server = HttpServer::responding_with_ok::<String>(chunked_content);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, body.as_bytes()),
}
}
#[tokio::test]
async fn reconnect_closed_connection() {
let server = HttpServer::responding_with_ok::<String>(MessageBody::Empty);
let mut client = HttpClient::connect(&server.endpoint()).unwrap();
assert!(client.get::<BinaryResponse>("/foo", "foo.com").await.is_ok());
match client.get::<BinaryResponse>("/foo", "foo.com").await {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(bytes) => assert_eq!(bytes.0, Vec::<u8>::new()),
}
}
#[test]
fn from_bytes_into_binary_response() {
let bytes = b"foo";
match BinaryResponse::try_from(bytes.to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(&response.0, bytes),
}
}
#[test]
fn from_invalid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes()[..5].to_vec()) {
Err(_) => {},
Ok(_) => panic!("Expected error"),
}
}
#[test]
fn from_valid_bytes_into_json_response() {
let json = serde_json::json!({ "result": 42 });
match JsonResponse::try_from(json.to_string().as_bytes().to_vec()) {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(response) => assert_eq!(response.0, json),
}
}
}
| {
let endpoint = HttpEndpoint::for_host("foo.com".into());
let host = endpoint.host();
let port = endpoint.port();
use std::net::ToSocketAddrs;
match (&endpoint).to_socket_addrs() {
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(mut socket_addrs) => {
match socket_addrs.next() {
None => panic!("Expected socket address"),
Some(addr) => {
assert_eq!(addr, (host, port).to_socket_addrs().unwrap().next().unwrap());
assert!(socket_addrs.next().is_none());
}
}
}
}
} |
BaseStorage.py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Storage base class that is mostly a mistake
The base class here is tightly coupled with its subclasses and
its use is not recommended. It's still here for historical reasons.
"""
from __future__ import print_function
import time
import logging
from struct import pack as _structpack, unpack as _structunpack
import zope.interface
from persistent.TimeStamp import TimeStamp
import ZODB.interfaces
from . import POSException, utils
from .Connection import TransactionMetaData
from .utils import z64, oid_repr, byte_ord, byte_chr, load_current
from .UndoLogCompatible import UndoLogCompatible
from ._compat import py2_hasattr
log = logging.getLogger("ZODB.BaseStorage")
class BaseStorage(UndoLogCompatible):
"""Base class that supports storage implementations.
XXX Base classes like this are an attractive nuisance. They often
introduce more complexity than they save. While important logic
is implemented here, we should consider exposing it as utility
functions or as objects that can be used through composition.
A subclass must define the following methods:
load()
store()
close()
cleanup()
lastTransaction()
It must override these hooks:
_begin()
_vote()
_abort()
_finish()
_clear_temp()
If it stores multiple revisions, it should implement
loadSerial()
loadBefore()
Each storage will have two locks that are accessed via lock
acquire and release methods bound to the instance. (Yuck.)
_lock_acquire / _lock_release (reentrant)
_commit_lock_acquire / _commit_lock_release
The commit lock is acquired in tpc_begin() and released in
tpc_abort() and tpc_finish(). It is never acquired with the other
lock held.
The other lock appears to protect _oid and _transaction and
perhaps other things. It is always held when load() is called, so
presumably the load() implementation should also acquire the lock.
"""
_transaction = None # Transaction that is being committed
_tstatus = ' ' # Transaction status, used for copying data
_is_read_only = False
def __init__(self, name, base=None):
self.__name__ = name
log.debug("create storage %s", self.__name__)
# Allocate locks:
self._lock = utils.RLock()
self._commit_lock = utils.Lock()
# Needed by external storages that use this dumb api :(
self._lock_acquire = self._lock.acquire
self._lock_release = self._lock.release
self._commit_lock_acquire = self._commit_lock.acquire
self._commit_lock_release = self._commit_lock.release
t = time.time()
t = self._ts = TimeStamp(*(time.gmtime(t)[:5] + (t % 60,)))
self._tid = t.raw()
# ._oid is the highest oid in use (0 is always in use -- it's
# a reserved oid for the root object). Our new_oid() method
# increments it by 1, and returns the result. It's really a
# 64-bit integer stored as an 8-byte big-endian string.
oid = getattr(base, '_oid', None)
if oid is None:
self._oid = z64
else:
self._oid = oid
# In case that conflicts are resolved during store,
# this collects oids to be returned by tpc_vote.
self._resolved = []
def sortKey(self):
"""Return a string that can be used to sort storage instances.
The key must uniquely identify a storage and must be the same
across multiple instantiations of the same storage.
"""
# name may not be sufficient, e.g. ZEO has a user-definable name.
return self.__name__
def getName(self):
return self.__name__
def getSize(self):
|
def history(self, oid, version, length=1, filter=None):
return ()
def new_oid(self):
if self._is_read_only:
raise POSException.ReadOnlyError()
with self._lock:
last = self._oid
d = byte_ord(last[-1])
if d < 255: # fast path for the usual case
last = last[:-1] + byte_chr(d+1)
else: # there's a carry out of the last byte
last_as_long, = _structunpack(">Q", last)
last = _structpack(">Q", last_as_long + 1)
self._oid = last
return last
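    # Illustrative sketch of new_oid (not in the original module): starting
    # from the reserved root oid b'\x00' * 8, successive calls return
    # b'\x00' * 7 + b'\x01', b'\x00' * 7 + b'\x02', and so on; once the low
    # byte reaches 255 the whole 8-byte value is unpacked as a big-endian
    # 64-bit integer, incremented, and repacked.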
# Update the maximum oid in use, under protection of a lock. The
# maximum-in-use attribute is changed only if possible_new_max_oid is
# larger than its current value.
def set_max_oid(self, possible_new_max_oid):
with self._lock:
if possible_new_max_oid > self._oid:
self._oid = possible_new_max_oid
def registerDB(self, db):
pass # we don't care
def isReadOnly(self):
return self._is_read_only
def tpc_abort(self, transaction):
with self._lock:
if transaction is not self._transaction:
return
try:
self._abort()
self._clear_temp()
self._transaction = None
finally:
self._commit_lock_release()
def _abort(self):
"""Subclasses should redefine this to supply abort actions"""
pass
def tpc_begin(self, transaction, tid=None, status=' '):
if self._is_read_only:
raise POSException.ReadOnlyError()
with self._lock:
if self._transaction is transaction:
raise POSException.StorageTransactionError(
"Duplicate tpc_begin calls for same transaction")
self._commit_lock.acquire()
with self._lock:
self._transaction = transaction
self._clear_temp()
user = transaction.user
desc = transaction.description
ext = transaction.extension_bytes
self._ude = user, desc, ext
if tid is None:
now = time.time()
t = TimeStamp(*(time.gmtime(now)[:5] + (now % 60,)))
self._ts = t = t.laterThan(self._ts)
self._tid = t.raw()
else:
self._ts = TimeStamp(tid)
self._tid = tid
del self._resolved[:]
self._tstatus = status
self._begin(self._tid, user, desc, ext)
def tpc_transaction(self):
return self._transaction
def _begin(self, tid, u, d, e):
"""Subclasses should redefine this to supply transaction start actions.
"""
pass
def tpc_vote(self, transaction):
with self._lock:
if transaction is not self._transaction:
raise POSException.StorageTransactionError(
"tpc_vote called with wrong transaction")
return self._vote()
def _vote(self):
"""Subclasses should redefine this to supply transaction vote actions.
"""
return self._resolved
def tpc_finish(self, transaction, f=None):
# It's important that the storage calls the function we pass
# while it still has its lock. We don't want another thread
# to be able to read any updated data until we've had a chance
# to send an invalidation message to all of the other
# connections!
with self._lock:
if transaction is not self._transaction:
raise POSException.StorageTransactionError(
"tpc_finish called with wrong transaction")
try:
if f is not None:
f(self._tid)
u, d, e = self._ude
self._finish(self._tid, u, d, e)
self._clear_temp()
finally:
self._ude = None
self._transaction = None
self._commit_lock.release()
return self._tid
def _finish(self, tid, u, d, e):
"""Subclasses should redefine this to supply transaction finish actions
"""
pass
def lastTransaction(self):
with self._lock:
return self._ltid
def getTid(self, oid):
with self._lock:
return load_current(self, oid)[1]
def loadSerial(self, oid, serial):
raise POSException.Unsupported(
"Retrieval of historical revisions is not supported")
def loadBefore(self, oid, tid):
"""Return most recent revision of oid before tid committed."""
return None
def copyTransactionsFrom(self, other, verbose=0):
"""Copy transactions from another storage.
This is typically used for converting data from one storage to
another. `other` must have an .iterator() method.
"""
copy(other, self, verbose)
def copy(source, dest, verbose=0):
"""Copy transactions from a source to a destination storage
This is typically used for converting data from one storage to
another. `source` must have an .iterator() method.
"""
_ts = None
ok = 1
preindex = {}
preget = preindex.get
# restore() is a new storage API method which has an identical
# signature to store() except that it does not return anything.
# Semantically, restore() is also identical to store() except that it
# doesn't do the ConflictError or VersionLockError consistency
# checks. The reason to use restore() over store() in this method is
# that store() cannot be used to copy transactions spanning a version
# commit or abort, or over transactional undos.
#
# We'll use restore() if it's available, otherwise we'll fall back to
# using store(). However, if we use store, then
# copyTransactionsFrom() may fail with VersionLockError or
# ConflictError.
restoring = py2_hasattr(dest, 'restore')
fiter = source.iterator()
for transaction in fiter:
tid = transaction.tid
if _ts is None:
_ts = TimeStamp(tid)
else:
t = TimeStamp(tid)
if t <= _ts:
if ok:
print(('Time stamps out of order %s, %s' % (_ts, t)))
ok = 0
_ts = t.laterThan(_ts)
tid = _ts.raw()
else:
_ts = t
if not ok:
print(('Time stamps back in order %s' % (t)))
ok = 1
if verbose:
print(_ts)
dest.tpc_begin(transaction, tid, transaction.status)
for r in transaction:
oid = r.oid
if verbose:
print(oid_repr(oid), r.version, len(r.data))
if restoring:
dest.restore(oid, r.tid, r.data, r.version,
r.data_txn, transaction)
else:
pre = preget(oid, None)
dest.store(oid, pre, r.data, r.version, transaction)
preindex[oid] = tid
dest.tpc_vote(transaction)
dest.tpc_finish(transaction)
# defined outside of BaseStorage to facilitate independent reuse.
# just depends on _transaction attr and getTid method.
def checkCurrentSerialInTransaction(self, oid, serial, transaction):
if transaction is not self._transaction:
raise POSException.StorageTransactionError(self, transaction)
committed_tid = self.getTid(oid)
if committed_tid != serial:
raise POSException.ReadConflictError(
oid=oid, serials=(committed_tid, serial))
BaseStorage.checkCurrentSerialInTransaction = checkCurrentSerialInTransaction
@zope.interface.implementer(ZODB.interfaces.IStorageTransactionInformation)
class TransactionRecord(TransactionMetaData):
"""Abstract base class for iterator protocol"""
def __init__(self, tid, status, user, description, extension):
self.tid = tid
self.status = status
TransactionMetaData.__init__(self, user, description, extension)
@zope.interface.implementer(ZODB.interfaces.IStorageRecordInformation)
class DataRecord(object):
"""Abstract base class for iterator protocol"""
version = ''
def __init__(self, oid, tid, data, prev):
self.oid = oid
self.tid = tid
self.data = data
self.data_txn = prev
| return len(self)*300 # WAG! |
crawl.go | // Copyright 2013 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"regexp"
"strings"
"time"
"cloud.google.com/go/pubsub"
"github.com/golang/gddo/doc"
"github.com/golang/gddo/gosrc"
)
var (
testdataPat = regexp.MustCompile(`/testdata(?:/|$)`)
)
// crawlNote is a message sent to Pub/Sub when a crawl occurs.
// It is encoded as JSON, so changes should match its
// compatibility requirements.
type crawlNote struct {
ImportPath string
}
func (s *server) publishCrawl(ctx context.Context, importPath string) {
if s.crawlTopic == nil {
return
}
note := &crawlNote{ImportPath: importPath}
b, err := json.Marshal(note)
if err != nil {
log.Printf("Encoding crawlNote: %v", err)
return
}
s.crawlTopic.Publish(ctx, &pubsub.Message{Data: b})
}
// crawlDoc fetches the package documentation from the VCS and updates the database.
func (s *server) crawlDoc(ctx context.Context, source string, importPath string, pdoc *doc.Package, hasSubdirs bool, nextCrawl time.Time) (*doc.Package, error) {
message := []interface{}{source}
defer func() {
message = append(message, importPath)
log.Println(message...)
}()
if !nextCrawl.IsZero() {
d := time.Since(nextCrawl) / time.Hour
if d > 0 {
message = append(message, "late:", int64(d))
}
}
etag := ""
if pdoc != nil {
etag = pdoc.Etag
message = append(message, "etag:", etag)
}
start := time.Now()
var err error
if strings.HasPrefix(importPath, "code.google.com/p/go.") {
// Old import path for Go sub-repository.
pdoc = nil
err = gosrc.NotFoundError{Message: "old Go sub-repo", Redirect: "golang.org/x/" + importPath[len("code.google.com/p/go."):]}
} else if blocked, e := s.db.IsBlocked(importPath); blocked && e == nil {
pdoc = nil
err = gosrc.NotFoundError{Message: "blocked."}
} else if testdataPat.MatchString(importPath) {
pdoc = nil
err = gosrc.NotFoundError{Message: "testdata."}
} else {
var pdocNew *doc.Package
pdocNew, err = doc.Get(ctx, s.httpClient, importPath, etag)
message = append(message, "fetch:", int64(time.Since(start)/time.Millisecond))
if err == nil && pdocNew.Name == "" && !hasSubdirs {
for _, e := range pdocNew.Errors {
message = append(message, "err:", e)
}
pdoc = nil
err = gosrc.NotFoundError{Message: "no Go files or subdirs"}
} else if _, ok := err.(gosrc.NotModifiedError); !ok {
pdoc = pdocNew
}
}
maxAge := s.v.GetDuration(ConfigMaxAge)
nextCrawl = start.Add(maxAge)
switch {
case strings.HasPrefix(importPath, "github.com/") || (pdoc != nil && len(pdoc.Errors) > 0):
nextCrawl = start.Add(maxAge * 7)
case strings.HasPrefix(importPath, "gist.github.com/"):
		// Don't spend time on gists. It's a silly thing to do.
nextCrawl = start.Add(maxAge * 30)
}
if err == nil {
message = append(message, "put:", pdoc.Etag)
if err := s.put(ctx, pdoc, nextCrawl); err != nil {
log.Println(err)
}
s.publishCrawl(ctx, importPath)
return pdoc, nil
} else if e, ok := err.(gosrc.NotModifiedError); ok {
if pdoc.Status == gosrc.Active && !s.isActivePkg(importPath, e.Status) {
if e.Status == gosrc.NoRecentCommits {
e.Status = gosrc.Inactive
} | pdoc.Status = e.Status
if err := s.db.Put(ctx, pdoc, nextCrawl, false); err != nil {
log.Printf("ERROR db.Put(%q): %v", importPath, err)
}
} else {
// Touch the package without updating and move on to next one.
message = append(message, "touch")
if err := s.db.SetNextCrawl(importPath, nextCrawl); err != nil {
log.Printf("ERROR db.SetNextCrawl(%q): %v", importPath, err)
}
}
s.publishCrawl(ctx, importPath)
return pdoc, nil
} else if e, ok := err.(gosrc.NotFoundError); ok {
message = append(message, "notfound:", e)
if err := s.db.Delete(ctx, importPath); err != nil {
log.Printf("ERROR db.Delete(%q): %v", importPath, err)
}
return nil, e
} else {
message = append(message, "ERROR:", err)
return nil, err
}
}
func (s *server) put(ctx context.Context, pdoc *doc.Package, nextCrawl time.Time) error {
if pdoc.Status == gosrc.NoRecentCommits &&
s.isActivePkg(pdoc.ImportPath, gosrc.NoRecentCommits) {
pdoc.Status = gosrc.Active
}
if err := s.db.Put(ctx, pdoc, nextCrawl, false); err != nil {
return fmt.Errorf("ERROR db.Put(%q): %v", pdoc.ImportPath, err)
}
return nil
}
// isActivePkg reports whether a package is considered active,
// either because its directory is active or because it is imported by another package.
func (s *server) isActivePkg(pkg string, status gosrc.DirectoryStatus) bool {
switch status {
case gosrc.Active:
return true
case gosrc.NoRecentCommits:
// It should be inactive only if it has no imports as well.
n, err := s.db.ImporterCount(pkg)
if err != nil {
log.Printf("ERROR db.ImporterCount(%q): %v", pkg, err)
}
return n > 0
}
return false
} | message = append(message, "archive", e) |
data_cleaner.py | import pandas as pd
import re
data = pd.read_csv("BIPMetadata_current.csv")
def format_date(date_column):
# formatting the date data to display as yyyy-mm-dd
|
def truncate(column, length):
# truncates given column to given length and returns new column
new_d = []
for d in column:
if (len(d) > length):
d = d[0:length]
new_d.append(d)
return new_d
# source: https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(column):
new_desc = []
for d in column:
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
cleantext = re.sub(cleanr, '', d)
new_desc.append(' '.join(cleantext.split()))
return new_desc
def remove_spaces(column):
new_sql = []
for d in column:
new_sql.append(' '.join(d.split()))
return new_sql
new_created = format_date(data["created"])
print("UPDATED")
new_updated = format_date(data["updated"])
new_query = remove_spaces(data["sql_query"])
new_query = truncate(new_query, 5000)
new_description = truncate(data["description"], 500)
new_description = cleanhtml(new_description)
data["created"] = new_created
data["updated"] = new_updated
data["sql_query"] = new_query
data["description"] = new_description
data.to_csv("BIPMetadata_cleaned.csv", index=False) | new_dates = []
for date in date_column:
month = date[0:date.find('/')]
date = date[date.find('/')+1:]
day = date[0:date.find('/')]
year = date[date.find('/')+1:]
if (len(month) == 1):
month = "0" + month
if (len(day) == 1):
day = "0" + day
if (len(year) == 2):
year = "20" + year
newDate = year + "-" + month + "-" + day
print(newDate)
new_dates.append(newDate)
return new_dates |
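# Hedged example of the intended behavior (not in the original script):
# format_date(["3/7/19"]) would return ["2019-03-07"].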
main.rs | use std::io::Write;
fn main() {
let filename = match std::env::args().nth(1) {
Some(arg) => arg,
None => {
eprintln!("Missing argument");
std::process::exit(1)
}
};
let file = elf::File::open_path(filename).unwrap();
let data = file
.get_section(".interp")
.expect("Failed to look up .interp section") | // skip the trailing \0
std::io::stdout()
.write_all(&data[..data.len() - 1])
.unwrap();
println!();
} | .data
.as_slice();
|
test_ls_all.py | from theplease.rules.ls_all import match, get_new_command
from theplease.types import Command
def | ():
assert match(Command('ls', ''))
assert not match(Command('ls', 'file.py\n'))
def test_get_new_command():
assert get_new_command(Command('ls empty_dir', '')) == 'ls -A empty_dir'
assert get_new_command(Command('ls', '')) == 'ls -A'
| test_match |
mb4_64b_word3.rs | #[doc = "Reader of register MB4_64B_WORD3"]
pub type R = crate::R<u32, super::MB4_64B_WORD3>;
#[doc = "Writer for register MB4_64B_WORD3"]
pub type W = crate::W<u32, super::MB4_64B_WORD3>;
#[doc = "Register MB4_64B_WORD3 `reset()`'s with value 0"]
impl crate::ResetValue for super::MB4_64B_WORD3 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `DATA_BYTE_15`"]
pub type DATA_BYTE_15_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_15`"]
pub struct DATA_BYTE_15_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_15_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
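// Clear bits 0..7 of the register value, then OR in the new byte.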
self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff);
self.w
}
}
#[doc = "Reader of field `DATA_BYTE_14`"]
pub type DATA_BYTE_14_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_14`"] | #[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8);
self.w
}
}
#[doc = "Reader of field `DATA_BYTE_13`"]
pub type DATA_BYTE_13_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_13`"]
pub struct DATA_BYTE_13_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_13_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 16)) | (((value as u32) & 0xff) << 16);
self.w
}
}
#[doc = "Reader of field `DATA_BYTE_12`"]
pub type DATA_BYTE_12_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DATA_BYTE_12`"]
pub struct DATA_BYTE_12_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_12_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 24)) | (((value as u32) & 0xff) << 24);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - Data byte 0 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_15(&self) -> DATA_BYTE_15_R {
DATA_BYTE_15_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bits 8:15 - Data byte 1 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_14(&self) -> DATA_BYTE_14_R {
DATA_BYTE_14_R::new(((self.bits >> 8) & 0xff) as u8)
}
#[doc = "Bits 16:23 - Data byte 2 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_13(&self) -> DATA_BYTE_13_R {
DATA_BYTE_13_R::new(((self.bits >> 16) & 0xff) as u8)
}
#[doc = "Bits 24:31 - Data byte 3 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_12(&self) -> DATA_BYTE_12_R {
DATA_BYTE_12_R::new(((self.bits >> 24) & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - Data byte 0 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_15(&mut self) -> DATA_BYTE_15_W {
DATA_BYTE_15_W { w: self }
}
#[doc = "Bits 8:15 - Data byte 1 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_14(&mut self) -> DATA_BYTE_14_W {
DATA_BYTE_14_W { w: self }
}
#[doc = "Bits 16:23 - Data byte 2 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_13(&mut self) -> DATA_BYTE_13_W {
DATA_BYTE_13_W { w: self }
}
#[doc = "Bits 24:31 - Data byte 3 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_12(&mut self) -> DATA_BYTE_12_W {
DATA_BYTE_12_W { w: self }
}
} | pub struct DATA_BYTE_14_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_14_W<'a> { |
core.py | # Copyright 2004-2019 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains code for initializing and managing the display
# window.
from __future__ import print_function
import renpy.display
import renpy.audio
import renpy.text
import renpy.test
import pygame_sdl2 as pygame
import sys
import os
import time
import cStringIO
import threading
import copy
import gc
import inspect
import_time = time.time()
try:
import android # @UnresolvedImport
except:
android = None
TIMEEVENT = pygame.event.register("TIMEEVENT")
PERIODIC = pygame.event.register("PERIODIC")
REDRAW = pygame.event.register("REDRAW")
EVENTNAME = pygame.event.register("EVENTNAME")
# All events except for TIMEEVENT and REDRAW
ALL_EVENTS = set(pygame.event.get_standard_events()) # @UndefinedVariable
ALL_EVENTS.add(PERIODIC)
ALL_EVENTS.add(EVENTNAME)
enabled_events = {
pygame.QUIT,
pygame.APP_TERMINATING,
pygame.APP_LOWMEMORY,
pygame.APP_WILLENTERBACKGROUND,
pygame.APP_DIDENTERBACKGROUND,
pygame.APP_WILLENTERFOREGROUND,
pygame.APP_DIDENTERFOREGROUND,
pygame.WINDOWEVENT,
pygame.SYSWMEVENT,
pygame.KEYDOWN,
pygame.KEYUP,
pygame.TEXTEDITING,
pygame.TEXTINPUT,
pygame.MOUSEMOTION,
pygame.MOUSEBUTTONDOWN,
pygame.MOUSEBUTTONUP,
pygame.MOUSEWHEEL,
pygame.JOYAXISMOTION,
pygame.JOYHATMOTION,
pygame.JOYBALLMOTION,
pygame.JOYBUTTONDOWN,
pygame.JOYBUTTONUP,
pygame.JOYDEVICEADDED,
pygame.JOYDEVICEREMOVED,
pygame.CONTROLLERAXISMOTION,
pygame.CONTROLLERBUTTONDOWN,
pygame.CONTROLLERBUTTONUP,
pygame.CONTROLLERDEVICEADDED,
pygame.CONTROLLERDEVICEREMOVED,
pygame.RENDER_TARGETS_RESET,
TIMEEVENT,
PERIODIC,
REDRAW,
EVENTNAME,
}
# The number of msec between periodic events.
PERIODIC_INTERVAL = 50
# Time management.
time_base = 0.0
time_mult = 1.0
def init_time():
warp = os.environ.get("RENPY_TIMEWARP", "1.0")
global time_base
global time_mult
time_base = time.time()
time_mult = float(warp)
def get_time():
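# Returns the current time, with real elapsed time since time_base scaled by the
# RENPY_TIMEWARP factor.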
t = time.time()
return time_base + (t - time_base) * time_mult
def displayable_by_tag(layer, tag):
"""
Get the displayable on the given layer with the given tag.
"""
return renpy.game.context().scene_lists.get_displayable_by_tag(layer, tag)
class IgnoreEvent(Exception):
"""
Exception that is raised when we want to ignore an event, but
also don't want to return anything.
"""
pass
class EndInteraction(Exception):
"""
Exception that can be raised (for example, during the render method of
a displayable) to end the current interaction immediately.
"""
def __init__(self, value):
self.value = value
class absolute(float):
"""
This represents an absolute float coordinate.
"""
__slots__ = [ ]
def place(width, height, sw, sh, placement):
"""
Performs the Ren'Py placement algorithm.
`width`, `height`
The width and height of the area the image will be
placed in.
`sw`, `sh`
The size of the image to be placed.
`placement`
The tuple returned by Displayable.get_placement().
"""
xpos, ypos, xanchor, yanchor, xoffset, yoffset, _subpixel = placement
if xpos is None:
xpos = 0
if ypos is None:
ypos = 0
if xanchor is None:
xanchor = 0
if yanchor is None:
yanchor = 0
if xoffset is None:
xoffset = 0
if yoffset is None:
yoffset = 0
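# Fractional (float) positions are interpreted relative to the container size, while
# fractional anchors are relative to the image size; absolute values are used as-is.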
# We need to use type, since isinstance(absolute(0), float).
if xpos.__class__ is float:
xpos *= width
if xanchor.__class__ is float:
xanchor *= sw
x = xpos + xoffset - xanchor
if ypos.__class__ is float:
ypos *= height
if yanchor.__class__ is float:
yanchor *= sh
y = ypos + yoffset - yanchor
return x, y
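# For illustration (hypothetical numbers): place(800, 600, 100, 50, (0.5, 1.0, 0.5, 1.0, 0, 0, False))
# centers the image horizontally (0.5*800 - 0.5*100 = 350) and rests it on the bottom edge
# (1.0*600 - 1.0*50 = 550), returning (350.0, 550.0).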
class DisplayableArguments(renpy.object.Object):
"""
Represents a set of arguments that can be passed to a duplicated
displayable.
"""
# The name of the displayable without any arguments.
name = ()
# Arguments supplied.
args = ()
# The style prefix in play. This is used by DynamicImage to figure
# out the prefix list to apply.
prefix = None
# True if lint is in use.
lint = False
def copy(self, **kwargs):
"""
Returns a copy of this object with the various fields set to the
values they were given in kwargs.
"""
rv = DisplayableArguments()
rv.__dict__.update(self.__dict__)
rv.__dict__.update(kwargs)
return rv
def extraneous(self):
if renpy.config.developer and renpy.config.report_extraneous_attributes:
raise Exception("Image '{}' does not accept attributes '{}'.".format(
" ".join(self.name),
" ".join(self.args),
))
default_style = renpy.style.Style("default")
class Displayable(renpy.object.Object):
|
class SceneListEntry(renpy.object.Object):
"""
Represents a scene list entry. Since this was replacing a tuple,
it should be treated as immutable after its initial creation.
"""
def __init__(self, tag, zorder, show_time, animation_time, displayable, name):
self.tag = tag
self.zorder = zorder
self.show_time = show_time
self.animation_time = animation_time
self.displayable = displayable
self.name = name
def __iter__(self):
return iter((self.tag, self.zorder, self.show_time, self.animation_time, self.displayable))
def __getitem__(self, index):
return (self.tag, self.zorder, self.show_time, self.animation_time, self.displayable)[index]
def __repr__(self):
return "<SLE: %r %r %r>" % (self.tag, self.name, self.displayable)
def copy(self):
return SceneListEntry(
self.tag,
self.zorder,
self.show_time,
self.animation_time,
self.displayable,
self.name)
def update_time(self, time):
rv = self
if self.show_time is None or self.animation_time is None:
rv = self.copy()
rv.show_time = rv.show_time or time
rv.animation_time = rv.animation_time or time
return rv
class SceneLists(renpy.object.Object):
"""
This stores the current scene lists that are being used to display
things to the user.
"""
__version__ = 7
def after_setstate(self):
for i in renpy.config.layers + renpy.config.top_layers:
if i not in self.layers:
self.layers[i] = [ ]
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
def after_upgrade(self, version):
if version < 1:
self.at_list = { }
self.layer_at_list = { }
for i in renpy.config.layers + renpy.config.top_layers:
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
if version < 3:
self.shown_window = False
if version < 4:
for k in self.layers:
self.layers[k] = [ SceneListEntry(*(i + (None,)) ) for i in self.layers[k] ]
self.additional_transient = [ ]
if version < 5:
self.drag_group = None
if version < 6:
self.shown = self.image_predict_info
if version < 7:
self.layer_transform = { }
def __init__(self, oldsl, shown):
super(SceneLists, self).__init__()
# Has a window been shown as part of these scene lists?
self.shown_window = False
# A map from layer name -> list(SceneListEntry)
self.layers = { }
# A map from layer name -> tag -> at_list associated with that tag.
self.at_list = { }
# A map from layer to (start time, at_list), where the at list has
# been applied to the layer as a whole.
self.layer_at_list = { }
# The currently shown images.
self.shown = shown
# A list of (layer, tag) pairs that are considered to be
# transient.
self.additional_transient = [ ]
# Either None, or a DragGroup that's used as the default for
# drags with names.
self.drag_group = None
# A map from a layer to the transform that applies to that
# layer.
self.layer_transform = { }
if oldsl:
for i in renpy.config.layers + renpy.config.top_layers:
try:
self.layers[i] = oldsl.layers[i][:]
except KeyError:
self.layers[i] = [ ]
if i in oldsl.at_list:
self.at_list[i] = oldsl.at_list[i].copy()
self.layer_at_list[i] = oldsl.layer_at_list[i]
else:
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
for i in renpy.config.overlay_layers:
self.clear(i)
self.replace_transient(prefix=None)
self.focused = None
self.drag_group = oldsl.drag_group
self.layer_transform.update(oldsl.layer_transform)
else:
for i in renpy.config.layers + renpy.config.top_layers:
self.layers[i] = [ ]
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
self.music = None
self.focused = None
def replace_transient(self, prefix="hide"):
"""
Replaces the contents of the transient display list with
a copy of the master display list. This is used after a
scene is displayed to get rid of transitions and interface
elements.
`prefix`
The prefix/event to use. Set this to None to prevent the hide
from happening.
"""
for i in renpy.config.transient_layers:
self.clear(i, True)
for layer, tag in self.additional_transient:
self.remove(layer, tag, prefix=prefix)
self.additional_transient = [ ]
def transient_is_empty(self):
"""
This returns True if all transient layers are empty. This is
used by the rollback code, as we can't start a new rollback
if there is something in a transient layer (as things in the
transient layer may contain objects that cannot be pickled,
like lambdas.)
"""
for i in renpy.config.transient_layers:
if self.layers[i]:
return False
return True
def transform_state(self, old_thing, new_thing, execution=False):
"""
If the old thing is a transform, then move the state of that transform
to the new thing.
"""
if old_thing is None:
return new_thing
# Don't bother wrapping screens, as they can't be transformed.
if isinstance(new_thing, renpy.display.screen.ScreenDisplayable):
return new_thing
if renpy.config.take_state_from_target:
old_transform = old_thing._target()
else:
old_transform = old_thing
if not isinstance(old_transform, renpy.display.motion.Transform):
return new_thing
if renpy.config.take_state_from_target:
new_transform = new_thing._target()
else:
new_transform = new_thing
if not isinstance(new_transform, renpy.display.motion.Transform):
new_thing = new_transform = renpy.display.motion.Transform(child=new_thing)
new_transform.take_state(old_transform)
if execution:
new_transform.take_execution_state(old_transform)
return new_thing
def find_index(self, layer, tag, zorder, behind):
"""
This finds the spot in the named layer where we should insert the
displayable. It returns two things: an index at which the new thing
should be added, and an index at which the old thing should be hidden.
(Note that the indexes are relative to the current state of the list,
which may change on an add or remove.)
"""
add_index = None
remove_index = None
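# First pass: find an existing entry with the same tag (or the same displayable) that
# should be replaced, inheriting its zorder if none was supplied.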
for i, sle in enumerate(self.layers[layer]):
if remove_index is None:
if (sle.tag and sle.tag == tag) or sle.displayable == tag:
remove_index = i
if zorder is None:
zorder = sle.zorder
if zorder is None:
zorder = renpy.config.tag_zorder.get(tag, 0)
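# Second pass: choose the insertion point - before the first entry with a greater zorder,
# or before a same-zorder entry the new displayable should be shown behind.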
for i, sle in enumerate(self.layers[layer]):
if add_index is None:
if sle.zorder == zorder:
if sle.tag and (sle.tag == tag or sle.tag in behind):
add_index = i
elif sle.zorder > zorder:
add_index = i
if add_index is None:
add_index = len(self.layers[layer])
return add_index, remove_index, zorder
def add(self,
layer,
thing,
key=None,
zorder=0,
behind=[ ],
at_list=[ ],
name=None,
atl=None,
default_transform=None,
transient=False,
keep_st=False):
"""
Adds something to this scene list. Some of these names are quite a bit
out of date.
`thing` - The displayable to add.
`key` - A string giving the tag associated with this thing.
`zorder` - Where to place this thing in the zorder, an integer.
A greater value means closer to the user.
`behind` - A list of tags to place the thing behind.
`at_list` - The at_list associated with this
displayable. Counterintunitively, this is not actually
applied, but merely stored for future use.
`name` - The full name of the image being displayed. This is used for
image lookup.
`atl` - If not None, an atl block applied to the thing. (This actually is
applied here.)
`default_transform` - The default transform that is used to initialized
the values in the other transforms.
`keep_st`
If true, we preserve the shown time of a replaced displayable.
"""
if not isinstance(thing, Displayable):
raise Exception("Attempting to show something that isn't a displayable:" + repr(thing))
if layer not in self.layers:
raise Exception("Trying to add something to non-existent layer '%s'." % layer)
if key:
self.remove_hide_replaced(layer, key)
self.at_list[layer][key] = at_list
if key and name:
self.shown.predict_show(layer, name)
if transient:
self.additional_transient.append((layer, key))
l = self.layers[layer]
if atl:
thing = renpy.display.motion.ATLTransform(atl, child=thing)
add_index, remove_index, zorder = self.find_index(layer, key, zorder, behind)
at = None
st = None
if remove_index is not None:
sle = l[remove_index]
old = sle.displayable
at = sle.animation_time
if keep_st:
st = sle.show_time
if (not atl and
not at_list and
renpy.config.keep_running_transform and
isinstance(old, renpy.display.motion.Transform)):
thing = sle.displayable._change_transform_child(thing)
else:
thing = self.transform_state(l[remove_index].displayable, thing)
thing.set_transform_event("replace")
thing._show()
else:
if not isinstance(thing, renpy.display.motion.Transform):
thing = self.transform_state(default_transform, thing)
thing.set_transform_event("show")
thing._show()
sle = SceneListEntry(key, zorder, st, at, thing, name)
l.insert(add_index, sle)
if remove_index is not None:
if add_index <= remove_index:
remove_index += 1
self.hide_or_replace(layer, remove_index, "replaced")
def hide_or_replace(self, layer, index, prefix):
"""
Hides or replaces the scene list entry at the given
index. `prefix` is a prefix that is used if the entry
decides it doesn't want to be hidden quite yet.
"""
if index is None:
return
l = self.layers[layer]
oldsle = l[index]
now = get_time()
st = oldsle.show_time or now
at = oldsle.animation_time or now
if renpy.config.fast_unhandled_event:
if not oldsle.displayable._handles_event(prefix):
prefix = None
if (prefix is not None) and oldsle.tag:
d = oldsle.displayable._hide(now - st, now - at, prefix)
# _hide can mutate the layers, so we need to recompute
# index.
index = l.index(oldsle)
if d is not None:
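# Keep the displayable alive under a "prefix$tag" pseudo-tag so its hide/replaced
# animation can finish; remove_hidden() drops it once _hide() returns None.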
sle = SceneListEntry(
prefix + "$" + oldsle.tag,
oldsle.zorder,
st,
at,
d,
None)
l[index] = sle
return
l.pop(index)
def get_all_displayables(self):
"""
Gets all displayables reachable from this scene list.
"""
rv = [ ]
for l in self.layers.itervalues():
for sle in l:
rv.append(sle.displayable)
return rv
def remove_above(self, layer, thing):
"""
Removes everything on the layer that is closer to the user
than thing, which may be either a tag or a displayable. Thing must
be displayed, or everything will be removed.
"""
for i in reversed(xrange(len(self.layers[layer]))):
sle = self.layers[layer][i]
if thing:
if sle.tag == thing or sle.displayable == thing:
break
if sle.tag and "$" in sle.tag:
continue
self.hide_or_replace(layer, i, "hide")
def remove(self, layer, thing, prefix="hide"):
"""
Thing is either a key or a displayable. This iterates through the
named layer, searching for entries matching the thing.
When they are found, they are removed from the displaylist.
It's not an error to remove something that isn't in the layer in
the first place.
"""
if layer not in self.layers:
raise Exception("Trying to remove something from non-existent layer '%s'." % layer)
_add_index, remove_index, _zorder = self.find_index(layer, thing, 0, [ ])
if remove_index is not None:
tag = self.layers[layer][remove_index].tag
if tag:
self.shown.predict_hide(layer, (tag,))
self.at_list[layer].pop(tag, None)
self.hide_or_replace(layer, remove_index, prefix)
def clear(self, layer, hide=False):
"""
Clears the named layer, making it empty.
If hide is True, then objects are hidden. Otherwise, they are
totally wiped out.
"""
if layer not in self.layers:
return
if not hide:
self.layers[layer] = [ ]
else:
# Have to iterate in reverse order, since otherwise
# the indexes might change.
for i in reversed(xrange(len(self.layers[layer]))):
self.hide_or_replace(layer, i, hide)
self.at_list[layer].clear()
self.shown.predict_scene(layer)
self.layer_at_list[layer] = (None, [ ])
def set_layer_at_list(self, layer, at_list, reset=True):
self.layer_at_list[layer] = (None, list(at_list))
if reset:
self.layer_transform[layer] = None
def set_times(self, time):
"""
This finds entries with a time of None, and replaces that
time with the given time.
"""
for l, (t, list) in self.layer_at_list.items(): # @ReservedAssignment
self.layer_at_list[l] = (t or time, list)
for l, ll in self.layers.iteritems():
self.layers[l] = [ i.update_time(time) for i in ll ]
def showing(self, layer, name):
"""
Returns true if something with the prefix of the given name
is found in the scene list.
"""
return self.shown.showing(layer, name)
def get_showing_tags(self, layer):
return self.shown.get_showing_tags(layer)
def get_sorted_tags(self, layer):
rv = [ ]
for sle in self.layers[layer]:
if not sle.tag:
continue
if "$" in sle.tag:
continue
rv.append(sle.tag)
return rv
def make_layer(self, layer, properties):
"""
Creates a Fixed with the given layer name and scene_list.
"""
rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **properties)
rv.append_scene_list(self.layers[layer])
rv.layer_name = layer
rv._duplicatable = False
time, at_list = self.layer_at_list[layer]
old_transform = self.layer_transform.get(layer, None)
new_transform = None
if at_list:
for a in at_list:
if isinstance(a, renpy.display.motion.Transform):
rv = a(child=rv)
new_transform = rv
else:
rv = a(rv)
if (new_transform is not None) and (renpy.config.keep_show_layer_state):
self.transform_state(old_transform, new_transform, execution=True)
f = renpy.display.layout.MultiBox(layout='fixed')
f.add(rv, time, time)
f.layer_name = layer
rv = f
self.layer_transform[layer] = new_transform
return rv
def remove_hide_replaced(self, layer, tag):
"""
Removes things that are hiding or replaced, that have the given
tag.
"""
hide_tag = "hide$" + tag
replaced_tag = "replaced$" + tag
l = self.layers[layer]
self.layers[layer] = [ i for i in l if i.tag != hide_tag and i.tag != replaced_tag ]
def remove_hidden(self):
"""
Goes through all of the layers, and removes things that are
hidden and are no longer being kept alive by their hide
methods.
"""
now = get_time()
for l in self.layers:
newl = [ ]
for sle in self.layers[l]:
if sle.tag:
if sle.tag.startswith("hide$"):
d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "hide")
if not d:
continue
elif sle.tag.startswith("replaced$"):
d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "replaced")
if not d:
continue
newl.append(sle)
self.layers[l] = newl
def remove_all_hidden(self):
"""
Removes everything hidden, even if it's not time yet. (Used when making a rollback copy).
"""
for l in self.layers:
newl = [ ]
for sle in self.layers[l]:
if sle.tag:
if "$" in sle.tag:
continue
newl.append(sle)
self.layers[l] = newl
def get_displayable_by_tag(self, layer, tag):
"""
Returns the displayable on the layer with the given tag, or None
if no such displayable exists. Note that this will usually return
a Transform.
"""
if layer not in self.layers:
raise Exception("Unknown layer %r." % layer)
for sle in self.layers[layer]:
if sle.tag == tag:
return sle.displayable
return None
def get_displayable_by_name(self, layer, name):
"""
Returns the displayable on the layer with the given name, or None
if no such displayable exists. Note that this will usually return
a Transform.
"""
if layer not in self.layers:
raise Exception("Unknown layer %r." % layer)
for sle in self.layers[layer]:
if sle.name == name:
return sle.displayable
return None
def get_image_bounds(self, layer, tag, width, height):
"""
Implements renpy.get_image_bounds().
"""
if layer not in self.layers:
raise Exception("Unknown layer %r." % layer)
for sle in self.layers[layer]:
if sle.tag == tag:
break
else:
return None
now = get_time()
if sle.show_time is not None:
st = now - sle.show_time
else:
st = 0
if sle.animation_time is not None:
at = now - sle.animation_time
else:
at = 0
surf = renpy.display.render.render_for_size(sle.displayable, width, height, st, at)
sw = surf.width
sh = surf.height
x, y = place(width, height, sw, sh, sle.displayable.get_placement())
return (x, y, sw, sh)
def scene_lists(index=-1):
"""
Returns either the current scenelists object, or the one for the
context at the given index.
"""
return renpy.game.context(index).scene_lists
class MouseMove(object):
"""
This contains information about the current mouse move.
"""
def __init__(self, x, y, duration):
self.start = get_time()
if duration is not None:
self.duration = duration
else:
self.duration = 0
self.start_x, self.start_y = renpy.display.draw.get_mouse_pos()
self.end_x = x
self.end_y = y
def perform(self):
"""
Performs the mouse move. Returns True if this should be called
again, or False if the move has finished.
"""
elapsed = get_time() - self.start
if elapsed >= self.duration:
renpy.display.draw.set_mouse_pos(self.end_x, self.end_y)
return False
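# Otherwise, linearly interpolate the mouse position between the start and end points.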
done = 1.0 * elapsed / self.duration
x = int(self.start_x + done * (self.end_x - self.start_x))
y = int(self.start_y + done * (self.end_y - self.start_y))
renpy.display.draw.set_mouse_pos(x, y)
return True
def get_safe_mode():
"""
Returns true if we should go into safe mode.
"""
if renpy.safe_mode_checked:
return False
try:
if renpy.windows:
import ctypes
VK_SHIFT = 0x10
ctypes.windll.user32.GetKeyState.restype = ctypes.c_ushort
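# The high-order bit of GetKeyState is set while the key is currently held down.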
if ctypes.windll.user32.GetKeyState(VK_SHIFT) & 0x8000:
return True
else:
return False
# Safe mode doesn't work on other platforms.
return False
except:
return False
# How long should we be in maximum framerate mode at the start of the game?
initial_maximum_framerate = 0.0
class Interface(object):
"""
This represents the user interface that interacts with the user.
It manages the Display objects that display things to the user, and
also handles accepting and responding to user input.
@ivar display: The display that we used to display the screen.
@ivar profile_time: The time of the last profiling.
@ivar screenshot: A screenshot, or None if no screenshot has been
taken.
@ivar old_scene: The last thing that was displayed to the screen.
@ivar transition: A map from layer name to the transition that will
be applied the next time interact restarts.
@ivar transition_time: A map from layer name to the time the transition
involving that layer started.
@ivar transition_from: A map from layer name to the scene that we're
transitioning from on that layer.
@ivar suppress_transition: If True, then the next transition will not
happen.
@ivar force_redraw: If True, a redraw is forced.
@ivar restart_interaction: If True, the current interaction will
be restarted.
@ivar pushed_event: If not None, an event that was pushed back
onto the stack.
@ivar mouse: The name of the mouse cursor to use during the current
interaction.
@ivar ticks: The number of 20hz ticks.
@ivar frame_time: The time at which we began drawing this frame.
@ivar interact_time: The time of the start of the first frame of the current interact_core.
@ivar time_event: A singleton ignored event.
@ivar event_time: The time of the current event.
@ivar timeout_time: The time at which the timeout will occur.
"""
def __init__(self):
# PNG data and the surface for the current file screenshot.
self.screenshot = None
self.screenshot_surface = None
self.old_scene = { }
self.transition = { }
self.ongoing_transition = { }
self.transition_time = { }
self.transition_from = { }
self.suppress_transition = False
self.quick_quit = False
self.force_redraw = False
self.restart_interaction = False
self.pushed_event = None
self.ticks = 0
self.mouse = 'default'
self.timeout_time = None
self.last_event = None
self.current_context = None
self.roll_forward = None
# Things to be preloaded.
self.preloads = [ ]
# The time at which this draw occurs.
self.frame_time = 0
# The time when this interaction occurred.
self.interact_time = None
# The time we last tried to quit.
self.quit_time = 0
# Are we currently processing the quit event?
self.in_quit_event = False
self.time_event = pygame.event.Event(TIMEEVENT)
self.redraw_event = pygame.event.Event(REDRAW)
# Are we focused?
self.mouse_focused = True
self.keyboard_focused = True
# Properties for each layer.
self.layer_properties = { }
# Have we shown the window this interaction?
self.shown_window = False
# Are we in fullscreen mode?
self.fullscreen = False
# Should we ignore the rest of the current touch? Used to ignore the
# rest of a mousepress after a longpress occurs.
self.ignore_touch = False
# Should we clear the screenshot at the start of the next interaction?
self.clear_screenshot = False
for layer in renpy.config.layers + renpy.config.top_layers:
if layer in renpy.config.layer_clipping:
x, y, w, h = renpy.config.layer_clipping[layer]
self.layer_properties[layer] = dict(
xpos=x,
xanchor=0,
ypos=y,
yanchor=0,
xmaximum=w,
ymaximum=h,
xminimum=w,
yminimum=h,
clipping=True,
)
else:
self.layer_properties[layer] = dict()
# A stack giving the values of self.transition and self.transition_time
# for contexts outside the current one. This is used to restore those
# in the case where nothing has changed in the new context.
self.transition_info_stack = [ ]
# The time when the event was dispatched.
self.event_time = 0
# The time we saw the last mouse event.
self.mouse_event_time = None
# Should we show the mouse?
self.show_mouse = True
# Should we reset the display?
self.display_reset = False
# The last size we were resized to.
self.last_resize = None
# The thread that can do display operations.
self.thread = threading.current_thread()
# Initialize audio.
renpy.audio.audio.init()
# Initialize pygame.
try:
pygame.display.init()
except:
pass
# Init timing.
init_time()
self.mouse_event_time = get_time()
# The current window caption.
self.window_caption = None
renpy.game.interface = self
renpy.display.interface = self
# Are we in safe mode, from holding down shift at start?
self.safe_mode = False
# Do we need a background screenshot?
self.bgscreenshot_needed = False
# Event used to signal background screenshot taken.
self.bgscreenshot_event = threading.Event()
# The background screenshot surface.
self.bgscreenshot_surface = None
# Mouse move. If not None, information about the current mouse
# move.
self.mouse_move = None
# If in text editing mode, the current text editing event.
self.text_editing = None
# The text rectangle after the current draw.
self.text_rect = None
# The text rectangle after the previous draw.
self.old_text_rect = None
# Are we a touchscreen?
self.touch = renpy.exports.variant("touch")
# Should we restart the interaction?
self.restart_interaction = True
# For compatibility with older code.
if renpy.config.periodic_callback:
renpy.config.periodic_callbacks.append(renpy.config.periodic_callback)
renpy.display.emulator.init_emulator()
# Has start been called?
self.started = False
# Are we in fullscreen video mode?
self.fullscreen_video = False
self.safe_mode = get_safe_mode()
renpy.safe_mode_checked = True
# A scale factor used to compensate for the system DPI.
self.dpi_scale = self.setup_dpi_scaling()
renpy.display.log.write("DPI scale factor: %f", self.dpi_scale)
# A time until which we should draw at maximum framerate.
self.maximum_framerate_time = 0.0
self.maximum_framerate(initial_maximum_framerate)
# True if this is the first interact.
self.start_interact = True
# The time of each frame.
self.frame_times = [ ]
# The duration of each frame, in seconds.
self.frame_duration = 1.0 / 60.0
def setup_dpi_scaling(self):
if "RENPY_HIGHDPI" in os.environ:
return float(os.environ["RENPY_HIGHDPI"])
if not renpy.windows:
return 1.0
try:
import ctypes
from ctypes import c_void_p, c_int
ctypes.windll.user32.SetProcessDPIAware()
GetDC = ctypes.windll.user32.GetDC
GetDC.restype = c_void_p
GetDC.argtypes = [ c_void_p ]
ReleaseDC = ctypes.windll.user32.ReleaseDC
ReleaseDC.argtypes = [ c_void_p, c_void_p ]
GetDeviceCaps = ctypes.windll.gdi32.GetDeviceCaps
GetDeviceCaps.restype = c_int
GetDeviceCaps.argtypes = [ c_void_p, c_int ]
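# LOGPIXELSX (88) queries the horizontal logical DPI; 96 DPI is the Windows baseline,
# so the scale factor below is dpi / 96.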
LOGPIXELSX = 88
dc = GetDC(None)
rv = GetDeviceCaps(dc, LOGPIXELSX) / 96.0
ReleaseDC(None, dc)
if rv < renpy.config.de_minimus_dpi_scale:
renpy.display.log.write("De minimus DPI scale, was %r", rv)
rv = 1.0
return rv
except:
renpy.display.log.write("Could not determine DPI scale factor:")
renpy.display.log.exception()
return 1.0
def start(self):
"""
Starts the interface, by opening a window and setting the mode.
"""
if self.started:
return
gc.collect()
if gc.garbage:
gc.garbage[:] = [ ]
renpy.display.render.render_ready()
# Kill off the presplash.
renpy.display.presplash.end()
renpy.main.log_clock("Interface start")
self.started = True
self.set_mode()
# Load the image fonts.
renpy.text.font.load_fonts()
# Setup periodic event.
pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
# Don't grab the screen.
pygame.event.set_grab(False)
if not self.safe_mode:
renpy.display.controller.init()
s = "Total time until interface ready: {}s".format(time.time() - import_time)
renpy.display.log.write(s)
if renpy.android and not renpy.config.log_to_stdout:
print(s)
def post_init(self):
"""
This is called after display init, but before the window is created.
"""
pygame.display.hint("SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS", "0")
# Needed for Unity.
wmclass = renpy.config.save_directory or os.path.basename(sys.argv[0])
os.environ[b'SDL_VIDEO_X11_WMCLASS'] = wmclass.encode("utf-8")
self.set_window_caption(force=True)
self.set_icon()
if renpy.config.key_repeat is not None:
delay, repeat_delay = renpy.config.key_repeat
pygame.key.set_repeat(int(1000 * delay), int(1000 * repeat_delay))
if android:
android.wakelock(True)
# Block events we don't use.
for i in pygame.event.get_standard_events():
if i in enabled_events:
continue
if i in renpy.config.pygame_events:
continue
pygame.event.set_blocked(i)
def set_icon(self):
"""
This is called to set up the window icon.
"""
# Window icon.
icon = renpy.config.window_icon
if icon:
im = renpy.display.scale.image_load_unscaled(
renpy.loader.load(icon),
icon,
)
# Pad the icon onto a square surface so non-square icons keep their aspect ratio.
iw, ih = im.get_size()
imax = max(iw, ih)
square_im = renpy.display.pgrender.surface_unscaled((imax, imax), True)
square_im.blit(im, ( (imax-iw)/2, (imax-ih)/2 ))
im = square_im
pygame.display.set_icon(im)
def set_window_caption(self, force=False):
window_title = renpy.config.window_title
if window_title is None:
window_title = "A Ren'Py Game"
caption = renpy.translation.translate_string(window_title) + renpy.store._window_subtitle
if renpy.exports.get_autoreload():
caption += " - autoreload"
if not force and caption == self.window_caption:
return
self.window_caption = caption
pygame.display.set_caption(caption.encode("utf-8"))
def iconify(self):
pygame.display.iconify()
def get_draw_constructors(self):
"""
Figures out the list of draw constructors to try.
"""
renderer = renpy.game.preferences.renderer
renderer = os.environ.get("RENPY_RENDERER", renderer)
if self.safe_mode:
renderer = "sw"
if (renderer == "angle") and (not renpy.windows):
renderer = "auto"
renpy.config.renderer = renderer
if renderer == "auto":
if renpy.windows:
renderers = [ "gl", "angle", "sw" ]
else:
renderers = [ "gl", "sw" ]
if renpy.config.gl2:
renderers = [ "gl2", "egl2" ] + renderers
else:
renderers = [ renderer, "sw" ]
draw_objects = { }
def make_draw(name, mod, cls, *args):
if name not in renderers:
return False
try:
__import__(mod)
module = sys.modules[mod]
draw_class = getattr(module, cls)
draw_objects[name] = draw_class(*args)
return True
except:
renpy.display.log.write("Couldn't import {0} renderer:".format(name))
renpy.display.log.exception()
return False
if renpy.windows:
has_angle = make_draw("angle", "renpy.angle.gldraw", "GLDraw")
else:
has_angle = False
make_draw("gl", "renpy.gl.gldraw", "GLDraw", not has_angle)
make_draw("gl2", "renpy.gl2.gl2draw", "GL2Draw", "gl2", False)
make_draw("gles2", "renpy.gl2.gl2draw", "GL2Draw", "gles2", True)
make_draw("sw", "renpy.display.swdraw", "SWDraw")
rv = [ ]
def append_draw(name):
if name in draw_objects:
rv.append(draw_objects[name])
else:
renpy.display.log.write("Unknown renderer: {0}".format(name))
for i in renderers:
append_draw(i)
return rv
def kill_textures(self):
if renpy.display.draw is not None:
renpy.display.draw.kill_textures()
renpy.display.render.free_memory()
renpy.text.text.layout_cache_clear()
def kill_textures_and_surfaces(self):
"""
Kill all textures and surfaces that are loaded.
"""
self.kill_textures()
renpy.display.im.cache.clear()
renpy.display.module.bo_cache = None
def set_mode(self, physical_size=None):
"""
This sets the video mode. It also picks the draw object.
"""
# Ensure that we kill off the movie when changing screen res.
if renpy.display.draw and renpy.display.draw.info["renderer"] == "sw":
renpy.display.video.movie_stop(clear=False)
renpy.display.render.free_memory()
renpy.text.text.layout_cache_clear()
renpy.display.module.bo_cache = None
if self.display_reset:
pygame.key.stop_text_input() # @UndefinedVariable
pygame.key.set_text_input_rect(None) # @UndefinedVariable
self.text_rect = None
if renpy.display.draw.info["renderer"] == "angle":
renpy.display.draw.quit()
# This is necessary to fix a bug with restoring a window from
# minimized state on windows.
pygame.display.quit()
self.kill_textures_and_surfaces()
self.old_text_rect = None
self.display_reset = False
virtual_size = (renpy.config.screen_width, renpy.config.screen_height)
if physical_size is None:
if renpy.mobile or renpy.game.preferences.physical_size is None: # @UndefinedVariable
physical_size = (None, None)
else:
physical_size = renpy.game.preferences.physical_size
# Setup screen.
fullscreen = renpy.game.preferences.fullscreen
old_fullscreen = self.fullscreen
self.fullscreen = fullscreen
if os.environ.get('RENPY_DISABLE_FULLSCREEN', False):
fullscreen = False
self.fullscreen = renpy.game.preferences.fullscreen
if renpy.display.draw:
draws = [ renpy.display.draw ]
else:
draws = self.get_draw_constructors()
for draw in draws:
if draw.set_mode(virtual_size, physical_size, fullscreen):
renpy.display.draw = draw
renpy.display.render.models = draw.info.get("models", False)
break
else:
# pygame.display.quit()
pass
else:
# Ensure we don't get stuck in fullscreen.
renpy.game.preferences.fullscreen = False
raise Exception("Could not set video mode.")
# Save the video size.
if renpy.config.save_physical_size and not fullscreen and not old_fullscreen:
renpy.game.preferences.physical_size = renpy.display.draw.get_physical_size()
if android:
android.init()
# We need to redraw the (now blank) screen.
self.force_redraw = True
# Assume we have focus until told otherwise.
self.mouse_focused = True
self.keyboard_focused = True
# Assume we're not minimized.
self.minimized = False
# Force an interaction restart.
self.restart_interaction = True
# True if we're doing a one-time profile.
self.profile_once = False
# Clear the frame times.
self.frame_times = [ ]
def draw_screen(self, root_widget, fullscreen_video, draw):
try:
renpy.display.render.per_frame = True
renpy.display.screen.per_frame()
finally:
renpy.display.render.per_frame = False
surftree = renpy.display.render.render_screen(
root_widget,
renpy.config.screen_width,
renpy.config.screen_height,
)
if draw:
renpy.display.draw.draw_screen(surftree, fullscreen_video)
now = time.time()
self.frame_times.append(now)
while (now - self.frame_times[0]) > renpy.config.performance_window:
self.frame_times.pop(0)
renpy.display.render.mark_sweep()
renpy.display.focus.take_focuses()
self.surftree = surftree
self.fullscreen_video = fullscreen_video
def take_screenshot(self, scale, background=False):
"""
This takes a screenshot of the current screen, and stores it so
that it can be retrieved using get_screenshot().
`background`
If true, we're in a background thread. So queue the request
until it can be handled by the main thread.
"""
self.clear_screenshot = False
# Do nothing before the first interaction.
if not self.started:
return
if background and not renpy.emscripten:
self.bgscreenshot_event.clear()
self.bgscreenshot_needed = True
if not self.bgscreenshot_event.wait(1.0):
raise Exception("Screenshot timed out.")
surf = self.bgscreenshot_surface
self.bgscreenshot_surface = None
else:
surf = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
surf = renpy.display.scale.smoothscale(surf, scale)
renpy.display.render.mutated_surface(surf)
self.screenshot_surface = surf
sio = cStringIO.StringIO()
renpy.display.module.save_png(surf, sio, 0)
self.screenshot = sio.getvalue()
sio.close()
def check_background_screenshot(self):
"""
Handles requests for a background screenshot.
"""
if self.bgscreenshot_needed:
self.bgscreenshot_needed = False
self.bgscreenshot_surface = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
self.bgscreenshot_event.set()
def get_screenshot(self):
"""
Gets the current screenshot, as a string. Returns None if there isn't
a current screenshot.
"""
if not self.started:
self.start()
rv = self.screenshot
if not rv:
self.take_screenshot(
(renpy.config.thumbnail_width, renpy.config.thumbnail_height),
background=(threading.current_thread() is not self.thread),
)
rv = self.screenshot
self.lose_screenshot()
return rv
def lose_screenshot(self):
"""
This deallocates the saved screenshot.
"""
self.screenshot = None
self.screenshot_surface = None
def save_screenshot(self, filename):
"""
Saves a full-size screenshot in the given filename.
"""
window = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
if renpy.config.screenshot_crop:
window = window.subsurface(renpy.config.screenshot_crop)
try:
renpy.display.scale.image_save_unscaled(window, filename)
if renpy.emscripten:
import emscripten
emscripten.run_script(r'''FSDownload('%s')''' % filename)
return True
except:
if renpy.config.debug:
raise
return False
def show_window(self):
if not renpy.store._window:
return
if not renpy.game.preferences.show_empty_window:
return
if renpy.game.context().scene_lists.shown_window:
return
if renpy.config.empty_window:
old_history = renpy.store._history # @UndefinedVariable
renpy.store._history = False
PPP("empty window")
try:
renpy.config.empty_window()
finally:
renpy.store._history = old_history
def do_with(self, trans, paired, clear=False):
if renpy.config.with_callback:
trans = renpy.config.with_callback(trans, paired)
if (not trans) or self.suppress_transition:
self.with_none()
return False
else:
self.set_transition(trans)
return self.interact(trans_pause=True,
suppress_overlay=not renpy.config.overlay_during_with,
mouse='with',
clear=clear)
def with_none(self, overlay=True):
"""
Implements the with None command, which sets the scene we will
be transitioning from.
"""
PPP("start of with none")
renpy.game.context().say_attributes = None
# Show the window, if that's necessary.
self.show_window()
# Compute the overlay.
if overlay:
self.compute_overlay()
scene_lists = renpy.game.context().scene_lists
# Compute the scene.
for layer, d in self.compute_scene(scene_lists).iteritems():
if layer not in self.transition:
self.old_scene[layer] = d
# Get rid of transient things.
for i in renpy.config.overlay_layers:
scene_lists.clear(i)
scene_lists.replace_transient()
scene_lists.shown_window = False
if renpy.store._side_image_attributes_reset:
renpy.store._side_image_attributes = None
renpy.store._side_image_attributes_reset = False
def set_transition(self, transition, layer=None, force=False):
"""
Sets the transition that will be performed as part of the next
interaction.
"""
if self.suppress_transition and not force:
return
if transition is None:
self.transition.pop(layer, None)
else:
self.transition[layer] = transition
def event_peek(self):
"""
This peeks at the next event. It returns None if no event exists.
"""
if self.pushed_event:
return self.pushed_event
ev = pygame.event.poll()
if ev.type == pygame.NOEVENT:
self.check_background_screenshot()
# Seems to prevent the CPU from speeding up.
renpy.display.draw.event_peek_sleep()
return None
self.pushed_event = ev
return ev
def event_poll(self):
"""
Called to busy-wait for an event while we're waiting to
redraw a frame.
"""
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
else:
rv = pygame.event.poll()
self.last_event = rv
return rv
def event_wait(self):
"""
This is in its own function so that we can track in the
profiler how much time is spent in interact.
"""
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
self.last_event = rv
return rv
self.check_background_screenshot()
ev = pygame.event.wait()
self.last_event = ev
return ev
def compute_overlay(self):
if renpy.store.suppress_overlay:
return
# Figure out what the overlay layer should look like.
renpy.ui.layer("overlay")
for i in renpy.config.overlay_functions:
i()
if renpy.game.context().scene_lists.shown_window:
for i in renpy.config.window_overlay_functions:
i()
renpy.ui.close()
def compute_scene(self, scene_lists):
"""
This converts scene lists into a dictionary mapping layer
name to a Fixed containing that layer.
"""
rv = { }
for layer in renpy.config.layers + renpy.config.top_layers:
rv[layer] = scene_lists.make_layer(layer, self.layer_properties[layer])
root = renpy.display.layout.MultiBox(layout='fixed')
root.layers = { }
for layer in renpy.config.layers:
root.layers[layer] = rv[layer]
root.add(rv[layer])
rv[None] = root
return rv
def quit_event(self):
"""
This is called to handle the user invoking a quit.
"""
if self.screenshot is None:
renpy.exports.take_screenshot()
if self.quit_time > (time.time() - .75):
renpy.exports.quit(save=True)
if self.in_quit_event:
renpy.exports.quit(save=True)
if renpy.config.quit_action is not None:
self.quit_time = time.time()
# Make the screen more suitable for interactions.
renpy.exports.movie_stop(only_fullscreen=True)
renpy.store.mouse_visible = True
try:
self.in_quit_event = True
renpy.display.behavior.run(renpy.config.quit_action)
finally:
self.in_quit_event = False
else:
renpy.exports.quit(save=True)
def get_mouse_info(self):
# Figure out if the mouse visibility algorithm is hiding the mouse.
if (renpy.config.mouse_hide_time is not None) and (self.mouse_event_time + renpy.config.mouse_hide_time < renpy.display.core.get_time()):
visible = False
else:
visible = renpy.store.mouse_visible and (not renpy.game.less_mouse)
visible = visible and self.show_mouse and not (renpy.display.video.fullscreen)
# If not visible, hide the mouse.
if not visible:
return False, 0, 0, None
# Deal with a hardware mouse, the easy way.
if not renpy.config.mouse:
return True, 0, 0, None
# Deal with the mouse going offscreen.
if not self.mouse_focused:
return False, 0, 0, None
mouse_kind = renpy.display.focus.get_mouse() or self.mouse
# Figure out the mouse animation.
if mouse_kind in renpy.config.mouse:
anim = renpy.config.mouse[mouse_kind]
else:
anim = renpy.config.mouse[getattr(renpy.store, 'default_mouse', 'default')]
img, x, y = anim[self.ticks % len(anim)]
rend = renpy.display.im.load_image(img)
tex = rend.children[0][0]
xo = rend.children[0][1]
yo = rend.children[0][2]
return False, x - xo, y - yo, tex
def set_mouse_pos(self, x, y, duration):
"""
Sets the mouse position. Duration can be a number of seconds or
None.
"""
self.mouse_move = MouseMove(x, y, duration)
self.force_redraw = True
def drawn_since(self, seconds_ago):
"""
Returns true if the screen has been drawn in the last `seconds_ago` seconds,
and false otherwise.
"""
return (get_time() - self.frame_time) <= seconds_ago
def check_suspend(self, ev):
"""
Handles the SDL2 suspend process.
"""
def save():
if renpy.config.save_on_mobile_background and (not renpy.store.main_menu):
renpy.loadsave.save("_reload-1")
renpy.persistent.update(True)
if ev.type == pygame.APP_TERMINATING:
save()
sys.exit(0)
if ev.type != pygame.APP_WILLENTERBACKGROUND:
return False
# At this point, we're about to enter the background.
renpy.audio.audio.pause_all()
if android:
android.wakelock(False)
pygame.time.set_timer(PERIODIC, 0)
pygame.time.set_timer(REDRAW, 0)
pygame.time.set_timer(TIMEEVENT, 0)
save()
if renpy.config.quit_on_mobile_background:
sys.exit(0)
renpy.exports.free_memory()
print("Entered background.")
while True:
ev = pygame.event.wait()
if ev.type == pygame.APP_DIDENTERFOREGROUND:
break
if ev.type == pygame.APP_TERMINATING:
sys.exit(0)
print("Entering foreground.")
# Since we came back to life, we can get rid of the
# auto-reload.
renpy.loadsave.unlink_save("_reload-1")
pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
renpy.audio.audio.unpause_all()
if android:
android.wakelock(True)
# Reset the display so we get the GL context back.
self.display_reset = True
self.restart_interaction = True
return True
def iconified(self):
"""
Called when we become an icon.
"""
if self.minimized:
return
self.minimized = True
renpy.display.log.write("The window was minimized.")
def restored(self):
"""
Called when we are restored from being an icon.
"""
# This is necessary on Windows/DirectX/Angle, as otherwise we get
# a blank screen.
if not self.minimized:
return
self.minimized = False
renpy.display.log.write("The window was restored.")
if renpy.windows:
self.display_reset = True
self.set_mode(self.last_resize)
def enter_context(self):
"""
Called when we enter a new context.
"""
# Stop ongoing transitions.
self.ongoing_transition.clear()
self.transition_from.clear()
self.transition_time.clear()
def post_time_event(self):
"""
Posts a time_event object to the queue.
"""
try:
pygame.event.post(self.time_event)
except:
pass
def after_longpress(self):
"""
Called after a longpress, to ignore the mouse button release.
"""
self.ignore_touch = True
renpy.display.focus.mouse_handler(None, -1, -1, default=False)
def text_event_in_queue(self):
"""
Returns true if the next event in the queue is a text editing event.
"""
ev = self.event_peek()
if ev is None:
return False
else:
return ev.type in (pygame.TEXTINPUT, pygame.TEXTEDITING)
def update_text_rect(self):
"""
Updates the text input state and text rectangle.
"""
if renpy.store._text_rect is not None: # @UndefinedVariable
self.text_rect = renpy.store._text_rect # @UndefinedVariable
if self.text_rect is not None:
not_shown = pygame.key.has_screen_keyboard_support() and not pygame.key.is_screen_keyboard_shown() # @UndefinedVariable
if self.old_text_rect != self.text_rect:
x, y, w, h = self.text_rect
x0, y0 = renpy.display.draw.untranslate_point(x, y)
x1, y1 = renpy.display.draw.untranslate_point(x + w, y + h)
rect = (x0, y0, x1 - x0, y1 - y0)
pygame.key.set_text_input_rect(rect) # @UndefinedVariable
if not self.old_text_rect or not_shown:
pygame.key.start_text_input() # @UndefinedVariable
else:
if self.old_text_rect:
pygame.key.stop_text_input() # @UndefinedVariable
pygame.key.set_text_input_rect(None) # @UndefinedVariable
self.old_text_rect = self.text_rect
def maximum_framerate(self, t):
"""
Forces Ren'Py to draw the screen at the maximum framerate for `t` seconds.
"""
if t is None:
self.maximum_framerate_time = 0
else:
self.maximum_framerate_time = max(self.maximum_framerate_time, get_time() + t)
def interact(self, clear=True, suppress_window=False, trans_pause=False, **kwargs):
"""
This handles an interaction, restarting it if necessary. All of the
keyword arguments are passed off to interact_core.
"""
renpy.plog(1, "start of new interaction")
if not self.started:
self.start()
if self.clear_screenshot:
self.lose_screenshot()
self.clear_screenshot = False
self.trans_pause = trans_pause
# Cancel magic error reporting.
renpy.bootstrap.report_error = None
context = renpy.game.context()
if context.interacting:
raise Exception("Cannot start an interaction in the middle of an interaction, without creating a new context.")
context.interacting = True
# Show a missing window.
if not suppress_window:
self.show_window()
# These things can be done once per interaction.
preloads = self.preloads
self.preloads = [ ]
try:
for i in renpy.config.start_interact_callbacks:
i()
repeat = True
self.start_interact = True
while repeat:
repeat, rv = self.interact_core(preloads=preloads, trans_pause=trans_pause, **kwargs)
self.start_interact = False
return rv
finally:
context.interacting = False
# Clean out transient stuff at the end of an interaction.
if clear:
scene_lists = renpy.game.context().scene_lists
scene_lists.replace_transient()
self.ongoing_transition = { }
self.transition_time = { }
self.transition_from = { }
self.restart_interaction = True
renpy.game.context().mark_seen()
renpy.game.context().scene_lists.shown_window = False
if renpy.game.log is not None:
renpy.game.log.did_interaction = True
if renpy.store._side_image_attributes_reset:
renpy.store._side_image_attributes = None
renpy.store._side_image_attributes_reset = False
def consider_gc(self):
"""
Considers whether we should perform a garbage collection.
"""
if not renpy.config.manage_gc:
return
count = gc.get_count()
if count[0] >= renpy.config.idle_gc_count:
renpy.plog(2, "before gc")
if count[2] >= renpy.config.gc_thresholds[2]:
gen = 2
elif count[1] >= renpy.config.gc_thresholds[1]:
gen = 1
else:
gen = 0
gc.collect(gen)
if gc.garbage:
renpy.memory.print_garbage(gen)
gc.garbage[:] = [ ]
renpy.plog(2, "after gc")
def idle_frame(self, can_block, expensive):
"""
Tasks that are run during "idle" frames.
"""
if expensive:
renpy.plog(1, "start idle_frame (expensive)")
else:
renpy.plog(1, "start idle_frame (inexpensive)")
# We want this to include the GC time, so we don't predict on
# frames where we GC.
start = get_time()
step = 1
while True:
if self.event_peek():
break
if not (can_block and expensive):
if get_time() > (start + .0005):
break
# Step 1: Run gc.
if step == 1:
self.consider_gc()
step += 1
# Step 2: Push textures to GPU.
elif step == 2:
renpy.display.draw.ready_one_texture()
step += 1
# Step 3: Predict more images.
elif step == 3:
if not self.prediction_coroutine:
step += 1
continue
try:
result = self.prediction_coroutine.send(expensive)
except ValueError:
# Saw this happen once during a quit, giving a
# ValueError: generator already executing
result = None
if result is None:
self.prediction_coroutine = None
step += 1
elif result is False:
if not expensive:
step += 1
# Step 4: Preload images (on emscripten)
elif step == 4:
if expensive and renpy.emscripten:
renpy.display.im.cache.preload_thread_pass()
step += 1
# Step 5: Autosave.
elif step == 5:
if not self.did_autosave:
renpy.loadsave.autosave()
renpy.persistent.check_update()
self.did_autosave = True
step += 1
else:
break
if expensive:
renpy.plog(1, "end idle_frame (expensive)")
else:
renpy.plog(1, "end idle_frame (inexpensive)")
def interact_core(self,
show_mouse=True,
trans_pause=False,
suppress_overlay=False,
suppress_underlay=False,
mouse='default',
preloads=[],
roll_forward=None,
):
"""
This handles one cycle of displaying an image to the user,
and then responding to user input.
@param show_mouse: Should the mouse be shown during this
interaction? Only advisory, and usually doesn't work.
@param trans_pause: If given, we must have a transition. Should we
add a pause behavior during the transition?
@param suppress_overlay: This suppresses the display of the overlay.
@param suppress_underlay: This suppresses the display of the underlay.
"""
renpy.plog(1, "start interact_core")
suppress_overlay = suppress_overlay or renpy.store.suppress_overlay
# Store the various parameters.
self.suppress_overlay = suppress_overlay
self.suppress_underlay = suppress_underlay
self.trans_pause = trans_pause
# Show default screens.
renpy.display.screen.show_overlay_screens(suppress_overlay)
# Prepare screens, if need be.
renpy.display.screen.prepare_screens()
self.roll_forward = roll_forward
self.show_mouse = show_mouse
suppress_transition = renpy.config.skipping or renpy.game.less_updates
# The global one.
self.suppress_transition = False
# Figure out transitions.
if suppress_transition:
self.ongoing_transition.clear()
self.transition_from.clear()
self.transition_time.clear()
else:
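# Move pending transitions into the ongoing set, snapshotting the scene being
# transitioned from for each layer.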
for k in self.transition:
if k not in self.old_scene:
continue
self.ongoing_transition[k] = self.transition[k]
self.transition_from[k] = self.old_scene[k]._in_current_store()
self.transition_time[k] = None
self.transition.clear()
# Safety condition, prevents deadlocks.
if trans_pause:
if not self.ongoing_transition:
return False, None
if None not in self.ongoing_transition:
return False, None
if suppress_transition:
return False, None
if not self.old_scene:
return False, None
# Check to see if the language has changed.
renpy.translation.check_language()
# We just restarted.
self.restart_interaction = False
# Setup the mouse.
self.mouse = mouse
# The start and end times of this interaction.
start_time = get_time()
end_time = start_time
# frames = 0
for i in renpy.config.interact_callbacks:
i()
# Set the window caption.
self.set_window_caption()
# Tick time forward.
renpy.display.im.cache.tick()
renpy.text.text.text_tick()
renpy.display.predict.reset()
# Clear the size groups.
renpy.display.layout.size_groups.clear()
# Clear the set of updated screens.
renpy.display.screen.updated_screens.clear()
# Clear some events.
pygame.event.clear((pygame.MOUSEMOTION,
PERIODIC,
TIMEEVENT,
REDRAW))
# Add a single TIMEEVENT to the queue.
self.post_time_event()
# Figure out the scene list we want to show.
scene_lists = renpy.game.context().scene_lists
# Remove the now-hidden things.
scene_lists.remove_hidden()
# Compute the overlay.
if not suppress_overlay:
self.compute_overlay()
# The root widget of everything that is displayed on the screen.
root_widget = renpy.display.layout.MultiBox(layout='fixed')
root_widget.layers = { }
# A list of widgets that are roots of trees of widgets that are
# considered for focusing.
focus_roots = [ ]
# Add the underlay to the root widget.
if not suppress_underlay:
for i in renpy.config.underlay:
root_widget.add(i)
focus_roots.append(i)
if roll_forward is not None:
rfw = renpy.display.behavior.RollForward(roll_forward)
root_widget.add(rfw)
focus_roots.append(rfw)
# Figure out the scene. (All of the layers, and the root.)
scene = self.compute_scene(scene_lists)
renpy.display.tts.set_root(scene[None])
renpy.plog(1, "computed scene")
# If necessary, load all images here.
for w in scene.itervalues():
try:
renpy.display.predict.displayable(w)
except:
pass
renpy.plog(1, "final predict")
# The root widget of all of the layers.
layers_root = renpy.display.layout.MultiBox(layout='fixed')
layers_root.layers = { }
def add_layer(where, layer):
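# Wrap the layer in its ongoing transition (if any), recording the transition's
# start time; otherwise add the bare scene layer directly.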
scene_layer = scene[layer]
focus_roots.append(scene_layer)
if (self.ongoing_transition.get(layer, None) and
not suppress_transition):
trans = self.ongoing_transition[layer](
old_widget=self.transition_from[layer],
new_widget=scene_layer)
if not isinstance(trans, Displayable):
raise Exception("Expected transition to be a displayable, not a %r" % trans)
transition_time = self.transition_time.get(layer, None)
where.add(trans, transition_time, transition_time)
where.layers[layer] = trans
else:
where.layers[layer] = scene_layer
where.add(scene_layer)
# Add layers (perhaps with transitions) to the layers root.
for layer in renpy.config.layers:
add_layer(layers_root, layer)
# Add layers_root to root_widget, perhaps through a transition.
if (self.ongoing_transition.get(None, None) and
not suppress_transition):
old_root = renpy.display.layout.MultiBox(layout='fixed')
old_root.layers = { }
for layer in renpy.config.layers:
d = self.transition_from[None].layers[layer]
old_root.layers[layer] = d
old_root.add(d)
trans = self.ongoing_transition[None](
old_widget=old_root,
new_widget=layers_root)
if not isinstance(trans, Displayable):
raise Exception("Expected transition to be a displayable, not a %r" % trans)
trans._show()
transition_time = self.transition_time.get(None, None)
root_widget.add(trans, transition_time, transition_time)
if trans_pause:
if renpy.store._dismiss_pause:
sb = renpy.display.behavior.SayBehavior()
else:
sb = renpy.display.behavior.SayBehavior(dismiss='dismiss_hard_pause')
root_widget.add(sb)
focus_roots.append(sb)
pb = renpy.display.behavior.PauseBehavior(trans.delay)
root_widget.add(pb, transition_time, transition_time)
focus_roots.append(pb)
else:
root_widget.add(layers_root)
# Add top_layers to the root_widget.
for layer in renpy.config.top_layers:
add_layer(root_widget, layer)
for i in renpy.display.emulator.overlay:
root_widget.add(i)
del add_layer
self.prediction_coroutine = renpy.display.predict.prediction_coroutine(root_widget)
self.prediction_coroutine.send(None)
# Clean out the registered adjustments.
renpy.display.behavior.adj_registered.clear()
# Clean up some movie-related things.
renpy.display.video.early_interact()
# Call per-interaction code for all widgets.
renpy.display.behavior.input_pre_per_interact()
root_widget.visit_all(lambda i : i.per_interact())
renpy.display.behavior.input_post_per_interact()
# Now, update various things regarding scenes and transitions,
# so we are ready for a new interaction or a restart.
self.old_scene = scene
# Okay, from here on we now have a single root widget (root_widget),
# which we will try to show to the user.
# Figure out what should be focused.
renpy.display.focus.before_interact(focus_roots)
# Something updated the screens. Deal with it now, so the player doesn't
# see it.
if self.restart_interaction:
return True, None
# Redraw the screen.
needs_redraw = True
# First pass through the while loop?
first_pass = True
# We don't yet know when the interaction began.
self.interact_time = None
# We only want to do autosave once.
self.did_autosave = False
old_timeout_time = None
old_redraw_time = None
rv = None
# Start sound.
renpy.audio.audio.interact()
# How long until we redraw.
_redraw_in = 3600
# Have we drawn a frame yet?
video_frame_drawn = False
# We're no longer after rollback.
renpy.game.after_rollback = False
# How many frames have we shown so far?
frame = 0
can_block = False
# This try block is used to force cleanup even on termination
# caused by an exception propagating through this function.
try:
while rv is None:
renpy.plog(1, "start of interact while loop")
renpy.execution.not_infinite_loop(10)
# Check for a change in fullscreen preference.
if ((self.fullscreen != renpy.game.preferences.fullscreen) or
self.display_reset or (renpy.display.draw is None)):
self.set_mode()
needs_redraw = True
# Check for autoreload.
if renpy.loader.needs_autoreload:
renpy.loader.needs_autoreload = False
renpy.exports.reload_script()
for i in renpy.config.needs_redraw_callbacks:
if i():
needs_redraw = True
# Redraw the screen.
if (self.force_redraw or
((first_pass or not pygame.event.peek(ALL_EVENTS)) and
renpy.display.draw.should_redraw(needs_redraw, first_pass, can_block))):
self.force_redraw = False
renpy.display.render.process_redraws()
# If we have a movie, start showing it.
fullscreen_video = renpy.display.video.interact()
# Clean out the redraws, if we have to.
# renpy.display.render.kill_redraws()
self.text_rect = None
# Draw the screen.
self.frame_time = get_time()
renpy.audio.audio.advance_time() # Sets the time of all video frames.
self.draw_screen(root_widget, fullscreen_video, (not fullscreen_video) or video_frame_drawn)
if first_pass:
if not self.interact_time:
self.interact_time = max(self.frame_time, get_time() - self.frame_duration)
scene_lists.set_times(self.interact_time)
for k, v in self.transition_time.iteritems():
if v is None:
self.transition_time[k] = self.interact_time
renpy.display.render.adjust_render_cache_times(self.frame_time, self.interact_time)
frame += 1
renpy.config.frames += 1
# If profiling is enabled, report the profile time.
if renpy.config.profile or self.profile_once:
renpy.plog(0, "end frame")
renpy.performance.analyze()
renpy.performance.clear()
renpy.plog(0, "start frame")
self.profile_once = False
if first_pass and self.last_event and self.last_event.type in [ pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION ]:
x, y = renpy.display.draw.get_mouse_pos()
ev, x, y = renpy.display.emulator.emulator(self.last_event, x, y)
if self.ignore_touch:
x = -1
y = -1
if renpy.android and self.last_event.type == pygame.MOUSEBUTTONUP:
x = -1
y = -1
renpy.display.focus.mouse_handler(None, x, y, default=False)
needs_redraw = False
first_pass = False
pygame.time.set_timer(REDRAW, 0)
pygame.event.clear([REDRAW])
old_redraw_time = None
self.update_text_rect()
renpy.test.testexecution.execute()
# Move the mouse, if necessary.
if self.mouse_move is not None:
if not self.mouse_move.perform():
self.mouse_move = None
# Draw the mouse, if it needs drawing.
renpy.display.draw.update_mouse()
# See if we want to restart the interaction entirely.
if self.restart_interaction:
return True, None
# Determine if we need a redraw. (We want to run these
# functions, so we put them first to prevent short-circuiting.)
if renpy.display.video.frequent():
needs_redraw = True
video_frame_drawn = True
if renpy.display.render.check_redraws():
needs_redraw = True
# How many seconds until we timeout.
_timeout_in = 3600
# Handle the redraw timer.
redraw_time = renpy.display.render.redraw_time()
# We only need to set the REDRAW timer if we can block.
can_block = renpy.display.draw.can_block()
if self.maximum_framerate_time > get_time():
can_block = False
if (redraw_time is not None) and (not needs_redraw) and can_block:
if redraw_time != old_redraw_time:
time_left = redraw_time - get_time()
time_left = min(time_left, 3600)
_redraw_in = time_left
if time_left <= 0:
try:
pygame.event.post(self.redraw_event)
except:
pass
pygame.time.set_timer(REDRAW, 0)
else:
pygame.time.set_timer(REDRAW, max(int(time_left * 1000), 1))
old_redraw_time = redraw_time
else:
_redraw_in = 3600
pygame.time.set_timer(REDRAW, 0)
# Handle the timeout timer.
if not self.timeout_time:
pygame.time.set_timer(TIMEEVENT, 0)
else:
time_left = self.timeout_time - get_time()
time_left = min(time_left, 3600)
_timeout_in = time_left
if time_left <= 0:
self.timeout_time = None
pygame.time.set_timer(TIMEEVENT, 0)
self.post_time_event()
elif self.timeout_time != old_timeout_time:
# Always set to at least 1ms.
pygame.time.set_timer(TIMEEVENT, int(time_left * 1000 + 1))
old_timeout_time = self.timeout_time
if can_block or (frame >= renpy.config.idle_frame):
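# A frame is treated as "expensive" only when no redraw or timeout is imminent
# and no video is playing, so the idle work (preloading, autosave) can afford
# slower tasks without visibly stalling the interaction.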
expensive = not ( needs_redraw or (_redraw_in < .2) or (_timeout_in < .2) or renpy.display.video.playing() )
self.idle_frame(can_block, expensive)
if needs_redraw or (not can_block) or self.mouse_move or renpy.display.video.playing():
renpy.plog(1, "pre peek")
ev = self.event_poll()
renpy.plog(1, "post peek {!r}", ev)
else:
renpy.plog(1, "pre wait")
ev = self.event_wait()
renpy.plog(1, "post wait {!r}", ev)
if ev.type == pygame.NOEVENT:
if can_block and (not needs_redraw) and (not self.prediction_coroutine) and (not self.mouse_move):
pygame.time.wait(1)
continue
# Recognize and ignore AltGr on Windows.
if ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_LCTRL:
ev2 = self.event_peek()
if (ev2 is not None) and (ev2.type == pygame.KEYDOWN):
if ev2.key == pygame.K_RALT:
continue
# Check to see if the OS is asking us to suspend (on Android
# and iOS.)
if self.check_suspend(ev):
continue
# Try to merge a TIMEEVENT with other timeevents.
if ev.type == TIMEEVENT:
old_timeout_time = None
pygame.event.clear([TIMEEVENT])
# On Android, where we have multiple mouse buttons, we can
# merge a mouse down and mouse up event with its successor. This
# prevents us from getting overwhelmed with too many events on
# a multitouch screen.
if android and (ev.type == pygame.MOUSEBUTTONDOWN or ev.type == pygame.MOUSEBUTTONUP):
pygame.event.clear(ev.type)
# Handle redraw timeouts.
if ev.type == REDRAW:
pygame.event.clear([REDRAW])
old_redraw_time = None
continue
# Handle periodic events. This includes updating the mouse timers (and through the loop,
# the mouse itself), and the audio system periodic calls.
if ev.type == PERIODIC:
events = 1 + len(pygame.event.get([PERIODIC]))
self.ticks += events
for i in renpy.config.periodic_callbacks:
i()
renpy.audio.audio.periodic()
renpy.display.tts.periodic()
continue
# Handle quit specially for now.
if ev.type == pygame.QUIT:
self.quit_event()
continue
# Ignore KEY-events while text is being edited (usually with an IME).
if ev.type == pygame.TEXTEDITING:
if ev.text:
self.text_editing = ev
else:
self.text_editing = None
elif ev.type == pygame.TEXTINPUT:
self.text_editing = None
elif self.text_editing and ev.type in [ pygame.KEYDOWN, pygame.KEYUP ]:
continue
if ev.type == pygame.VIDEOEXPOSE:
# Needed to force the display to redraw after expose in
# the software renderer.
renpy.game.interface.full_redraw = True
renpy.game.interface.force_redraw = True
if isinstance(renpy.display.draw, renpy.display.swdraw.SWDraw):
renpy.display.draw.full_redraw = True
continue
# Handle videoresize.
if ev.type == pygame.VIDEORESIZE:
evs = pygame.event.get([pygame.VIDEORESIZE])
if len(evs):
ev = evs[-1]
# We seem to get a spurious event like this when leaving
# fullscreen mode on windows.
if ev.w < 256 or ev.h < 256:
continue
size = (ev.w // self.dpi_scale, ev.h // self.dpi_scale)
# Refresh fullscreen status (e.g. user pressed Esc. in browser)
main_window = pygame.display.get_window()
self.fullscreen = main_window is not None and bool(main_window.get_window_flags() & (pygame.WINDOW_FULLSCREEN_DESKTOP|pygame.WINDOW_FULLSCREEN))
renpy.game.preferences.fullscreen = self.fullscreen
if pygame.display.get_surface().get_size() != ev.size:
self.set_mode(size)
if not self.fullscreen:
self.last_resize = size
continue
# If we're ignoring touch events, and get a mouse up, stop
# ignoring those events.
if self.ignore_touch and \
ev.type == pygame.MOUSEBUTTONUP and \
ev.button == 1:
self.ignore_touch = False
continue
# Merge mousemotion events.
if ev.type == pygame.MOUSEMOTION:
evs = pygame.event.get([pygame.MOUSEMOTION])
if len(evs):
ev = evs[-1]
if renpy.windows:
self.mouse_focused = True
# Handle mouse event time, and ignoring touch.
if ev.type == pygame.MOUSEMOTION or \
ev.type == pygame.MOUSEBUTTONDOWN or \
ev.type == pygame.MOUSEBUTTONUP:
self.mouse_event_time = renpy.display.core.get_time()
if self.ignore_touch:
renpy.display.focus.mouse_handler(None, -1, -1, default=False)
# Handle focus notifications.
if ev.type == pygame.ACTIVEEVENT:
if ev.state & 1:
if not ev.gain:
renpy.display.focus.clear_focus()
self.mouse_focused = ev.gain
if ev.state & 2:
self.keyboard_focused = ev.gain
if ev.state & 4:
if ev.gain:
self.restored()
else:
self.iconified()
pygame.key.set_mods(0)
# This returns the event location. It also updates the
# mouse state as necessary.
x, y = renpy.display.draw.mouse_event(ev)
x, y = renpy.test.testmouse.get_mouse_pos(x, y)
ev, x, y = renpy.display.emulator.emulator(ev, x, y)
if ev is None:
continue
if not self.mouse_focused or self.ignore_touch:
x = -1
y = -1
# This can set the event to None, to ignore it.
ev = renpy.display.controller.event(ev)
if not ev:
continue
# Handle skipping.
renpy.display.behavior.skipping(ev)
self.event_time = end_time = get_time()
try:
if self.touch:
renpy.display.gesture.recognizer.event(ev, x, y) # @UndefinedVariable
# Handle the event normally.
rv = renpy.display.focus.mouse_handler(ev, x, y)
if rv is None:
rv = root_widget.event(ev, x, y, 0)
if rv is None:
rv = renpy.display.focus.key_handler(ev)
if rv is not None:
break
# Handle displayable inspector.
if renpy.config.inspector:
if renpy.display.behavior.map_event(ev, "inspector"):
l = self.surftree.main_displayables_at_point(x, y, renpy.config.transient_layers + renpy.config.context_clear_layers + renpy.config.overlay_layers)
renpy.game.invoke_in_new_context(renpy.config.inspector, l)
elif renpy.display.behavior.map_event(ev, "full_inspector"):
l = self.surftree.main_displayables_at_point(x, y, renpy.config.layers)
renpy.game.invoke_in_new_context(renpy.config.inspector, l)
except IgnoreEvent:
# An ignored event can change the timeout. So we want to
# process a TIMEEVENT to ensure that the timeout is
# set correctly
if ev.type != TIMEEVENT:
self.post_time_event()
# Check again after handling the event.
needs_redraw |= renpy.display.render.check_redraws()
if self.restart_interaction:
return True, None
# If we were trans-paused and rv is true, suppress
# transitions up to the next interaction.
if trans_pause and rv:
self.suppress_transition = True
# But wait, there's more! The finally block runs some cleanup
# after this.
return False, rv
except EndInteraction as e:
return False, e.value
finally:
renpy.game.context().say_attributes = None
# Clean out the overlay layers.
for i in renpy.config.overlay_layers:
scene_lists.clear(i)
# Stop ongoing preloading.
renpy.display.im.cache.end_tick()
# We no longer disable periodic between interactions.
# pygame.time.set_timer(PERIODIC, 0)
pygame.time.set_timer(TIMEEVENT, 0)
pygame.time.set_timer(REDRAW, 0)
self.consider_gc()
renpy.game.context().runtime += end_time - start_time
# Restart the old interaction, which also causes a
# redraw if needed.
self.restart_interaction = True
renpy.plog(1, "end interact_core")
# print("It took", frames, "frames.")
def timeout(self, offset):
if offset < 0:
return
if self.timeout_time:
self.timeout_time = min(self.event_time + offset, self.timeout_time)
else:
self.timeout_time = self.event_time + offset
def finish_pending(self):
"""
Called before a quit or restart to finish any pending work that might
block other threads.
"""
self.check_background_screenshot()
| """
The base class for every object in Ren'Py that can be
displayed to the screen.
Drawables will be serialized to a savegame file. Therefore, they
shouldn't store non-serializable things (like pygame surfaces) in
their fields.
"""
# Some invariants about method call order:
#
# per_interact is called before render.
# render is called before event.
#
# get_placement can be called at any time, so can't
# assume anything.
# If True this displayable can accept focus.
# If False, it can't, but it keeps its place in the focus order.
# If None, it does not have a place in the focus order.
focusable = None
# This is the focus name assigned by the focus code.
full_focus_name = None
# A role ('selected_' or '') that prefixes the style.
role = ''
# The event we'll pass on to our parent transform.
transform_event = None
# Can we change our look in response to transform_events?
transform_event_responder = False
# The main displayable, if this displayable is the root of a composite
# displayable. (This is used by SL to figure out where to add children
# to.) If None, it is itself.
_main = None
# A list of the children that make up this composite displayable.
_composite_parts = [ ]
# The location the displayable was created at, if known.
_location = None
# Does this displayable use the scope?
_uses_scope = False
# Arguments supplied to this displayable.
_args = DisplayableArguments()
# Set to true if the displayable is duplicatable (has a non-trivial
# duplicate method), or one of its children is.
_duplicatable = False
# Does this displayable require clipping?
_clipping = False
# Does this displayable have a tooltip?
_tooltip = None
def __ne__(self, o):
return not (self == o)
def __init__(self, focus=None, default=False, style='default', _args=None, tooltip=None, default_focus=False, **properties):
global default_style
if (style == "default") and (not properties):
self.style = default_style
else:
self.style = renpy.style.Style(style, properties) # @UndefinedVariable
self.focus_name = focus
self.default = default or default_focus
self._tooltip = tooltip
if _args is not None:
self._args = _args
def _copy(self, args=None):
"""
Makes a shallow copy of the displayable. If `args` is provided,
replaces the arguments with the stored copy.
"""
rv = copy.copy(self)
if args is not None:
rv._args = args
return rv
def _duplicate(self, args):
"""
Makes a duplicate copy of the following kinds of displayables:
* Displayables that can accept arguments.
* Displayables that maintain state that should be reset before being
shown to the user.
* Containers that contain (including transitively) one of the other
kinds of displayables.
Displayables that contain state that can be manipulated by the user
are never copied.
This should call _unique on children that have been copied before
setting its own _duplicatable flag.
"""
if args and args.args:
args.extraneous()
return self
def _get_tooltip(self):
"""
Returns the tooltip of this displayable.
"""
return self._tooltip
def _in_current_store(self):
"""
Returns a version of this displayable that will not change as it is
rendered.
"""
return self
def _unique(self):
"""
This is called when a displayable is "born" unique, which occurs
when there is only a single reference to it. What it does is to
manage the _duplicatable flag - setting it false unless one of
the displayable's children happens to be duplicatable.
"""
return
def parameterize(self, name, parameters):
"""
Obsolete alias for _duplicate.
"""
a = self._args.copy(name=name, args=parameters)
return self._duplicate(a)
def _equals(self, o):
"""
This is a utility method that can be called by a Displayable's
__eq__ method, to compare displayables for type and displayable
component equality.
"""
if type(self) is not type(o):
return False
if self.focus_name != o.focus_name:
return False
if self.style != o.style:
return False
if self.default != o.default:
return False
return True
def __unicode__(self):
return self.__class__.__name__
def __repr__(self):
return "<{} at {:x}>".format(unicode(self).encode("utf-8"), id(self))
def find_focusable(self, callback, focus_name):
focus_name = self.focus_name or focus_name
if self.focusable:
callback(self, focus_name)
elif self.focusable is not None:
callback(None, focus_name)
for i in self.visit():
if i is None:
continue
i.find_focusable(callback, focus_name)
def focus(self, default=False):
"""
Called to indicate that this widget has the focus.
"""
self.set_style_prefix(self.role + "hover_", True)
if not default:
renpy.exports.play(self.style.hover_sound)
def unfocus(self, default=False):
"""
Called to indicate that this widget has become unfocused.
"""
self.set_style_prefix(self.role + "idle_", True)
def is_focused(self):
if renpy.display.focus.grab and renpy.display.focus.grab is not self:
return
return renpy.game.context().scene_lists.focused is self
def set_style_prefix(self, prefix, root):
"""
Called to set the style prefix of this widget and its child
widgets, if any.
`root` - True if this is the root of a style tree, False if this
has been passed on to a child.
"""
if prefix == self.style.prefix:
return
self.style.set_prefix(prefix)
renpy.display.render.redraw(self, 0)
def render(self, width, height, st, at):
"""
Called to display this displayable. This is called with width
and height parameters, which give the largest width and height
that this drawable can be drawn to without overflowing some
bounding box. It's also given two times. It returns a Surface
that is the current image of this drawable.
@param st: The time since this widget was first shown, in seconds.
@param at: The time since a similarly named widget was first shown,
in seconds.
"""
raise Exception("Render not implemented.")
def event(self, ev, x, y, st):
"""
Called to report that an event has occurred. Ev is the raw
pygame event object representing that event. If the event
involves the mouse, x and y are the translation of the event
into the coordinates of this displayable. st is the time this
widget has been shown for.
@returns A value that should be returned from Interact, or None if
no value is appropriate.
"""
return None
def get_placement(self):
"""
Returns a style object containing placement information for
this Displayable. Children are expected to overload this
to return something more sensible.
"""
return self.style.get_placement()
def visit_all(self, callback, seen=None):
"""
Calls the callback on this displayable, and then on all children
of this displayable.
"""
if seen is None:
seen = set()
for d in self.visit():
if d is None:
continue
id_d = id(d)
if id_d in seen:
continue
seen.add(id_d)
d.visit_all(callback, seen)
callback(self)
def visit(self):
"""
Called to ask the displayable to return a list of its children
(including children taken from styles). For convenience, this
list may also include None values.
"""
return [ ]
def per_interact(self):
"""
Called once per widget per interaction.
"""
return None
def predict_one(self):
"""
Called to ask this displayable to call the callback with all
the images it may want to load.
"""
return
def predict_one_action(self):
"""
Called to ask this displayable to cause image prediction
to occur for images that may be loaded by its actions.
"""
return
def place(self, dest, x, y, width, height, surf, main=True):
"""
This places a render (which must be of this displayable)
within a bounding area. Returns an (x, y) tuple giving the location
the displayable was placed at.
`dest`
If not None, the `surf` will be blitted to `dest` at the
computed coordinates.
`x`, `y`, `width`, `height`
The bounding area.
`surf`
The render to place.
`main`
This is passed to Render.blit().
"""
placement = self.get_placement()
subpixel = placement[6]
xpos, ypos = place(width, height, surf.width, surf.height, placement)
xpos += x
ypos += y
pos = (xpos, ypos)
if dest is not None:
if subpixel:
dest.subpixel_blit(surf, pos, main, main, None)
else:
dest.blit(surf, pos, main, main, None)
return pos
def set_transform_event(self, event):
"""
Sets the transform event of this displayable to event.
"""
if event == self.transform_event:
return
self.transform_event = event
if self.transform_event_responder:
renpy.display.render.redraw(self, 0)
def _handles_event(self, event):
"""
Returns True if the displayable handles event, False otherwise.
"""
return False
def _hide(self, st, at, kind):
"""
Returns None if this displayable is ready to be hidden, or
a replacement displayable if it doesn't want to be hidden
quite yet. Kind is either "hide" or "replaced".
"""
return None
def _show(self):
"""
Called when the displayable is added to a scene list.
"""
def _target(self):
"""
If this displayable is part of a chain of one or more references,
returns the ultimate target of those references. Otherwise, returns
the displayable.
"""
return self
def _change_transform_child(self, child):
"""
If this is a transform, makes a copy of the transform and sets
the child of the innermost transform to this. Otherwise,
simply returns child.
"""
return child
def _clear(self):
"""
Clears out the children of this displayable, if any.
"""
return
def _tts_common(self, default_alt=None):
rv = [ ]
for i in self.visit():
if i is not None:
speech = i._tts()
if speech.strip():
rv.append(speech)
rv = ": ".join(rv)
rv = rv.replace("::", ":")
rv = rv.replace(": :", ":")
alt = self.style.alt
if alt is None:
alt = default_alt
if alt is not None:
rv = renpy.substitutions.substitute(alt, scope={ "text" : rv })[0]
return rv
def _tts(self):
"""
Returns the self-voicing text of this displayable and all of its
children that cannot take focus. If the displayable can take focus,
returns the empty string.
"""
return self._tts_common()
def _tts_all(self):
"""
Returns the self-voicing text of this displayable and all of its
children that cannot take focus.
"""
return self._tts_common() |
budweiser.py
__author__ = 'luke.beer'
import subprocess
import threading
import logging
import socket
import time
import questions
import states
class Executor(threading.Thread):
def __init__(self, r, channel):
threading.Thread.__init__(self)
self.redis = r
self.channel = channel
self.pubsub = self.redis.pubsub()
self.pubsub.subscribe([channel])
self.state = states.State.INIT
self.name = socket.gethostname()
self.hosts = []
def communicate(self, msg='Sup?'):
msg = '%s :: %s :: %s' % (socket.gethostname(), self.state, msg)
logging.info(msg)
self.redis.publish(self.channel, msg)
def set_state(self, state):
state_msg = 'State changed from %s to %s' % (self.state, state)
logging.info(state_msg)
self.communicate(state_msg)
def archive(self):
self.set_state(states.State.ARCHIVE)
time.sleep(5)
def compress(self):
self.set_state(states.State.COMPRESS)
time.sleep(5)
def file_sync(self, host):
self.set_state(states.State.COLLECT)
self.communicate("%s: Valid config." % host.hostname)
try:
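# Pull files matching host.match from the remote source directory into a
# per-host destination via rsync (checksum-based, compressed, recursive).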
destination = "%s/%s" % (host.destination, host.hostname)
src = "%s@%s:%s/" % (host.username, host.address, host.source)
result = subprocess.check_output(['/usr/bin/rsync', '-pvvctrz', '--include=\"%s\"' % host.match, src,
destination], stderr=subprocess.STDOUT)
self.communicate(result)
except Exception as e:
self.communicate(e.message)
def stop(self):
self.pubsub.unsubscribe()
self.communicate('Goodbye....')
def ready(self, item):
hostname, state, msg = item['data'].split(' :: ', 2)
if hostname == self.name:
return
if msg == questions.Questions.WHAT:
self.communicate("Hey friend, I'm %s" % self.state)
else:
if state == states.State.IDLE:
return True
if state in [states.State.COLLECT, states.State.ARCHIVE, states.State.COMPRESS]:
return False
main.rs
#![feature(box_syntax)]
#![feature(asm)]
extern crate e2d2;
extern crate fnv;
extern crate getopts;
extern crate rand;
extern crate time;
use self::nf::*;
use e2d2::allocators::CacheAligned;
use e2d2::config::*;
use e2d2::interface::*;
use e2d2::operators::*;
use e2d2::scheduler::*;
use std::env;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
mod nf;
const CONVERSION_FACTOR: f64 = 1000000000.;
fn test<S: Scheduler + Sized>(ports: Vec<CacheAligned<PortQueue>>, sched: &mut S) {
for port in &ports {
println!(
"Receiving port {} rxq {} txq {}",
port.port.mac_address(),
port.rxq(),
port.txq()
);
}
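// One pipeline per port: receive packets from the port, run the
// reconstruction network function, and send the result back out the same port.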
let pipelines: Vec<_> = ports
.iter()
.map(|port| reconstruction(ReceiveBatch::new(port.clone()), sched).send(port.clone()))
.collect();
println!("Running {} pipelines", pipelines.len());
for pipeline in pipelines {
sched.add_task(pipeline).unwrap();
}
}
fn main() {
let opts = basic_opts();
let args: Vec<String> = env::args().collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!(f.to_string()),
};
let configuration = read_matches(&matches, &opts);
let mut config = initialize_system(&configuration).unwrap();
config.start_schedulers();
config.add_pipeline_to_run(Arc::new(move |p, s: &mut StandaloneScheduler| test(p, s)));
config.execute();
let mut pkts_so_far = (0, 0);
let mut last_printed = 0.;
const MAX_PRINT_INTERVAL: f64 = 30.;
const PRINT_DELAY: f64 = 15.;
let sleep_delay = (PRINT_DELAY / 2.) as u64;
let mut start = time::precise_time_ns() as f64 / CONVERSION_FACTOR;
let sleep_time = Duration::from_millis(sleep_delay);
println!("0 OVERALL RX 0.00 TX 0.00 CYCLE_PER_DELAY 0 0 0");
loop {
thread::sleep(sleep_time); // Sleep for a bit
let now = time::precise_time_ns() as f64 / CONVERSION_FACTOR;
if now - start > PRINT_DELAY {
let mut rx = 0;
let mut tx = 0;
for port in config.ports.values() {
for q in 0..port.rxqs() {
let (rp, tp) = port.stats(q);
rx += rp;
tx += tp;
}
}
let pkts = (rx, tx);
let rx_pkts = pkts.0 - pkts_so_far.0;
if rx_pkts > 0 || now - last_printed > MAX_PRINT_INTERVAL {
println!(
"{:.2} OVERALL RX {:.2} TX {:.2}",
now - start,
rx_pkts as f64 / (now - start),
(pkts.1 - pkts_so_far.1) as f64 / (now - start)
);
last_printed = now;
start = now;
pkts_so_far = pkts;
}
}
}
}
allocator.rs
use crate::attributes;
use libc::c_uint;
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::sym;
use crate::llvm::{self, False, True};
use crate::ModuleLlvm;
pub(crate) unsafe fn codegen(
tcx: TyCtxt<'_>,
mods: &mut ModuleLlvm,
kind: AllocatorKind,
has_alloc_error_handler: bool,
) {
let llcx = &*mods.llcx;
let llmod = mods.llmod();
let usize = match tcx.sess.target.pointer_width {
16 => llvm::LLVMInt16TypeInContext(llcx),
32 => llvm::LLVMInt32TypeInContext(llcx),
64 => llvm::LLVMInt64TypeInContext(llcx),
tws => bug!("Unsupported target word size for int: {}", tws),
};
let i8 = llvm::LLVMInt8TypeInContext(llcx);
let i8p = llvm::LLVMPointerType(i8, 0);
let void = llvm::LLVMVoidTypeInContext(llcx);
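// For every allocator method, emit a `__rust_*` shim that forwards (as a tail
// call) to the selected allocator implementation named by `kind.fn_name`.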
for method in ALLOCATOR_METHODS {
let mut args = Vec::with_capacity(method.inputs.len());
for ty in method.inputs.iter() {
match *ty {
AllocatorTy::Layout => {
args.push(usize); // size
args.push(usize); // align
}
AllocatorTy::Ptr => args.push(i8p),
AllocatorTy::Usize => args.push(usize),
AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
}
}
let output = match method.output {
AllocatorTy::ResultPtr => Some(i8p),
AllocatorTy::Unit => None,
AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
panic!("invalid allocator output")
}
};
let ty = llvm::LLVMFunctionType(
output.unwrap_or(void),
args.as_ptr(),
args.len() as c_uint,
False,
);
let name = format!("__rust_{}", method.name);
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
if tcx.sess.target.options.default_hidden_visibility {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
attributes::emit_uwtable(llfn, true);
}
let callee = kind.fn_name(method.name);
let callee =
llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
let args = args
.iter()
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret =
llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
llvm::LLVMBuildRet(llbuilder, ret);
} else {
llvm::LLVMBuildRetVoid(llbuilder);
}
llvm::LLVMDisposeBuilder(llbuilder);
}
// rust alloc error handler
let args = [usize, usize]; // size, align
let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
let name = "__rust_alloc_error_handler".to_string();
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
// -> ! DIFlagNoReturn
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
if tcx.sess.target.options.default_hidden_visibility {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
attributes::emit_uwtable(llfn, true);
}
let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
let callee = kind.fn_name(sym::oom);
let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
// -> ! DIFlagNoReturn
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, callee);
llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
let args = args
.iter()
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
llvm::LLVMSetTailCall(ret, True);
llvm::LLVMBuildRetVoid(llbuilder);
llvm::LLVMDisposeBuilder(llbuilder);
}
VolumeMuteFill.ts
import createIcon, { JSX } from './utils/createIcon'
export const jsx: JSX = {
type: 'svg',
props: {
xmlns: 'http://www.w3.org/2000/svg',
viewBox: '0 0 24 24'
},
children: [
{
type: 'g',
props: null,
children: [
{
type: 'path',
props: {
fill: 'none',
d: 'M0 0h24v24H0z'
},
children: []
},
{
type: 'path',
props: {
d:
'M5.889 16H2a1 1 0 0 1-1-1V9a1 1 0 0 1 1-1h3.889l5.294-4.332a.5.5 0 0 1 .817.387v15.89a.5.5 0 0 1-.817.387L5.89 16zm14.525-4l3.536 3.536-1.414 1.414L19 13.414l-3.536 3.536-1.414-1.414L17.586 12 14.05 8.464l1.414-1.414L19 10.586l3.536-3.536 1.414 1.414L20.414 12z'
},
children: []
}
]
}
]
}
export default createIcon(jsx, 'VolumeMuteFill')
lancer.js
import { SystemManager } from "./manager.js";
import { ActionHandlerLancer as ActionHandler } from "../actions/lancer/lancer-actions.js"
import { RollHandlerBaseLancer as Core } from "../rollHandlers/lancer/lancer-base.js"
import * as settings from '../settings/lancer-settings.js';
export class LancerSystemManager extends SystemManager {
constructor(appName) {
super(appName);
}
/** @override */
doGetActionHandler(filterManager, categoryManager) {
console.log("startup");
let actionHandler = new ActionHandler(filterManager, categoryManager);
return actionHandler;
}
/** @override */
getAvailableRollHandlers() {
let choices = { 'core': 'Core Lancer' };
return choices;
}
/** @override */
doGetRollHandler(handlerId) {
return new Core();
}
/** @override */
doRegisterSettings(appName, updateFunc) {
settings.register(appName, updateFunc);
}
}
cartopy_utils.py
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Cartopy specific mapping utilities."""
try:
from cartopy.feature import Feature, Scaler
from ..cbook import get_test_data
class MetPyMapFeature(Feature):
"""A simple interface to MetPy-included shapefiles."""
def __init__(self, name, scale, **kwargs):
"""Create MetPyMapFeature instance."""
import cartopy.crs as ccrs
super().__init__(ccrs.PlateCarree(), **kwargs)
self.name = name
if isinstance(scale, str):
scale = Scaler(scale)
self.scaler = scale
def geometries(self):
"""Return an iterator of (shapely) geometries for this feature."""
import cartopy.io.shapereader as shapereader
# Ensure that the associated files are in the cache
fname = f'{self.name}_{self.scaler.scale}'
for extension in ['.dbf', '.shx']:
get_test_data(fname + extension, as_file_obj=False)
path = get_test_data(fname + '.shp', as_file_obj=False)
return iter(tuple(shapereader.Reader(path).geometries()))
def intersecting_geometries(self, extent):
"""Return geometries that intersect the extent."""
self.scaler.scale_from_extent(extent)
return super().intersecting_geometries(extent)
def with_scale(self, new_scale):
"""
Return a copy of the feature with a new scale.
Parameters
----------
new_scale
The new dataset scale, i.e. one of '500k', '5m', or '20m'.
Corresponding to 1:500,000, 1:5,000,000, and 1:20,000,000
respectively.
"""
return MetPyMapFeature(self.name, new_scale, **self.kwargs)
USCOUNTIES = MetPyMapFeature('us_counties', '20m', facecolor='None', edgecolor='black')
USSTATES = MetPyMapFeature('us_states', '20m', facecolor='None', edgecolor='black')
except ImportError:
# If no Cartopy is present, we just don't have map features
pass
__all__ = ['USCOUNTIES', 'USSTATES']
def import_cartopy():
"""Import CartoPy; return a stub if unable.
This allows code requiring CartoPy to fail at use time rather than import time.
"""
try:
import cartopy.crs as ccrs
return ccrs
except ImportError:
return CartopyStub()
class CartopyStub:
| """Fail if a CartoPy attribute is accessed."""
def __getattr__(self, name):
"""Raise an error on any attribute access."""
raise AttributeError(f'Cannot use {name} without Cartopy installed.') |
|
constants.py
# -*- coding: utf-8 -*-
"""
Messaging constant variables.
"""
from __future__ import unicode_literals
from builtins import str as text
from django.utils.translation import ugettext as _
XFORM = text('xform')
PROJECT = text('project')
USER = text('user')
APP_LABEL_MAPPING = {
XFORM: 'logger',
PROJECT: 'logger',
USER: 'auth',
}
MESSAGE = 'message'
UNKNOWN_TARGET = _("Unknown target.")
interpreter.py
from nodes import *
from tokens import Token, TokenType
class Interpreter:
def __init__(self, ast):
self.ast = ast
def eval(self):
return self.evalHelper(self.ast)
def evalHelper(self, ast):
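# Recursively evaluate the AST: NumberNodes are leaves, every other node
# applies its operator to the evaluated child subtrees.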
if isinstance(ast, NumberNode):
return ast.node
elif isinstance(ast, AddNode):
return self.evalHelper(ast.node_a) + self.evalHelper(ast.node_b)
elif isinstance(ast, SubtractNode):
return self.evalHelper(ast.node_a) - self.evalHelper(ast.node_b)
elif isinstance(ast, MultiplyNode):
return self.evalHelper(ast.node_a) * self.evalHelper(ast.node_b)
elif isinstance(ast, DivideNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) / eval_b
elif isinstance(ast, ModuloNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) % eval_b
elif isinstance(ast, PowerNode):
return self.evalHelper(ast.node_a) ** self.evalHelper(ast.node_b)
elif isinstance(ast, PositiveNode):
return self.evalHelper(ast.node)
elif isinstance(ast, NegativeNode):
return -self.evalHelper(ast.node)
def postfix_eval(tokens):
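# Evaluate a postfix (RPN) token stream with a stack: numbers are pushed,
# operators pop two operands and push the result back as a NUMBER token.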
stack = []
for t in tokens:
if t.type == TokenType.PLUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a + b))
elif t.type == TokenType.MINUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b - a))
elif t.type == TokenType.MULTIPLY:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a * b))
elif t.type == TokenType.DIVIDE:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b / a))
elif t.type == TokenType.MODULO:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b % a))
elif t.type == TokenType.POWER:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b ** a))
else:
stack.append(t)
return stack[0].value
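# Minimal usage sketch (hypothetical tokens for "3 4 +"):
# postfix_eval([Token(TokenType.NUMBER, 3), Token(TokenType.NUMBER, 4),
#               Token(TokenType.PLUS)])  # -> 7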