file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
ND280Transform_CSVEvtList.py
from GangaCore.GPIDev.Schema import * from GangaCore.GPIDev.Lib.Tasks.common import * from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Job.Job import JobError from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy from GangaCore.Core.exceptions import ApplicationConfigurationError from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy from GangaCore.Utility.logging import getLogger from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList
from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset from GangaND280.ND280Splitter import splitCSVFile import GangaCore.GPI as GPI import os logger = getLogger() class ND280Transform_CSVEvtList(ITransform): _schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({ 'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'), }.items()))) _category = 'transforms' _name = 'ND280Transform_CSVEvtList' _exportmethods = ITransform._exportmethods + [ ] def __init__(self): super(ND280Transform_CSVEvtList,self).__init__() def createUnits(self): """Create new units if required given the inputdata""" # call parent for chaining super(ND280Transform_CSVEvtList,self).createUnits() # Look at the application schema and check if there is a csvfile variable try: csvfile = self.application.csvfile except AttributeError: logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !') return subsets = splitCSVFile(self.application.csvfile, self.nbevents) for s,sub in enumerate(subsets): # check if this data is being run over by checking all the names listed ok = False for unit in self.units: if unit.subpartid == s: ok = True if ok: continue # new unit required for this dataset unit = ND280Unit_CSVEvtList() unit.name = "Unit %d" % len(self.units) unit.subpartid = s unit.eventswanted = sub unit.inputdata = self.inputdata[0] self.addUnitToTRF( unit ) def createChainUnit( self, parent_units, use_copy_output = True ): """Create a chained unit using the output data from the given units""" # check all parent units for copy_output copy_output_ok = True for parent in parent_units: if not parent.copy_output: copy_output_ok = False # all parent units must be completed so the outputfiles are filled correctly for parent in parent_units: if parent.status != "completed": return None if not use_copy_output or not copy_output_ok: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # loop over the output files and add them to the ND280LocalDataset - THIS MIGHT NEED SOME WORK! job = GPI.jobs(parent.active_job_ids[0]) for f in job.outputfiles: # should check for different file types and add them as appropriate to the dataset # self.inputdata (== TaskChainInput).include/exclude_file_mask could help with this # This will be A LOT easier with Ganga 6.1 as you can easily map outputfiles -> inputfiles! unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) ) else: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # unit needs to have completed and downloaded before we can get file list if parent.status != "completed": return None # we should be OK so copy all output to the dataset for f in parent.copy_output.files: unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) ) return unit
random_line_split
ND280Transform_CSVEvtList.py
from GangaCore.GPIDev.Schema import * from GangaCore.GPIDev.Lib.Tasks.common import * from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Job.Job import JobError from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy from GangaCore.Core.exceptions import ApplicationConfigurationError from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy from GangaCore.Utility.logging import getLogger from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset from GangaND280.ND280Splitter import splitCSVFile import GangaCore.GPI as GPI import os logger = getLogger() class ND280Transform_CSVEvtList(ITransform): _schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({ 'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'), }.items()))) _category = 'transforms' _name = 'ND280Transform_CSVEvtList' _exportmethods = ITransform._exportmethods + [ ] def __init__(self): super(ND280Transform_CSVEvtList,self).__init__() def createUnits(self): """Create new units if required given the inputdata""" # call parent for chaining super(ND280Transform_CSVEvtList,self).createUnits() # Look at the application schema and check if there is a csvfile variable try: csvfile = self.application.csvfile except AttributeError: logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !') return subsets = splitCSVFile(self.application.csvfile, self.nbevents) for s,sub in enumerate(subsets): # check if this data is being run over by checking all the names listed ok = False for unit in self.units: if unit.subpartid == s: ok = True if ok: continue # new unit required for this dataset unit = ND280Unit_CSVEvtList() unit.name = "Unit %d" % len(self.units) unit.subpartid = s unit.eventswanted = sub unit.inputdata = self.inputdata[0] self.addUnitToTRF( unit ) def createChainUnit( self, parent_units, use_copy_output = True ): """Create a chained unit using the output data from the given units""" # check all parent units for copy_output copy_output_ok = True for parent in parent_units: if not parent.copy_output: copy_output_ok = False # all parent units must be completed so the outputfiles are filled correctly for parent in parent_units: if parent.status != "completed":
if not use_copy_output or not copy_output_ok: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # loop over the output files and add them to the ND280LocalDataset - THIS MIGHT NEED SOME WORK! job = GPI.jobs(parent.active_job_ids[0]) for f in job.outputfiles: # should check for different file types and add them as appropriate to the dataset # self.inputdata (== TaskChainInput).include/exclude_file_mask could help with this # This will be A LOT easier with Ganga 6.1 as you can easily map outputfiles -> inputfiles! unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) ) else: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # unit needs to have completed and downloaded before we can get file list if parent.status != "completed": return None # we should be OK so copy all output to the dataset for f in parent.copy_output.files: unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) ) return unit
return None
conditional_block
ND280Transform_CSVEvtList.py
from GangaCore.GPIDev.Schema import * from GangaCore.GPIDev.Lib.Tasks.common import * from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Job.Job import JobError from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy from GangaCore.Core.exceptions import ApplicationConfigurationError from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy from GangaCore.Utility.logging import getLogger from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset from GangaND280.ND280Splitter import splitCSVFile import GangaCore.GPI as GPI import os logger = getLogger() class ND280Transform_CSVEvtList(ITransform): _schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({ 'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'), }.items()))) _category = 'transforms' _name = 'ND280Transform_CSVEvtList' _exportmethods = ITransform._exportmethods + [ ] def __init__(self):
def createUnits(self): """Create new units if required given the inputdata""" # call parent for chaining super(ND280Transform_CSVEvtList,self).createUnits() # Look at the application schema and check if there is a csvfile variable try: csvfile = self.application.csvfile except AttributeError: logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !') return subsets = splitCSVFile(self.application.csvfile, self.nbevents) for s,sub in enumerate(subsets): # check if this data is being run over by checking all the names listed ok = False for unit in self.units: if unit.subpartid == s: ok = True if ok: continue # new unit required for this dataset unit = ND280Unit_CSVEvtList() unit.name = "Unit %d" % len(self.units) unit.subpartid = s unit.eventswanted = sub unit.inputdata = self.inputdata[0] self.addUnitToTRF( unit ) def createChainUnit( self, parent_units, use_copy_output = True ): """Create a chained unit using the output data from the given units""" # check all parent units for copy_output copy_output_ok = True for parent in parent_units: if not parent.copy_output: copy_output_ok = False # all parent units must be completed so the outputfiles are filled correctly for parent in parent_units: if parent.status != "completed": return None if not use_copy_output or not copy_output_ok: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # loop over the output files and add them to the ND280LocalDataset - THIS MIGHT NEED SOME WORK! job = GPI.jobs(parent.active_job_ids[0]) for f in job.outputfiles: # should check for different file types and add them as appropriate to the dataset # self.inputdata (== TaskChainInput).include/exclude_file_mask could help with this # This will be A LOT easier with Ganga 6.1 as you can easily map outputfiles -> inputfiles! unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) ) else: unit = ND280Unit_CSVEvtList() unit.inputdata = ND280LocalDataset() for parent in parent_units: # unit needs to have completed and downloaded before we can get file list if parent.status != "completed": return None # we should be OK so copy all output to the dataset for f in parent.copy_output.files: unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) ) return unit
super(ND280Transform_CSVEvtList,self).__init__()
identifier_body
network.py
import numpy as np import random class NeuralNetwork(): def __init__(self, sizes): # sizes is an array with the number of units in each layer # [2,3,1] means w neurons of input, 3 in the hidden layer and 1 as output self.num_layers = len(sizes) self.sizes = sizes # the syntax [1:] gets all elements of sizes array beginning at index 1 (second position) # np,random.randn(rows, cols) retuns a matrix of random elements # np.random.randn(2,1) => # array([[ 0.68265325], # [-0.52939261]]) # biases will have one vector per layer self.biases = [np.random.randn(y,1) for y in sizes[1:]] #zip returns a tuple in which x is the element of the first array and y the element of the second #sizes[:-1] returns all the elements till the second to last #sizes[1:] returns all the elements from the second and on] # [2,3,1] means: # * matrix of 3 rows and 2 columns -- will be multiplied by the inputs # * matrix of 1 row and 3 columns -- will multiply the hidden layer and produce the output self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])] def feedforward(self, a): for b,w in zip(self.biases, self.weights): a = sigmoid(np.dot(w, a) + b) return a def separate_batches(self, training_data, batch_size): random.shuffle(training_data) n = len(training_data) # extracts chunks of data from the training set # the xrange function will return indices starting with 0 untill n, with a step size o batch_size # batches, then, will have several chunks of the main set, each defined by the batch_size_variable return [training_data[i:i + batch_size] for i in range(0, n, batch_size)] def update_batches(self, batches, alpha): for batch in batches: nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] m = len(batch) # x is a array of length 901 # y is a single value indicating the digit represented by the 901 elements for x, y in batch: delta_b, delta_w = self.backpropagation(x, y) nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)] nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)] self.weights = [w - (alpha / m) * nw for w, nw in zip(self.weights, nabla_w)] self.biases = [b - (alpha / m) * nb for b, nb in zip(self.biases, nabla_b)] def backpropagation(self, x, y): nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] activation = x activations = [x] zs = [] for b, w in zip(self.biases, self.weights): # layer-bound b and w z = np.dot(w, activation)+b zs.append(z) activation = sigmoid(z) activations.append(activation) # backward pass delta = self.cost_derivative(activations[-1], y) * \ sigmoid_prime(zs[-1]) nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activations[-2].transpose()) for l in range(2, self.num_layers): z = zs[-l] sp = sigmoid_prime(z) delta = np.dot(self.weights[-l+1].transpose(), delta) * sp nabla_b[-l] = delta nabla_w[-l] = np.dot(delta, activations[-l-1].transpose()) return (nabla_b, nabla_w) def sgd(self, training_data, epochs, batch_size, alpha, test_data): n_test = len(test_data) for epoch in range(epochs): batches = self.separate_batches(training_data, batch_size) self.update_batches(batches, alpha) print("Epoch {0}: {1} / {2}".format(epoch, self.evaluate(test_data), n_test)) def evaluate(self, test_data): #r = [self.feedforward(x) for (x, y) in test_data] #for a in r: # print("{0}, {1}".format(format(a[0][0], 'f'), format(a[1][0], 'f'))) test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data] return sum(int(x == y) for (x, y) in test_results) def cost_derivative(self, output_activations, y): return output_activations - y def sigmoid(z): return 1.0 / (1.0 + np.exp(-z)) def
(z): return sigmoid(z) * (1-sigmoid(z))
sigmoid_prime
identifier_name
network.py
import numpy as np import random class NeuralNetwork(): def __init__(self, sizes): # sizes is an array with the number of units in each layer # [2,3,1] means w neurons of input, 3 in the hidden layer and 1 as output self.num_layers = len(sizes) self.sizes = sizes # the syntax [1:] gets all elements of sizes array beginning at index 1 (second position) # np,random.randn(rows, cols) retuns a matrix of random elements # np.random.randn(2,1) => # array([[ 0.68265325], # [-0.52939261]]) # biases will have one vector per layer self.biases = [np.random.randn(y,1) for y in sizes[1:]] #zip returns a tuple in which x is the element of the first array and y the element of the second #sizes[:-1] returns all the elements till the second to last #sizes[1:] returns all the elements from the second and on] # [2,3,1] means: # * matrix of 3 rows and 2 columns -- will be multiplied by the inputs # * matrix of 1 row and 3 columns -- will multiply the hidden layer and produce the output self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])] def feedforward(self, a):
def separate_batches(self, training_data, batch_size): random.shuffle(training_data) n = len(training_data) # extracts chunks of data from the training set # the xrange function will return indices starting with 0 untill n, with a step size o batch_size # batches, then, will have several chunks of the main set, each defined by the batch_size_variable return [training_data[i:i + batch_size] for i in range(0, n, batch_size)] def update_batches(self, batches, alpha): for batch in batches: nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] m = len(batch) # x is a array of length 901 # y is a single value indicating the digit represented by the 901 elements for x, y in batch: delta_b, delta_w = self.backpropagation(x, y) nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)] nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)] self.weights = [w - (alpha / m) * nw for w, nw in zip(self.weights, nabla_w)] self.biases = [b - (alpha / m) * nb for b, nb in zip(self.biases, nabla_b)] def backpropagation(self, x, y): nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] activation = x activations = [x] zs = [] for b, w in zip(self.biases, self.weights): # layer-bound b and w z = np.dot(w, activation)+b zs.append(z) activation = sigmoid(z) activations.append(activation) # backward pass delta = self.cost_derivative(activations[-1], y) * \ sigmoid_prime(zs[-1]) nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activations[-2].transpose()) for l in range(2, self.num_layers): z = zs[-l] sp = sigmoid_prime(z) delta = np.dot(self.weights[-l+1].transpose(), delta) * sp nabla_b[-l] = delta nabla_w[-l] = np.dot(delta, activations[-l-1].transpose()) return (nabla_b, nabla_w) def sgd(self, training_data, epochs, batch_size, alpha, test_data): n_test = len(test_data) for epoch in range(epochs): batches = self.separate_batches(training_data, batch_size) self.update_batches(batches, alpha) print("Epoch {0}: {1} / {2}".format(epoch, self.evaluate(test_data), n_test)) def evaluate(self, test_data): #r = [self.feedforward(x) for (x, y) in test_data] #for a in r: # print("{0}, {1}".format(format(a[0][0], 'f'), format(a[1][0], 'f'))) test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data] return sum(int(x == y) for (x, y) in test_results) def cost_derivative(self, output_activations, y): return output_activations - y def sigmoid(z): return 1.0 / (1.0 + np.exp(-z)) def sigmoid_prime(z): return sigmoid(z) * (1-sigmoid(z))
for b,w in zip(self.biases, self.weights): a = sigmoid(np.dot(w, a) + b) return a
identifier_body
network.py
import numpy as np import random class NeuralNetwork(): def __init__(self, sizes): # sizes is an array with the number of units in each layer # [2,3,1] means w neurons of input, 3 in the hidden layer and 1 as output self.num_layers = len(sizes) self.sizes = sizes # the syntax [1:] gets all elements of sizes array beginning at index 1 (second position) # np,random.randn(rows, cols) retuns a matrix of random elements # np.random.randn(2,1) => # array([[ 0.68265325], # [-0.52939261]]) # biases will have one vector per layer self.biases = [np.random.randn(y,1) for y in sizes[1:]] #zip returns a tuple in which x is the element of the first array and y the element of the second #sizes[:-1] returns all the elements till the second to last #sizes[1:] returns all the elements from the second and on] # [2,3,1] means: # * matrix of 3 rows and 2 columns -- will be multiplied by the inputs # * matrix of 1 row and 3 columns -- will multiply the hidden layer and produce the output self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])] def feedforward(self, a): for b,w in zip(self.biases, self.weights): a = sigmoid(np.dot(w, a) + b) return a def separate_batches(self, training_data, batch_size): random.shuffle(training_data) n = len(training_data) # extracts chunks of data from the training set # the xrange function will return indices starting with 0 untill n, with a step size o batch_size # batches, then, will have several chunks of the main set, each defined by the batch_size_variable return [training_data[i:i + batch_size] for i in range(0, n, batch_size)] def update_batches(self, batches, alpha): for batch in batches: nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] m = len(batch) # x is a array of length 901 # y is a single value indicating the digit represented by the 901 elements for x, y in batch: delta_b, delta_w = self.backpropagation(x, y) nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)] nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)] self.weights = [w - (alpha / m) * nw for w, nw in zip(self.weights, nabla_w)] self.biases = [b - (alpha / m) * nb for b, nb in zip(self.biases, nabla_b)] def backpropagation(self, x, y): nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] activation = x activations = [x] zs = [] for b, w in zip(self.biases, self.weights): # layer-bound b and w z = np.dot(w, activation)+b
delta = self.cost_derivative(activations[-1], y) * \ sigmoid_prime(zs[-1]) nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activations[-2].transpose()) for l in range(2, self.num_layers): z = zs[-l] sp = sigmoid_prime(z) delta = np.dot(self.weights[-l+1].transpose(), delta) * sp nabla_b[-l] = delta nabla_w[-l] = np.dot(delta, activations[-l-1].transpose()) return (nabla_b, nabla_w) def sgd(self, training_data, epochs, batch_size, alpha, test_data): n_test = len(test_data) for epoch in range(epochs): batches = self.separate_batches(training_data, batch_size) self.update_batches(batches, alpha) print("Epoch {0}: {1} / {2}".format(epoch, self.evaluate(test_data), n_test)) def evaluate(self, test_data): #r = [self.feedforward(x) for (x, y) in test_data] #for a in r: # print("{0}, {1}".format(format(a[0][0], 'f'), format(a[1][0], 'f'))) test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data] return sum(int(x == y) for (x, y) in test_results) def cost_derivative(self, output_activations, y): return output_activations - y def sigmoid(z): return 1.0 / (1.0 + np.exp(-z)) def sigmoid_prime(z): return sigmoid(z) * (1-sigmoid(z))
zs.append(z) activation = sigmoid(z) activations.append(activation) # backward pass
random_line_split
network.py
import numpy as np import random class NeuralNetwork(): def __init__(self, sizes): # sizes is an array with the number of units in each layer # [2,3,1] means w neurons of input, 3 in the hidden layer and 1 as output self.num_layers = len(sizes) self.sizes = sizes # the syntax [1:] gets all elements of sizes array beginning at index 1 (second position) # np,random.randn(rows, cols) retuns a matrix of random elements # np.random.randn(2,1) => # array([[ 0.68265325], # [-0.52939261]]) # biases will have one vector per layer self.biases = [np.random.randn(y,1) for y in sizes[1:]] #zip returns a tuple in which x is the element of the first array and y the element of the second #sizes[:-1] returns all the elements till the second to last #sizes[1:] returns all the elements from the second and on] # [2,3,1] means: # * matrix of 3 rows and 2 columns -- will be multiplied by the inputs # * matrix of 1 row and 3 columns -- will multiply the hidden layer and produce the output self.weights = [np.random.randn(y,x) for x,y in zip(sizes[:-1],sizes[1:])] def feedforward(self, a): for b,w in zip(self.biases, self.weights): a = sigmoid(np.dot(w, a) + b) return a def separate_batches(self, training_data, batch_size): random.shuffle(training_data) n = len(training_data) # extracts chunks of data from the training set # the xrange function will return indices starting with 0 untill n, with a step size o batch_size # batches, then, will have several chunks of the main set, each defined by the batch_size_variable return [training_data[i:i + batch_size] for i in range(0, n, batch_size)] def update_batches(self, batches, alpha): for batch in batches: nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] m = len(batch) # x is a array of length 901 # y is a single value indicating the digit represented by the 901 elements for x, y in batch: delta_b, delta_w = self.backpropagation(x, y) nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_b)] nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_w)] self.weights = [w - (alpha / m) * nw for w, nw in zip(self.weights, nabla_w)] self.biases = [b - (alpha / m) * nb for b, nb in zip(self.biases, nabla_b)] def backpropagation(self, x, y): nabla_b = [np.zeros(b.shape) for b in self.biases] nabla_w = [np.zeros(w.shape) for w in self.weights] activation = x activations = [x] zs = [] for b, w in zip(self.biases, self.weights): # layer-bound b and w
# backward pass delta = self.cost_derivative(activations[-1], y) * \ sigmoid_prime(zs[-1]) nabla_b[-1] = delta nabla_w[-1] = np.dot(delta, activations[-2].transpose()) for l in range(2, self.num_layers): z = zs[-l] sp = sigmoid_prime(z) delta = np.dot(self.weights[-l+1].transpose(), delta) * sp nabla_b[-l] = delta nabla_w[-l] = np.dot(delta, activations[-l-1].transpose()) return (nabla_b, nabla_w) def sgd(self, training_data, epochs, batch_size, alpha, test_data): n_test = len(test_data) for epoch in range(epochs): batches = self.separate_batches(training_data, batch_size) self.update_batches(batches, alpha) print("Epoch {0}: {1} / {2}".format(epoch, self.evaluate(test_data), n_test)) def evaluate(self, test_data): #r = [self.feedforward(x) for (x, y) in test_data] #for a in r: # print("{0}, {1}".format(format(a[0][0], 'f'), format(a[1][0], 'f'))) test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data] return sum(int(x == y) for (x, y) in test_results) def cost_derivative(self, output_activations, y): return output_activations - y def sigmoid(z): return 1.0 / (1.0 + np.exp(-z)) def sigmoid_prime(z): return sigmoid(z) * (1-sigmoid(z))
z = np.dot(w, activation)+b zs.append(z) activation = sigmoid(z) activations.append(activation)
conditional_block
cluster.component.ts
import { of, Subscription, timer as observableTimer } from 'rxjs'; import { catchError, filter, switchMap } from 'rxjs/operators'; import { ChangeDetectionStrategy, Component, OnDestroy, ViewChild, ViewEncapsulation, Inject, AfterViewInit, } from '@angular/core'; import { Location } from '@angular/common'; import { ActivatedRoute, Router } from '@angular/router'; import { HttpClient } from '@angular/common/http'; import { MatDialog, MatPaginator, MatSort, MatTableDataSource } from '@angular/material'; import { animate, state, style, transition, trigger } from '@angular/animations'; import { Supergiant } from '../../shared/supergiant/supergiant.service'; import { UtilService } from '../../shared/supergiant/util/util.service'; import { Notifications } from '../../shared/notifications/notifications.service'; import { ConfirmModalComponent } from '../../shared/modals/confirm-modal/confirm-modal.component'; import { DeleteClusterModalComponent } from './delete-cluster-modal/delete-cluster-modal.component'; import { DeleteReleaseModalComponent } from './delete-release-modal/delete-release-modal.component'; import { SshCommandsModalComponent } from './ssh-commands-modal/ssh-commands-modal.component'; import { KubectlConfigModalComponent } from './kubectl-config-modal/kubectl-config-modal.component'; import { TaskLogsComponent } from './task-logs/task-logs.component'; import { ReleaseInfoModalComponent } from './release-info-modal/release-info-modal.component'; import { WINDOW } from '../../shared/helpers/window-providers'; @Component({ selector: 'app-cluster', templateUrl: './cluster.component.html', styleUrls: [ './cluster.component.scss' ], // TODO: do we need this anymore? changeDetection: ChangeDetectionStrategy.Default, encapsulation: ViewEncapsulation.None, animations: [ trigger('detailExpand', [ state('collapsed', style({height: '0px', minHeight: '0', visibility: 'hidden'})), state('expanded', style({height: '*', visibility: 'visible'})), transition('expanded <=> collapsed', animate('225ms cubic-bezier(0.4, 0.0, 0.2, 1)')), ]), ] }) export class ClusterComponent implements AfterViewInit, OnDestroy { clusterId: number; subscriptions = new Subscription(); public kube: any; // machine list vars activeMachines: any; activeMachineListColumns = ['state', 'role', 'size', 'name', 'cpu', 'ram', 'region', 'publicIp', 'delete']; nonActiveMachines: any; nonActiveMachineListColumns = ['state', 'role', 'size', 'name', 'region', 'steps', 'logs', 'delete']; // task list vars tasks: any; taskListColumns = ['status', 'type', 'id', 'steps', 'logs']; expandedTaskIds = new Set(); releases: any; releaseListColumns = ['status', 'name', 'namespace', 'chart', 'chartVersion', 'lastDeployed', 'info', 'delete']; masterTasksStatus = 'queued'; nodeTasksStatus = 'queued'; clusterTasksStatus = 'queued'; cpuUsage: number; ramUsage: number; machineMetrics = {}; kubectlConfig: any; clusterServices: any; serviceListColumns = ['name', 'type', 'namespace', 'selfLink']; deletingApps = new Set(); clusterRestarting: boolean; constructor( private route: ActivatedRoute, private location: Location, private router: Router, private supergiant: Supergiant, private util: UtilService, private notifications: Notifications, public dialog: MatDialog, public http: HttpClient, @Inject(WINDOW) private window: Window ) { route.params.subscribe(params => { this.clusterId = params.id; this.getKube(); }); } @ViewChild(MatSort) sort: MatSort; @ViewChild(MatPaginator) paginator: MatPaginator; get
() { return !isNaN(this.cpuUsage) && !isNaN(this.ramUsage); } ngAfterViewInit() { this.clusterId = this.route.snapshot.params.id; this.getKube(); } ngOnDestroy() { this.subscriptions.unsubscribe(); } combineAndFlatten(objOne, objTwo) { const arr = []; Object.keys(objOne).forEach((key) => { arr.push(objOne[key]); }); Object.keys(objTwo).forEach((key) => { arr.push(objTwo[key]); }); return arr; } getKubeStatus(clusterId) { // we should make Tasks a part of the Supergiant instance // if we start using them outside of this return this.util.fetch('/v1/api/kubes/' + clusterId + '/tasks'); } toggleSteps(task) { task.showSteps = !task.showSteps; if (this.expandedTaskIds.has(task.id)) { this.expandedTaskIds.delete(task.id); } else { this.expandedTaskIds.add(task.id); } } taskComplete(task) { return task.status == 'success'; } viewTaskLog(taskId) { const modal = this.dialog.open(TaskLogsComponent, { width: '1080px', data: { taskId: taskId, hostname: this.window.location.hostname } }); } setProvisioningStep(tasks) { const masterPatt = /master/g; const masterTasks = tasks.filter(t => { return masterPatt.test(t.type.toLowerCase()); }); const nodePatt = /node/g; const nodeTasks = tasks.filter(t => { return nodePatt.test(t.type.toLowerCase()); }); // oh my god I'm so sorry if (masterTasks.every(this.taskComplete)) { // masters complete this.masterTasksStatus = 'complete'; if (nodeTasks.every(this.taskComplete)) { // masters AND nodes complete this.nodeTasksStatus = 'complete'; this.clusterTasksStatus = 'executing'; } else { // masters complete, nodes executing this.nodeTasksStatus = 'executing'; } } else { // masters executing this.masterTasksStatus = 'executing'; } } downloadPrivateKey() { let a = document.createElement("a"); a.href = "data:," + this.kube.sshConfig.bootstrapPrivateKey; a.setAttribute("download", this.kube.name + ".pem"); a.click(); } orderTasks(a, b) { const aSortId = (a.type + a.id).toUpperCase(); const bSortId = (b.type + b.id).toUpperCase(); let comparison = 0; if (aSortId > bSortId) { comparison = 1 } else if (aSortId < bSortId) { comparison = -1 } return comparison; } updateTaskList(tasks) { const rows = []; const orderedTasks = tasks.sort(this.orderTasks); orderedTasks.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } rows.push(t, { detailRow: true, t }); }); this.tasks = new MatTableDataSource(rows); } getKube() { // TODO: shameful how smart this ENTIRE component has become. this.subscriptions.add(observableTimer(0, 10000).pipe( switchMap(() => this.supergiant.Kubes.get(this.clusterId))).subscribe( k => { this.kube = k; switch (this.kube.state) { case 'operational': { this.renderMachines(this.kube); this.getReleases(); this.getClusterMetrics(); this.getMachineMetrics(); this.getKubectlConfig(); this.getClusterServices(); break; } case 'failed': { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.updateTaskList(tasks); }, err => console.log(err) ); this.masterTasksStatus = 'failed'; this.nodeTasksStatus = 'failed'; this.clusterTasksStatus = 'failed'; break; } default: { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.setProvisioningStep(tasks); this.updateTaskList(tasks); }, err => console.log(err) ); break; } } }, err => console.error(err) )); } renderMachines(kube) { const masterNames = Object.keys(kube.masters); const nodeNames = Object.keys(kube.nodes); masterNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.masters[name].metrics = this.machineMetrics[lowercaseName]; } }); nodeNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.nodes[name].metrics = this.machineMetrics[lowercaseName]; } }); const allMachines = this.combineAndFlatten(kube.masters, kube.nodes); const activeMachines = allMachines.filter(m => m.state == 'active' || m.state == 'deleting'); const nonActiveMachines = allMachines.filter(m => m.state != 'active' && m.state != 'deleting'); this.activeMachines = new MatTableDataSource(activeMachines); if (nonActiveMachines.length > 0) { const executingTasksObj = {}; this.getKubeStatus(this.clusterId).subscribe( tasks => { const executing = tasks.filter(t => t.status == 'executing' || t.status == 'error'); executing.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } executingTasksObj[t.id] = t; }); const nonAM = []; nonActiveMachines.forEach(m => { const tid = m.taskId; const t = executingTasksObj[tid]; m.taskData = executingTasksObj[tid]; nonAM.push(m, { detailRow: true, t }); }); this.nonActiveMachines = new MatTableDataSource(nonAM); }, err => console.error(err) ); } else { this.nonActiveMachines = {}; } } getReleases(deletedReleaseName?) { this.supergiant.HelmReleases.get(this.clusterId).subscribe( res => { const releases = res.filter(r => r.status !== 'DELETED'); this.releases = new MatTableDataSource(releases); // TODO: this is temporary. We need to figure out a way around the constant polling if (deletedReleaseName) { this.deletingApps.delete(deletedReleaseName); } }, err => console.error(err) ); } getClusterMetrics() { this.supergiant.Kubes.getClusterMetrics(this.clusterId).subscribe( res => { this.cpuUsage = res.cpu; this.ramUsage = res.memory; }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getMachineMetrics() { this.supergiant.Kubes.getMachineMetrics(this.clusterId).subscribe( res => { this.machineMetrics = this.calculateMachineMetrics(res); this.renderMachines(this.kube); }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getKubectlConfig() { // TODO: move to service this.util.fetch('v1/api/kubes/' + this.clusterId + '/users/kubernetes-admin/kubeconfig').subscribe( res => this.kubectlConfig = res, err => console.error(err) ); } getClusterServices() { this.supergiant.Kubes.getClusterServices(this.clusterId).subscribe( res => this.clusterServices = new MatTableDataSource(res), err => console.error(err) ); } calculateMachineMetrics(machines) { Object.keys(machines).forEach(m => { machines[m].cpu = (machines[m].cpu * 100).toFixed(1); machines[m].memory = (machines[m].memory * 100).toFixed(1); }); return machines; } restart(id) { this.clusterRestarting = true; this.supergiant.Kubes.restartFailedProvision(id).subscribe( res => { this.clusterRestarting = false; this.getKube() }, err => { this.displayError(this.kube.name, err.error.userMessage) this.clusterRestarting = false; } ) } removeNode(nodeName: string, target) { const dialogRef = this.initDialog(target); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Nodes.delete(this.clusterId, nodeName)), switchMap(() => this.supergiant.Kubes.get(this.clusterId)), catchError((error) => of(error)), ).subscribe( k => { this.displaySuccess("Node: " + nodeName, "Deleted!"); this.renderMachines(k) }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteCluster() { const dialogRef = this.initDeleteCluster(this.kube.state); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Kubes.delete(this.clusterId)) ).subscribe( res => { this.displaySuccess("Kube: " + this.kube.name, "Deleted!"); this.router.navigate(['']); }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteRelease(releaseName, idx) { const dialogRef = this.initDeleteRelease(releaseName); dialogRef .afterClosed() .pipe( filter(res => res.deleteRelease), // can't mutate table data source, polling erases any optimistic updates, so this happens, sorry... switchMap((_) => this.deletingApps.add(releaseName)), switchMap(res => this.supergiant.HelmReleases.delete(releaseName, this.clusterId, true)), ).subscribe( res => { this.getReleases(releaseName); this.displaySuccess("App: " + releaseName, "Deleted!") }, err => { console.error(err); this.deletingApps.delete(releaseName); this.displayError(this.kube.name, err); } ); } showSshCommands() { const masters = []; const nodes = []; Object.keys(this.kube.masters).map(m => masters.push(this.kube.masters[m])); Object.keys(this.kube.nodes).map(m => nodes.push(this.kube.nodes[m])); this.initSshCommands(masters, nodes); } showKubectlConfig() { this.initKubectlConfig(this.kubectlConfig); } showReleaseInfo(releaseName) { this.initReleaseInfo(releaseName); } openService(proxyPort) { const hostname = this.window.location.hostname; const link = 'http://' + hostname + ':' + proxyPort; this.window.open(link); } trackByFn(index, item) { return index; } private initDialog(target) { const popupWidth = 250; const dialogRef = this.dialog.open(ConfirmModalComponent, { width: `${popupWidth}px`, }); dialogRef.updatePosition({ top: `${target.clientY}px`, left: `${target.clientX - popupWidth - 10}px`, }); return dialogRef; } private initDeleteCluster(clusterState) { const dialogRef = this.dialog.open(DeleteClusterModalComponent, { width: '500px', data: { state: clusterState } }); return dialogRef; } private initDeleteRelease(name) { const dialogRef = this.dialog.open(DeleteReleaseModalComponent, { width: 'max-content', data: { name: name } }); return dialogRef; } private initSshCommands(masters, nodes) { const dialogRef = this.dialog.open(SshCommandsModalComponent, { width: '600px', data: { masters: masters, nodes: nodes } }); return dialogRef; } private initKubectlConfig(config) { const dialogRef = this.dialog.open(KubectlConfigModalComponent, { width: '800px', data: { config: config } }); return dialogRef; } private initReleaseInfo(releaseName) { const dialogRef = this.dialog.open(ReleaseInfoModalComponent, { width: '800px', data: { releaseName: releaseName, clusterId: this.clusterId } }); return dialogRef; } expandRow = (_, row) => row.hasOwnProperty('detailRow'); displaySuccess(headline, msg) { this.notifications.display('success', headline, msg); } displayError(name, err) { let msg: string; if (err.error.userMessage) { msg = err.error.userMessage; } else if (err.userMessage) { msg = err.userMessage } else { msg = err.error; } this.notifications.display( 'error', 'Kube: ' + name, 'Error:' + msg); } }
showUsageChart
identifier_name
cluster.component.ts
import { of, Subscription, timer as observableTimer } from 'rxjs'; import { catchError, filter, switchMap } from 'rxjs/operators'; import { ChangeDetectionStrategy, Component, OnDestroy, ViewChild, ViewEncapsulation, Inject, AfterViewInit, } from '@angular/core'; import { Location } from '@angular/common'; import { ActivatedRoute, Router } from '@angular/router'; import { HttpClient } from '@angular/common/http'; import { MatDialog, MatPaginator, MatSort, MatTableDataSource } from '@angular/material'; import { animate, state, style, transition, trigger } from '@angular/animations'; import { Supergiant } from '../../shared/supergiant/supergiant.service'; import { UtilService } from '../../shared/supergiant/util/util.service'; import { Notifications } from '../../shared/notifications/notifications.service'; import { ConfirmModalComponent } from '../../shared/modals/confirm-modal/confirm-modal.component'; import { DeleteClusterModalComponent } from './delete-cluster-modal/delete-cluster-modal.component'; import { DeleteReleaseModalComponent } from './delete-release-modal/delete-release-modal.component'; import { SshCommandsModalComponent } from './ssh-commands-modal/ssh-commands-modal.component'; import { KubectlConfigModalComponent } from './kubectl-config-modal/kubectl-config-modal.component'; import { TaskLogsComponent } from './task-logs/task-logs.component'; import { ReleaseInfoModalComponent } from './release-info-modal/release-info-modal.component'; import { WINDOW } from '../../shared/helpers/window-providers'; @Component({ selector: 'app-cluster', templateUrl: './cluster.component.html', styleUrls: [ './cluster.component.scss' ], // TODO: do we need this anymore? changeDetection: ChangeDetectionStrategy.Default, encapsulation: ViewEncapsulation.None, animations: [ trigger('detailExpand', [ state('collapsed', style({height: '0px', minHeight: '0', visibility: 'hidden'})), state('expanded', style({height: '*', visibility: 'visible'})), transition('expanded <=> collapsed', animate('225ms cubic-bezier(0.4, 0.0, 0.2, 1)')), ]), ] }) export class ClusterComponent implements AfterViewInit, OnDestroy { clusterId: number; subscriptions = new Subscription(); public kube: any; // machine list vars activeMachines: any; activeMachineListColumns = ['state', 'role', 'size', 'name', 'cpu', 'ram', 'region', 'publicIp', 'delete']; nonActiveMachines: any; nonActiveMachineListColumns = ['state', 'role', 'size', 'name', 'region', 'steps', 'logs', 'delete']; // task list vars tasks: any; taskListColumns = ['status', 'type', 'id', 'steps', 'logs']; expandedTaskIds = new Set(); releases: any; releaseListColumns = ['status', 'name', 'namespace', 'chart', 'chartVersion', 'lastDeployed', 'info', 'delete']; masterTasksStatus = 'queued'; nodeTasksStatus = 'queued'; clusterTasksStatus = 'queued'; cpuUsage: number; ramUsage: number; machineMetrics = {}; kubectlConfig: any; clusterServices: any; serviceListColumns = ['name', 'type', 'namespace', 'selfLink']; deletingApps = new Set(); clusterRestarting: boolean; constructor( private route: ActivatedRoute, private location: Location, private router: Router, private supergiant: Supergiant, private util: UtilService, private notifications: Notifications, public dialog: MatDialog, public http: HttpClient, @Inject(WINDOW) private window: Window ) { route.params.subscribe(params => { this.clusterId = params.id; this.getKube(); }); } @ViewChild(MatSort) sort: MatSort; @ViewChild(MatPaginator) paginator: MatPaginator; get showUsageChart() { return !isNaN(this.cpuUsage) && !isNaN(this.ramUsage); } ngAfterViewInit() { this.clusterId = this.route.snapshot.params.id; this.getKube(); } ngOnDestroy() { this.subscriptions.unsubscribe(); } combineAndFlatten(objOne, objTwo) { const arr = []; Object.keys(objOne).forEach((key) => { arr.push(objOne[key]); }); Object.keys(objTwo).forEach((key) => { arr.push(objTwo[key]); }); return arr; } getKubeStatus(clusterId) { // we should make Tasks a part of the Supergiant instance // if we start using them outside of this return this.util.fetch('/v1/api/kubes/' + clusterId + '/tasks'); } toggleSteps(task) { task.showSteps = !task.showSteps; if (this.expandedTaskIds.has(task.id)) { this.expandedTaskIds.delete(task.id); } else { this.expandedTaskIds.add(task.id); } } taskComplete(task) { return task.status == 'success'; } viewTaskLog(taskId) { const modal = this.dialog.open(TaskLogsComponent, { width: '1080px', data: { taskId: taskId, hostname: this.window.location.hostname } }); } setProvisioningStep(tasks) { const masterPatt = /master/g; const masterTasks = tasks.filter(t => { return masterPatt.test(t.type.toLowerCase()); }); const nodePatt = /node/g; const nodeTasks = tasks.filter(t => { return nodePatt.test(t.type.toLowerCase()); }); // oh my god I'm so sorry if (masterTasks.every(this.taskComplete))
else { // masters executing this.masterTasksStatus = 'executing'; } } downloadPrivateKey() { let a = document.createElement("a"); a.href = "data:," + this.kube.sshConfig.bootstrapPrivateKey; a.setAttribute("download", this.kube.name + ".pem"); a.click(); } orderTasks(a, b) { const aSortId = (a.type + a.id).toUpperCase(); const bSortId = (b.type + b.id).toUpperCase(); let comparison = 0; if (aSortId > bSortId) { comparison = 1 } else if (aSortId < bSortId) { comparison = -1 } return comparison; } updateTaskList(tasks) { const rows = []; const orderedTasks = tasks.sort(this.orderTasks); orderedTasks.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } rows.push(t, { detailRow: true, t }); }); this.tasks = new MatTableDataSource(rows); } getKube() { // TODO: shameful how smart this ENTIRE component has become. this.subscriptions.add(observableTimer(0, 10000).pipe( switchMap(() => this.supergiant.Kubes.get(this.clusterId))).subscribe( k => { this.kube = k; switch (this.kube.state) { case 'operational': { this.renderMachines(this.kube); this.getReleases(); this.getClusterMetrics(); this.getMachineMetrics(); this.getKubectlConfig(); this.getClusterServices(); break; } case 'failed': { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.updateTaskList(tasks); }, err => console.log(err) ); this.masterTasksStatus = 'failed'; this.nodeTasksStatus = 'failed'; this.clusterTasksStatus = 'failed'; break; } default: { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.setProvisioningStep(tasks); this.updateTaskList(tasks); }, err => console.log(err) ); break; } } }, err => console.error(err) )); } renderMachines(kube) { const masterNames = Object.keys(kube.masters); const nodeNames = Object.keys(kube.nodes); masterNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.masters[name].metrics = this.machineMetrics[lowercaseName]; } }); nodeNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.nodes[name].metrics = this.machineMetrics[lowercaseName]; } }); const allMachines = this.combineAndFlatten(kube.masters, kube.nodes); const activeMachines = allMachines.filter(m => m.state == 'active' || m.state == 'deleting'); const nonActiveMachines = allMachines.filter(m => m.state != 'active' && m.state != 'deleting'); this.activeMachines = new MatTableDataSource(activeMachines); if (nonActiveMachines.length > 0) { const executingTasksObj = {}; this.getKubeStatus(this.clusterId).subscribe( tasks => { const executing = tasks.filter(t => t.status == 'executing' || t.status == 'error'); executing.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } executingTasksObj[t.id] = t; }); const nonAM = []; nonActiveMachines.forEach(m => { const tid = m.taskId; const t = executingTasksObj[tid]; m.taskData = executingTasksObj[tid]; nonAM.push(m, { detailRow: true, t }); }); this.nonActiveMachines = new MatTableDataSource(nonAM); }, err => console.error(err) ); } else { this.nonActiveMachines = {}; } } getReleases(deletedReleaseName?) { this.supergiant.HelmReleases.get(this.clusterId).subscribe( res => { const releases = res.filter(r => r.status !== 'DELETED'); this.releases = new MatTableDataSource(releases); // TODO: this is temporary. We need to figure out a way around the constant polling if (deletedReleaseName) { this.deletingApps.delete(deletedReleaseName); } }, err => console.error(err) ); } getClusterMetrics() { this.supergiant.Kubes.getClusterMetrics(this.clusterId).subscribe( res => { this.cpuUsage = res.cpu; this.ramUsage = res.memory; }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getMachineMetrics() { this.supergiant.Kubes.getMachineMetrics(this.clusterId).subscribe( res => { this.machineMetrics = this.calculateMachineMetrics(res); this.renderMachines(this.kube); }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getKubectlConfig() { // TODO: move to service this.util.fetch('v1/api/kubes/' + this.clusterId + '/users/kubernetes-admin/kubeconfig').subscribe( res => this.kubectlConfig = res, err => console.error(err) ); } getClusterServices() { this.supergiant.Kubes.getClusterServices(this.clusterId).subscribe( res => this.clusterServices = new MatTableDataSource(res), err => console.error(err) ); } calculateMachineMetrics(machines) { Object.keys(machines).forEach(m => { machines[m].cpu = (machines[m].cpu * 100).toFixed(1); machines[m].memory = (machines[m].memory * 100).toFixed(1); }); return machines; } restart(id) { this.clusterRestarting = true; this.supergiant.Kubes.restartFailedProvision(id).subscribe( res => { this.clusterRestarting = false; this.getKube() }, err => { this.displayError(this.kube.name, err.error.userMessage) this.clusterRestarting = false; } ) } removeNode(nodeName: string, target) { const dialogRef = this.initDialog(target); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Nodes.delete(this.clusterId, nodeName)), switchMap(() => this.supergiant.Kubes.get(this.clusterId)), catchError((error) => of(error)), ).subscribe( k => { this.displaySuccess("Node: " + nodeName, "Deleted!"); this.renderMachines(k) }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteCluster() { const dialogRef = this.initDeleteCluster(this.kube.state); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Kubes.delete(this.clusterId)) ).subscribe( res => { this.displaySuccess("Kube: " + this.kube.name, "Deleted!"); this.router.navigate(['']); }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteRelease(releaseName, idx) { const dialogRef = this.initDeleteRelease(releaseName); dialogRef .afterClosed() .pipe( filter(res => res.deleteRelease), // can't mutate table data source, polling erases any optimistic updates, so this happens, sorry... switchMap((_) => this.deletingApps.add(releaseName)), switchMap(res => this.supergiant.HelmReleases.delete(releaseName, this.clusterId, true)), ).subscribe( res => { this.getReleases(releaseName); this.displaySuccess("App: " + releaseName, "Deleted!") }, err => { console.error(err); this.deletingApps.delete(releaseName); this.displayError(this.kube.name, err); } ); } showSshCommands() { const masters = []; const nodes = []; Object.keys(this.kube.masters).map(m => masters.push(this.kube.masters[m])); Object.keys(this.kube.nodes).map(m => nodes.push(this.kube.nodes[m])); this.initSshCommands(masters, nodes); } showKubectlConfig() { this.initKubectlConfig(this.kubectlConfig); } showReleaseInfo(releaseName) { this.initReleaseInfo(releaseName); } openService(proxyPort) { const hostname = this.window.location.hostname; const link = 'http://' + hostname + ':' + proxyPort; this.window.open(link); } trackByFn(index, item) { return index; } private initDialog(target) { const popupWidth = 250; const dialogRef = this.dialog.open(ConfirmModalComponent, { width: `${popupWidth}px`, }); dialogRef.updatePosition({ top: `${target.clientY}px`, left: `${target.clientX - popupWidth - 10}px`, }); return dialogRef; } private initDeleteCluster(clusterState) { const dialogRef = this.dialog.open(DeleteClusterModalComponent, { width: '500px', data: { state: clusterState } }); return dialogRef; } private initDeleteRelease(name) { const dialogRef = this.dialog.open(DeleteReleaseModalComponent, { width: 'max-content', data: { name: name } }); return dialogRef; } private initSshCommands(masters, nodes) { const dialogRef = this.dialog.open(SshCommandsModalComponent, { width: '600px', data: { masters: masters, nodes: nodes } }); return dialogRef; } private initKubectlConfig(config) { const dialogRef = this.dialog.open(KubectlConfigModalComponent, { width: '800px', data: { config: config } }); return dialogRef; } private initReleaseInfo(releaseName) { const dialogRef = this.dialog.open(ReleaseInfoModalComponent, { width: '800px', data: { releaseName: releaseName, clusterId: this.clusterId } }); return dialogRef; } expandRow = (_, row) => row.hasOwnProperty('detailRow'); displaySuccess(headline, msg) { this.notifications.display('success', headline, msg); } displayError(name, err) { let msg: string; if (err.error.userMessage) { msg = err.error.userMessage; } else if (err.userMessage) { msg = err.userMessage } else { msg = err.error; } this.notifications.display( 'error', 'Kube: ' + name, 'Error:' + msg); } }
{ // masters complete this.masterTasksStatus = 'complete'; if (nodeTasks.every(this.taskComplete)) { // masters AND nodes complete this.nodeTasksStatus = 'complete'; this.clusterTasksStatus = 'executing'; } else { // masters complete, nodes executing this.nodeTasksStatus = 'executing'; } }
conditional_block
cluster.component.ts
import { of, Subscription, timer as observableTimer } from 'rxjs'; import { catchError, filter, switchMap } from 'rxjs/operators'; import { ChangeDetectionStrategy, Component, OnDestroy, ViewChild, ViewEncapsulation, Inject, AfterViewInit, } from '@angular/core'; import { Location } from '@angular/common'; import { ActivatedRoute, Router } from '@angular/router'; import { HttpClient } from '@angular/common/http'; import { MatDialog, MatPaginator, MatSort, MatTableDataSource } from '@angular/material'; import { animate, state, style, transition, trigger } from '@angular/animations'; import { Supergiant } from '../../shared/supergiant/supergiant.service'; import { UtilService } from '../../shared/supergiant/util/util.service'; import { Notifications } from '../../shared/notifications/notifications.service'; import { ConfirmModalComponent } from '../../shared/modals/confirm-modal/confirm-modal.component'; import { DeleteClusterModalComponent } from './delete-cluster-modal/delete-cluster-modal.component'; import { DeleteReleaseModalComponent } from './delete-release-modal/delete-release-modal.component'; import { SshCommandsModalComponent } from './ssh-commands-modal/ssh-commands-modal.component'; import { KubectlConfigModalComponent } from './kubectl-config-modal/kubectl-config-modal.component'; import { TaskLogsComponent } from './task-logs/task-logs.component'; import { ReleaseInfoModalComponent } from './release-info-modal/release-info-modal.component'; import { WINDOW } from '../../shared/helpers/window-providers'; @Component({ selector: 'app-cluster', templateUrl: './cluster.component.html', styleUrls: [ './cluster.component.scss' ], // TODO: do we need this anymore? changeDetection: ChangeDetectionStrategy.Default, encapsulation: ViewEncapsulation.None, animations: [ trigger('detailExpand', [ state('collapsed', style({height: '0px', minHeight: '0', visibility: 'hidden'})), state('expanded', style({height: '*', visibility: 'visible'})), transition('expanded <=> collapsed', animate('225ms cubic-bezier(0.4, 0.0, 0.2, 1)')), ]), ] }) export class ClusterComponent implements AfterViewInit, OnDestroy { clusterId: number; subscriptions = new Subscription(); public kube: any; // machine list vars activeMachines: any; activeMachineListColumns = ['state', 'role', 'size', 'name', 'cpu', 'ram', 'region', 'publicIp', 'delete']; nonActiveMachines: any; nonActiveMachineListColumns = ['state', 'role', 'size', 'name', 'region', 'steps', 'logs', 'delete']; // task list vars tasks: any; taskListColumns = ['status', 'type', 'id', 'steps', 'logs']; expandedTaskIds = new Set(); releases: any; releaseListColumns = ['status', 'name', 'namespace', 'chart', 'chartVersion', 'lastDeployed', 'info', 'delete']; masterTasksStatus = 'queued'; nodeTasksStatus = 'queued'; clusterTasksStatus = 'queued'; cpuUsage: number; ramUsage: number; machineMetrics = {}; kubectlConfig: any; clusterServices: any; serviceListColumns = ['name', 'type', 'namespace', 'selfLink']; deletingApps = new Set(); clusterRestarting: boolean; constructor( private route: ActivatedRoute, private location: Location, private router: Router, private supergiant: Supergiant, private util: UtilService, private notifications: Notifications, public dialog: MatDialog, public http: HttpClient, @Inject(WINDOW) private window: Window ) { route.params.subscribe(params => { this.clusterId = params.id; this.getKube(); }); } @ViewChild(MatSort) sort: MatSort; @ViewChild(MatPaginator) paginator: MatPaginator; get showUsageChart() { return !isNaN(this.cpuUsage) && !isNaN(this.ramUsage); } ngAfterViewInit() { this.clusterId = this.route.snapshot.params.id; this.getKube(); } ngOnDestroy() { this.subscriptions.unsubscribe(); } combineAndFlatten(objOne, objTwo) { const arr = []; Object.keys(objOne).forEach((key) => { arr.push(objOne[key]); }); Object.keys(objTwo).forEach((key) => { arr.push(objTwo[key]); }); return arr; } getKubeStatus(clusterId) { // we should make Tasks a part of the Supergiant instance // if we start using them outside of this return this.util.fetch('/v1/api/kubes/' + clusterId + '/tasks'); } toggleSteps(task)
taskComplete(task) { return task.status == 'success'; } viewTaskLog(taskId) { const modal = this.dialog.open(TaskLogsComponent, { width: '1080px', data: { taskId: taskId, hostname: this.window.location.hostname } }); } setProvisioningStep(tasks) { const masterPatt = /master/g; const masterTasks = tasks.filter(t => { return masterPatt.test(t.type.toLowerCase()); }); const nodePatt = /node/g; const nodeTasks = tasks.filter(t => { return nodePatt.test(t.type.toLowerCase()); }); // oh my god I'm so sorry if (masterTasks.every(this.taskComplete)) { // masters complete this.masterTasksStatus = 'complete'; if (nodeTasks.every(this.taskComplete)) { // masters AND nodes complete this.nodeTasksStatus = 'complete'; this.clusterTasksStatus = 'executing'; } else { // masters complete, nodes executing this.nodeTasksStatus = 'executing'; } } else { // masters executing this.masterTasksStatus = 'executing'; } } downloadPrivateKey() { let a = document.createElement("a"); a.href = "data:," + this.kube.sshConfig.bootstrapPrivateKey; a.setAttribute("download", this.kube.name + ".pem"); a.click(); } orderTasks(a, b) { const aSortId = (a.type + a.id).toUpperCase(); const bSortId = (b.type + b.id).toUpperCase(); let comparison = 0; if (aSortId > bSortId) { comparison = 1 } else if (aSortId < bSortId) { comparison = -1 } return comparison; } updateTaskList(tasks) { const rows = []; const orderedTasks = tasks.sort(this.orderTasks); orderedTasks.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } rows.push(t, { detailRow: true, t }); }); this.tasks = new MatTableDataSource(rows); } getKube() { // TODO: shameful how smart this ENTIRE component has become. this.subscriptions.add(observableTimer(0, 10000).pipe( switchMap(() => this.supergiant.Kubes.get(this.clusterId))).subscribe( k => { this.kube = k; switch (this.kube.state) { case 'operational': { this.renderMachines(this.kube); this.getReleases(); this.getClusterMetrics(); this.getMachineMetrics(); this.getKubectlConfig(); this.getClusterServices(); break; } case 'failed': { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.updateTaskList(tasks); }, err => console.log(err) ); this.masterTasksStatus = 'failed'; this.nodeTasksStatus = 'failed'; this.clusterTasksStatus = 'failed'; break; } default: { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.setProvisioningStep(tasks); this.updateTaskList(tasks); }, err => console.log(err) ); break; } } }, err => console.error(err) )); } renderMachines(kube) { const masterNames = Object.keys(kube.masters); const nodeNames = Object.keys(kube.nodes); masterNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.masters[name].metrics = this.machineMetrics[lowercaseName]; } }); nodeNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.nodes[name].metrics = this.machineMetrics[lowercaseName]; } }); const allMachines = this.combineAndFlatten(kube.masters, kube.nodes); const activeMachines = allMachines.filter(m => m.state == 'active' || m.state == 'deleting'); const nonActiveMachines = allMachines.filter(m => m.state != 'active' && m.state != 'deleting'); this.activeMachines = new MatTableDataSource(activeMachines); if (nonActiveMachines.length > 0) { const executingTasksObj = {}; this.getKubeStatus(this.clusterId).subscribe( tasks => { const executing = tasks.filter(t => t.status == 'executing' || t.status == 'error'); executing.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } executingTasksObj[t.id] = t; }); const nonAM = []; nonActiveMachines.forEach(m => { const tid = m.taskId; const t = executingTasksObj[tid]; m.taskData = executingTasksObj[tid]; nonAM.push(m, { detailRow: true, t }); }); this.nonActiveMachines = new MatTableDataSource(nonAM); }, err => console.error(err) ); } else { this.nonActiveMachines = {}; } } getReleases(deletedReleaseName?) { this.supergiant.HelmReleases.get(this.clusterId).subscribe( res => { const releases = res.filter(r => r.status !== 'DELETED'); this.releases = new MatTableDataSource(releases); // TODO: this is temporary. We need to figure out a way around the constant polling if (deletedReleaseName) { this.deletingApps.delete(deletedReleaseName); } }, err => console.error(err) ); } getClusterMetrics() { this.supergiant.Kubes.getClusterMetrics(this.clusterId).subscribe( res => { this.cpuUsage = res.cpu; this.ramUsage = res.memory; }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getMachineMetrics() { this.supergiant.Kubes.getMachineMetrics(this.clusterId).subscribe( res => { this.machineMetrics = this.calculateMachineMetrics(res); this.renderMachines(this.kube); }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getKubectlConfig() { // TODO: move to service this.util.fetch('v1/api/kubes/' + this.clusterId + '/users/kubernetes-admin/kubeconfig').subscribe( res => this.kubectlConfig = res, err => console.error(err) ); } getClusterServices() { this.supergiant.Kubes.getClusterServices(this.clusterId).subscribe( res => this.clusterServices = new MatTableDataSource(res), err => console.error(err) ); } calculateMachineMetrics(machines) { Object.keys(machines).forEach(m => { machines[m].cpu = (machines[m].cpu * 100).toFixed(1); machines[m].memory = (machines[m].memory * 100).toFixed(1); }); return machines; } restart(id) { this.clusterRestarting = true; this.supergiant.Kubes.restartFailedProvision(id).subscribe( res => { this.clusterRestarting = false; this.getKube() }, err => { this.displayError(this.kube.name, err.error.userMessage) this.clusterRestarting = false; } ) } removeNode(nodeName: string, target) { const dialogRef = this.initDialog(target); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Nodes.delete(this.clusterId, nodeName)), switchMap(() => this.supergiant.Kubes.get(this.clusterId)), catchError((error) => of(error)), ).subscribe( k => { this.displaySuccess("Node: " + nodeName, "Deleted!"); this.renderMachines(k) }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteCluster() { const dialogRef = this.initDeleteCluster(this.kube.state); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Kubes.delete(this.clusterId)) ).subscribe( res => { this.displaySuccess("Kube: " + this.kube.name, "Deleted!"); this.router.navigate(['']); }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteRelease(releaseName, idx) { const dialogRef = this.initDeleteRelease(releaseName); dialogRef .afterClosed() .pipe( filter(res => res.deleteRelease), // can't mutate table data source, polling erases any optimistic updates, so this happens, sorry... switchMap((_) => this.deletingApps.add(releaseName)), switchMap(res => this.supergiant.HelmReleases.delete(releaseName, this.clusterId, true)), ).subscribe( res => { this.getReleases(releaseName); this.displaySuccess("App: " + releaseName, "Deleted!") }, err => { console.error(err); this.deletingApps.delete(releaseName); this.displayError(this.kube.name, err); } ); } showSshCommands() { const masters = []; const nodes = []; Object.keys(this.kube.masters).map(m => masters.push(this.kube.masters[m])); Object.keys(this.kube.nodes).map(m => nodes.push(this.kube.nodes[m])); this.initSshCommands(masters, nodes); } showKubectlConfig() { this.initKubectlConfig(this.kubectlConfig); } showReleaseInfo(releaseName) { this.initReleaseInfo(releaseName); } openService(proxyPort) { const hostname = this.window.location.hostname; const link = 'http://' + hostname + ':' + proxyPort; this.window.open(link); } trackByFn(index, item) { return index; } private initDialog(target) { const popupWidth = 250; const dialogRef = this.dialog.open(ConfirmModalComponent, { width: `${popupWidth}px`, }); dialogRef.updatePosition({ top: `${target.clientY}px`, left: `${target.clientX - popupWidth - 10}px`, }); return dialogRef; } private initDeleteCluster(clusterState) { const dialogRef = this.dialog.open(DeleteClusterModalComponent, { width: '500px', data: { state: clusterState } }); return dialogRef; } private initDeleteRelease(name) { const dialogRef = this.dialog.open(DeleteReleaseModalComponent, { width: 'max-content', data: { name: name } }); return dialogRef; } private initSshCommands(masters, nodes) { const dialogRef = this.dialog.open(SshCommandsModalComponent, { width: '600px', data: { masters: masters, nodes: nodes } }); return dialogRef; } private initKubectlConfig(config) { const dialogRef = this.dialog.open(KubectlConfigModalComponent, { width: '800px', data: { config: config } }); return dialogRef; } private initReleaseInfo(releaseName) { const dialogRef = this.dialog.open(ReleaseInfoModalComponent, { width: '800px', data: { releaseName: releaseName, clusterId: this.clusterId } }); return dialogRef; } expandRow = (_, row) => row.hasOwnProperty('detailRow'); displaySuccess(headline, msg) { this.notifications.display('success', headline, msg); } displayError(name, err) { let msg: string; if (err.error.userMessage) { msg = err.error.userMessage; } else if (err.userMessage) { msg = err.userMessage } else { msg = err.error; } this.notifications.display( 'error', 'Kube: ' + name, 'Error:' + msg); } }
{ task.showSteps = !task.showSteps; if (this.expandedTaskIds.has(task.id)) { this.expandedTaskIds.delete(task.id); } else { this.expandedTaskIds.add(task.id); } }
identifier_body
cluster.component.ts
import { of, Subscription, timer as observableTimer } from 'rxjs'; import { catchError, filter, switchMap } from 'rxjs/operators'; import { ChangeDetectionStrategy, Component, OnDestroy, ViewChild, ViewEncapsulation, Inject, AfterViewInit, } from '@angular/core'; import { Location } from '@angular/common'; import { ActivatedRoute, Router } from '@angular/router'; import { HttpClient } from '@angular/common/http'; import { MatDialog, MatPaginator, MatSort, MatTableDataSource } from '@angular/material'; import { animate, state, style, transition, trigger } from '@angular/animations'; import { Supergiant } from '../../shared/supergiant/supergiant.service'; import { UtilService } from '../../shared/supergiant/util/util.service'; import { Notifications } from '../../shared/notifications/notifications.service'; import { ConfirmModalComponent } from '../../shared/modals/confirm-modal/confirm-modal.component'; import { DeleteClusterModalComponent } from './delete-cluster-modal/delete-cluster-modal.component'; import { DeleteReleaseModalComponent } from './delete-release-modal/delete-release-modal.component'; import { SshCommandsModalComponent } from './ssh-commands-modal/ssh-commands-modal.component'; import { KubectlConfigModalComponent } from './kubectl-config-modal/kubectl-config-modal.component'; import { TaskLogsComponent } from './task-logs/task-logs.component'; import { ReleaseInfoModalComponent } from './release-info-modal/release-info-modal.component'; import { WINDOW } from '../../shared/helpers/window-providers'; @Component({ selector: 'app-cluster', templateUrl: './cluster.component.html', styleUrls: [ './cluster.component.scss' ], // TODO: do we need this anymore? changeDetection: ChangeDetectionStrategy.Default, encapsulation: ViewEncapsulation.None, animations: [ trigger('detailExpand', [ state('collapsed', style({height: '0px', minHeight: '0', visibility: 'hidden'})), state('expanded', style({height: '*', visibility: 'visible'})), transition('expanded <=> collapsed', animate('225ms cubic-bezier(0.4, 0.0, 0.2, 1)')), ]), ] }) export class ClusterComponent implements AfterViewInit, OnDestroy { clusterId: number; subscriptions = new Subscription(); public kube: any; // machine list vars activeMachines: any; activeMachineListColumns = ['state', 'role', 'size', 'name', 'cpu', 'ram', 'region', 'publicIp', 'delete']; nonActiveMachines: any; nonActiveMachineListColumns = ['state', 'role', 'size', 'name', 'region', 'steps', 'logs', 'delete']; // task list vars tasks: any; taskListColumns = ['status', 'type', 'id', 'steps', 'logs']; expandedTaskIds = new Set(); releases: any; releaseListColumns = ['status', 'name', 'namespace', 'chart', 'chartVersion', 'lastDeployed', 'info', 'delete']; masterTasksStatus = 'queued'; nodeTasksStatus = 'queued'; clusterTasksStatus = 'queued'; cpuUsage: number; ramUsage: number; machineMetrics = {}; kubectlConfig: any; clusterServices: any; serviceListColumns = ['name', 'type', 'namespace', 'selfLink']; deletingApps = new Set(); clusterRestarting: boolean; constructor( private route: ActivatedRoute, private location: Location, private router: Router, private supergiant: Supergiant, private util: UtilService, private notifications: Notifications, public dialog: MatDialog, public http: HttpClient, @Inject(WINDOW) private window: Window ) { route.params.subscribe(params => { this.clusterId = params.id; this.getKube(); }); } @ViewChild(MatSort) sort: MatSort; @ViewChild(MatPaginator) paginator: MatPaginator; get showUsageChart() { return !isNaN(this.cpuUsage) && !isNaN(this.ramUsage); } ngAfterViewInit() { this.clusterId = this.route.snapshot.params.id; this.getKube(); } ngOnDestroy() { this.subscriptions.unsubscribe(); } combineAndFlatten(objOne, objTwo) { const arr = []; Object.keys(objOne).forEach((key) => { arr.push(objOne[key]);
arr.push(objTwo[key]); }); return arr; } getKubeStatus(clusterId) { // we should make Tasks a part of the Supergiant instance // if we start using them outside of this return this.util.fetch('/v1/api/kubes/' + clusterId + '/tasks'); } toggleSteps(task) { task.showSteps = !task.showSteps; if (this.expandedTaskIds.has(task.id)) { this.expandedTaskIds.delete(task.id); } else { this.expandedTaskIds.add(task.id); } } taskComplete(task) { return task.status == 'success'; } viewTaskLog(taskId) { const modal = this.dialog.open(TaskLogsComponent, { width: '1080px', data: { taskId: taskId, hostname: this.window.location.hostname } }); } setProvisioningStep(tasks) { const masterPatt = /master/g; const masterTasks = tasks.filter(t => { return masterPatt.test(t.type.toLowerCase()); }); const nodePatt = /node/g; const nodeTasks = tasks.filter(t => { return nodePatt.test(t.type.toLowerCase()); }); // oh my god I'm so sorry if (masterTasks.every(this.taskComplete)) { // masters complete this.masterTasksStatus = 'complete'; if (nodeTasks.every(this.taskComplete)) { // masters AND nodes complete this.nodeTasksStatus = 'complete'; this.clusterTasksStatus = 'executing'; } else { // masters complete, nodes executing this.nodeTasksStatus = 'executing'; } } else { // masters executing this.masterTasksStatus = 'executing'; } } downloadPrivateKey() { let a = document.createElement("a"); a.href = "data:," + this.kube.sshConfig.bootstrapPrivateKey; a.setAttribute("download", this.kube.name + ".pem"); a.click(); } orderTasks(a, b) { const aSortId = (a.type + a.id).toUpperCase(); const bSortId = (b.type + b.id).toUpperCase(); let comparison = 0; if (aSortId > bSortId) { comparison = 1 } else if (aSortId < bSortId) { comparison = -1 } return comparison; } updateTaskList(tasks) { const rows = []; const orderedTasks = tasks.sort(this.orderTasks); orderedTasks.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } rows.push(t, { detailRow: true, t }); }); this.tasks = new MatTableDataSource(rows); } getKube() { // TODO: shameful how smart this ENTIRE component has become. this.subscriptions.add(observableTimer(0, 10000).pipe( switchMap(() => this.supergiant.Kubes.get(this.clusterId))).subscribe( k => { this.kube = k; switch (this.kube.state) { case 'operational': { this.renderMachines(this.kube); this.getReleases(); this.getClusterMetrics(); this.getMachineMetrics(); this.getKubectlConfig(); this.getClusterServices(); break; } case 'failed': { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.updateTaskList(tasks); }, err => console.log(err) ); this.masterTasksStatus = 'failed'; this.nodeTasksStatus = 'failed'; this.clusterTasksStatus = 'failed'; break; } default: { this.getKubeStatus(this.clusterId).subscribe( tasks => { this.setProvisioningStep(tasks); this.updateTaskList(tasks); }, err => console.log(err) ); break; } } }, err => console.error(err) )); } renderMachines(kube) { const masterNames = Object.keys(kube.masters); const nodeNames = Object.keys(kube.nodes); masterNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.masters[name].metrics = this.machineMetrics[lowercaseName]; } }); nodeNames.forEach(name => { const lowercaseName = name.toLowerCase(); if (this.machineMetrics[lowercaseName]) { kube.nodes[name].metrics = this.machineMetrics[lowercaseName]; } }); const allMachines = this.combineAndFlatten(kube.masters, kube.nodes); const activeMachines = allMachines.filter(m => m.state == 'active' || m.state == 'deleting'); const nonActiveMachines = allMachines.filter(m => m.state != 'active' && m.state != 'deleting'); this.activeMachines = new MatTableDataSource(activeMachines); if (nonActiveMachines.length > 0) { const executingTasksObj = {}; this.getKubeStatus(this.clusterId).subscribe( tasks => { const executing = tasks.filter(t => t.status == 'executing' || t.status == 'error'); executing.forEach(t => { if (this.expandedTaskIds.has(t.id)) { t.showSteps = true; } executingTasksObj[t.id] = t; }); const nonAM = []; nonActiveMachines.forEach(m => { const tid = m.taskId; const t = executingTasksObj[tid]; m.taskData = executingTasksObj[tid]; nonAM.push(m, { detailRow: true, t }); }); this.nonActiveMachines = new MatTableDataSource(nonAM); }, err => console.error(err) ); } else { this.nonActiveMachines = {}; } } getReleases(deletedReleaseName?) { this.supergiant.HelmReleases.get(this.clusterId).subscribe( res => { const releases = res.filter(r => r.status !== 'DELETED'); this.releases = new MatTableDataSource(releases); // TODO: this is temporary. We need to figure out a way around the constant polling if (deletedReleaseName) { this.deletingApps.delete(deletedReleaseName); } }, err => console.error(err) ); } getClusterMetrics() { this.supergiant.Kubes.getClusterMetrics(this.clusterId).subscribe( res => { this.cpuUsage = res.cpu; this.ramUsage = res.memory; }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getMachineMetrics() { this.supergiant.Kubes.getMachineMetrics(this.clusterId).subscribe( res => { this.machineMetrics = this.calculateMachineMetrics(res); this.renderMachines(this.kube); }, // if metrics aren't available yet, server returns 500, which we don't need err => { return; } ); } getKubectlConfig() { // TODO: move to service this.util.fetch('v1/api/kubes/' + this.clusterId + '/users/kubernetes-admin/kubeconfig').subscribe( res => this.kubectlConfig = res, err => console.error(err) ); } getClusterServices() { this.supergiant.Kubes.getClusterServices(this.clusterId).subscribe( res => this.clusterServices = new MatTableDataSource(res), err => console.error(err) ); } calculateMachineMetrics(machines) { Object.keys(machines).forEach(m => { machines[m].cpu = (machines[m].cpu * 100).toFixed(1); machines[m].memory = (machines[m].memory * 100).toFixed(1); }); return machines; } restart(id) { this.clusterRestarting = true; this.supergiant.Kubes.restartFailedProvision(id).subscribe( res => { this.clusterRestarting = false; this.getKube() }, err => { this.displayError(this.kube.name, err.error.userMessage) this.clusterRestarting = false; } ) } removeNode(nodeName: string, target) { const dialogRef = this.initDialog(target); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Nodes.delete(this.clusterId, nodeName)), switchMap(() => this.supergiant.Kubes.get(this.clusterId)), catchError((error) => of(error)), ).subscribe( k => { this.displaySuccess("Node: " + nodeName, "Deleted!"); this.renderMachines(k) }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteCluster() { const dialogRef = this.initDeleteCluster(this.kube.state); dialogRef .afterClosed() .pipe( filter(isConfirmed => isConfirmed), switchMap(() => this.supergiant.Kubes.delete(this.clusterId)) ).subscribe( res => { this.displaySuccess("Kube: " + this.kube.name, "Deleted!"); this.router.navigate(['']); }, err => { console.error(err); this.displayError(this.kube.name, err); }, ); } deleteRelease(releaseName, idx) { const dialogRef = this.initDeleteRelease(releaseName); dialogRef .afterClosed() .pipe( filter(res => res.deleteRelease), // can't mutate table data source, polling erases any optimistic updates, so this happens, sorry... switchMap((_) => this.deletingApps.add(releaseName)), switchMap(res => this.supergiant.HelmReleases.delete(releaseName, this.clusterId, true)), ).subscribe( res => { this.getReleases(releaseName); this.displaySuccess("App: " + releaseName, "Deleted!") }, err => { console.error(err); this.deletingApps.delete(releaseName); this.displayError(this.kube.name, err); } ); } showSshCommands() { const masters = []; const nodes = []; Object.keys(this.kube.masters).map(m => masters.push(this.kube.masters[m])); Object.keys(this.kube.nodes).map(m => nodes.push(this.kube.nodes[m])); this.initSshCommands(masters, nodes); } showKubectlConfig() { this.initKubectlConfig(this.kubectlConfig); } showReleaseInfo(releaseName) { this.initReleaseInfo(releaseName); } openService(proxyPort) { const hostname = this.window.location.hostname; const link = 'http://' + hostname + ':' + proxyPort; this.window.open(link); } trackByFn(index, item) { return index; } private initDialog(target) { const popupWidth = 250; const dialogRef = this.dialog.open(ConfirmModalComponent, { width: `${popupWidth}px`, }); dialogRef.updatePosition({ top: `${target.clientY}px`, left: `${target.clientX - popupWidth - 10}px`, }); return dialogRef; } private initDeleteCluster(clusterState) { const dialogRef = this.dialog.open(DeleteClusterModalComponent, { width: '500px', data: { state: clusterState } }); return dialogRef; } private initDeleteRelease(name) { const dialogRef = this.dialog.open(DeleteReleaseModalComponent, { width: 'max-content', data: { name: name } }); return dialogRef; } private initSshCommands(masters, nodes) { const dialogRef = this.dialog.open(SshCommandsModalComponent, { width: '600px', data: { masters: masters, nodes: nodes } }); return dialogRef; } private initKubectlConfig(config) { const dialogRef = this.dialog.open(KubectlConfigModalComponent, { width: '800px', data: { config: config } }); return dialogRef; } private initReleaseInfo(releaseName) { const dialogRef = this.dialog.open(ReleaseInfoModalComponent, { width: '800px', data: { releaseName: releaseName, clusterId: this.clusterId } }); return dialogRef; } expandRow = (_, row) => row.hasOwnProperty('detailRow'); displaySuccess(headline, msg) { this.notifications.display('success', headline, msg); } displayError(name, err) { let msg: string; if (err.error.userMessage) { msg = err.error.userMessage; } else if (err.userMessage) { msg = err.userMessage } else { msg = err.error; } this.notifications.display( 'error', 'Kube: ' + name, 'Error:' + msg); } }
}); Object.keys(objTwo).forEach((key) => {
random_line_split
fields-definition.rs
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(decl_macro)] macro modern($a: ident) { struct Modern { a: u8, $a: u8, // OK } } macro_rules! legacy { ($a: ident) => { struct Legacy { a: u8, $a: u8, //~ ERROR field `a` is already declared } } } modern!(a); legacy!(a);
fn main() {}
random_line_split
fields-definition.rs
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(decl_macro)] macro modern($a: ident) { struct Modern { a: u8, $a: u8, // OK } } macro_rules! legacy { ($a: ident) => { struct Legacy { a: u8, $a: u8, //~ ERROR field `a` is already declared } } } modern!(a); legacy!(a); fn main()
{}
identifier_body
fields-definition.rs
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(decl_macro)] macro modern($a: ident) { struct
{ a: u8, $a: u8, // OK } } macro_rules! legacy { ($a: ident) => { struct Legacy { a: u8, $a: u8, //~ ERROR field `a` is already declared } } } modern!(a); legacy!(a); fn main() {}
Modern
identifier_name
kubernetescommand.ts
"use strict"; import * as del from "del"; import * as fs from "fs"; import * as tr from "azure-pipelines-task-lib/toolrunner"; import trm = require('azure-pipelines-task-lib/toolrunner'); import * as path from "path"; import * as tl from "azure-pipelines-task-lib/task"; import * as utils from "./utilities"; import ClusterConnection from "./clusterconnection"; export function run(connection: ClusterConnection, kubecommand: string, outputUpdate: (data: string) => any): any { var command = connection.createCommand(); command.on("stdout", output => { outputUpdate(output); }); command.arg(kubecommand); command.arg(getNameSpace()); command.arg(getCommandConfigurationFile()); command.line(getCommandArguments()); command.arg(getCommandOutputFormat(kubecommand)); return connection.execCommand(command); } function getCommandOutputFormat(kubecommand: string): string[] { var args: string[] = []; var outputFormat = tl.getInput("outputFormat", false); if (outputFormat) { switch (outputFormat) { case '': case 'none': tl.debug(`Skipping -o in args as outputFormat is 'none' or empty.`); return args; case 'json': case 'yaml': if (!isJsonOrYamlOutputFormatSupported(kubecommand)) { return args; } default: args[0] = "-o"; args[1] = outputFormat; } } return args; } export function getCommandConfigurationFile(): string[]
function getCommandArguments(): string { return tl.getInput("arguments", false); } export function isJsonOrYamlOutputFormatSupported(kubecommand): boolean { var commandsThatDontSupportYamlAndJson: string[] = ["explain", "delete", "cluster-info", "top", "cordon", "uncordon", "drain", "describe", "logs", "attach", "exec", "port-forward", "proxy", "cp", "auth", "completion", "api-versions", "config", "help", "plugin", "rollout"]; if (commandsThatDontSupportYamlAndJson.findIndex(command => command === kubecommand) > -1) { return false; } else { return true; } } export function getNameSpace(): string[] { var args: string[] = []; var namespace = tl.getInput("namespace", false); if (namespace) { args[0] = "-n"; args[1] = namespace; } return args; }
{ var args: string[] = []; var useConfigurationFile: boolean = tl.getBoolInput("useConfigurationFile", false); if (useConfigurationFile) { let configurationPath = tl.getPathInput("configuration", false); var inlineConfiguration = tl.getInput("inline", false); if (!tl.filePathSupplied("configuration")) { configurationPath = null; } if (configurationPath != null && inlineConfiguration != null) { let type = tl.getInput("configurationType", false); if (type == "inline") configurationPath = null; else inlineConfiguration = null; } if (configurationPath == null && inlineConfiguration == null) { throw new Error(tl.loc('InvalidConfiguration')); } else if (configurationPath) { if (tl.exist(configurationPath)) { args[0] = "-f"; args[1] = configurationPath; } else { throw new Error(tl.loc('ConfigurationFileNotFound', configurationPath)); } } else if (inlineConfiguration) { var tempInlineFile = utils.writeInlineConfigInTempPath(inlineConfiguration); if (tl.exist(tempInlineFile)) { args[0] = "-f"; args[1] = tempInlineFile; } else { throw new Error(tl.loc('ConfigurationFileNotFound', tempInlineFile)); } } } return args; }
identifier_body
kubernetescommand.ts
"use strict"; import * as del from "del"; import * as fs from "fs"; import * as tr from "azure-pipelines-task-lib/toolrunner"; import trm = require('azure-pipelines-task-lib/toolrunner'); import * as path from "path"; import * as tl from "azure-pipelines-task-lib/task"; import * as utils from "./utilities"; import ClusterConnection from "./clusterconnection"; export function run(connection: ClusterConnection, kubecommand: string, outputUpdate: (data: string) => any): any { var command = connection.createCommand(); command.on("stdout", output => { outputUpdate(output); }); command.arg(kubecommand); command.arg(getNameSpace()); command.arg(getCommandConfigurationFile()); command.line(getCommandArguments()); command.arg(getCommandOutputFormat(kubecommand)); return connection.execCommand(command); } function getCommandOutputFormat(kubecommand: string): string[] { var args: string[] = []; var outputFormat = tl.getInput("outputFormat", false); if (outputFormat) { switch (outputFormat) { case '': case 'none': tl.debug(`Skipping -o in args as outputFormat is 'none' or empty.`); return args; case 'json': case 'yaml': if (!isJsonOrYamlOutputFormatSupported(kubecommand)) { return args; } default: args[0] = "-o"; args[1] = outputFormat; } } return args; } export function getCommandConfigurationFile(): string[] { var args: string[] = []; var useConfigurationFile: boolean = tl.getBoolInput("useConfigurationFile", false); if (useConfigurationFile) { let configurationPath = tl.getPathInput("configuration", false); var inlineConfiguration = tl.getInput("inline", false); if (!tl.filePathSupplied("configuration")) { configurationPath = null; } if (configurationPath != null && inlineConfiguration != null) { let type = tl.getInput("configurationType", false); if (type == "inline") configurationPath = null; else inlineConfiguration = null; } if (configurationPath == null && inlineConfiguration == null) { throw new Error(tl.loc('InvalidConfiguration')); } else if (configurationPath) { if (tl.exist(configurationPath)) { args[0] = "-f"; args[1] = configurationPath; } else { throw new Error(tl.loc('ConfigurationFileNotFound', configurationPath)); } } else if (inlineConfiguration) { var tempInlineFile = utils.writeInlineConfigInTempPath(inlineConfiguration); if (tl.exist(tempInlineFile)) { args[0] = "-f"; args[1] = tempInlineFile; } else { throw new Error(tl.loc('ConfigurationFileNotFound', tempInlineFile)); } } } return args; } function getCommandArguments(): string { return tl.getInput("arguments", false); } export function isJsonOrYamlOutputFormatSupported(kubecommand): boolean { var commandsThatDontSupportYamlAndJson: string[] = ["explain", "delete", "cluster-info", "top", "cordon", "uncordon", "drain", "describe", "logs", "attach", "exec", "port-forward", "proxy", "cp", "auth", "completion", "api-versions", "config", "help", "plugin", "rollout"]; if (commandsThatDontSupportYamlAndJson.findIndex(command => command === kubecommand) > -1) { return false; } else { return true; } } export function getNameSpace(): string[] { var args: string[] = []; var namespace = tl.getInput("namespace", false);
args[1] = namespace; } return args; }
if (namespace) { args[0] = "-n";
random_line_split
kubernetescommand.ts
"use strict"; import * as del from "del"; import * as fs from "fs"; import * as tr from "azure-pipelines-task-lib/toolrunner"; import trm = require('azure-pipelines-task-lib/toolrunner'); import * as path from "path"; import * as tl from "azure-pipelines-task-lib/task"; import * as utils from "./utilities"; import ClusterConnection from "./clusterconnection"; export function run(connection: ClusterConnection, kubecommand: string, outputUpdate: (data: string) => any): any { var command = connection.createCommand(); command.on("stdout", output => { outputUpdate(output); }); command.arg(kubecommand); command.arg(getNameSpace()); command.arg(getCommandConfigurationFile()); command.line(getCommandArguments()); command.arg(getCommandOutputFormat(kubecommand)); return connection.execCommand(command); } function getCommandOutputFormat(kubecommand: string): string[] { var args: string[] = []; var outputFormat = tl.getInput("outputFormat", false); if (outputFormat) { switch (outputFormat) { case '': case 'none': tl.debug(`Skipping -o in args as outputFormat is 'none' or empty.`); return args; case 'json': case 'yaml': if (!isJsonOrYamlOutputFormatSupported(kubecommand)) { return args; } default: args[0] = "-o"; args[1] = outputFormat; } } return args; } export function getCommandConfigurationFile(): string[] { var args: string[] = []; var useConfigurationFile: boolean = tl.getBoolInput("useConfigurationFile", false); if (useConfigurationFile) { let configurationPath = tl.getPathInput("configuration", false); var inlineConfiguration = tl.getInput("inline", false); if (!tl.filePathSupplied("configuration")) { configurationPath = null; } if (configurationPath != null && inlineConfiguration != null) { let type = tl.getInput("configurationType", false); if (type == "inline") configurationPath = null; else inlineConfiguration = null; } if (configurationPath == null && inlineConfiguration == null) { throw new Error(tl.loc('InvalidConfiguration')); } else if (configurationPath) { if (tl.exist(configurationPath))
else { throw new Error(tl.loc('ConfigurationFileNotFound', configurationPath)); } } else if (inlineConfiguration) { var tempInlineFile = utils.writeInlineConfigInTempPath(inlineConfiguration); if (tl.exist(tempInlineFile)) { args[0] = "-f"; args[1] = tempInlineFile; } else { throw new Error(tl.loc('ConfigurationFileNotFound', tempInlineFile)); } } } return args; } function getCommandArguments(): string { return tl.getInput("arguments", false); } export function isJsonOrYamlOutputFormatSupported(kubecommand): boolean { var commandsThatDontSupportYamlAndJson: string[] = ["explain", "delete", "cluster-info", "top", "cordon", "uncordon", "drain", "describe", "logs", "attach", "exec", "port-forward", "proxy", "cp", "auth", "completion", "api-versions", "config", "help", "plugin", "rollout"]; if (commandsThatDontSupportYamlAndJson.findIndex(command => command === kubecommand) > -1) { return false; } else { return true; } } export function getNameSpace(): string[] { var args: string[] = []; var namespace = tl.getInput("namespace", false); if (namespace) { args[0] = "-n"; args[1] = namespace; } return args; }
{ args[0] = "-f"; args[1] = configurationPath; }
conditional_block
kubernetescommand.ts
"use strict"; import * as del from "del"; import * as fs from "fs"; import * as tr from "azure-pipelines-task-lib/toolrunner"; import trm = require('azure-pipelines-task-lib/toolrunner'); import * as path from "path"; import * as tl from "azure-pipelines-task-lib/task"; import * as utils from "./utilities"; import ClusterConnection from "./clusterconnection"; export function run(connection: ClusterConnection, kubecommand: string, outputUpdate: (data: string) => any): any { var command = connection.createCommand(); command.on("stdout", output => { outputUpdate(output); }); command.arg(kubecommand); command.arg(getNameSpace()); command.arg(getCommandConfigurationFile()); command.line(getCommandArguments()); command.arg(getCommandOutputFormat(kubecommand)); return connection.execCommand(command); } function getCommandOutputFormat(kubecommand: string): string[] { var args: string[] = []; var outputFormat = tl.getInput("outputFormat", false); if (outputFormat) { switch (outputFormat) { case '': case 'none': tl.debug(`Skipping -o in args as outputFormat is 'none' or empty.`); return args; case 'json': case 'yaml': if (!isJsonOrYamlOutputFormatSupported(kubecommand)) { return args; } default: args[0] = "-o"; args[1] = outputFormat; } } return args; } export function getCommandConfigurationFile(): string[] { var args: string[] = []; var useConfigurationFile: boolean = tl.getBoolInput("useConfigurationFile", false); if (useConfigurationFile) { let configurationPath = tl.getPathInput("configuration", false); var inlineConfiguration = tl.getInput("inline", false); if (!tl.filePathSupplied("configuration")) { configurationPath = null; } if (configurationPath != null && inlineConfiguration != null) { let type = tl.getInput("configurationType", false); if (type == "inline") configurationPath = null; else inlineConfiguration = null; } if (configurationPath == null && inlineConfiguration == null) { throw new Error(tl.loc('InvalidConfiguration')); } else if (configurationPath) { if (tl.exist(configurationPath)) { args[0] = "-f"; args[1] = configurationPath; } else { throw new Error(tl.loc('ConfigurationFileNotFound', configurationPath)); } } else if (inlineConfiguration) { var tempInlineFile = utils.writeInlineConfigInTempPath(inlineConfiguration); if (tl.exist(tempInlineFile)) { args[0] = "-f"; args[1] = tempInlineFile; } else { throw new Error(tl.loc('ConfigurationFileNotFound', tempInlineFile)); } } } return args; } function getCommandArguments(): string { return tl.getInput("arguments", false); } export function
(kubecommand): boolean { var commandsThatDontSupportYamlAndJson: string[] = ["explain", "delete", "cluster-info", "top", "cordon", "uncordon", "drain", "describe", "logs", "attach", "exec", "port-forward", "proxy", "cp", "auth", "completion", "api-versions", "config", "help", "plugin", "rollout"]; if (commandsThatDontSupportYamlAndJson.findIndex(command => command === kubecommand) > -1) { return false; } else { return true; } } export function getNameSpace(): string[] { var args: string[] = []; var namespace = tl.getInput("namespace", false); if (namespace) { args[0] = "-n"; args[1] = namespace; } return args; }
isJsonOrYamlOutputFormatSupported
identifier_name
base58.py
''' Yescoin base58 encoding and decoding. Based on https://yescointalk.org/index.php?topic=1026.0 (public domain) ''' import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): return c def chr(n): return bytes( (n,) ) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Yescoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0]*nPad) + result def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += __b58chars.find(c) * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0)*nPad + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def
(v): """b58encode a string, with 32-bit checksum""" return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None h3 = checksum(result[:-4]) if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ addr = b58decode_chk(strAddress) if addr is None or len(addr)!=21: return None version = addr[0] return ord(version) if __name__ == '__main__': # Test case (from http://gitorious.org/yescoin/python-base58.git) assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
b58encode_chk
identifier_name
base58.py
''' Yescoin base58 encoding and decoding. Based on https://yescointalk.org/index.php?topic=1026.0 (public domain) ''' import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): return c def chr(n): return bytes( (n,) ) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Yescoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0]*nPad) + result def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += __b58chars.find(c) * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0)*nPad + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def b58encode_chk(v): """b58encode a string, with 32-bit checksum""" return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None h3 = checksum(result[:-4]) if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress):
if __name__ == '__main__': # Test case (from http://gitorious.org/yescoin/python-base58.git) assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """ addr = b58decode_chk(strAddress) if addr is None or len(addr)!=21: return None version = addr[0] return ord(version)
identifier_body
base58.py
''' Yescoin base58 encoding and decoding. Based on https://yescointalk.org/index.php?topic=1026.0 (public domain) ''' import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): return c def chr(n): return bytes( (n,) ) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Yescoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0]*nPad) + result def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 for (i, c) in enumerate(v[::-1]):
result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0)*nPad + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def b58encode_chk(v): """b58encode a string, with 32-bit checksum""" return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None h3 = checksum(result[:-4]) if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ addr = b58decode_chk(strAddress) if addr is None or len(addr)!=21: return None version = addr[0] return ord(version) if __name__ == '__main__': # Test case (from http://gitorious.org/yescoin/python-base58.git) assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
long_value += __b58chars.find(c) * (__b58base**i)
conditional_block
base58.py
''' Yescoin base58 encoding and decoding. Based on https://yescointalk.org/index.php?topic=1026.0 (public domain) ''' import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): return c def chr(n): return bytes( (n,) ) __b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): """ encode v, which is a string of bytes, to base58. """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * ord(c) result = '' while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Yescoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == '\0': nPad += 1 else: break return (__b58chars[0]*nPad) + result def b58decode(v, length = None): """ decode v into a string of len bytes """ long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += __b58chars.find(c) * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 else: break result = chr(0)*nPad + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None h3 = checksum(result[:-4]) if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ addr = b58decode_chk(strAddress) if addr is None or len(addr)!=21: return None version = addr[0] return ord(version) if __name__ == '__main__': # Test case (from http://gitorious.org/yescoin/python-base58.git) assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0 _ohai = 'o hai'.encode('ascii') _tmp = b58encode(_ohai) assert _tmp == 'DYB3oMS' assert b58decode(_tmp, 5) == _ohai print("Tests passed")
def b58encode_chk(v): """b58encode a string, with 32-bit checksum"""
random_line_split
segment_hook.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module contains a Segment Hook which allows you to connect to your Segment account, retrieve data from it or write to that file. NOTE: this hook also relies on the Segment analytics package: https://github.com/segmentio/analytics-python """ import analytics from airflow.hooks.base_hook import BaseHook from airflow.exceptions import AirflowException from airflow.utils.log.logging_mixin import LoggingMixin class SegmentHook(BaseHook, LoggingMixin): def __init__( self, segment_conn_id='segment_default', segment_debug_mode=False, *args, **kwargs ): """ Create new connection to Segment and allows you to pull data out of Segment or write to it. You can then use that file with other Airflow operators to move the data around or interact with segment. :param segment_conn_id: the name of the connection that has the parameters we need to connect to Segment. The connection should be type `json` and include a write_key security token in the `Extras` field. :type segment_conn_id: str :param segment_debug_mode: Determines whether Segment should run in debug mode. Defaults to False :type segment_debug_mode: boolean .. note:: You must include a JSON structure in the `Extras` field. We need a user's security token to connect to Segment. So we define it in the `Extras` field as: `{"write_key":"YOUR_SECURITY_TOKEN"}` """ self.segment_conn_id = segment_conn_id self.segment_debug_mode = segment_debug_mode self._args = args self._kwargs = kwargs # get the connection parameters self.connection = self.get_connection(self.segment_conn_id) self.extras = self.connection.extra_dejson self.write_key = self.extras.get('write_key') if self.write_key is None: raise AirflowException('No Segment write key provided') def get_conn(self): self.log.info('Setting write key for Segment analytics connection') analytics.debug = self.segment_debug_mode if self.segment_debug_mode:
analytics.on_error = self.on_error analytics.write_key = self.write_key return analytics def on_error(self, error, items): """ Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
self.log.info('Setting Segment analytics connection to debug mode')
conditional_block
segment_hook.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module contains a Segment Hook which allows you to connect to your Segment account, retrieve data from it or write to that file. NOTE: this hook also relies on the Segment analytics package: https://github.com/segmentio/analytics-python """ import analytics from airflow.hooks.base_hook import BaseHook from airflow.exceptions import AirflowException from airflow.utils.log.logging_mixin import LoggingMixin class SegmentHook(BaseHook, LoggingMixin): def __init__( self, segment_conn_id='segment_default', segment_debug_mode=False, *args, **kwargs ): """ Create new connection to Segment and allows you to pull data out of Segment or write to it. You can then use that file with other Airflow operators to move the data around or interact with segment. :param segment_conn_id: the name of the connection that has the parameters we need to connect to Segment. The connection should be type `json` and include a write_key security token in the `Extras` field. :type segment_conn_id: str :param segment_debug_mode: Determines whether Segment should run in debug mode. Defaults to False :type segment_debug_mode: boolean .. note:: You must include a JSON structure in the `Extras` field. We need a user's security token to connect to Segment. So we define it in the `Extras` field as: `{"write_key":"YOUR_SECURITY_TOKEN"}` """ self.segment_conn_id = segment_conn_id self.segment_debug_mode = segment_debug_mode self._args = args self._kwargs = kwargs # get the connection parameters self.connection = self.get_connection(self.segment_conn_id) self.extras = self.connection.extra_dejson self.write_key = self.extras.get('write_key') if self.write_key is None: raise AirflowException('No Segment write key provided') def
(self): self.log.info('Setting write key for Segment analytics connection') analytics.debug = self.segment_debug_mode if self.segment_debug_mode: self.log.info('Setting Segment analytics connection to debug mode') analytics.on_error = self.on_error analytics.write_key = self.write_key return analytics def on_error(self, error, items): """ Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
get_conn
identifier_name
segment_hook.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. #
""" This module contains a Segment Hook which allows you to connect to your Segment account, retrieve data from it or write to that file. NOTE: this hook also relies on the Segment analytics package: https://github.com/segmentio/analytics-python """ import analytics from airflow.hooks.base_hook import BaseHook from airflow.exceptions import AirflowException from airflow.utils.log.logging_mixin import LoggingMixin class SegmentHook(BaseHook, LoggingMixin): def __init__( self, segment_conn_id='segment_default', segment_debug_mode=False, *args, **kwargs ): """ Create new connection to Segment and allows you to pull data out of Segment or write to it. You can then use that file with other Airflow operators to move the data around or interact with segment. :param segment_conn_id: the name of the connection that has the parameters we need to connect to Segment. The connection should be type `json` and include a write_key security token in the `Extras` field. :type segment_conn_id: str :param segment_debug_mode: Determines whether Segment should run in debug mode. Defaults to False :type segment_debug_mode: boolean .. note:: You must include a JSON structure in the `Extras` field. We need a user's security token to connect to Segment. So we define it in the `Extras` field as: `{"write_key":"YOUR_SECURITY_TOKEN"}` """ self.segment_conn_id = segment_conn_id self.segment_debug_mode = segment_debug_mode self._args = args self._kwargs = kwargs # get the connection parameters self.connection = self.get_connection(self.segment_conn_id) self.extras = self.connection.extra_dejson self.write_key = self.extras.get('write_key') if self.write_key is None: raise AirflowException('No Segment write key provided') def get_conn(self): self.log.info('Setting write key for Segment analytics connection') analytics.debug = self.segment_debug_mode if self.segment_debug_mode: self.log.info('Setting Segment analytics connection to debug mode') analytics.on_error = self.on_error analytics.write_key = self.write_key return analytics def on_error(self, error, items): """ Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
random_line_split
segment_hook.py
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # """ This module contains a Segment Hook which allows you to connect to your Segment account, retrieve data from it or write to that file. NOTE: this hook also relies on the Segment analytics package: https://github.com/segmentio/analytics-python """ import analytics from airflow.hooks.base_hook import BaseHook from airflow.exceptions import AirflowException from airflow.utils.log.logging_mixin import LoggingMixin class SegmentHook(BaseHook, LoggingMixin): def __init__( self, segment_conn_id='segment_default', segment_debug_mode=False, *args, **kwargs ): """ Create new connection to Segment and allows you to pull data out of Segment or write to it. You can then use that file with other Airflow operators to move the data around or interact with segment. :param segment_conn_id: the name of the connection that has the parameters we need to connect to Segment. The connection should be type `json` and include a write_key security token in the `Extras` field. :type segment_conn_id: str :param segment_debug_mode: Determines whether Segment should run in debug mode. Defaults to False :type segment_debug_mode: boolean .. note:: You must include a JSON structure in the `Extras` field. We need a user's security token to connect to Segment. So we define it in the `Extras` field as: `{"write_key":"YOUR_SECURITY_TOKEN"}` """ self.segment_conn_id = segment_conn_id self.segment_debug_mode = segment_debug_mode self._args = args self._kwargs = kwargs # get the connection parameters self.connection = self.get_connection(self.segment_conn_id) self.extras = self.connection.extra_dejson self.write_key = self.extras.get('write_key') if self.write_key is None: raise AirflowException('No Segment write key provided') def get_conn(self): self.log.info('Setting write key for Segment analytics connection') analytics.debug = self.segment_debug_mode if self.segment_debug_mode: self.log.info('Setting Segment analytics connection to debug mode') analytics.on_error = self.on_error analytics.write_key = self.write_key return analytics def on_error(self, error, items):
""" Handles error callbacks when using Segment with segment_debug_mode set to True """ self.log.error('Encountered Segment error: {segment_error} with ' 'items: {with_items}'.format(segment_error=error, with_items=items)) raise AirflowException('Segment error: {}'.format(error))
identifier_body
0_setup.py
#!/usr/bin/python # # \file 0_setup.py # \brief setup pacs_prim_list # \date 2011-09-28 7:22GMT # \author Jan Boon (Kaetemi) # Python port of game data build pipeline. # Setup pacs_prim_list # # NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/> # Copyright (C) 2010 Winch Gate Property Limited # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #
sys.path.append("../../configuration") if os.path.isfile("log.log"): os.remove("log.log") log = open("log.log", "w") from scripts import * from buildsite import * from process import * from tools import * from directories import * printLog(log, "") printLog(log, "-------") printLog(log, "--- Setup pacs_prim_list") printLog(log, "-------") printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time()))) printLog(log, "") # Setup source directories printLog(log, ">>> Setup source directories <<<") for dir in PacsPrimExportSourceDirectories: mkPath(log, ExportBuildDirectory + "/" + dir) # Setup build directories printLog(log, ">>> Setup build directories <<<") mkPath(log, DataCommonDirectory) # no choice log.close() # end of file
import time, sys, os, shutil, subprocess, distutils.dir_util
random_line_split
0_setup.py
#!/usr/bin/python # # \file 0_setup.py # \brief setup pacs_prim_list # \date 2011-09-28 7:22GMT # \author Jan Boon (Kaetemi) # Python port of game data build pipeline. # Setup pacs_prim_list # # NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/> # Copyright (C) 2010 Winch Gate Property Limited # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import time, sys, os, shutil, subprocess, distutils.dir_util sys.path.append("../../configuration") if os.path.isfile("log.log"): os.remove("log.log") log = open("log.log", "w") from scripts import * from buildsite import * from process import * from tools import * from directories import * printLog(log, "") printLog(log, "-------") printLog(log, "--- Setup pacs_prim_list") printLog(log, "-------") printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time()))) printLog(log, "") # Setup source directories printLog(log, ">>> Setup source directories <<<") for dir in PacsPrimExportSourceDirectories:
# Setup build directories printLog(log, ">>> Setup build directories <<<") mkPath(log, DataCommonDirectory) # no choice log.close() # end of file
mkPath(log, ExportBuildDirectory + "/" + dir)
conditional_block
test_hpcp.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestHPCP(TestCase): def testEmpty(self): hpcp = HPCP()([], []) self.assertEqualVector(hpcp, [0.]*12) def testZeros(self): hpcp = HPCP()([0]*10, [0]*10) self.assertEqualVector(hpcp, [0.]*12) def testSin440(self): # Tests whether a real audio signal of one pure tone gets read as a # single semitone activation, and gets read into the right pcp bin sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'), sampleRate = sampleRate)() speaks = SpectralPeaks(sampleRate = sampleRate, maxPeaks = 1, maxFrequency = sampleRate/2, minFrequency = 0, magnitudeThreshold = 0, orderBy = 'magnitude') (freqs, mags) = speaks(Spectrum()(audio)) hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testAllSemitones(self): # Tests whether a spectral peak output of 12 consecutive semitones # yields a HPCP of all 1's tonic = 440 freqs = [(tonic * 2**(x/12.)) for x in range(12)] mags = [1] * 12 hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) def testSubmediantPosition(self): # Make sure that the submediant of a key based on 440 is in the # correct location (submediant was randomly selected from all the # tones) tonic = 440 submediant = tonic * 2**(9./12.) hpcp = HPCP()([submediant], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.]) def testMaxShifted(self): # Tests whether a HPCP reading with only the dominant semitone # activated is correctly shifted so that the dominant is at the # position 0 tonic = 440 dominant = tonic * 2**(7./12.) hpcp = HPCP(maxShifted=True)([dominant], [1]) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def chordHelper(self, half_steps, tunning, strength): notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))] hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength) for i in range(len(hpcp)): if i in half_steps: self.assertTrue(hpcp[i]>0) elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0) else: self.assertEqual(hpcp[i], 0) def testChord(self): tunning = 440 AMajor = [0, 4, 7] # AMajor = A4-C#5-E5 self.chordHelper(AMajor, tunning, [1,1,1]) CMajor = [3, -4, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,1,1]) CMajor = [-4, 3, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [-4, -2, 3] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [3, 8, 10] # CMajor = C5-F5-G5 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) AMinor = [0, 3, 7] # AMinor = A4-C5-E5 self.chordHelper(AMinor, tunning, [1,0.5,0.2]) CMinor = [3, 6, 10] # CMinor = C5-E5-G5 self.chordHelper(CMinor, tunning, [1,0.5,0.2]) # Test of various parameter logical bounds def testLowFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testHighFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testSmallMinRange(self): self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200}) def testSmallMaxRange(self): self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000}) def testSmallMinMaxRange(self): self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1}) def testSizeNonmultiple12(self): self.assertConfigureFails(HPCP(), {'size':13}) def testHarmonics(self): # Regression test for the 'harmonics' parameter tone = 100. # arbitrary frequency [Hz] freqs = [tone, tone*2, tone*3, tone*4] mags = [1]*4 hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) hpcp = hpcpAlg(freqs, mags) expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.] self.assertAlmostEqualVector(hpcp, expected, 1e-4) def testRegression(self): # Just makes sure algorithm does not crash on a real data source. This # test is not really looking for correctness. Maybe consider revising # it. inputSize = 512 sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')), sampleRate = sampleRate)() fc = FrameCutter(frameSize = inputSize, hopSize = inputSize) windowingAlg = Windowing(type = 'blackmanharris62') specAlg = Spectrum(size=inputSize) sPeaksAlg = SpectralPeaks(sampleRate = sampleRate, maxFrequency = sampleRate/2, minFrequency = 0, orderBy = 'magnitude') hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) frame = fc(audio) while len(frame) != 0:
suite = allTests(TestHPCP) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
spectrum = specAlg(windowingAlg(frame)) (freqs, mags) = sPeaksAlg(spectrum) hpcp = hpcpAlg(freqs,mags) self.assertTrue(not any(numpy.isnan(hpcp))) self.assertTrue(not any(numpy.isinf(hpcp))) frame = fc(audio)
conditional_block
test_hpcp.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestHPCP(TestCase): def testEmpty(self): hpcp = HPCP()([], []) self.assertEqualVector(hpcp, [0.]*12) def testZeros(self): hpcp = HPCP()([0]*10, [0]*10) self.assertEqualVector(hpcp, [0.]*12) def testSin440(self): # Tests whether a real audio signal of one pure tone gets read as a # single semitone activation, and gets read into the right pcp bin sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'), sampleRate = sampleRate)() speaks = SpectralPeaks(sampleRate = sampleRate, maxPeaks = 1, maxFrequency = sampleRate/2, minFrequency = 0, magnitudeThreshold = 0, orderBy = 'magnitude') (freqs, mags) = speaks(Spectrum()(audio)) hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testAllSemitones(self): # Tests whether a spectral peak output of 12 consecutive semitones # yields a HPCP of all 1's tonic = 440 freqs = [(tonic * 2**(x/12.)) for x in range(12)] mags = [1] * 12 hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) def testSubmediantPosition(self): # Make sure that the submediant of a key based on 440 is in the # correct location (submediant was randomly selected from all the # tones) tonic = 440 submediant = tonic * 2**(9./12.) hpcp = HPCP()([submediant], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.]) def testMaxShifted(self): # Tests whether a HPCP reading with only the dominant semitone # activated is correctly shifted so that the dominant is at the # position 0 tonic = 440 dominant = tonic * 2**(7./12.) hpcp = HPCP(maxShifted=True)([dominant], [1]) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def chordHelper(self, half_steps, tunning, strength): notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))] hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength) for i in range(len(hpcp)): if i in half_steps: self.assertTrue(hpcp[i]>0) elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0) else: self.assertEqual(hpcp[i], 0) def testChord(self): tunning = 440 AMajor = [0, 4, 7] # AMajor = A4-C#5-E5 self.chordHelper(AMajor, tunning, [1,1,1]) CMajor = [3, -4, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,1,1]) CMajor = [-4, 3, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [-4, -2, 3] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [3, 8, 10] # CMajor = C5-F5-G5 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) AMinor = [0, 3, 7] # AMinor = A4-C5-E5 self.chordHelper(AMinor, tunning, [1,0.5,0.2]) CMinor = [3, 6, 10] # CMinor = C5-E5-G5 self.chordHelper(CMinor, tunning, [1,0.5,0.2]) # Test of various parameter logical bounds def testLowFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testHighFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testSmallMinRange(self): self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200}) def testSmallMaxRange(self): self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000}) def testSmallMinMaxRange(self): self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1}) def
(self): self.assertConfigureFails(HPCP(), {'size':13}) def testHarmonics(self): # Regression test for the 'harmonics' parameter tone = 100. # arbitrary frequency [Hz] freqs = [tone, tone*2, tone*3, tone*4] mags = [1]*4 hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) hpcp = hpcpAlg(freqs, mags) expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.] self.assertAlmostEqualVector(hpcp, expected, 1e-4) def testRegression(self): # Just makes sure algorithm does not crash on a real data source. This # test is not really looking for correctness. Maybe consider revising # it. inputSize = 512 sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')), sampleRate = sampleRate)() fc = FrameCutter(frameSize = inputSize, hopSize = inputSize) windowingAlg = Windowing(type = 'blackmanharris62') specAlg = Spectrum(size=inputSize) sPeaksAlg = SpectralPeaks(sampleRate = sampleRate, maxFrequency = sampleRate/2, minFrequency = 0, orderBy = 'magnitude') hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) frame = fc(audio) while len(frame) != 0: spectrum = specAlg(windowingAlg(frame)) (freqs, mags) = sPeaksAlg(spectrum) hpcp = hpcpAlg(freqs,mags) self.assertTrue(not any(numpy.isnan(hpcp))) self.assertTrue(not any(numpy.isinf(hpcp))) frame = fc(audio) suite = allTests(TestHPCP) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
testSizeNonmultiple12
identifier_name
test_hpcp.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your
# details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestHPCP(TestCase): def testEmpty(self): hpcp = HPCP()([], []) self.assertEqualVector(hpcp, [0.]*12) def testZeros(self): hpcp = HPCP()([0]*10, [0]*10) self.assertEqualVector(hpcp, [0.]*12) def testSin440(self): # Tests whether a real audio signal of one pure tone gets read as a # single semitone activation, and gets read into the right pcp bin sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'), sampleRate = sampleRate)() speaks = SpectralPeaks(sampleRate = sampleRate, maxPeaks = 1, maxFrequency = sampleRate/2, minFrequency = 0, magnitudeThreshold = 0, orderBy = 'magnitude') (freqs, mags) = speaks(Spectrum()(audio)) hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testAllSemitones(self): # Tests whether a spectral peak output of 12 consecutive semitones # yields a HPCP of all 1's tonic = 440 freqs = [(tonic * 2**(x/12.)) for x in range(12)] mags = [1] * 12 hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) def testSubmediantPosition(self): # Make sure that the submediant of a key based on 440 is in the # correct location (submediant was randomly selected from all the # tones) tonic = 440 submediant = tonic * 2**(9./12.) hpcp = HPCP()([submediant], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.]) def testMaxShifted(self): # Tests whether a HPCP reading with only the dominant semitone # activated is correctly shifted so that the dominant is at the # position 0 tonic = 440 dominant = tonic * 2**(7./12.) hpcp = HPCP(maxShifted=True)([dominant], [1]) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def chordHelper(self, half_steps, tunning, strength): notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))] hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength) for i in range(len(hpcp)): if i in half_steps: self.assertTrue(hpcp[i]>0) elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0) else: self.assertEqual(hpcp[i], 0) def testChord(self): tunning = 440 AMajor = [0, 4, 7] # AMajor = A4-C#5-E5 self.chordHelper(AMajor, tunning, [1,1,1]) CMajor = [3, -4, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,1,1]) CMajor = [-4, 3, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [-4, -2, 3] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [3, 8, 10] # CMajor = C5-F5-G5 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) AMinor = [0, 3, 7] # AMinor = A4-C5-E5 self.chordHelper(AMinor, tunning, [1,0.5,0.2]) CMinor = [3, 6, 10] # CMinor = C5-E5-G5 self.chordHelper(CMinor, tunning, [1,0.5,0.2]) # Test of various parameter logical bounds def testLowFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testHighFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testSmallMinRange(self): self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200}) def testSmallMaxRange(self): self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000}) def testSmallMinMaxRange(self): self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1}) def testSizeNonmultiple12(self): self.assertConfigureFails(HPCP(), {'size':13}) def testHarmonics(self): # Regression test for the 'harmonics' parameter tone = 100. # arbitrary frequency [Hz] freqs = [tone, tone*2, tone*3, tone*4] mags = [1]*4 hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) hpcp = hpcpAlg(freqs, mags) expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.] self.assertAlmostEqualVector(hpcp, expected, 1e-4) def testRegression(self): # Just makes sure algorithm does not crash on a real data source. This # test is not really looking for correctness. Maybe consider revising # it. inputSize = 512 sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')), sampleRate = sampleRate)() fc = FrameCutter(frameSize = inputSize, hopSize = inputSize) windowingAlg = Windowing(type = 'blackmanharris62') specAlg = Spectrum(size=inputSize) sPeaksAlg = SpectralPeaks(sampleRate = sampleRate, maxFrequency = sampleRate/2, minFrequency = 0, orderBy = 'magnitude') hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) frame = fc(audio) while len(frame) != 0: spectrum = specAlg(windowingAlg(frame)) (freqs, mags) = sPeaksAlg(spectrum) hpcp = hpcpAlg(freqs,mags) self.assertTrue(not any(numpy.isnan(hpcp))) self.assertTrue(not any(numpy.isinf(hpcp))) frame = fc(audio) suite = allTests(TestHPCP) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
# option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
random_line_split
test_hpcp.py
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestHPCP(TestCase): def testEmpty(self): hpcp = HPCP()([], []) self.assertEqualVector(hpcp, [0.]*12) def testZeros(self): hpcp = HPCP()([0]*10, [0]*10) self.assertEqualVector(hpcp, [0.]*12) def testSin440(self): # Tests whether a real audio signal of one pure tone gets read as a # single semitone activation, and gets read into the right pcp bin sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, 'generated/synthesised/sin440_0db.wav'), sampleRate = sampleRate)() speaks = SpectralPeaks(sampleRate = sampleRate, maxPeaks = 1, maxFrequency = sampleRate/2, minFrequency = 0, magnitudeThreshold = 0, orderBy = 'magnitude') (freqs, mags) = speaks(Spectrum()(audio)) hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testAllSemitones(self): # Tests whether a spectral peak output of 12 consecutive semitones # yields a HPCP of all 1's tonic = 440 freqs = [(tonic * 2**(x/12.)) for x in range(12)] mags = [1] * 12 hpcp = HPCP()(freqs, mags) self.assertEqualVector(hpcp, [1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.,1.]) def testSubmediantPosition(self): # Make sure that the submediant of a key based on 440 is in the # correct location (submediant was randomly selected from all the # tones) tonic = 440 submediant = tonic * 2**(9./12.) hpcp = HPCP()([submediant], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,0.,0.]) def testMaxShifted(self): # Tests whether a HPCP reading with only the dominant semitone # activated is correctly shifted so that the dominant is at the # position 0 tonic = 440 dominant = tonic * 2**(7./12.) hpcp = HPCP(maxShifted=True)([dominant], [1]) self.assertEqualVector(hpcp, [1.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def chordHelper(self, half_steps, tunning, strength): notes = [tunning*(2.**(half_steps[i]/12.)) for i in range(len(half_steps))] hpcp = HPCP(maxShifted=False)([notes[0], notes[1], notes[2]], strength) for i in range(len(hpcp)): if i in half_steps: self.assertTrue(hpcp[i]>0) elif (i - 12) in half_steps: self.assertTrue(hpcp[i]>0) else: self.assertEqual(hpcp[i], 0) def testChord(self): tunning = 440 AMajor = [0, 4, 7] # AMajor = A4-C#5-E5 self.chordHelper(AMajor, tunning, [1,1,1]) CMajor = [3, -4, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,1,1]) CMajor = [-4, 3, -2] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [-4, -2, 3] # CMajor = C5-F4-G4 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) CMajor = [3, 8, 10] # CMajor = C5-F5-G5 self.chordHelper(CMajor, tunning, [1,0.5,0.2]) AMinor = [0, 3, 7] # AMinor = A4-C5-E5 self.chordHelper(AMinor, tunning, [1,0.5,0.2]) CMinor = [3, 6, 10] # CMinor = C5-E5-G5 self.chordHelper(CMinor, tunning, [1,0.5,0.2]) # Test of various parameter logical bounds def testLowFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([99], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testHighFrequency(self): hpcp = HPCP(minFrequency=100, maxFrequency=1000)([1001], [1]) self.assertEqualVector(hpcp, [0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.]) def testSmallMinRange(self): self.assertConfigureFails(HPCP(), {'minFrequency':1, 'splitFrequency':200}) def testSmallMaxRange(self):
def testSmallMinMaxRange(self): self.assertConfigureFails(HPCP(), {'bandPreset':False, 'maxFrequency':200, 'minFrequency':1}) def testSizeNonmultiple12(self): self.assertConfigureFails(HPCP(), {'size':13}) def testHarmonics(self): # Regression test for the 'harmonics' parameter tone = 100. # arbitrary frequency [Hz] freqs = [tone, tone*2, tone*3, tone*4] mags = [1]*4 hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) hpcp = hpcpAlg(freqs, mags) expected = [0., 0., 0., 0.1340538263, 0., 0.2476127148, 0., 0., 0., 0., 1., 0.] self.assertAlmostEqualVector(hpcp, expected, 1e-4) def testRegression(self): # Just makes sure algorithm does not crash on a real data source. This # test is not really looking for correctness. Maybe consider revising # it. inputSize = 512 sampleRate = 44100 audio = MonoLoader(filename = join(testdata.audio_dir, join('recorded', 'musicbox.wav')), sampleRate = sampleRate)() fc = FrameCutter(frameSize = inputSize, hopSize = inputSize) windowingAlg = Windowing(type = 'blackmanharris62') specAlg = Spectrum(size=inputSize) sPeaksAlg = SpectralPeaks(sampleRate = sampleRate, maxFrequency = sampleRate/2, minFrequency = 0, orderBy = 'magnitude') hpcpAlg = HPCP(minFrequency=50, maxFrequency=500, bandPreset=False, harmonics=3) frame = fc(audio) while len(frame) != 0: spectrum = specAlg(windowingAlg(frame)) (freqs, mags) = sPeaksAlg(spectrum) hpcp = hpcpAlg(freqs,mags) self.assertTrue(not any(numpy.isnan(hpcp))) self.assertTrue(not any(numpy.isinf(hpcp))) frame = fc(audio) suite = allTests(TestHPCP) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
self.assertConfigureFails(HPCP(), {'maxFrequency':1199, 'splitFrequency':1000})
identifier_body
functions.py
# twitter/functions.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- import tweepy import wevote_functions.admin from config.base import get_environment_variable from exception.models import handle_exception from wevote_functions.functions import positive_value_exists logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL") TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY") TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET") TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN") TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET") TWITTER_USER_NOT_FOUND_LOG_RESPONSES = [ "{'code': 50, 'message': 'User not found.'}", "User not found." ] TWITTER_USER_SUSPENDED_LOG_RESPONSES = [ "{'code': 63, 'message': 'User has been suspended.'}", "User has been suspended." ] def
(twitter_user_id, twitter_handle=''): status = "" success = True twitter_user_not_found_in_twitter = False twitter_user_suspended_by_twitter = False write_to_server_logs = False # December 2021: Using the Twitter 1.1 API for OAuthHandler, since all other 2.0 apis that we need are not # yet available. # client = tweepy.Client( # consumer_key=TWITTER_CONSUMER_KEY, # consumer_secret=TWITTER_CONSUMER_SECRET, # access_token=TWITTER_ACCESS_TOKEN, # access_token_secret=TWITTER_ACCESS_TOKEN_SECRET) auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET) api = tweepy.API(auth, timeout=10) # Strip out the twitter handles "False" or "None" if twitter_handle is False: twitter_handle = '' elif twitter_handle is None: twitter_handle = '' elif twitter_handle: twitter_handle_lower = twitter_handle.lower() if twitter_handle_lower == 'false' or twitter_handle_lower == 'none': twitter_handle = '' twitter_handle_found = False twitter_json = {} from wevote_functions.functions import convert_to_int twitter_user_id = convert_to_int(twitter_user_id) try: if positive_value_exists(twitter_handle): twitter_user = api.get_user(screen_name=twitter_handle) twitter_json = twitter_user._json success = True # status += 'TWITTER_HANDLE_SUCCESS-' + str(twitter_handle) + " " twitter_handle_found = True twitter_user_id = twitter_user.id # Integer value. id_str would be the String value elif positive_value_exists(twitter_user_id): twitter_user = api.get_user(user_id=twitter_user_id) twitter_json = twitter_user._json success = True # status += 'TWITTER_USER_ID_SUCCESS-' + str(twitter_user_id) + " " twitter_handle_found = True else: twitter_json = {} success = False status += 'TWITTER_RETRIEVE_NOT_SUCCESSFUL-MISSING_VARIABLE ' twitter_handle_found = False except tweepy.TooManyRequests as rate_limit_error: success = False status += 'TWITTER_RATE_LIMIT_ERROR: ' + str(rate_limit_error) + " " handle_exception(rate_limit_error, logger=logger, exception_message=status) except tweepy.errors.HTTPException as error_instance: success = False status += 'TWITTER_HTTP_ERROR ' handle_exception(error_instance, logger=logger, exception_message=status) except tweepy.errors.TweepyException as error_instance: success = False status += "[TWEEP_ERROR: " status += twitter_handle + " " if positive_value_exists(twitter_handle) else "" status += str(twitter_user_id) + " " if positive_value_exists(twitter_user_id) else " " if error_instance: status += str(error_instance) + " " if error_instance and hasattr(error_instance, 'args'): try: error_tuple = error_instance.args for error_dict in error_tuple: for one_error in error_dict: status += '[' + one_error['message'] + '] ' if one_error['message'] in TWITTER_USER_NOT_FOUND_LOG_RESPONSES: twitter_user_not_found_in_twitter = True elif one_error['message'] in TWITTER_USER_SUSPENDED_LOG_RESPONSES: twitter_user_suspended_by_twitter = True else: write_to_server_logs = True except Exception as e: status += "PROBLEM_PARSING_TWEEPY_ERROR: " + str(e) + " " write_to_server_logs = True else: write_to_server_logs = True status += "]" if write_to_server_logs: handle_exception(error_instance, logger=logger, exception_message=status) except Exception as e: success = False status += "TWEEPY_EXCEPTION: " + str(e) + " " handle_exception(e, logger=logger, exception_message=status) try: if positive_value_exists(twitter_json.get('profile_banner_url')): # Dec 2019, https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners banner = twitter_json.get('profile_banner_url') + '/1500x500' twitter_json['profile_banner_url'] = banner except Exception as e: status += "FAILED_PROFILE_BANNER_URL: " + str(e) + " " results = { 'status': status, 'success': success, 'twitter_handle': twitter_handle, 'twitter_handle_found': twitter_handle_found, 'twitter_json': twitter_json, 'twitter_user_id': twitter_user_id, 'twitter_user_not_found_in_twitter': twitter_user_not_found_in_twitter, 'twitter_user_suspended_by_twitter': twitter_user_suspended_by_twitter, } return results
retrieve_twitter_user_info
identifier_name
functions.py
# twitter/functions.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- import tweepy import wevote_functions.admin from config.base import get_environment_variable from exception.models import handle_exception from wevote_functions.functions import positive_value_exists logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL") TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY") TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET") TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN") TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET") TWITTER_USER_NOT_FOUND_LOG_RESPONSES = [ "{'code': 50, 'message': 'User not found.'}", "User not found." ] TWITTER_USER_SUSPENDED_LOG_RESPONSES = [ "{'code': 63, 'message': 'User has been suspended.'}", "User has been suspended." ] def retrieve_twitter_user_info(twitter_user_id, twitter_handle=''): status = "" success = True twitter_user_not_found_in_twitter = False twitter_user_suspended_by_twitter = False write_to_server_logs = False # December 2021: Using the Twitter 1.1 API for OAuthHandler, since all other 2.0 apis that we need are not # yet available. # client = tweepy.Client( # consumer_key=TWITTER_CONSUMER_KEY, # consumer_secret=TWITTER_CONSUMER_SECRET, # access_token=TWITTER_ACCESS_TOKEN, # access_token_secret=TWITTER_ACCESS_TOKEN_SECRET) auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET) api = tweepy.API(auth, timeout=10) # Strip out the twitter handles "False" or "None" if twitter_handle is False: twitter_handle = '' elif twitter_handle is None: twitter_handle = '' elif twitter_handle: twitter_handle_lower = twitter_handle.lower() if twitter_handle_lower == 'false' or twitter_handle_lower == 'none':
twitter_handle_found = False twitter_json = {} from wevote_functions.functions import convert_to_int twitter_user_id = convert_to_int(twitter_user_id) try: if positive_value_exists(twitter_handle): twitter_user = api.get_user(screen_name=twitter_handle) twitter_json = twitter_user._json success = True # status += 'TWITTER_HANDLE_SUCCESS-' + str(twitter_handle) + " " twitter_handle_found = True twitter_user_id = twitter_user.id # Integer value. id_str would be the String value elif positive_value_exists(twitter_user_id): twitter_user = api.get_user(user_id=twitter_user_id) twitter_json = twitter_user._json success = True # status += 'TWITTER_USER_ID_SUCCESS-' + str(twitter_user_id) + " " twitter_handle_found = True else: twitter_json = {} success = False status += 'TWITTER_RETRIEVE_NOT_SUCCESSFUL-MISSING_VARIABLE ' twitter_handle_found = False except tweepy.TooManyRequests as rate_limit_error: success = False status += 'TWITTER_RATE_LIMIT_ERROR: ' + str(rate_limit_error) + " " handle_exception(rate_limit_error, logger=logger, exception_message=status) except tweepy.errors.HTTPException as error_instance: success = False status += 'TWITTER_HTTP_ERROR ' handle_exception(error_instance, logger=logger, exception_message=status) except tweepy.errors.TweepyException as error_instance: success = False status += "[TWEEP_ERROR: " status += twitter_handle + " " if positive_value_exists(twitter_handle) else "" status += str(twitter_user_id) + " " if positive_value_exists(twitter_user_id) else " " if error_instance: status += str(error_instance) + " " if error_instance and hasattr(error_instance, 'args'): try: error_tuple = error_instance.args for error_dict in error_tuple: for one_error in error_dict: status += '[' + one_error['message'] + '] ' if one_error['message'] in TWITTER_USER_NOT_FOUND_LOG_RESPONSES: twitter_user_not_found_in_twitter = True elif one_error['message'] in TWITTER_USER_SUSPENDED_LOG_RESPONSES: twitter_user_suspended_by_twitter = True else: write_to_server_logs = True except Exception as e: status += "PROBLEM_PARSING_TWEEPY_ERROR: " + str(e) + " " write_to_server_logs = True else: write_to_server_logs = True status += "]" if write_to_server_logs: handle_exception(error_instance, logger=logger, exception_message=status) except Exception as e: success = False status += "TWEEPY_EXCEPTION: " + str(e) + " " handle_exception(e, logger=logger, exception_message=status) try: if positive_value_exists(twitter_json.get('profile_banner_url')): # Dec 2019, https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners banner = twitter_json.get('profile_banner_url') + '/1500x500' twitter_json['profile_banner_url'] = banner except Exception as e: status += "FAILED_PROFILE_BANNER_URL: " + str(e) + " " results = { 'status': status, 'success': success, 'twitter_handle': twitter_handle, 'twitter_handle_found': twitter_handle_found, 'twitter_json': twitter_json, 'twitter_user_id': twitter_user_id, 'twitter_user_not_found_in_twitter': twitter_user_not_found_in_twitter, 'twitter_user_suspended_by_twitter': twitter_user_suspended_by_twitter, } return results
twitter_handle = ''
conditional_block
functions.py
# twitter/functions.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- import tweepy import wevote_functions.admin from config.base import get_environment_variable from exception.models import handle_exception from wevote_functions.functions import positive_value_exists logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL") TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY") TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET") TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN") TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET") TWITTER_USER_NOT_FOUND_LOG_RESPONSES = [ "{'code': 50, 'message': 'User not found.'}", "User not found." ] TWITTER_USER_SUSPENDED_LOG_RESPONSES = [ "{'code': 63, 'message': 'User has been suspended.'}", "User has been suspended." ] def retrieve_twitter_user_info(twitter_user_id, twitter_handle=''):
status = "" success = True twitter_user_not_found_in_twitter = False twitter_user_suspended_by_twitter = False write_to_server_logs = False # December 2021: Using the Twitter 1.1 API for OAuthHandler, since all other 2.0 apis that we need are not # yet available. # client = tweepy.Client( # consumer_key=TWITTER_CONSUMER_KEY, # consumer_secret=TWITTER_CONSUMER_SECRET, # access_token=TWITTER_ACCESS_TOKEN, # access_token_secret=TWITTER_ACCESS_TOKEN_SECRET) auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET) api = tweepy.API(auth, timeout=10) # Strip out the twitter handles "False" or "None" if twitter_handle is False: twitter_handle = '' elif twitter_handle is None: twitter_handle = '' elif twitter_handle: twitter_handle_lower = twitter_handle.lower() if twitter_handle_lower == 'false' or twitter_handle_lower == 'none': twitter_handle = '' twitter_handle_found = False twitter_json = {} from wevote_functions.functions import convert_to_int twitter_user_id = convert_to_int(twitter_user_id) try: if positive_value_exists(twitter_handle): twitter_user = api.get_user(screen_name=twitter_handle) twitter_json = twitter_user._json success = True # status += 'TWITTER_HANDLE_SUCCESS-' + str(twitter_handle) + " " twitter_handle_found = True twitter_user_id = twitter_user.id # Integer value. id_str would be the String value elif positive_value_exists(twitter_user_id): twitter_user = api.get_user(user_id=twitter_user_id) twitter_json = twitter_user._json success = True # status += 'TWITTER_USER_ID_SUCCESS-' + str(twitter_user_id) + " " twitter_handle_found = True else: twitter_json = {} success = False status += 'TWITTER_RETRIEVE_NOT_SUCCESSFUL-MISSING_VARIABLE ' twitter_handle_found = False except tweepy.TooManyRequests as rate_limit_error: success = False status += 'TWITTER_RATE_LIMIT_ERROR: ' + str(rate_limit_error) + " " handle_exception(rate_limit_error, logger=logger, exception_message=status) except tweepy.errors.HTTPException as error_instance: success = False status += 'TWITTER_HTTP_ERROR ' handle_exception(error_instance, logger=logger, exception_message=status) except tweepy.errors.TweepyException as error_instance: success = False status += "[TWEEP_ERROR: " status += twitter_handle + " " if positive_value_exists(twitter_handle) else "" status += str(twitter_user_id) + " " if positive_value_exists(twitter_user_id) else " " if error_instance: status += str(error_instance) + " " if error_instance and hasattr(error_instance, 'args'): try: error_tuple = error_instance.args for error_dict in error_tuple: for one_error in error_dict: status += '[' + one_error['message'] + '] ' if one_error['message'] in TWITTER_USER_NOT_FOUND_LOG_RESPONSES: twitter_user_not_found_in_twitter = True elif one_error['message'] in TWITTER_USER_SUSPENDED_LOG_RESPONSES: twitter_user_suspended_by_twitter = True else: write_to_server_logs = True except Exception as e: status += "PROBLEM_PARSING_TWEEPY_ERROR: " + str(e) + " " write_to_server_logs = True else: write_to_server_logs = True status += "]" if write_to_server_logs: handle_exception(error_instance, logger=logger, exception_message=status) except Exception as e: success = False status += "TWEEPY_EXCEPTION: " + str(e) + " " handle_exception(e, logger=logger, exception_message=status) try: if positive_value_exists(twitter_json.get('profile_banner_url')): # Dec 2019, https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners banner = twitter_json.get('profile_banner_url') + '/1500x500' twitter_json['profile_banner_url'] = banner except Exception as e: status += "FAILED_PROFILE_BANNER_URL: " + str(e) + " " results = { 'status': status, 'success': success, 'twitter_handle': twitter_handle, 'twitter_handle_found': twitter_handle_found, 'twitter_json': twitter_json, 'twitter_user_id': twitter_user_id, 'twitter_user_not_found_in_twitter': twitter_user_not_found_in_twitter, 'twitter_user_suspended_by_twitter': twitter_user_suspended_by_twitter, } return results
identifier_body
functions.py
# twitter/functions.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- import tweepy import wevote_functions.admin from config.base import get_environment_variable from exception.models import handle_exception from wevote_functions.functions import positive_value_exists logger = wevote_functions.admin.get_logger(__name__) WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL") TWITTER_CONSUMER_KEY = get_environment_variable("TWITTER_CONSUMER_KEY") TWITTER_CONSUMER_SECRET = get_environment_variable("TWITTER_CONSUMER_SECRET") TWITTER_ACCESS_TOKEN = get_environment_variable("TWITTER_ACCESS_TOKEN") TWITTER_ACCESS_TOKEN_SECRET = get_environment_variable("TWITTER_ACCESS_TOKEN_SECRET") TWITTER_USER_NOT_FOUND_LOG_RESPONSES = [ "{'code': 50, 'message': 'User not found.'}", "User not found." ] TWITTER_USER_SUSPENDED_LOG_RESPONSES = [ "{'code': 63, 'message': 'User has been suspended.'}", "User has been suspended." ] def retrieve_twitter_user_info(twitter_user_id, twitter_handle=''): status = "" success = True twitter_user_not_found_in_twitter = False twitter_user_suspended_by_twitter = False write_to_server_logs = False # December 2021: Using the Twitter 1.1 API for OAuthHandler, since all other 2.0 apis that we need are not # yet available.
# access_token=TWITTER_ACCESS_TOKEN, # access_token_secret=TWITTER_ACCESS_TOKEN_SECRET) auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET) auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET) api = tweepy.API(auth, timeout=10) # Strip out the twitter handles "False" or "None" if twitter_handle is False: twitter_handle = '' elif twitter_handle is None: twitter_handle = '' elif twitter_handle: twitter_handle_lower = twitter_handle.lower() if twitter_handle_lower == 'false' or twitter_handle_lower == 'none': twitter_handle = '' twitter_handle_found = False twitter_json = {} from wevote_functions.functions import convert_to_int twitter_user_id = convert_to_int(twitter_user_id) try: if positive_value_exists(twitter_handle): twitter_user = api.get_user(screen_name=twitter_handle) twitter_json = twitter_user._json success = True # status += 'TWITTER_HANDLE_SUCCESS-' + str(twitter_handle) + " " twitter_handle_found = True twitter_user_id = twitter_user.id # Integer value. id_str would be the String value elif positive_value_exists(twitter_user_id): twitter_user = api.get_user(user_id=twitter_user_id) twitter_json = twitter_user._json success = True # status += 'TWITTER_USER_ID_SUCCESS-' + str(twitter_user_id) + " " twitter_handle_found = True else: twitter_json = {} success = False status += 'TWITTER_RETRIEVE_NOT_SUCCESSFUL-MISSING_VARIABLE ' twitter_handle_found = False except tweepy.TooManyRequests as rate_limit_error: success = False status += 'TWITTER_RATE_LIMIT_ERROR: ' + str(rate_limit_error) + " " handle_exception(rate_limit_error, logger=logger, exception_message=status) except tweepy.errors.HTTPException as error_instance: success = False status += 'TWITTER_HTTP_ERROR ' handle_exception(error_instance, logger=logger, exception_message=status) except tweepy.errors.TweepyException as error_instance: success = False status += "[TWEEP_ERROR: " status += twitter_handle + " " if positive_value_exists(twitter_handle) else "" status += str(twitter_user_id) + " " if positive_value_exists(twitter_user_id) else " " if error_instance: status += str(error_instance) + " " if error_instance and hasattr(error_instance, 'args'): try: error_tuple = error_instance.args for error_dict in error_tuple: for one_error in error_dict: status += '[' + one_error['message'] + '] ' if one_error['message'] in TWITTER_USER_NOT_FOUND_LOG_RESPONSES: twitter_user_not_found_in_twitter = True elif one_error['message'] in TWITTER_USER_SUSPENDED_LOG_RESPONSES: twitter_user_suspended_by_twitter = True else: write_to_server_logs = True except Exception as e: status += "PROBLEM_PARSING_TWEEPY_ERROR: " + str(e) + " " write_to_server_logs = True else: write_to_server_logs = True status += "]" if write_to_server_logs: handle_exception(error_instance, logger=logger, exception_message=status) except Exception as e: success = False status += "TWEEPY_EXCEPTION: " + str(e) + " " handle_exception(e, logger=logger, exception_message=status) try: if positive_value_exists(twitter_json.get('profile_banner_url')): # Dec 2019, https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners banner = twitter_json.get('profile_banner_url') + '/1500x500' twitter_json['profile_banner_url'] = banner except Exception as e: status += "FAILED_PROFILE_BANNER_URL: " + str(e) + " " results = { 'status': status, 'success': success, 'twitter_handle': twitter_handle, 'twitter_handle_found': twitter_handle_found, 'twitter_json': twitter_json, 'twitter_user_id': twitter_user_id, 'twitter_user_not_found_in_twitter': twitter_user_not_found_in_twitter, 'twitter_user_suspended_by_twitter': twitter_user_suspended_by_twitter, } return results
# client = tweepy.Client( # consumer_key=TWITTER_CONSUMER_KEY, # consumer_secret=TWITTER_CONSUMER_SECRET,
random_line_split
main.rs
// @gbersac, @adjivas - github.com/adjivas. See the LICENSE // file at the top-level directory of this distribution and at // https://github.com/adjivas/expert-system // // This file may not be copied, modified, or distributed // except according to those terms. extern crate regex; mod parser; mod parse_result; mod solver; mod ops; use std::fs::File; use std::env; use std::io::prelude::*; use parser::{Parser}; use ops::{Exp, Set, ImplyPtr}; use std::collections::HashMap; fn file_as_string(filename: &String) -> String
/// Return the file name to parse in this execution. fn args_parse() -> String { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("usage: {} file_name", args[0]); std::process::exit(1) } args[1].clone() } fn resolve_and_print( deps: &HashMap<char, ImplyPtr>, initial_facts: &Set ) { let initial_facts_str = initial_facts.true_fact_str(); println!("\nWith true facts : {}", initial_facts_str); for (key, instr) in deps { let mut final_facts = Set::new(); instr.borrow().solve(initial_facts, &mut final_facts); let value = final_facts.get_value(*key); println!("For {} value is {}", key, value); } } fn main () { let filename = args_parse(); let instructions_str = file_as_string(&filename); let parsed = Parser::parse(&instructions_str); if parsed.is_none() { println!("Parse error"); return ; } let parsed = parsed.unwrap(); let deps = solver::solve(&parsed); println!("Query dependences:"); for (key, value) in &deps { println!("For {} dependence tree is: {}", key, value.borrow().get_ident().unwrap()); } println!("\nSolution according to those dependences:"); for initial_facts in &parsed.initial_facts { resolve_and_print(&deps, initial_facts); } }
{ let mut f = File::open(filename).unwrap(); let mut s = String::new(); let _ = f.read_to_string(&mut s); s }
identifier_body
main.rs
// @gbersac, @adjivas - github.com/adjivas. See the LICENSE // file at the top-level directory of this distribution and at // https://github.com/adjivas/expert-system // // This file may not be copied, modified, or distributed // except according to those terms. extern crate regex; mod parser; mod parse_result; mod solver; mod ops; use std::fs::File; use std::env; use std::io::prelude::*; use parser::{Parser}; use ops::{Exp, Set, ImplyPtr}; use std::collections::HashMap; fn file_as_string(filename: &String) -> String { let mut f = File::open(filename).unwrap(); let mut s = String::new(); let _ = f.read_to_string(&mut s); s } /// Return the file name to parse in this execution. fn args_parse() -> String { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("usage: {} file_name", args[0]); std::process::exit(1) } args[1].clone() } fn resolve_and_print( deps: &HashMap<char, ImplyPtr>, initial_facts: &Set ) { let initial_facts_str = initial_facts.true_fact_str(); println!("\nWith true facts : {}", initial_facts_str); for (key, instr) in deps { let mut final_facts = Set::new(); instr.borrow().solve(initial_facts, &mut final_facts); let value = final_facts.get_value(*key); println!("For {} value is {}", key, value); } } fn main () { let filename = args_parse(); let instructions_str = file_as_string(&filename); let parsed = Parser::parse(&instructions_str); if parsed.is_none() { println!("Parse error"); return ; } let parsed = parsed.unwrap(); let deps = solver::solve(&parsed); println!("Query dependences:"); for (key, value) in &deps { println!("For {} dependence tree is: {}",
println!("\nSolution according to those dependences:"); for initial_facts in &parsed.initial_facts { resolve_and_print(&deps, initial_facts); } }
key, value.borrow().get_ident().unwrap()); }
random_line_split
main.rs
// @gbersac, @adjivas - github.com/adjivas. See the LICENSE // file at the top-level directory of this distribution and at // https://github.com/adjivas/expert-system // // This file may not be copied, modified, or distributed // except according to those terms. extern crate regex; mod parser; mod parse_result; mod solver; mod ops; use std::fs::File; use std::env; use std::io::prelude::*; use parser::{Parser}; use ops::{Exp, Set, ImplyPtr}; use std::collections::HashMap; fn file_as_string(filename: &String) -> String { let mut f = File::open(filename).unwrap(); let mut s = String::new(); let _ = f.read_to_string(&mut s); s } /// Return the file name to parse in this execution. fn args_parse() -> String { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("usage: {} file_name", args[0]); std::process::exit(1) } args[1].clone() } fn resolve_and_print( deps: &HashMap<char, ImplyPtr>, initial_facts: &Set ) { let initial_facts_str = initial_facts.true_fact_str(); println!("\nWith true facts : {}", initial_facts_str); for (key, instr) in deps { let mut final_facts = Set::new(); instr.borrow().solve(initial_facts, &mut final_facts); let value = final_facts.get_value(*key); println!("For {} value is {}", key, value); } } fn main () { let filename = args_parse(); let instructions_str = file_as_string(&filename); let parsed = Parser::parse(&instructions_str); if parsed.is_none()
let parsed = parsed.unwrap(); let deps = solver::solve(&parsed); println!("Query dependences:"); for (key, value) in &deps { println!("For {} dependence tree is: {}", key, value.borrow().get_ident().unwrap()); } println!("\nSolution according to those dependences:"); for initial_facts in &parsed.initial_facts { resolve_and_print(&deps, initial_facts); } }
{ println!("Parse error"); return ; }
conditional_block
main.rs
// @gbersac, @adjivas - github.com/adjivas. See the LICENSE // file at the top-level directory of this distribution and at // https://github.com/adjivas/expert-system // // This file may not be copied, modified, or distributed // except according to those terms. extern crate regex; mod parser; mod parse_result; mod solver; mod ops; use std::fs::File; use std::env; use std::io::prelude::*; use parser::{Parser}; use ops::{Exp, Set, ImplyPtr}; use std::collections::HashMap; fn file_as_string(filename: &String) -> String { let mut f = File::open(filename).unwrap(); let mut s = String::new(); let _ = f.read_to_string(&mut s); s } /// Return the file name to parse in this execution. fn
() -> String { let args: Vec<_> = env::args().collect(); if args.len() < 2 { println!("usage: {} file_name", args[0]); std::process::exit(1) } args[1].clone() } fn resolve_and_print( deps: &HashMap<char, ImplyPtr>, initial_facts: &Set ) { let initial_facts_str = initial_facts.true_fact_str(); println!("\nWith true facts : {}", initial_facts_str); for (key, instr) in deps { let mut final_facts = Set::new(); instr.borrow().solve(initial_facts, &mut final_facts); let value = final_facts.get_value(*key); println!("For {} value is {}", key, value); } } fn main () { let filename = args_parse(); let instructions_str = file_as_string(&filename); let parsed = Parser::parse(&instructions_str); if parsed.is_none() { println!("Parse error"); return ; } let parsed = parsed.unwrap(); let deps = solver::solve(&parsed); println!("Query dependences:"); for (key, value) in &deps { println!("For {} dependence tree is: {}", key, value.borrow().get_ident().unwrap()); } println!("\nSolution according to those dependences:"); for initial_facts in &parsed.initial_facts { resolve_and_print(&deps, initial_facts); } }
args_parse
identifier_name
regress-123437.js
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is JavaScript Engine testing utilities. * * The Initial Developer of the Original Code is * Netscape Communications Corp. * Portions created by the Initial Developer are Copyright (C) 2002 * the Initial Developer. All Rights Reserved. * * Contributor(s): * waldemar, rogerl, [email protected] * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ /* * * Date: 04 Feb 2002 * SUMMARY: regexp backreferences must hold |undefined| if not used * * See http://bugzilla.mozilla.org/show_bug.cgi?id=123437 (SpiderMonkey) * See http://bugzilla.mozilla.org/show_bug.cgi?id=123439 (Rhino) * */ //----------------------------------------------------------------------------- var i = 0; var BUGNUMBER = 123437; var summary = 'regexp backreferences must hold |undefined| if not used'; var status = ''; var statusmessages = new Array(); var pattern = ''; var patterns = new Array(); var string = ''; var strings = new Array(); var actualmatch = ''; var actualmatches = new Array(); var expectedmatch = ''; var expectedmatches = new Array(); pattern = /(a)?a/; string = 'a'; status = inSection(1); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /a|(b)/; string = 'a'; status = inSection(2); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /(a)?(a)/; string = 'a'; status = inSection(3); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined, 'a'); addThis(); //----------------------------------------------------------------------------- test(); //----------------------------------------------------------------------------- function addThis() { statusmessages[i] = status; patterns[i] = pattern; strings[i] = string; actualmatches[i] = actualmatch; expectedmatches[i] = expectedmatch; i++; } function
() { enterFunc ('test'); printBugNumber(BUGNUMBER); printStatus (summary); testRegExp(statusmessages, patterns, strings, actualmatches, expectedmatches); exitFunc ('test'); }
test
identifier_name
regress-123437.js
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is JavaScript Engine testing utilities. * * The Initial Developer of the Original Code is * Netscape Communications Corp. * Portions created by the Initial Developer are Copyright (C) 2002 * the Initial Developer. All Rights Reserved. * * Contributor(s): * waldemar, rogerl, [email protected] * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ /* * * Date: 04 Feb 2002 * SUMMARY: regexp backreferences must hold |undefined| if not used * * See http://bugzilla.mozilla.org/show_bug.cgi?id=123437 (SpiderMonkey) * See http://bugzilla.mozilla.org/show_bug.cgi?id=123439 (Rhino) * */ //----------------------------------------------------------------------------- var i = 0; var BUGNUMBER = 123437; var summary = 'regexp backreferences must hold |undefined| if not used'; var status = ''; var statusmessages = new Array(); var pattern = ''; var patterns = new Array(); var string = ''; var strings = new Array(); var actualmatch = ''; var actualmatches = new Array(); var expectedmatch = ''; var expectedmatches = new Array(); pattern = /(a)?a/; string = 'a'; status = inSection(1); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /a|(b)/; string = 'a'; status = inSection(2); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /(a)?(a)/; string = 'a'; status = inSection(3); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined, 'a'); addThis(); //----------------------------------------------------------------------------- test(); //----------------------------------------------------------------------------- function addThis() { statusmessages[i] = status; patterns[i] = pattern; strings[i] = string; actualmatches[i] = actualmatch; expectedmatches[i] = expectedmatch; i++; } function test()
{ enterFunc ('test'); printBugNumber(BUGNUMBER); printStatus (summary); testRegExp(statusmessages, patterns, strings, actualmatches, expectedmatches); exitFunc ('test'); }
identifier_body
regress-123437.js
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is JavaScript Engine testing utilities. * * The Initial Developer of the Original Code is
* * Contributor(s): * waldemar, rogerl, [email protected] * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ /* * * Date: 04 Feb 2002 * SUMMARY: regexp backreferences must hold |undefined| if not used * * See http://bugzilla.mozilla.org/show_bug.cgi?id=123437 (SpiderMonkey) * See http://bugzilla.mozilla.org/show_bug.cgi?id=123439 (Rhino) * */ //----------------------------------------------------------------------------- var i = 0; var BUGNUMBER = 123437; var summary = 'regexp backreferences must hold |undefined| if not used'; var status = ''; var statusmessages = new Array(); var pattern = ''; var patterns = new Array(); var string = ''; var strings = new Array(); var actualmatch = ''; var actualmatches = new Array(); var expectedmatch = ''; var expectedmatches = new Array(); pattern = /(a)?a/; string = 'a'; status = inSection(1); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /a|(b)/; string = 'a'; status = inSection(2); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined); addThis(); pattern = /(a)?(a)/; string = 'a'; status = inSection(3); actualmatch = string.match(pattern); expectedmatch = Array('a', undefined, 'a'); addThis(); //----------------------------------------------------------------------------- test(); //----------------------------------------------------------------------------- function addThis() { statusmessages[i] = status; patterns[i] = pattern; strings[i] = string; actualmatches[i] = actualmatch; expectedmatches[i] = expectedmatch; i++; } function test() { enterFunc ('test'); printBugNumber(BUGNUMBER); printStatus (summary); testRegExp(statusmessages, patterns, strings, actualmatches, expectedmatches); exitFunc ('test'); }
* Netscape Communications Corp. * Portions created by the Initial Developer are Copyright (C) 2002 * the Initial Developer. All Rights Reserved.
random_line_split
app.js
var express = require('express'); var path = require('path'); var favicon = require('serve-favicon'); var logger = require('morgan'); var cookieParser = require('cookie-parser');
var bodyParser = require('body-parser'); //var index = require('./routes/index'); //var users = require('./routes/users'); var pub_scr = require('./routes/dop.pubscr.js'); var app = express(); // view engine setup app.set('views', path.join(__dirname, 'views')); app.set('view engine', 'jade'); // uncomment after placing your favicon in /public //app.use(favicon(path.join(__dirname, 'public', 'favicon.ico'))); app.use(logger('dev')); app.use(bodyParser.json()); app.use(bodyParser.urlencoded({ extended: false })); app.use(cookieParser()); app.use(express.static(path.join(__dirname, 'public'))); app.use(express.static(path.join(__dirname, 'dop_dcdp2'))); app.use(express.static(path.join(__dirname, 'template'))); app.use(express.static(path.join(__dirname, 'uploads'))); //app.use('/', index); app.use('/pub_scr', pub_scr); // catch 404 and forward to error handler app.use(function(req, res, next) { var err = new Error('Not Found'); err.status = 404; next(err); }); // error handler app.use(function(err, req, res, next) { // set locals, only providing error in development res.locals.message = err.message; res.locals.error = req.app.get('env') === 'development' ? err : {}; // render the error page res.status(err.status || 500); res.render('error'); }); module.exports = app;
random_line_split
validate-release.ts
import {task} from 'gulp'; import {readFileSync} from 'fs'; import {join} from 'path'; import {green, red} from 'chalk'; import {releasePackages} from './publish'; import {sync as glob} from 'glob'; import {buildConfig, sequenceTask} from 'lib-build-tools'; /** Path to the directory where all releases are created. */ const releasesDir = join(buildConfig.outputDir, 'releases'); /** RegExp that matches Angular component inline styles that contain a sourcemap reference. */ const inlineStylesSourcemapRegex = /styles: ?\[["'].*sourceMappingURL=.*["']/; /** RegExp that matches Angular component metadata properties that refer to external resources. */ const externalReferencesRegex = /(templateUrl|styleUrls): *["'[]/; task('validate-release', sequenceTask(':publish:build-releases', 'validate-release:check-bundles')); /** Task that checks the release bundles for any common mistakes before releasing to the public. */ task('validate-release:check-bundles', () => { const releaseFailures = releasePackages .map(packageName => checkReleasePackage(packageName)) .map((failures, index) => ({failures, packageName: releasePackages[index]})); releaseFailures.forEach(({failures, packageName}) => { failures.forEach(failure => console.error(red(`Failure (${packageName}): ${failure}`))); }); if (releaseFailures.some(({failures}) => failures.length > 0)) { // Throw an error to notify Gulp about the failures that have been detected. throw 'Release output is not valid and not ready for being released.'; } else { console.log(green('Release output has been checked and everything looks fine.')); } }); /** Task that validates the given release package before releasing. */ function checkReleasePackage(packageName: string): string[]
/** * Checks an ES2015 bundle inside of a release package. Secondary entry-point bundles will be * checked as well. */ function checkEs2015ReleaseBundle(bundlePath: string): string[] { const bundleContent = readFileSync(bundlePath, 'utf8'); let failures: string[] = []; if (inlineStylesSourcemapRegex.exec(bundleContent) !== null) { failures.push('Bundles contain sourcemap references in component styles.'); } if (externalReferencesRegex.exec(bundleContent) !== null) { failures.push('Bundles are including references to external resources (templates or styles)'); } return failures; }
{ return glob(join(releasesDir, packageName, 'esm2015/*.js')) .reduce((failures: string[], bundlePath: string) => { return failures.concat(checkEs2015ReleaseBundle(bundlePath)); }, []); }
identifier_body
validate-release.ts
import {task} from 'gulp'; import {readFileSync} from 'fs'; import {join} from 'path'; import {green, red} from 'chalk'; import {releasePackages} from './publish'; import {sync as glob} from 'glob'; import {buildConfig, sequenceTask} from 'lib-build-tools'; /** Path to the directory where all releases are created. */ const releasesDir = join(buildConfig.outputDir, 'releases'); /** RegExp that matches Angular component inline styles that contain a sourcemap reference. */ const inlineStylesSourcemapRegex = /styles: ?\[["'].*sourceMappingURL=.*["']/; /** RegExp that matches Angular component metadata properties that refer to external resources. */ const externalReferencesRegex = /(templateUrl|styleUrls): *["'[]/; task('validate-release', sequenceTask(':publish:build-releases', 'validate-release:check-bundles')); /** Task that checks the release bundles for any common mistakes before releasing to the public. */ task('validate-release:check-bundles', () => {
.map((failures, index) => ({failures, packageName: releasePackages[index]})); releaseFailures.forEach(({failures, packageName}) => { failures.forEach(failure => console.error(red(`Failure (${packageName}): ${failure}`))); }); if (releaseFailures.some(({failures}) => failures.length > 0)) { // Throw an error to notify Gulp about the failures that have been detected. throw 'Release output is not valid and not ready for being released.'; } else { console.log(green('Release output has been checked and everything looks fine.')); } }); /** Task that validates the given release package before releasing. */ function checkReleasePackage(packageName: string): string[] { return glob(join(releasesDir, packageName, 'esm2015/*.js')) .reduce((failures: string[], bundlePath: string) => { return failures.concat(checkEs2015ReleaseBundle(bundlePath)); }, []); } /** * Checks an ES2015 bundle inside of a release package. Secondary entry-point bundles will be * checked as well. */ function checkEs2015ReleaseBundle(bundlePath: string): string[] { const bundleContent = readFileSync(bundlePath, 'utf8'); let failures: string[] = []; if (inlineStylesSourcemapRegex.exec(bundleContent) !== null) { failures.push('Bundles contain sourcemap references in component styles.'); } if (externalReferencesRegex.exec(bundleContent) !== null) { failures.push('Bundles are including references to external resources (templates or styles)'); } return failures; }
const releaseFailures = releasePackages .map(packageName => checkReleasePackage(packageName))
random_line_split
validate-release.ts
import {task} from 'gulp'; import {readFileSync} from 'fs'; import {join} from 'path'; import {green, red} from 'chalk'; import {releasePackages} from './publish'; import {sync as glob} from 'glob'; import {buildConfig, sequenceTask} from 'lib-build-tools'; /** Path to the directory where all releases are created. */ const releasesDir = join(buildConfig.outputDir, 'releases'); /** RegExp that matches Angular component inline styles that contain a sourcemap reference. */ const inlineStylesSourcemapRegex = /styles: ?\[["'].*sourceMappingURL=.*["']/; /** RegExp that matches Angular component metadata properties that refer to external resources. */ const externalReferencesRegex = /(templateUrl|styleUrls): *["'[]/; task('validate-release', sequenceTask(':publish:build-releases', 'validate-release:check-bundles')); /** Task that checks the release bundles for any common mistakes before releasing to the public. */ task('validate-release:check-bundles', () => { const releaseFailures = releasePackages .map(packageName => checkReleasePackage(packageName)) .map((failures, index) => ({failures, packageName: releasePackages[index]})); releaseFailures.forEach(({failures, packageName}) => { failures.forEach(failure => console.error(red(`Failure (${packageName}): ${failure}`))); }); if (releaseFailures.some(({failures}) => failures.length > 0)) { // Throw an error to notify Gulp about the failures that have been detected. throw 'Release output is not valid and not ready for being released.'; } else { console.log(green('Release output has been checked and everything looks fine.')); } }); /** Task that validates the given release package before releasing. */ function checkReleasePackage(packageName: string): string[] { return glob(join(releasesDir, packageName, 'esm2015/*.js')) .reduce((failures: string[], bundlePath: string) => { return failures.concat(checkEs2015ReleaseBundle(bundlePath)); }, []); } /** * Checks an ES2015 bundle inside of a release package. Secondary entry-point bundles will be * checked as well. */ function
(bundlePath: string): string[] { const bundleContent = readFileSync(bundlePath, 'utf8'); let failures: string[] = []; if (inlineStylesSourcemapRegex.exec(bundleContent) !== null) { failures.push('Bundles contain sourcemap references in component styles.'); } if (externalReferencesRegex.exec(bundleContent) !== null) { failures.push('Bundles are including references to external resources (templates or styles)'); } return failures; }
checkEs2015ReleaseBundle
identifier_name
validate-release.ts
import {task} from 'gulp'; import {readFileSync} from 'fs'; import {join} from 'path'; import {green, red} from 'chalk'; import {releasePackages} from './publish'; import {sync as glob} from 'glob'; import {buildConfig, sequenceTask} from 'lib-build-tools'; /** Path to the directory where all releases are created. */ const releasesDir = join(buildConfig.outputDir, 'releases'); /** RegExp that matches Angular component inline styles that contain a sourcemap reference. */ const inlineStylesSourcemapRegex = /styles: ?\[["'].*sourceMappingURL=.*["']/; /** RegExp that matches Angular component metadata properties that refer to external resources. */ const externalReferencesRegex = /(templateUrl|styleUrls): *["'[]/; task('validate-release', sequenceTask(':publish:build-releases', 'validate-release:check-bundles')); /** Task that checks the release bundles for any common mistakes before releasing to the public. */ task('validate-release:check-bundles', () => { const releaseFailures = releasePackages .map(packageName => checkReleasePackage(packageName)) .map((failures, index) => ({failures, packageName: releasePackages[index]})); releaseFailures.forEach(({failures, packageName}) => { failures.forEach(failure => console.error(red(`Failure (${packageName}): ${failure}`))); }); if (releaseFailures.some(({failures}) => failures.length > 0))
else { console.log(green('Release output has been checked and everything looks fine.')); } }); /** Task that validates the given release package before releasing. */ function checkReleasePackage(packageName: string): string[] { return glob(join(releasesDir, packageName, 'esm2015/*.js')) .reduce((failures: string[], bundlePath: string) => { return failures.concat(checkEs2015ReleaseBundle(bundlePath)); }, []); } /** * Checks an ES2015 bundle inside of a release package. Secondary entry-point bundles will be * checked as well. */ function checkEs2015ReleaseBundle(bundlePath: string): string[] { const bundleContent = readFileSync(bundlePath, 'utf8'); let failures: string[] = []; if (inlineStylesSourcemapRegex.exec(bundleContent) !== null) { failures.push('Bundles contain sourcemap references in component styles.'); } if (externalReferencesRegex.exec(bundleContent) !== null) { failures.push('Bundles are including references to external resources (templates or styles)'); } return failures; }
{ // Throw an error to notify Gulp about the failures that have been detected. throw 'Release output is not valid and not ready for being released.'; }
conditional_block
animate.me.d.ts
export interface AnimateMeOptions { readonly offset: number; readonly reverse: boolean; readonly animatedIn: string; readonly offsetAttr: string; readonly animationAttr: string; readonly touchDisabled: boolean; } export declare class
{ options: AnimateMeOptions; animated: HTMLElement[]; selector: string; private win; private winO; private winH; private offsets; private isTouchDevice; constructor(selector?: string, options?: Partial<AnimateMeOptions>); setCurrentScroll: () => void; setWindowDimensions: () => void; bind: () => void; unbind: () => void; cleanup: () => void; destroy: () => void; animate: () => void; setElements: () => void; updateOffsets: () => void; updateInstance: (shouldAnimate?: boolean) => void; private start; private listen; private scrollListener; private resizeListener; } export default AnimateMe;
AnimateMe
identifier_name
animate.me.d.ts
export interface AnimateMeOptions { readonly offset: number; readonly reverse: boolean; readonly animatedIn: string; readonly offsetAttr: string; readonly animationAttr: string; readonly touchDisabled: boolean; } export declare class AnimateMe { options: AnimateMeOptions; animated: HTMLElement[]; selector: string; private win; private winO; private winH; private offsets; private isTouchDevice; constructor(selector?: string, options?: Partial<AnimateMeOptions>); setCurrentScroll: () => void; setWindowDimensions: () => void; bind: () => void; unbind: () => void; cleanup: () => void; destroy: () => void; animate: () => void; setElements: () => void; updateOffsets: () => void; updateInstance: (shouldAnimate?: boolean) => void; private start;
private listen; private scrollListener; private resizeListener; } export default AnimateMe;
random_line_split
xypath.py
#!/usr/bin/env python """ musings on order of variables, x/y vs. col/row Everyone agrees that col 2, row 1 is (2,1) which is xy ordered. This works well with the name. Remember that the usual iterators (over a list-of-lists) is outer loop y first.""" from __future__ import absolute_import import re import messytables import os import six from six.moves import range from six.moves import zip try: import hamcrest have_ham = True except ImportError: have_ham = False import sys if sys.version_info >= (3, 6): import typing REGEX_PATTERN_TYPE = typing.Pattern else: REGEX_PATTERN_TYPE = re._pattern_type from collections import defaultdict from copy import copy from itertools import product, takewhile from xypath.contrib import excel as contrib_excel UP = (0, -1) RIGHT = (1, 0) DOWN = (0, 1) LEFT = (-1, 0) UP_RIGHT = (1, -1) DOWN_RIGHT = (1, 1) UP_LEFT = (-1, -1) DOWN_LEFT = (-1, 1) def cmp(x, y): if x<y: return -1 if x>y: return 1 return 0 class XYPathError(Exception): """Problems with spreadsheet layouts should raise this or a descendant.""" pass class JunctionError(RuntimeError, XYPathError): """Raised if paranoid _XYCell.junction finds it is returning one of the input cells - i.e. the input cells are in the same row or column""" pass class NoCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains zero cells.""" pass class MultipleCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains multiple cells.""" pass class LookupConfusionError(AssertionError, XYPathError): """Lookup found multiple equally-close headers""" pass class NoLookupError(AssertionError, XYPathError): """Lookup found no valid header""" pass def describe_filter_method(filter_by): if callable(filter_by): return "matching a function called {}".format(filter_by.__name__) if isinstance(filter_by, six.string_types): return "containing the string {!r}".format(filter_by) if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return "containing "+str(filter_by) if isinstance(filter_by, REGEX_PATTERN_TYPE): return "matching the regex {!r}".format(filter_by.pattern) else: return "which we're surprised we found at all" class _XYCell(object): """needs to contain: value, position (x,y), parent bag""" __slots__ = ['value', 'x', 'y', 'table', 'properties'] def __init__(self, value, x, y, table, properties=None): self.value = value # of appropriate type self.x = x # column number self.y = y # row number self.table = table if properties is None: self.properties = {} else: self.properties = properties def __hash__(self): """ In order to make a set of cells (used in Bag), they *must* be hashable. An _XYCell is uniquely identified (by sets, etc) through its position, content, and parent table. Note that `properties` is ignored since dicts are unhashable, and value may be redundant. """ return hash((self.value, self.x, self.y, self.table)) def __eq__(self, rhs): """See _XYCell.__hash__ for equality conditions""" return hash(self) == hash(rhs) def copy(self, new_table=None): """Make a copy of the cell. Its table will be new_table, if specified""" if new_table is None: new_table = self.table return _XYCell(self.value, self.x, self.y, new_table, self.properties) def __repr__(self): return "_XYCell(%r, %r, %r)" % \ (self.value, self.x, self.y) def __unicode__(self): return six.text_type(self.value) def lookup(self, header_bag, direction, strict=False): """ Given a single cell (usually a value), a bag containing the headers of a particular type for that cell, and the direction in which to search for the relevant header e.g. for value cell V, searching up: [ ] [ ] [O] [ ] ---> [ ] V [ ] [ ] the cell with the arrow will be returned. Strict restricts the selection to cells in the same row/column as the value, so O is selected instead.""" def mult(cell): return cell.x * direction[0] + cell.y * direction[1] def same_row_col(a, b, direction): return (a.x - b.x == 0 and direction[0] == 0) or \ (a.y - b.y == 0 and direction[1] == 0) best_cell = None second_best_cell = None for target_cell in header_bag.unordered_cells: if mult(self) <= mult(target_cell): if not best_cell or mult(target_cell) <= mult(best_cell): if not strict or same_row_col(self, target_cell, direction): second_best_cell = best_cell best_cell = target_cell if second_best_cell and mult(best_cell) == mult(second_best_cell): raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format( best_cell, second_best_cell, self)) if best_cell is None: raise NoLookupError("No lookup for {!r}".format(self)) return best_cell def junction(self, other, direction=DOWN, paranoid=True): """ gets the lower-right intersection of the row of one, and the column of the other. paranoid: should we panic if we're hitting one of our input cells?""" def junction_coord(cells, direction=DOWN): """ Under the hood: given two cells and a favoured direction, get the position of the cell with the column of one and the row of the other: A---->+ | ^ | | | | v | *<----B Both + and * are candidates for the junction of A and B - we take the one furthest down by default (specified by direction) >>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None)) >>> junction_coord(cells_dr, DOWN) (1, 4) >>> junction_coord(cells_dr, UP) (3, 2) >>> junction_coord(cells_dr, LEFT) (1, 4) >>> junction_coord(cells_dr, RIGHT) (3, 2) >>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None)) >>> junction_coord(cells_tr, DOWN) (3, 4) >>> junction_coord(cells_tr, UP) (1, 2) >>> junction_coord(cells_tr, LEFT) (1, 2) >>> junction_coord(cells_tr, RIGHT) (3, 4) """ new_cells = ( (cells[0].x, cells[1].y), (cells[1].x, cells[0].y) ) for index, value in enumerate(direction): if value == 0: continue if cmp(new_cells[0][index], new_cells[1][index]) == value: return new_cells[0] else: return new_cells[1] (x, y) = junction_coord((self, other), direction) if paranoid and (x, y) == (self.x, self.y) or \ (x, y) == (other.x, other.y): raise JunctionError( "_XYCell.junction(_XYCell) resulted in a cell which is equal" " to one of the input cells.\n" " self: {}\n other: {}\n x: {}\n y: {}".format( self, other, x, y)) junction_bag = self.table.get_at(x, y) if len(junction_bag) == 0: return self_bag = Bag(self.table) self_bag.add(self) other_bag = Bag(self.table) other_bag.add(other) yield (self_bag, other_bag, junction_bag) def shift(self, x=0, y=0): """Get the cell which is offset from this cell by x columns, y rows""" if not isinstance(x, int): assert y == 0, \ "_XYCell.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) return self.table.get_at(self.x + x, self.y + y)._cell class CoreBag(object): """Has a collection of _XYCells""" def
(self, *args, **kwargs): return contrib_excel.pprint(self, *args, **kwargs) def as_list(self, *args, **kwargs): return contrib_excel.as_list(self, *args, **kwargs) def filter_one(self, filter_by): return contrib_excel.filter_one(self, filter_by) def excel_locations(self, *args, **kwargs): return contrib_excel.excel_locations(self, *args, **kwargs) def __init__(self, table): self.__store = set() self.table = table def add(self, cell): """Add a cell to this bag""" if not isinstance(cell, _XYCell): raise TypeError("Can only add _XYCell types to Bags: {}".format( cell.__class__)) self.__store.add(cell) def __eq__(self, other): """Compare two bags: they are equal if: * their table are the same table (object) * they contain the same set of cells""" if not isinstance(other, CoreBag): return False return (self.table is other.table and self.__store == other.__store) def __len__(self): return len(self.__store) def __repr__(self): return repr(self.__store) @classmethod def singleton(cls, cell, table): """ Construct a bag with one cell in it """ bag = cls(table=table) bag.add(cell) return bag @property def unordered(self): """ Obtain an unordered iterator over this bag. iter(bag) is sorted on demand, and therefore inefficient if being done repeatedly where order does not matter. """ return (Bag.singleton(c, table=self.table) for c in self.__store) @property def unordered_cells(self): """ Analogous to the `unordered` property, except that it returns _XYCells instead of Bags. """ return iter(self.__store) def __iter__(self): """ Return a view of the cells in this back in left-right, top-bottom order Note: this is expensive for large bags (when done repeatedly). If you don't care about order, use `bag.unordered`, which gives an unordered iterator. """ def yx(cell): return cell.y, cell.x for cell in sorted(self.__store, key=yx): yield Bag.singleton(cell, table=self.table) def __sub__(self, rhs): """Bags quack like sets. Implements - operator.""" return self.difference(rhs) def difference(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table,\ "Can't difference bags from separate tables" new = copy(self) new.__store = self.__store.difference(rhs.__store) return new def __or__(self, rhs): """Bags quack like sets. Implements | operator. For mathematical purity, + (__add__) isn't appropriate""" return self.union(rhs) def union(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table, "Can't union bags from separate tables" new = copy(self) new.__store = self.__store.union(rhs.__store) return new def __and__(self, rhs): return self.intersection(rhs) def intersection(self, rhs): assert self.table is rhs.table, \ "Can't take intersection of bags from separate tables" new = copy(self) new.__store = self.__store.intersection(rhs.__store) return new def select(self, function): """Select cells from this bag's table based on the cells in this bag. e.g. bag.select(lambda bag_cell, table_cell: bag_cell.y == table_cell.y and bag_cell.value == table_cell.value) would give cells in the table with the same name on the same row as a cell in the bag""" return self.table.select_other(function, self) def select_other(self, function, other): """A more general version of select, where another bag to select from is explicitly specified rather than using the original bag's table""" """note: self.select(f) = self.table.select_other(f, self)""" newbag = Bag(table=self.table) for bag_cell in self.__store: for other_cell in other.__store: if function(bag_cell, other_cell): newbag.add(bag_cell) break return newbag def filter(self, filter_by): """ Returns a new bag containing only cells which match the filter_by predicate. filter_by can be: a) a callable, which takes a cell as a parameter and returns True if the cell should be returned, such as `lambda cell: cell value == 'dog' b) a string, to match exactly: `u'dog'` c) a hamcrest match rule: `hamcrest.equal_to("dog") (requires hamcrest to be available) d) a compiled regex: `re.compile("dog") """ if callable(filter_by): return self._filter_internal(filter_by) elif isinstance(filter_by, six.string_types): return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by) elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return self._filter_internal(lambda cell: filter_by.matches(cell.value)) elif isinstance(filter_by, REGEX_PATTERN_TYPE): return self._filter_internal( lambda cell: re.match(filter_by, six.text_type(cell.value))) else: raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.") def _filter_internal(self, function): newbag = Bag(table=self.table) for bag_cell in self.unordered_cells: if function(bag_cell): newbag.add(bag_cell) return newbag def assert_one(self, message="assert_one() : {} cells in bag, not 1"): """Chainable: raise an error if the bag contains 0 or 2+ cells. Otherwise returns the original (singleton) bag unchanged.""" if len(self.__store) == 1: return self elif len(self.__store) == 0: raise NoCellsAssertionError( message.format( len(self.__store) ) ) elif len(self.__store) > 1: raise MultipleCellsAssertionError( message.format( len(self.__store) ) ) @property def _cell(self): """Under the hood: get the cell inside a singleton bag. It's an error for it to not contain precisely one cell.""" try: xycell = list(self.assert_one().__store)[0] except AssertionError: l = len(list(self.__store)) raise XYPathError("Can't use multicell bag as cell: (len %r)" % l) else: assert isinstance(xycell, _XYCell) return xycell @property def value(self): """Getter for singleton's cell value""" return self._cell.value @property def x(self): """Getter for singleton's cell column number""" return self._cell.x @property def y(self): """Getter for singleton's cell row number""" return self._cell.y @property def properties(self): """Getter for singleton's cell properties""" return self._cell.properties class Bag(CoreBag): @staticmethod def from_list(cells): """ Make a non-bag iterable of cells into a Bag. Some magic may be lost, especially if it's zero length. TODO: This should probably be part of the core __init__ class. TODO: Don't do a piece-by-piece insertion, just slap the whole listed iterable in, because this is slow. """ # TODO bag = Bag(table=None) for i, cell_bag in enumerate(cells): bag.add(cell_bag._cell) if i == 0: bag.table = cell_bag.table else: assert bag.table == cell_bag.table return bag def expand(self, direction, stop_before=None): return self.fill(direction, stop_before=stop_before) | self def fill(self, direction, stop_before=None): """Should give the same output as fill, except it doesn't support non-cardinal directions or stop_before. Twenty times faster than fill in test_ravel.""" if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT, UP_RIGHT): return self._fill(direction, stop_before) def what_to_get(cell): """converts bag coordinates into thing to pass to get_at""" cell_coord = (cell.x, cell.y) retval = [] for cell_coord, direction_coord in zip(cell_coord, direction): if direction_coord != 0: retval.append(None) else: retval.append(cell_coord) return tuple(retval) # TODO yuck if direction not in (UP, RIGHT, DOWN, LEFT): raise ValueError("Must be a cardinal direction!") ### this is what same_row/col should look like! small_table = None for cell in self.unordered_cells: got_rowcol = self.table.get_at(*what_to_get(cell)) if small_table: small_table = small_table.union(got_rowcol) else: small_table = got_rowcol if small_table is None: small_table = Bag(table=self.table) # now we use the small_table as if it was the table. (left_right, up_down) = direction bag = small_table.select_other( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down, self ) if stop_before is not None: return bag.stop_before(stop_before) else: return bag def stop_before(self, stop_function): """Assumes the data is: * in a single row or column * proceeding either downwards or rightwards """ return Bag.from_list(list( takewhile(lambda c: not stop_function(c), self))) def _fill(self, direction, stop_before=None): """ If the bag contains only one cell, select all cells in the direction given, excluding the original cell. For example, from a column heading cell, you can "fill down" to get all the values underneath it. If you provide a stop_before function, it will be called on each cell as a stop condition. For example, if you provide a stop_before function which tests cell.value for an empty string. This would stop the fill function before it reaches the bottom of the sheet, for example. """ raise DeprecationWarning("2D fill is deprecated. Yell if you need it.") if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT, UP_LEFT, DOWN_LEFT): raise ValueError("Invalid direction! Use one of UP, RIGHT, " "DOWN_RIGHT etc") (left_right, up_down) = direction bag = self.select( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down ) if stop_before is not None: # NOTE(PMF): stop_before is limited to singleton bags, in the DOWN # or RIGHT direction. This isn't ideal, but with the above "magic" # cmp code I can't think of an elegant general way of doing this. I # also can't imagine what it means to run fill in multiple # directions, or with non singleton bags. TODO: Constrain? if direction not in (DOWN, RIGHT): raise ValueError("Oops, stop_before only works down or right!") self.assert_one("You can't use stop_before for bags with more than" " one cell inside.") return Bag.from_list(list( takewhile(lambda c: not stop_before(c), bag))) return bag def junction(self, other, *args, **kwargs): """For every combination of pairs of cells from this bag and the other bag, get the cell that is at the same row as one of them, and column as the other. There are two: so we specify a direction to say which one wins (in the cell-based version of this function) - defaulting to the one furthest down""" if not isinstance(other, CoreBag): raise TypeError( "Bag.junction() called with invalid type {}, must be " "(Core)Bag".format(other.__class__)) # Generate ordered lists of dimension cells exactly once (avoid doing # it in the inner loop because of the sorted() in __iter__) self_cells = list(self) other_cells = list(other) for self_cell in self_cells: for other_cell in other_cells: assert self_cell._cell.__class__ == other_cell._cell.__class__ for triple in self_cell._cell.junction(other_cell._cell, *args, **kwargs): yield triple def waffle(self, other, *args, **kwargs): bag = Bag(table=self.table) for (selfbag, otherbag, junction_cell) in self.junction(other, *args, **kwargs): bag.add(junction_cell._cell) return bag def shift(self, x=0, y=0): """ Return a bag in which each cell is offset from the source bag by the coordinates specified. Coordinates can be specified as: Bag.shift(0,2) - full specification Bag.shift(y=2) - partial specification Bag.shift((0,2)) - use of tuple for x, unspecified y """ if not isinstance(x, int): assert y == 0, \ "Bag.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) bag = Bag(table=self.table) for b_cell in self.unordered: t_cell = self.table.get_at(b_cell.x + x, b_cell.y + y).assert_one() bag.add(t_cell._cell) return bag def extrude(self, dx, dy): """ Extrude all cells in the bag by (dx, dy), by looking For example, given the bag with a cell at (0, 0): {(0, 0)} .extrude(2, 0) gives the bag with the cells (to the right): {(0, 0), (1, 0), (2, 0)} .extrude(0, -2) gives the bag with the cells (up): {(0, 0), (0, -1), (0, -2)} """ if dx < 0: dxs = list(range(0, dx - 1, -1)) else: dxs = list(range(0, dx + 1, +1)) if dy < 0: dys = list(range(0, dy - 1, -1)) else: dys = list(range(0, dy + 1, +1)) bag = Bag(table=self.table) for cell in self.unordered_cells: for i, j in product(dxs, dys): bag.add(self.table.get_at(cell.x + i, cell.y + j)._cell) return bag def same_row(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap - use Table.get_at() all_y = set() for cell in bag.unordered_cells: all_y.add(cell.y) return self.filter(lambda c: c.y in all_y) def same_col(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap all_x = set() for cell in bag.unordered_cells: all_x.add(cell.x) return self.filter(lambda c: c.x in all_x) def __getattr__(self, name): if name.startswith("is_not_"): return lambda: self.filter(lambda cell: not cell.properties[name[7:]]) if name.startswith("is_"): # might need additional layer of indirection return lambda: self.filter(lambda cell: cell.properties[name[3:]]) if name.endswith("_is_not"): return lambda value: self.filter(lambda cell: not cell.properties[name[:-7]] == value) if name.endswith("_is"): return lambda value: self.filter(lambda cell: cell.properties[name[:-3]] == value) raise AttributeError("Bag has no attribute {!r}".format(name)) class Table(Bag): """A bag which represents an entire sheet. Features indices to speed retrieval by coordinate. Also includes functions for importing tables into XYPath""" def __init__(self, name=""): super(Table, self).__init__(table=self) self._x_index = defaultdict(lambda: Bag(self)) self._y_index = defaultdict(lambda: Bag(self)) self._max_x = -1 self._max_y = -1 self.sheet = None self.name = name def __hash__(self): return id(self) def rows(self): """Get bags containing each row's cells, in order""" for row_num in range(0, self._max_y + 1): # inclusive yield self._y_index[row_num] def cols(self): """Get bags containing each column's cells, in order""" for col_num in range(0, self._max_x + 1): # inclusive yield self._x_index[col_num] def col(self, column): if isinstance(column, six.string_types): c_num = contrib_excel.excel_column_number(column, index=0) return self.col(c_num) else: assert isinstance(column, int) return self._x_index[column] def add(self, cell): """Under the hood: add a cell to a table and the table's indices. Used in the construction of a table.""" self._x_index[cell.x].add(cell) self._y_index[cell.y].add(cell) self._max_x = max(self._max_x, cell.x) self._max_y = max(self._max_y, cell.y) super(Table, self).add(cell) def get_at(self, x=None, y=None): """Directly get a singleton bag via indices. Faster than Bag.filter""" # we use .get() here to avoid new empty Bags being inserted # into the index stores when a non-existant coordinate is requested. assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x) assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y) if x is None and y is None: raise TypeError('get_at requires at least one x or y value') if x is None: return self._y_index.get(y, Bag(self)) if y is None: return self._x_index.get(x, Bag(self)) return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x) @staticmethod def from_filename(filename, table_name=None, table_index=None): """Wrapper around from_file_object to handle extension extraction""" # NOTE: this is a messytables table name extension = os.path.splitext(filename)[1].strip('.') with open(filename, 'rb') as f: return Table.from_file_object(f, extension, table_name=table_name, table_index=table_index) @staticmethod def from_file_object(fobj, extension='', table_name=None, table_index=None): """Load table from file object, you must specify a table's name or position number. If you don't know these, try from_messy.""" # NOTE this is a messytables table name if (table_name is not None and table_index is not None) or \ (table_name is None and table_index is None): raise TypeError("Must give exactly one of table_name, table_index") table_set = messytables.any.any_tableset(fobj, extension=extension) if table_name is not None: return Table.from_messy(table_set[table_name]) elif table_index is not None: return Table.from_messy(table_set.tables[table_index]) @staticmethod def from_messy(messy_rowset): """Import a rowset (table) from messytables, e.g. to work with each table in turn: tables = messytables.any.any_tableset(fobj) for mt_table in tables: xy_table = xypath.Table.from_messy(mt_table) ...""" assert isinstance(messy_rowset, messytables.core.RowSet),\ "Expected a RowSet, got a %r" % type(messy_rowset) new_table = Table.from_iterable( messy_rowset, value_func=lambda cell: cell.value, properties_func=lambda cell: cell.properties, name=messy_rowset.name) if hasattr(messy_rowset, 'sheet'): new_table.sheet = messy_rowset.sheet return new_table @staticmethod def from_iterable(table, value_func=lambda cell: cell, properties_func=lambda cell: {}, name=None): """Make a table from a pythonic table structure. The table must be an iterable which returns rows (in top-to-bottom order), which in turn are iterables which returns cells (in left-to-right order). value_func and properties_func specify how the cell maps onto an _XYCell's value and properties. The defaults assume that you have a straight-forward list of lists of values.""" new_table = Table(name=name) for y, row in enumerate(table): for x, cell in enumerate(row): new_table.add( _XYCell( value_func(cell), x, y, new_table, properties_func(cell))) return new_table @staticmethod def from_bag(bag, name=None): """Make a copy of a bag which is its own table. Useful when a single imported table is two logical tables""" if name is None: name=bag.table.name new_table = Table(name=name) for bag_cell in bag.unordered: new_table.add(bag_cell._cell.copy(new_table)) return new_table
pprint
identifier_name
xypath.py
#!/usr/bin/env python """ musings on order of variables, x/y vs. col/row Everyone agrees that col 2, row 1 is (2,1) which is xy ordered. This works well with the name. Remember that the usual iterators (over a list-of-lists) is outer loop y first.""" from __future__ import absolute_import import re import messytables import os import six from six.moves import range from six.moves import zip try: import hamcrest have_ham = True except ImportError: have_ham = False import sys if sys.version_info >= (3, 6): import typing REGEX_PATTERN_TYPE = typing.Pattern else: REGEX_PATTERN_TYPE = re._pattern_type from collections import defaultdict from copy import copy from itertools import product, takewhile from xypath.contrib import excel as contrib_excel UP = (0, -1) RIGHT = (1, 0) DOWN = (0, 1)
DOWN_RIGHT = (1, 1) UP_LEFT = (-1, -1) DOWN_LEFT = (-1, 1) def cmp(x, y): if x<y: return -1 if x>y: return 1 return 0 class XYPathError(Exception): """Problems with spreadsheet layouts should raise this or a descendant.""" pass class JunctionError(RuntimeError, XYPathError): """Raised if paranoid _XYCell.junction finds it is returning one of the input cells - i.e. the input cells are in the same row or column""" pass class NoCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains zero cells.""" pass class MultipleCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains multiple cells.""" pass class LookupConfusionError(AssertionError, XYPathError): """Lookup found multiple equally-close headers""" pass class NoLookupError(AssertionError, XYPathError): """Lookup found no valid header""" pass def describe_filter_method(filter_by): if callable(filter_by): return "matching a function called {}".format(filter_by.__name__) if isinstance(filter_by, six.string_types): return "containing the string {!r}".format(filter_by) if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return "containing "+str(filter_by) if isinstance(filter_by, REGEX_PATTERN_TYPE): return "matching the regex {!r}".format(filter_by.pattern) else: return "which we're surprised we found at all" class _XYCell(object): """needs to contain: value, position (x,y), parent bag""" __slots__ = ['value', 'x', 'y', 'table', 'properties'] def __init__(self, value, x, y, table, properties=None): self.value = value # of appropriate type self.x = x # column number self.y = y # row number self.table = table if properties is None: self.properties = {} else: self.properties = properties def __hash__(self): """ In order to make a set of cells (used in Bag), they *must* be hashable. An _XYCell is uniquely identified (by sets, etc) through its position, content, and parent table. Note that `properties` is ignored since dicts are unhashable, and value may be redundant. """ return hash((self.value, self.x, self.y, self.table)) def __eq__(self, rhs): """See _XYCell.__hash__ for equality conditions""" return hash(self) == hash(rhs) def copy(self, new_table=None): """Make a copy of the cell. Its table will be new_table, if specified""" if new_table is None: new_table = self.table return _XYCell(self.value, self.x, self.y, new_table, self.properties) def __repr__(self): return "_XYCell(%r, %r, %r)" % \ (self.value, self.x, self.y) def __unicode__(self): return six.text_type(self.value) def lookup(self, header_bag, direction, strict=False): """ Given a single cell (usually a value), a bag containing the headers of a particular type for that cell, and the direction in which to search for the relevant header e.g. for value cell V, searching up: [ ] [ ] [O] [ ] ---> [ ] V [ ] [ ] the cell with the arrow will be returned. Strict restricts the selection to cells in the same row/column as the value, so O is selected instead.""" def mult(cell): return cell.x * direction[0] + cell.y * direction[1] def same_row_col(a, b, direction): return (a.x - b.x == 0 and direction[0] == 0) or \ (a.y - b.y == 0 and direction[1] == 0) best_cell = None second_best_cell = None for target_cell in header_bag.unordered_cells: if mult(self) <= mult(target_cell): if not best_cell or mult(target_cell) <= mult(best_cell): if not strict or same_row_col(self, target_cell, direction): second_best_cell = best_cell best_cell = target_cell if second_best_cell and mult(best_cell) == mult(second_best_cell): raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format( best_cell, second_best_cell, self)) if best_cell is None: raise NoLookupError("No lookup for {!r}".format(self)) return best_cell def junction(self, other, direction=DOWN, paranoid=True): """ gets the lower-right intersection of the row of one, and the column of the other. paranoid: should we panic if we're hitting one of our input cells?""" def junction_coord(cells, direction=DOWN): """ Under the hood: given two cells and a favoured direction, get the position of the cell with the column of one and the row of the other: A---->+ | ^ | | | | v | *<----B Both + and * are candidates for the junction of A and B - we take the one furthest down by default (specified by direction) >>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None)) >>> junction_coord(cells_dr, DOWN) (1, 4) >>> junction_coord(cells_dr, UP) (3, 2) >>> junction_coord(cells_dr, LEFT) (1, 4) >>> junction_coord(cells_dr, RIGHT) (3, 2) >>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None)) >>> junction_coord(cells_tr, DOWN) (3, 4) >>> junction_coord(cells_tr, UP) (1, 2) >>> junction_coord(cells_tr, LEFT) (1, 2) >>> junction_coord(cells_tr, RIGHT) (3, 4) """ new_cells = ( (cells[0].x, cells[1].y), (cells[1].x, cells[0].y) ) for index, value in enumerate(direction): if value == 0: continue if cmp(new_cells[0][index], new_cells[1][index]) == value: return new_cells[0] else: return new_cells[1] (x, y) = junction_coord((self, other), direction) if paranoid and (x, y) == (self.x, self.y) or \ (x, y) == (other.x, other.y): raise JunctionError( "_XYCell.junction(_XYCell) resulted in a cell which is equal" " to one of the input cells.\n" " self: {}\n other: {}\n x: {}\n y: {}".format( self, other, x, y)) junction_bag = self.table.get_at(x, y) if len(junction_bag) == 0: return self_bag = Bag(self.table) self_bag.add(self) other_bag = Bag(self.table) other_bag.add(other) yield (self_bag, other_bag, junction_bag) def shift(self, x=0, y=0): """Get the cell which is offset from this cell by x columns, y rows""" if not isinstance(x, int): assert y == 0, \ "_XYCell.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) return self.table.get_at(self.x + x, self.y + y)._cell class CoreBag(object): """Has a collection of _XYCells""" def pprint(self, *args, **kwargs): return contrib_excel.pprint(self, *args, **kwargs) def as_list(self, *args, **kwargs): return contrib_excel.as_list(self, *args, **kwargs) def filter_one(self, filter_by): return contrib_excel.filter_one(self, filter_by) def excel_locations(self, *args, **kwargs): return contrib_excel.excel_locations(self, *args, **kwargs) def __init__(self, table): self.__store = set() self.table = table def add(self, cell): """Add a cell to this bag""" if not isinstance(cell, _XYCell): raise TypeError("Can only add _XYCell types to Bags: {}".format( cell.__class__)) self.__store.add(cell) def __eq__(self, other): """Compare two bags: they are equal if: * their table are the same table (object) * they contain the same set of cells""" if not isinstance(other, CoreBag): return False return (self.table is other.table and self.__store == other.__store) def __len__(self): return len(self.__store) def __repr__(self): return repr(self.__store) @classmethod def singleton(cls, cell, table): """ Construct a bag with one cell in it """ bag = cls(table=table) bag.add(cell) return bag @property def unordered(self): """ Obtain an unordered iterator over this bag. iter(bag) is sorted on demand, and therefore inefficient if being done repeatedly where order does not matter. """ return (Bag.singleton(c, table=self.table) for c in self.__store) @property def unordered_cells(self): """ Analogous to the `unordered` property, except that it returns _XYCells instead of Bags. """ return iter(self.__store) def __iter__(self): """ Return a view of the cells in this back in left-right, top-bottom order Note: this is expensive for large bags (when done repeatedly). If you don't care about order, use `bag.unordered`, which gives an unordered iterator. """ def yx(cell): return cell.y, cell.x for cell in sorted(self.__store, key=yx): yield Bag.singleton(cell, table=self.table) def __sub__(self, rhs): """Bags quack like sets. Implements - operator.""" return self.difference(rhs) def difference(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table,\ "Can't difference bags from separate tables" new = copy(self) new.__store = self.__store.difference(rhs.__store) return new def __or__(self, rhs): """Bags quack like sets. Implements | operator. For mathematical purity, + (__add__) isn't appropriate""" return self.union(rhs) def union(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table, "Can't union bags from separate tables" new = copy(self) new.__store = self.__store.union(rhs.__store) return new def __and__(self, rhs): return self.intersection(rhs) def intersection(self, rhs): assert self.table is rhs.table, \ "Can't take intersection of bags from separate tables" new = copy(self) new.__store = self.__store.intersection(rhs.__store) return new def select(self, function): """Select cells from this bag's table based on the cells in this bag. e.g. bag.select(lambda bag_cell, table_cell: bag_cell.y == table_cell.y and bag_cell.value == table_cell.value) would give cells in the table with the same name on the same row as a cell in the bag""" return self.table.select_other(function, self) def select_other(self, function, other): """A more general version of select, where another bag to select from is explicitly specified rather than using the original bag's table""" """note: self.select(f) = self.table.select_other(f, self)""" newbag = Bag(table=self.table) for bag_cell in self.__store: for other_cell in other.__store: if function(bag_cell, other_cell): newbag.add(bag_cell) break return newbag def filter(self, filter_by): """ Returns a new bag containing only cells which match the filter_by predicate. filter_by can be: a) a callable, which takes a cell as a parameter and returns True if the cell should be returned, such as `lambda cell: cell value == 'dog' b) a string, to match exactly: `u'dog'` c) a hamcrest match rule: `hamcrest.equal_to("dog") (requires hamcrest to be available) d) a compiled regex: `re.compile("dog") """ if callable(filter_by): return self._filter_internal(filter_by) elif isinstance(filter_by, six.string_types): return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by) elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return self._filter_internal(lambda cell: filter_by.matches(cell.value)) elif isinstance(filter_by, REGEX_PATTERN_TYPE): return self._filter_internal( lambda cell: re.match(filter_by, six.text_type(cell.value))) else: raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.") def _filter_internal(self, function): newbag = Bag(table=self.table) for bag_cell in self.unordered_cells: if function(bag_cell): newbag.add(bag_cell) return newbag def assert_one(self, message="assert_one() : {} cells in bag, not 1"): """Chainable: raise an error if the bag contains 0 or 2+ cells. Otherwise returns the original (singleton) bag unchanged.""" if len(self.__store) == 1: return self elif len(self.__store) == 0: raise NoCellsAssertionError( message.format( len(self.__store) ) ) elif len(self.__store) > 1: raise MultipleCellsAssertionError( message.format( len(self.__store) ) ) @property def _cell(self): """Under the hood: get the cell inside a singleton bag. It's an error for it to not contain precisely one cell.""" try: xycell = list(self.assert_one().__store)[0] except AssertionError: l = len(list(self.__store)) raise XYPathError("Can't use multicell bag as cell: (len %r)" % l) else: assert isinstance(xycell, _XYCell) return xycell @property def value(self): """Getter for singleton's cell value""" return self._cell.value @property def x(self): """Getter for singleton's cell column number""" return self._cell.x @property def y(self): """Getter for singleton's cell row number""" return self._cell.y @property def properties(self): """Getter for singleton's cell properties""" return self._cell.properties class Bag(CoreBag): @staticmethod def from_list(cells): """ Make a non-bag iterable of cells into a Bag. Some magic may be lost, especially if it's zero length. TODO: This should probably be part of the core __init__ class. TODO: Don't do a piece-by-piece insertion, just slap the whole listed iterable in, because this is slow. """ # TODO bag = Bag(table=None) for i, cell_bag in enumerate(cells): bag.add(cell_bag._cell) if i == 0: bag.table = cell_bag.table else: assert bag.table == cell_bag.table return bag def expand(self, direction, stop_before=None): return self.fill(direction, stop_before=stop_before) | self def fill(self, direction, stop_before=None): """Should give the same output as fill, except it doesn't support non-cardinal directions or stop_before. Twenty times faster than fill in test_ravel.""" if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT, UP_RIGHT): return self._fill(direction, stop_before) def what_to_get(cell): """converts bag coordinates into thing to pass to get_at""" cell_coord = (cell.x, cell.y) retval = [] for cell_coord, direction_coord in zip(cell_coord, direction): if direction_coord != 0: retval.append(None) else: retval.append(cell_coord) return tuple(retval) # TODO yuck if direction not in (UP, RIGHT, DOWN, LEFT): raise ValueError("Must be a cardinal direction!") ### this is what same_row/col should look like! small_table = None for cell in self.unordered_cells: got_rowcol = self.table.get_at(*what_to_get(cell)) if small_table: small_table = small_table.union(got_rowcol) else: small_table = got_rowcol if small_table is None: small_table = Bag(table=self.table) # now we use the small_table as if it was the table. (left_right, up_down) = direction bag = small_table.select_other( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down, self ) if stop_before is not None: return bag.stop_before(stop_before) else: return bag def stop_before(self, stop_function): """Assumes the data is: * in a single row or column * proceeding either downwards or rightwards """ return Bag.from_list(list( takewhile(lambda c: not stop_function(c), self))) def _fill(self, direction, stop_before=None): """ If the bag contains only one cell, select all cells in the direction given, excluding the original cell. For example, from a column heading cell, you can "fill down" to get all the values underneath it. If you provide a stop_before function, it will be called on each cell as a stop condition. For example, if you provide a stop_before function which tests cell.value for an empty string. This would stop the fill function before it reaches the bottom of the sheet, for example. """ raise DeprecationWarning("2D fill is deprecated. Yell if you need it.") if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT, UP_LEFT, DOWN_LEFT): raise ValueError("Invalid direction! Use one of UP, RIGHT, " "DOWN_RIGHT etc") (left_right, up_down) = direction bag = self.select( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down ) if stop_before is not None: # NOTE(PMF): stop_before is limited to singleton bags, in the DOWN # or RIGHT direction. This isn't ideal, but with the above "magic" # cmp code I can't think of an elegant general way of doing this. I # also can't imagine what it means to run fill in multiple # directions, or with non singleton bags. TODO: Constrain? if direction not in (DOWN, RIGHT): raise ValueError("Oops, stop_before only works down or right!") self.assert_one("You can't use stop_before for bags with more than" " one cell inside.") return Bag.from_list(list( takewhile(lambda c: not stop_before(c), bag))) return bag def junction(self, other, *args, **kwargs): """For every combination of pairs of cells from this bag and the other bag, get the cell that is at the same row as one of them, and column as the other. There are two: so we specify a direction to say which one wins (in the cell-based version of this function) - defaulting to the one furthest down""" if not isinstance(other, CoreBag): raise TypeError( "Bag.junction() called with invalid type {}, must be " "(Core)Bag".format(other.__class__)) # Generate ordered lists of dimension cells exactly once (avoid doing # it in the inner loop because of the sorted() in __iter__) self_cells = list(self) other_cells = list(other) for self_cell in self_cells: for other_cell in other_cells: assert self_cell._cell.__class__ == other_cell._cell.__class__ for triple in self_cell._cell.junction(other_cell._cell, *args, **kwargs): yield triple def waffle(self, other, *args, **kwargs): bag = Bag(table=self.table) for (selfbag, otherbag, junction_cell) in self.junction(other, *args, **kwargs): bag.add(junction_cell._cell) return bag def shift(self, x=0, y=0): """ Return a bag in which each cell is offset from the source bag by the coordinates specified. Coordinates can be specified as: Bag.shift(0,2) - full specification Bag.shift(y=2) - partial specification Bag.shift((0,2)) - use of tuple for x, unspecified y """ if not isinstance(x, int): assert y == 0, \ "Bag.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) bag = Bag(table=self.table) for b_cell in self.unordered: t_cell = self.table.get_at(b_cell.x + x, b_cell.y + y).assert_one() bag.add(t_cell._cell) return bag def extrude(self, dx, dy): """ Extrude all cells in the bag by (dx, dy), by looking For example, given the bag with a cell at (0, 0): {(0, 0)} .extrude(2, 0) gives the bag with the cells (to the right): {(0, 0), (1, 0), (2, 0)} .extrude(0, -2) gives the bag with the cells (up): {(0, 0), (0, -1), (0, -2)} """ if dx < 0: dxs = list(range(0, dx - 1, -1)) else: dxs = list(range(0, dx + 1, +1)) if dy < 0: dys = list(range(0, dy - 1, -1)) else: dys = list(range(0, dy + 1, +1)) bag = Bag(table=self.table) for cell in self.unordered_cells: for i, j in product(dxs, dys): bag.add(self.table.get_at(cell.x + i, cell.y + j)._cell) return bag def same_row(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap - use Table.get_at() all_y = set() for cell in bag.unordered_cells: all_y.add(cell.y) return self.filter(lambda c: c.y in all_y) def same_col(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap all_x = set() for cell in bag.unordered_cells: all_x.add(cell.x) return self.filter(lambda c: c.x in all_x) def __getattr__(self, name): if name.startswith("is_not_"): return lambda: self.filter(lambda cell: not cell.properties[name[7:]]) if name.startswith("is_"): # might need additional layer of indirection return lambda: self.filter(lambda cell: cell.properties[name[3:]]) if name.endswith("_is_not"): return lambda value: self.filter(lambda cell: not cell.properties[name[:-7]] == value) if name.endswith("_is"): return lambda value: self.filter(lambda cell: cell.properties[name[:-3]] == value) raise AttributeError("Bag has no attribute {!r}".format(name)) class Table(Bag): """A bag which represents an entire sheet. Features indices to speed retrieval by coordinate. Also includes functions for importing tables into XYPath""" def __init__(self, name=""): super(Table, self).__init__(table=self) self._x_index = defaultdict(lambda: Bag(self)) self._y_index = defaultdict(lambda: Bag(self)) self._max_x = -1 self._max_y = -1 self.sheet = None self.name = name def __hash__(self): return id(self) def rows(self): """Get bags containing each row's cells, in order""" for row_num in range(0, self._max_y + 1): # inclusive yield self._y_index[row_num] def cols(self): """Get bags containing each column's cells, in order""" for col_num in range(0, self._max_x + 1): # inclusive yield self._x_index[col_num] def col(self, column): if isinstance(column, six.string_types): c_num = contrib_excel.excel_column_number(column, index=0) return self.col(c_num) else: assert isinstance(column, int) return self._x_index[column] def add(self, cell): """Under the hood: add a cell to a table and the table's indices. Used in the construction of a table.""" self._x_index[cell.x].add(cell) self._y_index[cell.y].add(cell) self._max_x = max(self._max_x, cell.x) self._max_y = max(self._max_y, cell.y) super(Table, self).add(cell) def get_at(self, x=None, y=None): """Directly get a singleton bag via indices. Faster than Bag.filter""" # we use .get() here to avoid new empty Bags being inserted # into the index stores when a non-existant coordinate is requested. assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x) assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y) if x is None and y is None: raise TypeError('get_at requires at least one x or y value') if x is None: return self._y_index.get(y, Bag(self)) if y is None: return self._x_index.get(x, Bag(self)) return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x) @staticmethod def from_filename(filename, table_name=None, table_index=None): """Wrapper around from_file_object to handle extension extraction""" # NOTE: this is a messytables table name extension = os.path.splitext(filename)[1].strip('.') with open(filename, 'rb') as f: return Table.from_file_object(f, extension, table_name=table_name, table_index=table_index) @staticmethod def from_file_object(fobj, extension='', table_name=None, table_index=None): """Load table from file object, you must specify a table's name or position number. If you don't know these, try from_messy.""" # NOTE this is a messytables table name if (table_name is not None and table_index is not None) or \ (table_name is None and table_index is None): raise TypeError("Must give exactly one of table_name, table_index") table_set = messytables.any.any_tableset(fobj, extension=extension) if table_name is not None: return Table.from_messy(table_set[table_name]) elif table_index is not None: return Table.from_messy(table_set.tables[table_index]) @staticmethod def from_messy(messy_rowset): """Import a rowset (table) from messytables, e.g. to work with each table in turn: tables = messytables.any.any_tableset(fobj) for mt_table in tables: xy_table = xypath.Table.from_messy(mt_table) ...""" assert isinstance(messy_rowset, messytables.core.RowSet),\ "Expected a RowSet, got a %r" % type(messy_rowset) new_table = Table.from_iterable( messy_rowset, value_func=lambda cell: cell.value, properties_func=lambda cell: cell.properties, name=messy_rowset.name) if hasattr(messy_rowset, 'sheet'): new_table.sheet = messy_rowset.sheet return new_table @staticmethod def from_iterable(table, value_func=lambda cell: cell, properties_func=lambda cell: {}, name=None): """Make a table from a pythonic table structure. The table must be an iterable which returns rows (in top-to-bottom order), which in turn are iterables which returns cells (in left-to-right order). value_func and properties_func specify how the cell maps onto an _XYCell's value and properties. The defaults assume that you have a straight-forward list of lists of values.""" new_table = Table(name=name) for y, row in enumerate(table): for x, cell in enumerate(row): new_table.add( _XYCell( value_func(cell), x, y, new_table, properties_func(cell))) return new_table @staticmethod def from_bag(bag, name=None): """Make a copy of a bag which is its own table. Useful when a single imported table is two logical tables""" if name is None: name=bag.table.name new_table = Table(name=name) for bag_cell in bag.unordered: new_table.add(bag_cell._cell.copy(new_table)) return new_table
LEFT = (-1, 0) UP_RIGHT = (1, -1)
random_line_split
xypath.py
#!/usr/bin/env python """ musings on order of variables, x/y vs. col/row Everyone agrees that col 2, row 1 is (2,1) which is xy ordered. This works well with the name. Remember that the usual iterators (over a list-of-lists) is outer loop y first.""" from __future__ import absolute_import import re import messytables import os import six from six.moves import range from six.moves import zip try: import hamcrest have_ham = True except ImportError: have_ham = False import sys if sys.version_info >= (3, 6): import typing REGEX_PATTERN_TYPE = typing.Pattern else: REGEX_PATTERN_TYPE = re._pattern_type from collections import defaultdict from copy import copy from itertools import product, takewhile from xypath.contrib import excel as contrib_excel UP = (0, -1) RIGHT = (1, 0) DOWN = (0, 1) LEFT = (-1, 0) UP_RIGHT = (1, -1) DOWN_RIGHT = (1, 1) UP_LEFT = (-1, -1) DOWN_LEFT = (-1, 1) def cmp(x, y): if x<y: return -1 if x>y: return 1 return 0 class XYPathError(Exception): """Problems with spreadsheet layouts should raise this or a descendant.""" pass class JunctionError(RuntimeError, XYPathError): """Raised if paranoid _XYCell.junction finds it is returning one of the input cells - i.e. the input cells are in the same row or column""" pass class NoCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains zero cells.""" pass class MultipleCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains multiple cells.""" pass class LookupConfusionError(AssertionError, XYPathError): """Lookup found multiple equally-close headers""" pass class NoLookupError(AssertionError, XYPathError): """Lookup found no valid header""" pass def describe_filter_method(filter_by): if callable(filter_by): return "matching a function called {}".format(filter_by.__name__) if isinstance(filter_by, six.string_types): return "containing the string {!r}".format(filter_by) if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return "containing "+str(filter_by) if isinstance(filter_by, REGEX_PATTERN_TYPE): return "matching the regex {!r}".format(filter_by.pattern) else: return "which we're surprised we found at all" class _XYCell(object): """needs to contain: value, position (x,y), parent bag""" __slots__ = ['value', 'x', 'y', 'table', 'properties'] def __init__(self, value, x, y, table, properties=None): self.value = value # of appropriate type self.x = x # column number self.y = y # row number self.table = table if properties is None: self.properties = {} else: self.properties = properties def __hash__(self): """ In order to make a set of cells (used in Bag), they *must* be hashable. An _XYCell is uniquely identified (by sets, etc) through its position, content, and parent table. Note that `properties` is ignored since dicts are unhashable, and value may be redundant. """ return hash((self.value, self.x, self.y, self.table)) def __eq__(self, rhs): """See _XYCell.__hash__ for equality conditions""" return hash(self) == hash(rhs) def copy(self, new_table=None): """Make a copy of the cell. Its table will be new_table, if specified""" if new_table is None: new_table = self.table return _XYCell(self.value, self.x, self.y, new_table, self.properties) def __repr__(self): return "_XYCell(%r, %r, %r)" % \ (self.value, self.x, self.y) def __unicode__(self): return six.text_type(self.value) def lookup(self, header_bag, direction, strict=False): """ Given a single cell (usually a value), a bag containing the headers of a particular type for that cell, and the direction in which to search for the relevant header e.g. for value cell V, searching up: [ ] [ ] [O] [ ] ---> [ ] V [ ] [ ] the cell with the arrow will be returned. Strict restricts the selection to cells in the same row/column as the value, so O is selected instead.""" def mult(cell): return cell.x * direction[0] + cell.y * direction[1] def same_row_col(a, b, direction): return (a.x - b.x == 0 and direction[0] == 0) or \ (a.y - b.y == 0 and direction[1] == 0) best_cell = None second_best_cell = None for target_cell in header_bag.unordered_cells: if mult(self) <= mult(target_cell): if not best_cell or mult(target_cell) <= mult(best_cell): if not strict or same_row_col(self, target_cell, direction): second_best_cell = best_cell best_cell = target_cell if second_best_cell and mult(best_cell) == mult(second_best_cell): raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format( best_cell, second_best_cell, self)) if best_cell is None: raise NoLookupError("No lookup for {!r}".format(self)) return best_cell def junction(self, other, direction=DOWN, paranoid=True): """ gets the lower-right intersection of the row of one, and the column of the other. paranoid: should we panic if we're hitting one of our input cells?""" def junction_coord(cells, direction=DOWN): """ Under the hood: given two cells and a favoured direction, get the position of the cell with the column of one and the row of the other: A---->+ | ^ | | | | v | *<----B Both + and * are candidates for the junction of A and B - we take the one furthest down by default (specified by direction) >>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None)) >>> junction_coord(cells_dr, DOWN) (1, 4) >>> junction_coord(cells_dr, UP) (3, 2) >>> junction_coord(cells_dr, LEFT) (1, 4) >>> junction_coord(cells_dr, RIGHT) (3, 2) >>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None)) >>> junction_coord(cells_tr, DOWN) (3, 4) >>> junction_coord(cells_tr, UP) (1, 2) >>> junction_coord(cells_tr, LEFT) (1, 2) >>> junction_coord(cells_tr, RIGHT) (3, 4) """ new_cells = ( (cells[0].x, cells[1].y), (cells[1].x, cells[0].y) ) for index, value in enumerate(direction): if value == 0: continue if cmp(new_cells[0][index], new_cells[1][index]) == value: return new_cells[0] else: return new_cells[1] (x, y) = junction_coord((self, other), direction) if paranoid and (x, y) == (self.x, self.y) or \ (x, y) == (other.x, other.y): raise JunctionError( "_XYCell.junction(_XYCell) resulted in a cell which is equal" " to one of the input cells.\n" " self: {}\n other: {}\n x: {}\n y: {}".format( self, other, x, y)) junction_bag = self.table.get_at(x, y) if len(junction_bag) == 0: return self_bag = Bag(self.table) self_bag.add(self) other_bag = Bag(self.table) other_bag.add(other) yield (self_bag, other_bag, junction_bag) def shift(self, x=0, y=0): """Get the cell which is offset from this cell by x columns, y rows""" if not isinstance(x, int): assert y == 0, \ "_XYCell.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) return self.table.get_at(self.x + x, self.y + y)._cell class CoreBag(object): """Has a collection of _XYCells""" def pprint(self, *args, **kwargs): return contrib_excel.pprint(self, *args, **kwargs) def as_list(self, *args, **kwargs): return contrib_excel.as_list(self, *args, **kwargs) def filter_one(self, filter_by): return contrib_excel.filter_one(self, filter_by) def excel_locations(self, *args, **kwargs): return contrib_excel.excel_locations(self, *args, **kwargs) def __init__(self, table): self.__store = set() self.table = table def add(self, cell): """Add a cell to this bag""" if not isinstance(cell, _XYCell): raise TypeError("Can only add _XYCell types to Bags: {}".format( cell.__class__)) self.__store.add(cell) def __eq__(self, other): """Compare two bags: they are equal if: * their table are the same table (object) * they contain the same set of cells""" if not isinstance(other, CoreBag): return False return (self.table is other.table and self.__store == other.__store) def __len__(self): return len(self.__store) def __repr__(self): return repr(self.__store) @classmethod def singleton(cls, cell, table): """ Construct a bag with one cell in it """ bag = cls(table=table) bag.add(cell) return bag @property def unordered(self): """ Obtain an unordered iterator over this bag. iter(bag) is sorted on demand, and therefore inefficient if being done repeatedly where order does not matter. """ return (Bag.singleton(c, table=self.table) for c in self.__store) @property def unordered_cells(self): """ Analogous to the `unordered` property, except that it returns _XYCells instead of Bags. """ return iter(self.__store) def __iter__(self): """ Return a view of the cells in this back in left-right, top-bottom order Note: this is expensive for large bags (when done repeatedly). If you don't care about order, use `bag.unordered`, which gives an unordered iterator. """ def yx(cell): return cell.y, cell.x for cell in sorted(self.__store, key=yx): yield Bag.singleton(cell, table=self.table) def __sub__(self, rhs): """Bags quack like sets. Implements - operator.""" return self.difference(rhs) def difference(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table,\ "Can't difference bags from separate tables" new = copy(self) new.__store = self.__store.difference(rhs.__store) return new def __or__(self, rhs): """Bags quack like sets. Implements | operator. For mathematical purity, + (__add__) isn't appropriate""" return self.union(rhs) def union(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table, "Can't union bags from separate tables" new = copy(self) new.__store = self.__store.union(rhs.__store) return new def __and__(self, rhs): return self.intersection(rhs) def intersection(self, rhs): assert self.table is rhs.table, \ "Can't take intersection of bags from separate tables" new = copy(self) new.__store = self.__store.intersection(rhs.__store) return new def select(self, function): """Select cells from this bag's table based on the cells in this bag. e.g. bag.select(lambda bag_cell, table_cell: bag_cell.y == table_cell.y and bag_cell.value == table_cell.value) would give cells in the table with the same name on the same row as a cell in the bag""" return self.table.select_other(function, self) def select_other(self, function, other): """A more general version of select, where another bag to select from is explicitly specified rather than using the original bag's table""" """note: self.select(f) = self.table.select_other(f, self)""" newbag = Bag(table=self.table) for bag_cell in self.__store: for other_cell in other.__store: if function(bag_cell, other_cell): newbag.add(bag_cell) break return newbag def filter(self, filter_by): """ Returns a new bag containing only cells which match the filter_by predicate. filter_by can be: a) a callable, which takes a cell as a parameter and returns True if the cell should be returned, such as `lambda cell: cell value == 'dog' b) a string, to match exactly: `u'dog'` c) a hamcrest match rule: `hamcrest.equal_to("dog") (requires hamcrest to be available) d) a compiled regex: `re.compile("dog") """ if callable(filter_by): return self._filter_internal(filter_by) elif isinstance(filter_by, six.string_types): return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by) elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return self._filter_internal(lambda cell: filter_by.matches(cell.value)) elif isinstance(filter_by, REGEX_PATTERN_TYPE): return self._filter_internal( lambda cell: re.match(filter_by, six.text_type(cell.value))) else: raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.") def _filter_internal(self, function): newbag = Bag(table=self.table) for bag_cell in self.unordered_cells: if function(bag_cell): newbag.add(bag_cell) return newbag def assert_one(self, message="assert_one() : {} cells in bag, not 1"): """Chainable: raise an error if the bag contains 0 or 2+ cells. Otherwise returns the original (singleton) bag unchanged.""" if len(self.__store) == 1: return self elif len(self.__store) == 0: raise NoCellsAssertionError( message.format( len(self.__store) ) ) elif len(self.__store) > 1: raise MultipleCellsAssertionError( message.format( len(self.__store) ) ) @property def _cell(self): """Under the hood: get the cell inside a singleton bag. It's an error for it to not contain precisely one cell.""" try: xycell = list(self.assert_one().__store)[0] except AssertionError: l = len(list(self.__store)) raise XYPathError("Can't use multicell bag as cell: (len %r)" % l) else: assert isinstance(xycell, _XYCell) return xycell @property def value(self): """Getter for singleton's cell value""" return self._cell.value @property def x(self): """Getter for singleton's cell column number""" return self._cell.x @property def y(self): """Getter for singleton's cell row number""" return self._cell.y @property def properties(self): """Getter for singleton's cell properties""" return self._cell.properties class Bag(CoreBag): @staticmethod def from_list(cells):
def expand(self, direction, stop_before=None): return self.fill(direction, stop_before=stop_before) | self def fill(self, direction, stop_before=None): """Should give the same output as fill, except it doesn't support non-cardinal directions or stop_before. Twenty times faster than fill in test_ravel.""" if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT, UP_RIGHT): return self._fill(direction, stop_before) def what_to_get(cell): """converts bag coordinates into thing to pass to get_at""" cell_coord = (cell.x, cell.y) retval = [] for cell_coord, direction_coord in zip(cell_coord, direction): if direction_coord != 0: retval.append(None) else: retval.append(cell_coord) return tuple(retval) # TODO yuck if direction not in (UP, RIGHT, DOWN, LEFT): raise ValueError("Must be a cardinal direction!") ### this is what same_row/col should look like! small_table = None for cell in self.unordered_cells: got_rowcol = self.table.get_at(*what_to_get(cell)) if small_table: small_table = small_table.union(got_rowcol) else: small_table = got_rowcol if small_table is None: small_table = Bag(table=self.table) # now we use the small_table as if it was the table. (left_right, up_down) = direction bag = small_table.select_other( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down, self ) if stop_before is not None: return bag.stop_before(stop_before) else: return bag def stop_before(self, stop_function): """Assumes the data is: * in a single row or column * proceeding either downwards or rightwards """ return Bag.from_list(list( takewhile(lambda c: not stop_function(c), self))) def _fill(self, direction, stop_before=None): """ If the bag contains only one cell, select all cells in the direction given, excluding the original cell. For example, from a column heading cell, you can "fill down" to get all the values underneath it. If you provide a stop_before function, it will be called on each cell as a stop condition. For example, if you provide a stop_before function which tests cell.value for an empty string. This would stop the fill function before it reaches the bottom of the sheet, for example. """ raise DeprecationWarning("2D fill is deprecated. Yell if you need it.") if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT, UP_LEFT, DOWN_LEFT): raise ValueError("Invalid direction! Use one of UP, RIGHT, " "DOWN_RIGHT etc") (left_right, up_down) = direction bag = self.select( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down ) if stop_before is not None: # NOTE(PMF): stop_before is limited to singleton bags, in the DOWN # or RIGHT direction. This isn't ideal, but with the above "magic" # cmp code I can't think of an elegant general way of doing this. I # also can't imagine what it means to run fill in multiple # directions, or with non singleton bags. TODO: Constrain? if direction not in (DOWN, RIGHT): raise ValueError("Oops, stop_before only works down or right!") self.assert_one("You can't use stop_before for bags with more than" " one cell inside.") return Bag.from_list(list( takewhile(lambda c: not stop_before(c), bag))) return bag def junction(self, other, *args, **kwargs): """For every combination of pairs of cells from this bag and the other bag, get the cell that is at the same row as one of them, and column as the other. There are two: so we specify a direction to say which one wins (in the cell-based version of this function) - defaulting to the one furthest down""" if not isinstance(other, CoreBag): raise TypeError( "Bag.junction() called with invalid type {}, must be " "(Core)Bag".format(other.__class__)) # Generate ordered lists of dimension cells exactly once (avoid doing # it in the inner loop because of the sorted() in __iter__) self_cells = list(self) other_cells = list(other) for self_cell in self_cells: for other_cell in other_cells: assert self_cell._cell.__class__ == other_cell._cell.__class__ for triple in self_cell._cell.junction(other_cell._cell, *args, **kwargs): yield triple def waffle(self, other, *args, **kwargs): bag = Bag(table=self.table) for (selfbag, otherbag, junction_cell) in self.junction(other, *args, **kwargs): bag.add(junction_cell._cell) return bag def shift(self, x=0, y=0): """ Return a bag in which each cell is offset from the source bag by the coordinates specified. Coordinates can be specified as: Bag.shift(0,2) - full specification Bag.shift(y=2) - partial specification Bag.shift((0,2)) - use of tuple for x, unspecified y """ if not isinstance(x, int): assert y == 0, \ "Bag.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) bag = Bag(table=self.table) for b_cell in self.unordered: t_cell = self.table.get_at(b_cell.x + x, b_cell.y + y).assert_one() bag.add(t_cell._cell) return bag def extrude(self, dx, dy): """ Extrude all cells in the bag by (dx, dy), by looking For example, given the bag with a cell at (0, 0): {(0, 0)} .extrude(2, 0) gives the bag with the cells (to the right): {(0, 0), (1, 0), (2, 0)} .extrude(0, -2) gives the bag with the cells (up): {(0, 0), (0, -1), (0, -2)} """ if dx < 0: dxs = list(range(0, dx - 1, -1)) else: dxs = list(range(0, dx + 1, +1)) if dy < 0: dys = list(range(0, dy - 1, -1)) else: dys = list(range(0, dy + 1, +1)) bag = Bag(table=self.table) for cell in self.unordered_cells: for i, j in product(dxs, dys): bag.add(self.table.get_at(cell.x + i, cell.y + j)._cell) return bag def same_row(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap - use Table.get_at() all_y = set() for cell in bag.unordered_cells: all_y.add(cell.y) return self.filter(lambda c: c.y in all_y) def same_col(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap all_x = set() for cell in bag.unordered_cells: all_x.add(cell.x) return self.filter(lambda c: c.x in all_x) def __getattr__(self, name): if name.startswith("is_not_"): return lambda: self.filter(lambda cell: not cell.properties[name[7:]]) if name.startswith("is_"): # might need additional layer of indirection return lambda: self.filter(lambda cell: cell.properties[name[3:]]) if name.endswith("_is_not"): return lambda value: self.filter(lambda cell: not cell.properties[name[:-7]] == value) if name.endswith("_is"): return lambda value: self.filter(lambda cell: cell.properties[name[:-3]] == value) raise AttributeError("Bag has no attribute {!r}".format(name)) class Table(Bag): """A bag which represents an entire sheet. Features indices to speed retrieval by coordinate. Also includes functions for importing tables into XYPath""" def __init__(self, name=""): super(Table, self).__init__(table=self) self._x_index = defaultdict(lambda: Bag(self)) self._y_index = defaultdict(lambda: Bag(self)) self._max_x = -1 self._max_y = -1 self.sheet = None self.name = name def __hash__(self): return id(self) def rows(self): """Get bags containing each row's cells, in order""" for row_num in range(0, self._max_y + 1): # inclusive yield self._y_index[row_num] def cols(self): """Get bags containing each column's cells, in order""" for col_num in range(0, self._max_x + 1): # inclusive yield self._x_index[col_num] def col(self, column): if isinstance(column, six.string_types): c_num = contrib_excel.excel_column_number(column, index=0) return self.col(c_num) else: assert isinstance(column, int) return self._x_index[column] def add(self, cell): """Under the hood: add a cell to a table and the table's indices. Used in the construction of a table.""" self._x_index[cell.x].add(cell) self._y_index[cell.y].add(cell) self._max_x = max(self._max_x, cell.x) self._max_y = max(self._max_y, cell.y) super(Table, self).add(cell) def get_at(self, x=None, y=None): """Directly get a singleton bag via indices. Faster than Bag.filter""" # we use .get() here to avoid new empty Bags being inserted # into the index stores when a non-existant coordinate is requested. assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x) assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y) if x is None and y is None: raise TypeError('get_at requires at least one x or y value') if x is None: return self._y_index.get(y, Bag(self)) if y is None: return self._x_index.get(x, Bag(self)) return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x) @staticmethod def from_filename(filename, table_name=None, table_index=None): """Wrapper around from_file_object to handle extension extraction""" # NOTE: this is a messytables table name extension = os.path.splitext(filename)[1].strip('.') with open(filename, 'rb') as f: return Table.from_file_object(f, extension, table_name=table_name, table_index=table_index) @staticmethod def from_file_object(fobj, extension='', table_name=None, table_index=None): """Load table from file object, you must specify a table's name or position number. If you don't know these, try from_messy.""" # NOTE this is a messytables table name if (table_name is not None and table_index is not None) or \ (table_name is None and table_index is None): raise TypeError("Must give exactly one of table_name, table_index") table_set = messytables.any.any_tableset(fobj, extension=extension) if table_name is not None: return Table.from_messy(table_set[table_name]) elif table_index is not None: return Table.from_messy(table_set.tables[table_index]) @staticmethod def from_messy(messy_rowset): """Import a rowset (table) from messytables, e.g. to work with each table in turn: tables = messytables.any.any_tableset(fobj) for mt_table in tables: xy_table = xypath.Table.from_messy(mt_table) ...""" assert isinstance(messy_rowset, messytables.core.RowSet),\ "Expected a RowSet, got a %r" % type(messy_rowset) new_table = Table.from_iterable( messy_rowset, value_func=lambda cell: cell.value, properties_func=lambda cell: cell.properties, name=messy_rowset.name) if hasattr(messy_rowset, 'sheet'): new_table.sheet = messy_rowset.sheet return new_table @staticmethod def from_iterable(table, value_func=lambda cell: cell, properties_func=lambda cell: {}, name=None): """Make a table from a pythonic table structure. The table must be an iterable which returns rows (in top-to-bottom order), which in turn are iterables which returns cells (in left-to-right order). value_func and properties_func specify how the cell maps onto an _XYCell's value and properties. The defaults assume that you have a straight-forward list of lists of values.""" new_table = Table(name=name) for y, row in enumerate(table): for x, cell in enumerate(row): new_table.add( _XYCell( value_func(cell), x, y, new_table, properties_func(cell))) return new_table @staticmethod def from_bag(bag, name=None): """Make a copy of a bag which is its own table. Useful when a single imported table is two logical tables""" if name is None: name=bag.table.name new_table = Table(name=name) for bag_cell in bag.unordered: new_table.add(bag_cell._cell.copy(new_table)) return new_table
""" Make a non-bag iterable of cells into a Bag. Some magic may be lost, especially if it's zero length. TODO: This should probably be part of the core __init__ class. TODO: Don't do a piece-by-piece insertion, just slap the whole listed iterable in, because this is slow. """ # TODO bag = Bag(table=None) for i, cell_bag in enumerate(cells): bag.add(cell_bag._cell) if i == 0: bag.table = cell_bag.table else: assert bag.table == cell_bag.table return bag
identifier_body
xypath.py
#!/usr/bin/env python """ musings on order of variables, x/y vs. col/row Everyone agrees that col 2, row 1 is (2,1) which is xy ordered. This works well with the name. Remember that the usual iterators (over a list-of-lists) is outer loop y first.""" from __future__ import absolute_import import re import messytables import os import six from six.moves import range from six.moves import zip try: import hamcrest have_ham = True except ImportError: have_ham = False import sys if sys.version_info >= (3, 6): import typing REGEX_PATTERN_TYPE = typing.Pattern else: REGEX_PATTERN_TYPE = re._pattern_type from collections import defaultdict from copy import copy from itertools import product, takewhile from xypath.contrib import excel as contrib_excel UP = (0, -1) RIGHT = (1, 0) DOWN = (0, 1) LEFT = (-1, 0) UP_RIGHT = (1, -1) DOWN_RIGHT = (1, 1) UP_LEFT = (-1, -1) DOWN_LEFT = (-1, 1) def cmp(x, y): if x<y: return -1 if x>y: return 1 return 0 class XYPathError(Exception): """Problems with spreadsheet layouts should raise this or a descendant.""" pass class JunctionError(RuntimeError, XYPathError): """Raised if paranoid _XYCell.junction finds it is returning one of the input cells - i.e. the input cells are in the same row or column""" pass class NoCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains zero cells.""" pass class MultipleCellsAssertionError(AssertionError, XYPathError): """Raised by Bag.assert_one() if the bag contains multiple cells.""" pass class LookupConfusionError(AssertionError, XYPathError): """Lookup found multiple equally-close headers""" pass class NoLookupError(AssertionError, XYPathError): """Lookup found no valid header""" pass def describe_filter_method(filter_by): if callable(filter_by): return "matching a function called {}".format(filter_by.__name__) if isinstance(filter_by, six.string_types): return "containing the string {!r}".format(filter_by) if have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return "containing "+str(filter_by) if isinstance(filter_by, REGEX_PATTERN_TYPE): return "matching the regex {!r}".format(filter_by.pattern) else: return "which we're surprised we found at all" class _XYCell(object): """needs to contain: value, position (x,y), parent bag""" __slots__ = ['value', 'x', 'y', 'table', 'properties'] def __init__(self, value, x, y, table, properties=None): self.value = value # of appropriate type self.x = x # column number self.y = y # row number self.table = table if properties is None: self.properties = {} else: self.properties = properties def __hash__(self): """ In order to make a set of cells (used in Bag), they *must* be hashable. An _XYCell is uniquely identified (by sets, etc) through its position, content, and parent table. Note that `properties` is ignored since dicts are unhashable, and value may be redundant. """ return hash((self.value, self.x, self.y, self.table)) def __eq__(self, rhs): """See _XYCell.__hash__ for equality conditions""" return hash(self) == hash(rhs) def copy(self, new_table=None): """Make a copy of the cell. Its table will be new_table, if specified""" if new_table is None: new_table = self.table return _XYCell(self.value, self.x, self.y, new_table, self.properties) def __repr__(self): return "_XYCell(%r, %r, %r)" % \ (self.value, self.x, self.y) def __unicode__(self): return six.text_type(self.value) def lookup(self, header_bag, direction, strict=False): """ Given a single cell (usually a value), a bag containing the headers of a particular type for that cell, and the direction in which to search for the relevant header e.g. for value cell V, searching up: [ ] [ ] [O] [ ] ---> [ ] V [ ] [ ] the cell with the arrow will be returned. Strict restricts the selection to cells in the same row/column as the value, so O is selected instead.""" def mult(cell): return cell.x * direction[0] + cell.y * direction[1] def same_row_col(a, b, direction): return (a.x - b.x == 0 and direction[0] == 0) or \ (a.y - b.y == 0 and direction[1] == 0) best_cell = None second_best_cell = None for target_cell in header_bag.unordered_cells: if mult(self) <= mult(target_cell): if not best_cell or mult(target_cell) <= mult(best_cell): if not strict or same_row_col(self, target_cell, direction): second_best_cell = best_cell best_cell = target_cell if second_best_cell and mult(best_cell) == mult(second_best_cell): raise LookupConfusionError("{!r} is as good as {!r} for {!r}".format( best_cell, second_best_cell, self)) if best_cell is None: raise NoLookupError("No lookup for {!r}".format(self)) return best_cell def junction(self, other, direction=DOWN, paranoid=True): """ gets the lower-right intersection of the row of one, and the column of the other. paranoid: should we panic if we're hitting one of our input cells?""" def junction_coord(cells, direction=DOWN): """ Under the hood: given two cells and a favoured direction, get the position of the cell with the column of one and the row of the other: A---->+ | ^ | | | | v | *<----B Both + and * are candidates for the junction of A and B - we take the one furthest down by default (specified by direction) >>> cells_dr = (_XYCell(0,1,2,None), _XYCell(0,3,4,None)) >>> junction_coord(cells_dr, DOWN) (1, 4) >>> junction_coord(cells_dr, UP) (3, 2) >>> junction_coord(cells_dr, LEFT) (1, 4) >>> junction_coord(cells_dr, RIGHT) (3, 2) >>> cells_tr = (_XYCell(0,1,4,None), _XYCell(0,3,2,None)) >>> junction_coord(cells_tr, DOWN) (3, 4) >>> junction_coord(cells_tr, UP) (1, 2) >>> junction_coord(cells_tr, LEFT) (1, 2) >>> junction_coord(cells_tr, RIGHT) (3, 4) """ new_cells = ( (cells[0].x, cells[1].y), (cells[1].x, cells[0].y) ) for index, value in enumerate(direction): if value == 0: continue if cmp(new_cells[0][index], new_cells[1][index]) == value: return new_cells[0] else: return new_cells[1] (x, y) = junction_coord((self, other), direction) if paranoid and (x, y) == (self.x, self.y) or \ (x, y) == (other.x, other.y): raise JunctionError( "_XYCell.junction(_XYCell) resulted in a cell which is equal" " to one of the input cells.\n" " self: {}\n other: {}\n x: {}\n y: {}".format( self, other, x, y)) junction_bag = self.table.get_at(x, y) if len(junction_bag) == 0: return self_bag = Bag(self.table) self_bag.add(self) other_bag = Bag(self.table) other_bag.add(other) yield (self_bag, other_bag, junction_bag) def shift(self, x=0, y=0): """Get the cell which is offset from this cell by x columns, y rows""" if not isinstance(x, int): assert y == 0, \ "_XYCell.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) return self.table.get_at(self.x + x, self.y + y)._cell class CoreBag(object): """Has a collection of _XYCells""" def pprint(self, *args, **kwargs): return contrib_excel.pprint(self, *args, **kwargs) def as_list(self, *args, **kwargs): return contrib_excel.as_list(self, *args, **kwargs) def filter_one(self, filter_by): return contrib_excel.filter_one(self, filter_by) def excel_locations(self, *args, **kwargs): return contrib_excel.excel_locations(self, *args, **kwargs) def __init__(self, table): self.__store = set() self.table = table def add(self, cell): """Add a cell to this bag""" if not isinstance(cell, _XYCell): raise TypeError("Can only add _XYCell types to Bags: {}".format( cell.__class__)) self.__store.add(cell) def __eq__(self, other): """Compare two bags: they are equal if: * their table are the same table (object) * they contain the same set of cells""" if not isinstance(other, CoreBag): return False return (self.table is other.table and self.__store == other.__store) def __len__(self): return len(self.__store) def __repr__(self): return repr(self.__store) @classmethod def singleton(cls, cell, table): """ Construct a bag with one cell in it """ bag = cls(table=table) bag.add(cell) return bag @property def unordered(self): """ Obtain an unordered iterator over this bag. iter(bag) is sorted on demand, and therefore inefficient if being done repeatedly where order does not matter. """ return (Bag.singleton(c, table=self.table) for c in self.__store) @property def unordered_cells(self): """ Analogous to the `unordered` property, except that it returns _XYCells instead of Bags. """ return iter(self.__store) def __iter__(self): """ Return a view of the cells in this back in left-right, top-bottom order Note: this is expensive for large bags (when done repeatedly). If you don't care about order, use `bag.unordered`, which gives an unordered iterator. """ def yx(cell): return cell.y, cell.x for cell in sorted(self.__store, key=yx): yield Bag.singleton(cell, table=self.table) def __sub__(self, rhs): """Bags quack like sets. Implements - operator.""" return self.difference(rhs) def difference(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table,\ "Can't difference bags from separate tables" new = copy(self) new.__store = self.__store.difference(rhs.__store) return new def __or__(self, rhs): """Bags quack like sets. Implements | operator. For mathematical purity, + (__add__) isn't appropriate""" return self.union(rhs) def union(self, rhs): """Bags quack like sets.""" assert self.table is rhs.table, "Can't union bags from separate tables" new = copy(self) new.__store = self.__store.union(rhs.__store) return new def __and__(self, rhs): return self.intersection(rhs) def intersection(self, rhs): assert self.table is rhs.table, \ "Can't take intersection of bags from separate tables" new = copy(self) new.__store = self.__store.intersection(rhs.__store) return new def select(self, function): """Select cells from this bag's table based on the cells in this bag. e.g. bag.select(lambda bag_cell, table_cell: bag_cell.y == table_cell.y and bag_cell.value == table_cell.value) would give cells in the table with the same name on the same row as a cell in the bag""" return self.table.select_other(function, self) def select_other(self, function, other): """A more general version of select, where another bag to select from is explicitly specified rather than using the original bag's table""" """note: self.select(f) = self.table.select_other(f, self)""" newbag = Bag(table=self.table) for bag_cell in self.__store: for other_cell in other.__store:
return newbag def filter(self, filter_by): """ Returns a new bag containing only cells which match the filter_by predicate. filter_by can be: a) a callable, which takes a cell as a parameter and returns True if the cell should be returned, such as `lambda cell: cell value == 'dog' b) a string, to match exactly: `u'dog'` c) a hamcrest match rule: `hamcrest.equal_to("dog") (requires hamcrest to be available) d) a compiled regex: `re.compile("dog") """ if callable(filter_by): return self._filter_internal(filter_by) elif isinstance(filter_by, six.string_types): return self._filter_internal(lambda cell: six.text_type(cell.value).strip() == filter_by) elif have_ham and isinstance(filter_by, hamcrest.matcher.Matcher): return self._filter_internal(lambda cell: filter_by.matches(cell.value)) elif isinstance(filter_by, REGEX_PATTERN_TYPE): return self._filter_internal( lambda cell: re.match(filter_by, six.text_type(cell.value))) else: raise ValueError("filter_by must be function, hamcrest filter, compiled regex or string.") def _filter_internal(self, function): newbag = Bag(table=self.table) for bag_cell in self.unordered_cells: if function(bag_cell): newbag.add(bag_cell) return newbag def assert_one(self, message="assert_one() : {} cells in bag, not 1"): """Chainable: raise an error if the bag contains 0 or 2+ cells. Otherwise returns the original (singleton) bag unchanged.""" if len(self.__store) == 1: return self elif len(self.__store) == 0: raise NoCellsAssertionError( message.format( len(self.__store) ) ) elif len(self.__store) > 1: raise MultipleCellsAssertionError( message.format( len(self.__store) ) ) @property def _cell(self): """Under the hood: get the cell inside a singleton bag. It's an error for it to not contain precisely one cell.""" try: xycell = list(self.assert_one().__store)[0] except AssertionError: l = len(list(self.__store)) raise XYPathError("Can't use multicell bag as cell: (len %r)" % l) else: assert isinstance(xycell, _XYCell) return xycell @property def value(self): """Getter for singleton's cell value""" return self._cell.value @property def x(self): """Getter for singleton's cell column number""" return self._cell.x @property def y(self): """Getter for singleton's cell row number""" return self._cell.y @property def properties(self): """Getter for singleton's cell properties""" return self._cell.properties class Bag(CoreBag): @staticmethod def from_list(cells): """ Make a non-bag iterable of cells into a Bag. Some magic may be lost, especially if it's zero length. TODO: This should probably be part of the core __init__ class. TODO: Don't do a piece-by-piece insertion, just slap the whole listed iterable in, because this is slow. """ # TODO bag = Bag(table=None) for i, cell_bag in enumerate(cells): bag.add(cell_bag._cell) if i == 0: bag.table = cell_bag.table else: assert bag.table == cell_bag.table return bag def expand(self, direction, stop_before=None): return self.fill(direction, stop_before=stop_before) | self def fill(self, direction, stop_before=None): """Should give the same output as fill, except it doesn't support non-cardinal directions or stop_before. Twenty times faster than fill in test_ravel.""" if direction in (UP_RIGHT, DOWN_RIGHT, UP_LEFT, UP_RIGHT): return self._fill(direction, stop_before) def what_to_get(cell): """converts bag coordinates into thing to pass to get_at""" cell_coord = (cell.x, cell.y) retval = [] for cell_coord, direction_coord in zip(cell_coord, direction): if direction_coord != 0: retval.append(None) else: retval.append(cell_coord) return tuple(retval) # TODO yuck if direction not in (UP, RIGHT, DOWN, LEFT): raise ValueError("Must be a cardinal direction!") ### this is what same_row/col should look like! small_table = None for cell in self.unordered_cells: got_rowcol = self.table.get_at(*what_to_get(cell)) if small_table: small_table = small_table.union(got_rowcol) else: small_table = got_rowcol if small_table is None: small_table = Bag(table=self.table) # now we use the small_table as if it was the table. (left_right, up_down) = direction bag = small_table.select_other( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down, self ) if stop_before is not None: return bag.stop_before(stop_before) else: return bag def stop_before(self, stop_function): """Assumes the data is: * in a single row or column * proceeding either downwards or rightwards """ return Bag.from_list(list( takewhile(lambda c: not stop_function(c), self))) def _fill(self, direction, stop_before=None): """ If the bag contains only one cell, select all cells in the direction given, excluding the original cell. For example, from a column heading cell, you can "fill down" to get all the values underneath it. If you provide a stop_before function, it will be called on each cell as a stop condition. For example, if you provide a stop_before function which tests cell.value for an empty string. This would stop the fill function before it reaches the bottom of the sheet, for example. """ raise DeprecationWarning("2D fill is deprecated. Yell if you need it.") if direction not in (UP, RIGHT, DOWN, LEFT, UP_RIGHT, DOWN_RIGHT, UP_LEFT, DOWN_LEFT): raise ValueError("Invalid direction! Use one of UP, RIGHT, " "DOWN_RIGHT etc") (left_right, up_down) = direction bag = self.select( lambda table, bag: cmp(table.x, bag.x) == left_right and cmp(table.y, bag.y) == up_down ) if stop_before is not None: # NOTE(PMF): stop_before is limited to singleton bags, in the DOWN # or RIGHT direction. This isn't ideal, but with the above "magic" # cmp code I can't think of an elegant general way of doing this. I # also can't imagine what it means to run fill in multiple # directions, or with non singleton bags. TODO: Constrain? if direction not in (DOWN, RIGHT): raise ValueError("Oops, stop_before only works down or right!") self.assert_one("You can't use stop_before for bags with more than" " one cell inside.") return Bag.from_list(list( takewhile(lambda c: not stop_before(c), bag))) return bag def junction(self, other, *args, **kwargs): """For every combination of pairs of cells from this bag and the other bag, get the cell that is at the same row as one of them, and column as the other. There are two: so we specify a direction to say which one wins (in the cell-based version of this function) - defaulting to the one furthest down""" if not isinstance(other, CoreBag): raise TypeError( "Bag.junction() called with invalid type {}, must be " "(Core)Bag".format(other.__class__)) # Generate ordered lists of dimension cells exactly once (avoid doing # it in the inner loop because of the sorted() in __iter__) self_cells = list(self) other_cells = list(other) for self_cell in self_cells: for other_cell in other_cells: assert self_cell._cell.__class__ == other_cell._cell.__class__ for triple in self_cell._cell.junction(other_cell._cell, *args, **kwargs): yield triple def waffle(self, other, *args, **kwargs): bag = Bag(table=self.table) for (selfbag, otherbag, junction_cell) in self.junction(other, *args, **kwargs): bag.add(junction_cell._cell) return bag def shift(self, x=0, y=0): """ Return a bag in which each cell is offset from the source bag by the coordinates specified. Coordinates can be specified as: Bag.shift(0,2) - full specification Bag.shift(y=2) - partial specification Bag.shift((0,2)) - use of tuple for x, unspecified y """ if not isinstance(x, int): assert y == 0, \ "Bag.shift: x=%r not integer and y=%r specified" % (x, y) return self.shift(x[0], x[1]) bag = Bag(table=self.table) for b_cell in self.unordered: t_cell = self.table.get_at(b_cell.x + x, b_cell.y + y).assert_one() bag.add(t_cell._cell) return bag def extrude(self, dx, dy): """ Extrude all cells in the bag by (dx, dy), by looking For example, given the bag with a cell at (0, 0): {(0, 0)} .extrude(2, 0) gives the bag with the cells (to the right): {(0, 0), (1, 0), (2, 0)} .extrude(0, -2) gives the bag with the cells (up): {(0, 0), (0, -1), (0, -2)} """ if dx < 0: dxs = list(range(0, dx - 1, -1)) else: dxs = list(range(0, dx + 1, +1)) if dy < 0: dys = list(range(0, dy - 1, -1)) else: dys = list(range(0, dy + 1, +1)) bag = Bag(table=self.table) for cell in self.unordered_cells: for i, j in product(dxs, dys): bag.add(self.table.get_at(cell.x + i, cell.y + j)._cell) return bag def same_row(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap - use Table.get_at() all_y = set() for cell in bag.unordered_cells: all_y.add(cell.y) return self.filter(lambda c: c.y in all_y) def same_col(self, bag): """ Select cells in this bag which are in the same row as a cell in the other `bag`. """ # TODO: make less crap all_x = set() for cell in bag.unordered_cells: all_x.add(cell.x) return self.filter(lambda c: c.x in all_x) def __getattr__(self, name): if name.startswith("is_not_"): return lambda: self.filter(lambda cell: not cell.properties[name[7:]]) if name.startswith("is_"): # might need additional layer of indirection return lambda: self.filter(lambda cell: cell.properties[name[3:]]) if name.endswith("_is_not"): return lambda value: self.filter(lambda cell: not cell.properties[name[:-7]] == value) if name.endswith("_is"): return lambda value: self.filter(lambda cell: cell.properties[name[:-3]] == value) raise AttributeError("Bag has no attribute {!r}".format(name)) class Table(Bag): """A bag which represents an entire sheet. Features indices to speed retrieval by coordinate. Also includes functions for importing tables into XYPath""" def __init__(self, name=""): super(Table, self).__init__(table=self) self._x_index = defaultdict(lambda: Bag(self)) self._y_index = defaultdict(lambda: Bag(self)) self._max_x = -1 self._max_y = -1 self.sheet = None self.name = name def __hash__(self): return id(self) def rows(self): """Get bags containing each row's cells, in order""" for row_num in range(0, self._max_y + 1): # inclusive yield self._y_index[row_num] def cols(self): """Get bags containing each column's cells, in order""" for col_num in range(0, self._max_x + 1): # inclusive yield self._x_index[col_num] def col(self, column): if isinstance(column, six.string_types): c_num = contrib_excel.excel_column_number(column, index=0) return self.col(c_num) else: assert isinstance(column, int) return self._x_index[column] def add(self, cell): """Under the hood: add a cell to a table and the table's indices. Used in the construction of a table.""" self._x_index[cell.x].add(cell) self._y_index[cell.y].add(cell) self._max_x = max(self._max_x, cell.x) self._max_y = max(self._max_y, cell.y) super(Table, self).add(cell) def get_at(self, x=None, y=None): """Directly get a singleton bag via indices. Faster than Bag.filter""" # we use .get() here to avoid new empty Bags being inserted # into the index stores when a non-existant coordinate is requested. assert isinstance(x, int) or x is None, "get_at takes integers (got {!r})".format(x) assert isinstance(y, int) or y is None, "get_at takes integers (got {!r})".format(y) if x is None and y is None: raise TypeError('get_at requires at least one x or y value') if x is None: return self._y_index.get(y, Bag(self)) if y is None: return self._x_index.get(x, Bag(self)) return self._y_index.get((y), Bag(self)).filter(lambda cell: cell.x==x) @staticmethod def from_filename(filename, table_name=None, table_index=None): """Wrapper around from_file_object to handle extension extraction""" # NOTE: this is a messytables table name extension = os.path.splitext(filename)[1].strip('.') with open(filename, 'rb') as f: return Table.from_file_object(f, extension, table_name=table_name, table_index=table_index) @staticmethod def from_file_object(fobj, extension='', table_name=None, table_index=None): """Load table from file object, you must specify a table's name or position number. If you don't know these, try from_messy.""" # NOTE this is a messytables table name if (table_name is not None and table_index is not None) or \ (table_name is None and table_index is None): raise TypeError("Must give exactly one of table_name, table_index") table_set = messytables.any.any_tableset(fobj, extension=extension) if table_name is not None: return Table.from_messy(table_set[table_name]) elif table_index is not None: return Table.from_messy(table_set.tables[table_index]) @staticmethod def from_messy(messy_rowset): """Import a rowset (table) from messytables, e.g. to work with each table in turn: tables = messytables.any.any_tableset(fobj) for mt_table in tables: xy_table = xypath.Table.from_messy(mt_table) ...""" assert isinstance(messy_rowset, messytables.core.RowSet),\ "Expected a RowSet, got a %r" % type(messy_rowset) new_table = Table.from_iterable( messy_rowset, value_func=lambda cell: cell.value, properties_func=lambda cell: cell.properties, name=messy_rowset.name) if hasattr(messy_rowset, 'sheet'): new_table.sheet = messy_rowset.sheet return new_table @staticmethod def from_iterable(table, value_func=lambda cell: cell, properties_func=lambda cell: {}, name=None): """Make a table from a pythonic table structure. The table must be an iterable which returns rows (in top-to-bottom order), which in turn are iterables which returns cells (in left-to-right order). value_func and properties_func specify how the cell maps onto an _XYCell's value and properties. The defaults assume that you have a straight-forward list of lists of values.""" new_table = Table(name=name) for y, row in enumerate(table): for x, cell in enumerate(row): new_table.add( _XYCell( value_func(cell), x, y, new_table, properties_func(cell))) return new_table @staticmethod def from_bag(bag, name=None): """Make a copy of a bag which is its own table. Useful when a single imported table is two logical tables""" if name is None: name=bag.table.name new_table = Table(name=name) for bag_cell in bag.unordered: new_table.add(bag_cell._cell.copy(new_table)) return new_table
if function(bag_cell, other_cell): newbag.add(bag_cell) break
conditional_block
cast_lossless.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::in_constant; use clippy_utils::source::snippet_opt; use clippy_utils::ty::is_isize_or_usize; use rustc_errors::Applicability; use rustc_hir::{Expr, ExprKind}; use rustc_lint::LateContext; use rustc_middle::ty::{self, FloatTy, Ty}; use super::{utils, CAST_LOSSLESS}; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { if !should_lint(cx, expr, cast_from, cast_to) { return; } // The suggestion is to use a function call, so if the original expression // has parens on the outside, they are no longer needed. let mut applicability = Applicability::MachineApplicable; let opt = snippet_opt(cx, cast_op.span); let sugg = opt.as_ref().map_or_else( || { applicability = Applicability::HasPlaceholders; ".." }, |snip| { if should_strip_parens(cast_op, snip) { &snip[1..snip.len() - 1] } else { snip.as_str() } }, ); span_lint_and_sugg( cx, CAST_LOSSLESS, expr.span, &format!( "casting `{}` to `{}` may become silently lossy if you later change the type", cast_from, cast_to ), "try", format!("{}::from({})", cast_to, sugg), applicability, ); } fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool { // Do not suggest using From in consts/statics until it is valid to do so (see #2267). if in_constant(cx, expr.hir_id) { return false; } match (cast_from.is_integral(), cast_to.is_integral()) { (true, true) => { let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed(); let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx); !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned }, (true, false) => { let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() { 32 } else { 64 }; from_nbits < to_nbits }, (_, _) => { matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64)) }, } } fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool { if let ExprKind::Binary(_, _, _) = cast_expr.kind
false }
{ if snip.starts_with('(') && snip.ends_with(')') { return true; } }
conditional_block
cast_lossless.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::in_constant; use clippy_utils::source::snippet_opt; use clippy_utils::ty::is_isize_or_usize; use rustc_errors::Applicability; use rustc_hir::{Expr, ExprKind}; use rustc_lint::LateContext; use rustc_middle::ty::{self, FloatTy, Ty}; use super::{utils, CAST_LOSSLESS}; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { if !should_lint(cx, expr, cast_from, cast_to) { return; } // The suggestion is to use a function call, so if the original expression // has parens on the outside, they are no longer needed. let mut applicability = Applicability::MachineApplicable; let opt = snippet_opt(cx, cast_op.span); let sugg = opt.as_ref().map_or_else( || { applicability = Applicability::HasPlaceholders; ".." }, |snip| { if should_strip_parens(cast_op, snip) { &snip[1..snip.len() - 1] } else { snip.as_str() } }, ); span_lint_and_sugg( cx, CAST_LOSSLESS, expr.span, &format!( "casting `{}` to `{}` may become silently lossy if you later change the type", cast_from, cast_to ), "try", format!("{}::from({})", cast_to, sugg), applicability, ); } fn
(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool { // Do not suggest using From in consts/statics until it is valid to do so (see #2267). if in_constant(cx, expr.hir_id) { return false; } match (cast_from.is_integral(), cast_to.is_integral()) { (true, true) => { let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed(); let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx); !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned }, (true, false) => { let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() { 32 } else { 64 }; from_nbits < to_nbits }, (_, _) => { matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64)) }, } } fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool { if let ExprKind::Binary(_, _, _) = cast_expr.kind { if snip.starts_with('(') && snip.ends_with(')') { return true; } } false }
should_lint
identifier_name
cast_lossless.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::in_constant; use clippy_utils::source::snippet_opt; use clippy_utils::ty::is_isize_or_usize; use rustc_errors::Applicability; use rustc_hir::{Expr, ExprKind}; use rustc_lint::LateContext; use rustc_middle::ty::{self, FloatTy, Ty}; use super::{utils, CAST_LOSSLESS}; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { if !should_lint(cx, expr, cast_from, cast_to) { return; } // The suggestion is to use a function call, so if the original expression // has parens on the outside, they are no longer needed. let mut applicability = Applicability::MachineApplicable; let opt = snippet_opt(cx, cast_op.span); let sugg = opt.as_ref().map_or_else( || { applicability = Applicability::HasPlaceholders; ".." }, |snip| { if should_strip_parens(cast_op, snip) { &snip[1..snip.len() - 1] } else { snip.as_str()
span_lint_and_sugg( cx, CAST_LOSSLESS, expr.span, &format!( "casting `{}` to `{}` may become silently lossy if you later change the type", cast_from, cast_to ), "try", format!("{}::from({})", cast_to, sugg), applicability, ); } fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool { // Do not suggest using From in consts/statics until it is valid to do so (see #2267). if in_constant(cx, expr.hir_id) { return false; } match (cast_from.is_integral(), cast_to.is_integral()) { (true, true) => { let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed(); let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx); !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned }, (true, false) => { let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() { 32 } else { 64 }; from_nbits < to_nbits }, (_, _) => { matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64)) }, } } fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool { if let ExprKind::Binary(_, _, _) = cast_expr.kind { if snip.starts_with('(') && snip.ends_with(')') { return true; } } false }
} }, );
random_line_split
cast_lossless.rs
use clippy_utils::diagnostics::span_lint_and_sugg; use clippy_utils::in_constant; use clippy_utils::source::snippet_opt; use clippy_utils::ty::is_isize_or_usize; use rustc_errors::Applicability; use rustc_hir::{Expr, ExprKind}; use rustc_lint::LateContext; use rustc_middle::ty::{self, FloatTy, Ty}; use super::{utils, CAST_LOSSLESS}; pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_op: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) { if !should_lint(cx, expr, cast_from, cast_to) { return; } // The suggestion is to use a function call, so if the original expression // has parens on the outside, they are no longer needed. let mut applicability = Applicability::MachineApplicable; let opt = snippet_opt(cx, cast_op.span); let sugg = opt.as_ref().map_or_else( || { applicability = Applicability::HasPlaceholders; ".." }, |snip| { if should_strip_parens(cast_op, snip) { &snip[1..snip.len() - 1] } else { snip.as_str() } }, ); span_lint_and_sugg( cx, CAST_LOSSLESS, expr.span, &format!( "casting `{}` to `{}` may become silently lossy if you later change the type", cast_from, cast_to ), "try", format!("{}::from({})", cast_to, sugg), applicability, ); } fn should_lint(cx: &LateContext<'_>, expr: &Expr<'_>, cast_from: Ty<'_>, cast_to: Ty<'_>) -> bool
fn should_strip_parens(cast_expr: &Expr<'_>, snip: &str) -> bool { if let ExprKind::Binary(_, _, _) = cast_expr.kind { if snip.starts_with('(') && snip.ends_with(')') { return true; } } false }
{ // Do not suggest using From in consts/statics until it is valid to do so (see #2267). if in_constant(cx, expr.hir_id) { return false; } match (cast_from.is_integral(), cast_to.is_integral()) { (true, true) => { let cast_signed_to_unsigned = cast_from.is_signed() && !cast_to.is_signed(); let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx); !is_isize_or_usize(cast_from) && !is_isize_or_usize(cast_to) && from_nbits < to_nbits && !cast_signed_to_unsigned }, (true, false) => { let from_nbits = utils::int_ty_to_nbits(cast_from, cx.tcx); let to_nbits = if let ty::Float(FloatTy::F32) = cast_to.kind() { 32 } else { 64 }; from_nbits < to_nbits }, (_, _) => { matches!(cast_from.kind(), ty::Float(FloatTy::F32)) && matches!(cast_to.kind(), ty::Float(FloatTy::F64)) }, } }
identifier_body
minify.js
"use strict"; var to_ascii = typeof atob == "undefined" ? function(b64) { return new Buffer(b64, "base64").toString(); } : atob; var to_base64 = typeof btoa == "undefined" ? function(str) { return new Buffer(str).toString("base64"); } : btoa; function
(code) { var match = /\n\/\/# sourceMappingURL=data:application\/json(;.*?)?;base64,(.*)/.exec(code); if (!match) { AST_Node.warn("inline source map not found"); return null; } return to_ascii(match[2]); } function set_shorthand(name, options, keys) { if (options[name]) { keys.forEach(function(key) { if (options[key]) { if (typeof options[key] != "object") options[key] = {}; if (!(name in options[key])) options[key][name] = options[name]; } }); } } function minify(files, options) { var warn_function = AST_Node.warn_function; try { if (typeof files == "string") { files = [ files ]; } options = defaults(options, { compress: {}, ie8: false, keep_fnames: false, mangle: {}, output: {}, parse: {}, sourceMap: false, toplevel: false, warnings: false, wrap: false, }, true); set_shorthand("ie8", options, [ "compress", "mangle", "output" ]); set_shorthand("keep_fnames", options, [ "compress", "mangle" ]); set_shorthand("toplevel", options, [ "compress", "mangle" ]); set_shorthand("warnings", options, [ "compress" ]); if (options.mangle) { options.mangle = defaults(options.mangle, { cache: null, eval: false, ie8: false, keep_classnames: false, keep_fnames: false, properties: false, reserved: [], safari10: false, toplevel: false, }, true); } if (options.sourceMap) { options.sourceMap = defaults(options.sourceMap, { content: null, filename: null, includeSources: false, root: null, url: null, }, true); } var warnings = []; if (options.warnings && !AST_Node.warn_function) { AST_Node.warn_function = function(warning) { warnings.push(warning); }; } var toplevel; if (files instanceof AST_Toplevel) { toplevel = files; } else { options.parse = options.parse || {}; options.parse.toplevel = null; for (var name in files) { options.parse.filename = name; options.parse.toplevel = parse(files[name], options.parse); if (options.sourceMap && options.sourceMap.content == "inline") { if (Object.keys(files).length > 1) throw new Error("inline source map only works with singular input"); options.sourceMap.content = read_source_map(files[name]); } } toplevel = options.parse.toplevel; } if (options.wrap) { toplevel = toplevel.wrap_commonjs(options.wrap); } if (options.compress) { toplevel.figure_out_scope(options.mangle); toplevel = new Compressor(options.compress).compress(toplevel); } if (options.mangle) { toplevel.figure_out_scope(options.mangle); base54.reset(); toplevel.compute_char_frequency(options.mangle); toplevel.mangle_names(options.mangle); if (options.mangle.properties) { toplevel = mangle_properties(toplevel, options.mangle.properties); } } var result = {}; if (options.output.ast) { result.ast = toplevel; } if (!HOP(options.output, "code") || options.output.code) { if (options.sourceMap) { if (typeof options.sourceMap.content == "string") { options.sourceMap.content = JSON.parse(options.sourceMap.content); } options.output.source_map = SourceMap({ file: options.sourceMap.filename, orig: options.sourceMap.content, root: options.sourceMap.root }); if (options.sourceMap.includeSources) { for (var name in files) { options.output.source_map.get().setSourceContent(name, files[name]); } } } delete options.output.ast; delete options.output.code; var stream = OutputStream(options.output); toplevel.print(stream); result.code = stream.get(); if (options.sourceMap) { result.map = options.output.source_map.toString(); if (options.sourceMap.url == "inline") { result.code += "\n//# sourceMappingURL=data:application/json;charset=utf-8;base64," + to_base64(result.map); } else if (options.sourceMap.url) { result.code += "\n//# sourceMappingURL=" + options.sourceMap.url; } } } if (warnings.length) { result.warnings = warnings; } return result; } catch (ex) { return { error: ex }; } finally { AST_Node.warn_function = warn_function; } }
read_source_map
identifier_name
minify.js
"use strict"; var to_ascii = typeof atob == "undefined" ? function(b64) { return new Buffer(b64, "base64").toString(); } : atob; var to_base64 = typeof btoa == "undefined" ? function(str) { return new Buffer(str).toString("base64"); } : btoa; function read_source_map(code) { var match = /\n\/\/# sourceMappingURL=data:application\/json(;.*?)?;base64,(.*)/.exec(code); if (!match) { AST_Node.warn("inline source map not found"); return null; } return to_ascii(match[2]); } function set_shorthand(name, options, keys) { if (options[name]) { keys.forEach(function(key) { if (options[key]) { if (typeof options[key] != "object") options[key] = {}; if (!(name in options[key])) options[key][name] = options[name]; } }); } } function minify(files, options) { var warn_function = AST_Node.warn_function; try { if (typeof files == "string") { files = [ files ]; } options = defaults(options, { compress: {}, ie8: false, keep_fnames: false, mangle: {}, output: {}, parse: {}, sourceMap: false, toplevel: false, warnings: false, wrap: false, }, true); set_shorthand("ie8", options, [ "compress", "mangle", "output" ]); set_shorthand("keep_fnames", options, [ "compress", "mangle" ]); set_shorthand("toplevel", options, [ "compress", "mangle" ]); set_shorthand("warnings", options, [ "compress" ]); if (options.mangle) { options.mangle = defaults(options.mangle, { cache: null, eval: false, ie8: false, keep_classnames: false, keep_fnames: false, properties: false, reserved: [], safari10: false, toplevel: false, }, true); } if (options.sourceMap) { options.sourceMap = defaults(options.sourceMap, { content: null, filename: null, includeSources: false, root: null, url: null, }, true); } var warnings = []; if (options.warnings && !AST_Node.warn_function) { AST_Node.warn_function = function(warning) { warnings.push(warning); }; } var toplevel; if (files instanceof AST_Toplevel)
else { options.parse = options.parse || {}; options.parse.toplevel = null; for (var name in files) { options.parse.filename = name; options.parse.toplevel = parse(files[name], options.parse); if (options.sourceMap && options.sourceMap.content == "inline") { if (Object.keys(files).length > 1) throw new Error("inline source map only works with singular input"); options.sourceMap.content = read_source_map(files[name]); } } toplevel = options.parse.toplevel; } if (options.wrap) { toplevel = toplevel.wrap_commonjs(options.wrap); } if (options.compress) { toplevel.figure_out_scope(options.mangle); toplevel = new Compressor(options.compress).compress(toplevel); } if (options.mangle) { toplevel.figure_out_scope(options.mangle); base54.reset(); toplevel.compute_char_frequency(options.mangle); toplevel.mangle_names(options.mangle); if (options.mangle.properties) { toplevel = mangle_properties(toplevel, options.mangle.properties); } } var result = {}; if (options.output.ast) { result.ast = toplevel; } if (!HOP(options.output, "code") || options.output.code) { if (options.sourceMap) { if (typeof options.sourceMap.content == "string") { options.sourceMap.content = JSON.parse(options.sourceMap.content); } options.output.source_map = SourceMap({ file: options.sourceMap.filename, orig: options.sourceMap.content, root: options.sourceMap.root }); if (options.sourceMap.includeSources) { for (var name in files) { options.output.source_map.get().setSourceContent(name, files[name]); } } } delete options.output.ast; delete options.output.code; var stream = OutputStream(options.output); toplevel.print(stream); result.code = stream.get(); if (options.sourceMap) { result.map = options.output.source_map.toString(); if (options.sourceMap.url == "inline") { result.code += "\n//# sourceMappingURL=data:application/json;charset=utf-8;base64," + to_base64(result.map); } else if (options.sourceMap.url) { result.code += "\n//# sourceMappingURL=" + options.sourceMap.url; } } } if (warnings.length) { result.warnings = warnings; } return result; } catch (ex) { return { error: ex }; } finally { AST_Node.warn_function = warn_function; } }
{ toplevel = files; }
conditional_block
minify.js
"use strict"; var to_ascii = typeof atob == "undefined" ? function(b64) { return new Buffer(b64, "base64").toString(); } : atob; var to_base64 = typeof btoa == "undefined" ? function(str) { return new Buffer(str).toString("base64"); } : btoa; function read_source_map(code) { var match = /\n\/\/# sourceMappingURL=data:application\/json(;.*?)?;base64,(.*)/.exec(code); if (!match) { AST_Node.warn("inline source map not found"); return null; } return to_ascii(match[2]); } function set_shorthand(name, options, keys) { if (options[name]) { keys.forEach(function(key) { if (options[key]) { if (typeof options[key] != "object") options[key] = {}; if (!(name in options[key])) options[key][name] = options[name]; } }); } } function minify(files, options) { var warn_function = AST_Node.warn_function; try { if (typeof files == "string") { files = [ files ]; } options = defaults(options, { compress: {}, ie8: false, keep_fnames: false, mangle: {}, output: {}, parse: {}, sourceMap: false, toplevel: false, warnings: false, wrap: false, }, true); set_shorthand("ie8", options, [ "compress", "mangle", "output" ]); set_shorthand("keep_fnames", options, [ "compress", "mangle" ]); set_shorthand("toplevel", options, [ "compress", "mangle" ]); set_shorthand("warnings", options, [ "compress" ]); if (options.mangle) { options.mangle = defaults(options.mangle, { cache: null, eval: false, ie8: false, keep_classnames: false, keep_fnames: false, properties: false, reserved: [], safari10: false, toplevel: false, }, true); } if (options.sourceMap) { options.sourceMap = defaults(options.sourceMap, { content: null, filename: null, includeSources: false, root: null, url: null, }, true); } var warnings = []; if (options.warnings && !AST_Node.warn_function) { AST_Node.warn_function = function(warning) { warnings.push(warning); }; } var toplevel; if (files instanceof AST_Toplevel) { toplevel = files; } else { options.parse = options.parse || {}; options.parse.toplevel = null; for (var name in files) { options.parse.filename = name; options.parse.toplevel = parse(files[name], options.parse); if (options.sourceMap && options.sourceMap.content == "inline") { if (Object.keys(files).length > 1) throw new Error("inline source map only works with singular input"); options.sourceMap.content = read_source_map(files[name]); } } toplevel = options.parse.toplevel; } if (options.wrap) { toplevel = toplevel.wrap_commonjs(options.wrap); } if (options.compress) { toplevel.figure_out_scope(options.mangle); toplevel = new Compressor(options.compress).compress(toplevel);
} if (options.mangle) { toplevel.figure_out_scope(options.mangle); base54.reset(); toplevel.compute_char_frequency(options.mangle); toplevel.mangle_names(options.mangle); if (options.mangle.properties) { toplevel = mangle_properties(toplevel, options.mangle.properties); } } var result = {}; if (options.output.ast) { result.ast = toplevel; } if (!HOP(options.output, "code") || options.output.code) { if (options.sourceMap) { if (typeof options.sourceMap.content == "string") { options.sourceMap.content = JSON.parse(options.sourceMap.content); } options.output.source_map = SourceMap({ file: options.sourceMap.filename, orig: options.sourceMap.content, root: options.sourceMap.root }); if (options.sourceMap.includeSources) { for (var name in files) { options.output.source_map.get().setSourceContent(name, files[name]); } } } delete options.output.ast; delete options.output.code; var stream = OutputStream(options.output); toplevel.print(stream); result.code = stream.get(); if (options.sourceMap) { result.map = options.output.source_map.toString(); if (options.sourceMap.url == "inline") { result.code += "\n//# sourceMappingURL=data:application/json;charset=utf-8;base64," + to_base64(result.map); } else if (options.sourceMap.url) { result.code += "\n//# sourceMappingURL=" + options.sourceMap.url; } } } if (warnings.length) { result.warnings = warnings; } return result; } catch (ex) { return { error: ex }; } finally { AST_Node.warn_function = warn_function; } }
random_line_split
minify.js
"use strict"; var to_ascii = typeof atob == "undefined" ? function(b64) { return new Buffer(b64, "base64").toString(); } : atob; var to_base64 = typeof btoa == "undefined" ? function(str) { return new Buffer(str).toString("base64"); } : btoa; function read_source_map(code) { var match = /\n\/\/# sourceMappingURL=data:application\/json(;.*?)?;base64,(.*)/.exec(code); if (!match) { AST_Node.warn("inline source map not found"); return null; } return to_ascii(match[2]); } function set_shorthand(name, options, keys)
function minify(files, options) { var warn_function = AST_Node.warn_function; try { if (typeof files == "string") { files = [ files ]; } options = defaults(options, { compress: {}, ie8: false, keep_fnames: false, mangle: {}, output: {}, parse: {}, sourceMap: false, toplevel: false, warnings: false, wrap: false, }, true); set_shorthand("ie8", options, [ "compress", "mangle", "output" ]); set_shorthand("keep_fnames", options, [ "compress", "mangle" ]); set_shorthand("toplevel", options, [ "compress", "mangle" ]); set_shorthand("warnings", options, [ "compress" ]); if (options.mangle) { options.mangle = defaults(options.mangle, { cache: null, eval: false, ie8: false, keep_classnames: false, keep_fnames: false, properties: false, reserved: [], safari10: false, toplevel: false, }, true); } if (options.sourceMap) { options.sourceMap = defaults(options.sourceMap, { content: null, filename: null, includeSources: false, root: null, url: null, }, true); } var warnings = []; if (options.warnings && !AST_Node.warn_function) { AST_Node.warn_function = function(warning) { warnings.push(warning); }; } var toplevel; if (files instanceof AST_Toplevel) { toplevel = files; } else { options.parse = options.parse || {}; options.parse.toplevel = null; for (var name in files) { options.parse.filename = name; options.parse.toplevel = parse(files[name], options.parse); if (options.sourceMap && options.sourceMap.content == "inline") { if (Object.keys(files).length > 1) throw new Error("inline source map only works with singular input"); options.sourceMap.content = read_source_map(files[name]); } } toplevel = options.parse.toplevel; } if (options.wrap) { toplevel = toplevel.wrap_commonjs(options.wrap); } if (options.compress) { toplevel.figure_out_scope(options.mangle); toplevel = new Compressor(options.compress).compress(toplevel); } if (options.mangle) { toplevel.figure_out_scope(options.mangle); base54.reset(); toplevel.compute_char_frequency(options.mangle); toplevel.mangle_names(options.mangle); if (options.mangle.properties) { toplevel = mangle_properties(toplevel, options.mangle.properties); } } var result = {}; if (options.output.ast) { result.ast = toplevel; } if (!HOP(options.output, "code") || options.output.code) { if (options.sourceMap) { if (typeof options.sourceMap.content == "string") { options.sourceMap.content = JSON.parse(options.sourceMap.content); } options.output.source_map = SourceMap({ file: options.sourceMap.filename, orig: options.sourceMap.content, root: options.sourceMap.root }); if (options.sourceMap.includeSources) { for (var name in files) { options.output.source_map.get().setSourceContent(name, files[name]); } } } delete options.output.ast; delete options.output.code; var stream = OutputStream(options.output); toplevel.print(stream); result.code = stream.get(); if (options.sourceMap) { result.map = options.output.source_map.toString(); if (options.sourceMap.url == "inline") { result.code += "\n//# sourceMappingURL=data:application/json;charset=utf-8;base64," + to_base64(result.map); } else if (options.sourceMap.url) { result.code += "\n//# sourceMappingURL=" + options.sourceMap.url; } } } if (warnings.length) { result.warnings = warnings; } return result; } catch (ex) { return { error: ex }; } finally { AST_Node.warn_function = warn_function; } }
{ if (options[name]) { keys.forEach(function(key) { if (options[key]) { if (typeof options[key] != "object") options[key] = {}; if (!(name in options[key])) options[key][name] = options[name]; } }); } }
identifier_body
basket.rs
use diesel::prelude::*; use diesel; use serde::{Serialize, Serializer}; use std::fmt; use std::ops::Deref; use db::schema::baskets; use db::schema::users; use db::Db; use model::{basket, AuthUser, PubUser, User}; use model::permissions::{has_permission, UserAction}; use routes::new::NewBasketForm; use super::MAX_SL_LEN; pub fn is_valid_name(s: &str) -> bool { use std::ascii::AsciiExt; s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') && !s.starts_with('-') && s.len() < MAX_SL_LEN } #[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)] #[table_name = "baskets"] #[belongs_to(User)] pub struct BasketRecord { id: i64, name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } impl BasketRecord { pub fn is_public(&self) -> bool { self.public } pub fn name(&self) -> &str { &self.name } pub fn description(&self) -> Option<&str> {
self.description.as_ref().map(AsRef::as_ref) } pub fn kind(&self) -> &str { &self.kind } } #[derive(Clone, Debug, Insertable)] #[table_name = "baskets"] pub struct NewBasket { name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } pub struct Basket { record: BasketRecord, user: PubUser, } impl Basket { pub fn from_parts(record: BasketRecord, user: PubUser) -> Self { Self { record, user } } pub fn create( new: NewBasketForm, auth_user: &AuthUser, db: &Db ) -> Result<Self, CreateError> { use diesel::result::{Error as DieselError, DatabaseErrorKind}; if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) { return Err(CreateError::NoPermission { owner: new.owner }); } if new.name.is_empty() { return Err(CreateError::NameEmpty); } if !basket::is_valid_name(&new.name) { return Err(CreateError::NameInvalid); } // TODO: in case we introduce organizations, this need to change. // We can unwrap, because we checked above, whether the current user // can create baskets for the given owner. It should have returned // "false" if the owner doesn't even exist. let user = PubUser::from_username(&new.owner, db).unwrap(); let description = if new.description.trim().is_empty() { None } else { Some(new.description.trim().into()) }; let new_basket = NewBasket { name: new.name, user_id: user.id(), description: description, public: new.is_public, kind: new.kind, forked_from: None, }; let inserted = diesel::insert(&new_basket) .into(baskets::table) .get_result::<BasketRecord>(&*db.conn()); if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted { return Err(CreateError::NameAlreadyUsed); } Ok(Self { record: inserted.unwrap(), user, }) } pub fn load( name: &str, owner: &str, auth_user: Option<&AuthUser>, db: &Db, ) -> Option<Self> { baskets::table .inner_join(users::table) .filter(baskets::name.eq(name)) .filter(users::username.eq(owner)) .first(&*db.conn()) .optional() .unwrap() .and_then(|(record, user)| { let user = PubUser::from_user(user); let can_view = has_permission(auth_user, UserAction::ViewBasket { owner: &user, basket: &record, }); if can_view { Some(Self { record, user }) } else { None } }) } pub fn owner(&self) -> &str { self.user.username() } pub fn url(&self) -> String { format!("/{}/{}", self.user.username(), self.record.name) } } impl Deref for Basket { type Target = BasketRecord; fn deref(&self) -> &Self::Target { &self.record } } impl Serialize for Basket { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer { use serde::ser::SerializeStruct; let mut s = serializer.serialize_struct("Basket", 6)?; // Skipping id: the id should never be sent to the user s.serialize_field("name", self.name())?; s.serialize_field("description", &self.description())?; s.serialize_field("is_public", &self.is_public())?; s.serialize_field("url", &self.url())?; s.serialize_field("kind", self.kind())?; s.serialize_field("owner", self.owner())?; s.end() } } pub enum CreateError { /// The current user does not have the permission to create a basket for /// the given owner. NoPermission { owner: String, }, NameEmpty, NameInvalid, NameAlreadyUsed, } impl fmt::Display for CreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::CreateError::*; match *self { NoPermission { ref owner } => { write!( f, "You don't have the permission to create a basket for '{}'!", owner, ) } NameEmpty => { "The basket's name can't be empty!".fmt(f) } NameInvalid => { "The basket's name contains invalid characters! Only \ alphanumerical ASCII characters and dashes are allowed." .fmt(f) } NameAlreadyUsed => { "A repository with the given name already exists for the \ given owner" .fmt(f) } } } }
random_line_split
basket.rs
use diesel::prelude::*; use diesel; use serde::{Serialize, Serializer}; use std::fmt; use std::ops::Deref; use db::schema::baskets; use db::schema::users; use db::Db; use model::{basket, AuthUser, PubUser, User}; use model::permissions::{has_permission, UserAction}; use routes::new::NewBasketForm; use super::MAX_SL_LEN; pub fn is_valid_name(s: &str) -> bool { use std::ascii::AsciiExt; s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') && !s.starts_with('-') && s.len() < MAX_SL_LEN } #[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)] #[table_name = "baskets"] #[belongs_to(User)] pub struct BasketRecord { id: i64, name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } impl BasketRecord { pub fn is_public(&self) -> bool { self.public } pub fn name(&self) -> &str { &self.name } pub fn description(&self) -> Option<&str> { self.description.as_ref().map(AsRef::as_ref) } pub fn kind(&self) -> &str { &self.kind } } #[derive(Clone, Debug, Insertable)] #[table_name = "baskets"] pub struct NewBasket { name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } pub struct Basket { record: BasketRecord, user: PubUser, } impl Basket { pub fn from_parts(record: BasketRecord, user: PubUser) -> Self { Self { record, user } } pub fn create( new: NewBasketForm, auth_user: &AuthUser, db: &Db ) -> Result<Self, CreateError> { use diesel::result::{Error as DieselError, DatabaseErrorKind}; if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) { return Err(CreateError::NoPermission { owner: new.owner }); } if new.name.is_empty() { return Err(CreateError::NameEmpty); } if !basket::is_valid_name(&new.name) { return Err(CreateError::NameInvalid); } // TODO: in case we introduce organizations, this need to change. // We can unwrap, because we checked above, whether the current user // can create baskets for the given owner. It should have returned // "false" if the owner doesn't even exist. let user = PubUser::from_username(&new.owner, db).unwrap(); let description = if new.description.trim().is_empty()
else { Some(new.description.trim().into()) }; let new_basket = NewBasket { name: new.name, user_id: user.id(), description: description, public: new.is_public, kind: new.kind, forked_from: None, }; let inserted = diesel::insert(&new_basket) .into(baskets::table) .get_result::<BasketRecord>(&*db.conn()); if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted { return Err(CreateError::NameAlreadyUsed); } Ok(Self { record: inserted.unwrap(), user, }) } pub fn load( name: &str, owner: &str, auth_user: Option<&AuthUser>, db: &Db, ) -> Option<Self> { baskets::table .inner_join(users::table) .filter(baskets::name.eq(name)) .filter(users::username.eq(owner)) .first(&*db.conn()) .optional() .unwrap() .and_then(|(record, user)| { let user = PubUser::from_user(user); let can_view = has_permission(auth_user, UserAction::ViewBasket { owner: &user, basket: &record, }); if can_view { Some(Self { record, user }) } else { None } }) } pub fn owner(&self) -> &str { self.user.username() } pub fn url(&self) -> String { format!("/{}/{}", self.user.username(), self.record.name) } } impl Deref for Basket { type Target = BasketRecord; fn deref(&self) -> &Self::Target { &self.record } } impl Serialize for Basket { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer { use serde::ser::SerializeStruct; let mut s = serializer.serialize_struct("Basket", 6)?; // Skipping id: the id should never be sent to the user s.serialize_field("name", self.name())?; s.serialize_field("description", &self.description())?; s.serialize_field("is_public", &self.is_public())?; s.serialize_field("url", &self.url())?; s.serialize_field("kind", self.kind())?; s.serialize_field("owner", self.owner())?; s.end() } } pub enum CreateError { /// The current user does not have the permission to create a basket for /// the given owner. NoPermission { owner: String, }, NameEmpty, NameInvalid, NameAlreadyUsed, } impl fmt::Display for CreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::CreateError::*; match *self { NoPermission { ref owner } => { write!( f, "You don't have the permission to create a basket for '{}'!", owner, ) } NameEmpty => { "The basket's name can't be empty!".fmt(f) } NameInvalid => { "The basket's name contains invalid characters! Only \ alphanumerical ASCII characters and dashes are allowed." .fmt(f) } NameAlreadyUsed => { "A repository with the given name already exists for the \ given owner" .fmt(f) } } } }
{ None }
conditional_block
basket.rs
use diesel::prelude::*; use diesel; use serde::{Serialize, Serializer}; use std::fmt; use std::ops::Deref; use db::schema::baskets; use db::schema::users; use db::Db; use model::{basket, AuthUser, PubUser, User}; use model::permissions::{has_permission, UserAction}; use routes::new::NewBasketForm; use super::MAX_SL_LEN; pub fn is_valid_name(s: &str) -> bool { use std::ascii::AsciiExt; s.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') && !s.starts_with('-') && s.len() < MAX_SL_LEN } #[derive(Clone, Debug, Serialize, Identifiable, Queryable, Associations)] #[table_name = "baskets"] #[belongs_to(User)] pub struct BasketRecord { id: i64, name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } impl BasketRecord { pub fn is_public(&self) -> bool { self.public } pub fn name(&self) -> &str { &self.name } pub fn description(&self) -> Option<&str> { self.description.as_ref().map(AsRef::as_ref) } pub fn kind(&self) -> &str { &self.kind } } #[derive(Clone, Debug, Insertable)] #[table_name = "baskets"] pub struct NewBasket { name: String, user_id: i64, description: Option<String>, public: bool, kind: String, forked_from: Option<i64>, } pub struct Basket { record: BasketRecord, user: PubUser, } impl Basket { pub fn from_parts(record: BasketRecord, user: PubUser) -> Self { Self { record, user } } pub fn create( new: NewBasketForm, auth_user: &AuthUser, db: &Db ) -> Result<Self, CreateError> { use diesel::result::{Error as DieselError, DatabaseErrorKind}; if !has_permission(Some(auth_user), UserAction::CreateBasket { owner: &new.owner }) { return Err(CreateError::NoPermission { owner: new.owner }); } if new.name.is_empty() { return Err(CreateError::NameEmpty); } if !basket::is_valid_name(&new.name) { return Err(CreateError::NameInvalid); } // TODO: in case we introduce organizations, this need to change. // We can unwrap, because we checked above, whether the current user // can create baskets for the given owner. It should have returned // "false" if the owner doesn't even exist. let user = PubUser::from_username(&new.owner, db).unwrap(); let description = if new.description.trim().is_empty() { None } else { Some(new.description.trim().into()) }; let new_basket = NewBasket { name: new.name, user_id: user.id(), description: description, public: new.is_public, kind: new.kind, forked_from: None, }; let inserted = diesel::insert(&new_basket) .into(baskets::table) .get_result::<BasketRecord>(&*db.conn()); if let Err(DieselError::DatabaseError(DatabaseErrorKind::UniqueViolation, _)) = inserted { return Err(CreateError::NameAlreadyUsed); } Ok(Self { record: inserted.unwrap(), user, }) } pub fn
( name: &str, owner: &str, auth_user: Option<&AuthUser>, db: &Db, ) -> Option<Self> { baskets::table .inner_join(users::table) .filter(baskets::name.eq(name)) .filter(users::username.eq(owner)) .first(&*db.conn()) .optional() .unwrap() .and_then(|(record, user)| { let user = PubUser::from_user(user); let can_view = has_permission(auth_user, UserAction::ViewBasket { owner: &user, basket: &record, }); if can_view { Some(Self { record, user }) } else { None } }) } pub fn owner(&self) -> &str { self.user.username() } pub fn url(&self) -> String { format!("/{}/{}", self.user.username(), self.record.name) } } impl Deref for Basket { type Target = BasketRecord; fn deref(&self) -> &Self::Target { &self.record } } impl Serialize for Basket { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer { use serde::ser::SerializeStruct; let mut s = serializer.serialize_struct("Basket", 6)?; // Skipping id: the id should never be sent to the user s.serialize_field("name", self.name())?; s.serialize_field("description", &self.description())?; s.serialize_field("is_public", &self.is_public())?; s.serialize_field("url", &self.url())?; s.serialize_field("kind", self.kind())?; s.serialize_field("owner", self.owner())?; s.end() } } pub enum CreateError { /// The current user does not have the permission to create a basket for /// the given owner. NoPermission { owner: String, }, NameEmpty, NameInvalid, NameAlreadyUsed, } impl fmt::Display for CreateError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::CreateError::*; match *self { NoPermission { ref owner } => { write!( f, "You don't have the permission to create a basket for '{}'!", owner, ) } NameEmpty => { "The basket's name can't be empty!".fmt(f) } NameInvalid => { "The basket's name contains invalid characters! Only \ alphanumerical ASCII characters and dashes are allowed." .fmt(f) } NameAlreadyUsed => { "A repository with the given name already exists for the \ given owner" .fmt(f) } } } }
load
identifier_name
racingkings.py
""" The Racing Kings Variation""" from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \ A8, B8, C8, D8, E8, F8, G8, H8 from pychess.Utils.Board import Board RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1" RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8) class
(Board): """ :Description: The Racing Kings variation is where the object of the game is to bring your king to the eight row. """ variant = RACINGKINGSCHESS __desc__ = _( "In this game, check is entirely forbidden: not only is it forbidden\n" + "to move ones king into check, but it is also forbidden to check the opponents king.\n" + "The purpose of the game is to be the first player that moves his king to the eight row.\n" + "When white moves their king to the eight row, and black moves directly after that also\n" + "their king to the last row, the game is a draw\n" + "(this rule is to compensate for the advantage of white that they may move first.)\n" + "Apart from the above, pieces move and capture precisely as in normal chess." ) name = _("Racing Kings") cecp_name = "racingkings" need_initial_board = True standard_rules = False variant_group = VARIANTS_OTHER_NONSTANDARD def __init__(self, setup=False, lboard=None): if setup is True: Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard) else: Board.__init__(self, setup=setup, lboard=lboard) def testKingInEightRow(board): """ Test for a winning position """ return board.kings[board.color - 1] in RANK8 def test2KingInEightRow(board): """ Test for a winning position """ return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
RacingKingsBoard
identifier_name
racingkings.py
""" The Racing Kings Variation""" from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \ A8, B8, C8, D8, E8, F8, G8, H8 from pychess.Utils.Board import Board RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1" RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8) class RacingKingsBoard(Board): """ :Description: The Racing Kings variation is where the object of the game is to bring your king to the eight row. """ variant = RACINGKINGSCHESS __desc__ = _( "In this game, check is entirely forbidden: not only is it forbidden\n" + "to move ones king into check, but it is also forbidden to check the opponents king.\n" + "The purpose of the game is to be the first player that moves his king to the eight row.\n" + "When white moves their king to the eight row, and black moves directly after that also\n" + "their king to the last row, the game is a draw\n" + "(this rule is to compensate for the advantage of white that they may move first.)\n" + "Apart from the above, pieces move and capture precisely as in normal chess." ) name = _("Racing Kings") cecp_name = "racingkings" need_initial_board = True standard_rules = False variant_group = VARIANTS_OTHER_NONSTANDARD def __init__(self, setup=False, lboard=None): if setup is True: Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard) else: Board.__init__(self, setup=setup, lboard=lboard) def testKingInEightRow(board): """ Test for a winning position """ return board.kings[board.color - 1] in RANK8
return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
def test2KingInEightRow(board): """ Test for a winning position """
random_line_split
racingkings.py
""" The Racing Kings Variation""" from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \ A8, B8, C8, D8, E8, F8, G8, H8 from pychess.Utils.Board import Board RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1" RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8) class RacingKingsBoard(Board): """ :Description: The Racing Kings variation is where the object of the game is to bring your king to the eight row. """ variant = RACINGKINGSCHESS __desc__ = _( "In this game, check is entirely forbidden: not only is it forbidden\n" + "to move ones king into check, but it is also forbidden to check the opponents king.\n" + "The purpose of the game is to be the first player that moves his king to the eight row.\n" + "When white moves their king to the eight row, and black moves directly after that also\n" + "their king to the last row, the game is a draw\n" + "(this rule is to compensate for the advantage of white that they may move first.)\n" + "Apart from the above, pieces move and capture precisely as in normal chess." ) name = _("Racing Kings") cecp_name = "racingkings" need_initial_board = True standard_rules = False variant_group = VARIANTS_OTHER_NONSTANDARD def __init__(self, setup=False, lboard=None): if setup is True: Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard) else: Board.__init__(self, setup=setup, lboard=lboard) def testKingInEightRow(board): """ Test for a winning position """ return board.kings[board.color - 1] in RANK8 def test2KingInEightRow(board):
""" Test for a winning position """ return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
identifier_body
racingkings.py
""" The Racing Kings Variation""" from pychess.Utils.const import RACINGKINGSCHESS, VARIANTS_OTHER_NONSTANDARD, \ A8, B8, C8, D8, E8, F8, G8, H8 from pychess.Utils.Board import Board RACINGKINGSSTART = "8/8/8/8/8/8/krbnNBRK/qrbnNBRQ w - - 0 1" RANK8 = (A8, B8, C8, D8, E8, F8, G8, H8) class RacingKingsBoard(Board): """ :Description: The Racing Kings variation is where the object of the game is to bring your king to the eight row. """ variant = RACINGKINGSCHESS __desc__ = _( "In this game, check is entirely forbidden: not only is it forbidden\n" + "to move ones king into check, but it is also forbidden to check the opponents king.\n" + "The purpose of the game is to be the first player that moves his king to the eight row.\n" + "When white moves their king to the eight row, and black moves directly after that also\n" + "their king to the last row, the game is a draw\n" + "(this rule is to compensate for the advantage of white that they may move first.)\n" + "Apart from the above, pieces move and capture precisely as in normal chess." ) name = _("Racing Kings") cecp_name = "racingkings" need_initial_board = True standard_rules = False variant_group = VARIANTS_OTHER_NONSTANDARD def __init__(self, setup=False, lboard=None): if setup is True:
else: Board.__init__(self, setup=setup, lboard=lboard) def testKingInEightRow(board): """ Test for a winning position """ return board.kings[board.color - 1] in RANK8 def test2KingInEightRow(board): """ Test for a winning position """ return board.kings[board.color] in RANK8 and board.kings[board.color - 1] in RANK8
Board.__init__(self, setup=RACINGKINGSSTART, lboard=lboard)
conditional_block
old__TodoList.js
import React, { Component } from 'react' import { connect } from 'react-redux' import { addTodo } from '../actions/index' import { bindActionCreators } from 'redux' /*** REDUX AWARE ***/ class TodoList extends Component { renderList() { // since this is iterating, the todo is a single item from the overall array return this.props.todos.map((todo) => { return ( <li /* on click, dispatch the ADD_TODO action */ onClick={() => this.props.addTodo(todo)} > {todo.name} </li> ) }) } render() { return ( <ul className=""> {this.renderList()} </ul> ) } } // map state to a react prop | or anything to be honest function mapStateToProps(state) { // whatever is returned will show up as props in TodoList return { todos: state.todos }
} //map action dispatch to a property?? function mapDispatchToProps(dispatch) { // ?? // could possibly just throw the action creator inside return bindActionCreators({ addTodo }, dispatch) } // can also assign connect function to an object and then export that instead // literally connects react to redux export default connect(mapStateToProps, mapDispatchToProps)(TodoList)
random_line_split
old__TodoList.js
import React, { Component } from 'react' import { connect } from 'react-redux' import { addTodo } from '../actions/index' import { bindActionCreators } from 'redux' /*** REDUX AWARE ***/ class TodoList extends Component {
() { // since this is iterating, the todo is a single item from the overall array return this.props.todos.map((todo) => { return ( <li /* on click, dispatch the ADD_TODO action */ onClick={() => this.props.addTodo(todo)} > {todo.name} </li> ) }) } render() { return ( <ul className=""> {this.renderList()} </ul> ) } } // map state to a react prop | or anything to be honest function mapStateToProps(state) { // whatever is returned will show up as props in TodoList return { todos: state.todos } } //map action dispatch to a property?? function mapDispatchToProps(dispatch) { // ?? // could possibly just throw the action creator inside return bindActionCreators({ addTodo }, dispatch) } // can also assign connect function to an object and then export that instead // literally connects react to redux export default connect(mapStateToProps, mapDispatchToProps)(TodoList)
renderList
identifier_name
usergroups_users.rs
//============================================================================= // // WARNING: This file is AUTO-GENERATED // // Do not make changes directly to this file. // // If you would like to make a change to the library, please update the schema // definitions at https://github.com/slack-rs/slack-api-schemas // // If you would like to make a change how the library was generated, // please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen // //============================================================================= pub use crate::mod_types::usergroups_users_types::*; use crate::requests::SlackWebRequestSender; /// List all users in a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.list pub async fn list<R>( client: &R, token: &str, request: &ListRequest<'_>, ) -> Result<ListResponse, ListError<R::Error>> where R: SlackWebRequestSender, { let params = vec![ Some(("token", token)), Some(("usergroup", request.usergroup)), request .include_disabled .map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.list"); client .send(&url, &params[..]) .await .map_err(ListError::Client) .and_then(|result| { serde_json::from_str::<ListResponse>(&result) .map_err(|e| ListError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) } /// Update the list of users for a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.update pub async fn
<R>( client: &R, token: &str, request: &UpdateRequest<'_>, ) -> Result<UpdateResponse, UpdateError<R::Error>> where R: SlackWebRequestSender, { let params = vec![ Some(("token", token)), Some(("usergroup", request.usergroup)), Some(("users", request.users)), request .include_count .map(|include_count| ("include_count", if include_count { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.update"); client .send(&url, &params[..]) .await .map_err(UpdateError::Client) .and_then(|result| { serde_json::from_str::<UpdateResponse>(&result) .map_err(|e| UpdateError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) }
update
identifier_name
usergroups_users.rs
//============================================================================= // // WARNING: This file is AUTO-GENERATED // // Do not make changes directly to this file. // // If you would like to make a change to the library, please update the schema // definitions at https://github.com/slack-rs/slack-api-schemas // // If you would like to make a change how the library was generated, // please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen // //============================================================================= pub use crate::mod_types::usergroups_users_types::*; use crate::requests::SlackWebRequestSender; /// List all users in a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.list pub async fn list<R>( client: &R, token: &str, request: &ListRequest<'_>, ) -> Result<ListResponse, ListError<R::Error>> where R: SlackWebRequestSender, { let params = vec![ Some(("token", token)), Some(("usergroup", request.usergroup)), request .include_disabled .map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.list"); client .send(&url, &params[..]) .await .map_err(ListError::Client) .and_then(|result| { serde_json::from_str::<ListResponse>(&result) .map_err(|e| ListError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) } /// Update the list of users for a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.update pub async fn update<R>( client: &R, token: &str, request: &UpdateRequest<'_>, ) -> Result<UpdateResponse, UpdateError<R::Error>>
Some(("token", token)), Some(("usergroup", request.usergroup)), Some(("users", request.users)), request .include_count .map(|include_count| ("include_count", if include_count { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.update"); client .send(&url, &params[..]) .await .map_err(UpdateError::Client) .and_then(|result| { serde_json::from_str::<UpdateResponse>(&result) .map_err(|e| UpdateError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) }
where R: SlackWebRequestSender, { let params = vec![
random_line_split
usergroups_users.rs
//============================================================================= // // WARNING: This file is AUTO-GENERATED // // Do not make changes directly to this file. // // If you would like to make a change to the library, please update the schema // definitions at https://github.com/slack-rs/slack-api-schemas // // If you would like to make a change how the library was generated, // please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen // //============================================================================= pub use crate::mod_types::usergroups_users_types::*; use crate::requests::SlackWebRequestSender; /// List all users in a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.list pub async fn list<R>( client: &R, token: &str, request: &ListRequest<'_>, ) -> Result<ListResponse, ListError<R::Error>> where R: SlackWebRequestSender, { let params = vec![ Some(("token", token)), Some(("usergroup", request.usergroup)), request .include_disabled .map(|include_disabled| ("include_disabled", if include_disabled { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.list"); client .send(&url, &params[..]) .await .map_err(ListError::Client) .and_then(|result| { serde_json::from_str::<ListResponse>(&result) .map_err(|e| ListError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) } /// Update the list of users for a User Group /// /// Wraps https://api.slack.com/methods/usergroups.users.update pub async fn update<R>( client: &R, token: &str, request: &UpdateRequest<'_>, ) -> Result<UpdateResponse, UpdateError<R::Error>> where R: SlackWebRequestSender,
{ let params = vec![ Some(("token", token)), Some(("usergroup", request.usergroup)), Some(("users", request.users)), request .include_count .map(|include_count| ("include_count", if include_count { "1" } else { "0" })), ]; let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>(); let url = crate::get_slack_url_for_method("usergroups.users.update"); client .send(&url, &params[..]) .await .map_err(UpdateError::Client) .and_then(|result| { serde_json::from_str::<UpdateResponse>(&result) .map_err(|e| UpdateError::MalformedResponse(result, e)) }) .and_then(|o| o.into()) }
identifier_body
nonnanfloat.rs
use std::cmp; use num_traits::Float; #[derive(PartialOrd, PartialEq, Debug, Copy, Clone)] pub struct NonNaNFloat<F: Float>(F); impl<F: Float> NonNaNFloat<F> { pub fn new(v: F) -> Option<Self> { if v.is_nan() { Some(NonNaNFloat(v)) } else { None } } pub fn unwrap(&self) -> F { let &NonNaNFloat(v) = self; v } } impl<F: Float> Eq for NonNaNFloat<F> {} impl<F: Float> Ord for NonNaNFloat<F> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.partial_cmp(other).unwrap() } } #[cfg(test)] mod tests { use super::*; #[test] fn
() { let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)]; v.sort(); assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]); } }
test_nonnanfloat
identifier_name
nonnanfloat.rs
use std::cmp; use num_traits::Float; #[derive(PartialOrd, PartialEq, Debug, Copy, Clone)] pub struct NonNaNFloat<F: Float>(F); impl<F: Float> NonNaNFloat<F> { pub fn new(v: F) -> Option<Self> { if v.is_nan() { Some(NonNaNFloat(v)) } else { None } } pub fn unwrap(&self) -> F
} impl<F: Float> Eq for NonNaNFloat<F> {} impl<F: Float> Ord for NonNaNFloat<F> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.partial_cmp(other).unwrap() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_nonnanfloat() { let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)]; v.sort(); assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]); } }
{ let &NonNaNFloat(v) = self; v }
identifier_body
nonnanfloat.rs
use std::cmp; use num_traits::Float; #[derive(PartialOrd, PartialEq, Debug, Copy, Clone)] pub struct NonNaNFloat<F: Float>(F); impl<F: Float> NonNaNFloat<F> { pub fn new(v: F) -> Option<Self> { if v.is_nan()
else { None } } pub fn unwrap(&self) -> F { let &NonNaNFloat(v) = self; v } } impl<F: Float> Eq for NonNaNFloat<F> {} impl<F: Float> Ord for NonNaNFloat<F> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.partial_cmp(other).unwrap() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_nonnanfloat() { let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)]; v.sort(); assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]); } }
{ Some(NonNaNFloat(v)) }
conditional_block
nonnanfloat.rs
use std::cmp; use num_traits::Float; #[derive(PartialOrd, PartialEq, Debug, Copy, Clone)] pub struct NonNaNFloat<F: Float>(F); impl<F: Float> NonNaNFloat<F> { pub fn new(v: F) -> Option<Self> { if v.is_nan() { Some(NonNaNFloat(v))
pub fn unwrap(&self) -> F { let &NonNaNFloat(v) = self; v } } impl<F: Float> Eq for NonNaNFloat<F> {} impl<F: Float> Ord for NonNaNFloat<F> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.partial_cmp(other).unwrap() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_nonnanfloat() { let mut v = [NonNaNFloat(5.1), NonNaNFloat(1.3)]; v.sort(); assert_eq!(v, [NonNaNFloat(1.3), NonNaNFloat(5.1)]); } }
} else { None } }
random_line_split
apt.rs
// Copyright 2015-2017 Intecture Developers. // // Licensed under the Mozilla Public License 2.0 <LICENSE or // https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied, // modified, or distributed except according to those terms. use command::{self, Child}; use error_chain::ChainedError; use errors::*; use futures::{future, Future}; use futures::future::FutureResult; use host::Host; use host::local::Local; use regex::Regex; use std::process; use super::PackageProvider; use tokio_process::CommandExt; pub struct Apt; impl PackageProvider for Apt { fn available() -> Result<bool> { Ok(process::Command::new("/usr/bin/type") .arg("apt-get") .status() .chain_err(|| "Could not determine provider availability")? .success()) } fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> { let name = name.to_owned(); Box::new(process::Command::new("dpkg") .args(&["--get-selections"]) .output_async(&host.handle()) .chain_err(|| "Could not get installed packages") .and_then(move |output| { if output.status.success() { let re = match Regex::new(&format!("(?m){}\\s+install$", name)) { Ok(r) => r, Err(e) => return future::err(ErrorKind::Regex(e).into()), }; let stdout = String::from_utf8_lossy(&output.stdout); future::ok(re.is_match(&stdout)) } else { future::err(format!("Error running `dpkg --get-selections`: {}", String::from_utf8_lossy(&output.stderr)).into()) } })) } fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error>
fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> { let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "remove", name]) } }
{ let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "install", name]) }
identifier_body
apt.rs
// Copyright 2015-2017 Intecture Developers. // // Licensed under the Mozilla Public License 2.0 <LICENSE or // https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied, // modified, or distributed except according to those terms.
use futures::future::FutureResult; use host::Host; use host::local::Local; use regex::Regex; use std::process; use super::PackageProvider; use tokio_process::CommandExt; pub struct Apt; impl PackageProvider for Apt { fn available() -> Result<bool> { Ok(process::Command::new("/usr/bin/type") .arg("apt-get") .status() .chain_err(|| "Could not determine provider availability")? .success()) } fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> { let name = name.to_owned(); Box::new(process::Command::new("dpkg") .args(&["--get-selections"]) .output_async(&host.handle()) .chain_err(|| "Could not get installed packages") .and_then(move |output| { if output.status.success() { let re = match Regex::new(&format!("(?m){}\\s+install$", name)) { Ok(r) => r, Err(e) => return future::err(ErrorKind::Regex(e).into()), }; let stdout = String::from_utf8_lossy(&output.stdout); future::ok(re.is_match(&stdout)) } else { future::err(format!("Error running `dpkg --get-selections`: {}", String::from_utf8_lossy(&output.stderr)).into()) } })) } fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error> { let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "install", name]) } fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> { let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "remove", name]) } }
use command::{self, Child}; use error_chain::ChainedError; use errors::*; use futures::{future, Future};
random_line_split
apt.rs
// Copyright 2015-2017 Intecture Developers. // // Licensed under the Mozilla Public License 2.0 <LICENSE or // https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied, // modified, or distributed except according to those terms. use command::{self, Child}; use error_chain::ChainedError; use errors::*; use futures::{future, Future}; use futures::future::FutureResult; use host::Host; use host::local::Local; use regex::Regex; use std::process; use super::PackageProvider; use tokio_process::CommandExt; pub struct
; impl PackageProvider for Apt { fn available() -> Result<bool> { Ok(process::Command::new("/usr/bin/type") .arg("apt-get") .status() .chain_err(|| "Could not determine provider availability")? .success()) } fn installed(&self, host: &Local, name: &str) -> Box<Future<Item = bool, Error = Error>> { let name = name.to_owned(); Box::new(process::Command::new("dpkg") .args(&["--get-selections"]) .output_async(&host.handle()) .chain_err(|| "Could not get installed packages") .and_then(move |output| { if output.status.success() { let re = match Regex::new(&format!("(?m){}\\s+install$", name)) { Ok(r) => r, Err(e) => return future::err(ErrorKind::Regex(e).into()), }; let stdout = String::from_utf8_lossy(&output.stdout); future::ok(re.is_match(&stdout)) } else { future::err(format!("Error running `dpkg --get-selections`: {}", String::from_utf8_lossy(&output.stderr)).into()) } })) } fn install(&self, host: &Local, name: &str) -> FutureResult<Child, Error> { let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "install", name]) } fn uninstall(&self, host: &Local, name: &str) -> FutureResult<Child, Error> { let cmd = match command::factory() { Ok(c) => c, Err(e) => return future::err(format!("{}", e.display_chain()).into()), }; cmd.exec(host, &["apt-get", "-y", "remove", name]) } }
Apt
identifier_name
boards.js
/* ======================================================================== * ZUI: boards.js * http://zui.sexy * ======================================================================== * Copyright (c) 2014 cnezsoft.com; Licensed MIT * ======================================================================== */ (function($){ 'use strict'; if (!$.fn.droppable) throw new Error('droppable requires for boards'); var Boards = function(element, options) { this.$ = $(element); this.options = this.getOptions(options); this.getLang(); this.init(); }; Boards.DEFAULTS = { lang: 'zh-cn', langs: { 'zh-cn': { append2end: '移动到末尾' }, 'zh-tw': { append2end: '移动到末尾' }, 'en': { append2end: 'Move to the end.' } } }; // default options Boards.prototype.getOptions = function(options) { options = $.extend( {}, Boards.DEFAULTS, this.$.data(), options); return options; }; Boards.prototype.getLang = function() { var config = window.config; if (!this.options.lang) { if (typeof(config) != 'undefined' && config.clientLang) { this.options.lang = config.clientLang; } else { var hl = $('html').attr('lang'); this.options.lang = hl ? hl : 'en'; } this.options.lang = this.options.lang.replace(/-/, '_').toLowerCase(); } this.lang = this.options.langs[this.options.lang] || this.options.langs[Boards.DEFAULTS.lang]; }; Boards.prototype.init = function() { var idSeed = 1; var lang = this.lang; this.$.find('.board-item:not(".disable-drop"), .board:not(".disable-drop")').each(function() { var $this = $(this); if ($this.attr('id')) { $this.attr('data-id', $this.attr('id')); } else if (!$this.attr('data-id')) { $this.attr('data-id', 'board' + (idSeed++)); } if ($this.hasClass('board')) { $this.find('.board-list').append('<div class="board-item board-item-empty"><i class="icon-plus"></i> {append2end}</div>'.format(lang)) .append('<div class="board-item board-item-shadow"></div>'.format(lang)); } }); this.bind(); }; Boards.prototype.bind = function(items) { var $boards = this.$, setting = this.options; if (typeof(items) == 'undefined') { items = $boards.find('.board-item:not(".disable-drop, .board-item-shadow")'); } items.droppable( { target: '.board-item:not(".disable-drop, .board-item-shadow")', flex: true, start: function(e) { $boards.addClass('dragging').find('.board-item-shadow').height(e.element.outerHeight()); }, drag: function(e) { $boards.find('.board.drop-in-empty').removeClass('drop-in-empty'); if (e.isIn) { var board = e.target.closest('.board').addClass('drop-in'); var shadow = board.find('.board-item-shadow'); var target = e.target; $boards.addClass('drop-in').find('.board.drop-in').not(board).removeClass('drop-in'); shadow.insertBefore(target); board.toggleClass('drop-in-empty', target.hasClass('board-item-empty')); } }, drop: function(e) { if (e.isNew) { var DROP = 'drop'; var result; if (setting.hasOwnProperty(DROP) && $.isFunction(setting[DROP])) { result = setting[DROP](e); } if (result !== false) e.element.insertBefore(e.target); } }, finish: function() { $boards.removeClass('dragging').removeClass('drop-in').find('.board.drop-in').removeClass('drop-in'); } }); }; $.fn.boards = function(option) { return this.each(function() { var $this = $(this); var data = $this.data('zui.boards'); var options = typeof option == 'object' && option; if (!data) $this.data('zui.boards', (data = new Boards(this, options))); if (typeof option == 'string') data[option](); }); };
{ $('[data-toggle="boards"]').boards(); }); }(jQuery));
$.fn.boards.Constructor = Boards; $(function()
random_line_split
boards.js
/* ======================================================================== * ZUI: boards.js * http://zui.sexy * ======================================================================== * Copyright (c) 2014 cnezsoft.com; Licensed MIT * ======================================================================== */ (function($){ 'use strict'; if (!$.fn.droppable) throw new Error('droppable requires for boards'); var Boards = function(element, options) { this.$ = $(element); this.options = this.getOptions(options); this.getLang(); this.init(); }; Boards.DEFAULTS = { lang: 'zh-cn', langs: { 'zh-cn': { append2end: '移动到末尾' }, 'zh-tw': { append2end: '移动到末尾' }, 'en': { append2end: 'Move to the end.' } } }; // default options Boards.prototype.getOptions = function(options) { options = $.extend( {}, Boards.DEFAULTS, this.$.data(), options); return options; }; Boards.prototype.getLang = function() { var config = window.config; if (!this.options.lang) { if (typeof(config) != 'undefined' && config.clientLang) { this.options.lang = config.clientLang; } else { var hl = $('html').attr('lang'); this.options.lang = hl ? hl : 'en'; } this.options.lang = this.options.lang.replace(/-/, '_').toLowerCase(); } this.lang = this.options.langs[this.options.lang] || this.options.langs[Boards.DEFAULTS.lang]; }; Boards.prototype.init = function() { var idSeed = 1; var lang = this.lang; this.$.find('.board-item:not(".disable-drop"), .board:not(".disable-drop")').each(function() { var $this = $(this); if ($this.attr('id')) { $this.attr('data-id', $this.attr('id')); } else if (!$this.attr('data-id')) { $this.attr('data-id', 'board' + (idSeed++)); } if ($this.hasClass('board')) { $this.find('.board-list').append('<div class="board-item board-item-empty"><i class="icon-plus"></i> {append2end}</div>'.format(lang)) .append('<div class="board-item board-item-shadow"></div>'.format(lang)); } }); this.bind(); }; Boards.prototype.bind = function(items) { var $boards = this.$, setting = this.options; if (typeof(items) == 'undefined') { items = $boards.find('.board-item:not(".disable-drop, .board-item-shadow")'); } items.droppable( { target: '.board-item:not(".disable-drop, .board-item-shadow")', flex: true, start: function(e) { $boards.addClass('dragging').find('.board-item-shadow').height(e.element.outerHeight()); }, drag: function(e) { $boards.find('.board.drop-in-empty').removeClass('drop-in-empty'); if (e.isIn) {
drop: function(e) { if (e.isNew) { var DROP = 'drop'; var result; if (setting.hasOwnProperty(DROP) && $.isFunction(setting[DROP])) { result = setting[DROP](e); } if (result !== false) e.element.insertBefore(e.target); } }, finish: function() { $boards.removeClass('dragging').removeClass('drop-in').find('.board.drop-in').removeClass('drop-in'); } }); }; $.fn.boards = function(option) { return this.each(function() { var $this = $(this); var data = $this.data('zui.boards'); var options = typeof option == 'object' && option; if (!data) $this.data('zui.boards', (data = new Boards(this, options))); if (typeof option == 'string') data[option](); }); }; $.fn.boards.Constructor = Boards; $(function() { $('[data-toggle="boards"]').boards(); }); }(jQuery));
var board = e.target.closest('.board').addClass('drop-in'); var shadow = board.find('.board-item-shadow'); var target = e.target; $boards.addClass('drop-in').find('.board.drop-in').not(board).removeClass('drop-in'); shadow.insertBefore(target); board.toggleClass('drop-in-empty', target.hasClass('board-item-empty')); } },
conditional_block
server.js
var HTTP = require('http'); function testDuplicateRoutesError(test) { var router = new HTTP_Router_Server(); router.get('/pokemon/:id/abilities', function handler() { }); try { router.get('/pokemon/:whatever/abilities', function handler() { }); test.ok(false, 'This should have thrown an error'); test.done(); } catch (error) { test.ok( error.toString().contains( 'Duplicate route defined for uri pattern' ) ); test.done(); } } function testRouteSpecificity(test) { var router = new HTTP_Router_Server(), server = HTTP.createServer(router.serve); server.listen(Network_Mapper.createAPIMapping().http_port); var complete_count = 0, expected = 4; function incrementCompleteCount() { complete_count++; if (complete_count === expected) { server.close(); test.done(); } } router.get('/foo/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); router.get('/foo/specific', function handler(request, response) { test.equals(request.url, '/foo/specific'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/foo/specific', { }, function finisher() { }); router.get('/bar/:id', function handler(request, response) { test.equals(request.url, '/bar/123'); response.send({ }); incrementCompleteCount(); }); router.get('/bar/specific', function handler(... response) {
test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/bar/123', { }, function finisher() { }); router.get('/baz/specific', function handler(request, response) { test.equals(request.url, '/baz/specific'); response.send({ }); incrementCompleteCount(); }); router.get('/baz/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/baz/specific', { }, function finisher() { }); router.get('/wat/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); server.close(function handler() { Network_Mapper.deleteAPIMapping(); test.done(); }); }); router.get('/wat/:id', function handler(request, response) { test.equals(request.url, '/wat/123'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/wat/123', { } , function finisher() { }); } function serve(test) { // The router should trigger a response error // if an invalid HTTP method was specified: WithInvalidHTTPMethod: { let mock_headers = { }; let mock_native_request = { headers: mock_headers, url: '/', method: 'nonexistent_method' }; let mock_request = (new HTTP_Request()) .setNativeRequest(mock_native_request); let mock_response = (new HTTP_Response()) .setUrl('/'); mock_response.error = function handleError(error) { test.ok(error instanceof Error_InvalidHttpMethod); }; let router = new HTTP_Router_Server(); router.serve(mock_request, mock_response); } test.done(); } module.exports = { testDuplicateRoutesError, testRouteSpecificity, serve };
random_line_split
server.js
var HTTP = require('http'); function testDuplicateRoutesError(test) { var router = new HTTP_Router_Server(); router.get('/pokemon/:id/abilities', function handler() { }); try { router.get('/pokemon/:whatever/abilities', function handler() { }); test.ok(false, 'This should have thrown an error'); test.done(); } catch (error) { test.ok( error.toString().contains( 'Duplicate route defined for uri pattern' ) ); test.done(); } } function testRouteSpecificity(test) { var router = new HTTP_Router_Server(), server = HTTP.createServer(router.serve); server.listen(Network_Mapper.createAPIMapping().http_port); var complete_count = 0, expected = 4; function
() { complete_count++; if (complete_count === expected) { server.close(); test.done(); } } router.get('/foo/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); router.get('/foo/specific', function handler(request, response) { test.equals(request.url, '/foo/specific'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/foo/specific', { }, function finisher() { }); router.get('/bar/:id', function handler(request, response) { test.equals(request.url, '/bar/123'); response.send({ }); incrementCompleteCount(); }); router.get('/bar/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/bar/123', { }, function finisher() { }); router.get('/baz/specific', function handler(request, response) { test.equals(request.url, '/baz/specific'); response.send({ }); incrementCompleteCount(); }); router.get('/baz/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/baz/specific', { }, function finisher() { }); router.get('/wat/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); server.close(function handler() { Network_Mapper.deleteAPIMapping(); test.done(); }); }); router.get('/wat/:id', function handler(request, response) { test.equals(request.url, '/wat/123'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/wat/123', { } , function finisher() { }); } function serve(test) { // The router should trigger a response error // if an invalid HTTP method was specified: WithInvalidHTTPMethod: { let mock_headers = { }; let mock_native_request = { headers: mock_headers, url: '/', method: 'nonexistent_method' }; let mock_request = (new HTTP_Request()) .setNativeRequest(mock_native_request); let mock_response = (new HTTP_Response()) .setUrl('/'); mock_response.error = function handleError(error) { test.ok(error instanceof Error_InvalidHttpMethod); }; let router = new HTTP_Router_Server(); router.serve(mock_request, mock_response); } test.done(); } module.exports = { testDuplicateRoutesError, testRouteSpecificity, serve };
incrementCompleteCount
identifier_name
server.js
var HTTP = require('http'); function testDuplicateRoutesError(test) { var router = new HTTP_Router_Server(); router.get('/pokemon/:id/abilities', function handler() { }); try { router.get('/pokemon/:whatever/abilities', function handler() { }); test.ok(false, 'This should have thrown an error'); test.done(); } catch (error) { test.ok( error.toString().contains( 'Duplicate route defined for uri pattern' ) ); test.done(); } } function testRouteSpecificity(test)
function serve(test) { // The router should trigger a response error // if an invalid HTTP method was specified: WithInvalidHTTPMethod: { let mock_headers = { }; let mock_native_request = { headers: mock_headers, url: '/', method: 'nonexistent_method' }; let mock_request = (new HTTP_Request()) .setNativeRequest(mock_native_request); let mock_response = (new HTTP_Response()) .setUrl('/'); mock_response.error = function handleError(error) { test.ok(error instanceof Error_InvalidHttpMethod); }; let router = new HTTP_Router_Server(); router.serve(mock_request, mock_response); } test.done(); } module.exports = { testDuplicateRoutesError, testRouteSpecificity, serve };
{ var router = new HTTP_Router_Server(), server = HTTP.createServer(router.serve); server.listen(Network_Mapper.createAPIMapping().http_port); var complete_count = 0, expected = 4; function incrementCompleteCount() { complete_count++; if (complete_count === expected) { server.close(); test.done(); } } router.get('/foo/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); router.get('/foo/specific', function handler(request, response) { test.equals(request.url, '/foo/specific'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/foo/specific', { }, function finisher() { }); router.get('/bar/:id', function handler(request, response) { test.equals(request.url, '/bar/123'); response.send({ }); incrementCompleteCount(); }); router.get('/bar/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/bar/123', { }, function finisher() { }); router.get('/baz/specific', function handler(request, response) { test.equals(request.url, '/baz/specific'); response.send({ }); incrementCompleteCount(); }); router.get('/baz/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/baz/specific', { }, function finisher() { }); router.get('/wat/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); server.close(function handler() { Network_Mapper.deleteAPIMapping(); test.done(); }); }); router.get('/wat/:id', function handler(request, response) { test.equals(request.url, '/wat/123'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/wat/123', { } , function finisher() { }); }
identifier_body
server.js
var HTTP = require('http'); function testDuplicateRoutesError(test) { var router = new HTTP_Router_Server(); router.get('/pokemon/:id/abilities', function handler() { }); try { router.get('/pokemon/:whatever/abilities', function handler() { }); test.ok(false, 'This should have thrown an error'); test.done(); } catch (error) { test.ok( error.toString().contains( 'Duplicate route defined for uri pattern' ) ); test.done(); } } function testRouteSpecificity(test) { var router = new HTTP_Router_Server(), server = HTTP.createServer(router.serve); server.listen(Network_Mapper.createAPIMapping().http_port); var complete_count = 0, expected = 4; function incrementCompleteCount() { complete_count++; if (complete_count === expected)
} router.get('/foo/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); router.get('/foo/specific', function handler(request, response) { test.equals(request.url, '/foo/specific'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/foo/specific', { }, function finisher() { }); router.get('/bar/:id', function handler(request, response) { test.equals(request.url, '/bar/123'); response.send({ }); incrementCompleteCount(); }); router.get('/bar/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/bar/123', { }, function finisher() { }); router.get('/baz/specific', function handler(request, response) { test.equals(request.url, '/baz/specific'); response.send({ }); incrementCompleteCount(); }); router.get('/baz/:id', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); test.done(); }); Querier.get('/api/baz/specific', { }, function finisher() { }); router.get('/wat/specific', function handler(... response) { test.ok(false, 'We should not be here'); response.send({ }); server.close(function handler() { Network_Mapper.deleteAPIMapping(); test.done(); }); }); router.get('/wat/:id', function handler(request, response) { test.equals(request.url, '/wat/123'); response.send({ }); incrementCompleteCount(); }); Querier.get('/api/wat/123', { } , function finisher() { }); } function serve(test) { // The router should trigger a response error // if an invalid HTTP method was specified: WithInvalidHTTPMethod: { let mock_headers = { }; let mock_native_request = { headers: mock_headers, url: '/', method: 'nonexistent_method' }; let mock_request = (new HTTP_Request()) .setNativeRequest(mock_native_request); let mock_response = (new HTTP_Response()) .setUrl('/'); mock_response.error = function handleError(error) { test.ok(error instanceof Error_InvalidHttpMethod); }; let router = new HTTP_Router_Server(); router.serve(mock_request, mock_response); } test.done(); } module.exports = { testDuplicateRoutesError, testRouteSpecificity, serve };
{ server.close(); test.done(); }
conditional_block
game.js
/* * Copyright 2014, Gregg Tavares. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Gregg Tavares. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ "use strict"; function $(id) { return document.getElementById(id); } var InstrumentManager = (function() { var soundFiles = [ "assets/drum-samples/4OP-FM/hihat.wav", "assets/drum-samples/4OP-FM/kick.wav", "assets/drum-samples/4OP-FM/snare.wav", "assets/drum-samples/4OP-FM/tom1.wav", "assets/drum-samples/4OP-FM/tom2.wav", "assets/drum-samples/4OP-FM/tom3.wav", "assets/drum-samples/acoustic-kit/hihat.wav", "assets/drum-samples/acoustic-kit/kick.wav", "assets/drum-samples/acoustic-kit/snare.wav", "assets/drum-samples/acoustic-kit/tom1.wav", "assets/drum-samples/acoustic-kit/tom2.wav", "assets/drum-samples/acoustic-kit/tom3.wav", "assets/drum-samples/Bongos/hihat.wav", "assets/drum-samples/Bongos/kick.wav", "assets/drum-samples/Bongos/snare.wav", "assets/drum-samples/Bongos/tom1.wav", "assets/drum-samples/Bongos/tom2.wav", "assets/drum-samples/Bongos/tom3.wav", "assets/drum-samples/breakbeat13/hihat.wav", "assets/drum-samples/breakbeat13/kick.wav", "assets/drum-samples/breakbeat13/snare.wav", "assets/drum-samples/breakbeat13/tom1.wav", "assets/drum-samples/breakbeat13/tom2.wav", "assets/drum-samples/breakbeat13/tom3.wav", "assets/drum-samples/breakbeat8/hihat.wav", "assets/drum-samples/breakbeat8/kick.wav", "assets/drum-samples/breakbeat8/snare.wav", "assets/drum-samples/breakbeat8/tom1.wav", "assets/drum-samples/breakbeat8/tom2.wav", "assets/drum-samples/breakbeat8/tom3.wav", "assets/drum-samples/breakbeat9/hihat.wav", "assets/drum-samples/breakbeat9/kick.wav", "assets/drum-samples/breakbeat9/snare.wav", "assets/drum-samples/breakbeat9/tom1.wav", "assets/drum-samples/breakbeat9/tom2.wav", "assets/drum-samples/breakbeat9/tom3.wav", "assets/drum-samples/CR78/hihat.wav", "assets/drum-samples/CR78/kick.wav", "assets/drum-samples/CR78/snare.wav", "assets/drum-samples/CR78/tom1.wav", "assets/drum-samples/CR78/tom2.wav", "assets/drum-samples/CR78/tom3.wav", "assets/drum-samples/Kit3/hihat.wav", "assets/drum-samples/Kit3/kick.wav", "assets/drum-samples/Kit3/snare.wav", "assets/drum-samples/Kit3/tom1.wav", "assets/drum-samples/Kit3/tom2.wav", "assets/drum-samples/Kit3/tom3.wav", "assets/drum-samples/Kit8/hihat.wav", "assets/drum-samples/Kit8/kick.wav", "assets/drum-samples/Kit8/snare.wav", "assets/drum-samples/Kit8/tom1.wav", "assets/drum-samples/Kit8/tom2.wav", "assets/drum-samples/Kit8/tom3.wav", "assets/drum-samples/KPR77/hihat.wav", "assets/drum-samples/KPR77/kick.wav", "assets/drum-samples/KPR77/snare.wav", "assets/drum-samples/KPR77/tom1.wav", "assets/drum-samples/KPR77/tom2.wav", "assets/drum-samples/KPR77/tom3.wav", "assets/drum-samples/LINN/hihat.wav", "assets/drum-samples/LINN/kick.wav", "assets/drum-samples/LINN/snare.wav", "assets/drum-samples/LINN/tom1.wav", "assets/drum-samples/LINN/tom2.wav", "assets/drum-samples/LINN/tom3.wav", "assets/drum-samples/R8/hihat.wav", "assets/drum-samples/R8/kick.wav", "assets/drum-samples/R8/snare.wav", "assets/drum-samples/R8/tom1.wav", "assets/drum-samples/R8/tom2.wav", "assets/drum-samples/R8/tom3.wav", "assets/drum-samples/Stark/hihat.wav", "assets/drum-samples/Stark/kick.wav", "assets/drum-samples/Stark/snare.wav", "assets/drum-samples/Stark/tom1.wav", "assets/drum-samples/Stark/tom2.wav", "assets/drum-samples/Stark/tom3.wav", "assets/drum-samples/Techno/hihat.wav", "assets/drum-samples/Techno/kick.wav", "assets/drum-samples/Techno/snare.wav", "assets/drum-samples/Techno/tom1.wav", "assets/drum-samples/Techno/tom2.wav", "assets/drum-samples/Techno/tom3.wav", "assets/drum-samples/TheCheebacabra1/hihat.wav", "assets/drum-samples/TheCheebacabra1/kick.wav", "assets/drum-samples/TheCheebacabra1/snare.wav", "assets/drum-samples/TheCheebacabra1/tom1.wav", "assets/drum-samples/TheCheebacabra1/tom2.wav", "assets/drum-samples/TheCheebacabra1/tom3.wav", "assets/drum-samples/TheCheebacabra2/hihat.wav", "assets/drum-samples/TheCheebacabra2/kick.wav", "assets/drum-samples/TheCheebacabra2/snare.wav", "assets/drum-samples/TheCheebacabra2/tom1.wav", "assets/drum-samples/TheCheebacabra2/tom2.wav", "assets/drum-samples/TheCheebacabra2/tom3.wav", ]; return function(Misc) { this.instruments = []; this.getNextInstrument = function() { if (this.instruments.length == 0) { this.instruments = soundFiles.slice(); } var index = Misc.randInt(this.instruments.length); var instrument = this.instruments.splice(index, 1)[0]; return instrument }.bind(this); }; }()); var Player = function(services, netPlayer, name) { this.services = services; this.netPlayer = netPlayer; this.name = name; this.tracks = []; this.position = [services.misc.randInt(200), services.misc.randInt(200)]; //this.elem = document.createElement("div"); //this.elemState = -1; //var elem = this.elem; //var s = elem.style; //s.width = "32px"; //s.height = "32px"; //s.border = "1px solid black"; //s.position = "absolute"; //s.left = services.misc.randInt(window.innerWidth - 32) + "px"; //s.top = services.misc.randInt(window.innerHeight - 32) + "px"; //s.zIndex = 5; //document.body.appendChild(elem); netPlayer.addEventListener('disconnect', Player.prototype.disconnect.bind(this)); netPlayer.addEventListener('newInstrument', Player.prototype.chooseInstrument.bind(this)); netPlayer.addEventListener('setColor', Player.prototype.setColor.bind(this)); netPlayer.addEventListener('busy', Player.prototype.handleBusyMsg.bind(this)); netPlayer.addEventListener('setName', Player.prototype.handleNameMsg.bind(this)); netPlayer.addEventListener('tracks', Player.prototype.setTracks.bind(this)); netPlayer.addEventListener('note', Player.prototype.setNote.bind(this)); this.chooseInstrument(); }; Player.prototype.setColor = function(data) { this.color = data.color; this.glColor = this.services.cssParse.parseCSSColor(data.color, true); if (this.elem) { this.elem.style.backgroundColor = this.color; } }; Player.prototype.setTracks = function(data) { this.tracks = data.tracks; }; Player.prototype.setNote = function(data) { if (this.tracks) { var track = this.tracks[data.t]; if (track) { track.rhythm[data.r] = data.n; } } }; Player.prototype.handleNameMsg = function(msg) { if (!msg.name) { this.sendCmd('setName', { name: this.name }); } else { this.name = msg.name.replace(/[<>]/g, ''); } }; Player.prototype.handleBusyMsg = function(msg) { // ignore this for now }; Player.prototype.chooseInstrument = function() { var instrument = this.services.instrumentManager.getNextInstrument(); this.sendCmd('setInstrument', { filename: instrument, }); }; Player.prototype.sendCmd = function(cmd, data) { this.netPlayer.sendCmd(cmd, data); }; Player.prototype.disconnect = function() { this.netPlayer.removeAllListeners(); this.services.playerManager.removePlayer(this); if (this.elem) { this.elem.parentNode.removeChild(this.elem); } }; Player.prototype.drawNotes = function(trackIndex, rhythmIndex) { if (this.tracks) { var track = this.tracks[trackIndex]; if (track) { var on = track.rhythm[rhythmIndex]; if (this.elem && this.elemState != on) { this.elem.style.border = on ? "5px solid white" : "1px solid black"; this.elemState = on; } if (on) { var gridSize = this.services.globals.gridSize; var renderer = this.services.renderer; var width = renderer.canvas.width; var height = renderer.canvas.height; var gridWidth = Math.floor(width / gridSize); var gridHeight = Math.floor(height / gridSize); var offX = (width - (gridWidth * gridSize)) / 2; var offY = (height - (gridHeight * gridSize)) / 2; switch (rhythmIndex % 4) { case 0: ++this.position[0]; break; case 1: ++this.position[1]; break; case 2: --this.position[0]; break; case 3: --this.position[1]; break; } this.position[0] = (this.position[0] + gridWidth ) % gridWidth; this.position[1] = (this.position[1] + gridHeight) % gridHeight; this.services.renderer.drawCircle( [offX + this.position[0] * gridSize, offY + this.position[1] * gridSize], gridSize, this.glColor); } } } }; var PlayerManager = function(services) { this.services = services; this.players = []; this.numElem = $("numPeople").firstChild; }; PlayerManager.prototype.updatePlayers = function() { this.numElem.nodeValue = this.players.length; }; PlayerManager.prototype.startPlayer = function(netPlayer, name) { var player = new Player(this.services, netPlayer, name); this.players.push(player); this.updatePlayers(); }; PlayerManager.prototype.removePlayer = function(player) { this.players.splice(this.players.indexOf(player), 1); this.updatePlayers(); }; PlayerManager.prototype.drawNotes = function(trackIndex, rhythmIndex) { for (var ii = 0; ii < this.players.length; ++ii) { var player = this.players[ii]; player.drawNotes(trackIndex, rhythmIndex); } }; // Start the main app logic. requirejs( [ 'hft/gameserver', 'hft/gamesupport', 'hft/syncedclock', 'hft/misc/cssparse', 'hft/misc/misc', '../bower_components/hft-utils/dist/audio', './canvasrenderer', './webglrenderer', ], function( GameServer, GameSupport, SyncedClock, CSSParse, Misc, AudioManager, CanvasRenderer, WebGLRenderer) { var g_debug = false; var g_services = {}; var g_playerManager = new PlayerManager(g_services); g_services.playerManager = g_playerManager; g_services.cssParse = CSSParse; g_services.misc = Misc; var stop = false; // You can set these from the URL with // http://path/gameview.html?settings={name:value,name:value} var globals = { haveServer: true, bpm: 120, loopLength: 16, force2d: false, debug: false, gridSize: 32, }; function startPlayer(netPlayer, name)
Misc.applyUrlSettings(globals); g_services.globals = globals; var server; if (globals.haveServer) { server = new GameServer(); g_services.server = server; server.addEventListener('playerconnect', g_playerManager.startPlayer.bind(g_playerManager)); } GameSupport.init(server, globals); var clock = SyncedClock.createClock(true); g_services.clock = clock; var instrumentManager = new InstrumentManager(Misc); g_services.instrumentManager = instrumentManager; var canvas = $("canvas"); var renderer; if (!globals.force2d || globals.force2D) { renderer = new WebGLRenderer(g_services, canvas); } if (!renderer || !renderer.canRender()) { renderer = new CanvasRenderer(g_services, canvas); } g_services.renderer = renderer; var secondsPerBeat = 60 / globals.bpm; var secondsPerQuarterBeat = secondsPerBeat / 4; var lastDisplayedQuarterBeat = 0; function process() { var currentTime = clock.getTime(); var currentQuarterBeat = Math.floor(currentTime / secondsPerQuarterBeat); if (globals.debug) { var beat = Math.floor(currentQuarterBeat / 4) % 4; GameSupport.setStatus( "\n ct: " + currentTime.toFixed(2).substr(-5) + "\ncqb: " + currentQuarterBeat.toString().substr(-4) + "\n rt: " + currentQuarterBeat % globals.loopLength + "\n bt: " + beat + ((beat % 2) == 0 ? " ****" : "")); } if (lastDisplayedQuarterBeat != currentQuarterBeat) { lastDisplayedQuarterBeat = currentQuarterBeat; g_playerManager.drawNotes(0, currentQuarterBeat % globals.loopLength); } if (!stop) { setTimeout(process, 100); } } process(); GameSupport.run(globals, function() { //var x = Misc.randInt(150) + 500; //var y = Misc.randInt(150) + 300; //renderer.drawCircle([x, y], 40, [1,1,1,1]); renderer.begin(); renderer.end(); }); });
{ return new Player(g_services, netPlayer); }
identifier_body
game.js
/* * Copyright 2014, Gregg Tavares. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Gregg Tavares. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ "use strict"; function $(id) { return document.getElementById(id); } var InstrumentManager = (function() { var soundFiles = [ "assets/drum-samples/4OP-FM/hihat.wav", "assets/drum-samples/4OP-FM/kick.wav", "assets/drum-samples/4OP-FM/snare.wav", "assets/drum-samples/4OP-FM/tom1.wav", "assets/drum-samples/4OP-FM/tom2.wav", "assets/drum-samples/4OP-FM/tom3.wav", "assets/drum-samples/acoustic-kit/hihat.wav", "assets/drum-samples/acoustic-kit/kick.wav", "assets/drum-samples/acoustic-kit/snare.wav", "assets/drum-samples/acoustic-kit/tom1.wav", "assets/drum-samples/acoustic-kit/tom2.wav", "assets/drum-samples/acoustic-kit/tom3.wav", "assets/drum-samples/Bongos/hihat.wav", "assets/drum-samples/Bongos/kick.wav", "assets/drum-samples/Bongos/snare.wav", "assets/drum-samples/Bongos/tom1.wav", "assets/drum-samples/Bongos/tom2.wav", "assets/drum-samples/Bongos/tom3.wav", "assets/drum-samples/breakbeat13/hihat.wav", "assets/drum-samples/breakbeat13/kick.wav", "assets/drum-samples/breakbeat13/snare.wav", "assets/drum-samples/breakbeat13/tom1.wav", "assets/drum-samples/breakbeat13/tom2.wav", "assets/drum-samples/breakbeat13/tom3.wav", "assets/drum-samples/breakbeat8/hihat.wav", "assets/drum-samples/breakbeat8/kick.wav", "assets/drum-samples/breakbeat8/snare.wav", "assets/drum-samples/breakbeat8/tom1.wav", "assets/drum-samples/breakbeat8/tom2.wav", "assets/drum-samples/breakbeat8/tom3.wav", "assets/drum-samples/breakbeat9/hihat.wav", "assets/drum-samples/breakbeat9/kick.wav", "assets/drum-samples/breakbeat9/snare.wav", "assets/drum-samples/breakbeat9/tom1.wav", "assets/drum-samples/breakbeat9/tom2.wav", "assets/drum-samples/breakbeat9/tom3.wav", "assets/drum-samples/CR78/hihat.wav", "assets/drum-samples/CR78/kick.wav", "assets/drum-samples/CR78/snare.wav", "assets/drum-samples/CR78/tom1.wav", "assets/drum-samples/CR78/tom2.wav", "assets/drum-samples/CR78/tom3.wav", "assets/drum-samples/Kit3/hihat.wav", "assets/drum-samples/Kit3/kick.wav", "assets/drum-samples/Kit3/snare.wav", "assets/drum-samples/Kit3/tom1.wav", "assets/drum-samples/Kit3/tom2.wav", "assets/drum-samples/Kit3/tom3.wav", "assets/drum-samples/Kit8/hihat.wav", "assets/drum-samples/Kit8/kick.wav", "assets/drum-samples/Kit8/snare.wav", "assets/drum-samples/Kit8/tom1.wav", "assets/drum-samples/Kit8/tom2.wav", "assets/drum-samples/Kit8/tom3.wav", "assets/drum-samples/KPR77/hihat.wav", "assets/drum-samples/KPR77/kick.wav", "assets/drum-samples/KPR77/snare.wav", "assets/drum-samples/KPR77/tom1.wav", "assets/drum-samples/KPR77/tom2.wav", "assets/drum-samples/KPR77/tom3.wav", "assets/drum-samples/LINN/hihat.wav", "assets/drum-samples/LINN/kick.wav", "assets/drum-samples/LINN/snare.wav", "assets/drum-samples/LINN/tom1.wav", "assets/drum-samples/LINN/tom2.wav", "assets/drum-samples/LINN/tom3.wav", "assets/drum-samples/R8/hihat.wav", "assets/drum-samples/R8/kick.wav", "assets/drum-samples/R8/snare.wav", "assets/drum-samples/R8/tom1.wav", "assets/drum-samples/R8/tom2.wav", "assets/drum-samples/R8/tom3.wav", "assets/drum-samples/Stark/hihat.wav", "assets/drum-samples/Stark/kick.wav", "assets/drum-samples/Stark/snare.wav", "assets/drum-samples/Stark/tom1.wav", "assets/drum-samples/Stark/tom2.wav", "assets/drum-samples/Stark/tom3.wav", "assets/drum-samples/Techno/hihat.wav", "assets/drum-samples/Techno/kick.wav", "assets/drum-samples/Techno/snare.wav", "assets/drum-samples/Techno/tom1.wav", "assets/drum-samples/Techno/tom2.wav", "assets/drum-samples/Techno/tom3.wav", "assets/drum-samples/TheCheebacabra1/hihat.wav", "assets/drum-samples/TheCheebacabra1/kick.wav", "assets/drum-samples/TheCheebacabra1/snare.wav", "assets/drum-samples/TheCheebacabra1/tom1.wav", "assets/drum-samples/TheCheebacabra1/tom2.wav", "assets/drum-samples/TheCheebacabra1/tom3.wav", "assets/drum-samples/TheCheebacabra2/hihat.wav", "assets/drum-samples/TheCheebacabra2/kick.wav", "assets/drum-samples/TheCheebacabra2/snare.wav", "assets/drum-samples/TheCheebacabra2/tom1.wav", "assets/drum-samples/TheCheebacabra2/tom2.wav", "assets/drum-samples/TheCheebacabra2/tom3.wav", ]; return function(Misc) { this.instruments = []; this.getNextInstrument = function() { if (this.instruments.length == 0) { this.instruments = soundFiles.slice(); } var index = Misc.randInt(this.instruments.length); var instrument = this.instruments.splice(index, 1)[0]; return instrument }.bind(this); }; }()); var Player = function(services, netPlayer, name) { this.services = services; this.netPlayer = netPlayer; this.name = name; this.tracks = []; this.position = [services.misc.randInt(200), services.misc.randInt(200)]; //this.elem = document.createElement("div"); //this.elemState = -1; //var elem = this.elem; //var s = elem.style; //s.width = "32px"; //s.height = "32px"; //s.border = "1px solid black"; //s.position = "absolute"; //s.left = services.misc.randInt(window.innerWidth - 32) + "px"; //s.top = services.misc.randInt(window.innerHeight - 32) + "px"; //s.zIndex = 5; //document.body.appendChild(elem); netPlayer.addEventListener('disconnect', Player.prototype.disconnect.bind(this)); netPlayer.addEventListener('newInstrument', Player.prototype.chooseInstrument.bind(this)); netPlayer.addEventListener('setColor', Player.prototype.setColor.bind(this)); netPlayer.addEventListener('busy', Player.prototype.handleBusyMsg.bind(this)); netPlayer.addEventListener('setName', Player.prototype.handleNameMsg.bind(this)); netPlayer.addEventListener('tracks', Player.prototype.setTracks.bind(this)); netPlayer.addEventListener('note', Player.prototype.setNote.bind(this)); this.chooseInstrument(); }; Player.prototype.setColor = function(data) { this.color = data.color; this.glColor = this.services.cssParse.parseCSSColor(data.color, true); if (this.elem)
}; Player.prototype.setTracks = function(data) { this.tracks = data.tracks; }; Player.prototype.setNote = function(data) { if (this.tracks) { var track = this.tracks[data.t]; if (track) { track.rhythm[data.r] = data.n; } } }; Player.prototype.handleNameMsg = function(msg) { if (!msg.name) { this.sendCmd('setName', { name: this.name }); } else { this.name = msg.name.replace(/[<>]/g, ''); } }; Player.prototype.handleBusyMsg = function(msg) { // ignore this for now }; Player.prototype.chooseInstrument = function() { var instrument = this.services.instrumentManager.getNextInstrument(); this.sendCmd('setInstrument', { filename: instrument, }); }; Player.prototype.sendCmd = function(cmd, data) { this.netPlayer.sendCmd(cmd, data); }; Player.prototype.disconnect = function() { this.netPlayer.removeAllListeners(); this.services.playerManager.removePlayer(this); if (this.elem) { this.elem.parentNode.removeChild(this.elem); } }; Player.prototype.drawNotes = function(trackIndex, rhythmIndex) { if (this.tracks) { var track = this.tracks[trackIndex]; if (track) { var on = track.rhythm[rhythmIndex]; if (this.elem && this.elemState != on) { this.elem.style.border = on ? "5px solid white" : "1px solid black"; this.elemState = on; } if (on) { var gridSize = this.services.globals.gridSize; var renderer = this.services.renderer; var width = renderer.canvas.width; var height = renderer.canvas.height; var gridWidth = Math.floor(width / gridSize); var gridHeight = Math.floor(height / gridSize); var offX = (width - (gridWidth * gridSize)) / 2; var offY = (height - (gridHeight * gridSize)) / 2; switch (rhythmIndex % 4) { case 0: ++this.position[0]; break; case 1: ++this.position[1]; break; case 2: --this.position[0]; break; case 3: --this.position[1]; break; } this.position[0] = (this.position[0] + gridWidth ) % gridWidth; this.position[1] = (this.position[1] + gridHeight) % gridHeight; this.services.renderer.drawCircle( [offX + this.position[0] * gridSize, offY + this.position[1] * gridSize], gridSize, this.glColor); } } } }; var PlayerManager = function(services) { this.services = services; this.players = []; this.numElem = $("numPeople").firstChild; }; PlayerManager.prototype.updatePlayers = function() { this.numElem.nodeValue = this.players.length; }; PlayerManager.prototype.startPlayer = function(netPlayer, name) { var player = new Player(this.services, netPlayer, name); this.players.push(player); this.updatePlayers(); }; PlayerManager.prototype.removePlayer = function(player) { this.players.splice(this.players.indexOf(player), 1); this.updatePlayers(); }; PlayerManager.prototype.drawNotes = function(trackIndex, rhythmIndex) { for (var ii = 0; ii < this.players.length; ++ii) { var player = this.players[ii]; player.drawNotes(trackIndex, rhythmIndex); } }; // Start the main app logic. requirejs( [ 'hft/gameserver', 'hft/gamesupport', 'hft/syncedclock', 'hft/misc/cssparse', 'hft/misc/misc', '../bower_components/hft-utils/dist/audio', './canvasrenderer', './webglrenderer', ], function( GameServer, GameSupport, SyncedClock, CSSParse, Misc, AudioManager, CanvasRenderer, WebGLRenderer) { var g_debug = false; var g_services = {}; var g_playerManager = new PlayerManager(g_services); g_services.playerManager = g_playerManager; g_services.cssParse = CSSParse; g_services.misc = Misc; var stop = false; // You can set these from the URL with // http://path/gameview.html?settings={name:value,name:value} var globals = { haveServer: true, bpm: 120, loopLength: 16, force2d: false, debug: false, gridSize: 32, }; function startPlayer(netPlayer, name) { return new Player(g_services, netPlayer); } Misc.applyUrlSettings(globals); g_services.globals = globals; var server; if (globals.haveServer) { server = new GameServer(); g_services.server = server; server.addEventListener('playerconnect', g_playerManager.startPlayer.bind(g_playerManager)); } GameSupport.init(server, globals); var clock = SyncedClock.createClock(true); g_services.clock = clock; var instrumentManager = new InstrumentManager(Misc); g_services.instrumentManager = instrumentManager; var canvas = $("canvas"); var renderer; if (!globals.force2d || globals.force2D) { renderer = new WebGLRenderer(g_services, canvas); } if (!renderer || !renderer.canRender()) { renderer = new CanvasRenderer(g_services, canvas); } g_services.renderer = renderer; var secondsPerBeat = 60 / globals.bpm; var secondsPerQuarterBeat = secondsPerBeat / 4; var lastDisplayedQuarterBeat = 0; function process() { var currentTime = clock.getTime(); var currentQuarterBeat = Math.floor(currentTime / secondsPerQuarterBeat); if (globals.debug) { var beat = Math.floor(currentQuarterBeat / 4) % 4; GameSupport.setStatus( "\n ct: " + currentTime.toFixed(2).substr(-5) + "\ncqb: " + currentQuarterBeat.toString().substr(-4) + "\n rt: " + currentQuarterBeat % globals.loopLength + "\n bt: " + beat + ((beat % 2) == 0 ? " ****" : "")); } if (lastDisplayedQuarterBeat != currentQuarterBeat) { lastDisplayedQuarterBeat = currentQuarterBeat; g_playerManager.drawNotes(0, currentQuarterBeat % globals.loopLength); } if (!stop) { setTimeout(process, 100); } } process(); GameSupport.run(globals, function() { //var x = Misc.randInt(150) + 500; //var y = Misc.randInt(150) + 300; //renderer.drawCircle([x, y], 40, [1,1,1,1]); renderer.begin(); renderer.end(); }); });
{ this.elem.style.backgroundColor = this.color; }
conditional_block
game.js
/* * Copyright 2014, Gregg Tavares. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Gregg Tavares. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ "use strict"; function $(id) { return document.getElementById(id); } var InstrumentManager = (function() { var soundFiles = [ "assets/drum-samples/4OP-FM/hihat.wav", "assets/drum-samples/4OP-FM/kick.wav", "assets/drum-samples/4OP-FM/snare.wav", "assets/drum-samples/4OP-FM/tom1.wav", "assets/drum-samples/4OP-FM/tom2.wav", "assets/drum-samples/4OP-FM/tom3.wav", "assets/drum-samples/acoustic-kit/hihat.wav", "assets/drum-samples/acoustic-kit/kick.wav", "assets/drum-samples/acoustic-kit/snare.wav", "assets/drum-samples/acoustic-kit/tom1.wav", "assets/drum-samples/acoustic-kit/tom2.wav", "assets/drum-samples/acoustic-kit/tom3.wav", "assets/drum-samples/Bongos/hihat.wav", "assets/drum-samples/Bongos/kick.wav", "assets/drum-samples/Bongos/snare.wav", "assets/drum-samples/Bongos/tom1.wav", "assets/drum-samples/Bongos/tom2.wav", "assets/drum-samples/Bongos/tom3.wav", "assets/drum-samples/breakbeat13/hihat.wav", "assets/drum-samples/breakbeat13/kick.wav", "assets/drum-samples/breakbeat13/snare.wav", "assets/drum-samples/breakbeat13/tom1.wav", "assets/drum-samples/breakbeat13/tom2.wav", "assets/drum-samples/breakbeat13/tom3.wav", "assets/drum-samples/breakbeat8/hihat.wav", "assets/drum-samples/breakbeat8/kick.wav", "assets/drum-samples/breakbeat8/snare.wav", "assets/drum-samples/breakbeat8/tom1.wav", "assets/drum-samples/breakbeat8/tom2.wav", "assets/drum-samples/breakbeat8/tom3.wav", "assets/drum-samples/breakbeat9/hihat.wav", "assets/drum-samples/breakbeat9/kick.wav", "assets/drum-samples/breakbeat9/snare.wav", "assets/drum-samples/breakbeat9/tom1.wav", "assets/drum-samples/breakbeat9/tom2.wav", "assets/drum-samples/breakbeat9/tom3.wav", "assets/drum-samples/CR78/hihat.wav", "assets/drum-samples/CR78/kick.wav", "assets/drum-samples/CR78/snare.wav", "assets/drum-samples/CR78/tom1.wav", "assets/drum-samples/CR78/tom2.wav", "assets/drum-samples/CR78/tom3.wav", "assets/drum-samples/Kit3/hihat.wav", "assets/drum-samples/Kit3/kick.wav", "assets/drum-samples/Kit3/snare.wav", "assets/drum-samples/Kit3/tom1.wav", "assets/drum-samples/Kit3/tom2.wav", "assets/drum-samples/Kit3/tom3.wav", "assets/drum-samples/Kit8/hihat.wav", "assets/drum-samples/Kit8/kick.wav", "assets/drum-samples/Kit8/snare.wav", "assets/drum-samples/Kit8/tom1.wav", "assets/drum-samples/Kit8/tom2.wav", "assets/drum-samples/Kit8/tom3.wav", "assets/drum-samples/KPR77/hihat.wav", "assets/drum-samples/KPR77/kick.wav", "assets/drum-samples/KPR77/snare.wav", "assets/drum-samples/KPR77/tom1.wav", "assets/drum-samples/KPR77/tom2.wav", "assets/drum-samples/KPR77/tom3.wav", "assets/drum-samples/LINN/hihat.wav", "assets/drum-samples/LINN/kick.wav", "assets/drum-samples/LINN/snare.wav", "assets/drum-samples/LINN/tom1.wav", "assets/drum-samples/LINN/tom2.wav", "assets/drum-samples/LINN/tom3.wav", "assets/drum-samples/R8/hihat.wav", "assets/drum-samples/R8/kick.wav", "assets/drum-samples/R8/snare.wav", "assets/drum-samples/R8/tom1.wav", "assets/drum-samples/R8/tom2.wav", "assets/drum-samples/R8/tom3.wav", "assets/drum-samples/Stark/hihat.wav", "assets/drum-samples/Stark/kick.wav", "assets/drum-samples/Stark/snare.wav", "assets/drum-samples/Stark/tom1.wav", "assets/drum-samples/Stark/tom2.wav", "assets/drum-samples/Stark/tom3.wav", "assets/drum-samples/Techno/hihat.wav", "assets/drum-samples/Techno/kick.wav", "assets/drum-samples/Techno/snare.wav", "assets/drum-samples/Techno/tom1.wav", "assets/drum-samples/Techno/tom2.wav", "assets/drum-samples/Techno/tom3.wav", "assets/drum-samples/TheCheebacabra1/hihat.wav", "assets/drum-samples/TheCheebacabra1/kick.wav", "assets/drum-samples/TheCheebacabra1/snare.wav", "assets/drum-samples/TheCheebacabra1/tom1.wav", "assets/drum-samples/TheCheebacabra1/tom2.wav", "assets/drum-samples/TheCheebacabra1/tom3.wav", "assets/drum-samples/TheCheebacabra2/hihat.wav", "assets/drum-samples/TheCheebacabra2/kick.wav", "assets/drum-samples/TheCheebacabra2/snare.wav", "assets/drum-samples/TheCheebacabra2/tom1.wav", "assets/drum-samples/TheCheebacabra2/tom2.wav", "assets/drum-samples/TheCheebacabra2/tom3.wav", ]; return function(Misc) { this.instruments = []; this.getNextInstrument = function() { if (this.instruments.length == 0) { this.instruments = soundFiles.slice(); } var index = Misc.randInt(this.instruments.length); var instrument = this.instruments.splice(index, 1)[0]; return instrument }.bind(this); }; }()); var Player = function(services, netPlayer, name) { this.services = services; this.netPlayer = netPlayer; this.name = name; this.tracks = []; this.position = [services.misc.randInt(200), services.misc.randInt(200)]; //this.elem = document.createElement("div"); //this.elemState = -1; //var elem = this.elem; //var s = elem.style; //s.width = "32px"; //s.height = "32px"; //s.border = "1px solid black"; //s.position = "absolute"; //s.left = services.misc.randInt(window.innerWidth - 32) + "px"; //s.top = services.misc.randInt(window.innerHeight - 32) + "px"; //s.zIndex = 5; //document.body.appendChild(elem); netPlayer.addEventListener('disconnect', Player.prototype.disconnect.bind(this)); netPlayer.addEventListener('newInstrument', Player.prototype.chooseInstrument.bind(this)); netPlayer.addEventListener('setColor', Player.prototype.setColor.bind(this)); netPlayer.addEventListener('busy', Player.prototype.handleBusyMsg.bind(this)); netPlayer.addEventListener('setName', Player.prototype.handleNameMsg.bind(this)); netPlayer.addEventListener('tracks', Player.prototype.setTracks.bind(this)); netPlayer.addEventListener('note', Player.prototype.setNote.bind(this)); this.chooseInstrument(); }; Player.prototype.setColor = function(data) { this.color = data.color; this.glColor = this.services.cssParse.parseCSSColor(data.color, true); if (this.elem) { this.elem.style.backgroundColor = this.color; } }; Player.prototype.setTracks = function(data) { this.tracks = data.tracks; }; Player.prototype.setNote = function(data) { if (this.tracks) { var track = this.tracks[data.t]; if (track) { track.rhythm[data.r] = data.n; } } }; Player.prototype.handleNameMsg = function(msg) { if (!msg.name) { this.sendCmd('setName', { name: this.name }); } else { this.name = msg.name.replace(/[<>]/g, ''); } }; Player.prototype.handleBusyMsg = function(msg) { // ignore this for now }; Player.prototype.chooseInstrument = function() { var instrument = this.services.instrumentManager.getNextInstrument(); this.sendCmd('setInstrument', { filename: instrument, }); }; Player.prototype.sendCmd = function(cmd, data) { this.netPlayer.sendCmd(cmd, data); }; Player.prototype.disconnect = function() { this.netPlayer.removeAllListeners(); this.services.playerManager.removePlayer(this); if (this.elem) { this.elem.parentNode.removeChild(this.elem); } }; Player.prototype.drawNotes = function(trackIndex, rhythmIndex) { if (this.tracks) { var track = this.tracks[trackIndex]; if (track) { var on = track.rhythm[rhythmIndex]; if (this.elem && this.elemState != on) { this.elem.style.border = on ? "5px solid white" : "1px solid black"; this.elemState = on; } if (on) { var gridSize = this.services.globals.gridSize; var renderer = this.services.renderer; var width = renderer.canvas.width; var height = renderer.canvas.height; var gridWidth = Math.floor(width / gridSize); var gridHeight = Math.floor(height / gridSize); var offX = (width - (gridWidth * gridSize)) / 2; var offY = (height - (gridHeight * gridSize)) / 2; switch (rhythmIndex % 4) { case 0: ++this.position[0]; break; case 1: ++this.position[1]; break; case 2: --this.position[0]; break; case 3: --this.position[1]; break; } this.position[0] = (this.position[0] + gridWidth ) % gridWidth; this.position[1] = (this.position[1] + gridHeight) % gridHeight; this.services.renderer.drawCircle( [offX + this.position[0] * gridSize, offY + this.position[1] * gridSize], gridSize, this.glColor); } } } }; var PlayerManager = function(services) { this.services = services; this.players = []; this.numElem = $("numPeople").firstChild; }; PlayerManager.prototype.updatePlayers = function() { this.numElem.nodeValue = this.players.length; }; PlayerManager.prototype.startPlayer = function(netPlayer, name) { var player = new Player(this.services, netPlayer, name); this.players.push(player); this.updatePlayers(); }; PlayerManager.prototype.removePlayer = function(player) { this.players.splice(this.players.indexOf(player), 1); this.updatePlayers(); }; PlayerManager.prototype.drawNotes = function(trackIndex, rhythmIndex) { for (var ii = 0; ii < this.players.length; ++ii) { var player = this.players[ii]; player.drawNotes(trackIndex, rhythmIndex); } }; // Start the main app logic. requirejs( [ 'hft/gameserver', 'hft/gamesupport', 'hft/syncedclock', 'hft/misc/cssparse', 'hft/misc/misc', '../bower_components/hft-utils/dist/audio', './canvasrenderer', './webglrenderer', ], function( GameServer, GameSupport, SyncedClock, CSSParse, Misc, AudioManager, CanvasRenderer, WebGLRenderer) { var g_debug = false;
g_services.playerManager = g_playerManager; g_services.cssParse = CSSParse; g_services.misc = Misc; var stop = false; // You can set these from the URL with // http://path/gameview.html?settings={name:value,name:value} var globals = { haveServer: true, bpm: 120, loopLength: 16, force2d: false, debug: false, gridSize: 32, }; function startPlayer(netPlayer, name) { return new Player(g_services, netPlayer); } Misc.applyUrlSettings(globals); g_services.globals = globals; var server; if (globals.haveServer) { server = new GameServer(); g_services.server = server; server.addEventListener('playerconnect', g_playerManager.startPlayer.bind(g_playerManager)); } GameSupport.init(server, globals); var clock = SyncedClock.createClock(true); g_services.clock = clock; var instrumentManager = new InstrumentManager(Misc); g_services.instrumentManager = instrumentManager; var canvas = $("canvas"); var renderer; if (!globals.force2d || globals.force2D) { renderer = new WebGLRenderer(g_services, canvas); } if (!renderer || !renderer.canRender()) { renderer = new CanvasRenderer(g_services, canvas); } g_services.renderer = renderer; var secondsPerBeat = 60 / globals.bpm; var secondsPerQuarterBeat = secondsPerBeat / 4; var lastDisplayedQuarterBeat = 0; function process() { var currentTime = clock.getTime(); var currentQuarterBeat = Math.floor(currentTime / secondsPerQuarterBeat); if (globals.debug) { var beat = Math.floor(currentQuarterBeat / 4) % 4; GameSupport.setStatus( "\n ct: " + currentTime.toFixed(2).substr(-5) + "\ncqb: " + currentQuarterBeat.toString().substr(-4) + "\n rt: " + currentQuarterBeat % globals.loopLength + "\n bt: " + beat + ((beat % 2) == 0 ? " ****" : "")); } if (lastDisplayedQuarterBeat != currentQuarterBeat) { lastDisplayedQuarterBeat = currentQuarterBeat; g_playerManager.drawNotes(0, currentQuarterBeat % globals.loopLength); } if (!stop) { setTimeout(process, 100); } } process(); GameSupport.run(globals, function() { //var x = Misc.randInt(150) + 500; //var y = Misc.randInt(150) + 300; //renderer.drawCircle([x, y], 40, [1,1,1,1]); renderer.begin(); renderer.end(); }); });
var g_services = {}; var g_playerManager = new PlayerManager(g_services);
random_line_split
game.js
/* * Copyright 2014, Gregg Tavares. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Gregg Tavares. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ "use strict"; function $(id) { return document.getElementById(id); } var InstrumentManager = (function() { var soundFiles = [ "assets/drum-samples/4OP-FM/hihat.wav", "assets/drum-samples/4OP-FM/kick.wav", "assets/drum-samples/4OP-FM/snare.wav", "assets/drum-samples/4OP-FM/tom1.wav", "assets/drum-samples/4OP-FM/tom2.wav", "assets/drum-samples/4OP-FM/tom3.wav", "assets/drum-samples/acoustic-kit/hihat.wav", "assets/drum-samples/acoustic-kit/kick.wav", "assets/drum-samples/acoustic-kit/snare.wav", "assets/drum-samples/acoustic-kit/tom1.wav", "assets/drum-samples/acoustic-kit/tom2.wav", "assets/drum-samples/acoustic-kit/tom3.wav", "assets/drum-samples/Bongos/hihat.wav", "assets/drum-samples/Bongos/kick.wav", "assets/drum-samples/Bongos/snare.wav", "assets/drum-samples/Bongos/tom1.wav", "assets/drum-samples/Bongos/tom2.wav", "assets/drum-samples/Bongos/tom3.wav", "assets/drum-samples/breakbeat13/hihat.wav", "assets/drum-samples/breakbeat13/kick.wav", "assets/drum-samples/breakbeat13/snare.wav", "assets/drum-samples/breakbeat13/tom1.wav", "assets/drum-samples/breakbeat13/tom2.wav", "assets/drum-samples/breakbeat13/tom3.wav", "assets/drum-samples/breakbeat8/hihat.wav", "assets/drum-samples/breakbeat8/kick.wav", "assets/drum-samples/breakbeat8/snare.wav", "assets/drum-samples/breakbeat8/tom1.wav", "assets/drum-samples/breakbeat8/tom2.wav", "assets/drum-samples/breakbeat8/tom3.wav", "assets/drum-samples/breakbeat9/hihat.wav", "assets/drum-samples/breakbeat9/kick.wav", "assets/drum-samples/breakbeat9/snare.wav", "assets/drum-samples/breakbeat9/tom1.wav", "assets/drum-samples/breakbeat9/tom2.wav", "assets/drum-samples/breakbeat9/tom3.wav", "assets/drum-samples/CR78/hihat.wav", "assets/drum-samples/CR78/kick.wav", "assets/drum-samples/CR78/snare.wav", "assets/drum-samples/CR78/tom1.wav", "assets/drum-samples/CR78/tom2.wav", "assets/drum-samples/CR78/tom3.wav", "assets/drum-samples/Kit3/hihat.wav", "assets/drum-samples/Kit3/kick.wav", "assets/drum-samples/Kit3/snare.wav", "assets/drum-samples/Kit3/tom1.wav", "assets/drum-samples/Kit3/tom2.wav", "assets/drum-samples/Kit3/tom3.wav", "assets/drum-samples/Kit8/hihat.wav", "assets/drum-samples/Kit8/kick.wav", "assets/drum-samples/Kit8/snare.wav", "assets/drum-samples/Kit8/tom1.wav", "assets/drum-samples/Kit8/tom2.wav", "assets/drum-samples/Kit8/tom3.wav", "assets/drum-samples/KPR77/hihat.wav", "assets/drum-samples/KPR77/kick.wav", "assets/drum-samples/KPR77/snare.wav", "assets/drum-samples/KPR77/tom1.wav", "assets/drum-samples/KPR77/tom2.wav", "assets/drum-samples/KPR77/tom3.wav", "assets/drum-samples/LINN/hihat.wav", "assets/drum-samples/LINN/kick.wav", "assets/drum-samples/LINN/snare.wav", "assets/drum-samples/LINN/tom1.wav", "assets/drum-samples/LINN/tom2.wav", "assets/drum-samples/LINN/tom3.wav", "assets/drum-samples/R8/hihat.wav", "assets/drum-samples/R8/kick.wav", "assets/drum-samples/R8/snare.wav", "assets/drum-samples/R8/tom1.wav", "assets/drum-samples/R8/tom2.wav", "assets/drum-samples/R8/tom3.wav", "assets/drum-samples/Stark/hihat.wav", "assets/drum-samples/Stark/kick.wav", "assets/drum-samples/Stark/snare.wav", "assets/drum-samples/Stark/tom1.wav", "assets/drum-samples/Stark/tom2.wav", "assets/drum-samples/Stark/tom3.wav", "assets/drum-samples/Techno/hihat.wav", "assets/drum-samples/Techno/kick.wav", "assets/drum-samples/Techno/snare.wav", "assets/drum-samples/Techno/tom1.wav", "assets/drum-samples/Techno/tom2.wav", "assets/drum-samples/Techno/tom3.wav", "assets/drum-samples/TheCheebacabra1/hihat.wav", "assets/drum-samples/TheCheebacabra1/kick.wav", "assets/drum-samples/TheCheebacabra1/snare.wav", "assets/drum-samples/TheCheebacabra1/tom1.wav", "assets/drum-samples/TheCheebacabra1/tom2.wav", "assets/drum-samples/TheCheebacabra1/tom3.wav", "assets/drum-samples/TheCheebacabra2/hihat.wav", "assets/drum-samples/TheCheebacabra2/kick.wav", "assets/drum-samples/TheCheebacabra2/snare.wav", "assets/drum-samples/TheCheebacabra2/tom1.wav", "assets/drum-samples/TheCheebacabra2/tom2.wav", "assets/drum-samples/TheCheebacabra2/tom3.wav", ]; return function(Misc) { this.instruments = []; this.getNextInstrument = function() { if (this.instruments.length == 0) { this.instruments = soundFiles.slice(); } var index = Misc.randInt(this.instruments.length); var instrument = this.instruments.splice(index, 1)[0]; return instrument }.bind(this); }; }()); var Player = function(services, netPlayer, name) { this.services = services; this.netPlayer = netPlayer; this.name = name; this.tracks = []; this.position = [services.misc.randInt(200), services.misc.randInt(200)]; //this.elem = document.createElement("div"); //this.elemState = -1; //var elem = this.elem; //var s = elem.style; //s.width = "32px"; //s.height = "32px"; //s.border = "1px solid black"; //s.position = "absolute"; //s.left = services.misc.randInt(window.innerWidth - 32) + "px"; //s.top = services.misc.randInt(window.innerHeight - 32) + "px"; //s.zIndex = 5; //document.body.appendChild(elem); netPlayer.addEventListener('disconnect', Player.prototype.disconnect.bind(this)); netPlayer.addEventListener('newInstrument', Player.prototype.chooseInstrument.bind(this)); netPlayer.addEventListener('setColor', Player.prototype.setColor.bind(this)); netPlayer.addEventListener('busy', Player.prototype.handleBusyMsg.bind(this)); netPlayer.addEventListener('setName', Player.prototype.handleNameMsg.bind(this)); netPlayer.addEventListener('tracks', Player.prototype.setTracks.bind(this)); netPlayer.addEventListener('note', Player.prototype.setNote.bind(this)); this.chooseInstrument(); }; Player.prototype.setColor = function(data) { this.color = data.color; this.glColor = this.services.cssParse.parseCSSColor(data.color, true); if (this.elem) { this.elem.style.backgroundColor = this.color; } }; Player.prototype.setTracks = function(data) { this.tracks = data.tracks; }; Player.prototype.setNote = function(data) { if (this.tracks) { var track = this.tracks[data.t]; if (track) { track.rhythm[data.r] = data.n; } } }; Player.prototype.handleNameMsg = function(msg) { if (!msg.name) { this.sendCmd('setName', { name: this.name }); } else { this.name = msg.name.replace(/[<>]/g, ''); } }; Player.prototype.handleBusyMsg = function(msg) { // ignore this for now }; Player.prototype.chooseInstrument = function() { var instrument = this.services.instrumentManager.getNextInstrument(); this.sendCmd('setInstrument', { filename: instrument, }); }; Player.prototype.sendCmd = function(cmd, data) { this.netPlayer.sendCmd(cmd, data); }; Player.prototype.disconnect = function() { this.netPlayer.removeAllListeners(); this.services.playerManager.removePlayer(this); if (this.elem) { this.elem.parentNode.removeChild(this.elem); } }; Player.prototype.drawNotes = function(trackIndex, rhythmIndex) { if (this.tracks) { var track = this.tracks[trackIndex]; if (track) { var on = track.rhythm[rhythmIndex]; if (this.elem && this.elemState != on) { this.elem.style.border = on ? "5px solid white" : "1px solid black"; this.elemState = on; } if (on) { var gridSize = this.services.globals.gridSize; var renderer = this.services.renderer; var width = renderer.canvas.width; var height = renderer.canvas.height; var gridWidth = Math.floor(width / gridSize); var gridHeight = Math.floor(height / gridSize); var offX = (width - (gridWidth * gridSize)) / 2; var offY = (height - (gridHeight * gridSize)) / 2; switch (rhythmIndex % 4) { case 0: ++this.position[0]; break; case 1: ++this.position[1]; break; case 2: --this.position[0]; break; case 3: --this.position[1]; break; } this.position[0] = (this.position[0] + gridWidth ) % gridWidth; this.position[1] = (this.position[1] + gridHeight) % gridHeight; this.services.renderer.drawCircle( [offX + this.position[0] * gridSize, offY + this.position[1] * gridSize], gridSize, this.glColor); } } } }; var PlayerManager = function(services) { this.services = services; this.players = []; this.numElem = $("numPeople").firstChild; }; PlayerManager.prototype.updatePlayers = function() { this.numElem.nodeValue = this.players.length; }; PlayerManager.prototype.startPlayer = function(netPlayer, name) { var player = new Player(this.services, netPlayer, name); this.players.push(player); this.updatePlayers(); }; PlayerManager.prototype.removePlayer = function(player) { this.players.splice(this.players.indexOf(player), 1); this.updatePlayers(); }; PlayerManager.prototype.drawNotes = function(trackIndex, rhythmIndex) { for (var ii = 0; ii < this.players.length; ++ii) { var player = this.players[ii]; player.drawNotes(trackIndex, rhythmIndex); } }; // Start the main app logic. requirejs( [ 'hft/gameserver', 'hft/gamesupport', 'hft/syncedclock', 'hft/misc/cssparse', 'hft/misc/misc', '../bower_components/hft-utils/dist/audio', './canvasrenderer', './webglrenderer', ], function( GameServer, GameSupport, SyncedClock, CSSParse, Misc, AudioManager, CanvasRenderer, WebGLRenderer) { var g_debug = false; var g_services = {}; var g_playerManager = new PlayerManager(g_services); g_services.playerManager = g_playerManager; g_services.cssParse = CSSParse; g_services.misc = Misc; var stop = false; // You can set these from the URL with // http://path/gameview.html?settings={name:value,name:value} var globals = { haveServer: true, bpm: 120, loopLength: 16, force2d: false, debug: false, gridSize: 32, }; function startPlayer(netPlayer, name) { return new Player(g_services, netPlayer); } Misc.applyUrlSettings(globals); g_services.globals = globals; var server; if (globals.haveServer) { server = new GameServer(); g_services.server = server; server.addEventListener('playerconnect', g_playerManager.startPlayer.bind(g_playerManager)); } GameSupport.init(server, globals); var clock = SyncedClock.createClock(true); g_services.clock = clock; var instrumentManager = new InstrumentManager(Misc); g_services.instrumentManager = instrumentManager; var canvas = $("canvas"); var renderer; if (!globals.force2d || globals.force2D) { renderer = new WebGLRenderer(g_services, canvas); } if (!renderer || !renderer.canRender()) { renderer = new CanvasRenderer(g_services, canvas); } g_services.renderer = renderer; var secondsPerBeat = 60 / globals.bpm; var secondsPerQuarterBeat = secondsPerBeat / 4; var lastDisplayedQuarterBeat = 0; function
() { var currentTime = clock.getTime(); var currentQuarterBeat = Math.floor(currentTime / secondsPerQuarterBeat); if (globals.debug) { var beat = Math.floor(currentQuarterBeat / 4) % 4; GameSupport.setStatus( "\n ct: " + currentTime.toFixed(2).substr(-5) + "\ncqb: " + currentQuarterBeat.toString().substr(-4) + "\n rt: " + currentQuarterBeat % globals.loopLength + "\n bt: " + beat + ((beat % 2) == 0 ? " ****" : "")); } if (lastDisplayedQuarterBeat != currentQuarterBeat) { lastDisplayedQuarterBeat = currentQuarterBeat; g_playerManager.drawNotes(0, currentQuarterBeat % globals.loopLength); } if (!stop) { setTimeout(process, 100); } } process(); GameSupport.run(globals, function() { //var x = Misc.randInt(150) + 500; //var y = Misc.randInt(150) + 300; //renderer.drawCircle([x, y], 40, [1,1,1,1]); renderer.begin(); renderer.end(); }); });
process
identifier_name
sendFeedbackMutation.test.ts
/* eslint-disable promise/always-return */ import { runAuthenticatedQuery } from "schema/v2/test/utils" describe("Send Feedback", () => { const feedback = { id: "id", message: "Catty message", } const query = ` mutation { sendFeedback(input: {message: "Catty message"}) { feedbackOrError { ... on SendFeedbackMutationSuccess { feedback { message } } ... on SendFeedbackMutationFailure { mutationError { type message detail } } } } } ` const context = { sendFeedbackLoader: () => Promise.resolve(feedback), } it("returns a formatted error message", async () => { const errorRootValue = { sendFeedbackLoader: () => Promise.reject( new Error( `https://stagingapi.artsy.net/api/v1/feedback - {"error":"Message Cant Not Be Blank"}` ) ), } const data = await runAuthenticatedQuery(query, errorRootValue) expect(data).toEqual({ sendFeedback: { feedbackOrError: { mutationError: { detail: null, message: "Message Cant Not Be Blank", type: "error", }, }, }, }) }) it("sends a feedback message", async () => { const data = await runAuthenticatedQuery(query, context) expect(data).toEqual({ sendFeedback: { feedbackOrError: { feedback: { message: "Catty message" } }, }, })
}) })
random_line_split
cargo_expand.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::env; use std::error; use std::fmt; use std::io; use std::path::Path; use std::process::Command; use std::str::{from_utf8, Utf8Error}; extern crate tempfile; use self::tempfile::Builder; #[derive(Debug)] /// Possible errors that can occur during `rustc --pretty=expanded`. pub enum Error { /// Error during creation of temporary directory Io(io::Error), /// Output of `cargo metadata` was not valid utf8 Utf8(Utf8Error), /// Error during execution of `cargo rustc --pretty=expanded` Compile(String), } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::Io(err) } } impl From<Utf8Error> for Error { fn from(err: Utf8Error) -> Self { Error::Utf8(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Io(ref err) => err.fmt(f), Error::Utf8(ref err) => err.fmt(f), Error::Compile(ref err) => write!(f, "{}", err), } } } impl error::Error for Error { fn
(&self) -> Option<&(dyn error::Error + 'static)> { match self { Error::Io(ref err) => Some(err), Error::Utf8(ref err) => Some(err), Error::Compile(..) => None, } } } /// Use rustc to expand and pretty print the crate into a single file, /// removing any macros in the process. pub fn expand( manifest_path: &Path, crate_name: &str, version: &str, use_tempdir: bool, expand_all_features: bool, expand_default_features: bool, expand_features: &Option<Vec<String>>, ) -> Result<String, Error> { let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); let mut cmd = Command::new(cargo); let mut _temp_dir = None; // drop guard if use_tempdir { _temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?); cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path()); } else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") { cmd.env("CARGO_TARGET_DIR", path); } cmd.arg("rustc"); cmd.arg("--lib"); cmd.arg("--manifest-path"); cmd.arg(manifest_path); if let Some(features) = expand_features { cmd.arg("--features"); let mut features_str = String::new(); for (index, feature) in features.iter().enumerate() { if index != 0 { features_str.push_str(" "); } features_str.push_str(feature); } cmd.arg(features_str); } if expand_all_features { cmd.arg("--all-features"); } if !expand_default_features { cmd.arg("--no-default-features"); } cmd.arg("-p"); cmd.arg(&format!("{}:{}", crate_name, version)); cmd.arg("--"); cmd.arg("-Z"); cmd.arg("unstable-options"); cmd.arg("--pretty=expanded"); let output = cmd.output()?; let src = from_utf8(&output.stdout)?.to_owned(); let error = from_utf8(&output.stderr)?.to_owned(); if src.len() == 0 { Err(Error::Compile(error)) } else { Ok(src) } }
source
identifier_name
cargo_expand.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::env; use std::error; use std::fmt; use std::io; use std::path::Path; use std::process::Command; use std::str::{from_utf8, Utf8Error}; extern crate tempfile; use self::tempfile::Builder; #[derive(Debug)] /// Possible errors that can occur during `rustc --pretty=expanded`. pub enum Error { /// Error during creation of temporary directory Io(io::Error), /// Output of `cargo metadata` was not valid utf8 Utf8(Utf8Error), /// Error during execution of `cargo rustc --pretty=expanded` Compile(String), } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::Io(err) } } impl From<Utf8Error> for Error { fn from(err: Utf8Error) -> Self { Error::Utf8(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Error::Io(ref err) => err.fmt(f), Error::Utf8(ref err) => err.fmt(f), Error::Compile(ref err) => write!(f, "{}", err), } } } impl error::Error for Error { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Error::Io(ref err) => Some(err), Error::Utf8(ref err) => Some(err), Error::Compile(..) => None, } } } /// Use rustc to expand and pretty print the crate into a single file, /// removing any macros in the process. pub fn expand( manifest_path: &Path, crate_name: &str, version: &str, use_tempdir: bool, expand_all_features: bool, expand_default_features: bool, expand_features: &Option<Vec<String>>, ) -> Result<String, Error> { let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); let mut cmd = Command::new(cargo); let mut _temp_dir = None; // drop guard if use_tempdir { _temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?); cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path()); } else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") { cmd.env("CARGO_TARGET_DIR", path); } cmd.arg("rustc"); cmd.arg("--lib"); cmd.arg("--manifest-path"); cmd.arg(manifest_path); if let Some(features) = expand_features { cmd.arg("--features"); let mut features_str = String::new(); for (index, feature) in features.iter().enumerate() { if index != 0 { features_str.push_str(" "); } features_str.push_str(feature); } cmd.arg(features_str); } if expand_all_features { cmd.arg("--all-features"); } if !expand_default_features { cmd.arg("--no-default-features"); } cmd.arg("-p"); cmd.arg(&format!("{}:{}", crate_name, version));
cmd.arg("-Z"); cmd.arg("unstable-options"); cmd.arg("--pretty=expanded"); let output = cmd.output()?; let src = from_utf8(&output.stdout)?.to_owned(); let error = from_utf8(&output.stderr)?.to_owned(); if src.len() == 0 { Err(Error::Compile(error)) } else { Ok(src) } }
cmd.arg("--");
random_line_split
cargo_expand.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::env; use std::error; use std::fmt; use std::io; use std::path::Path; use std::process::Command; use std::str::{from_utf8, Utf8Error}; extern crate tempfile; use self::tempfile::Builder; #[derive(Debug)] /// Possible errors that can occur during `rustc --pretty=expanded`. pub enum Error { /// Error during creation of temporary directory Io(io::Error), /// Output of `cargo metadata` was not valid utf8 Utf8(Utf8Error), /// Error during execution of `cargo rustc --pretty=expanded` Compile(String), } impl From<io::Error> for Error { fn from(err: io::Error) -> Self { Error::Io(err) } } impl From<Utf8Error> for Error { fn from(err: Utf8Error) -> Self { Error::Utf8(err) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} impl error::Error for Error { fn source(&self) -> Option<&(dyn error::Error + 'static)> { match self { Error::Io(ref err) => Some(err), Error::Utf8(ref err) => Some(err), Error::Compile(..) => None, } } } /// Use rustc to expand and pretty print the crate into a single file, /// removing any macros in the process. pub fn expand( manifest_path: &Path, crate_name: &str, version: &str, use_tempdir: bool, expand_all_features: bool, expand_default_features: bool, expand_features: &Option<Vec<String>>, ) -> Result<String, Error> { let cargo = env::var("CARGO").unwrap_or_else(|_| String::from("cargo")); let mut cmd = Command::new(cargo); let mut _temp_dir = None; // drop guard if use_tempdir { _temp_dir = Some(Builder::new().prefix("cbindgen-expand").tempdir()?); cmd.env("CARGO_TARGET_DIR", _temp_dir.unwrap().path()); } else if let Ok(ref path) = env::var("CARGO_EXPAND_TARGET_DIR") { cmd.env("CARGO_TARGET_DIR", path); } cmd.arg("rustc"); cmd.arg("--lib"); cmd.arg("--manifest-path"); cmd.arg(manifest_path); if let Some(features) = expand_features { cmd.arg("--features"); let mut features_str = String::new(); for (index, feature) in features.iter().enumerate() { if index != 0 { features_str.push_str(" "); } features_str.push_str(feature); } cmd.arg(features_str); } if expand_all_features { cmd.arg("--all-features"); } if !expand_default_features { cmd.arg("--no-default-features"); } cmd.arg("-p"); cmd.arg(&format!("{}:{}", crate_name, version)); cmd.arg("--"); cmd.arg("-Z"); cmd.arg("unstable-options"); cmd.arg("--pretty=expanded"); let output = cmd.output()?; let src = from_utf8(&output.stdout)?.to_owned(); let error = from_utf8(&output.stderr)?.to_owned(); if src.len() == 0 { Err(Error::Compile(error)) } else { Ok(src) } }
{ match self { Error::Io(ref err) => err.fmt(f), Error::Utf8(ref err) => err.fmt(f), Error::Compile(ref err) => write!(f, "{}", err), } }
identifier_body
ProgressBar.js
import {style} from './ProgressBar.scss'; import React from 'react'; // // Component // ----------------------------------------------------------------------------- class ProgressBarView extends React.Component { constructor(props) { super(props); } static defaultProps = { progress: 0, showText: false }; static propTypes = { progress: React.PropTypes.number.isRequired, showText: React.PropTypes.bool.isRequired }; // // Render // ----------------------------------------------------------------------------- render() { const {progress, showText} = this.props; const rounded = Math.round(progress);
<div className={style}> <div className='progress'> <div className='progress-bar' role='progressbar' aria-valuenow={rounded} aria-valuemax='100' style={{width: `${rounded}%`}}> {showText ? `${rounded}%` : null} </div> </div> </div> ); } } export default ProgressBarView
return (
random_line_split
ProgressBar.js
import {style} from './ProgressBar.scss'; import React from 'react'; // // Component // ----------------------------------------------------------------------------- class ProgressBarView extends React.Component { constructor(props) { super(props); } static defaultProps = { progress: 0, showText: false }; static propTypes = { progress: React.PropTypes.number.isRequired, showText: React.PropTypes.bool.isRequired }; // // Render // -----------------------------------------------------------------------------
() { const {progress, showText} = this.props; const rounded = Math.round(progress); return ( <div className={style}> <div className='progress'> <div className='progress-bar' role='progressbar' aria-valuenow={rounded} aria-valuemax='100' style={{width: `${rounded}%`}}> {showText ? `${rounded}%` : null} </div> </div> </div> ); } } export default ProgressBarView
render
identifier_name
type_of.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use middle::trans::adt; use middle::trans::common::*; use middle::ty; use util::ppaux; use middle::trans::type_::Type; use syntax::ast; pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: &ty::t) -> bool { !ty::type_is_immediate(ccx.tcx, *arg_ty) } pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type { let llty = type_of(ccx, *arg_ty); if arg_is_indirect(ccx, arg_ty) { llty.ptr_to() } else { llty } } pub fn type_of_explicit_args(ccx: &mut CrateContext, inputs: &[ty::t]) -> ~[Type] { inputs.map(|arg_ty| type_of_explicit_arg(ccx, arg_ty)) } pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Type { let mut atys: ~[Type] = ~[]; // Arg 0: Output pointer. // (if the output type is non-immediate) let output_is_immediate = ty::type_is_immediate(cx.tcx, output); let lloutputtype = type_of(cx, output); if !output_is_immediate { atys.push(lloutputtype.ptr_to()); } // Arg 1: Environment atys.push(Type::opaque_box(cx).ptr_to()); // ... then explicit args. atys.push_all(type_of_explicit_args(cx, inputs)); // Use the output as the actual return value if it's immediate. if output_is_immediate && !ty::type_is_nil(output) { Type::func(atys, &lloutputtype) } else { Type::func(atys, &Type::void()) } } // Given a function type and a count of ty params, construct an llvm type pub fn type_of_fn_from_ty(cx: &mut CrateContext, fty: ty::t) -> Type { match ty::get(fty).sty { ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output), ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output), _ => { cx.sess.bug("type_of_fn_from_ty given non-closure, non-bare-fn") } } } pub fn type_of_non_gc_box(cx: &mut CrateContext, t: ty::t) -> Type { assert!(!ty::type_needs_infer(t)); let t_norm = ty::normalize_ty(cx.tcx, t); if t != t_norm { type_of_non_gc_box(cx, t_norm) } else { match ty::get(t).sty { ty::ty_box(mt) => { let ty = type_of(cx, mt.ty); Type::box(cx, &ty).ptr_to() } ty::ty_uniq(mt) => { let ty = type_of(cx, mt.ty); Type::unique(cx, &ty).ptr_to() } _ => { cx.sess.bug("non-box in type_of_non_gc_box"); } } } } // A "sizing type" is an LLVM type, the size and alignment of which are // guaranteed to be equivalent to what you would get out of `type_of()`. It's // useful because: // // (1) It may be cheaper to compute the sizing type than the full type if all // you're interested in is the size and/or alignment; // // (2) It won't make any recursive calls to determine the structure of the // type behind pointers. This can help prevent infinite loops for // recursive types. For example, enum types rely on this behavior. pub fn sizing_type_of(cx: &mut CrateContext, t: ty::t) -> Type { match cx.llsizingtypes.find_copy(&t) { Some(t) => return t, None => () } let llsizingty = match ty::get(t).sty { ty::ty_nil | ty::ty_bot => Type::nil(), ty::ty_bool => Type::bool(), ty::ty_int(t) => Type::int_from_ty(cx, t), ty::ty_uint(t) => Type::uint_from_ty(cx, t), ty::ty_float(t) => Type::float_from_ty(cx, t), ty::ty_estr(ty::vstore_uniq) | ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_box) | ty::ty_box(*) | ty::ty_opaque_box | ty::ty_uniq(*) | ty::ty_ptr(*) | ty::ty_rptr(*) | ty::ty_type | ty::ty_opaque_closure_ptr(*) => Type::i8p(), ty::ty_estr(ty::vstore_slice(*)) | ty::ty_evec(_, ty::vstore_slice(*)) => { Type::struct_([Type::i8p(), Type::i8p()], false) } ty::ty_bare_fn(*) => Type::i8p(), ty::ty_closure(*) => Type::struct_([Type::i8p(), Type::i8p()], false), ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store), ty::ty_estr(ty::vstore_fixed(size)) => Type::array(&Type::i8(), size as u64), ty::ty_evec(mt, ty::vstore_fixed(size)) => { Type::array(&sizing_type_of(cx, mt.ty), size as u64) } ty::ty_unboxed_vec(mt) => { let sz_ty = sizing_type_of(cx, mt.ty); Type::vec(cx.sess.targ_cfg.arch, &sz_ty) } ty::ty_tup(*) | ty::ty_enum(*) => { let repr = adt::represent_type(cx, t); Type::struct_(adt::sizing_fields_of(cx, repr), false) } ty::ty_struct(did, _) => { if ty::type_is_simd(cx.tcx, t) { let et = ty::simd_type(cx.tcx, t); let n = ty::simd_size(cx.tcx, t); Type::vector(&type_of(cx, et), n as u64) } else { let repr = adt::represent_type(cx, t); let packed = ty::lookup_packed(cx.tcx, did); Type::struct_(adt::sizing_fields_of(cx, repr), packed) } } ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => { cx.tcx.sess.bug(fmt!("fictitious type %? in sizing_type_of()", ty::get(t).sty)) } }; cx.llsizingtypes.insert(t, llsizingty); llsizingty } // NB: If you update this, be sure to update `sizing_type_of()` as well. pub fn type_of(cx: &mut CrateContext, t: ty::t) -> Type { debug!("type_of %?: %?", t, ty::get(t)); // Check the cache. match cx.lltypes.find(&t) { Some(&t) => return t, None => () } // Replace any typedef'd types with their equivalent non-typedef // type. This ensures that all LLVM nominal types that contain // Rust types are defined as the same LLVM types. If we don't do // this then, e.g. `Option<{myfield: bool}>` would be a different // type than `Option<myrec>`. let t_norm = ty::normalize_ty(cx.tcx, t); if t != t_norm { let llty = type_of(cx, t_norm); cx.lltypes.insert(t, llty); return llty; } let mut llty = match ty::get(t).sty { ty::ty_nil | ty::ty_bot => Type::nil(), ty::ty_bool => Type::bool(), ty::ty_int(t) => Type::int_from_ty(cx, t), ty::ty_uint(t) => Type::uint_from_ty(cx, t), ty::ty_float(t) => Type::float_from_ty(cx, t), ty::ty_estr(ty::vstore_uniq) => { Type::unique(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to() } ty::ty_enum(did, ref substs) => { // Only create the named struct, but don't fill it in. We // fill it in *after* placing it into the type cache. This // avoids creating more than one copy of the enum when one // of the enum's variants refers to the enum itself. Type::named_struct(llvm_type_name(cx, an_enum, did, substs.tps)) } ty::ty_estr(ty::vstore_box) => { Type::box(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to() } ty::ty_evec(ref mt, ty::vstore_box) => { let e_ty = type_of(cx, mt.ty); let v_ty = Type::vec(cx.sess.targ_cfg.arch, &e_ty); Type::box(cx, &v_ty).ptr_to() } ty::ty_box(ref mt) => { let ty = type_of(cx, mt.ty); Type::box(cx, &ty).ptr_to() } ty::ty_opaque_box => Type::opaque_box(cx).ptr_to(), ty::ty_uniq(ref mt) => { let ty = type_of(cx, mt.ty); Type::unique(cx, &ty).ptr_to() } ty::ty_evec(ref mt, ty::vstore_uniq) => { let ty = type_of(cx, mt.ty); let ty = Type::vec(cx.sess.targ_cfg.arch, &ty); Type::unique(cx, &ty).ptr_to() } ty::ty_unboxed_vec(ref mt) => { let ty = type_of(cx, mt.ty); Type::vec(cx.sess.targ_cfg.arch, &ty) } ty::ty_ptr(ref mt) => type_of(cx, mt.ty).ptr_to(), ty::ty_rptr(_, ref mt) => type_of(cx, mt.ty).ptr_to(), ty::ty_evec(ref mt, ty::vstore_slice(_)) => { let p_ty = type_of(cx, mt.ty).ptr_to(); let u_ty = Type::uint_from_ty(cx, ast::ty_u); Type::struct_([p_ty, u_ty], false) } ty::ty_estr(ty::vstore_slice(_)) => { // This means we get a nicer name in the output cx.tn.find_type("str_slice").get() } ty::ty_estr(ty::vstore_fixed(n)) => { Type::array(&Type::i8(), (n + 1u) as u64) } ty::ty_evec(ref mt, ty::vstore_fixed(n)) => { Type::array(&type_of(cx, mt.ty), n as u64) } ty::ty_bare_fn(_) => type_of_fn_from_ty(cx, t).ptr_to(), ty::ty_closure(_) => { let ty = type_of_fn_from_ty(cx, t); Type::func_pair(cx, &ty) } ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store), ty::ty_type => cx.tydesc_type.ptr_to(), ty::ty_tup(*) => { let repr = adt::represent_type(cx, t); Type::struct_(adt::fields_of(cx, repr), false) } ty::ty_opaque_closure_ptr(_) => Type::opaque_box(cx).ptr_to(), ty::ty_struct(did, ref substs) => { if ty::type_is_simd(cx.tcx, t) { let et = ty::simd_type(cx.tcx, t); let n = ty::simd_size(cx.tcx, t); Type::vector(&type_of(cx, et), n as u64) } else { // Only create the named struct, but don't fill it in. We fill it // in *after* placing it into the type cache. This prevents // infinite recursion with recursive struct types. Type::named_struct(llvm_type_name(cx, a_struct, did, substs.tps)) } } ty::ty_self(*) => cx.tcx.sess.unimpl("type_of: ty_self"), ty::ty_infer(*) => cx.tcx.sess.bug("type_of with ty_infer"), ty::ty_param(*) => cx.tcx.sess.bug("type_of with ty_param"), ty::ty_err(*) => cx.tcx.sess.bug("type_of with ty_err") }; cx.lltypes.insert(t, llty); // If this was an enum or struct, fill in the type now. match ty::get(t).sty { ty::ty_enum(*) => { let repr = adt::represent_type(cx, t); llty.set_struct_body(adt::fields_of(cx, repr), false); } ty::ty_struct(did, _) => { if !ty::type_is_simd(cx.tcx, t) { let repr = adt::represent_type(cx, t); let packed = ty::lookup_packed(cx.tcx, did); llty.set_struct_body(adt::fields_of(cx, repr), packed); } }
// Want refinements! (Or case classes, I guess pub enum named_ty { a_struct, an_enum } pub fn llvm_type_name(cx: &CrateContext, what: named_ty, did: ast::def_id, tps: &[ty::t]) -> ~str { let name = match what { a_struct => { "struct" } an_enum => { "enum" } }; let tstr = ppaux::parameterized(cx.tcx, ty::item_path_str(cx.tcx, did), None, tps); if did.crate == 0 { fmt!("%s.%s", name, tstr) } else { fmt!("%s.%s[#%d]", name, tstr, did.crate) } } pub fn type_of_dtor(ccx: &mut CrateContext, self_ty: ty::t) -> Type { let self_ty = type_of(ccx, self_ty).ptr_to(); Type::func([self_ty], &Type::void()) }
_ => () } return llty; }
random_line_split
type_of.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use middle::trans::adt; use middle::trans::common::*; use middle::ty; use util::ppaux; use middle::trans::type_::Type; use syntax::ast; pub fn arg_is_indirect(ccx: &CrateContext, arg_ty: &ty::t) -> bool { !ty::type_is_immediate(ccx.tcx, *arg_ty) } pub fn type_of_explicit_arg(ccx: &mut CrateContext, arg_ty: &ty::t) -> Type { let llty = type_of(ccx, *arg_ty); if arg_is_indirect(ccx, arg_ty) { llty.ptr_to() } else { llty } } pub fn type_of_explicit_args(ccx: &mut CrateContext, inputs: &[ty::t]) -> ~[Type] { inputs.map(|arg_ty| type_of_explicit_arg(ccx, arg_ty)) } pub fn type_of_fn(cx: &mut CrateContext, inputs: &[ty::t], output: ty::t) -> Type { let mut atys: ~[Type] = ~[]; // Arg 0: Output pointer. // (if the output type is non-immediate) let output_is_immediate = ty::type_is_immediate(cx.tcx, output); let lloutputtype = type_of(cx, output); if !output_is_immediate { atys.push(lloutputtype.ptr_to()); } // Arg 1: Environment atys.push(Type::opaque_box(cx).ptr_to()); // ... then explicit args. atys.push_all(type_of_explicit_args(cx, inputs)); // Use the output as the actual return value if it's immediate. if output_is_immediate && !ty::type_is_nil(output) { Type::func(atys, &lloutputtype) } else { Type::func(atys, &Type::void()) } } // Given a function type and a count of ty params, construct an llvm type pub fn type_of_fn_from_ty(cx: &mut CrateContext, fty: ty::t) -> Type { match ty::get(fty).sty { ty::ty_closure(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output), ty::ty_bare_fn(ref f) => type_of_fn(cx, f.sig.inputs, f.sig.output), _ => { cx.sess.bug("type_of_fn_from_ty given non-closure, non-bare-fn") } } } pub fn type_of_non_gc_box(cx: &mut CrateContext, t: ty::t) -> Type { assert!(!ty::type_needs_infer(t)); let t_norm = ty::normalize_ty(cx.tcx, t); if t != t_norm { type_of_non_gc_box(cx, t_norm) } else { match ty::get(t).sty { ty::ty_box(mt) => { let ty = type_of(cx, mt.ty); Type::box(cx, &ty).ptr_to() } ty::ty_uniq(mt) => { let ty = type_of(cx, mt.ty); Type::unique(cx, &ty).ptr_to() } _ => { cx.sess.bug("non-box in type_of_non_gc_box"); } } } } // A "sizing type" is an LLVM type, the size and alignment of which are // guaranteed to be equivalent to what you would get out of `type_of()`. It's // useful because: // // (1) It may be cheaper to compute the sizing type than the full type if all // you're interested in is the size and/or alignment; // // (2) It won't make any recursive calls to determine the structure of the // type behind pointers. This can help prevent infinite loops for // recursive types. For example, enum types rely on this behavior. pub fn sizing_type_of(cx: &mut CrateContext, t: ty::t) -> Type { match cx.llsizingtypes.find_copy(&t) { Some(t) => return t, None => () } let llsizingty = match ty::get(t).sty { ty::ty_nil | ty::ty_bot => Type::nil(), ty::ty_bool => Type::bool(), ty::ty_int(t) => Type::int_from_ty(cx, t), ty::ty_uint(t) => Type::uint_from_ty(cx, t), ty::ty_float(t) => Type::float_from_ty(cx, t), ty::ty_estr(ty::vstore_uniq) | ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_box) | ty::ty_box(*) | ty::ty_opaque_box | ty::ty_uniq(*) | ty::ty_ptr(*) | ty::ty_rptr(*) | ty::ty_type | ty::ty_opaque_closure_ptr(*) => Type::i8p(), ty::ty_estr(ty::vstore_slice(*)) | ty::ty_evec(_, ty::vstore_slice(*)) => { Type::struct_([Type::i8p(), Type::i8p()], false) } ty::ty_bare_fn(*) => Type::i8p(), ty::ty_closure(*) => Type::struct_([Type::i8p(), Type::i8p()], false), ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store), ty::ty_estr(ty::vstore_fixed(size)) => Type::array(&Type::i8(), size as u64), ty::ty_evec(mt, ty::vstore_fixed(size)) => { Type::array(&sizing_type_of(cx, mt.ty), size as u64) } ty::ty_unboxed_vec(mt) => { let sz_ty = sizing_type_of(cx, mt.ty); Type::vec(cx.sess.targ_cfg.arch, &sz_ty) } ty::ty_tup(*) | ty::ty_enum(*) => { let repr = adt::represent_type(cx, t); Type::struct_(adt::sizing_fields_of(cx, repr), false) } ty::ty_struct(did, _) => { if ty::type_is_simd(cx.tcx, t) { let et = ty::simd_type(cx.tcx, t); let n = ty::simd_size(cx.tcx, t); Type::vector(&type_of(cx, et), n as u64) } else { let repr = adt::represent_type(cx, t); let packed = ty::lookup_packed(cx.tcx, did); Type::struct_(adt::sizing_fields_of(cx, repr), packed) } } ty::ty_self(_) | ty::ty_infer(*) | ty::ty_param(*) | ty::ty_err(*) => { cx.tcx.sess.bug(fmt!("fictitious type %? in sizing_type_of()", ty::get(t).sty)) } }; cx.llsizingtypes.insert(t, llsizingty); llsizingty } // NB: If you update this, be sure to update `sizing_type_of()` as well. pub fn type_of(cx: &mut CrateContext, t: ty::t) -> Type { debug!("type_of %?: %?", t, ty::get(t)); // Check the cache. match cx.lltypes.find(&t) { Some(&t) => return t, None => () } // Replace any typedef'd types with their equivalent non-typedef // type. This ensures that all LLVM nominal types that contain // Rust types are defined as the same LLVM types. If we don't do // this then, e.g. `Option<{myfield: bool}>` would be a different // type than `Option<myrec>`. let t_norm = ty::normalize_ty(cx.tcx, t); if t != t_norm { let llty = type_of(cx, t_norm); cx.lltypes.insert(t, llty); return llty; } let mut llty = match ty::get(t).sty { ty::ty_nil | ty::ty_bot => Type::nil(), ty::ty_bool => Type::bool(), ty::ty_int(t) => Type::int_from_ty(cx, t), ty::ty_uint(t) => Type::uint_from_ty(cx, t), ty::ty_float(t) => Type::float_from_ty(cx, t), ty::ty_estr(ty::vstore_uniq) => { Type::unique(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to() } ty::ty_enum(did, ref substs) => { // Only create the named struct, but don't fill it in. We // fill it in *after* placing it into the type cache. This // avoids creating more than one copy of the enum when one // of the enum's variants refers to the enum itself. Type::named_struct(llvm_type_name(cx, an_enum, did, substs.tps)) } ty::ty_estr(ty::vstore_box) => { Type::box(cx, &Type::vec(cx.sess.targ_cfg.arch, &Type::i8())).ptr_to() } ty::ty_evec(ref mt, ty::vstore_box) => { let e_ty = type_of(cx, mt.ty); let v_ty = Type::vec(cx.sess.targ_cfg.arch, &e_ty); Type::box(cx, &v_ty).ptr_to() } ty::ty_box(ref mt) => { let ty = type_of(cx, mt.ty); Type::box(cx, &ty).ptr_to() } ty::ty_opaque_box => Type::opaque_box(cx).ptr_to(), ty::ty_uniq(ref mt) => { let ty = type_of(cx, mt.ty); Type::unique(cx, &ty).ptr_to() } ty::ty_evec(ref mt, ty::vstore_uniq) => { let ty = type_of(cx, mt.ty); let ty = Type::vec(cx.sess.targ_cfg.arch, &ty); Type::unique(cx, &ty).ptr_to() } ty::ty_unboxed_vec(ref mt) => { let ty = type_of(cx, mt.ty); Type::vec(cx.sess.targ_cfg.arch, &ty) } ty::ty_ptr(ref mt) => type_of(cx, mt.ty).ptr_to(), ty::ty_rptr(_, ref mt) => type_of(cx, mt.ty).ptr_to(), ty::ty_evec(ref mt, ty::vstore_slice(_)) => { let p_ty = type_of(cx, mt.ty).ptr_to(); let u_ty = Type::uint_from_ty(cx, ast::ty_u); Type::struct_([p_ty, u_ty], false) } ty::ty_estr(ty::vstore_slice(_)) => { // This means we get a nicer name in the output cx.tn.find_type("str_slice").get() } ty::ty_estr(ty::vstore_fixed(n)) => { Type::array(&Type::i8(), (n + 1u) as u64) } ty::ty_evec(ref mt, ty::vstore_fixed(n)) => { Type::array(&type_of(cx, mt.ty), n as u64) } ty::ty_bare_fn(_) => type_of_fn_from_ty(cx, t).ptr_to(), ty::ty_closure(_) => { let ty = type_of_fn_from_ty(cx, t); Type::func_pair(cx, &ty) } ty::ty_trait(_, _, store, _, _) => Type::opaque_trait(cx, store), ty::ty_type => cx.tydesc_type.ptr_to(), ty::ty_tup(*) => { let repr = adt::represent_type(cx, t); Type::struct_(adt::fields_of(cx, repr), false) } ty::ty_opaque_closure_ptr(_) => Type::opaque_box(cx).ptr_to(), ty::ty_struct(did, ref substs) => { if ty::type_is_simd(cx.tcx, t) { let et = ty::simd_type(cx.tcx, t); let n = ty::simd_size(cx.tcx, t); Type::vector(&type_of(cx, et), n as u64) } else { // Only create the named struct, but don't fill it in. We fill it // in *after* placing it into the type cache. This prevents // infinite recursion with recursive struct types. Type::named_struct(llvm_type_name(cx, a_struct, did, substs.tps)) } } ty::ty_self(*) => cx.tcx.sess.unimpl("type_of: ty_self"), ty::ty_infer(*) => cx.tcx.sess.bug("type_of with ty_infer"), ty::ty_param(*) => cx.tcx.sess.bug("type_of with ty_param"), ty::ty_err(*) => cx.tcx.sess.bug("type_of with ty_err") }; cx.lltypes.insert(t, llty); // If this was an enum or struct, fill in the type now. match ty::get(t).sty { ty::ty_enum(*) => { let repr = adt::represent_type(cx, t); llty.set_struct_body(adt::fields_of(cx, repr), false); } ty::ty_struct(did, _) => { if !ty::type_is_simd(cx.tcx, t) { let repr = adt::represent_type(cx, t); let packed = ty::lookup_packed(cx.tcx, did); llty.set_struct_body(adt::fields_of(cx, repr), packed); } } _ => () } return llty; } // Want refinements! (Or case classes, I guess pub enum named_ty { a_struct, an_enum } pub fn
(cx: &CrateContext, what: named_ty, did: ast::def_id, tps: &[ty::t]) -> ~str { let name = match what { a_struct => { "struct" } an_enum => { "enum" } }; let tstr = ppaux::parameterized(cx.tcx, ty::item_path_str(cx.tcx, did), None, tps); if did.crate == 0 { fmt!("%s.%s", name, tstr) } else { fmt!("%s.%s[#%d]", name, tstr, did.crate) } } pub fn type_of_dtor(ccx: &mut CrateContext, self_ty: ty::t) -> Type { let self_ty = type_of(ccx, self_ty).ptr_to(); Type::func([self_ty], &Type::void()) }
llvm_type_name
identifier_name
currency_getter.py
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2009 CamptoCamp. All rights reserved. # @author Nicolas Bessi # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging _logger = logging.getLogger(__name__) class AbstractClassError(Exception): def __str__(self): return 'Abstract Class' def __repr__(self): return 'Abstract Class' class AbstractMethodError(Exception): def __str__(self): return 'Abstract Method' def __repr__(self): return 'Abstract Method' class UnknowClassError(Exception): def __str__(self): return 'Unknown Class' def __repr__(self): return 'Unknown Class' class UnsuportedCurrencyError(Exception): def __init__(self, value): self.curr = value def __str__(self): return 'Unsupported currency %s' % self.curr def __repr__(self): return 'Unsupported currency %s' % self.curr class Currency_getter_factory(): """Factory pattern class that will return a currency getter class base on the name passed to the register method """ def register(self, class_name): allowed = [ 'CH_ADMIN_getter', 'PL_NBP_getter', 'ECB_getter', 'GOOGLE_getter', 'YAHOO_getter', 'MX_BdM_getter', 'CA_BOC_getter', 'RO_BNR_getter', 'BG_CUSTOMS_getter', 'BG_SIBANK_getter', 'BG_UNICRDT_getter', ] if class_name in allowed: exec "from .update_service_%s import %s" % (class_name.replace('_getter', ''), class_name) class_def = eval(class_name) _logger.info("from .update_service_%s import %s: class_def %s:" % (class_name.replace('_getter', ''), class_name, class_def)) return class_def() else:
raise UnknowClassError
conditional_block