Dataset columns: file_name (string, 4–140 chars) | prefix (string, 0–12.1k chars) | suffix (string, 0–12k chars) | middle (string, 0–7.51k chars) | fim_type (string, 4 classes)
# file: sampleMultiMCTSAgentTrajectory.py
estimateValue = EstimateValueFromNode(terminalReward, self.isTerminal, self.getStateFromNode, approximateValue)
guidedMCTSPolicy = MCTS(self.numSimulations, self.selectChild, expand,
estimateValue, backup, establishPlainActionDist)
return guidedMCTSPolicy
class PrepareMultiAgentPolicy:
def __init__(self, composeSingleAgentGuidedMCTS, approximatePolicy, MCTSAgentIds):
self.composeSingleAgentGuidedMCTS = composeSingleAgentGuidedMCTS
self.approximatePolicy = approximatePolicy
self.MCTSAgentIds = MCTSAgentIds
def __call__(self, multiAgentNNModel):
multiAgentApproximatePolicy = np.array([self.approximatePolicy(NNModel) for NNModel in multiAgentNNModel])
otherAgentPolicyForMCTSAgents = np.array([np.concatenate([multiAgentApproximatePolicy[:agentId], multiAgentApproximatePolicy[agentId + 1:]])
for agentId in self.MCTSAgentIds])
MCTSAgentIdWithCorrespondingOtherPolicyPair = zip(self.MCTSAgentIds, otherAgentPolicyForMCTSAgents)
MCTSAgentsPolicy = np.array([self.composeSingleAgentGuidedMCTS(agentId, multiAgentNNModel[agentId], correspondingOtherAgentPolicy)
for agentId, correspondingOtherAgentPolicy in MCTSAgentIdWithCorrespondingOtherPolicyPair])
multiAgentPolicy = np.copy(multiAgentApproximatePolicy)
multiAgentPolicy[self.MCTSAgentIds] = MCTSAgentsPolicy
policy = lambda state: [agentPolicy(state) for agentPolicy in multiAgentPolicy]
return policy
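# Illustrative sketch (assumption, not from the original file): how the composed
# multi-agent policy is typically consumed once main() below has built
# `prepareMultiAgentPolicy` and loaded `multiAgentNNmodel`; the variable names are
# taken from main() and `sampleAction` from src.episode.
#
#   policy = prepareMultiAgentPolicy(multiAgentNNmodel)
#   actionDists = policy(state)                      # one action distribution per agent
#   actions = [sampleAction(dist) for dist in actionDists]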
def main():
#check file exists or not
dirName = os.path.dirname(__file__)
trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', 'data',
'multiAgentTrain', 'multiMCTSAgent', 'trajectories')
if not os.path.exists(trajectoriesSaveDirectory):
os.makedirs(trajectoriesSaveDirectory)
trajectorySaveExtension = '.pickle'
maxRunningSteps = 20
numSimulations = 200
killzoneRadius = 2
fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius}
generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters)
parametersForTrajectoryPath = json.loads(sys.argv[1])
startSampleIndex = int(sys.argv[2])
endSampleIndex = int(sys.argv[3])
parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex)
trajectorySavePath = generateTrajectorySavePath(parametersForTrajectoryPath)
if not os.path.isfile(trajectorySavePath):
# Mujoco environment
physicsDynamicsPath = os.path.join(dirName, '..', '..', 'env', 'xmls', 'twoAgents.xml')
physicsModel = mujoco.load_model_from_path(physicsDynamicsPath)
physicsSimulation = mujoco.MjSim(physicsModel)
# MDP function
qPosInit = (0, 0, 0, 0)
qVelInit = [0, 0, 0, 0]
numAgents = 2
qVelInitNoise = 8
qPosInitNoise = 9.7
reset = ResetUniform(physicsSimulation, qPosInit, qVelInit, numAgents, qPosInitNoise, qVelInitNoise)
agentIds = list(range(numAgents))
sheepId = 0
wolfId = 1
xPosIndex = [2, 3]
getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex)
sheepAliveBonus = 1 / maxRunningSteps
wolfAlivePenalty = -sheepAliveBonus
sheepTerminalPenalty = -1
wolfTerminalReward = 1
terminalRewardList = [sheepTerminalPenalty, wolfTerminalReward]
isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos)
numSimulationFrames = 20
transit = TransitionFunction(physicsSimulation, isTerminal, numSimulationFrames)
rewardSheep = RewardFunctionCompete(sheepAliveBonus, sheepTerminalPenalty, isTerminal)
rewardWolf = RewardFunctionCompete(wolfAlivePenalty, wolfTerminalReward, isTerminal)
rewardMultiAgents = [rewardSheep, rewardWolf]
decay = 1
accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay, rewardMultiAgents)
# NNGuidedMCTS init
cInit = 1
cBase = 100
calculateScore = ScoreChild(cInit, cBase)
selectChild = SelectChild(calculateScore)
actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)]
getApproximatePolicy = lambda NNmodel: ApproximatePolicy(NNmodel, actionSpace)
getApproximateValue = lambda NNmodel: ApproximateValue(NNmodel)
getStateFromNode = lambda node: list(node.id.values())[0]
# sample trajectory
sampleTrajectory = SampleTrajectory(maxRunningSteps, transit, isTerminal, reset, sampleAction)
# neural network init
numStateSpace = 12
numActionSpace = len(actionSpace)
regularizationFactor = 1e-4
sharedWidths = [128]
actionLayerWidths = [128]
valueLayerWidths = [128]
generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor)
# load save dir
NNModelSaveExtension = ''
NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data',
'multiAgentTrain', 'multiMCTSAgent', 'NNModel')
if not os.path.exists(NNModelSaveDirectory):
os.makedirs(NNModelSaveDirectory)
generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters)
# load wolf baseline for init iteration
# wolfBaselineNNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data','SheepWolfBaselinePolicy', 'wolfBaselineNNPolicy')
# baselineSaveParameters = {'numSimulations': 10, 'killzoneRadius': 2,
# 'qPosInitNoise': 9.7, 'qVelInitNoise': 8,
# 'rolloutHeuristicWeight': 0.1, 'maxRunningSteps': 25}
# getWolfBaselineModelSavePath = GetSavePath(wolfBaselineNNModelSaveDirectory, NNModelSaveExtension, baselineSaveParameters)
# baselineModelTrainSteps = 1000
# wolfBaselineNNModelSavePath = getWolfBaselineModelSavePath({'trainSteps': baselineModelTrainSteps})
# wolfBaselienModel = restoreVariables(initializedNNModel, wolfBaselineNNModelSavePath)
# load sheep baseline for init iteration
# sheepBaselineNNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data','SheepWolfBaselinePolicy', 'sheepBaselineNNPolicy')
# baselineSaveParameters = {'numSimulations': 10, 'killzoneRadius': 2,
# 'qPosInitNoise': 9.7, 'qVelInitNoise': 8,
# 'rolloutHeuristicWeight': 0.1, 'maxRunningSteps': 25}
# getSheepBaselineModelSavePath = GetSavePath(sheepBaselineNNModelSaveDirectory, NNModelSaveExtension, baselineSaveParameters)
# baselineModelTrainSteps = 1000
# sheepBaselineNNModelSavePath = getSheepBaselineModelSavePath({'trainSteps': baselineModelTrainSteps})
# sheepBaselienModel = restoreVariables(initializedNNModel, sheepBaselineNNModelSavePath)
# multiAgentNNmodel = [sheepBaseLineModel, wolfBaseLineModel]
startTime = time.time()
trainableAgentIds = [sheepId, wolfId]
# otherAgentApproximatePolicy = lambda NNModel: stationaryAgentPolicy
otherAgentApproximatePolicy = lambda NNModel: ApproximatePolicy(NNModel, actionSpace)
composeSingleAgentGuidedMCTS = ComposeSingleAgentGuidedMCTS(numSimulations, actionSpace, terminalRewardList, selectChild, isTerminal, transit, getStateFromNode, getApproximatePolicy, getApproximateValue)
prepareMultiAgentPolicy = PrepareMultiAgentPolicy(composeSingleAgentGuidedMCTS, otherAgentApproximatePolicy, trainableAgentIds)
# load NN
multiAgentNNmodel = [generateModel(sharedWidths, actionLayerWidths, valueLayerWidths) for agentId in agentIds]
iterationIndex = parametersForTrajectoryPath['iterationIndex']
for agentId in trainableAgentIds:
modelPath = generateNNModelSavePath({'iterationIndex': iterationIndex, 'agentId': agentId})
restoredNNModel = restoreVariables(multiAgentNNmodel[agentId], modelPath)
multiAgentNNmodel[agentId] = restoredNNModel
# file: sampleMultiMCTSAgentTrajectory.py
Buffer import SampleBatchFromBuffer, SaveToBuffer
from exec.preProcessing import AccumulateMultiAgentRewards, AddValuesToTrajectory, RemoveTerminalTupleFromTrajectory, \
ActionToOneHot, ProcessTrajectoryForPolicyValueNet
from src.algorithms.mcts import ScoreChild, SelectChild, InitializeChildren, Expand, MCTS, backup, establishPlainActionDist
from exec.trainMCTSNNIteratively.valueFromNode import EstimateValueFromNode
from src.constrainedChasingEscapingEnv.policies import stationaryAgentPolicy, HeatSeekingContinuesDeterministicPolicy
from src.episode import SampleTrajectory, sampleAction
from exec.parallelComputing import GenerateTrajectoriesParallel
def composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, transit):
multiAgentActions = [sampleAction(policy(state)) for policy in othersPolicy]
multiAgentActions.insert(agentId, selfAction)
transitInSelfMCTS = transit(state, multiAgentActions)
return transitInSelfMCTS
class ComposeSingleAgentGuidedMCTS():
def __init__(self, numSimulations, actionSpace, terminalRewardList, selectChild, isTerminal, transit, getStateFromNode, getApproximatePolicy, getApproximateValue):
self.numSimulations = numSimulations
self.actionSpace = actionSpace
self.terminalRewardList = terminalRewardList
self.selectChild = selectChild
self.isTerminal = isTerminal
self.transit = transit
self.getStateFromNode = getStateFromNode
self.getApproximatePolicy = getApproximatePolicy
self.getApproximateValue = getApproximateValue
def __call__(self, agentId, selfNNModel, othersPolicy):
approximateActionPrior = self.getApproximatePolicy(selfNNModel)
transitInMCTS = lambda state, selfAction: composeMultiAgentTransitInSingleAgentMCTS(agentId, state, selfAction, othersPolicy, self.transit)
initializeChildren = InitializeChildren(self.actionSpace, transitInMCTS, approximateActionPrior)
expand = Expand(self.isTerminal, initializeChildren)
terminalReward = self.terminalRewardList[agentId]
approximateValue = self.getApproximateValue(selfNNModel)
estimateValue = EstimateValueFromNode(terminalReward, self.isTerminal, self.getStateFromNode, approximateValue)
guidedMCTSPolicy = MCTS(self.numSimulations, self.selectChild, expand,
estimateValue, backup, establishPlainActionDist)
return guidedMCTSPolicy
class PrepareMultiAgentPolicy:
def __init__(self, composeSingleAgentGuidedMCTS, approximatePolicy, MCTSAgentIds):
self.composeSingleAgentGuidedMCTS = composeSingleAgentGuidedMCTS
self.approximatePolicy = approximatePolicy
self.MCTSAgentIds = MCTSAgentIds
def __call__(self, multiAgentNNModel):
multiAgentApproximatePolicy = np.array([self.approximatePolicy(NNModel) for NNModel in multiAgentNNModel])
otherAgentPolicyForMCTSAgents = np.array([np.concatenate([multiAgentApproximatePolicy[:agentId], multiAgentApproximatePolicy[agentId + 1:]])
for agentId in self.MCTSAgentIds])
MCTSAgentIdWithCorrespondingOtherPolicyPair = zip(self.MCTSAgentIds, otherAgentPolicyForMCTSAgents)
MCTSAgentsPolicy = np.array([self.composeSingleAgentGuidedMCTS(agentId, multiAgentNNModel[agentId], correspondingOtherAgentPolicy)
for agentId, correspondingOtherAgentPolicy in MCTSAgentIdWithCorrespondingOtherPolicyPair])
multiAgentPolicy = np.copy(multiAgentApproximatePolicy)
multiAgentPolicy[self.MCTSAgentIds] = MCTSAgentsPolicy
policy = lambda state: [agentPolicy(state) for agentPolicy in multiAgentPolicy]
return policy
def main():
#check file exists or not
dirName = os.path.dirname(__file__)
trajectoriesSaveDirectory = os.path.join(dirName, '..', '..', 'data',
'multiAgentTrain', 'multiMCTSAgent', 'trajectories')
if not os.path.exists(trajectoriesSaveDirectory):
os.makedirs(trajectoriesSaveDirectory)
trajectorySaveExtension = '.pickle'
maxRunningSteps = 20
numSimulations = 200
killzoneRadius = 2
fixedParameters = {'maxRunningSteps': maxRunningSteps, 'numSimulations': numSimulations, 'killzoneRadius': killzoneRadius}
generateTrajectorySavePath = GetSavePath(trajectoriesSaveDirectory, trajectorySaveExtension, fixedParameters)
parametersForTrajectoryPath = json.loads(sys.argv[1])
startSampleIndex = int(sys.argv[2])
endSampleIndex = int(sys.argv[3])
parametersForTrajectoryPath['sampleIndex'] = (startSampleIndex, endSampleIndex)
trajectorySavePath = generateTrajectorySavePath(parametersForTrajectoryPath)
if not os.path.isfile(trajectorySavePath):
# Mujoco environment
physicsDynamicsPath = os.path.join(dirName, '..', '..', 'env', 'xmls', 'twoAgents.xml')
physicsModel = mujoco.load_model_from_path(physicsDynamicsPath)
physicsSimulation = mujoco.MjSim(physicsModel)
# MDP function
qPosInit = (0, 0, 0, 0)
qVelInit = [0, 0, 0, 0]
numAgents = 2
qVelInitNoise = 8
qPosInitNoise = 9.7
reset = ResetUniform(physicsSimulation, qPosInit, qVelInit, numAgents, qPosInitNoise, qVelInitNoise)
agentIds = list(range(numAgents))
sheepId = 0
wolfId = 1
xPosIndex = [2, 3]
getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)
getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex)
sheepAliveBonus = 1 / maxRunningSteps
wolfAlivePenalty = -sheepAliveBonus
sheepTerminalPenalty = -1
wolfTerminalReward = 1
terminalRewardList = [sheepTerminalPenalty, wolfTerminalReward]
isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos)
numSimulationFrames = 20
transit = TransitionFunction(physicsSimulation, isTerminal, numSimulationFrames)
rewardSheep = RewardFunctionCompete(sheepAliveBonus, sheepTerminalPenalty, isTerminal)
rewardWolf = RewardFunctionCompete(wolfAlivePenalty, wolfTerminalReward, isTerminal)
rewardMultiAgents = [rewardSheep, rewardWolf]
decay = 1
accumulateMultiAgentRewards = AccumulateMultiAgentRewards(decay, rewardMultiAgents)
# NNGuidedMCTS init
cInit = 1
cBase = 100
calculateScore = ScoreChild(cInit, cBase)
selectChild = SelectChild(calculateScore)
actionSpace = [(10, 0), (7, 7), (0, 10), (-7, 7), (-10, 0), (-7, -7), (0, -10), (7, -7)]
getApproximatePolicy = lambda NNmodel: ApproximatePolicy(NNmodel, actionSpace)
getApproximateValue = lambda NNmodel: ApproximateValue(NNmodel)
getStateFromNode = lambda node: list(node.id.values())[0]
# sample trajectory
sampleTrajectory = SampleTrajectory(maxRunningSteps, transit, isTerminal, reset, sampleAction)
# neural network init
numStateSpace = 12
numActionSpace = len(actionSpace)
regularizationFactor = 1e-4
sharedWidths = [128]
actionLayerWidths = [128]
valueLayerWidths = [128]
generateModel = GenerateModel(numStateSpace, numActionSpace, regularizationFactor)
# load save dir
NNModelSaveExtension = ''
NNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data',
'multiAgentTrain', 'multiMCTSAgent', 'NNModel')
if not os.path.exists(NNModelSaveDirectory):
os.makedirs(NNModelSaveDirectory)
generateNNModelSavePath = GetSavePath(NNModelSaveDirectory, NNModelSaveExtension, fixedParameters)
# load wolf baseline for init iteration
# wolfBaselineNNModelSaveDirectory = os.path.join(dirName, '..', '..', 'data','SheepWolfBaselinePolicy', 'wolfBaselineNNPolicy')
# baselineSaveParameters = {'numSimulations': 10, 'killzoneRadius': 2,
# 'qPosInitNoise': 9.7, 'qVelInitNoise': 8,
# 'rolloutHeuristicWeight': 0.1, 'maxRunningSteps': 25}
# getWolfBaselineModelSavePath = GetSavePath(wolfBaselineNNModelSaveDirectory, NNModelSaveExtension, baselineSaveParameters)
# baselineModelTrainSteps = 1000
# wolfBaselineNNModelSavePath = getWolfBaselineModelSavePath({'trainSteps': baselineModelTrainSteps})
// file: ballot.rs
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Contest {
pub id: String,
pub index: u32,
pub contest_type: ContestType,
pub num_winners: u32,
pub write_in: bool,
pub candidates: Vec<Candidate>,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Candidate {
pub id: String,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ContestType {
/// Plurality voting is an electoral system in which each voter is allowed to vote for only one candidate and the candidate
/// who polls the most among their counterparts (a plurality) is elected. It may be called first-past-the-post (FPTP),
/// single-choice voting, simple plurality, or relative/simple majority.
///
/// For Plurality tally, `Selection.score` has no meaning.
Plurality,
/// Score voting or “range voting” is an electoral system in which voters give each candidate a score, the scores are summed,
/// and the candidate with the highest total is elected. It has been described by various other names including “evaluative voting”,
/// “utilitarian voting”, and “the point system”.
///
/// For Score tally, `Selection.score` represents the number of points assigned to each candidate. Zero is the worst score that can be assigned to a candidate.
Score,
/// Approval voting is a single-winner electoral system where each voter may select (“approve”) any number of candidates.
/// The winner is the most-approved candidate.
///
/// For Approval tally, `Selection.score` has no meaning.
Approval,
/// The Condorcet method is a ranked-choice voting system that elects the candidate that would win a majority of the vote in all of the head-to-head elections against each of the other candidates.
/// The Condorcet method isn’t guaranteed to produce a single-winner due to the non-transitive nature of group choice.
///
/// For Condorcet tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Condorcet,
/// The standard Borda count where each candidate is assigned a number of points equal to the number of candidates ranked lower than them.
/// It is known as the "Starting at 0" Borda count since the least-significantly ranked candidate is given zero points.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position - 1```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 3 |
/// | 1 | Bob | 2 |
/// | 2 | Carlos | 1 |
/// | 3 | Dave | 0 |
///
/// For Borda tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Borda,
/// The classic Borda count as defined in Jean-Charles de Borda's [original proposal](http://gerardgreco.free.fr/IMG/pdf/MA_c_moire-Borda-1781.pdf).
/// It is known as the "Starting at 1" Borda count since the least-significantly ranked candidate is given one point.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 4 |
/// | 1 | Bob | 3 |
/// | 2 | Carlos | 2 |
/// | 3 | Dave | 1 |
///
/// For BordaClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaClassic,
/// In the Dowdall system, the highest-ranked candidate obtains 1 point, while the 2nd-ranked candidate receives ½ a point, the 3rd-ranked candidate receives ⅓ of a point, etc.
/// An important difference of this method from the others is that the number of points assigned to each preference does not depend on the number of candidates.
/// Each candidate is given points according to:
///
/// ```1 / (candidate-position + 1)```
///
/// If Dowdall is selected, tallystick will panic if an integer count type is used in the tally. This variant should only be used with a float or rational tally.
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 1 |
/// | 1 | Bob | ½ |
/// | 2 | Carlos | ⅓ |
/// | 3 | Dave | ¼ |
///
/// For BordaDowdall tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaDowdall,
/// In a modified Borda count, the number of points given for a voter's first and subsequent preferences is determined by the total number of candidates they have actually ranked, rather than the total number listed.
/// This is to say, typically, on a ballot of `n` candidates, if a voter casts only `m` preferences (where `n ≥ m ≥ 1`), a first preference gets `m` points, a second preference `m – 1` points, and so on.
/// Modified Borda counts are used to counteract the problem of [bullet voting](https://en.wikipedia.org/wiki/Bullet_voting).
/// Each candidate is given points according to:
///
/// ```number-marked - candidate-position```
///
/// For BordaModifiedClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaModifiedClassic,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeWinning, the strength of a link is measured by its support. You should use this Schulze variant if you are unsure.
///
/// For SchulzeWinning tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeWinning,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeRatio, the strength of a link is measured by the difference between its support and opposition.
///
/// For SchulzeRatio tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeRatio,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeMargin, the strength of a link is measured by the ratio of its support and opposition.
///
/// For SchulzeMargin tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeMargin,
}
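// Illustrative sketch (assumption, not part of this file): how the positional point
// formulas documented on the Borda variants above translate into code. The function
// name and signature are invented for the example only.
fn borda_points_example(contest_type: &ContestType, num_candidates: u32, num_marked: u32, position: u32) -> f64 {
    match contest_type {
        // "Starting at 0": number-candidates - candidate-position - 1
        ContestType::Borda => (num_candidates - position - 1) as f64,
        // "Starting at 1": number-candidates - candidate-position
        ContestType::BordaClassic => (num_candidates - position) as f64,
        // Dowdall: 1 / (candidate-position + 1)
        ContestType::BordaDowdall => 1.0 / f64::from(position + 1),
        // Modified: number-marked - candidate-position
        ContestType::BordaModifiedClassic => (num_marked - position) as f64,
        // The remaining contest types are not scored positionally.
        _ => 0.0,
    }
}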
#[derive(Serialize, Deserialize, Clone, Message, PartialEq, Eq)]
pub struct Selection {
/// true if the `selection` field is a free-form write-in, false if the `selection` field corresponds to a known candidate-id
#[prost(bool)]
#[serde(default)]
pub write_in: bool,
/// Score has different meanings depending on the tally type:
/// STV, Condorcet, Borda and Schulze: `score` means candidate rank, where a zero is the best rank that can be assigned to a candidate.
/// Score: `score` is the points assigned to this candidate.
// file: ballot.rs
pub struct Ballot {
pub id: String,
pub contests: Vec<u32>, // List of contest indexes
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Contest {
pub id: String,
pub index: u32,
pub contest_type: ContestType,
pub num_winners: u32,
pub write_in: bool,
pub candidates: Vec<Candidate>,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Candidate {
pub id: String,
/// Application specific properties.
///
/// Hashmaps are not allowed because their unstable ordering leads to non-determinism.
#[serde(default)]
#[serde(skip_serializing_if = "IndexMap::is_empty")]
pub properties: IndexMap<String, serde_json::Value>,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "snake_case")]
pub enum ContestType {
/// Plurality voting is an electoral system in which each voter is allowed to vote for only one candidate and the candidate
/// who polls the most among their counterparts (a plurality) is elected. It may be called first-past-the-post (FPTP),
/// single-choice voting, simple plurality, or relative/simple majority.
///
/// For Plurality tally, `Selection.score` has no meaning.
Plurality,
/// Score voting or “range voting” is an electoral system in which voters give each candidate a score, the scores are summed,
/// and the candidate with the highest total is elected. It has been described by various other names including “evaluative voting”,
/// “utilitarian voting”, and “the point system”.
///
/// For Score tally, `Selection.score` represents the number of points assigned to each candidate. Zero is the worst score that can be assigned to a candidate.
Score,
/// Approval voting is a single-winner electoral system where each voter may select (“approve”) any number of candidates.
/// The winner is the most-approved candidate.
///
/// For Approval tally, `Selection.score` has no meaning.
Approval,
/// The Condorcet method is a ranked-choice voting system that elects the candidate that would win a majority of the vote in all of the head-to-head elections against each of the other candidates.
/// The Condorcet method isn’t guaranteed to produce a single-winner due to the non-transitive nature of group choice.
///
/// For Condorcet tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Condorcet,
/// The standard Borda count where each candidate is assigned a number of points equal to the number of candidates ranked lower than them.
/// It is known as the "Starting at 0" Borda count since the least-significantly ranked candidate is given zero points.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position - 1```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 3 |
/// | 1 | Bob | 2 |
/// | 2 | Carlos | 1 |
/// | 3 | Dave | 0 |
///
/// For Borda tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
Borda,
/// The classic Borda count as defined in Jean-Charles de Borda's [original proposal](http://gerardgreco.free.fr/IMG/pdf/MA_c_moire-Borda-1781.pdf).
/// It is known as the "Starting at 1" Borda count since the least-significantly ranked candidate is given one point.
/// Each candidate is given points according to:
///
/// ```number-candidates - candidate-position```
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 4 |
/// | 1 | Bob | 3 |
/// | 2 | Carlos | 2 |
/// | 3 | Dave | 1 |
///
/// For BordaClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaClassic,
/// In the Dowdall system, the highest-ranked candidate obtains 1 point, while the 2nd-ranked candidate receives ½ a point, the 3rd-ranked candidate receives ⅓ of a point, etc.
/// An important difference of this method from the others is that the number of points assigned to each preference does not depend on the number of candidates.
/// Each candidate is given points according to:
///
/// ```1 / (candidate-position + 1)```
///
/// If Dowdall is selected, tallystick will panic if an integer count type is used in the tally. This variant should only be used with a float or rational tally.
///
/// Example point allocation for a single ballot:
///
/// | Position on ballot | Candidate | Points |
/// | --------------------|----------|--------|
/// | 0 | Alice | 1 |
/// | 1 | Bob | ½ |
/// | 2 | Carlos | ⅓ |
/// | 3 | Dave | ¼ |
///
/// For BordaDowdall tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaDowdall,
/// In a modified Borda count, the number of points given for a voter's first and subsequent preferences is determined by the total number of candidates they have actually ranked, rather than the total number listed.
/// This is to say, typically, on a ballot of `n` candidates, if a voter casts only `m` preferences (where `n ≥ m ≥ 1`), a first preference gets `m` points, a second preference `m – 1` points, and so on.
/// Modified Borda counts are used to counteract the problem of [bullet voting](https://en.wikipedia.org/wiki/Bullet_voting).
/// Each candidate is given points according to:
///
/// ```number-marked - candidate-position```
///
/// For BordaModifiedClassic tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
BordaModifiedClassic,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeWinning, the strength of a link is measured by its support. You should use this Schulze variant if you are unsure.
///
/// For SchulzeWinning tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeWinning,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeRatio, the strength of a link is measured by the difference between its support and opposition.
///
/// For SchulzeRatio tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeRatio,
/// The Schulze method is a voting system that selects a single winner using votes that express preferences.
/// In SchulzeMargin, the strength of a link is measured by the ratio of its support and opposition.
///
/// For SchulzeMargin tally, `Selection.score` is interpreted as the candidate rank, where the best ranked candidate has a rank of zero.
/// Candidates that have the same rank are considered to be of equal preference.
SchulzeMargin,
}
#[derive(Serialize, Deserialize, Clone, Message, PartialEq, Eq)]
pub struct Selection {
/// true if the `selection` field is a free-form write-in, false if the `selection` field corresponds to a known candidate-id
#[prost(bool)]
#[serde(default)]
pub write_in: bool,
/// Score has different meanings depending on the tally type:
/// STV, Condorcet, Borda and Schulze: `score` means candidate rank, where a zero is the best rank that can be assigned to a candidate.
// file: universe.rs
/// but safely because all of the data is atomic.
///
/// The [`Poe`](crate::Poe) struct exposes some of these settings — start/stop,
/// audio, speed — to the end user, but the rest are fully closed off.
pub(crate) struct Universe;
impl Universe {
const ACTIVE: u8 = 0b0000_0001; // Poe is active.
const AUDIO: u8 = 0b0000_0010; // Audio is enabled.
const DRAGGING: u8 = 0b0000_0100; // Poe is currently being dragged.
const ASSIGN_CHILD: u8 = 0b0000_1000; // The primary mate needs a child animation.
const NO_CHILD: u8 = 0b0001_0000; // Children must be stopped!
const NO_FOCUS: u8 = 0b0010_0000; // Disable primary mate focus support.
const STATE: u8 = 0b0100_0000; // State is active.
#[cfg(feature = "firefox")]
const FIX_BINDINGS: u8 = 0b1000_0000; // Body element bindings were lost.
}
macro_rules! get {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Is ", $title, "?")]
#[inline]
pub(crate) fn $fn() -> bool {
Self::$flag == FLAGS.load(SeqCst) & Self::$flag
}
);
}
impl Universe {
get!("Active", ACTIVE, active);
get!("Audio Enabled", AUDIO, audio);
get!("Dragging", DRAGGING, dragging);
get!("No Focus Allowed", NO_FOCUS, no_focus);
/// # Assign Child Animation?
///
/// Returns `true` if the previous mate requested a new child since the
/// last time this method was called.
pub(crate) fn assign_child() -> bool {
let old = FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
Self::ASSIGN_CHILD == old & Self::ASSIGN_CHILD
}
#[cfg(feature = "firefox")]
/// # Fix Element Bindings?
///
/// Returns `true` if one or both elements seem to have disappeared from
/// the document body since the last time this method was called.
pub(crate) fn fix_bindings() -> bool {
let old = FLAGS.fetch_and(! Self::FIX_BINDINGS, SeqCst);
let expected = Self::FIX_BINDINGS | Self::ACTIVE;
expected == old & expected
}
/// # Stop Child Animations?
///
/// Returns `true` if the previous mate requested the end to childhood.
pub(crate) fn no_child() -> bool {
let old = FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
Self::NO_CHILD == old & Self::NO_CHILD
}
#[cfg(feature = "director")]
#[inline]
/// # Are We Paused?
pub(crate) fn paused() -> bool { SPEED.load(SeqCst) == 0 }
#[cfg(not(feature = "director"))]
/// # We Aren't Paused.
pub(crate) const fn paused() -> bool { false }
/// # Position.
///
/// The current — or last recorded — X/Y position of the mouse on the
/// screen.
///
/// This information is only captured when the primary Poe mate is being
/// dragged, so will otherwise grow stale.
pub(crate) fn pos() -> Position {
let pos = POS.load(SeqCst).to_le_bytes();
let x = i32::from_le_bytes([pos[0], pos[1], pos[2], pos[3]]);
let y = i32::from_le_bytes([pos[4], pos[5], pos[6], pos[7]]);
Position::new(x, y)
}
/// # Width/Height.
///
/// Returns the current — or last recorded — dimensions of the screen.
///
/// These are captured when the universe is first initialized and refreshed
/// whenever the window is resized, but will grow stale when Poe has been
/// de-activated.
pub(crate) fn size() -> (u16, u16) {
let size = SIZE.load(SeqCst).to_le_bytes();
let width = u16::from_le_bytes([size[0], size[1]]);
let height = u16::from_le_bytes([size[2], size[3]]);
match (width, height) {
(0, 0) => (1, 1),
(0, h) => (1, h),
(w, 0) => (w, 1),
(w, h) => (w, h),
}
}
}
impl Universe {
#[inline]
/// # Random Value.
///
/// Return a random `u64` (xoshiro256).
pub(crate) fn rand() -> u64 {
let mut seeds = get_seeds();
let out = seeds[1].overflowing_mul(5).0.rotate_left(7).overflowing_mul(9).0;
update_seeds(&mut seeds);
set_seeds(&seeds);
out
}
#[allow(clippy::cast_possible_truncation)]
/// # Random (Capped) U16.
///
/// Return a random number between `0..max`, mitigating bias the same way
/// as `fastrand` (i.e. <https://lemire.me/blog/2016/06/30/fast-random-shuffling/>).
pub(crate) fn rand_mod(n: u16) -> u16 {
let mut r = Self::rand() as u16;
let mut hi = mul_high_u16(r, n);
let mut lo = r.wrapping_mul(n);
if lo < n {
let t = n.wrapping_neg() % n;
while lo < t {
r = Self::rand() as u16;
hi = mul_high_u16(r, n);
lo = r.wrapping_mul(n);
}
}
hi
}
}
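// Sketch (assumption, not part of this file): a quick check of the Lemire-style
// rejection used by `rand_mod` above — every draw must land in 0..n no matter
// which raw 16-bit value comes out of `rand`.
#[cfg(test)]
mod rand_mod_sketch {
    use super::Universe;
    #[test]
    fn result_stays_below_modulus() {
        for &n in &[1_u16, 2, 7, 100, 1000] {
            for _ in 0..256 {
                assert!(Universe::rand_mod(n) < n);
            }
        }
    }
}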
macro_rules! set {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Set ", $title, ".")]
pub(crate) fn $fn(v: bool) {
if v { FLAGS.fetch_or(Self::$flag, SeqCst); }
else { FLAGS.fetch_and(! Self::$flag, SeqCst); }
}
);
}
impl Universe {
set!("Allow Audio", AUDIO, set_audio);
set!("Dragging", DRAGGING, set_dragging);
set!("State", STATE, set_state);
/// # Set Active.
///
/// Enable or disable the universe (and Poe, etc.), returning `true` if
/// different than the previous state.
pub(crate) fn set_active(v: bool) -> bool {
if v == (0 != FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
else {
if v {
// Set active flag.
FLAGS.fetch_or(Self::ACTIVE, SeqCst);
// Seed future randomness if we can.
#[cfg(target_arch = "wasm32")] reseed();
// Set up the DOM elements and event bindings, and begin the
// animation frame loop.
State::init();
}
else {
// Clear everything but the audio, focus, and state properties.
// (State will clear itself in a moment, hopefully.)
FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
}
true
}
}
/// # Set Assign Child Flag.
///
/// This will also remove the incompatible no-child flag.
pub(crate) fn set_assign_child() {
if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
}
}
#[cfg(feature = "firefox")]
/// # Require Element Re-Binding.
pub(crate) fn set_fix_bindings() {
if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
}
/// # Set No Child Flag.
///
/// This will also remove the incompatible assign-child flag.
pub(crate) fn set_no_child() {
if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
}
}
/// # Set No Focus.
///
/// If true, this will also disable dragging, since there wouldn't be
/// any way to undrag.
pub(crate) fn set_no_focus(v: bool) {
if v {
if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
}
}
else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
}
///
/// The [`Poe`](crate::Poe) struct exposes some of these settings — start/stop,
/// audio, speed — to the end user, but the rest are fully closed off.
pub(crate) struct Universe;
impl Universe {
const ACTIVE: u8 = 0b0000_0001; // Poe is active.
const AUDIO: u8 = 0b0000_0010; // Audio is enabled.
const DRAGGING: u8 = 0b0000_0100; // Poe is currently being dragged.
const ASSIGN_CHILD: u8 = 0b0000_1000; // The primary mate needs a child animation.
const NO_CHILD: u8 = 0b0001_0000; // Children must be stopped!
const NO_FOCUS: u8 = 0b0010_0000; // Disable primary mate focus support.
const STATE: u8 = 0b0100_0000; // State is active.
#[cfg(feature = "firefox")]
const FIX_BINDINGS: u8 = 0b1000_0000; // Body element bindings were lost.
}
macro_rules! get {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Is ", $title, "?")]
#[inline]
pub(crate) fn $fn() -> bool {
Self::$flag == FLAGS.load(SeqCst) & Self::$flag
}
);
}
impl Universe {
get!("Active", ACTIVE, active);
get!("Audio Enabled", AUDIO, audio);
get!("Dragging", DRAGGING, dragging);
get!("No Focus Allowed", NO_FOCUS, no_focus);
/// # Assign Child Animation?
///
/// Returns `true` if the previous mate requested a new child since the
/// last time this method was called.
pub(crate) fn assign_child() -> bool {
let old = FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
Self::ASSIGN_CHILD == old & Self::ASSIGN_CHILD
}
#[cfg(feature = "firefox")]
/// # Fix Element Bindings?
///
/// Returns `true` if one or both elements seem to have disappeared from
/// the document body since the last time this method was called.
pub(crate) fn fix_bindings() -> bool {
let old = FLAGS.fetch_and(! Self::FIX_BINDINGS, SeqCst);
let expected = Self::FIX_BINDINGS | Self::ACTIVE;
expected == old & expected
}
/// # Stop Child Animations?
///
/// Returns `true` if the previous mate requested the end to childhood.
pub(crate) fn no_child() -> bool {
let old = FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
Self::NO_CHILD == old & Self::NO_CHILD
}
#[cfg(feature = "director")]
#[inline]
/// # Are We Paused?
pub(crate) fn paused() -> bool { SPEED.load(SeqCst) == 0 }
#[cfg(not(feature = "director"))]
/// # We Aren't Paused.
pub(crate) const fn paused() -> bool { false }
/// # Position.
///
/// The current — or last recorded — X/Y position of the mouse on the
/// screen.
///
/// This information is only captured when the primary Poe mate is being
/// dragged, so will otherwise grow stale.
pub(crate) fn pos() -> Position {
let pos = POS.load(SeqCst).to_le_bytes();
let x = i32::from_le_bytes([pos[0], pos[1], pos[2], pos[3]]);
let y = i32::from_le_bytes([pos[4], pos[5], pos[6], pos[7]]);
Position::new(x, y)
}
/// # Width/Height.
///
/// Returns the current — or last recorded — dimensions of the screen.
///
/// These are captured when the universe is first initialized and refreshed
/// whenever the window is resized, but will grow stale when Poe has been
/// de-activated.
pub(crate) fn size() -> (u16, u16) {
let size = SIZE.load(SeqCst).to_le_bytes();
let width = u16::from_le_bytes([size[0], size[1]]);
let height = u16::from_le_bytes([size[2], size[3]]);
match (width, height) {
(0, 0) => (1, 1),
(0, h) => (1, h),
(w, 0) => (w, 1),
(w, h) => (w, h),
}
}
}
impl Universe {
#[inline]
/// # Random Value.
///
/// Return a random `u64` (xoshiro256).
pub(crate) fn rand() -> u64 {
let mut seeds = get_seeds();
let out = seeds[1].overflowing_mul(5).0.rotate_left(7).overflowing_mul(9).0;
update_seeds(&mut seeds);
set_seeds(&seeds);
out
}
#[allow(clippy::cast_possible_truncation)]
/// # Random (Capped) U16.
///
/// Return a random number between `0..max`, mitigating bias the same way
/// as `fastrand` (i.e. <https://lemire.me/blog/2016/06/30/fast-random-shuffling/>).
pub(crate) fn rand_mod(n: u16) -> u16 {
let mut r = Self::rand() as u16;
let mut hi = mul_high_u16(r, n);
let mut lo = r.wrapping_mul(n);
if lo < n {
let t = n.wrapping_neg() % n;
while lo < t {
r = Self::rand() as u16;
hi = mul_high_u16(r, n);
lo = r.wrapping_mul(n);
}
}
hi
}
}
macro_rules! set {
($title:literal, $flag:ident, $fn:ident) => (
#[doc = concat!("# Set ", $title, ".")]
pub(crate) fn $fn(v: bool) {
if v { FLAGS.fetch_or(Self::$flag, SeqCst); }
else { FLAGS.fetch_and(! Self::$flag, SeqCst); }
}
);
}
impl Universe {
set!("Allow Audio", AUDIO, set_audio);
set!("Dragging", DRAGGING, set_dragging);
set!("State", STATE, set_state);
/// # Set Active.
///
/// Enable or disable the universe (and Poe, etc.), returning `true` if
/// different than the previous state.
pub(crate) fn set_active(v: bool) -> bool {
if v == (0 != FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
else {
if v {
// Set active flag.
FLAGS.fetch_or(Self::ACTIVE, SeqCst);
// Seed future randomness if we can.
#[cfg(target_arch = "wasm32")] reseed();
// Set up the DOM elements and event bindings, and begin the
// animation frame loop.
State::init();
}
else {
// Clear everything but the audio, focus, and state properties.
// (State will clear itself in a moment, hopefully.)
FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
}
true
}
}
/// # Set Assign Child Flag.
///
/// This will also remove the incompatible no-child flag.
pub(crate) fn set_assign_child() {
if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
}
}
#[cfg(feature = "firefox")]
/// # Require Element Re-Binding.
pub(crate) fn set_fix_bindings() {
if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
}
/// # Set No Child Flag.
///
/// This will also remove the incompatible assign-child flag.
pub(crate) fn set_no_child() {
if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
}
}
/// # Set No Focus.
///
/// If true, this will also disable dragging, since there wouldn't be
/// any way to undrag.
pub(crate) fn set_no_focus(v: bool) {
if v {
if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
}
}
else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
// file: universe.rs
pub(crate) fn set_active(v: bool) -> bool {
if v == (0 != FLAGS.load(SeqCst) & (Self::ACTIVE | Self::STATE)) { false }
else {
if v {
// Set active flag.
FLAGS.fetch_or(Self::ACTIVE, SeqCst);
// Seed future randomness if we can.
#[cfg(target_arch = "wasm32")] reseed();
// Set up the DOM elements and event bindings, and begin the
// animation frame loop.
State::init();
}
else {
// Clear everything but the audio, focus, and state properties.
// (State will clear itself in a moment, hopefully.)
FLAGS.fetch_and(Self::AUDIO | Self::NO_FOCUS | Self::STATE, SeqCst);
}
true
}
}
/// # Set Assign Child Flag.
///
/// This will also remove the incompatible no-child flag.
pub(crate) fn set_assign_child() {
if Self::NO_CHILD == FLAGS.fetch_or(Self::ASSIGN_CHILD, SeqCst) & Self::NO_CHILD {
FLAGS.fetch_and(! Self::NO_CHILD, SeqCst);
}
}
#[cfg(feature = "firefox")]
/// # Require Element Re-Binding.
pub(crate) fn set_fix_bindings() {
if Self::active() { FLAGS.fetch_or(Self::FIX_BINDINGS, SeqCst); }
}
/// # Set No Child Flag.
///
/// This will also remove the incompatible assign-child flag.
pub(crate) fn set_no_child() {
if Self::ASSIGN_CHILD == FLAGS.fetch_or(Self::NO_CHILD, SeqCst) & Self::ASSIGN_CHILD {
FLAGS.fetch_and(! Self::ASSIGN_CHILD, SeqCst);
}
}
/// # Set No Focus.
///
/// If true, this will also disable dragging, since there wouldn't be
/// any way to undrag.
pub(crate) fn set_no_focus(v: bool) {
if v {
if Self::DRAGGING == FLAGS.fetch_or(Self::NO_FOCUS, SeqCst) & Self::DRAGGING {
FLAGS.fetch_and(! Self::DRAGGING, SeqCst);
}
}
else { FLAGS.fetch_and(! Self::NO_FOCUS, SeqCst); }
}
/// # Set Position.
///
/// Update the cached X/Y mouse coordinates, only used when dragging a
/// Poe around the screen.
pub(crate) fn set_pos(x: i32, y: i32) {
let half_tile = Frame::SIZE_I.saturating_div(2);
let x = x.saturating_sub(half_tile).to_le_bytes();
let y = y.saturating_sub(half_tile).to_le_bytes();
let pos = u64::from_le_bytes([
x[0], x[1], x[2], x[3],
y[0], y[1], y[2], y[3],
]);
POS.store(pos, SeqCst);
}
/// # Set Width/Height.
///
/// This updates the cached window dimensions.
pub(crate) fn set_size(width: u16, height: u16) {
let width = width.to_le_bytes();
let height = height.to_le_bytes();
SIZE.store(u32::from_le_bytes([width[0], width[1], height[0], height[1]]), SeqCst);
}
}
}
#[cfg(feature = "director")]
impl Universe {
/// # Speed.
///
/// Returns the current playback speed if other than "normal" or paused.
pub(crate) fn speed() -> Option<f32> {
let speed = SPEED.load(SeqCst);
if speed == 0 || speed == 100 { None }
else { Some(f32::from(speed) / 100.0) }
}
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
/// # Set Speed.
///
/// Change the animation playback speed.
pub(crate) fn set_speed(speed: f32) {
// Clamp the range to something sane.
let speed =
if speed.is_nan() { 100.0 }
else { (speed * 100.0).clamp(0.0, 1000.0) };
// Store as an integer.
SPEED.store(speed as u16, SeqCst);
#[cfg(feature = "director")] dom::console_debug(&format!(
"Playback Speed: {speed:.2}%"
));
}
/// # Browserland Next Animation.
///
/// This returns (and clears) the animation set by `Poe.play`, if any.
pub(crate) fn next_animation() -> Option<Animation> {
Animation::from_u8(NEXT_ANIMATION.swap(0, SeqCst)).filter(|a| a.playable())
}
/// # Set Browserland Next Animation.
///
/// `Poe.play` uses this to manually override the primary mate's current
/// animation.
pub(crate) fn set_next_animation(next: u8) {
NEXT_ANIMATION.store(next, SeqCst);
}
}
#[inline]
/// # Get Seeds.
fn get_seeds() -> [u64; 4] {
[
SEED1.load(SeqCst),
SEED2.load(SeqCst),
SEED3.load(SeqCst),
SEED4.load(SeqCst),
]
}
#[inline]
/// # High 16 Product.
const fn mul_high_u16(a: u16, b: u16) -> u16 {
(((a as u32) * (b as u32)) >> 16) as u16
}
#[cfg(target_arch = "wasm32")]
/// # Reseed Randomness.
fn reseed() {
// Splitmix Math.random to give us a reasonable starting point for the
// subsequent Xoshi randomness.
let mut seed: u64 = js_random().to_bits();
let mut seeds = [0_u64; 4];
for i in &mut seeds { *i = splitmix(&mut seed); }
set_seeds(&seeds);
// Print a debug message if we care about that sort of thing.
#[cfg(feature = "director")]
dom::console_debug(&format!(
"PNRG1: {:016x}\nPNRG2: {:016x}\nPNRG3: {:016x}\nPNRG4: {:016x}",
seeds[0],
seeds[1],
seeds[2],
seeds[3],
));
}
/// # Set Seeds.
fn set_seeds(seeds: &[u64; 4]) {
// We are unlikely to wind up with all zeroes, but just in case…
if seeds[0] == 0 && seeds[1] == 0 && seeds[2] == 0 && seeds[3] == 0 {
SEED1.store(0x8596_cc44_bef0_1aa0, SeqCst);
SEED2.store(0x98d4_0948_da60_19ae, SeqCst);
SEED3.store(0x49f1_3013_c503_a6aa, SeqCst);
SEED4.store(0xc4d7_82ff_3c9f_7bef, SeqCst);
}
else {
SEED1.store(seeds[0], SeqCst);
SEED2.store(seeds[1], SeqCst);
SEED3.store(seeds[2], SeqCst);
SEED4.store(seeds[3], SeqCst);
}
}
/// # Update Seeds.
fn update_seeds(seeds: &mut[u64; 4]) {
let t = seeds[1] << 17;
seeds[2] ^= seeds[0];
seeds[3] ^= seeds[1];
seeds[1] ^= seeds[2];
seeds[0] ^= seeds[3];
seeds[2] ^= t;
seeds[3] = seeds[3].rotate_left(45);
}
#[cfg(target_arch = "wasm32")]
/// # Split/Mix.
///
/// This is used to generate our Xoshi256 seeds from a single source `u64`.
fn splitmix(seed: &mut u64) -> u64 {
// Update the source seed.
*seed = seed.overflowing_add(0x9e37_79b9_7f4a_7c15).0;
// Calculate and return a random value.
let mut z: u64 = (*seed ^ (*seed >> 30)).overflowing_mul(0xbf58_476d_1ce4_e5b9).0;
z = (z ^ (z >> 27)).overflowing_mul(0x94d0_49bb_1331_11eb).0;
z ^ (z >> 31)
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
#[test]
fn t_rand() {
# file: visual_functions-checkpoint.py
def subplots(fig_width=None, fig_height=None, *args, **kwargs):
"""
Returns subplots with an appropriate figure size and tight layout.
"""
fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)
fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)
return fig, axes
def legend(ax, ncol=3, loc=9, pos=(0.5, -0.1)):
leg=ax.legend(loc=loc, bbox_to_anchor=pos, ncol=ncol)
return leg
def savefig(filename, leg=None, format='.eps', *args, **kwargs):
"""
Save in PDF file with the given filename.
"""
if leg:
art=[leg]
plt.savefig(filename + format, additional_artists=art, bbox_inches="tight", *args, **kwargs)
else:
plt.savefig(filename + format, bbox_inches="tight", *args, **kwargs)
plt.close()
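# Minimal usage sketch (assumption, not from the original file): chaining the helpers
# above — subplots(), legend() and savefig() — for a single saved figure. The data
# and output file name are placeholders.
#
#   fig, ax = subplots()
#   ax.plot(range(10), label="example")
#   leg = legend(ax, ncol=1)
#   savefig("example_curve", leg=leg, format=".pdf")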
def plot_learning_curve(tra_loss_list, tra_f1_list, val_loss_list, val_f1_list):
def line_plot(y_train, y_val, early_stoping, y_label="Loss", y_min=None, y_max=None, best_score=None):
iterations = range(1,len(y_train)+1)
if y_min is None:
y_min = min(min(y_train), min(y_val))
y_min = max(0, (y_min - y_min*0.01))
if y_max is None:
y_max = max(max(y_train), max(y_val))
y_max = min(1, (y_max + 0.1*y_max))
plt.plot(iterations, y_train, label="training " )
plt.plot(iterations, y_val, label="validation ")
if best_score:
plt.title(r"\textbf{Learning curve}" f": best score: {best_score}", fontsize=8)
#plt.axvline(early_stoping, linestyle='--', color='r',label='Early Stopping')
else:
plt.title(r'\textbf{Learning curve}')
plt.ylabel(y_label)
#plt.ylim(y_min, y_max)
plt.xlabel(r"Iterations")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend(loc="best")
ax = plt.gca()
ax.patch.set_alpha(0.0)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
format_axes(ax)
min_val_loss_poss = val_loss_list.index(min(val_loss_list))+1
min_val_score_poss = val_f1_list.index(max(val_f1_list))+1
fig = figure(fig_width=8)
plt.subplot(1,2,1)
line_plot(tra_loss_list, val_loss_list, min_val_loss_poss, y_label="Loss", y_min=0)
plt.subplot(1,2,2)
line_plot(tra_f1_list, val_f1_list, min_val_score_poss, y_label="Accuracy", y_min=None, y_max=1, best_score=np.max(val_f1_list))
plt.subplots_adjust(hspace=0.5)
plt.tight_layout(pad=1.0)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',save = True,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cmNorm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cmNorm, interpolation='nearest', cmap=cmap)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = 0.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, int(cm[i, j]),fontsize=8,
horizontalalignment="center",
color="white" if cmNorm[i, j] > thresh else "black") #10
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
ax = plt.gca()
#ax.tick_params(axis="both", which="both", bottom=False,
#top=False, labelbottom=True, left=False, right=False, labelleft=True)
#plt.yticks([])
#plt.xticks([])
if title:
plt.title(title)
def plot_Fmeasure(cm, n, title="Fmacro"):
av = 0
p = []
for i in range(len(n)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100))
p.append(F*100)
av = av/len(n)*100
p = np.array(p)
volgorde = np.argsort(p)
fig, ax = plt.subplots(figsize=(6, 5))
sns.set_color_codes("pastel")
sns.barplot(x=p[volgorde],
y=np.array(n)[volgorde], color='b')
plt.axvline(x=av,color='orange', linewidth=1.0, linestyle="--")
a = '{0:0.02f}'.format(av)
b = '$Fmacro =\ $'+a
if av > 75:
plt.text(av-27,0.1,b,color='darkorange')
else:
plt.text(av+2,0.1,b,color='darkorange')
ax.set_xlabel("$Fmacro$",fontsize=18)
ax.set_ylabel("",fontsize=20)
ax.tick_params(axis='both', which='major', labelsize=18)
ax.set(xlim=(0, 100))
if title:
plt.title(title, fontsize=20)
#sns.despine(left=True, bottom=True)
def get_Fmeasure(cm, n):
av = 0
p = []
for i in range(len(n)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100))
p.append(F*100)
av = av/len(n)*100
return av
def vis_results(true, pred, dataset, fig_path):
cm = confusion_matrix(true, pred)
plot_Fmeasure(cm, apps[dataset], title=None)
savefig(f"{fig_path}_fm",format=".pdf")
if dataset=="whited":
fig, ax = plt.subplots(figsize=(12, 10))
else:
fig, ax = plt.subplots(figsize=(10, 8))
plot_confusion_matrix(cm, apps[dataset], title=None)
def get_fscore(true, pred, dataset):
cm = confusion_matrix(true, pred)
f1 = get_Fmeasure(cm, apps[dataset])
return f1
def get_fscore(cm, names):
av = 0
p = []
for i in range(len(names)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100))
p.append(F*100)
av = av/len(names)*100
p = np.array(p)
return p, av
def plot_multiple_fscore(names, cm_vi,cm_rp, labels=["baseline", "adaptive RP"]):
width = 0.4
#sns.set_color_codes("pastel")
f1_vi,av_vi = get_fscore(cm_vi, names)
f1_rp,av_rp = get_fscore(cm_rp, names)
av = max(av_vi, av_rp)
width=0.4
plt.barh(np.arange(len(f1_vi)), f1_vi, width, align='center', color=colors[0], label=labels[0])
plt.barh(np.arange(len(f1_rp))+ width, f1_rp, width, align='center',color=colors[1], label=labels[1])
ax = plt.gca()
ax.set(yticks=np.arange(len(names)) + width, yticklabels=names)
ax.set_xlabel("$F_1$ macro (\%)")
ax.axvline(x=av,color='orange', linewidth=1.0, linestyle="--")
a = '{0:0.2f}'.format(av)
b = '$ $'+a
if av > 75:
OFFSET = -0.7
plt.text(av-5,OFFSET,b,color='darkorange')
# file: visual_functions-checkpoint.py
ettle','Fan','AC','HairIron','LaptopCharger','SolderingIron','Fridge','Vacuum','CoffeeMaker','FridgeDefroster']
lilac_labels={'1-phase-async-motor':"1P-Motor", '3-phase-async-motor':"3P-Motor", 'Bulb':"ILB",
'Coffee-machine':"CM", 'Drilling-machine':"DRL", 'Dumper-machine':"3P-DPM",
'Fluorescent-lamp':"CFL", 'Freq-conv-squirrel-3-2x':"3P-FCS-2x", 'Hair-dryer':"Dryer",
'Kettle':"KT", 'Raclette':"RC", 'Refrigerator':"Fridge", 'Resistor':"Resistor",
'Squirrel-3-async':"3P-SQL", 'Squirrel-3-async-2x':"3P-SQL-2x", 'Vacuum-cleaner':"Vacuum"}
plaid_labels = {"Compact fluorescent lamp":'CFL',
'Bulb':"ILB",'Kettle':"KT",'Fan':"Fan",'AC':'AC',
'HairIron':"HairIron",'LaptopCharger':"Laptop",
'SolderingIron':"SLD",'Fridge':"Fridge",'Vacuum':"Vacuum",'CoffeeMaker':"CM",'FridgeDefroster':"FRZ"}
def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color)
plt.setp(bp['fliers'], markeredgecolor=color)
def set_figure_size(fig_width=None, fig_height=None, columns=2):
assert(columns in [1,2])
if fig_width is None:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
return (fig_width, fig_height)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
def figure(fig_width=None, fig_height=None, columns=2):
"""
Returns a figure with an appropriate size and tight layout.
"""
fig_width, fig_height =set_figure_size(fig_width, fig_height, columns)
fig = plt.figure(figsize=(fig_width, fig_height))
return fig
def subplots(fig_width=None, fig_height=None, *args, **kwargs):
"""
Returns subplots with an appropriate figure size and tight layout.
"""
fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)
fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)
return fig, axes
def legend(ax, ncol=3, loc=9, pos=(0.5, -0.1)):
leg=ax.legend(loc=loc, bbox_to_anchor=pos, ncol=ncol)
return leg
def savefig(filename, leg=None, format='.eps', *args, **kwargs):
"""
Save in PDF file with the given filename.
"""
if leg:
art=[leg]
plt.savefig(filename + format, additional_artists=art, bbox_inches="tight", *args, **kwargs)
else:
plt.savefig(filename + format, bbox_inches="tight", *args, **kwargs)
plt.close()
def plot_learning_curve(tra_loss_list, tra_f1_list, val_loss_list, val_f1_list):
def line_plot(y_train, y_val, early_stoping, y_label="Loss", y_min=None, y_max=None, best_score=None):
iterations = range(1,len(y_train)+1)
if y_min is None:
y_min = min(min(y_train), min(y_val))
y_min = max(0, (y_min - y_min*0.01))
if y_max is None:
y_max = max(max(y_train), max(y_val))
y_max = min(1, (y_max + 0.1*y_max))
plt.plot(iterations, y_train, label="training " )
plt.plot(iterations, y_val, label="validation ")
if best_score:
plt.title(r"\textbf{Learning curve}" f": best score: {best_score}", fontsize=8)
#plt.axvline(early_stoping, linestyle='--', color='r',label='Early Stopping')
else:
plt.title(r'\textbf{Learning curve}')
plt.ylabel(y_label)
#plt.ylim(y_min, y_max)
plt.xlabel(r"Iterations")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend(loc="best")
ax = plt.gca()
ax.patch.set_alpha(0.0)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
format_axes(ax)
min_val_loss_poss = val_loss_list.index(min(val_loss_list))+1
min_val_score_poss = val_f1_list.index(max(val_f1_list))+1
fig = figure(fig_width=8)
plt.subplot(1,2,1)
line_plot(tra_loss_list, val_loss_list, min_val_loss_poss, y_label="Loss", y_min=0)
plt.subplot(1,2,2)
line_plot(tra_f1_list, val_f1_list, min_val_score_poss, y_label="Accuracy", y_min=None, y_max=1, best_score=np.max(val_f1_list))
plt.subplots_adjust(hspace=0.5)
plt.tight_layout(pad=1.0)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',save = True,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cmNorm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cmNorm, interpolation='nearest', cmap=cmap)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = 0.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, int(cm[i, j]),fontsize=8,
horizontalalignment="center",
color="white" if cmNorm[i, j] > thresh else "black") #10
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
ax = plt.gca()
#ax.tick_params(axis="both", which="both", bottom=False,
#top=False, labelbottom=True, left=False, right=False, labelleft=True)
#plt.yticks([])
#plt.xticks([])
if title:
plt.title(title)
def plot_Fmeasure(cm, n, title="Fmacro"):
av = 0
p = []
for i in range(len(n)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100))
p.append(F*100)
av = av/len(n)*100
p = np.array(p)
volgorde = np.argsort(p)
fig, ax = plt.subplots(figsize=(6, 5))
sns.set_color_codes("pastel")
sns.barplot(x=p[volgorde],
y=np.array(n)[volgorde], color='b')
plt.axvline(x=av,color='orange', linewidth=1.0, linestyle="--")
a = '{0:0.02f}'.format(av)
b = '$Fmacro =\ $'+a
if av > 75:
plt.text(av-27,0.1,b,color='darkorange')
else:
plt.text(av+2, | format_axes | identifier_name |
visual_functions-checkpoint.py | ettle','Fan','AC','HairIron','LaptopCharger','SolderingIron','Fridge','Vacuum','CoffeeMaker','FridgeDefroster']
lilac_labels={'1-phase-async-motor':"1P-Motor", '3-phase-async-motor':"3P-Motor", 'Bulb':"ILB",
'Coffee-machine':"CM", 'Drilling-machine':"DRL", 'Dumper-machine':"3P-DPM",
'Fluorescent-lamp':"CFL", 'Freq-conv-squirrel-3-2x':"3P-FCS-2x", 'Hair-dryer':"Dryer",
'Kettle':"KT", 'Raclette':"RC", 'Refrigerator':"Fridge", 'Resistor':"Resistor",
'Squirrel-3-async':"3P-SQL", 'Squirrel-3-async-2x':"3P-SQL-2x", 'Vacuum-cleaner':"Vacuum"}
plaid_labels = {"Compact fluorescent lamp":'CFL',
'Bulb':"ILB",'Kettle':"KT",'Fan':"Fan",'AC':'AC',
'HairIron':"HairIron",'LaptopCharger':"Laptop",
'SolderingIron':"SLD",'Fridge':"Fridge",'Vacuum':"Vacuum",'CoffeeMaker':"CM",'FridgeDefroster':"FRZ"}
def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color)
plt.setp(bp['fliers'], markeredgecolor=color)
def set_figure_size(fig_width=None, fig_height=None, columns=2):
assert(columns in [1,2])
if fig_width is None:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
return (fig_width, fig_height)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
def figure(fig_width=None, fig_height=None, columns=2):
"""
Returns a figure with an appropriate size and tight layout.
"""
fig_width, fig_height =set_figure_size(fig_width, fig_height, columns)
fig = plt.figure(figsize=(fig_width, fig_height))
return fig
def subplots(fig_width=None, fig_height=None, *args, **kwargs):
"""
Returns subplots with an appropriate figure size and tight layout.
"""
fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)
fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)
return fig, axes
def legend(ax, ncol=3, loc=9, pos=(0.5, -0.1)):
leg=ax.legend(loc=loc, bbox_to_anchor=pos, ncol=ncol)
return leg
def savefig(filename, leg=None, format='.eps', *args, **kwargs):
"""
Save in PDF file with the given filename.
"""
if leg:
art=[leg]
plt.savefig(filename + format, additional_artists=art, bbox_inches="tight", *args, **kwargs)
else:
plt.savefig(filename + format, bbox_inches="tight", *args, **kwargs)
plt.close()
def plot_learning_curve(tra_loss_list, tra_f1_list, val_loss_list, val_f1_list):
def line_plot(y_train, y_val, early_stoping, y_label="Loss", y_min=None, y_max=None, best_score=None):
iterations = range(1,len(y_train)+1)
if y_min is None:
y_min = min(min(y_train), min(y_val))
y_min = max(0, (y_min - y_min*0.01))
if y_max is None:
y_max = max(max(y_train), max(y_val))
y_max = min(1, (y_max + 0.1*y_max))
plt.plot(iterations, y_train, label="training " )
plt.plot(iterations, y_val, label="validation ")
if best_score:
plt.title(r"\textbf{Learning curve}" f": best score: {best_score}", fontsize=8)
#plt.axvline(early_stoping, linestyle='--', color='r',label='Early Stopping')
else:
plt.title(r'\textbf{Learning curve}')
plt.ylabel(y_label)
#plt.ylim(y_min, y_max)
plt.xlabel(r"Iterations")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend(loc="best")
ax = plt.gca()
ax.patch.set_alpha(0.0)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
format_axes(ax)
min_val_loss_poss = val_loss_list.index(min(val_loss_list))+1
min_val_score_poss = val_f1_list.index(max(val_f1_list))+1
fig = figure(fig_width=8)
plt.subplot(1,2,1)
line_plot(tra_loss_list, val_loss_list, min_val_loss_poss, y_label="Loss", y_min=0)
plt.subplot(1,2,2)
line_plot(tra_f1_list, val_f1_list, min_val_score_poss, y_label="Accuracy", y_min=None, y_max=1, best_score=np.max(val_f1_list))
plt.subplots_adjust(hspace=0.5)
plt.tight_layout(pad=1.0)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',save = True,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cmNorm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cmNorm, interpolation='nearest', cmap=cmap)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = 0.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, int(cm[i, j]),fontsize=8,
horizontalalignment="center",
color="white" if cmNorm[i, j] > thresh else "black") #10
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
ax = plt.gca()
#ax.tick_params(axis="both", which="both", bottom=False,
#top=False, labelbottom=True, left=False, right=False, labelleft=True)
#plt.yticks([])
#plt.xticks([])
if title:
plt.title(title)
def plot_Fmeasure(cm, n, title="Fmacro"):
| a = '{0:0.02f}'.format(av)
b = '$Fmacro =\ $'+a
if av > 75:
plt.text(av-27,0.1,b,color='darkorange')
else:
plt.text(av+2,0 | av = 0
p = []
for i in range(len(n)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100))
p.append(F*100)
av = av/len(n)*100
p = np.array(p)
volgorde = np.argsort(p)
fig, ax = plt.subplots(figsize=(6, 5))
sns.set_color_codes("pastel")
sns.barplot(x=p[volgorde],
y=np.array(n)[volgorde], color='b')
plt.axvline(x=av,color='orange', linewidth=1.0, linestyle="--") | identifier_body |
visual_functions-checkpoint.py | ettle','Fan','AC','HairIron','LaptopCharger','SolderingIron','Fridge','Vacuum','CoffeeMaker','FridgeDefroster']
lilac_labels={'1-phase-async-motor':"1P-Motor", '3-phase-async-motor':"3P-Motor", 'Bulb':"ILB",
'Coffee-machine':"CM", 'Drilling-machine':"DRL", 'Dumper-machine':"3P-DPM",
'Fluorescent-lamp':"CFL", 'Freq-conv-squirrel-3-2x':"3P-FCS-2x", 'Hair-dryer':"Dryer",
'Kettle':"KT", 'Raclette':"RC", 'Refrigerator':"Fridge", 'Resistor':"Resistor",
'Squirrel-3-async':"3P-SQL", 'Squirrel-3-async-2x':"3P-SQL-2x", 'Vacuum-cleaner':"Vacuum"}
plaid_labels = {"Compact fluorescent lamp":'CFL',
'Bulb':"ILB",'Kettle':"KT",'Fan':"Fan",'AC':'AC',
'HairIron':"HairIron",'LaptopCharger':"Laptop",
'SolderingIron':"SLD",'Fridge':"Fridge",'Vacuum':"Vacuum",'CoffeeMaker':"CM",'FridgeDefroster':"FRZ"}
def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color)
plt.setp(bp['fliers'], markeredgecolor=color)
def set_figure_size(fig_width=None, fig_height=None, columns=2):
assert(columns in [1,2])
if fig_width is None:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (np.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
return (fig_width, fig_height)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
def figure(fig_width=None, fig_height=None, columns=2):
"""
Returns a figure with an appropriate size and tight layout.
"""
fig_width, fig_height =set_figure_size(fig_width, fig_height, columns)
fig = plt.figure(figsize=(fig_width, fig_height))
return fig
def subplots(fig_width=None, fig_height=None, *args, **kwargs):
"""
Returns subplots with an appropriate figure size and tight layout.
"""
fig_width, fig_height = get_width_height(fig_width, fig_height, columns=2)
fig, axes = plt.subplots(figsize=(fig_width, fig_height), *args, **kwargs)
return fig, axes
def legend(ax, ncol=3, loc=9, pos=(0.5, -0.1)):
leg=ax.legend(loc=loc, bbox_to_anchor=pos, ncol=ncol)
return leg
def savefig(filename, leg=None, format='.eps', *args, **kwargs):
"""
Save in PDF file with the given filename.
"""
if leg:
art=[leg]
plt.savefig(filename + format, additional_artists=art, bbox_inches="tight", *args, **kwargs)
else:
plt.savefig(filename + format, bbox_inches="tight", *args, **kwargs)
plt.close()
def plot_learning_curve(tra_loss_list, tra_f1_list, val_loss_list, val_f1_list):
def line_plot(y_train, y_val, early_stoping, y_label="Loss", y_min=None, y_max=None, best_score=None):
iterations = range(1,len(y_train)+1)
if y_min is None:
y_min = min(min(y_train), min(y_val))
y_min = max(0, (y_min - y_min*0.01))
if y_max is None:
y_max = max(max(y_train), max(y_val))
y_max = min(1, (y_max + 0.1*y_max))
plt.plot(iterations, y_train, label="training " )
plt.plot(iterations, y_val, label="validation ")
if best_score:
plt.title(r"\textbf{Learning curve}" f": best score: {best_score}", fontsize=8)
#plt.axvline(early_stoping, linestyle='--', color='r',label='Early Stopping')
else:
plt.title(r'\textbf{Learning curve}')
plt.ylabel(y_label)
#plt.ylim(y_min, y_max)
plt.xlabel(r"Iterations")
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend(loc="best")
ax = plt.gca()
ax.patch.set_alpha(0.0)
ax.xaxis.set_major_locator(ticker.AutoLocator())
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax.xaxis.set_major_formatter(ticker.ScalarFormatter(useMathText=True))
format_axes(ax)
min_val_loss_poss = val_loss_list.index(min(val_loss_list))+1
min_val_score_poss = val_f1_list.index(max(val_f1_list))+1
fig = figure(fig_width=8)
plt.subplot(1,2,1)
line_plot(tra_loss_list, val_loss_list, min_val_loss_poss, y_label="Loss", y_min=0)
plt.subplot(1,2,2)
line_plot(tra_f1_list, val_f1_list, min_val_score_poss, y_label="Accuracy", y_min=None, y_max=1, best_score=np.max(val_f1_list))
plt.subplots_adjust(hspace=0.5)
plt.tight_layout(pad=1.0)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',save = True,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
cmNorm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cmNorm, interpolation='nearest', cmap=cmap)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = 0.5
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, int(cm[i, j]),fontsize=8,
horizontalalignment="center",
color="white" if cmNorm[i, j] > thresh else "black") #10
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
ax = plt.gca()
#ax.tick_params(axis="both", which="both", bottom=False,
#top=False, labelbottom=True, left=False, right=False, labelleft=True)
#plt.yticks([])
#plt.xticks([])
if title:
plt.title(title)
def plot_Fmeasure(cm, n, title="Fmacro"):
av = 0
p = []
for i in range(len(n)):
teller = 2 * cm[i,i]
noemer = sum(cm[:,i]) + sum(cm[i,:])
F = float(teller) / float(noemer)
av += F
#print('{0} {1:.2f}'.format(names[i],F*100)) | volgorde = np.argsort(p)
fig, ax = plt.subplots(figsize=(6, 5))
sns.set_color_codes("pastel")
sns.barplot(x=p[volgorde],
y=np.array(n)[volgorde], color='b')
plt.axvline(x=av,color='orange', linewidth=1.0, linestyle="--")
a = '{0:0.02f}'.format(av)
b = '$Fmacro =\ $'+a
if av > 75:
plt.text(av-27,0.1,b,color='darkorange')
else:
plt.text(av+2,0 | p.append(F*100)
av = av/len(n)*100
p = np.array(p)
| random_line_split |
lib.rs |
& !registers::PaLevel::Pa2_On,
}
}
}
#[allow(dead_code)]
enum AddressFiltering {
None,
AddressOnly(u8),
AddressOrBroadcast((u8,u8)), //(addr, broadcast_addr)
}
#[allow(dead_code)]
#[derive(Debug, PartialEq, Clone, Copy)]
enum RadioMode { //rename transeiver?
Sleep = 0, // Xtal Off
Standby = 1, // Xtal On
FreqSynth = 2, // Pll On
Rx = 3, // Rx Mode
Tx = 4, // Tx Mode
}
impl Default for RadioMode {
fn default() -> Self {
RadioMode::Standby
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum Bitrate {
Lowest,
Low,
Standard,
High,
Custom(u32),
}
impl Default for Bitrate {
fn default() -> Self {
Bitrate::Standard
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum PackageLength {
Fixed(u8), //in bytes
Max(u8),
}
impl Default for PackageLength {
fn default() -> Self {
PackageLength::Fixed(16)
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum FreqencyBand {
ISM315mhz,
ISM433mhz,
ISM868mhz,
ISM915mhz,
}
/// SPI mode
pub const SPI_MODE: Mode = Mode {
phase: Phase::CaptureOnFirstTransition,
polarity: Polarity::IdleLow,
};
pub const SPI_SPEED: u32 = 500_000;
impl<SPI,CS, D, E> Radio<SPI, CS, D>
where SPI: spi::Transfer<u8, Error = E> + spi::Write<u8, Error = E>,
D: DelayMs<u16>+DelayUs<u16>,
CS: OutputPin,
E: core::fmt::Debug {
fn configure_radio(&mut self) -> Result<(),&'static str> {
self.set_default_config();
self.set_package_filtering();
self.set_bitrate();
self.set_frequency()?;
self.set_payload_length();
self.set_power_level();
self.set_encryption_key();
Ok(())
}
pub fn | (&mut self) -> Result<(),&'static str> {
//self.cs.set_high();
//check if the radio responds by seeing if we can change a register
let mut synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0xAA); //170
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0xAA {
synced = true;
break;
}
}
if !synced {return Err("could not communicate with radio")}
synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0x55); //85
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0x55 {
synced = true;
break;
}
}
if !synced {return Err("could not communicate with radio")}
//configure the radio chips for normal use
self.configure_radio()?;
Ok(())
}
// To enable encryption: radio.encrypt("ABCDEFGHIJKLMNOP");
// To disable encryption: radio.encrypt(null) or radio.encrypt(0)
// KEY HAS TO BE 16 bytes !!!
fn set_encryption_key(&mut self) -> Result<(),&'static str> {
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
match self.encryption_key {
None =>
self.register_flags.config2 &= !registers::PacketConfig2::Aes_On, //set aes off
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
| init | identifier_name |
lib.rs |
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
},
AddressFiltering::AddressOrBroadcast((node_addr,broadcast_addr)) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Nodebroadcast;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
self.write_reg(Register::Nodeadrs, node_addr);
self.write_reg(Register::Broadcastadrs, broadcast_addr);
},
}
}
fn set_bitrate(&mut self) {
//bitrate reg value: F_xosc / bitrate (b/s)
match self.bitrate {
Bitrate::Lowest => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_1200.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_1200.bits());
},
Bitrate::Low => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_55555.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_55555.bits());
},
Bitrate::High => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_200kbps.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_200kbps.bits());
},
Bitrate::Standard => {
self.write_reg(Register::Bitratemsb, registers::Bitrate::Msb_100000.bits());
self.write_reg(Register::Bitratelsb, registers::Bitrate::Lsb_100000.bits());
},
Bitrate::Custom(bitrate) => {
let msb = (FXOSC/bitrate >> 8) as u8;
let lsb = (FXOSC/bitrate) as u8;
self.write_reg(Register::Bitratemsb, msb);
self.write_reg(Register::Bitratelsb, lsb);
},
}
}
fn switch_freq(&mut self) -> Result<(),&'static str> {
let frf = (self.freq as f32 / RF69_FSTEP) as u32; // divide down by FSTEP to get FRF
if self.mode == RadioMode::Tx {
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
} else {
let old_mode = self.mode;
self.write_reg(Register::Frfmsb, (frf >> 16) as u8);
self.write_reg(Register::Frfmid, (frf >> 8) as u8);
self.write_reg(Register::Frflsb, frf as u8);
self.switch_transeiver_mode_blocking(RadioMode::FreqSynth)?;
self.switch_transeiver_mode_blocking(old_mode)?;
}
Ok(())
}
//see page 38 in the datasheet,
//TODO research Fdev and do that too
fn set_frequency(&mut self) -> Result<(),&'static str> | {
if !self.register_flags.mode.contains(registers::OpMode::Sequencer_Off) {
self.register_flags.mode |= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
self.switch_freq()?;
self.register_flags.mode -= registers::OpMode::Sequencer_Off;
self.write_reg(Register::Opmode, self.register_flags.mode.bits());
} else {
self.switch_freq()?;
}
Ok(())
} | identifier_body |
|
lib.rs | _On
& !registers::PaLevel::Pa2_On,
}
}
}
#[allow(dead_code)]
enum AddressFiltering {
None,
AddressOnly(u8),
AddressOrBroadcast((u8,u8)), //(addr, broadcast_addr)
}
#[allow(dead_code)]
#[derive(Debug, PartialEq, Clone, Copy)]
enum RadioMode { //rename transeiver?
Sleep = 0, // Xtal Off
Standby = 1, // Xtal On
FreqSynth = 2, // Pll On
Rx = 3, // Rx Mode
Tx = 4, // Tx Mode
}
impl Default for RadioMode {
fn default() -> Self {
RadioMode::Standby
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum Bitrate {
Lowest,
Low,
Standard,
High,
Custom(u32),
}
impl Default for Bitrate {
fn default() -> Self {
Bitrate::Standard
}
}
#[allow(dead_code)]
#[derive(Debug,Clone)]
pub enum PackageLength {
Fixed(u8), //in bytes
Max(u8),
}
impl Default for PackageLength {
fn default() -> Self {
PackageLength::Fixed(16)
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum FreqencyBand {
ISM315mhz,
ISM433mhz,
ISM868mhz,
ISM915mhz,
}
/// SPI mode
pub const SPI_MODE: Mode = Mode {
phase: Phase::CaptureOnFirstTransition,
polarity: Polarity::IdleLow,
};
pub const SPI_SPEED: u32 = 500_000;
impl<SPI,CS, D, E> Radio<SPI, CS, D>
where SPI: spi::Transfer<u8, Error = E> + spi::Write<u8, Error = E>,
D: DelayMs<u16>+DelayUs<u16>,
CS: OutputPin,
E: core::fmt::Debug {
fn configure_radio(&mut self) -> Result<(),&'static str> {
self.set_default_config();
self.set_package_filtering();
self.set_bitrate();
self.set_frequency()?;
self.set_payload_length();
self.set_power_level();
self.set_encryption_key();
Ok(())
}
pub fn init(&mut self) -> Result<(),&'static str> { | //self.cs.set_high();
//check if the radio responds by seeing if we can change a register
let mut synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0xAA); //170
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0xAA {
synced = true;
break;
}
}
if !synced {return Err("could not communicate with radio")}
synced = false;
for _attempt in 0..100 {
self.write_reg(Register::Syncvalue1, 0x55); //85
self.delay.delay_ms(1);
if self.read_reg(Register::Syncvalue1) == 0x55 {
synced = true;
break;
}
}
if !synced {return Err("could not communicate with radio")}
//configure the radio chips for normal use
self.configure_radio()?;
Ok(())
}
// To enable encryption: radio.encrypt("ABCDEFGHIJKLMNOP");
// To disable encryption: radio.encrypt(null) or radio.encrypt(0)
// KEY HAS TO BE 16 bytes !!!
fn set_encryption_key(&mut self) -> Result<(),&'static str> {
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
match self.encryption_key {
None =>
self.register_flags.config2 &= !registers::PacketConfig2::Aes_On, //set aes off
Some(mut key) => {
self.register_flags.config2 |= registers::PacketConfig2::Aes_On; //set aes on
key[0] = Register::Aeskey1.write_address();
self.spi.write(&key).unwrap();
},
}
self.delay.delay_us(15u16);
self.write_reg(Register::Packetconfig2, self.register_flags.config2.bits());
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_power_level(&mut self) {
use crate::registers::PaLevel;
self.register_flags.pa_level -= PaLevel::Power;
self.register_flags.pa_level |= PaLevel::from_bits(self.power_level).unwrap_or(PaLevel::Power);
self.write_reg(Register::Palevel, self.register_flags.pa_level.bits());
}
fn await_interrupt_flag(&mut self, register: Register, flag: registers::IrqFlags2) -> Result<(),&'static str> {
for _attempt in 0..10 {//try for one millisecond
let interrupt_flag = registers::IrqFlags2::from_bits(self.read_reg(register)).unwrap();
if interrupt_flag.contains(flag){
return Ok(())
}
self.delay.delay_us(100u16);
}
Err("interrupt flag was not set within timeout")
}
pub fn send_blocking(&mut self, adress: u8, buffer: &[u8]) -> Result<(),&'static str> {
use crate::registers::DioMapping1;
self.switch_transeiver_mode_blocking(RadioMode::Standby)?;
//setup the interrupt pin so an interrupt wil fire once the packet has been send
self.write_reg(Register::Diomapping1, DioMapping1::Dio0_00.bits()); //in tx mode Dio0_00: packet sent
let return_adress = match self.adress_filtering {
AddressFiltering::None => {
0
},
AddressFiltering::AddressOnly(node_addr) => {
node_addr
},
AddressFiltering::AddressOrBroadcast((node_addr,_broadcast_addr)) => {
node_addr
},
};
//spiXfer(spi_handle, (char*)rawDATA, (char*)rawDATA, bufferSize + 5 );
let mut packet = [0u8; registers::MAX_PACKET_SIZE+3];
let send_len = min(buffer.len() + 3, registers::MAX_PACKET_SIZE);
packet[0] = Register::Fifo.write_address();
packet[1] = send_len as u8;
packet[2] = adress; //1
packet[3] = return_adress; //2
packet[4] = 0;//reserved; //3
packet[5..5+buffer.len()].clone_from_slice(buffer);
//self.cs.set_low();
self.spi.write(&packet[..5+buffer.len()]).unwrap();
//self.cs.set_high();
self.delay.delay_us(15u16);
// no need to wait for transmit mode to be ready since its handled by the radio
self.switch_transeiver_mode_blocking(RadioMode::Tx)?;
self.await_interrupt_flag(Register::Irqflags2, registers::IrqFlags2::Packetsent)?;
self.switch_transeiver_mode_blocking(RadioMode::Rx)?;
Ok(())
}
fn set_payload_length(&mut self){
match self.package_len {
PackageLength::Fixed(len) => {
self.register_flags.config1 -= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
PackageLength::Max(len) => {
self.register_flags.config1 |= registers::PacketConfig1::Format_Variable;
self.write_reg(Register::Payloadlength, len);
},
}
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
}
fn set_default_config(&mut self) {
for (register, bitflag) in registers::DEFAULT_RADIO_CONFIG.iter() {
self.write_reg(*register, *bitflag);
}
}
fn set_package_filtering(&mut self) {
use registers::SyncConfig;
use registers::PacketConfig1;
match self.network_filtering {
None => {//switch to one sync word (second one is used as network id)
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_1;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
},
Some(network_id) => {
self.register_flags.sync = (self.register_flags.sync - SyncConfig::Size) | SyncConfig::Size_2;
self.write_reg(Register::Syncconfig, self.register_flags.sync.bits());
self.write_reg(Register::Syncvalue2, network_id.get());
},
}
self.register_flags.config1 -= PacketConfig1::Adrsfiltering;
match self.adress_filtering {
AddressFiltering::None => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Off;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
},
AddressFiltering::AddressOnly(node_addr) => {
self.register_flags.config1 |= PacketConfig1::Adrsfiltering_Node;
self.write_reg(Register::Packetconfig1, self.register_flags.config1.bits());
| random_line_split |
|
http_service_util.rs | url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0,
auto_follow_redirects: true,
cache_mode: http::CacheMode::Default,
response_body_mode: http::ResponseBodyMode::Stream,
}
}
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() |
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn trigger_download_with_supplied_response(
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
fn poll_http_service | {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
} | identifier_body |
http_service_util.rs | url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0,
auto_follow_redirects: true,
cache_mode: http::CacheMode::Default,
response_body_mode: http::ResponseBodyMode::Stream,
}
}
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
}
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn | (
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
fn poll_http_service_request | trigger_download_with_supplied_response | identifier_name |
http_service_util.rs | url_string: S) -> http::UrlRequest {
http::UrlRequest {
url: url_string.to_string(),
method: String::from("GET"),
headers: None,
body: None,
response_body_buffer_size: 0, |
// Object to hold results of a single download
#[derive(Default)]
pub struct IndividualDownload {
pub bytes: u64,
pub nanos: u64,
pub goodput_mbps: f64,
}
// TODO (NET-1664): verify checksum on data received
pub async fn fetch_and_discard_url(
http_service: &HttpServiceProxy,
mut url_request: http::UrlRequest,
) -> Result<IndividualDownload, Error> {
// Create a UrlLoader instance
let (s, p) = zx::Channel::create().context("failed to create zx channel")?;
let proxy = fasync::Channel::from_channel(p).context("failed to make async channel")?;
let loader_server = fidl::endpoints::ServerEnd::<http::UrlLoaderMarker>::new(s);
http_service.create_url_loader(loader_server)?;
let loader_proxy = http::UrlLoaderProxy::new(proxy);
let start_time = zx::Time::get(zx::ClockId::Monotonic);
let response = loader_proxy.start(&mut url_request).await?;
if let Some(e) = response.error {
bail!("UrlLoaderProxy error - code:{} ({})", e.code, e.description.unwrap_or("".into()))
}
let socket = match response.body.map(|x| *x) {
Some(http::UrlBody::Stream(s)) => fasync::Socket::from_socket(s)?,
_ => {
bail!("failed to read UrlBody from the stream - error: {}", zx::Status::BAD_STATE);
}
};
// discard the bytes
let mut stdio_sink = AllowStdIo::new(::std::io::sink());
let bytes_received = socket.copy_into(&mut stdio_sink).await?;
let stop_time = zx::Time::get(zx::ClockId::Monotonic);
let time_nanos = (stop_time - start_time).into_nanos() as u64;
let time_seconds = time_nanos as f64 * 1e-9;
let bits_received = (bytes_received * 8) as f64;
fx_log_info!("Received {} bytes in {:.3} seconds", bytes_received, time_seconds);
if bytes_received < 1 {
bail!("Failed to download data from url! bytes_received = {}", bytes_received);
}
let megabits_per_sec = bits_received * 1e-6 / time_seconds;
let mut individual_download = IndividualDownload::default();
individual_download.goodput_mbps = megabits_per_sec;
individual_download.bytes = bytes_received;
individual_download.nanos = time_nanos;
Ok(individual_download)
}
#[cfg(test)]
mod tests {
use {
super::*,
fidl::endpoints,
//fidl::endpoints::RequestStream,
fidl_fuchsia_net_oldhttp as http,
fidl_fuchsia_net_oldhttp::HttpError,
fidl_fuchsia_net_oldhttp::{HttpServiceMarker, HttpServiceProxy},
fidl_fuchsia_net_oldhttp::{HttpServiceRequest, HttpServiceRequestStream},
fidl_fuchsia_net_oldhttp::{UrlBody, UrlRequest, UrlResponse},
fidl_fuchsia_net_oldhttp::{UrlLoaderRequest, UrlLoaderRequestStream},
fuchsia_async as fasync,
futures::stream::{StreamExt, StreamFuture},
futures::task::Poll,
pin_utils::pin_mut,
};
#[test]
fn verify_basic_url_request_creation() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
assert_eq!(url_req.url, test_url);
assert_eq!(url_req.method, "GET".to_string());
assert!(url_req.headers.is_none());
assert!(url_req.body.is_none());
assert_eq!(url_req.response_body_buffer_size, 0);
assert!(url_req.auto_follow_redirects);
assert_eq!(url_req.cache_mode, http::CacheMode::Default);
assert_eq!(url_req.response_body_mode, http::ResponseBodyMode::Stream);
}
#[test]
fn response_error_triggers_error_path() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
let url_response = create_url_response(None, None, 404);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn successful_download_returns_valid_indvidual_download_data() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "there are some bytes".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
let url_response = create_url_response(None, url_body, 200);
let request_result = trigger_download_with_supplied_response(url_req, url_response);
let download_result = request_result.expect("failed to get individual_download");
assert_eq!(download_result.bytes, expected_num_bytes);
}
#[test]
fn zero_byte_download_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with some bytes "downloaded"
let bytes = "".as_bytes();
let (s1, s2) = zx::Socket::create(zx::SocketOpts::STREAM).unwrap();
let url_body = Some(Box::new(http::UrlBody::Stream(s2)));
let expected_num_bytes = s1.write(bytes).expect("failed to write response body") as u64;
drop(s1);
assert_eq!(expected_num_bytes, 0);
let url_response = create_url_response(None, url_body, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
#[test]
fn null_response_body_triggers_error() {
let test_url = "https://test.example/sample/url";
let url_req = create_url_request(test_url.to_string());
// creating a response with 0 bytes downloaded
let url_response = create_url_response(None, None, 200);
let download_result = trigger_download_with_supplied_response(url_req, url_response);
assert!(download_result.is_err());
}
fn trigger_download_with_supplied_response(
request: UrlRequest,
mut response: UrlResponse,
) -> Result<IndividualDownload, Error> {
let mut exec = fasync::Executor::new().expect("failed to create an executor");
let (http_service, server) = create_http_service_util();
let mut next_http_service_req = server.into_future();
let url_target = (&request).url.clone();
let fut = fetch_and_discard_url(&http_service, request);
pin_mut!(fut);
assert!(exec.run_until_stalled(&mut fut).is_pending());
let (url_loader_responder, _service_control_handle) =
match poll_http_service_request(&mut exec, &mut next_http_service_req) {
Poll::Ready(HttpServiceRequest::CreateUrlLoader { loader, control_handle }) => {
(loader, control_handle)
}
Poll::Pending => panic!("expected something"),
};
assert!(exec.run_until_stalled(&mut fut).is_pending());
let mut next_url_loader_req = url_loader_responder
.into_stream()
.expect("failed to create a url_loader response stream")
.into_future();
let (url_request, url_request_responder) =
match poll_url_loader_request(&mut exec, &mut next_url_loader_req) {
Poll::Ready(UrlLoaderRequest::Start { request, responder }) => (request, responder),
Poll::Pending => panic!("expected something"),
_ => panic!("got something unexpected!"),
};
assert_eq!(url_target, url_request.url);
url_request_responder.send(&mut response).expect("failed to send UrlResponse");
let complete = exec.run_until_stalled(&mut fut);
match complete {
Poll::Ready(result) => result,
Poll::Pending => panic!("future is pending and not ready"),
}
}
fn create_url_response(
error: Option<Box<HttpError>>,
body: Option<Box<UrlBody>>,
status_code: u32,
) -> http::UrlResponse {
http::UrlResponse {
error: error,
body: body,
url: None,
status_code: status_code,
status_line: None,
headers: None,
mime_type: None,
charset: None,
redirect_method: None,
redirect_url: None,
redirect_referrer: None,
}
}
    fn poll_http_service_request
sumtree.rs
self.root = Some(NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
});
self.index.insert(index_hash, 0);
return true;
}
// Next, move the old root out of the structure so that we are allowed to
// move it. We will move a new root back in at the end of the function
let old_root = mem::replace(&mut self.root, None).unwrap();
// Insert into tree, compute new root
let new_node = NodeData {
full: true,
node: Node::Leaf(elem_sum),
hash: elem_hash,
depth: 0,
};
// Put new root in place and record insertion
let index = old_root.n_leaves();
self.root = Some(SumTree::insert_right_of(old_root, new_node));
self.index.insert(index_hash, index);
true
}
fn replace_recurse(node: &mut NodeData<T>, index: usize, new_elem: T) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
assert!(node.full);
node.hash = (0u8, new_elem.sum(), Hashed::hash(&new_elem)).hash();
node.node = Node::Leaf(new_elem.sum());
} else {
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
ref mut sum,
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::replace_recurse(rchild, index - bit, new_elem);
} else {
SumTree::replace_recurse(lchild, index, new_elem);
}
*sum = lchild.sum() + rchild.sum();
node.hash = (node.depth, &*sum, lchild.hash, rchild.hash).hash();
}
// Pruned data would not have been in the index
Node::Pruned(_) => unreachable!(),
Node::Leaf(_) => unreachable!(),
}
}
}
/// Replaces an element in the tree. Returns true if the element existed
/// and was replaced. Returns false if the old element did not exist or
/// if the new element already existed
pub fn replace(&mut self, elem: &T, new_elem: T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
let new_index_hash = Hashed::hash(&new_elem);
if self.index.contains_key(&new_index_hash) {
false
} else {
SumTree::replace_recurse(root, index, new_elem);
self.index.insert(new_index_hash, index);
true
}
}
}
}
/// Determine whether an element exists in the tree.
/// If so, return its index
pub fn contains(&self, elem: &T) -> Option<usize> {
let index_hash = Hashed::hash(elem);
self.index.get(&index_hash).map(|x| *x)
}
fn prune_recurse(node: &mut NodeData<T>, index: usize) {
assert!(index < (1 << node.depth));
if node.depth == 0 {
let sum = if let Node::Leaf(ref sum) = node.node {
sum.clone()
} else {
unreachable!()
};
node.node = Node::Pruned(sum);
} else {
let mut prune_me = None;
match node.node {
Node::Internal {
ref mut lchild,
ref mut rchild,
..
} => {
let bit = index & (1 << (node.depth - 1));
if bit > 0 {
SumTree::prune_recurse(rchild, index - bit);
} else {
SumTree::prune_recurse(lchild, index);
}
if let (&Node::Pruned(ref lsum), &Node::Pruned(ref rsum)) =
(&lchild.node, &rchild.node)
{
if node.full {
prune_me = Some(lsum.clone() + rsum.clone());
}
}
}
Node::Pruned(_) => {
// Already pruned. Ok.
}
Node::Leaf(_) => unreachable!(),
}
if let Some(sum) = prune_me {
node.node = Node::Pruned(sum);
}
}
}
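	// Illustrative behaviour of the pruning pass above: pruning one leaf of a full
	// two-leaf subtree only swaps that leaf for Node::Pruned(sum); once its sibling
	// is pruned as well, the parent (being full) collapses into a single
	// Node::Pruned carrying the combined sum, so storage shrinks while the stored
	// hashes (and therefore the root) stay unchanged.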
/// Removes an element from storage, not affecting the tree
/// Returns true if the element was actually in the tree
pub fn prune(&mut self, elem: &T) -> bool {
let index_hash = Hashed::hash(elem);
let root = match self.root {
Some(ref mut node) => node,
None => {
return false;
}
};
match self.index.remove(&index_hash) {
None => false,
Some(index) => {
SumTree::prune_recurse(root, index);
true
}
}
}
	fn clone_pruned_recurse(node: &NodeData<T>) -> NodeData<T> {
if node.full {
// replaces full internal nodes, leaves and already pruned nodes are full
// as well
NodeData {
full: true,
node: Node::Pruned(node.sum()),
hash: node.hash,
depth: node.depth,
}
} else {
if let Node::Internal { ref lchild, ref rchild, ref sum } = node.node {
// just recurse on each side to get the pruned version
NodeData {
full: false,
node: Node::Internal {
lchild: Box::new(SumTree::clone_pruned_recurse(lchild)),
rchild: Box::new(SumTree::clone_pruned_recurse(rchild)),
sum: sum.clone(),
},
hash: node.hash,
depth: node.depth,
}
} else {
unreachable!()
}
}
}
/// Minimal clone of this tree, replacing all full nodes with a pruned node,
/// therefore only copying non-full subtrees.
pub fn clone_pruned(&self) -> SumTree<T> {
match self.root {
Some(ref node) => {
SumTree {
index: HashMap::new(),
root: Some(SumTree::clone_pruned_recurse(node)),
}
},
None => SumTree::new(),
}
}
// TODO push_many, truncate to allow bulk updates
}
// A SumTree is encoded as follows: an empty tree is the single byte 0x00.
// A nonempty tree is encoded recursively by encoding its root node. Each
// node is encoded as follows:
// flag: two bits, 01 for partial, 10 for full, 11 for pruned
// 00 is reserved so that the 0 byte can uniquely specify an empty tree
// depth: six bits, zero indicates a leaf
// hash: 32 bytes
// sum: <length of sum encoding>
//
// For a leaf, this is followed by an encoding of the element. For an
// internal node, the left child is encoded followed by the right child.
// For a pruned internal node, it is followed by nothing.
//
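// As a small illustration of the layout described above: an empty tree is the single
// byte 0x00, while a tree whose root has been pruned at depth 3 serializes as one
// flag/depth byte, the 32-byte root hash and the encoded sum, with nothing following
// (pruned nodes carry no children). Unpruned internal nodes are instead followed by
// the encoding of their left child and then their right child.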
impl<T> Writeable for SumTree<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
match self.root {
None => writer.write_u8(0),
Some(ref node) => node.write(writer),
}
}
}
impl<T> Writeable for NodeData<T>
where
T: Summable + Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
assert!(self.depth < 64);
// Compute depth byte: 0x80 means full, 0xc0 means unpruned
let mut depth = 0;
if self.full {
depth |= 0x80;
}
if let Node::Pruned(_) = self.node {
} else {
depth |= 0xc0;
}
depth |= self.depth;
// Encode node
try!(writer.write_u8(depth));
try!(self.hash.write(writer));
match self.node {
Node::Pruned(ref sum) => sum.write(writer),
Node::Leaf(ref sum) => sum.write(writer),
Node::Internal {
ref lchild,
ref rchild,
ref sum,
} => {
try!(sum.write(writer));
try!(lchild.write(writer));
rchild.write(writer)
}
}
}
}
fn node_read_recurse<T>(
reader: &mut Reader,
index: &mut HashMap<Hash, usize>,
tree_index: &mut usize,
) -> Result<NodeData<T>, ser::Error>
where
	T: Summable + Writeable + Readable + Hashed,
{
// Read depth byte
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0 != 0xc0;
let depth = depth & 0x3f;
// Sanity-check for zero byte
if pruned && !full {
return Err(ser::Error::CorruptedData);
}
// Read remainder of node
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => {
*tree_index += 1 << depth as usize;
Node::Pruned(sum)
}
(0, _) => {
index.insert(hash, *tree_index);
*tree_index += 1;
Node::Leaf(sum)
}
(_, _) => {
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
sum: sum,
}
}
};
Ok(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
})
}
impl<T> Readable for SumTree<T>
where
T: Summable + Writeable + Readable + Hashed,
{
fn read(reader: &mut Reader) -> Result<SumTree<T>, ser::Error> {
// Read depth byte of root node
let depth = try!(reader.read_u8());
let full = depth & 0x80 == 0x80;
let pruned = depth & 0xc0 != 0xc0;
let depth = depth & 0x3f;
// Special-case the zero byte
if pruned && !full {
return Ok(SumTree {
index: HashMap::new(),
root: None,
});
}
// Otherwise continue reading it
let mut index = HashMap::new();
let hash = try!(Readable::read(reader));
let sum = try!(Readable::read(reader));
let data = match (depth, pruned) {
(_, true) => Node::Pruned(sum),
(0, _) => Node::Leaf(sum),
(_, _) => {
let mut tree_index = 0;
Node::Internal {
lchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
rchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
sum: sum,
}
}
};
Ok(SumTree {
index: index,
root: Some(NodeData {
full: full,
node: data,
hash: hash,
depth: depth,
}),
})
}
}
/// This is used as a scratch space during root calculation so that we can
/// keep everything on the stack in a fixed-size array. It reflects a maximum
/// tree capacity of 2^48, which is not practically reachable.
const MAX_MMR_HEIGHT: usize = 48;
/// This algorithm is based on Peter Todd's in
/// https://github.com/opentimestamps/opentimestamps-server/blob/master/python-opentimestamps/opentimestamps/core/timestamp.py#L324
///
fn compute_peaks<S, I>(iter: I, peaks: &mut [Option<(u8, Hash, S)>])
where
S: Clone + ops::Add<Output = S> + Writeable,
I: Iterator<Item = (u8, Hash, S)>,
{
for peak in peaks.iter_mut() {
*peak = None;
}
for (mut new_depth, mut new_hash, mut new_sum) in iter {
let mut index = 0;
while let Some((old_depth, old_hash, old_sum)) = peaks[index].take() {
// Erase current peak (done by `take()` above), then combine
// it with the new addition, to be inserted one higher
index += 1;
new_depth = old_depth + 1;
new_sum = old_sum.clone() + new_sum.clone();
new_hash = (new_depth, &new_sum, old_hash, new_hash).hash();
}
peaks[index] = Some((new_depth, new_hash, new_sum));
}
}
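// Small walk-through of the peak bookkeeping above (three leaves a, b, c, all at
// depth 0): after a, peaks = [a, -, ...]; pushing b empties slot 0 and stores the
// combined (a, b) node at depth 1 in slot 1; pushing c leaves peaks = [c, (a+b), -].
// compute_root below then bags the surviving peaks together, lowest slot first, to
// produce the final root hash and sum.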
/// Directly compute the Merkle root of a sum-tree whose contents are given
/// explicitly in the passed iterator.
pub fn compute_root<'a, T, I>(iter: I) -> Option<(Hash, T::Sum)>
where
T: 'a + Summable + Writeable,
I: Iterator<Item = &'a T>,
{
let mut peaks = vec![None; MAX_MMR_HEIGHT];
compute_peaks(
iter.map(|elem| {
let depth = 0u8;
let sum = elem.sum();
let hash = (depth, &sum, Hashed::hash(elem)).hash();
(depth, hash, sum)
}),
&mut peaks,
);
let mut ret = None;
for peak in peaks {
ret = match (peak, ret) {
(None, x) => x,
(Some((_, hash, sum)), None) => Some((hash, sum)),
(Some((depth, lhash, lsum)), Some((rhash, rsum))) => {
let sum = lsum + rsum;
let hash = (depth + 1, &sum, lhash, rhash).hash();
Some((hash, sum))
}
};
}
ret
}
// a couple functions that help debugging
#[allow(dead_code)]
fn print_node<T>(node: &NodeData<T>, tab_level: usize)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
for _ in 0..tab_level {
print!(" ");
}
print!("[{:03}] {} {:?}", node.depth, node.hash, node.sum());
match node.node {
Node::Pruned(_) => println!(" X"),
Node::Leaf(_) => println!(" L"),
Node::Internal {
ref lchild,
ref rchild,
..
} => {
println!(":");
print_node(lchild, tab_level + 1);
print_node(rchild, tab_level + 1);
}
}
}
#[allow(dead_code)]
#[allow(missing_docs)]
pub fn print_tree<T>(tree: &SumTree<T>)
where
T: Summable + Writeable,
T::Sum: std::fmt::Debug,
{
match tree.root {
None => println!("[empty tree]"),
Some(ref node) => {
print_node(node, 0);
}
}
}
#[cfg(test)]
mod test {
use rand::{thread_rng, Rng};
use core::hash::Hashed;
use ser;
use super::*;
#[derive(Copy, Clone, Debug)]
struct TestElem([u32; 4]);
impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
}
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(writer.write_u32(self.0[0]));
try!(writer.write_u32(self.0[1]));
try!(writer.write_u32(self.0[2]));
writer.write_u32(self.0[3])
}
}
fn sumtree_create_(prune: bool) {
let mut tree = SumTree::new();
macro_rules! leaf {
($data: expr) => ({
(0u8, $data.sum(), $data.hash())
})
};
macro_rules! node {
($left: expr, $right: expr) => (
($left.0 + 1, $left.1 + $right.1, $left.hash(), $right.hash())
)
};
macro_rules! prune {
($prune: expr, $tree: expr, $elem: expr) => {
if $prune {
assert_eq!($tree.len(), 1);
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
// double-pruning shouldn't hurt anything
$tree.prune(&$elem);
assert_eq!($tree.len(), 0);
} else {
assert_eq!($tree.len(), $tree.unpruned_len());
}
}
};
let mut elems = [
			TestElem([0, 0, 0, 1]),
			TestElem([0, 0, 0, 2]),
|
ply.rs

struct Header {
format: Format,
elements: Vec<Element>,
offset: Vector3<f64>,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum DataType {
Int8,
Uint8,
Int16,
Uint16,
Int32,
Uint32,
Float32,
Float64,
}
impl DataType {
fn from_str(input: &str) -> Result<Self> {
match input {
"float" | "float32" => Ok(DataType::Float32),
"double" | "float64" => Ok(DataType::Float64),
"char" | "int8" => Ok(DataType::Int8),
"uchar" | "uint8" => Ok(DataType::Uint8),
"short" | "int16" => Ok(DataType::Int16),
"ushort" | "uint16" => Ok(DataType::Uint16),
"int" | "int32" => Ok(DataType::Int32),
"uint" | "uint32" => Ok(DataType::Uint32),
_ => Err(ErrorKind::InvalidInput(format!("Invalid data type: {}", input)).into()),
}
}
}
impl Header {
fn has_element(&self, name: &str) -> bool {
self.elements.iter().any(|e| e.name == name)
}
}
impl<'a> Index<&'a str> for Header {
type Output = Element;
fn index(&self, name: &'a str) -> &Self::Output {
for element in &self.elements {
if element.name == name {
return element;
}
}
panic!("Element {} does not exist.", name);
}
}
#[derive(Debug, PartialEq)]
enum Format {
BinaryLittleEndianV1,
BinaryBigEndianV1,
AsciiV1,
}
// TODO(hrapp): Maybe support list properties too?
#[derive(Debug)]
struct ScalarProperty {
name: String,
data_type: DataType,
}
#[derive(Debug)]
struct Element {
name: String,
count: i64,
properties: Vec<ScalarProperty>,
}
impl<'a> Index<&'a str> for Element {
type Output = ScalarProperty;
fn index(&self, name: &'a str) -> &Self::Output {
for p in &self.properties {
if p.name == name {
return p;
}
}
panic!("Property does not exist!")
}
}
fn parse_header<R: BufRead>(reader: &mut R) -> Result<(Header, usize)> {
use crate::errors::ErrorKind::InvalidInput;
let mut header_len = 0;
let mut line = String::new();
header_len += reader.read_line(&mut line)?;
if line.trim() != "ply" {
return Err(InvalidInput("Not a PLY file".to_string()).into());
}
let mut format = None;
let mut current_element = None;
let mut offset = Vector3::zero();
let mut elements = Vec::new();
loop {
line.clear();
header_len += reader.read_line(&mut line)?;
let entries: Vec<&str> = line.trim().split_whitespace().collect();
match entries[0] {
"format" if entries.len() == 3 => {
if entries[2] != "1.0" {
return Err(InvalidInput(format!("Invalid version: {}", entries[2])).into());
}
format = Some(match entries[1] {
"ascii" => Format::AsciiV1,
"binary_little_endian" => Format::BinaryLittleEndianV1,
"binary_big_endian" => Format::BinaryBigEndianV1,
_ => return Err(InvalidInput(format!("Invalid format: {}", entries[1])).into()),
});
}
"element" if entries.len() == 3 => {
if let Some(element) = current_element.take() {
elements.push(element);
}
current_element = Some(Element {
name: entries[1].to_string(),
count: entries[2]
.parse::<i64>()
.chain_err(|| InvalidInput(format!("Invalid count: {}", entries[2])))?,
properties: Vec::new(),
});
}
"property" => {
if current_element.is_none() {
return Err(
InvalidInput(format!("property outside of element: {}", line)).into(),
);
};
let property = match entries[1] {
"list" if entries.len() == 5 => {
// We do not support list properties.
continue;
}
data_type_str if entries.len() == 3 => {
let data_type = DataType::from_str(data_type_str)?;
ScalarProperty {
name: entries[2].to_string(),
data_type,
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
};
current_element.as_mut().unwrap().properties.push(property);
}
"end_header" => break,
"comment" => {
if entries.len() == 5 && entries[1] == "offset:" {
let x = entries[2]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[2])))?;
let y = entries[3]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[3])))?;
let z = entries[4]
.parse::<f64>()
.chain_err(|| InvalidInput(format!("Invalid offset: {}", entries[4])))?;
offset = Vector3::new(x, y, z)
}
}
_ => return Err(InvalidInput(format!("Invalid line: {}", line)).into()),
}
}
if let Some(element) = current_element {
elements.push(element);
}
if format.is_none() {
return Err(InvalidInput("No format specified".into()).into());
}
Ok((
Header {
elements,
format: format.unwrap(),
offset,
},
header_len,
))
}
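// For reference, a minimal header that the parser above accepts looks like this
// (illustrative content, not taken from a real scan):
//   ply
//   format binary_little_endian 1.0
//   comment offset: 0 0 0
//   element vertex 1234
//   property float x
//   property float y
//   property float z
//   end_header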
type ReadingFn = fn(nread: &mut usize, buf: &[u8], val: &mut Point);
// The two macros create a 'ReadingFn' that reads a value of '$data_type' out of a reader, and
// calls '$assign' with it while casting it to the correct type. I did not find a way of doing this
// purely using generic programming, so I resorted to this macro.
macro_rules! create_and_return_reading_fn {
($assign:expr, $size:ident, $num_bytes:expr, $reading_fn:expr) => {{
$size += $num_bytes;
|nread: &mut usize, buf: &[u8], point: &mut Point| {
#[allow(clippy::cast_lossless)]
$assign(point, $reading_fn(buf) as _);
*nread += $num_bytes;
}
}};
}
macro_rules! read_casted_property {
($data_type:expr, $assign:expr, &mut $size:ident) => {
match $data_type {
DataType::Uint8 => {
create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0])
}
DataType::Int8 => create_and_return_reading_fn!($assign, $size, 1, |buf: &[u8]| buf[0]),
DataType::Uint16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_u16)
}
DataType::Int16 => {
create_and_return_reading_fn!($assign, $size, 2, LittleEndian::read_i16)
}
DataType::Uint32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_u32)
}
DataType::Int32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_i32)
}
DataType::Float32 => {
create_and_return_reading_fn!($assign, $size, 4, LittleEndian::read_f32)
}
DataType::Float64 => {
create_and_return_reading_fn!($assign, $size, 8, LittleEndian::read_f64)
}
}
};
}
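// For example (hypothetical invocation): read_casted_property!(DataType::Float32,
// |p: &mut Point, v: f64| p.position.x = v, &mut size) evaluates to a closure that
// reads 4 little-endian bytes with LittleEndian::read_f32, casts the value, hands it
// to the assignment closure and advances `nread` by 4, while bumping `size` by 4 at
// the point of expansion.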
// Similar to 'create_and_return_reading_fn', but creates a function that just advances the read
// pointer.
macro_rules! create_skip_fn {
(&mut $size:ident, $num_bytes:expr) => {{
$size += $num_bytes;
fn _read_fn(nread: &mut usize, _: &[u8], _: &mut Point) {
*nread += $num_bytes;
}
_read_fn
}};
}
/// Abstraction to read binary points from ply files into points.
pub struct PlyIterator {
reader: BufReader<File>,
readers: Vec<ReadingFn>,
pub num_total_points: i64,
offset: Vector3<f64>,
point_count: usize,
}
impl PlyIterator {
pub fn from_file<P: AsRef<Path>>(ply_file: P) -> Result<Self> {
let mut file = File::open(ply_file).chain_err(|| "Could not open input file.")?;
let mut reader = BufReader::new(file);
let (header, header_len) = parse_header(&mut reader)?;
file = reader.into_inner();
file.seek(SeekFrom::Start(header_len as u64))?;
if !header.has_element("vertex") {
panic!("Header does not have element 'vertex'");
}
if header.format != Format::BinaryLittleEndianV1 {
panic!("Unsupported PLY format: {:?}", header.format);
}
let vertex = &header["vertex"];
let mut seen_x = false;
let mut seen_y = false;
let mut seen_z = false;
let mut readers: Vec<ReadingFn> = Vec::new();
let mut num_bytes_per_point = 0;
for prop in &vertex.properties {
match &prop.name as &str {
"x" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.x = val,
&mut num_bytes_per_point
));
seen_x = true;
}
"y" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.y = val,
&mut num_bytes_per_point
));
seen_y = true;
}
"z" => {
readers.push(read_casted_property!(
prop.data_type,
|p: &mut Point, val: f64| p.position.z = val,
&mut num_bytes_per_point
));
| }
DataType::Float64 => { | random_line_split |
CO542_project.py

testRatio = 0.2 # if 1000 images split will 200 for testing
validationRatio = 0.2 # if 1000 images 20% of remaining 800 will be 160 for validation
###################################################
############################### Importing of the Images
count = 0
images = []
classNo = []
myList = os.listdir(path)
print("Total Classes Detected:",len(myList))
noOfClasses=len(myList)
print("Importing Classes.....")
for x in range (0,len(myList)):
    myPicList = os.listdir(path+"/"+str(count))
    for y in myPicList:
        curImg = cv2.imread(path+"/"+str(count)+"/"+y)
        images.append(curImg)
        classNo.append(count)
    print(count, end =" ")
    count +=1
print(" ")
images = np.array(images)
classNo = np.array(classNo)
############################### Split Data
X_train, X_test, y_train, y_test = train_test_split(images, classNo, test_size=testRatio)
X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validationRatio)
# X_train = ARRAY OF IMAGES TO TRAIN
# y_train = CORRESPONDING CLASS ID
############################### TO CHECK IF NUMBER OF IMAGES MATCHES TO NUMBER OF LABELS FOR EACH DATA SET
print("Data Shapes")
print("Train",end = "");print(X_train.shape,y_train.shape)
print("Validation",end = "");print(X_validation.shape,y_validation.shape)
print("Test",end = "");print(X_test.shape,y_test.shape)
assert(X_train.shape[0]==y_train.shape[0]), "The number of images is not equal to the number of labels in the training set"
assert(X_validation.shape[0]==y_validation.shape[0]), "The number of images is not equal to the number of labels in the validation set"
assert(X_test.shape[0]==y_test.shape[0]), "The number of images is not equal to the number of labels in the test set"
assert(X_train.shape[1:]==(imageDimesions))," The dimensions of the Training images are wrong "
assert(X_validation.shape[1:]==(imageDimesions))," The dimensions of the Validation images are wrong "
assert(X_test.shape[1:]==(imageDimesions))," The dimensions of the Test images are wrong"
############################### READ CSV FILE
labelFile = 'labels.csv' # file with all names of classes
data=pd.read_csv(labelFile)
print("data shape ",data.shape,type(data))
############################### DISPLAY SOME SAMPLES IMAGES OF ALL THE CLASSES
num_of_samples = []
cols = 5
num_classes = noOfClasses
fig, axs = plt.subplots(nrows=num_classes, ncols=cols, figsize=(5, 300))
fig.tight_layout()
for i in range(cols):
for j,row in data.iterrows():
x_selected = X_train[y_train == j]
axs[j][i].imshow(x_selected[random.randint(0, len(x_selected)- 1), :, :], cmap=plt.get_cmap("gray"))
axs[j][i].axis("off")
if i == 2:
axs[j][i].set_title(str(j)+ "-"+row["Name"])
num_of_samples.append(len(x_selected))
############################### DISPLAY A BAR CHART SHOWING NO OF SAMPLES FOR EACH CATEGORY
print(num_of_samples)
plt.figure(figsize=(12, 4))
plt.bar(range(0, num_classes), num_of_samples)
plt.title("Distribution of the training dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
############################### PREPROCESSING THE IMAGES
def grayscale(img):
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
return img
def equalize(img):
img =cv2.equalizeHist(img)
return img
def preprocessing(img):
img = grayscale(img) # CONVERT TO GRAYSCALE
img = equalize(img) # STANDARDIZE THE LIGHTING IN AN IMAGE
img = img/255 # TO NORMALIZE VALUES BETWEEN 0 AND 1 INSTEAD OF 0 TO 255
return img
X_train=np.array(list(map(preprocessing,X_train))) # TO ITERATE AND PREPROCESS ALL IMAGES
X_validation=np.array(list(map(preprocessing,X_validation)))
X_test=np.array(list(map(preprocessing,X_test)))
# cv2.imshow("GrayScale Images",X_train[random.randint(0,len(X_train)-1)]) # TO CHECK IF THE TRAINING IS DONE PROPERLY
############################### ADD A DEPTH OF 1
X_train=X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1)
X_validation=X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1)
X_test=X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1)
############################### AUGMENTATION OF IMAGES: TO MAKE IT MORE GENERIC
dataGen= ImageDataGenerator(width_shift_range=0.1, # 0.1 = 10% IF MORE THAN 1 E.G 10 THEN IT REFFERS TO NO. OF PIXELS EG 10 PIXELS
height_shift_range=0.1,
zoom_range=0.2, # 0.2 MEANS CAN GO FROM 0.8 TO 1.2
shear_range=0.1, # MAGNITUDE OF SHEAR ANGLE
rotation_range=10) # DEGREES
dataGen.fit(X_train)
batches= dataGen.flow(X_train,y_train,batch_size=20) # REQUESTING DATA GENERATOR TO GENERATE IMAGES, BATCH SIZE = NO. OF IMAGES CREATED EACH TIME IT IS CALLED
X_batch,y_batch = next(batches)
# TO SHOW AUGMENTED IMAGE SAMPLES
fig,axs=plt.subplots(1,15,figsize=(20,5))
fig.tight_layout()
for i in range(15):
axs[i].imshow(X_batch[i].reshape(imageDimesions[0],imageDimesions[1]))
axs[i].axis('off')
plt.show()
y_train = to_categorical(y_train,noOfClasses)
y_validation = to_categorical(y_validation,noOfClasses)
y_test = to_categorical(y_test,noOfClasses)
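# For illustration: to_categorical turns an integer class id into a one-hot vector of
# length noOfClasses, e.g. class 2 with 5 classes becomes [0., 0., 1., 0., 0.], which
# is the target format the categorical_crossentropy loss below expects.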
############################### CONVOLUTION NEURAL NETWORK MODEL
def myModel():
no_Of_Filters=60
size_of_Filter=(5,5)
size_of_Filter2=(3,3)
size_of_pool=(2,2)
no_Of_Nodes = 500
model= Sequential()
model.add((Conv2D(no_Of_Filters,size_of_Filter,input_shape=(imageDimesions[0],imageDimesions[1],1),activation='relu')))
model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add((Conv2D(no_Of_Filters//2, size_of_Filter2,activation='relu')))
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(no_Of_Nodes,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(noOfClasses,activation='softmax'))
# COMPILE MODEL
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
############################### TRAIN
model = myModel()
print(model.summary())
history=model.fit_generator(dataGen.flow(X_train,y_train,batch_size=batch_size_val),epochs=epochs_val,validation_data=(X_validation,y_validation),shuffle=1)
# no_Of_Filters=60
# size_of_Filter=(5,5) # THIS IS THE KERNEL THAT MOVES AROUND THE IMAGE TO GET THE FEATURES.
# # THIS WOULD REMOVE 2 PIXELS FROM EACH BORDER WHEN USING 32 32 IMAGE
# size_of_Filter2=(3,3)
# size_of_pool=(2,2) # SCALE DOWN ALL FEATURE MAPS TO GENERALIZE MORE, TO REDUCE OVERFITTING
# no_Of_Nodes = 500 # NO. OF NODES IN HIDDEN LAYERS
# model= Sequential()
# model.add((Conv2D(no_Of_Filters,size_of_Filter,input_shape=(imageDimesions[0],imageDimesions[1],1),activation='relu'))) # ADDING MORE CONVOLUTION LAYERS = LESS FEATURES BUT CAN CAUSE ACCURACY TO INCREASE
# model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
# model.add(MaxPooling2D(pool_size=size_of_pool)) # DOES NOT AFFECT THE DEPTH/NO OF FILTERS
# model.add((Conv2D(no_Of_Filters//2, size_of_Filter2,activation='relu')))
# model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
# model.add(MaxPooling2D(pool_size=size_of_pool))
# model.add(Dropout(0.5))
# model.add(Flatten())
# model.add(Dense(no_Of_Nodes,activation='relu'))
# model.add(Dropout(0.5)) # INPUTS NODES TO DROP WITH EACH UPDATE 1 ALL 0 NONE
# model.add(Dense(noOfClasses,activation='softmax')) # OUTPUT LAYER
# # COMPILE MODEL
# model.compile(loss='categorical_crossentropy', optimizer='adam',metrics=['accuracy'])
# print(model.summary())
# history=model.fit_generator(dataGen.flow(X_train,y_train,batch_size=batch_size_val),steps_per_epoch=steps_per_epoch_val,epochs=epochs_val,validation_data=(X_validation,y_validation),shuffle=1)
############################### PLOT
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training','validation'])
plt.title('loss')
plt.xlabel('epoch')
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training','validation'])
plt.title('Accuracy')
plt.xlabel('epoch')
plt.show()
score =model.evaluate(X_test,y_test,verbose=0)
print('Test Score:',score[0])
print('Test Accuracy:',score[1])
# #STORE THE MODEL AS A PICKLE OBJECT
# pickle_out= open("model_trained.p","wb") # wb = WRITE BYTE
# pickle.dump(model,pickle_out)
# pickle_out.close()
# cv2.waitKey(0)
# import weakref
# #STORE THE MODEL AS A PICKLE OBJECT
# pickle_out= open("model_trained.p","wb") # wb = WRITE BYTE
# pickle.dump(model,pickle_out)
# pickle_out.close()
# cv2.waitKey(0)
model.save("./training/TrainedModule_epoch30.h5")
# print(data)
# print(data.Name[0])
from keras.models import load_model
model = load_model('./training/TrainedModule_epoch30.h5')
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def test_on_img(img):
data=[]
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
# Y_pred = model.predict_classes(X_test)
predict_x=model.predict(X_test)
Y_pred=np.argmax(predict_x,axis=1)
return image,Y_pred
# # plot,prediction = test_on_img(r'D:\Traffic_Sign_Recognition\Test\00500.png')
# plot,prediction = test_on_img(r'./Test/001.png')
# s = [str(i) for i in prediction]
# a = int("".join(s))
# print("Predicted traffic sign is: ", s)
# # classes[a]
# plt.imshow(plot)
# plt.show()
classes = { 0:'Speed limit (20km/h)',
1:'Speed limit (30km/h)',
2:'Speed limit (50km/h)',
3:'Speed limit (60km/h)',
4:'Speed limit (70km/h)',
5:'Speed limit (80km/h)',
6:'End of speed limit (80km/h)',
7:'Speed limit (100km/h)',
8:'Speed limit (120km/h)',
9:'No passing',
10:'No passing veh over 3.5 tons',
11:'Right-of-way at intersection',
12:'Priority road',
13:'Yield',
14:'Stop',
15:'No vehicles',
16:'Veh > 3.5 tons prohibited',
17:'No entry',
18:'General caution',
19:'Dangerous curve left',
20:'Dangerous curve right',
21:'Double curve',
22:'Bumpy road',
23:'Slippery road',
24:'Road narrows on the right',
25:'Road work',
26:'Traffic signals',
27:'Pedestrians',
28:'Children crossing',
29:'Bicycles crossing',
30:'Beware of ice/snow',
31:'Wild animals crossing',
32:'End speed + passing limits',
33:'Turn right ahead',
34:'Turn left ahead',
35:'Ahead only',
36:'Go straight or right',
37:'Go straight or left',
38:'Keep right',
39:'Keep left',
40:'Roundabout mandatory',
41:'End of no passing',
42:'End no passing veh > 3.5 tons' }
def grayscale(img):
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
return img
def equalize(img):
    img =cv2.equalizeHist(img)
    return img
CO542_project.py

testRatio = 0.2 # if 1000 images split will 200 for testing
validationRatio = 0.2 # if 1000 images 20% of remaining 800 will be 160 for validation
###################################################
############################### Importing of the Images
count = 0
images = []
classNo = []
myList = os.listdir(path)
print("Total Classes Detected:",len(myList))
noOfClasses=len(myList)
print("Importing Classes.....")
for x in range (0,len(myList)):
myPicList = os.listdir(path+"/"+str(count))
for y in myPicList:
curImg = cv2.imread(path+"/"+str(count)+"/"+y)
images.append(curImg)
classNo.append(count)
print(count, end =" ")
count +=1
print(" ")
images = np.array(images)
classNo = np.array(classNo)
############################### Split Data
X_train, X_test, y_train, y_test = train_test_split(images, classNo, test_size=testRatio)
X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validationRatio)
# X_train = ARRAY OF IMAGES TO TRAIN
# y_train = CORRESPONDING CLASS ID
############################### TO CHECK IF NUMBER OF IMAGES MATCHES TO NUMBER OF LABELS FOR EACH DATA SET
print("Data Shapes")
print("Train",end = "");print(X_train.shape,y_train.shape)
print("Validation",end = "");print(X_validation.shape,y_validation.shape)
print("Test",end = "");print(X_test.shape,y_test.shape)
assert(X_train.shape[0]==y_train.shape[0]), "The number of images is not equal to the number of labels in the training set"
assert(X_validation.shape[0]==y_validation.shape[0]), "The number of images is not equal to the number of labels in the validation set"
assert(X_test.shape[0]==y_test.shape[0]), "The number of images is not equal to the number of labels in the test set"
assert(X_train.shape[1:]==(imageDimesions))," The dimensions of the Training images are wrong "
assert(X_validation.shape[1:]==(imageDimesions))," The dimensions of the Validation images are wrong "
assert(X_test.shape[1:]==(imageDimesions))," The dimensions of the Test images are wrong"
############################### READ CSV FILE
labelFile = 'labels.csv' # file with all names of classes
data=pd.read_csv(labelFile)
print("data shape ",data.shape,type(data))
############################### DISPLAY SOME SAMPLES IMAGES OF ALL THE CLASSES
num_of_samples = []
cols = 5
num_classes = noOfClasses
fig, axs = plt.subplots(nrows=num_classes, ncols=cols, figsize=(5, 300))
fig.tight_layout()
for i in range(cols):
for j,row in data.iterrows():
x_selected = X_train[y_train == j]
axs[j][i].imshow(x_selected[random.randint(0, len(x_selected)- 1), :, :], cmap=plt.get_cmap("gray"))
axs[j][i].axis("off")
if i == 2:
axs[j][i].set_title(str(j)+ "-"+row["Name"])
num_of_samples.append(len(x_selected))
############################### DISPLAY A BAR CHART SHOWING NO OF SAMPLES FOR EACH CATEGORY
print(num_of_samples)
plt.figure(figsize=(12, 4))
plt.bar(range(0, num_classes), num_of_samples)
plt.title("Distribution of the training dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
############################### PREPROCESSING THE IMAGES
def grayscale(img):
|
def equalize(img):
img =cv2.equalizeHist(img)
return img
def preprocessing(img):
img = grayscale(img) # CONVERT TO GRAYSCALE
img = equalize(img) # STANDARDIZE THE LIGHTING IN AN IMAGE
img = img/255 # TO NORMALIZE VALUES BETWEEN 0 AND 1 INSTEAD OF 0 TO 255
return img
X_train=np.array(list(map(preprocessing,X_train))) # TO IRETATE AND PREPROCESS ALL IMAGES
X_validation=np.array(list(map(preprocessing,X_validation)))
X_test=np.array(list(map(preprocessing,X_test)))
# cv2.imshow("GrayScale Images",X_train[random.randint(0,len(X_train)-1)]) # TO CHECK IF THE TRAINING IS DONE PROPERLY
############################### ADD A DEPTH OF 1
X_train=X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1)
X_validation=X_validation.reshape(X_validation.shape[0],X_validation.shape[1],X_validation.shape[2],1)
X_test=X_test.reshape(X_test.shape[0],X_test.shape[1],X_test.shape[2],1)
############################### AUGMENTATAION OF IMAGES: TO MAKEIT MORE GENERIC
dataGen= ImageDataGenerator(width_shift_range=0.1, # 0.1 = 10% IF MORE THAN 1 E.G 10 THEN IT REFFERS TO NO. OF PIXELS EG 10 PIXELS
height_shift_range=0.1,
zoom_range=0.2, # 0.2 MEANS CAN GO FROM 0.8 TO 1.2
shear_range=0.1, # MAGNITUDE OF SHEAR ANGLE
rotation_range=10) # DEGREES
dataGen.fit(X_train)
batches= dataGen.flow(X_train,y_train,batch_size=20) # REQUESTING DATA GENRATOR TO GENERATE IMAGES BATCH SIZE = NO. OF IMAGES CREAED EACH TIME ITS CALLED
X_batch,y_batch = next(batches)
# TO SHOW AGMENTED IMAGE SAMPLES
fig,axs=plt.subplots(1,15,figsize=(20,5))
fig.tight_layout()
for i in range(15):
axs[i].imshow(X_batch[i].reshape(imageDimesions[0],imageDimesions[1]))
axs[i].axis('off')
plt.show()
y_train = to_categorical(y_train,noOfClasses)
y_validation = to_categorical(y_validation,noOfClasses)
y_test = to_categorical(y_test,noOfClasses)
############################### CONVOLUTION NEURAL NETWORK MODEL
def myModel():
no_Of_Filters=60
size_of_Filter=(5,5)
size_of_Filter2=(3,3)
size_of_pool=(2,2)
no_Of_Nodes = 500
model= Sequential()
model.add((Conv2D(no_Of_Filters,size_of_Filter,input_shape=(imageDimesions[0],imageDimesions[1],1),activation='relu')))
model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add((Conv2D(no_Of_Filters//2, size_of_Filter2,activation='relu')))
model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
model.add(MaxPooling2D(pool_size=size_of_pool))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(no_Of_Nodes,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(noOfClasses,activation='softmax'))
# COMPILE MODEL
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
############################### TRAIN
model = myModel()
print(model.summary())
history=model.fit_generator(dataGen.flow(X_train,y_train,batch_size=batch_size_val),epochs=epochs_val,validation_data=(X_validation,y_validation),shuffle=1)
# no_Of_Filters=60
# size_of_Filter=(5,5) # THIS IS THE KERNEL THAT MOVE AROUND THE IMAGE TO GET THE FEATURES.
# # THIS WOULD REMOVE 2 PIXELS FROM EACH BORDER WHEN USING 32 32 IMAGE
# size_of_Filter2=(3,3)
# size_of_pool=(2,2) # SCALE DOWN ALL FEATURE MAP TO GERNALIZE MORE, TO REDUCE OVERFITTING
# no_Of_Nodes = 500 # NO. OF NODES IN HIDDEN LAYERS
# model= Sequential()
# model.add((Conv2D(no_Of_Filters,size_of_Filter,input_shape=(imageDimesions[0],imageDimesions[1],1),activation='relu'))) # ADDING MORE CONVOLUTION LAYERS = LESS FEATURES BUT CAN CAUSE ACCURACY TO INCREASE
# model.add((Conv2D(no_Of_Filters, size_of_Filter, activation='relu')))
# model.add(MaxPooling2D(pool_size=size_of_pool)) # DOES NOT EFFECT THE DEPTH/NO OF FILTERS
# model.add((Conv2D(no_Of_Filters//2, size_of_Filter2,activation='relu')))
# model.add((Conv2D(no_Of_Filters // 2, size_of_Filter2, activation='relu')))
# model.add(MaxPooling2D(pool_size=size_of_pool))
# model.add(Dropout(0.5))
# model.add(Flatten())
# model.add(Dense(no_Of_Nodes,activation='relu'))
# model.add(Dropout | img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
return img | identifier_body |
proxyws.go | {
defer util.HandlePanic()
atomic.AddUint64(&(pws.ConnCount), 1)
var (
serverConn *websocket.Conn
//tcpAddr *net.TCPAddr
wsaddr = r.RemoteAddr
)
//获取客户端的serverID 从http.head的Sec-Websocket-Protocol字段中获取,是与客户端商定的
whead := w.Header()
serverID := r.Header.Get("Sec-Websocket-Protocol")
if "" == serverID { //默认走大厅
serverID = "HALL"
} else {
whead.Add("Sec-Websocket-Protocol", serverID)
}
// 向后端透传真实IP的方式
if "http" == pws.RealIpMode {
wsaddr = realip.FromRequest(r)
}
//根据serverID获取有效线路
wsline := pws.AssignLine(serverID)
if wsline == nil {
log.Info("Session(%s -> null, TLS: %v serverID:%v) Failed", wsaddr, pws.EnableTls, serverID)
http.NotFound(w, r)
return
}
// http升级至websocket
wsConn, err := DefaultUpgrader.Upgrade(w, r, whead)
wsConn.SetCloseHandler(func(closeCode int, text string) error {
wsConn.Close()
return errors.New(" the server stops processing! ")
})
line := wsline
ConnMgr.UpdateInNum(1)
defer ConnMgr.UpdateInNum(-1)
//服务端根据域名获取IP
addrs, _ := net.LookupHost(line.Remote)
if 0 < len(addrs) {
line.Remote = addrs[0]
}
//检测IP是否可用
if _, err = net.ResolveTCPAddr("tcp", line.Remote); err != nil {
log.Info("Session(%s -> %s, TLS: %v) ResolveTCPAddr Err: %s", wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
line.UpdateDelay(UnreachableTime)
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
log.Info("ServerID: %v name: %v Remote: %v", line.LineID, pws.name, line.Remote)
var (
clientRecv int64 = 0
clientSend int64 = 0
serverRecv int64 = 0
serverSend int64 = 0
)
// 服务端 --> 客户端
s2c := func() {
defer util.HandlePanic()
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
//var headlen = HEAD_LEN
var nread int
var err error
var buf = make([]byte, pws.RecvBufLen)
for {
if err = serverConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread, buf, err = serverConn.ReadMessage()
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server Read Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = serverConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread = len(buf)
serverRecv += int64(nread)
ConnMgr.UpdateServerInSize(int64(nread))
if err = wsConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = wsConn.WriteMessage(websocket.BinaryMessage, buf)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server WriteMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
serverSend += int64(nread)
ConnMgr.UpdateServerOutSize(int64(nread))
log.Info("server:[%v] send-->>> <%v> MsgLen::%v", line.Remote, wsaddr, nread)
}
}
// 客户端 --> 服务端
c2s := func() {
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
var nwrite int
var err error
var message []byte
for {
if err = wsConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
_, message, err = wsConn.ReadMessage()
if err != nil {
wsConn.Close()
log.Info("Session(%s -> %s, TLS: %v) Closed, Client ReadMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
// 建立连接
if serverConn == nil {
// 校验第一个数据包是否有效
t1 := time.Now()
dialer := &websocket.Dialer{}
dialer.TLSClientConfig = &tls.Config{}
addr := "ws://" + line.Remote
if pws.EnableTls {
dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
addr = "wss://" + line.Remote
}
serverConn, _, err = dialer.Dial(addr, nil)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) DialTCP Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
//线路延迟
line.UpdateDelay(UnreachableTime)
//统计连接失败数
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
line.UpdateDelay(time.Since(t1))
//统计负载量
line.UpdateLoad(1)
defer line.UpdateLoad(-1)
//统计当前连接数
ConnMgr.UpdateOutNum(1)
defer ConnMgr.UpdateOutNum(-1)
//统计连接成功数
ConnMgr.UpdateSuccessNum(1)
log.Info("Session(%s -> %s, TLS: %v) Established", wsaddr, line.Remote, pws.EnableTls)
//传真实IP
if err = line.HandleRedirectWeb(serverConn, wsaddr); err != nil {
log.Info("Session(%s -> %s) HandleRedirect Failed: %s", wsaddr, line.Remote, err.Error())
return
}
if err = serverConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s", | }
nwrite = len(message)
clientRecv += int64(nwrite)
ConnMgr.UpdateClientInSize(int64(nwrite))
if err = serverConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = serverConn.WriteMessage(websocket.BinaryMessage, message)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client Write len:%v Err: %s ",
w | wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
util.Go(s2c) | random_line_split |
proxyws.go | }
serverConn, _, err = dialer.Dial(addr, nil)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) DialTCP Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
//线路延迟
line.UpdateDelay(UnreachableTime)
//统计连接失败数
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
line.UpdateDelay(time.Since(t1))
//统计负载量
line.UpdateLoad(1)
defer line.UpdateLoad(-1)
//统计当前连接数
ConnMgr.UpdateOutNum(1)
defer ConnMgr.UpdateOutNum(-1)
//统计连接成功数
ConnMgr.UpdateSuccessNum(1)
log.Info("Session(%s -> %s, TLS: %v) Established", wsaddr, line.Remote, pws.EnableTls)
//传真实IP
if err = line.HandleRedirectWeb(serverConn, wsaddr); err != nil {
log.Info("Session(%s -> %s) HandleRedirect Failed: %s", wsaddr, line.Remote, err.Error())
return
}
if err = serverConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
util.Go(s2c)
}
nwrite = len(message)
clientRecv += int64(nwrite)
ConnMgr.UpdateClientInSize(int64(nwrite))
if err = serverConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = serverConn.WriteMessage(websocket.BinaryMessage, message)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client Write len:%v Err: %s ",
wsaddr, line.Remote, pws.EnableTls, nwrite, err.Error())
break
}
if err = serverConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
clientSend += int64(nwrite)
ConnMgr.UpdateClientOutSize(int64(nwrite))
//仅打印长度
log.Info("client:[%v] send-->> <%v> MsgLen:%v", wsaddr, line.Remote, nwrite)
}
}
// 客户端-->服务端
c2s()
log.Info("Session(%s -> %s, TLS: %v SID: %s) Over, DataInfo(CR: %d, CW: %d, SR: %d, SW: %d)",
wsaddr, line.Remote, pws.EnableTls, line.LineID, clientRecv, clientSend, serverRecv, serverSend)
}
func (pws *ProxyWebsocket) Start() {
if len(pws.lines) == 0 {
log.Info("ProxyWebsocket(%v TLS: %v) Start Err: No Line !", pws.name, pws.EnableTls)
return
}
// 监听数据
if pws.Listener == nil {
var err error
pws.Listener, err = knet.NewListener(pws.local, DefaultSocketOpt)
if err != nil {
log.Fatal("ProxyWebsocket(%v TLS: %v) NewListener Failed: %v", pws.name, pws.EnableTls, err)
} else {
log.Info(" Listen:%v local:%v", pws.name, pws.local)
}
}
util.Go(func() {
pws.Lock()
defer pws.Unlock()
if !pws.Running {
pws.Running = true
util.Go(func() {
//由于部分线路共用busline的端口,故牵至goroutine外
//l, err := knet.NewListener(pws.local, DefaultSocketOpt)
//if err != nil {
// log.Fatal("ProxyWebsocket(%v TLS: %v) NewListener Failed: %v", pws.name, pws.EnableTls, err)
//} else {
// log.Info(" Listen:%v local:%v", pws.name, pws.local)
//}
s := &http.Server{
Addr: pws.local,
Handler: pws,
ReadTimeout: DefaultSocketOpt.ReadTimeout,
ReadHeaderTimeout: DefaultSocketOpt.ReadHeaderTimeout,
WriteTimeout: DefaultSocketOpt.WriteTimeout,
MaxHeaderBytes: DefaultSocketOpt.MaxHeaderBytes,
}
if pws.EnableTls {
if len(pws.Routes) == 0 {
pws.Routes["/gate/wss"] = pws.OnNew
}
log.Info("ProxyWebsocket(%v TLS: %v) Running On: %s, Routes: %+v, Certs: %+v", pws.name, pws.EnableTls, pws.local, pws.Routes, pws.Certs)
pws.StartCheckLines()
defer pws.StopCheckLines()
if len(pws.Certs) == 0 {
log.Fatal("ProxyWebsocket(%v TLS: %v) ListenAndServeTLS Error: No Cert And Key Files", pws.name, pws.EnableTls)
}
s.TLSConfig = &tls.Config{}
for _, v := range pws.Certs {
cert, err := tls.LoadX509KeyPair(v.Certfile, v.Keyfile)
if err != nil {
log.Fatal("ProxyWebsocket(%v TLS: %v) tls.LoadX509KeyPair(%v, %v) Failed: %v", pws.name, pws.EnableTls, v.Certfile, v.Keyfile, err)
}
s.TLSConfig.Certificates = append(s.TLSConfig.Certificates, cert)
}
tlsListener := tls.NewListener(pws.Listener, s.TLSConfig)
if err := s.Serve(tlsListener); err != nil {
log.Fatal("ProxyWebsocket(%v TLS: %v) Serve Error: %v", pws.name, pws.EnableTls, err)
}
//if err := s.Serve(l); err != nil {
// log.Fatal("ProxyWebsocket(%v TLS: %v) Serve Error: %v", pws.name, pws.EnableTls, err)
//}
} else {
if len(pws.Routes) == 0 {
pws.Routes["/gate/ws"] = pws.OnNew
}
log.Info("ProxyWebsocket(%v TLS: %v, Routes: %+v) Running On: %s", pws.name, pws.EnableTls, pws.Routes, pws.local)
//线路检测
pws.StartCheckLines()
defer pws.StopCheckLines()
if err := s.Serve(pws.Listener); err != nil {
log.Fatal("ProxyWebsocket(TLS: %v) Serve Error: %v", pws.EnableTls, err)
}
//if err := s.Serve(l); err != nil {
// log.Fatal("ProxyWebsocket(TLS: %v) Serve Error: %v", pws.EnableTls, err)
//}
}
})
}
})
}
func (pws *ProxyWebsocket) Stop() {
pws.Lock()
defer pws.Unlock()
if pws.Running {
pws.Running = false
}
}
func NewWebsocketProxy(name string, local string, realIpModel string, paths []string, tls bool, certs []*config.XMLCert) *ProxyWebsocket {
pws := &ProxyWebsocket{
Running: false,
EnableTls: tls,
Listener: nil,
Heartbeat: DEFAULT_TCP_HEARTBEAT,
AliveTime: DEFAULT_TCP_KEEPALIVE_INTERVAL,
RecvBlockTime: DEFAULT_TCP_READ_BLOCK_TIME,
RecvBufLen: DEFAULT_TCP_READ_BUF_LEN,
SendBlockTime: DEFAULT_TCP_WRITE_BLOCK_TIME | ,
SendBufLen: DEFAULT_TCP_WRITE_BUF_LEN,
linelay: DEFAULT_TCP_NODELAY,
Certs: certs,
Routes: map[string]func(w http.ResponseWriter, r *http.Request){},
RealIpMode: realIpModel,
ProxyBase: &ProxyBase{
name: name,
ptype: PT_WEBSOCKET,
local: local,
lines: []*Line{},
},
}
for _, path := range paths {
pws.Routes[path] = pws.OnNew
}
return pws
}
| identifier_body |
|
proxyws.go | defer util.HandlePanic()
atomic.AddUint64(&(pws.ConnCount), 1)
var (
serverConn *websocket.Conn
//tcpAddr *net.TCPAddr
wsaddr = r.RemoteAddr
)
//获取客户端的serverID 从http.head的Sec-Websocket-Protocol字段中获取,是与客户端商定的
whead := w.Header()
serverID := r.Header.Get("Sec-Websocket-Protocol")
if "" == serverID { //默认走大厅
serverID = "HALL"
} else {
whead.Add("Sec-Websocket-Protocol", serverID)
}
// 向后端透传真实IP的方式
if "http" == pws.RealIpMode {
wsaddr = realip.FromRequest(r)
}
//根据serverID获取有效线路
wsline := pws.AssignLine(serverID)
if wsline == nil {
log.Info("Session(%s -> null, TLS: %v serverID:%v) Failed", wsaddr, pws.EnableTls, serverID)
http.NotFound(w, r)
return
}
// http升级至websocket
wsConn, err := DefaultUpgrader.Upgrade(w, r, whead)
wsConn.SetCloseHandler(func(closeCode int, text string) error {
wsConn.Close()
return errors.New(" the server stops processing! ")
})
line := wsline
ConnMgr.UpdateInNum(1)
defer ConnMgr.UpdateInNum(-1)
//服务端根据域名获取IP
addrs, _ := net.LookupHost(line.Remote)
if 0 < len(addrs) {
line.Remote = addrs[0]
}
//检测IP是否可用
if _, err = net.ResolveTCPAddr("tcp", line.Remote); err != nil {
log.Info("Session(%s -> %s, TLS: %v) ResolveTCPAddr Err: %s", wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
line.UpdateDelay(UnreachableTime)
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
log.Info("ServerID: %v name: %v Remote: %v", line.LineID, pws.name, line.Remote)
var (
clientRecv int64 = 0
clientSend int64 = 0
serverRecv int64 = 0
serverSend int64 = 0
)
// 服务端 --> 客户端
s2c := func() {
defer util.HandlePanic()
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
//var headlen = HEAD_LEN
var nread int
var err error
var buf = make([]byte, pws.RecvBufLen)
for {
if err = serverConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread, buf, err = serverConn.ReadMessage()
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server Read Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = serverConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread = len(buf)
serverRecv += int64(nread)
ConnMgr.UpdateServerInSize(int64(nread))
if err = wsConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = wsConn.WriteMessage(websocket.BinaryMessage, buf)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server WriteMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
serverSend += int64(nread)
ConnMgr.UpdateServerOutSize(int64(nread))
log.Info("server:[%v] send-->>> <%v> MsgLen::%v", line.Remote, wsaddr, nread)
}
}
// 客户端 --> 服务端
c2s := func() {
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
var nwrite int
var err error
var message []byte
for {
if err = wsConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
_, message, err = wsConn.ReadMessage()
if err != nil {
wsConn.Close()
log.Info("Session(%s -> %s, TLS: %v) Closed, Client ReadMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
// 建立连接
if serverConn == nil {
// 校验第一个数据包是否有效
t1 := time.Now()
dialer := &websocket.Dialer{}
dialer.TLSClientConfig = &tls.Config{}
addr := "ws://" + line.Remote
i | //统计负载量
line.UpdateLoad(1)
defer line.UpdateLoad(-1)
//统计当前连接数
ConnMgr.UpdateOutNum(1)
defer ConnMgr.UpdateOutNum(-1)
//统计连接成功数
ConnMgr.UpdateSuccessNum(1)
log.Info("Session(%s -> %s, TLS: %v) Established", wsaddr, line.Remote, pws.EnableTls)
//传真实IP
if err = line.HandleRedirectWeb(serverConn, wsaddr); err != nil {
log.Info("Session(%s -> %s) HandleRedirect Failed: %s", wsaddr, line.Remote, err.Error())
return
}
if err = serverConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
util.Go(s2c)
}
nwrite = len(message)
clientRecv += int64(nwrite)
ConnMgr.UpdateClientInSize(int64(nwrite))
if err = serverConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client S
etWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = serverConn.WriteMessage(websocket.BinaryMessage, message)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client Write len:%v Err: %s ",
| f pws.EnableTls {
dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
addr = "wss://" + line.Remote
}
serverConn, _, err = dialer.Dial(addr, nil)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) DialTCP Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
//线路延迟
line.UpdateDelay(UnreachableTime)
//统计连接失败数
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
line.UpdateDelay(time.Since(t1))
| conditional_block |
proxyws.go | ttp.ResponseWriter, r *http.Request) {
defer util.HandlePanic()
atomic.AddUint64(&(pws.ConnCount), 1)
var (
serverConn *websocket.Conn
//tcpAddr *net.TCPAddr
wsaddr = r.RemoteAddr
)
//获取客户端的serverID 从http.head的Sec-Websocket-Protocol字段中获取,是与客户端商定的
whead := w.Header()
serverID := r.Header.Get("Sec-Websocket-Protocol")
if "" == serverID { //默认走大厅
serverID = "HALL"
} else {
whead.Add("Sec-Websocket-Protocol", serverID)
}
// 向后端透传真实IP的方式
if "http" == pws.RealIpMode {
wsaddr = realip.FromRequest(r)
}
//根据serverID获取有效线路
wsline := pws.AssignLine(serverID)
if wsline == nil {
log.Info("Session(%s -> null, TLS: %v serverID:%v) Failed", wsaddr, pws.EnableTls, serverID)
http.NotFound(w, r)
return
}
// http升级至websocket
wsConn, err := DefaultUpgrader.Upgrade(w, r, whead)
wsConn.SetCloseHandler(func(closeCode int, text string) error {
wsConn.Close()
return errors.New(" the server stops processing! ")
})
line := wsline
ConnMgr.UpdateInNum(1)
defer ConnMgr.UpdateInNum(-1)
//服务端根据域名获取IP
addrs, _ := net.LookupHost(line.Remote)
if 0 < len(addrs) {
line.Remote = addrs[0]
}
//检测IP是否可用
if _, err = net.ResolveTCPAddr("tcp", line.Remote); err != nil {
log.Info("Session(%s -> %s, TLS: %v) ResolveTCPAddr Err: %s", wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
line.UpdateDelay(UnreachableTime)
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
log.Info("ServerID: %v name: %v Remote: %v", line.LineID, pws.name, line.Remote)
var (
clientRecv int64 = 0
clientSend int64 = 0
serverRecv int64 = 0
serverSend int64 = 0
)
// 服务端 --> 客户端
s2c := func() {
defer util.HandlePanic()
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
//var headlen = HEAD_LEN
var nread int
var err error
var buf = make([]byte, pws.RecvBufLen)
for {
if err = serverConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread, buf, err = serverConn.ReadMessage()
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server Read Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = serverConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
nread = len(buf)
serverRecv += int64(nread)
ConnMgr.UpdateServerInSize(int64(nread))
if err = wsConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = wsConn.WriteMessage(websocket.BinaryMessage, buf)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server WriteMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Server SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
serverSend += int64(nread)
ConnMgr.UpdateServerOutSize(int64(nread))
log.Info("server:[%v] send-->>> <%v> MsgLen::%v", line.Remote, wsaddr, nread)
}
}
// 客户端 --> 服务端
c2s := func() {
defer func() {
wsConn.Close()
if serverConn != nil {
serverConn.Close()
}
}()
var nwrite int
var err error
var message []byte
for {
if err = wsConn.SetReadDeadline(time.Now().Add(pws.RecvBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
_, message, err = wsConn.ReadMessage()
if err != nil {
wsConn.Close()
log.Info("Session(%s -> %s, TLS: %v) Closed, Client ReadMessage Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
if err = wsConn.SetReadDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
// 建立连接
if serverConn == nil {
// 校验第一个数据包是否有效
t1 := time.Now()
dialer := &websocket.Dialer{}
dialer.TLSClientConfig = &tls.Config{}
addr := "ws://" + line.Remote
if pws.EnableTls {
dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
addr = "wss://" + line.Remote
}
serverConn, _, err = dialer.Dial(addr, nil)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) DialTCP Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
wsConn.Close()
//线路延迟
line.UpdateDelay(UnreachableTime)
//统计连接失败数
line.UpdateFailedNum(1)
ConnMgr.UpdateFailedNum(1)
return
}
line.UpdateDelay(time.Since(t1))
//统计负载量
line.UpdateLoad(1)
defer line.UpdateLoad(-1)
//统计当前连接数
ConnMgr.UpdateOutNum(1)
defer ConnMgr.UpdateOutNum(-1)
//统计连接成功数
ConnMgr.UpdateSuccessNum(1)
log.Info("Session(%s -> %s, TLS: %v) Established", wsaddr, line.Remote, pws.EnableTls)
//传真实IP
if err = line.HandleRedirectWeb(serverConn, wsaddr); err != nil {
log.Info("Session(%s -> %s) HandleRedirect Failed: %s", wsaddr, line.Remote, err.Error())
return
}
if err = serverConn.SetWriteDeadline(time.Time{}); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetReadDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
util.Go(s2c)
}
nwrite = len(message)
clientRecv += int64(nwrite)
ConnMgr.UpdateClientInSize(int64(nwrite))
if err = serverConn.SetWriteDeadline(time.Now().Add(pws.SendBlockTime)); err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client SetWriteDeadline Err: %s",
wsaddr, line.Remote, pws.EnableTls, err.Error())
break
}
err = serverConn.WriteMessage(websocket.BinaryMessage, message)
if err != nil {
log.Info("Session(%s -> %s, TLS: %v) Closed, Client | w(w h | identifier_name |
|
MAIN PROGRAMME.py | , text='Enter ticker (e.g. "AAPL", "MSFT", "TSLA"):').grid(row=0, column=0, sticky=W)
self.ticker = StringVar()
Entry(self.root, width=6, textvariable=self.ticker).grid(row=0, column=1, sticky=W)
Button(self.root, text='get info', command=self.get_info).grid(row=0, column=2, sticky=W)
self.chart_type = 'yearly'
def get_info(self):
''' gets the ball rolling after a ticker was put in '''
if not self.ticker.get():
return
self.get_chart()
self.ask_for_specific_chart()
self.display_yahoo()
self.filings()
self.articles()
#chart:
def | (self): #, chart_type='yearly'):
''' gets the chart: currently: daily chart from bigcharts
to do: specify which chart we want '''
ticker = str(self.ticker.get()).upper()
urls = {'yearly': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=&show=',
'1 Month': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=1&show=&time=4',
'5 Days': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=7&show=&time=3',
'Intraday': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=9&show=&time=1'}
url = urls[self.chart_type]
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
try:
image = soup.find('td', {'class' : 'padded vatop'}).contents[1]['src']
image2 = urlretrieve(image, 'image.gif')
self.photo = PhotoImage(file='image.gif')
self.label = Label(self.root, image=self.photo)
self.label.grid(row=2, column=0, sticky=W)
except:
self.label = Label(self.root, text='No chart available\nCheck for typos!')
self.label.grid(row=2, column=0)
def ask_for_specific_chart(self):
''' puts buttons that if clicked will updated the chart to yearly, 1 month,
5 days or intrady '''
self.chart_time = Listbox(self.root)
self.chart_time.insert(0, 'yearly (default)')
self.chart_time.insert(0, '1 Month')
self.chart_time.insert(0, '5 Days')
self.chart_time.insert(0, 'Intraday')
self.chart_time.grid(row=2, column=1, sticky=W)
Button(self.root, text='update chart', command=self.update_chart).grid(row=2, column=1, sticky=SW)
def update_chart(self):
''' takes a chart time and then refreshes the chart '''
curser = self.chart_time.curselection()[-1]
types = {0: 'Intraday', 1: '5 Days', 2: '1 Month', 3: 'yearly'}
self.chart_type = types[curser]
self.get_chart()
#yahoo:
def int_to_millions(self, number):
''' takes an int and turns it into a millions format '''
try:
if int(number) > 100000:
n = ('{:.1f}M'.format(int(number)/1000000))
else:
n = ('{:.1f}K'.format(int(number)/1000))
except ValueError:
n = 'N/A'
return n
def two_decimals(self, number):
''' takes a float and formats it to only two decimals '''
return ('{:.2f}'.format(float(number)))
def display_yahoo(self):
''' opens the yahoo information, also closes the old information if
there is any '''
self.get_yahoo()
if self.count > 0:
self.label_1.grid_forget()
self.label_2.grid_forget()
self.label_3.grid_forget()
self.label_4.grid_forget()
self.label_6.grid_forget()
self.label_7.grid_forget()
self.count += 1
self.label_1 = Label(self.root, text=self.ticker_name)
self.label_1.grid(row=1 , column=0, sticky = W)
self.label_2 =Label(self.root, text=self.change)
self.label_2.grid(row=3, column=0, sticky = W)
self.label_3 = Label(self.root, text=self.volume)
self.label_3.grid(row=4, column=0, sticky = W)
self.label_4 = Label(self.root, text=self.high_52)
self.label_4.grid(row=5, column=0, sticky = W)
self.label_6 = Label(self.root, text=self.low_52)
self.label_6.grid(row=6, column=0, sticky = W)
self.label_7 = Label(self.root, text=self.float)
self.label_7.grid(row=7, column=0, sticky = W)
def get_yahoo(self):
''' gets the ticker data from the API '''
ticker = str(self.ticker.get()).upper()
url = 'http://finance.yahoo.com/d/quotes.csv?s=' + ticker +'&f=nf6cvkj'
req = urlopen(url)
f = codecs.getreader('utf8')(req)
for row in csv.reader(f):
self.ticker_name = row[0]
self.float = str('Public Float: ') + self.int_to_millions(row[1])
change = row[2].split()
self.change = '$ Change: ' + change[0] + '\n% Change: ' + change[2]
self.volume = 'Volume: ' + self.int_to_millions(row[3])
self.high_52 = '52 Week High: ' + self.two_decimals(row[4])
self.low_52 = '52 Week Low: ' + self.two_decimals(row[5])
# SEC:
def filings(self):
''' prints the links for latest 10-Q and 10-K filings '''
ticker = str(self.ticker.get()).upper()
self.count2 = 0
if self.count2 > 0:
self.link1.grid_forget()
self.link2.grid_forget()
self.count2 += 1
if sec.get_10Q(self, ticker) == '':
self.link1 = Label(self.root, text='Quarterly report not available', fg='black')
self.link1.grid(row=3, column=1, sticky = W)
else:
self.link1 = Label(self.root, text='Most recent quarterly report', fg='blue', cursor='hand2')
self.link1.grid(row=3, column=1, sticky = W)
self.link1.bind('<Button-1>', self.callback1)
if sec.get_10K(self, ticker) == '':
self.link2 = Label(self.root, text='Yearly report not available\nNote: Yearly/Quarterly reports not available for foreign companies', fg='black')
self.link2.grid(row=4, column=1, sticky = W)
else:
self.link2 = Label(self.root, text='Most recent yearly report', fg='blue', cursor='hand2')
self.link2.grid(row=4, column=1, sticky = W)
self.link2.bind('<Button-1>', self.callback2)
def callback1(self, x):
''' creates hyperlink for 10-Q '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10Q(self, ticker))
def callback2(self, x):
''' creates hyperlink for 10-K '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10K(self, ticker))
# News Articles:
def articles(self):
''' prints the links for latest articles '''
ticker = str(self.ticker.get()).upper()
self.news = ns.get_yahoo_news(self, ticker)
self.press = ns.get_yahoo_press_releases(self, ticker)
if self.count > 1:
try:
self.news1.grid_forget()
self.news2.grid_forget()
self.news3.grid_forget()
except AttributeError:
pass
try:
self.error1.grid_forget()
except AttributeError:
pass
try:
self.press1.grid_forget()
self.press2.grid_forget()
self.press3.grid_forget()
except AttributeError:
pass
try:
self.error2.grid_forget()
except AttributeError:
pass
#news
news_headline = Label(self.root, text='Recent news articles')
news_headline.grid(row=6, column=1, sticky = NW)
try:
self.news1 = Label(self.root, text=self.news[0][0], fg='blue', cursor='hand2')
self.news1.grid(row= | get_chart | identifier_name |
MAIN PROGRAMME.py | )
def update_chart(self):
''' takes a chart time and then refreshes the chart '''
curser = self.chart_time.curselection()[-1]
types = {0: 'Intraday', 1: '5 Days', 2: '1 Month', 3: 'yearly'}
self.chart_type = types[curser]
self.get_chart()
#yahoo:
def int_to_millions(self, number):
''' takes an int and turns it into a millions format '''
try:
if int(number) > 100000:
n = ('{:.1f}M'.format(int(number)/1000000))
else:
n = ('{:.1f}K'.format(int(number)/1000))
except ValueError:
n = 'N/A'
return n
def two_decimals(self, number):
''' takes a float and formats it to only two decimals '''
return ('{:.2f}'.format(float(number)))
def display_yahoo(self):
''' opens the yahoo information, also closes the old information if
there is any '''
self.get_yahoo()
if self.count > 0:
self.label_1.grid_forget()
self.label_2.grid_forget()
self.label_3.grid_forget()
self.label_4.grid_forget()
self.label_6.grid_forget()
self.label_7.grid_forget()
self.count += 1
self.label_1 = Label(self.root, text=self.ticker_name)
self.label_1.grid(row=1 , column=0, sticky = W)
self.label_2 =Label(self.root, text=self.change)
self.label_2.grid(row=3, column=0, sticky = W)
self.label_3 = Label(self.root, text=self.volume)
self.label_3.grid(row=4, column=0, sticky = W)
self.label_4 = Label(self.root, text=self.high_52)
self.label_4.grid(row=5, column=0, sticky = W)
self.label_6 = Label(self.root, text=self.low_52)
self.label_6.grid(row=6, column=0, sticky = W)
self.label_7 = Label(self.root, text=self.float)
self.label_7.grid(row=7, column=0, sticky = W)
def get_yahoo(self):
''' gets the ticker data from the API '''
ticker = str(self.ticker.get()).upper()
url = 'http://finance.yahoo.com/d/quotes.csv?s=' + ticker +'&f=nf6cvkj'
req = urlopen(url)
f = codecs.getreader('utf8')(req)
for row in csv.reader(f):
self.ticker_name = row[0]
self.float = str('Public Float: ') + self.int_to_millions(row[1])
change = row[2].split()
self.change = '$ Change: ' + change[0] + '\n% Change: ' + change[2]
self.volume = 'Volume: ' + self.int_to_millions(row[3])
self.high_52 = '52 Week High: ' + self.two_decimals(row[4])
self.low_52 = '52 Week Low: ' + self.two_decimals(row[5])
# SEC:
def filings(self):
''' prints the links for latest 10-Q and 10-K filings '''
ticker = str(self.ticker.get()).upper()
self.count2 = 0
if self.count2 > 0:
self.link1.grid_forget()
self.link2.grid_forget()
self.count2 += 1
if sec.get_10Q(self, ticker) == '':
self.link1 = Label(self.root, text='Quarterly report not available', fg='black')
self.link1.grid(row=3, column=1, sticky = W)
else:
self.link1 = Label(self.root, text='Most recent quarterly report', fg='blue', cursor='hand2')
self.link1.grid(row=3, column=1, sticky = W)
self.link1.bind('<Button-1>', self.callback1)
if sec.get_10K(self, ticker) == '':
self.link2 = Label(self.root, text='Yearly report not available\nNote: Yearly/Quarterly reports not available for foreign companies', fg='black')
self.link2.grid(row=4, column=1, sticky = W)
else:
self.link2 = Label(self.root, text='Most recent yearly report', fg='blue', cursor='hand2')
self.link2.grid(row=4, column=1, sticky = W)
self.link2.bind('<Button-1>', self.callback2)
def callback1(self, x):
''' creates hyperlink for 10-Q '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10Q(self, ticker))
def callback2(self, x):
''' creates hyperlink for 10-K '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10K(self, ticker))
# News Articles:
def articles(self):
''' prints the links for latest articles '''
ticker = str(self.ticker.get()).upper()
self.news = ns.get_yahoo_news(self, ticker)
self.press = ns.get_yahoo_press_releases(self, ticker)
if self.count > 1:
try:
self.news1.grid_forget()
self.news2.grid_forget()
self.news3.grid_forget()
except AttributeError:
pass
try:
self.error1.grid_forget()
except AttributeError:
pass
try:
self.press1.grid_forget()
self.press2.grid_forget()
self.press3.grid_forget()
except AttributeError:
pass
try:
self.error2.grid_forget()
except AttributeError:
pass
#news
news_headline = Label(self.root, text='Recent news articles')
news_headline.grid(row=6, column=1, sticky = NW)
try:
self.news1 = Label(self.root, text=self.news[0][0], fg='blue', cursor='hand2')
self.news1.grid(row=7, column=1, sticky = NW)
self.news1.bind('<Button-1>', self.callback3)
self.news2 = Label(self.root, text=self.news[1][0], fg='blue', cursor='hand2')
self.news2.grid(row=8, column=1, sticky = NW)
self.news2.bind('<Button-1>', self.callback3_1)
self.news3 = Label(self.root, text=self.news[2][0], fg='blue', cursor='hand2')
self.news3.grid(row=9, column=1, sticky = NW)
self.news3.bind('<Button-1>', self.callback3_2)
except:
self.error1 = Label(self.root, text='News not available :(', fg='black')
self.error1.grid(row=7, column=1, sticky = NW)
try:
if self.news1:
try:
self.error1.grid_forget()
except AttributeError:
pass
except:
pass
# press releases
press_headline = Label(self.root, text='\nRecent press releases')
press_headline.grid(row=10, column=1, sticky = NW)
try:
self.press1 = Label(self.root, text=self.press[0][0], fg='blue', cursor='hand2')
self.press1.grid(row=11, column=1, sticky = NW)
self.press1.bind('<Button-1>', self.callback4)
self.press2 = Label(self.root, text=self.press[1][0], fg='blue', cursor='hand2')
self.press2.grid(row=12, column=1, sticky = NW)
self.press2.bind('<Button-1>', self.callback4_1)
self.press3 = Label(self.root, text=self.press[2][0], fg='blue', cursor='hand2')
self.press3.grid(row=13, column=1, sticky = NW)
self.press3.bind('<Button-1>', self.callback4_2)
except:
self.error1 = Label(self.root, text='Press releases not available :(', fg='black')
self.error1.grid(row=11, column=1, sticky = NW)
try:
if self.press1:
try:
self.error2.grid_forget()
except AttributeError:
pass
except:
pass
def callback3(self, x):
''' creates hyperlink for news'''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(self.news[0][1])
def callback3_1(self, x):
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(self.news[1][1])
def callback3_2(self, x):
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(self.news[2][1])
def callback4(self, x):
| ''' creates hyperlink for press releases '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(self.press[0][1]) | identifier_body |
|
MAIN PROGRAMME.py | , text='Enter ticker (e.g. "AAPL", "MSFT", "TSLA"):').grid(row=0, column=0, sticky=W)
self.ticker = StringVar()
Entry(self.root, width=6, textvariable=self.ticker).grid(row=0, column=1, sticky=W)
Button(self.root, text='get info', command=self.get_info).grid(row=0, column=2, sticky=W)
self.chart_type = 'yearly'
def get_info(self):
''' gets the ball rolling after a ticker was put in '''
if not self.ticker.get():
return
self.get_chart()
self.ask_for_specific_chart()
self.display_yahoo()
self.filings()
self.articles()
#chart:
def get_chart(self): #, chart_type='yearly'):
''' gets the chart: currently: daily chart from bigcharts
to do: specify which chart we want '''
ticker = str(self.ticker.get()).upper()
urls = {'yearly': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=&show=',
'1 Month': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=1&show=&time=4',
'5 Days': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=7&show=&time=3',
'Intraday': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=9&show=&time=1'}
url = urls[self.chart_type]
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
try:
image = soup.find('td', {'class' : 'padded vatop'}).contents[1]['src']
image2 = urlretrieve(image, 'image.gif')
self.photo = PhotoImage(file='image.gif')
self.label = Label(self.root, image=self.photo)
self.label.grid(row=2, column=0, sticky=W)
except:
self.label = Label(self.root, text='No chart available\nCheck for typos!')
self.label.grid(row=2, column=0)
def ask_for_specific_chart(self):
''' puts buttons that if clicked will updated the chart to yearly, 1 month,
5 days or intrady '''
self.chart_time = Listbox(self.root)
self.chart_time.insert(0, 'yearly (default)')
self.chart_time.insert(0, '1 Month')
self.chart_time.insert(0, '5 Days')
self.chart_time.insert(0, 'Intraday')
self.chart_time.grid(row=2, column=1, sticky=W)
Button(self.root, text='update chart', command=self.update_chart).grid(row=2, column=1, sticky=SW)
def update_chart(self):
''' takes a chart time and then refreshes the chart '''
curser = self.chart_time.curselection()[-1]
types = {0: 'Intraday', 1: '5 Days', 2: '1 Month', 3: 'yearly'}
self.chart_type = types[curser]
self.get_chart()
#yahoo:
def int_to_millions(self, number):
''' takes an int and turns it into a millions format '''
try:
if int(number) > 100000:
|
else:
n = ('{:.1f}K'.format(int(number)/1000))
except ValueError:
n = 'N/A'
return n
def two_decimals(self, number):
''' takes a float and formats it to only two decimals '''
return ('{:.2f}'.format(float(number)))
def display_yahoo(self):
''' opens the yahoo information, also closes the old information if
there is any '''
self.get_yahoo()
if self.count > 0:
self.label_1.grid_forget()
self.label_2.grid_forget()
self.label_3.grid_forget()
self.label_4.grid_forget()
self.label_6.grid_forget()
self.label_7.grid_forget()
self.count += 1
self.label_1 = Label(self.root, text=self.ticker_name)
self.label_1.grid(row=1 , column=0, sticky = W)
self.label_2 =Label(self.root, text=self.change)
self.label_2.grid(row=3, column=0, sticky = W)
self.label_3 = Label(self.root, text=self.volume)
self.label_3.grid(row=4, column=0, sticky = W)
self.label_4 = Label(self.root, text=self.high_52)
self.label_4.grid(row=5, column=0, sticky = W)
self.label_6 = Label(self.root, text=self.low_52)
self.label_6.grid(row=6, column=0, sticky = W)
self.label_7 = Label(self.root, text=self.float)
self.label_7.grid(row=7, column=0, sticky = W)
def get_yahoo(self):
''' gets the ticker data from the API '''
ticker = str(self.ticker.get()).upper()
url = 'http://finance.yahoo.com/d/quotes.csv?s=' + ticker +'&f=nf6cvkj'
req = urlopen(url)
f = codecs.getreader('utf8')(req)
for row in csv.reader(f):
self.ticker_name = row[0]
self.float = str('Public Float: ') + self.int_to_millions(row[1])
change = row[2].split()
self.change = '$ Change: ' + change[0] + '\n% Change: ' + change[2]
self.volume = 'Volume: ' + self.int_to_millions(row[3])
self.high_52 = '52 Week High: ' + self.two_decimals(row[4])
self.low_52 = '52 Week Low: ' + self.two_decimals(row[5])
# SEC:
def filings(self):
''' prints the links for latest 10-Q and 10-K filings '''
ticker = str(self.ticker.get()).upper()
self.count2 = 0
if self.count2 > 0:
self.link1.grid_forget()
self.link2.grid_forget()
self.count2 += 1
if sec.get_10Q(self, ticker) == '':
self.link1 = Label(self.root, text='Quarterly report not available', fg='black')
self.link1.grid(row=3, column=1, sticky = W)
else:
self.link1 = Label(self.root, text='Most recent quarterly report', fg='blue', cursor='hand2')
self.link1.grid(row=3, column=1, sticky = W)
self.link1.bind('<Button-1>', self.callback1)
if sec.get_10K(self, ticker) == '':
self.link2 = Label(self.root, text='Yearly report not available\nNote: Yearly/Quarterly reports not available for foreign companies', fg='black')
self.link2.grid(row=4, column=1, sticky = W)
else:
self.link2 = Label(self.root, text='Most recent yearly report', fg='blue', cursor='hand2')
self.link2.grid(row=4, column=1, sticky = W)
self.link2.bind('<Button-1>', self.callback2)
def callback1(self, x):
''' creates hyperlink for 10-Q '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10Q(self, ticker))
def callback2(self, x):
''' creates hyperlink for 10-K '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10K(self, ticker))
# News Articles:
def articles(self):
''' prints the links for latest articles '''
ticker = str(self.ticker.get()).upper()
self.news = ns.get_yahoo_news(self, ticker)
self.press = ns.get_yahoo_press_releases(self, ticker)
if self.count > 1:
try:
self.news1.grid_forget()
self.news2.grid_forget()
self.news3.grid_forget()
except AttributeError:
pass
try:
self.error1.grid_forget()
except AttributeError:
pass
try:
self.press1.grid_forget()
self.press2.grid_forget()
self.press3.grid_forget()
except AttributeError:
pass
try:
self.error2.grid_forget()
except AttributeError:
pass
#news
news_headline = Label(self.root, text='Recent news articles')
news_headline.grid(row=6, column=1, sticky = NW)
try:
self.news1 = Label(self.root, text=self.news[0][0], fg='blue', cursor='hand2')
self.news1.grid(row= | n = ('{:.1f}M'.format(int(number)/1000000)) | conditional_block |
MAIN PROGRAMME.py | , text='Enter ticker (e.g. "AAPL", "MSFT", "TSLA"):').grid(row=0, column=0, sticky=W)
self.ticker = StringVar()
Entry(self.root, width=6, textvariable=self.ticker).grid(row=0, column=1, sticky=W)
Button(self.root, text='get info', command=self.get_info).grid(row=0, column=2, sticky=W)
self.chart_type = 'yearly'
def get_info(self):
''' gets the ball rolling after a ticker was put in '''
if not self.ticker.get():
return
self.get_chart()
self.ask_for_specific_chart()
self.display_yahoo()
self.filings()
self.articles()
#chart:
def get_chart(self): #, chart_type='yearly'):
''' gets the chart: currently: daily chart from bigcharts
to do: specify which chart we want '''
ticker = str(self.ticker.get()).upper()
urls = {'yearly': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=&show=',
'1 Month': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=1&show=&time=4',
'5 Days': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=7&show=&time=3',
'Intraday': 'http://bigcharts.marketwatch.com/quickchart/quickchart.asp?symb=' + ticker + '&insttype=&freq=9&show=&time=1'}
url = urls[self.chart_type]
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
try:
image = soup.find('td', {'class' : 'padded vatop'}).contents[1]['src']
image2 = urlretrieve(image, 'image.gif')
self.photo = PhotoImage(file='image.gif')
self.label = Label(self.root, image=self.photo)
self.label.grid(row=2, column=0, sticky=W)
except:
self.label = Label(self.root, text='No chart available\nCheck for typos!')
self.label.grid(row=2, column=0)
def ask_for_specific_chart(self):
''' puts buttons that if clicked will updated the chart to yearly, 1 month,
5 days or intrady '''
self.chart_time = Listbox(self.root)
self.chart_time.insert(0, 'yearly (default)')
self.chart_time.insert(0, '1 Month')
self.chart_time.insert(0, '5 Days')
self.chart_time.insert(0, 'Intraday')
self.chart_time.grid(row=2, column=1, sticky=W)
Button(self.root, text='update chart', command=self.update_chart).grid(row=2, column=1, sticky=SW)
def update_chart(self):
''' takes a chart time and then refreshes the chart '''
curser = self.chart_time.curselection()[-1]
types = {0: 'Intraday', 1: '5 Days', 2: '1 Month', 3: 'yearly'}
self.chart_type = types[curser]
self.get_chart()
#yahoo:
def int_to_millions(self, number):
''' takes an int and turns it into a millions format '''
try:
if int(number) > 100000:
n = ('{:.1f}M'.format(int(number)/1000000))
else:
n = ('{:.1f}K'.format(int(number)/1000))
except ValueError:
n = 'N/A'
return n
def two_decimals(self, number):
''' takes a float and formats it to only two decimals '''
return ('{:.2f}'.format(float(number)))
def display_yahoo(self):
''' opens the yahoo information, also closes the old information if
there is any '''
self.get_yahoo()
if self.count > 0:
self.label_1.grid_forget()
self.label_2.grid_forget()
self.label_3.grid_forget()
self.label_4.grid_forget()
self.label_6.grid_forget()
self.label_7.grid_forget()
self.count += 1
self.label_1 = Label(self.root, text=self.ticker_name)
self.label_1.grid(row=1 , column=0, sticky = W) | self.label_2.grid(row=3, column=0, sticky = W)
self.label_3 = Label(self.root, text=self.volume)
self.label_3.grid(row=4, column=0, sticky = W)
self.label_4 = Label(self.root, text=self.high_52)
self.label_4.grid(row=5, column=0, sticky = W)
self.label_6 = Label(self.root, text=self.low_52)
self.label_6.grid(row=6, column=0, sticky = W)
self.label_7 = Label(self.root, text=self.float)
self.label_7.grid(row=7, column=0, sticky = W)
def get_yahoo(self):
''' gets the ticker data from the API '''
ticker = str(self.ticker.get()).upper()
url = 'http://finance.yahoo.com/d/quotes.csv?s=' + ticker +'&f=nf6cvkj'
req = urlopen(url)
f = codecs.getreader('utf8')(req)
for row in csv.reader(f):
self.ticker_name = row[0]
self.float = str('Public Float: ') + self.int_to_millions(row[1])
change = row[2].split()
self.change = '$ Change: ' + change[0] + '\n% Change: ' + change[2]
self.volume = 'Volume: ' + self.int_to_millions(row[3])
self.high_52 = '52 Week High: ' + self.two_decimals(row[4])
self.low_52 = '52 Week Low: ' + self.two_decimals(row[5])
# SEC:
def filings(self):
''' prints the links for latest 10-Q and 10-K filings '''
ticker = str(self.ticker.get()).upper()
self.count2 = 0
if self.count2 > 0:
self.link1.grid_forget()
self.link2.grid_forget()
self.count2 += 1
if sec.get_10Q(self, ticker) == '':
self.link1 = Label(self.root, text='Quarterly report not available', fg='black')
self.link1.grid(row=3, column=1, sticky = W)
else:
self.link1 = Label(self.root, text='Most recent quarterly report', fg='blue', cursor='hand2')
self.link1.grid(row=3, column=1, sticky = W)
self.link1.bind('<Button-1>', self.callback1)
if sec.get_10K(self, ticker) == '':
self.link2 = Label(self.root, text='Yearly report not available\nNote: Yearly/Quarterly reports not available for foreign companies', fg='black')
self.link2.grid(row=4, column=1, sticky = W)
else:
self.link2 = Label(self.root, text='Most recent yearly report', fg='blue', cursor='hand2')
self.link2.grid(row=4, column=1, sticky = W)
self.link2.bind('<Button-1>', self.callback2)
def callback1(self, x):
''' creates hyperlink for 10-Q '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10Q(self, ticker))
def callback2(self, x):
''' creates hyperlink for 10-K '''
ticker = str(self.ticker.get()).upper()
webbrowser.open_new(sec.get_10K(self, ticker))
# News Articles:
def articles(self):
''' prints the links for latest articles '''
ticker = str(self.ticker.get()).upper()
self.news = ns.get_yahoo_news(self, ticker)
self.press = ns.get_yahoo_press_releases(self, ticker)
if self.count > 1:
try:
self.news1.grid_forget()
self.news2.grid_forget()
self.news3.grid_forget()
except AttributeError:
pass
try:
self.error1.grid_forget()
except AttributeError:
pass
try:
self.press1.grid_forget()
self.press2.grid_forget()
self.press3.grid_forget()
except AttributeError:
pass
try:
self.error2.grid_forget()
except AttributeError:
pass
#news
news_headline = Label(self.root, text='Recent news articles')
news_headline.grid(row=6, column=1, sticky = NW)
try:
self.news1 = Label(self.root, text=self.news[0][0], fg='blue', cursor='hand2')
self.news1.grid(row=7 | self.label_2 =Label(self.root, text=self.change) | random_line_split |
types.go | .Ref) bool
// MonitorProcess creates a monitor between the processes.
// Allowed types for the 'process' value: etf.Pid, gen.ProcessID
// When a process monitor is triggered, a MessageDown message is sent to the caller.
// Note: The monitor request is an asynchronous signal. That is, it takes
// time before the signal reaches its destination.
MonitorProcess(process interface{}) etf.Ref
// DemonitorProcess removes monitor. Returns false if the given reference wasn't found
DemonitorProcess(ref etf.Ref) bool
// Behavior returns the object this process runs on.
Behavior() ProcessBehavior
// GroupLeader returns group leader process. Usually it points to the application process.
GroupLeader() Process
// Parent returns parent process. It returns nil if this process was spawned using Node.Spawn.
Parent() Process
// Context returns process context.
Context() context.Context
// Children returns list of children pid (Application, Supervisor)
Children() ([]etf.Pid, error)
// Links returns list of the process pids this process has linked to.
Links() []etf.Pid
// Monitors returns list of monitors created by this process (monitoring by pid).
Monitors() []etf.Pid
// MonitorsByName returns list of monitors created by this process (monitoring by name).
MonitorsByName() []ProcessID
// MonitoredBy returns list of process pids that monitor this process.
MonitoredBy() []etf.Pid
// Aliases returns list of aliases of this process.
Aliases() []etf.Alias
// RegisterEvent registers a new event which other processes can subscribe to using MonitorEvent
RegisterEvent(event Event, messages ...EventMessage) error
// UnregisterEvent removes the registered event
UnregisterEvent(event Event) error
// MonitorEvent subscribes this process to the given event
MonitorEvent(event Event) error
// DemonitorEvent removes the subscription on the given event
DemonitorEvent(event Event) error
// SendEventMessage sends the given message to the subscribers of the event
SendEventMessage(event Event, message EventMessage) error
// PutSyncRequest registers the given reference as a pending synchronous request
PutSyncRequest(ref etf.Ref) error
// CancelSyncRequest removes the pending synchronous request with the given reference
CancelSyncRequest(ref etf.Ref)
// WaitSyncReply waits for the reply on the given reference within the given timeout
WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error)
// PutSyncReply places the reply (or error) for the pending synchronous request with the given reference
PutSyncReply(ref etf.Ref, term etf.Term, err error) error
// ProcessChannels returns the process channels (mailbox, direct requests, graceful exit)
ProcessChannels() ProcessChannels
}
// ProcessInfo struct with process details
type ProcessInfo struct {
PID etf.Pid
Name string
CurrentFunction string
Status string
MessageQueueLen int
Links []etf.Pid
Monitors []etf.Pid
MonitorsByName []ProcessID
MonitoredBy []etf.Pid
Aliases []etf.Alias
Dictionary etf.Map
TrapExit bool
GroupLeader etf.Pid
Compression bool
}
// ProcessOptions
type ProcessOptions struct {
// Context allows mixing the system context with the custom one. E.g., to limit
// the lifespan using context.WithTimeout. This context MUST be based on the
// other Process' context. Otherwise, you get the error lib.ErrProcessContext
Context context.Context
// MailboxSize defines the length of message queue for the process
MailboxSize uint16
// DirectboxSize defines the length of message queue for the direct requests
DirectboxSize uint16
// GroupLeader
GroupLeader Process
// Env set the process environment variables
Env map[EnvKey]interface{}
// Fallback defines the process to which messages will be forwarded
// if the mailbox overflows. The tag value can be used to
// differentiate the source processes. Forwarded messages are wrapped
// into the MessageFallback struct.
Fallback ProcessFallback
}
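// Below is a rough sketch of spawning a process with a bounded mailbox and a
// fallback destination; the process names, the server type and the node.Spawn
// signature are assumptions used for illustration only. Messages that do not
// fit into the mailbox would arrive at the fallback process wrapped into
// MessageFallback.
//
//	opts := gen.ProcessOptions{
//		MailboxSize: 16,
//		Fallback:    gen.ProcessFallback{Name: "worker_sink", Tag: "overflow"},
//	}
//	worker, err := node.Spawn("worker", opts, &workerServer{})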
// ProcessFallback
type ProcessFallback struct {
Name string
Tag string
}
// RemoteSpawnRequest
type RemoteSpawnRequest struct {
From etf.Pid
Ref etf.Ref
Options RemoteSpawnOptions
}
// RemoteSpawnOptions defines options for RemoteSpawn method
type RemoteSpawnOptions struct {
// Name register associated name with spawned process
Name string
// Monitor enables monitor on the spawned process using provided reference
Monitor etf.Ref
// Link enables link between the calling and spawned processes
Link bool
// Function in order to support {M,F,A} request to the Erlang node
Function string
}
// ProcessChannels
type ProcessChannels struct {
Mailbox <-chan ProcessMailboxMessage
Direct <-chan ProcessDirectMessage
GracefulExit <-chan ProcessGracefulExitRequest
}
// ProcessMailboxMessage
type ProcessMailboxMessage struct {
From etf.Pid
Message interface{}
}
// ProcessDirectMessage
type ProcessDirectMessage struct {
Ref etf.Ref
Message interface{}
Err error
}
// ProcessGracefulExitRequest
type ProcessGracefulExitRequest struct {
From etf.Pid
Reason string
}
// ProcessState
type ProcessState struct {
Process
State interface{}
}
// ProcessBehavior interface contains methods you should implement to make your own process behavior
type ProcessBehavior interface {
ProcessInit(Process, ...etf.Term) (ProcessState, error)
ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process
}
// Core the common set of methods provided by Process and node.Node interfaces
type Core interface {
// ProcessByName returns Process for the given name.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByName(name string) Process
// ProcessByPid returns Process for the given Pid.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByPid(pid etf.Pid) Process
// ProcessByAlias returns Process for the given alias.
// Returns nil if it doesn't exist (not found) or terminated
ProcessByAlias(alias etf.Alias) Process
// ProcessInfo returns the details about given Pid
ProcessInfo(pid etf.Pid) (ProcessInfo, error)
// ProcessList returns the list of running processes
ProcessList() []Process
// MakeRef creates a unique reference within this node
MakeRef() etf.Ref
// IsAlias checks whether the given alias belongs to an alive process on this node.
// If the process died, all aliases are cleaned up and this function returns
// false for the given alias. For an alias from a remote node it always returns false.
IsAlias(etf.Alias) bool
// IsMonitor returns true if the given reference is a monitor
IsMonitor(ref etf.Ref) bool
// RegisterBehavior
RegisterBehavior(group, name string, behavior ProcessBehavior, data interface{}) error
// RegisteredBehavior
RegisteredBehavior(group, name string) (RegisteredBehavior, error)
// RegisteredBehaviorGroup
RegisteredBehaviorGroup(group string) []RegisteredBehavior
// UnregisterBehavior
UnregisterBehavior(group, name string) error
}
// RegisteredBehavior
type RegisteredBehavior struct {
Behavior ProcessBehavior
Data interface{}
}
// ProcessID long notation of registered process {process_name, node_name}
type ProcessID struct {
Name string
Node string
}
// String returns the string representation of a ProcessID value
func (p ProcessID) String() string {
return fmt.Sprintf("<%s:%s>", p.Name, p.Node)
}
// MessageDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorProcess.
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of monitor creation)
// - 'noconnection' (no connection to the node where the monitored process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the monitored process could still be alive)
type MessageDown struct {
Ref etf.Ref // a monitor reference
ProcessID ProcessID // if monitor was created by name
Pid etf.Pid
Reason string
}
// MessageNodeDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode
type MessageNodeDown struct {
Ref etf.Ref
Name string
}
// MessageProxyDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode if the connection to the node was through the proxy
// nodes and one of them went down.
type MessageProxyDown struct {
Ref etf.Ref
Node string
Proxy string
Reason string
}
// MessageExit delivers to Server's HandleInfo callback on enabled trap exit using SetTrapExit(true)
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of link creation)
// - 'noconnection' (no connection to the node where the linked process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the linked process could still be alive)
type MessageExit struct {
Pid etf.Pid
Reason string
}
// MessageFallback delivers to the process specified as a fallback process in ProcessOptions.Fallback.Name if the mailbox has been overflowed
type MessageFallback struct {
Process etf.Pid
Tag string
Message etf.Term
}
// MessageDirectChildren type intended to be used in Process.Children which returns []etf.Pid
// You can handle this type of message in your HandleDirect callback to enable Process.Children
// support for your gen.Server actor.
type MessageDirectChildren struct{}
// IsMessageDown
func | IsMessageDown | identifier_name |
|
types.go | a MessageDown sends to the caller.
// Note: The monitor request is an asynchronous signal. That is, it takes
// time before the signal reaches its destination.
MonitorProcess(process interface{}) etf.Ref
// DemonitorProcess removes monitor. Returns false if the given reference wasn't found
DemonitorProcess(ref etf.Ref) bool
// Behavior returns the object this process runs on.
Behavior() ProcessBehavior
// GroupLeader returns group leader process. Usually it points to the application process.
GroupLeader() Process
// Parent returns parent process. It returns nil if this process was spawned using Node.Spawn.
Parent() Process
// Context returns process context.
Context() context.Context
// Children returns list of children pid (Application, Supervisor)
Children() ([]etf.Pid, error)
// Links returns list of the process pids this process has linked to.
Links() []etf.Pid
// Monitors returns list of monitors created by this process (by pid).
Monitors() []etf.Pid
// MonitorsByName returns list of monitors created by this process (by name).
MonitorsByName() []ProcessID
// MonitoredBy returns list of process pids that monitor this process.
MonitoredBy() []etf.Pid
// Aliases returns list of aliases of this process.
Aliases() []etf.Alias
// RegisterEvent
RegisterEvent(event Event, messages ...EventMessage) error
UnregisterEvent(event Event) error
MonitorEvent(event Event) error
DemonitorEvent(event Event) error
SendEventMessage(event Event, message EventMessage) error
PutSyncRequest(ref etf.Ref) error
CancelSyncRequest(ref etf.Ref)
WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error)
PutSyncReply(ref etf.Ref, term etf.Term, err error) error
ProcessChannels() ProcessChannels
}
// ProcessInfo struct with process details
type ProcessInfo struct {
PID etf.Pid
Name string
CurrentFunction string
Status string
MessageQueueLen int
Links []etf.Pid
Monitors []etf.Pid
MonitorsByName []ProcessID
MonitoredBy []etf.Pid
Aliases []etf.Alias
Dictionary etf.Map
TrapExit bool
GroupLeader etf.Pid
Compression bool
}
// ProcessOptions
type ProcessOptions struct {
// Context allows mixing the system context with the custom one. E.g., to limit
// the lifespan using context.WithTimeout. This context MUST be based on the
// other Process' context. Otherwise, you get the error lib.ErrProcessContext
Context context.Context
// MailboxSize defines the length of message queue for the process
MailboxSize uint16
// DirectboxSize defines the length of message queue for the direct requests
DirectboxSize uint16
// GroupLeader
GroupLeader Process
// Env set the process environment variables
Env map[EnvKey]interface{}
// Fallback defines the process to where messages will be forwarded
// if the mailbox is overflowed. The tag value could be used to
// differentiate the source processes. Forwarded messages are wrapped
// into the MessageFallback struct.
Fallback ProcessFallback
}
// ProcessFallback
type ProcessFallback struct {
Name string
Tag string
}
// RemoteSpawnRequest
type RemoteSpawnRequest struct {
From etf.Pid
Ref etf.Ref
Options RemoteSpawnOptions
}
// RemoteSpawnOptions defines options for RemoteSpawn method
type RemoteSpawnOptions struct {
// Name register associated name with spawned process
Name string
// Monitor enables monitor on the spawned process using provided reference
Monitor etf.Ref
// Link enables link between the calling and spawned processes
Link bool
// Function in order to support {M,F,A} request to the Erlang node
Function string
}
// ProcessChannels
type ProcessChannels struct {
Mailbox <-chan ProcessMailboxMessage
Direct <-chan ProcessDirectMessage
GracefulExit <-chan ProcessGracefulExitRequest
}
// ProcessMailboxMessage
type ProcessMailboxMessage struct {
From etf.Pid
Message interface{}
}
// ProcessDirectMessage
type ProcessDirectMessage struct {
Ref etf.Ref
Message interface{}
Err error
}
// ProcessGracefulExitRequest
type ProcessGracefulExitRequest struct {
From etf.Pid
Reason string
}
// ProcessState
type ProcessState struct {
Process
State interface{}
}
// ProcessBehavior interface contains methods you should implement to make your own process behavior
type ProcessBehavior interface {
ProcessInit(Process, ...etf.Term) (ProcessState, error)
ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process
}
// Core the common set of methods provided by Process and node.Node interfaces
type Core interface {
// ProcessByName returns Process for the given name.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByName(name string) Process
// ProcessByPid returns Process for the given Pid.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByPid(pid etf.Pid) Process
// ProcessByAlias returns Process for the given alias.
// Returns nil if it doesn't exist (not found) or terminated
ProcessByAlias(alias etf.Alias) Process
// ProcessInfo returns the details about given Pid
ProcessInfo(pid etf.Pid) (ProcessInfo, error)
// ProcessList returns the list of running processes
ProcessList() []Process
// MakeRef creates a unique reference within this node
MakeRef() etf.Ref
// IsAlias checks whether the given alias belongs to an alive process on this node.
// If the process died, all aliases are cleaned up and this function returns
// false for the given alias. For an alias from a remote node it always returns false.
IsAlias(etf.Alias) bool
// IsMonitor returns true if the given reference is a monitor
IsMonitor(ref etf.Ref) bool
// RegisterBehavior
RegisterBehavior(group, name string, behavior ProcessBehavior, data interface{}) error
// RegisteredBehavior
RegisteredBehavior(group, name string) (RegisteredBehavior, error)
// RegisteredBehaviorGroup
RegisteredBehaviorGroup(group string) []RegisteredBehavior
// UnregisterBehavior
UnregisterBehavior(group, name string) error
}
// RegisteredBehavior
type RegisteredBehavior struct {
Behavior ProcessBehavior
Data interface{}
}
// ProcessID long notation of registered process {process_name, node_name}
type ProcessID struct {
Name string
Node string
}
// String returns the string representation of a ProcessID value
func (p ProcessID) String() string {
return fmt.Sprintf("<%s:%s>", p.Name, p.Node)
}
// MessageDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorProcess.
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of monitor creation)
// - 'noconnection' (no connection to the node where the monitored process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the monitored process could still be alive)
type MessageDown struct {
Ref etf.Ref // a monitor reference
ProcessID ProcessID // if monitor was created by name
Pid etf.Pid
Reason string
}
// MessageNodeDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode
type MessageNodeDown struct {
Ref etf.Ref
Name string
}
// MessageProxyDown delivers as a message to Server's HandleInfo callback of the process
// that created monitor using MonitorNode if the connection to the node was through the proxy
// nodes and one of them went down.
type MessageProxyDown struct {
Ref etf.Ref
Node string
Proxy string
Reason string
}
// MessageExit delivers to Server's HandleInfo callback on enabled trap exit using SetTrapExit(true)
// Reason values:
// - the exit reason of the process
// - 'noproc' (process did not exist at the time of link creation)
// - 'noconnection' (no connection to the node where the linked process resides)
// - 'noproxy' (no connection to the proxy this node had a connection through; the linked process could still be alive)
type MessageExit struct {
Pid etf.Pid
Reason string
}
// MessageFallback delivers to the process specified as a fallback process in ProcessOptions.Fallback.Name if the mailbox has been overflowed
type MessageFallback struct {
Process etf.Pid
Tag string
Message etf.Term
}
// MessageDirectChildren type intended to be used in Process.Children which returns []etf.Pid
// You can handle this type of message in your HandleDirect callback to enable Process.Children
// support for your gen.Server actor.
type MessageDirectChildren struct{}
// IsMessageDown
func IsMessageDown(message etf.Term) (MessageDown, bool) | {
var md MessageDown
switch m := message.(type) {
case MessageDown:
return m, true
}
return md, false
} | identifier_body |
|
types.go | Send(to interface{}, message etf.Term) error
// SendAfter starts a timer. When the timer expires, the message sends to the process
// identified by 'to'. 'to' can be a Pid, registered local name or
// gen.ProcessID{RegisteredName, NodeName}. Returns cancel function in order to discard
// sending a message. CancelFunc returns bool value. If it returns false, than the timer has
// already expired and the message has been sent.
SendAfter(to interface{}, message etf.Term, after time.Duration) CancelFunc
// Exit initiate a graceful stopping process
Exit(reason string) error
// Kill immediately stops process
Kill()
// CreateAlias creates a new alias for the Process
CreateAlias() (etf.Alias, error)
// DeleteAlias deletes the given alias
DeleteAlias(alias etf.Alias) error
// ListEnv returns a map of configured environment variables.
// It also includes environment variables from the GroupLeader, Parent and Node.
// which are overlapped by priority: Process(Parent(GroupLeader(Node)))
ListEnv() map[EnvKey]interface{}
// SetEnv set environment variable with given name. Use nil value to remove variable with given name.
SetEnv(name EnvKey, value interface{})
// Env returns value associated with given environment name.
Env(name EnvKey) interface{}
// Wait waits until process stopped
Wait()
// WaitWithTimeout waits until process stopped. Return ErrTimeout
// if given timeout is exceeded
WaitWithTimeout(d time.Duration) error
// Link creates a link between the calling process and another process.
// Links are bidirectional and there can only be one link between two processes.
// Repeated calls to Process.Link(Pid) have no effect. If one of the participants
// of a link terminates, it will send an exit signal to the other participant and caused
// termination of the last one. If process set a trap using Process.SetTrapExit(true) the exit signal transorms into the MessageExit and delivers as a regular message.
Link(with etf.Pid) error
// Unlink removes the link, if there is one, between the calling process and
// the process referred to by Pid.
Unlink(with etf.Pid) error
// IsAlive returns whether the process is alive
IsAlive() bool
// SetTrapExit enables/disables the trap on terminate process. When a process is trapping exits,
// it will not terminate when an exit signal is received. Instead, the signal is transformed
// into a 'gen.MessageExit' which is put into the mailbox of the process just like a regular message.
SetTrapExit(trap bool)
// TrapExit returns whether the trap was enabled on this process
TrapExit() bool
// Compression returns true if compression is enabled for this process
Compression() bool
// SetCompression enables/disables compression for the messages sent outside of this node
SetCompression(enabled bool)
// CompressionLevel returns comression level for the process
CompressionLevel() int
// SetCompressionLevel defines compression level. Value must be in range:
// 1 (best speed) ... 9 (best compression), or -1 for the default compression level
SetCompressionLevel(level int) bool
// CompressionThreshold returns compression threshold for the process
CompressionThreshold() int
// SetCompressionThreshold defines the minimal size for the message that must be compressed
// Value must be greater than DefaultCompressionThreshold (1024)
SetCompressionThreshold(threshold int) bool
// MonitorNode creates monitor between the current process and node. If Node fails or does not exist,
// the message MessageNodeDown is delivered to the process.
MonitorNode(name string) etf.Ref
// DemonitorNode removes monitor. Returns false if the given reference wasn't found
DemonitorNode(ref etf.Ref) bool
// MonitorProcess creates monitor between the processes.
// Allowed types for the 'process' value: etf.Pid, gen.ProcessID
// When a process monitor is triggered, a MessageDown sends to the caller.
// Note: The monitor request is an asynchronous signal. That is, it takes
// time before the signal reaches its destination.
MonitorProcess(process interface{}) etf.Ref
// DemonitorProcess removes monitor. Returns false if the given reference wasn't found
DemonitorProcess(ref etf.Ref) bool
// Behavior returns the object this process runs on.
Behavior() ProcessBehavior
// GroupLeader returns group leader process. Usually it points to the application process.
GroupLeader() Process
// Parent returns parent process. It returns nil if this process was spawned using Node.Spawn.
Parent() Process
// Context returns process context.
Context() context.Context
// Children returns list of children pid (Application, Supervisor)
Children() ([]etf.Pid, error)
// Links returns list of the process pids this process has linked to.
Links() []etf.Pid
// Monitors returns list of monitors created by this process (by pid).
Monitors() []etf.Pid
// MonitorsByName returns list of monitors created by this process (by name).
MonitorsByName() []ProcessID
// MonitoredBy returns list of process pids that monitor this process.
MonitoredBy() []etf.Pid
// Aliases returns list of aliases of this process.
Aliases() []etf.Alias
// RegisterEvent
RegisterEvent(event Event, messages ...EventMessage) error
UnregisterEvent(event Event) error
MonitorEvent(event Event) error
DemonitorEvent(event Event) error
SendEventMessage(event Event, message EventMessage) error
PutSyncRequest(ref etf.Ref) error
CancelSyncRequest(ref etf.Ref)
WaitSyncReply(ref etf.Ref, timeout int) (etf.Term, error)
PutSyncReply(ref etf.Ref, term etf.Term, err error) error
ProcessChannels() ProcessChannels
}
// ProcessInfo struct with process details
type ProcessInfo struct {
PID etf.Pid
Name string
CurrentFunction string
Status string
MessageQueueLen int
Links []etf.Pid
Monitors []etf.Pid
MonitorsByName []ProcessID
MonitoredBy []etf.Pid
Aliases []etf.Alias
Dictionary etf.Map
TrapExit bool
GroupLeader etf.Pid
Compression bool
}
// ProcessOptions
type ProcessOptions struct {
// Context allows mixing the system context with the custom one. E.g., to limit
// the lifespan using context.WithTimeout. This context MUST be based on the
// other Process' context. Otherwise, you get the error lib.ErrProcessContext
Context context.Context
// MailboxSize defines the length of message queue for the process
MailboxSize uint16
// DirectboxSize defines the length of message queue for the direct requests
DirectboxSize uint16
// GroupLeader
GroupLeader Process
// Env set the process environment variables
Env map[EnvKey]interface{}
// Fallback defines the process to where messages will be forwarded
// if the mailbox is overflowed. The tag value could be used to
// differentiate the source processes. Forwarded messages are wrapped
// into the MessageFallback struct.
Fallback ProcessFallback
}
// ProcessFallback
type ProcessFallback struct {
Name string
Tag string
}
// RemoteSpawnRequest
type RemoteSpawnRequest struct {
From etf.Pid
Ref etf.Ref
Options RemoteSpawnOptions
}
// RemoteSpawnOptions defines options for RemoteSpawn method
type RemoteSpawnOptions struct {
// Name register associated name with spawned process
Name string
// Monitor enables monitor on the spawned process using provided reference
Monitor etf.Ref
// Link enables link between the calling and spawned processes
Link bool
// Function in order to support {M,F,A} request to the Erlang node
Function string
}
// ProcessChannels
type ProcessChannels struct {
Mailbox <-chan ProcessMailboxMessage
Direct <-chan ProcessDirectMessage
GracefulExit <-chan ProcessGracefulExitRequest
}
| From etf.Pid
Message interface{}
}
// ProcessDirectMessage
type ProcessDirectMessage struct {
Ref etf.Ref
Message interface{}
Err error
}
// ProcessGracefulExitRequest
type ProcessGracefulExitRequest struct {
From etf.Pid
Reason string
}
// ProcessState
type ProcessState struct {
Process
State interface{}
}
// ProcessBehavior interface contains methods you should implement to make your own process behavior
type ProcessBehavior interface {
ProcessInit(Process, ...etf.Term) (ProcessState, error)
ProcessLoop(ProcessState, chan<- bool) string // method which implements control flow of process
}
// Core the common set of methods provided by Process and node.Node interfaces
type Core interface {
// ProcessByName returns Process for the given name.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByName(name string) Process
// ProcessByPid returns Process for the given Pid.
// Returns nil if it doesn't exist (not found) or terminated.
ProcessByPid(pid etf.Pid) Process
// ProcessByAlias returns Process for | // ProcessMailboxMessage
type ProcessMailboxMessage struct { | random_line_split |
ai.py | ', 'board score wc bc ep kp depth captured')):
""" A state of a chess game
board -- a 120 char representation of the board
score -- the board evaluation
wc -- the castling rights, [west/queen side, east/king side]
bc -- the opponent castling rights, [west/king side, east/queen side]
ep - the en passant square
kp - the king passant square
depth - the node depth of the position
captured - the piece that was captured as the result of the last move
"""
def gen_moves(self):
for i, p in enumerate(self.board):
# i - initial position index
# p - piece code
# if the piece doesn't belong to us, skip it
if not p.isupper(): continue
for d in directions[p]:
# d - potential action for a given piece
|
def rotate(self):
# Rotates the board, preserving enpassant
# Allows logic to be reused, as only one board configuration must be considered
return Position(
self.board[::-1].swapcase(), -self.score, self.bc, self.wc,
119 - self.ep if self.ep else 0,
119 - self.kp if self.kp else 0, self.depth, None)
def nullmove(self):
# Like rotate, but clears ep and kp
return Position(
self.board[::-1].swapcase(), -self.score,
self.bc, self.wc, 0, 0, self.depth + 1, None)
def move(self, move):
# i - original position index
# j - final position index
i, j = move
# p - piece code of moving piece
# q - piece code at final square
p, q = self.board[i], self.board[j]
# put replaces string character at i with character p
put = lambda board, i, p: board[:i] + p + board[i + 1:]
# copy variables and reset eq and kp and increment depth
board = self.board
wc, bc, ep, kp, depth = self.wc, self.bc, 0, 0, self.depth + 1
# score = self.score + self.value(move)
# perform the move
board = put(board, j, board[i])
board = put(board, i, '.')
# update castling rights, if we move our rook or capture the opponent's rook
if i == A1: wc = (False, wc[1])
if i == H1: wc = (wc[0], False)
if j == A8: bc = (bc[0], False)
if j == H8: bc = (False, bc[1])
# Castling Logic
if p == 'K':
wc = (False, False)
if abs(j - i) == 2:
kp = (i + j) // 2
board = put(board, A1 if j < i else H1, '.')
board = put(board, kp, 'R')
# Pawn promotion, double move, and en passant capture
if p == 'P':
if A8 <= j <= H8:
# Promote the pawn to Queen
board = put(board, j, 'Q')
if j - i == 2 * N:
ep = i + N
if j - i in (N + W, N + E) and q == '.':
board = put(board, j + S, '.')
# Rotate the returned position so it's ready for the next player
return Position(board, 0, wc, bc, ep, kp, depth, q.upper()).rotate()
def value(self):
score = 0
# evaluate material advantage
for k, p in enumerate(self.board):
# k - position index
# p - piece code
if p.isupper(): score += piece_values[p]
if p.islower(): score -= piece_values[p.upper()]
return score
def is_check(self):
# returns if the state represented by the current position is check
op_board = self.nullmove()
for move in op_board.gen_moves():
i, j = move
p, q = op_board.board[i], op_board.board[j]
# opponent can take our king
if q == 'k':
return True
return False
def is_quiescent(self):
return self.is_check() or self.captured
def z_hash(self):
# Zobrist Hash of board position
# strip all whitespace from board
stripboard = re.sub(r'[\s+]', '', self.board)
h = 0
for i in range(0, 64):
j = z_indicies.get(stripboard[i], 0)
h = xor(h, z_table[i][j - 1])
return h
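# --- illustrative sketch, not part of the original module ---
# z_hash above is a Zobrist hash: every (square, piece) pair maps to a fixed
# random bitstring and the hash of a position is the XOR of the entries for
# the occupied squares. The real z_table / z_indicies live elsewhere in this
# module; the toy table below only demonstrates the XOR property that makes
# incremental updates cheap.
#
# import random
# random.seed(42)
# toy_table = [[random.getrandbits(64) for _ in range(12)] for _ in range(64)]
#
# def toy_hash(occupied):  # occupied: {square index: piece index 0..11}
#     h = 0
#     for sq, piece in occupied.items():
#         h ^= toy_table[sq][piece]
#     return h
#
# h = toy_hash({12: 0, 52: 6})
# # moving the piece on square 12 to square 20 is an O(1) hash update:
# h = h ^ toy_table[12][0] ^ toy_table[20][0]
# assert h == toy_hash({20: 0, 52: 6})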
####################################
# square formatting helper functions
####################################
def square_index(file_index, rank_index):
# Gets a square index by file and rank index
file_index = ord(file_index.upper()) - 65
rank_index = int(rank_index) - 1
return A1 + file_index - (10 * rank_index)
def square_file(square_index):
file_names = ["a", "b", "c", "d", "e", "f", "g", "h"]
return file_names[(square_index % 10) - 1]
def square_rank(square_index):
return 10 - (square_index // 10)
def square_san(square_index):
# convert square index (21 - 98) to Standard Algebraic Notation
square = namedtuple('square', 'file rank')
return square(square_file(square_index), square_rank(square_index))
def fen_to_position(fen_string):
# generate a Position object from a FEN string
board, player, castling, enpassant, halfmove, move = fen_string.split()
board = board.split('/')
board_out = ' \n \n'
for row in board:
board_out += ' '
for piece in row:
if piece.isdigit():
for _ in range(int(piece)):
board_out += '.'
else:
board_out += piece
board_out += '\n'
board_out += ' \n \n'
wc = (False, False)
bc = (False, False)
if 'K' in castling: wc = (True, wc[1])
if 'Q' in castling: wc = (wc[0], True)
if 'k' in castling: bc = (True, bc[1])
if 'q' in castling: bc = (bc[0], True)
if enpassant != '-':
enpassant = square_index(enpassant[0], enpassant[1])
else:
enpassant = 0
# Position(board score wc bc ep kp depth)
if player == 'w':
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None)
else:
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None).rotate()
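# --- illustrative sketch, not part of the original module ---
# fen_to_position above expands the piece-placement field digit-by-digit;
# for the standard starting position the expansion step alone would be:
#   start = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
#   rows = []
#   for row in start.split()[0].split('/'):
#       rows.append(''.join('.' * int(c) if c.isdigit() else c for c in row))
#   # rows == ['rnbqkbnr', 'pppppppp', '........', ..., 'PPPPPPPP', 'RNBQKBNR']
# the full function also records castling rights ('KQkq' -> wc=(True, True),
# bc=(True, True)) and the en passant square ('-' -> 0) before building the
# padded 120-char board.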
class AI(BaseAI):
""" The basic AI functions that are the same between games. """
def get_name(self):
""" This is the name you send to the server so your AI will control the
player named this string.
Returns
str: The name of your Player.
"""
return "Sawyer McLane"
def start(self):
""" This is called once the game starts and your AI knows its playerID
and game. You can initialize your AI here.
"""
# build the initial board Position from the game's FEN and set up the transposition table
self.board = fen_to_position(self.game.fen)
self.transposition_table = dict()
def game_updated(self):
""" This is called | for j in count(i + d, d):
# j - final position index
# q - occupying piece code
q = self.board[j]
# Stay inside the board, and off friendly pieces
if q.isspace() or q.isupper(): break
# Pawn move, double move and capture
if p == 'P' and d in (N, N + N) and q != '.': break
if p == 'P' and d == N + N and (i < A1 + N or self.board[i + N] != '.'): break
if p == 'P' and d in (N + W, N + E) and q == '.' and j not in (self.ep, self.kp): break
# Move it
yield (i, j)
# Stop non-sliders from sliding and sliding after captures
if p in 'PNK' or q.islower(): break
# Castling by sliding rook next to king
if i == A1 and self.board[j + E] == 'K' and self.wc[0]: yield (j + E, j + W)
if i == H1 and self.board[j + W] == 'K' and self.wc[1]: yield (j + W, j + E) | conditional_block |
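The sliding-move logic above leans on a 10x12 "mailbox" board: the playing area lives at indices 21-98 of the 120-character string and every direction is a fixed index offset. The constants themselves are defined in a part of ai.py that this excerpt does not show; the values below are inferred from square_rank/square_file and the pawn rules, so treat them as an assumption.

A1, H1, A8, H8 = 91, 98, 21, 28   # corners of the 8x8 playing area
N, E, S, W = -10, 1, 10, -1       # one-step offsets inside the 120-char board

# compound directions are built from these, e.g. knight jumps:
knight_steps = (N + N + E, N + N + W, S + S + E, S + S + W,
                E + E + N, E + E + S, W + W + N, W + W + S)

# the whitespace padding around the playing area means any off-board step
# lands on a space or newline, which gen_moves treats as "stop here"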
ai.py | wc[1])
if i == H1: wc = (wc[0], False)
if j == A8: bc = (bc[0], False)
if j == H8: bc = (False, bc[1])
# Castling Logic
if p == 'K':
wc = (False, False)
if abs(j - i) == 2:
kp = (i + j) // 2
board = put(board, A1 if j < i else H1, '.')
board = put(board, kp, 'R')
# Pawn promotion, double move, and en passant capture
if p == 'P':
if A8 <= j <= H8:
# Promote the pawn to Queen
board = put(board, j, 'Q')
if j - i == 2 * N:
ep = i + N
if j - i in (N + W, N + E) and q == '.':
board = put(board, j + S, '.')
# Rotate the returned position so it's ready for the next player
return Position(board, 0, wc, bc, ep, kp, depth, q.upper()).rotate()
def value(self):
score = 0
# evaluate material advantage
for k, p in enumerate(self.board):
# k - position index
# p - piece code
if p.isupper(): score += piece_values[p]
if p.islower(): score -= piece_values[p.upper()]
return score
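# --- illustrative note, not part of the original module ---
# piece_values is defined in a part of ai.py that this excerpt does not
# show. With classical material weights it could look like, for example:
#   piece_values = {'P': 1, 'N': 3, 'B': 3, 'R': 5, 'Q': 9, 'K': 1000}
# so a side that is up a rook for a knight would score about +2 here,
# always from the point of view of the side to move (the board is rotated
# after every move, so uppercase pieces are always "ours").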
def is_check(self):
# returns if the state represented by the current position is check
op_board = self.nullmove()
for move in op_board.gen_moves():
i, j = move
p, q = op_board.board[i], op_board.board[j]
# opponent can take our king
if q == 'k':
return True
return False
def is_quiescent(self):
return self.is_check() or self.captured
def z_hash(self):
# Zobrist Hash of board position
# strip all whitespace from board
stripboard = re.sub(r'[\s+]', '', self.board)
h = 0
for i in range(0, 64):
j = z_indicies.get(stripboard[i], 0)
h = xor(h, z_table[i][j - 1])
return h
####################################
# square formatting helper functions
####################################
def square_index(file_index, rank_index):
# Gets a square index by file and rank index
file_index = ord(file_index.upper()) - 65
rank_index = int(rank_index) - 1
return A1 + file_index - (10 * rank_index)
def square_file(square_index):
file_names = ["a", "b", "c", "d", "e", "f", "g", "h"]
return file_names[(square_index % 10) - 1]
def square_rank(square_index):
return 10 - (square_index // 10)
def square_san(square_index):
# convert square index (21 - 98) to Standard Algebraic Notation
square = namedtuple('square', 'file rank')
return square(square_file(square_index), square_rank(square_index))
def fen_to_position(fen_string):
# generate a Position object from a FEN string
board, player, castling, enpassant, halfmove, move = fen_string.split()
board = board.split('/')
board_out = ' \n \n'
for row in board:
board_out += ' '
for piece in row:
if piece.isdigit():
for _ in range(int(piece)):
board_out += '.'
else:
board_out += piece
board_out += '\n'
board_out += ' \n \n'
wc = (False, False)
bc = (False, False)
if 'K' in castling: wc = (True, wc[1])
if 'Q' in castling: wc = (wc[0], True)
if 'k' in castling: bc = (True, bc[1])
if 'q' in castling: bc = (bc[0], True)
if enpassant != '-':
enpassant = square_index(enpassant[0], enpassant[1])
else:
enpassant = 0
# Position(board score wc bc ep kp depth)
if player == 'w':
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None)
else:
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None).rotate()
class AI(BaseAI):
""" The basic AI functions that are the same between games. """
def get_name(self):
""" This is the name you send to the server so your AI will control the
player named this string.
Returns
str: The name of your Player.
"""
return "Sawyer McLane"
def start(self):
""" This is called once the game starts and your AI knows its playerID
and game. You can initialize your AI here.
"""
# build the initial board Position from the game's FEN and set up the transposition table
self.board = fen_to_position(self.game.fen)
self.transposition_table = dict()
def game_updated(self):
""" This is called every time the game's state updates, so if you are
tracking anything you can update it here.
"""
# replace with your game updated logic
self.update_board()
def end(self, won, reason):
""" This is called when the game ends, you can clean up your data and
dump files here if need be.
Args:
won (bool): True means you won, False means you lost.
reason (str): The human readable string explaining why you won or
lost.
"""
pass
# replace with your end logic
def run_turn(self):
""" This is called every time it is this AI.player's turn.
Returns:
bool: Represents if you want to end your turn. True means end your
turn, False means to keep your turn going and re-call this
function.
"""
# Here is where you'll want to code your AI.
# We've provided sample code that:
# 1) prints the board to the console
# 2) prints the opponent's last move to the console
# 3) prints how much time remaining this AI has to calculate moves
# 4) makes a random (and probably invalid) move.
# 1) print the board to the console
self.print_current_board()
# 2) print the opponent's last move to the console
if len(self.game.moves) > 0:
print("Opponent's Last Move: '" + self.game.moves[-1].san + "'")
# 3) print how much time remaining this AI has to calculate moves
print("Time Remaining: " + str(self.player.time_remaining) + " ns")
# 4) make a move
(piece_index, move_index) = self.tlabiddl_minimax()
# flip board indices if playing from other side
if self.player.color == "Black":
piece_index = 119 - piece_index
move_index = 119 - move_index
# convert indices to SAN
piece_pos = square_san(piece_index)
move_pos = square_san(move_index)
piece = self.get_game_piece(piece_pos.rank, piece_pos.file)
piece.move(move_pos.file, move_pos.rank, promotionType="Queen")
return True # to signify we are done with our turn.
def get_game_piece(self, rank, file):
# used to go between rank and file notation and actual game object
return next((piece for piece in self.game.pieces if piece.rank == rank and piece.file == file), None)
def update_board(self):
# update current board state by converting current FEN to Position object
self.board = fen_to_position(self.game.fen)
def tlabiddl_minimax(self):
# Time Limited Alpha Beta Iterative-Deepening Depth-Limited MiniMax
initial_board = self.board
l_depth = 0
depth_limit = 4
# time limiting stuff
time_limit = 10 # 10 seconds to find the best move
start_time = timer()
# history stuff
history = defaultdict(dict)
if initial_board.z_hash() in self.transposition_table.keys():
return self.transposition_table[initial_board.z_hash()]
def min_play(board, alpha=(-inf), beta=(inf)):
if board.depth >= l_depth:
return board.value()
best_score = inf
for move in board.gen_moves():
next_board = board.move(move)
if next_board.is_check(): continue
if next_board.is_quiescent():
score = quiescence(next_board, alpha, beta)
else:
score = max_play(next_board, alpha, beta)
if score < best_score:
best_move = move
best_score = score
if score <= alpha:
return score
beta = min(beta, score)
return best_score
def | max_play | identifier_name |
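The middle elided from the row above is just the name max_play; its body is not shown in this excerpt. Presumably it mirrors min_play with the roles of alpha and beta swapped. As a self-contained illustration of that alpha-beta symmetry (a toy game tree, not the engine's actual implementation):

from math import inf

def max_play(node, alpha=-inf, beta=inf):
    if isinstance(node, (int, float)):          # leaf: static evaluation
        return node
    best = -inf
    for child in node:
        best = max(best, min_play(child, alpha, beta))
        if best >= beta:                        # fail-high cutoff
            return best
        alpha = max(alpha, best)
    return best

def min_play(node, alpha=-inf, beta=inf):
    if isinstance(node, (int, float)):
        return node
    best = inf
    for child in node:
        best = min(best, max_play(child, alpha, beta))
        if best <= alpha:                       # fail-low cutoff
            return best
        beta = min(beta, best)
    return best

# tiny two-ply tree: the maximizer should pick the middle branch (value 5)
tree = [[3, 12], [5, 8], [2, 14]]
assert max_play(tree) == 5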
|
ai.py | ', 'board score wc bc ep kp depth captured')):
""" A state of a chess game
board -- a 120 char representation of the board
score -- the board evaluation
wc -- the castling rights, [west/queen side, east/king side]
bc -- the opponent castling rights, [west/king side, east/queen side]
ep - the en passant square
kp - the king passant square
depth - the node depth of the position
captured - the piece that was captured as the result of the last move
"""
def gen_moves(self):
for i, p in enumerate(self.board):
# i - initial position index
# p - piece code
# if the piece doesn't belong to us, skip it
if not p.isupper(): continue
for d in directions[p]:
# d - potential action for a given piece
for j in count(i + d, d):
# j - final position index
# q - occupying piece code
q = self.board[j]
# Stay inside the board, and off friendly pieces
if q.isspace() or q.isupper(): break
# Pawn move, double move and capture
if p == 'P' and d in (N, N + N) and q != '.': break
if p == 'P' and d == N + N and (i < A1 + N or self.board[i + N] != '.'): break
if p == 'P' and d in (N + W, N + E) and q == '.' and j not in (self.ep, self.kp): break
# Move it
yield (i, j)
# Stop non-sliders from sliding and sliding after captures
if p in 'PNK' or q.islower(): break
# Castling by sliding rook next to king
if i == A1 and self.board[j + E] == 'K' and self.wc[0]: yield (j + E, j + W)
if i == H1 and self.board[j + W] == 'K' and self.wc[1]: yield (j + W, j + E)
def rotate(self):
# Rotates the board, preserving enpassant
# Allows logic to be reused, as only one board configuration must be considered
return Position(
self.board[::-1].swapcase(), -self.score, self.bc, self.wc,
119 - self.ep if self.ep else 0,
119 - self.kp if self.kp else 0, self.depth, None)
def nullmove(self):
# Like rotate, but clears ep and kp
return Position(
self.board[::-1].swapcase(), -self.score,
self.bc, self.wc, 0, 0, self.depth + 1, None)
def move(self, move):
# i - original position index
# j - final position index
i, j = move
# p - piece code of moving piece
# q - piece code at final square
p, q = self.board[i], self.board[j]
# put replaces string character at i with character p
put = lambda board, i, p: board[:i] + p + board[i + 1:]
# copy variables and reset eq and kp and increment depth
board = self.board
wc, bc, ep, kp, depth = self.wc, self.bc, 0, 0, self.depth + 1
# score = self.score + self.value(move)
# perform the move
board = put(board, j, board[i])
board = put(board, i, '.')
# update castling rights, if we move our rook or capture the opponent's rook
if i == A1: wc = (False, wc[1])
if i == H1: wc = (wc[0], False)
if j == A8: bc = (bc[0], False)
if j == H8: bc = (False, bc[1])
# Castling Logic
if p == 'K':
wc = (False, False)
if abs(j - i) == 2:
kp = (i + j) // 2
board = put(board, A1 if j < i else H1, '.')
board = put(board, kp, 'R')
# Pawn promotion, double move, and en passant capture
if p == 'P':
if A8 <= j <= H8:
# Promote the pawn to Queen
board = put(board, j, 'Q')
if j - i == 2 * N:
ep = i + N
if j - i in (N + W, N + E) and q == '.':
board = put(board, j + S, '.')
# Rotate the returned position so it's ready for the next player
return Position(board, 0, wc, bc, ep, kp, depth, q.upper()).rotate()
def value(self):
score = 0
# evaluate material advantage
for k, p in enumerate(self.board):
# k - position index
# p - piece code
if p.isupper(): score += piece_values[p]
if p.islower(): score -= piece_values[p.upper()]
return score
def is_check(self):
# returns if the state represented by the current position is check
op_board = self.nullmove()
for move in op_board.gen_moves():
i, j = move
p, q = op_board.board[i], op_board.board[j]
# opponent can take our king
if q == 'k':
return True
return False
def is_quiescent(self):
return self.is_check() or self.captured
def z_hash(self):
# Zobrist Hash of board position
# strip all whitespace from board
stripboard = re.sub(r'[\s+]', '', self.board)
h = 0
for i in range(0, 64):
j = z_indicies.get(stripboard[i], 0)
h = xor(h, z_table[i][j - 1])
return h
####################################
# square formatting helper functions
####################################
def square_index(file_index, rank_index):
# Gets a square index by file and rank index
file_index = ord(file_index.upper()) - 65
rank_index = int(rank_index) - 1 | file_names = ["a", "b", "c", "d", "e", "f", "g", "h"]
return file_names[(square_index % 10) - 1]
def square_rank(square_index):
return 10 - (square_index // 10)
def square_san(square_index):
# convert square index (21 - 98) to Standard Algebraic Notation
square = namedtuple('square', 'file rank')
return square(square_file(square_index), square_rank(square_index))
def fen_to_position(fen_string):
# generate a Position object from a FEN string
board, player, castling, enpassant, halfmove, move = fen_string.split()
board = board.split('/')
board_out = ' \n \n'
for row in board:
board_out += ' '
for piece in row:
if piece.isdigit():
for _ in range(int(piece)):
board_out += '.'
else:
board_out += piece
board_out += '\n'
board_out += ' \n \n'
wc = (False, False)
bc = (False, False)
if 'K' in castling: wc = (True, wc[1])
if 'Q' in castling: wc = (wc[0], True)
if 'k' in castling: bc = (True, bc[1])
if 'q' in castling: bc = (bc[0], True)
if enpassant != '-':
enpassant = square_index(enpassant[0], enpassant[1])
else:
enpassant = 0
# Position(board score wc bc ep kp depth)
if player == 'w':
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None)
else:
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None).rotate()
class AI(BaseAI):
""" The basic AI functions that are the same between games. """
def get_name(self):
""" This is the name you send to the server so your AI will control the
player named this string.
Returns
str: The name of your Player.
"""
return "Sawyer McLane"
def start(self):
""" This is called once the game starts and your AI knows its playerID
and game. You can initialize your AI here.
"""
# build the initial board Position from the game's FEN and set up the transposition table
self.board = fen_to_position(self.game.fen)
self.transposition_table = dict()
def game_updated(self):
""" This is called every time | return A1 + file_index - (10 * rank_index)
def square_file(square_index): | random_line_split |
ai.py | the last move
"""
def gen_moves(self):
for i, p in enumerate(self.board):
# i - initial position index
# p - piece code
# if the piece doesn't belong to us, skip it
if not p.isupper(): continue
for d in directions[p]:
# d - potential action for a given piece
for j in count(i + d, d):
# j - final position index
# q - occupying piece code
q = self.board[j]
# Stay inside the board, and off friendly pieces
if q.isspace() or q.isupper(): break
# Pawn move, double move and capture
if p == 'P' and d in (N, N + N) and q != '.': break
if p == 'P' and d == N + N and (i < A1 + N or self.board[i + N] != '.'): break
if p == 'P' and d in (N + W, N + E) and q == '.' and j not in (self.ep, self.kp): break
# Move it
yield (i, j)
# Stop non-sliders from sliding and sliding after captures
if p in 'PNK' or q.islower(): break
# Castling by sliding rook next to king
if i == A1 and self.board[j + E] == 'K' and self.wc[0]: yield (j + E, j + W)
if i == H1 and self.board[j + W] == 'K' and self.wc[1]: yield (j + W, j + E)
def rotate(self):
# Rotates the board, preserving enpassant
# Allows logic to be reused, as only one board configuration must be considered
return Position(
self.board[::-1].swapcase(), -self.score, self.bc, self.wc,
119 - self.ep if self.ep else 0,
119 - self.kp if self.kp else 0, self.depth, None)
def nullmove(self):
# Like rotate, but clears ep and kp
return Position(
self.board[::-1].swapcase(), -self.score,
self.bc, self.wc, 0, 0, self.depth + 1, None)
def move(self, move):
# i - original position index
# j - final position index
i, j = move
# p - piece code of moving piece
# q - piece code at final square
p, q = self.board[i], self.board[j]
# put replaces string character at i with character p
put = lambda board, i, p: board[:i] + p + board[i + 1:]
# copy variables and reset eq and kp and increment depth
board = self.board
wc, bc, ep, kp, depth = self.wc, self.bc, 0, 0, self.depth + 1
# score = self.score + self.value(move)
# perform the move
board = put(board, j, board[i])
board = put(board, i, '.')
# update castling rights, if we move our rook or capture the opponent's rook
if i == A1: wc = (False, wc[1])
if i == H1: wc = (wc[0], False)
if j == A8: bc = (bc[0], False)
if j == H8: bc = (False, bc[1])
# Castling Logic
if p == 'K':
wc = (False, False)
if abs(j - i) == 2:
kp = (i + j) // 2
board = put(board, A1 if j < i else H1, '.')
board = put(board, kp, 'R')
# Pawn promotion, double move, and en passant capture
if p == 'P':
if A8 <= j <= H8:
# Promote the pawn to Queen
board = put(board, j, 'Q')
if j - i == 2 * N:
ep = i + N
if j - i in (N + W, N + E) and q == '.':
board = put(board, j + S, '.')
# Rotate the returned position so it's ready for the next player
return Position(board, 0, wc, bc, ep, kp, depth, q.upper()).rotate()
def value(self):
score = 0
# evaluate material advantage
for k, p in enumerate(self.board):
# k - position index
# p - piece code
if p.isupper(): score += piece_values[p]
if p.islower(): score -= piece_values[p.upper()]
return score
def is_check(self):
# returns if the state represented by the current position is check
op_board = self.nullmove()
for move in op_board.gen_moves():
i, j = move
p, q = op_board.board[i], op_board.board[j]
# opponent can take our king
if q == 'k':
return True
return False
def is_quiescent(self):
return self.is_check() or self.captured
def z_hash(self):
# Zobrist Hash of board position
# strip all whitespace from board
stripboard = re.sub(r'[\s+]', '', self.board)
h = 0
for i in range(0, 64):
j = z_indicies.get(stripboard[i], 0)
h = xor(h, z_table[i][j - 1])
return h
####################################
# square formatting helper functions
####################################
def square_index(file_index, rank_index):
# Gets a square index by file and rank index
file_index = ord(file_index.upper()) - 65
rank_index = int(rank_index) - 1
return A1 + file_index - (10 * rank_index)
def square_file(square_index):
file_names = ["a", "b", "c", "d", "e", "f", "g", "h"]
return file_names[(square_index % 10) - 1]
def square_rank(square_index):
return 10 - (square_index // 10)
def square_san(square_index):
# convert square index (21 - 98) to Standard Algebraic Notation
square = namedtuple('square', 'file rank')
return square(square_file(square_index), square_rank(square_index))
def fen_to_position(fen_string):
# generate a Position object from a FEN string
board, player, castling, enpassant, halfmove, move = fen_string.split()
board = board.split('/')
board_out = ' \n \n'
for row in board:
board_out += ' '
for piece in row:
if piece.isdigit():
for _ in range(int(piece)):
board_out += '.'
else:
board_out += piece
board_out += '\n'
board_out += ' \n \n'
wc = (False, False)
bc = (False, False)
if 'K' in castling: wc = (True, wc[1])
if 'Q' in castling: wc = (wc[0], True)
if 'k' in castling: bc = (True, bc[1])
if 'q' in castling: bc = (bc[0], True)
if enpassant != '-':
enpassant = square_index(enpassant[0], enpassant[1])
else:
enpassant = 0
# Position(board score wc bc ep kp depth)
if player == 'w':
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None)
else:
return Position(board_out, 0, wc, bc, enpassant, 0, 0, None).rotate()
class AI(BaseAI):
""" The basic AI functions that are the same between games. """
def get_name(self):
""" This is the name you send to the server so your AI will control the
player named this string.
Returns
str: The name of your Player.
"""
return "Sawyer McLane"
def start(self):
""" This is called once the game starts and your AI knows its playerID
and game. You can initialize your AI here.
"""
# build the initial board Position from the game's FEN and set up the transposition table
self.board = fen_to_position(self.game.fen)
self.transposition_table = dict()
def game_updated(self):
""" This is called every time the game's state updates, so if you are
tracking anything you can update it here.
"""
# replace with your game updated logic
self.update_board()
def end(self, won, reason):
| """ This is called when the game ends, you can clean up your data and
dump files here if need be.
Args:
won (bool): True means you won, False means you lost.
reason (str): The human readable string explaining why you won or
lost.
"""
pass
# replace with your end logic | identifier_body |
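A worked example of the square conversion arithmetic used throughout the AI above. A1 is defined outside this excerpt; the value 91 below is an assumption, chosen to be consistent with square_rank, square_file and the "21 - 98" comment, and the helpers are re-stated here only to make the snippet self-contained.

A1 = 91  # assumed index of square a1 in the 120-char board

def square_index(file_index, rank_index):
    file_index = ord(file_index.upper()) - 65
    rank_index = int(rank_index) - 1
    return A1 + file_index - (10 * rank_index)

def square_file(square_index):
    return "abcdefgh"[(square_index % 10) - 1]

def square_rank(square_index):
    return 10 - (square_index // 10)

# worked example: e2 -> 91 + 4 - 10 = 85, and back again
idx = square_index('e', '2')
assert idx == 85
assert (square_file(idx), square_rank(idx)) == ('e', 2)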
|
helper.py |
def create_dir(directory):
"""
create directory recursively
"""
try:
os.makedirs(directory)
logging.info('successfully created directory {}'.format(directory))
except OSError:
logging.error('creating directory {} failed'.format(directory))
def check_path(path, isfile=False, isdir=False):
"""
returns if path given is a file or directory
"""
return os.path.isfile(path) if isfile else os.path.isdir(path)
def set_values(user_input, default, check=''):
"""
sets default value if user input is empty value.
ensures integer value if necessary
"""
if check == 'integer' and user_input != '':
user_input = check_user_input_if_integer(user_input)
return default if not user_input else user_input
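# usage sketch (assuming the function above):
#   set_values('', 8080) -> 8080                        # empty input, default wins
#   set_values('9000', 8080) -> '9000'                  # non-empty input wins
#   set_values('9000', 8080, check='integer') -> 9000   # coerced to int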
def validate_url(url):
"""
validates url and checks if any HTTP Errors
"""
url_verify = ''
try:
url_verify = urlopen(url)
except HTTPError:
get_user_response(message='Error validating URL: {}'.format(url))
return url_verify
def check_user_input_if_integer(user_input):
"""
check if user input is integer and not any other data type
"""
integer_input = ''
while not integer_input:
try:
integer_input = int(user_input)
except ValueError:
logging.warn('only integer number accepted')
user_input = input('enter a number: ')
return integer_input
def get_ip(node_name='', ip_type=''):
"""
get the ip address of a node
"""
ip = ''
while True:
ip = input('ip address for {} in {} node: '.format(ip_type, node_name))
ip_check = validate_ip(ip)
if ip_check:
break
else:
logging.warn('ip address should be in format: x.x.x.x')
return ip
def validate_file(directory, filename, url):
re_hash = ''
hash_value = False
logging.info('validating file {} in {}'.format(filename, directory))
with open('{}/{}'.format(directory, filename), 'rb') as f:
bytes = f.read()
re_hash = hashlib.sha256(bytes).hexdigest()
logging.info('sha256 for file {} is {}'.format(filename, re_hash))
with open('{}/rhcos.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in rhcos.txt'.format(filename))
hash_value = True
if not hash_value:
with open('{}/client.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in client.txt'.format(filename))
hash_value = True
return hash_value
def validate_ip(ip):
"""
validates ip address format
"""
valid_ip = ''
try:
valid_ip = str(ipaddress.ip_address(ip))
except ValueError:
logging.error('ip address \'{}\' is not valid: '.format(ip))
return valid_ip
def validate_port(port):
"""
validate ports to ensure HAProxy ports are not reused
"""
invalid_ports = [80, 443, 6443, 22623]
while True:
try:
check_for_string = port.isdigit()
if not check_for_string:
logging.warn('port has to be an integer')
else:
invalid_ports.index(int(port))
logging.warn('ports {} are not allowed'.format(invalid_ports))
port = input('enter a port: ')
except AttributeError:
break
except ValueError:
break
return port
def validate_network_cidr(network_cidr):
"""
validate ip address with cidr format. a bare IP (no prefix) becomes a /32
"""
compressed_network_cidr = ''
while True:
try:
compressed_network_cidr = ipaddress.ip_network(network_cidr)
break
except ValueError:
logging.warn('input should be in format x.x.x.x/x')
network_cidr = input('enter the network cidr: ')
return compressed_network_cidr.compressed
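# usage sketch (assuming the function above):
#   validate_network_cidr('192.168.10.0/24') -> '192.168.10.0/24'
# ipaddress.ip_network() is strict by default, so an address with host bits
# set (e.g. '192.168.10.5/24') raises ValueError and the loop re-prompts
# until a proper network address is entered.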
def validate_cidr(cidr):
"""
validates subnet in cidr format.
"""
check_integer = ''
while not check_integer:
check_integer = check_user_input_if_integer(cidr)
if check_integer and check_integer < 32:
pass
else:
cidr = input('user input has to be an integer and less than 32: ')
return cidr
def check_ip_ping(ip):
command = 'ping -c 3 {} > /dev/null'.format(ip)
response = os.system(command)
return response
def get_idrac_creds(ip):
user = input('enter the idrac user for {}: '.format(ip))
passwd = getpass.getpass('enter the idrac password for {}: '.format(ip))
return user, passwd
def map_interfaces_network(network_devices):
devices = []
if network_devices:
for network_device in network_devices:
device = list(map(lambda interface: interface.encode('ascii'), network_device.values()))
try:
devices.append(device[0].decode("utf-8").split('/')[-1])
except IndexError:
logging.error('Did not find any network devices')
return devices
def connect_to_idrac(user, passwd, base_api_url):
"""
establishes connection to idrac
"""
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
response = ''
status_code = None
try:
response = requests.get(base_api_url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.info('timeout')
get_user_response(message='connecting to idrac timeout')
except Exception as e:
logging.error('{}'.format(e))
get_user_response(message='connecting to idrac unknown exception occurred')
try:
status_code = response.status_code
except AttributeError:
logging.error('could not get idrac response status code')
return None
return response if status_code == 200 else None
def get_network_devices(user, passwd, base_api_url):
"""
get list of network devices from iDRAC
"""
network_devices = ''
response = connect_to_idrac(user, passwd, base_api_url)
if response and response.json():
network_devices_info = response.json()
try:
network_devices = network_devices_info[u'Members']
except KeyError:
network_devices = ''
get_user_response(message='could not get network devices info')
else:
get_user_response(message='idrac connection status code is 401')
return network_devices
def generate_network_devices_menu(devices, purpose=''):
"""
generate a list of network devices menu obtained from iDRAC
"""
menu = {}
i = 1
choice = ''
devices.sort()
for device in devices:
menu[int(i)] = device
i += 1
while True:
options = menu.keys()
for entry in options:
logging.info('{} -> {}'.format(entry, menu[entry]))
choice = input('Select the interface used by {}: '.format(purpose))
try:
menu[int(choice)]
break
except KeyError:
logging.warn('Invalid option')
continue
except ValueError:
logging.warn('Input option should be integer and not string')
continue
selected_network_device = menu[int(choice)]
logging.info('selected interface is: {}'.format(menu[int(choice)]))
return selected_network_device
def get_mac_address(selected_network_device, base_api_url, user, passwd):
"""
get mac address for a selected network device
"""
url = '{}/{}'.format(base_api_url, selected_network_device)
device_mac_address = ''
try:
response = requests.get(url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.error('failed to establish connection to get mac address')
try:
network_device_info = response.json()
except ValueError:
logging.error('check URL, iDRAC user and password may be invalid')
logging.info('{}'.format(url))
try:
device_mac_address = network_device_info[u'MACAddress']
except KeyError:
logging.error('No MAC Address found for network devices')
logging.info('{}'.format(selected_network_device))
return device_mac_address
def get_network_device_mac(devices, user, passwd, base_api_url):
"""
lists available network devices from iDRAC
generates a menu of network devices
obtains mac address for the network device
"""
network_device_mac_address = ''
if devices:
selected_network_device = generate_network_devices_menu(devices, purpose='DHCP')
network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)
if network_device_mac_address:
logging.info('device | """
User response invoked when error exists
"""
valid_responses = ['y', 'NO']
response = ''
while response not in valid_responses:
logging.error('{}'.format(message))
response = input('Do you want to continue (y/NO): ')
if response not in valid_responses:
logging.info('Valid responses are \'y\' or \'NO\'')
if response == 'NO':
logging.info('QUITTING!!')
sys.exit() | identifier_body |
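A sketch of how the iDRAC helpers above fit together. The module name in the import and the Redfish endpoint are assumptions (the real base_api_url is composed outside this excerpt); the helper signatures themselves are the ones defined above.

from helper import (check_ip_ping, get_idrac_creds, get_network_devices,
                    map_interfaces_network, get_network_device_mac)

idrac_ip = '192.168.10.21'                     # assumed lab address
if check_ip_ping(idrac_ip) == 0:               # os.system returns 0 on success
    user, passwd = get_idrac_creds(idrac_ip)
    # assumed Dell Redfish path for the ethernet interface collection
    base_api_url = ('https://{}/redfish/v1/Systems/System.Embedded.1/'
                    'EthernetInterfaces'.format(idrac_ip))
    members = get_network_devices(user, passwd, base_api_url)
    devices = map_interfaces_network(members)
    mac = get_network_device_mac(devices, user, passwd, base_api_url)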
|
helper.py | valid_responses:
logging.error('{}'.format(message))
response = input('Do you want to continue (y/NO): ')
if response not in valid_responses:
logging.info('Valid responses are \'y\' or \'NO\'')
if response == 'NO':
logging.info('QUITTING!!')
sys.exit()
def create_dir(directory):
"""
create directory recursively
"""
try:
os.makedirs(directory)
logging.info('successfully created directory {}'.format(directory))
except OSError:
logging.error('creating directory {} failed'.format(directory))
def check_path(path, isfile=False, isdir=False):
"""
returns if path given is a file or directory
"""
return os.path.isfile(path) if isfile else os.path.isdir(path)
def set_values(user_input, default, check=''):
"""
sets default value if user input is empty value.
ensures integer value if necessary
"""
if check == 'integer' and user_input != '':
user_input = check_user_input_if_integer(user_input)
return default if not user_input else user_input
def validate_url(url):
"""
validates url and checks if any HTTP Errors
"""
url_verify = ''
try:
url_verify = urlopen(url)
except HTTPError:
get_user_response(message='Error validating URL: {}'.format(url))
return url_verify
def check_user_input_if_integer(user_input):
"""
check if user input is integer and not any other data type
"""
integer_input = ''
while not integer_input:
try:
integer_input = int(user_input)
except ValueError:
logging.warn('only integer number accepted')
user_input = input('enter a number: ')
return integer_input
def get_ip(node_name='', ip_type=''):
"""
get the ip address of a node
"""
ip = ''
while True:
|
return ip
def validate_file(directory, filename, url):
re_hash = ''
hash_value = False
logging.info('validating file {} in {}'.format(filename, directory))
with open('{}/{}'.format(directory, filename), 'rb') as f:
bytes = f.read()
re_hash = hashlib.sha256(bytes).hexdigest()
logging.info('sha256 for file {} is {}'.format(filename, re_hash))
with open('{}/rhcos.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in rhcos.txt'.format(filename))
hash_value = True
if not hash_value:
with open('{}/client.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in client.txt'.format(filename))
hash_value = True
return hash_value
def validate_ip(ip):
"""
validates ip address format
"""
valid_ip = ''
try:
valid_ip = str(ipaddress.ip_address(ip))
except ValueError:
logging.error('ip address \'{}\' is not valid: '.format(ip))
return valid_ip
def validate_port(port):
"""
validate ports to ensure HAProxy ports are not reused
"""
invalid_ports = [80, 443, 6443, 22623]
while True:
try:
check_for_string = port.isdigit()
if not check_for_string:
logging.warn('port has to be an integer')
else:
invalid_ports.index(int(port))
logging.warn('ports {} are not allowed'.format(invalid_ports))
port = input('enter a port: ')
except AttributeError:
break
except ValueError:
break
return port
def validate_network_cidr(network_cidr):
"""
validate a network address given in CIDR (x.x.x.x/x) format, reprompting until it parses
"""
compressed_network_cidr = ''
while True:
try:
compressed_network_cidr = ipaddress.ip_network(network_cidr)
break
except ValueError:
logging.warn('input should be in format x.x.x.x/x')
network_cidr = input('enter the network cidr: ')
return compressed_network_cidr.compressed
def validate_cidr(cidr):
"""
validates subnet in cidr format.
"""
check_integer = ''
while not check_integer:
check_integer = check_user_input_if_integer(cidr)
if check_integer and check_integer < 32:
pass
else:
cidr = input('user input has to be an integer and less than 32: ')
return cidr
def check_ip_ping(ip):
command = 'ping -c 3 {} > /dev/null'.format(ip)
response = os.system(command)
return response
def get_idrac_creds(ip):
user = input('enter the idrac user for {}: '.format(ip))
passwd = getpass.getpass('enter the idrac password for {}: '.format(ip))
return user, passwd
def map_interfaces_network(network_devices):
devices = []
if network_devices:
for network_device in network_devices:
device = list(map(lambda interface: interface.encode('ascii'), network_device.values()))
try:
devices.append(device[0].decode("utf-8").split('/')[-1])
except IndexError:
logging.error('Did not find any network devices')
return devices
def connect_to_idrac(user, passwd, base_api_url):
"""
establishes connection to idrac
"""
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
response = ''
status_code = None
try:
response = requests.get(base_api_url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.info('timeout')
get_user_response(message='connecting to idrac timeout')
except Exception as e:
logging.error('{}'.format(e))
get_user_response(message='connecting to idrac unknown exception occurred')
try:
status_code = response.status_code
except AttributeError:
logging.error('could not get idrac response status code')
return None
return response if status_code == 200 else None
def get_network_devices(user, passwd, base_api_url):
"""
get list of network devices from iDRAC
"""
network_devices = ''
response = connect_to_idrac(user, passwd, base_api_url)
if response and response.json():
network_devices_info = response.json()
try:
network_devices = network_devices_info[u'Members']
except KeyError:
network_devices = ''
get_user_response(message='could not get network devices info')
else:
get_user_response(message='idrac connection status code is 401')
return network_devices
def generate_network_devices_menu(devices, purpose=''):
"""
generate a list of network devices menu obtained from iDRAC
"""
menu = {}
i = 1
choice = ''
devices.sort()
for device in devices:
menu[int(i)] = device
i += 1
while True:
options = menu.keys()
for entry in options:
logging.info('{} -> {}'.format(entry, menu[entry]))
choice = input('Select the interface used by {}: '.format(purpose))
try:
menu[int(choice)]
break
except KeyError:
logging.warn('Invalid option')
continue
except ValueError:
logging.warn('Input option should be integer and not string')
continue
selected_network_device = menu[int(choice)]
logging.info('selected interface is: {}'.format(menu[int(choice)]))
return selected_network_device
def get_mac_address(selected_network_device, base_api_url, user, passwd):
"""
get mac address for a selected network device
"""
url = '{}/{}'.format(base_api_url, selected_network_device)
device_mac_address = ''
try:
response = requests.get(url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.error('failed to establish connection to get mac address')
try:
network_device_info = response.json()
except ValueError:
logging.error('check URL, iDRAC user and password may be invalid')
logging.info('{}'.format(url))
try:
device_mac_address = network_device_info[u'MACAddress']
except KeyError:
logging.error('No MAC Address found for network devices')
logging.info('{}'.format(selected_network_device))
return device_mac_address
def get_network_device_mac(devices, user, passwd, base_api_url):
"""
lists available network devices from iDRAC
generates a menu of network devices
obtains mac address for the network device
"""
network_device_mac_address = ''
if devices:
selected_network_device = generate_network_devices_menu(devices, purpose='DHCP')
network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)
if network_device_mac_address:
logging.info('device {} mac address is {}'.format(selected_network_device, network_device_mac_address))
return network_device_mac_address
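# Illustrative usage sketch (not part of the original module): chains the helpers
# above to fetch a boot interface MAC address from an iDRAC host. The sample IP and
# the Redfish EthernetInterfaces URL are assumptions for demonstration only.
def example_lookup_boot_mac(idrac_ip='192.168.1.120'):
    if check_ip_ping(idrac_ip) != 0:
        logging.error('idrac {} did not respond to ping'.format(idrac_ip))
        return ''
    user, passwd = get_idrac_creds(idrac_ip)
    base_api_url = 'https://{}/redfish/v1/Systems/System.Embedded.1/EthernetInterfaces'.format(idrac_ip)
    devices = map_interfaces_network(get_network_devices(user, passwd, base_api_url))
    return get_network_device_mac(devices, user, passwd, base_api_url)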
def get_device_enumeration(device | ip = input('ip address for {} in {} node: '.format(ip_type, node_name))
ip_check = validate_ip(ip)
if ip_check:
break
else:
logging.warn('ip address should be in format: x.x.x.x') | conditional_block |
helper.py | valid_responses:
logging.error('{}'.format(message))
response = input('Do you want to continue (y/NO): ')
if response not in valid_responses:
logging.info('Valid responses are \'y\' or \'NO\'')
if response == 'NO':
logging.info('QUITTING!!')
sys.exit()
def create_dir(directory):
"""
create directory recursively
"""
try:
os.makedirs(directory)
logging.info('successfully created directory {}'.format(directory)) | logging.error('creating directory {} failed'.format(directory))
def check_path(path, isfile=False, isdir=False):
"""
returns if path given is a file or directory
"""
return os.path.isfile(path) if isfile else os.path.isdir(path)
def set_values(user_input, default, check=''):
"""
sets default value if user input is empty value.
ensures integer value if necessary
"""
if check == 'integer' and user_input != '':
user_input = check_user_input_if_integer(user_input)
return default if not user_input else user_input
def validate_url(url):
"""
validates url and checks if any HTTP Errors
"""
url_verify = ''
try:
url_verify = urlopen(url)
except HTTPError:
get_user_response(message='Error validating URL: {}'.format(url))
return url_verify
def check_user_input_if_integer(user_input):
"""
check if user input is integer and not any other data type
"""
integer_input = ''
while not integer_input:
try:
integer_input = int(user_input)
except ValueError:
logging.warn('only integer number accepted')
user_input = input('enter a number: ')
return integer_input
def get_ip(node_name='', ip_type=''):
"""
get the ip address of a node
"""
ip = ''
while True:
ip = input('ip address for {} in {} node: '.format(ip_type, node_name))
ip_check = validate_ip(ip)
if ip_check:
break
else:
logging.warn('ip address should be in format: x.x.x.x')
return ip
def validate_file(directory, filename, url):
re_hash = ''
hash_value = False
logging.info('validating file {} in {}'.format(filename, directory))
with open('{}/{}'.format(directory, filename), 'rb') as f:
bytes = f.read()
re_hash = hashlib.sha256(bytes).hexdigest()
logging.info('sha256 for file {} is {}'.format(filename, re_hash))
with open('{}/rhcos.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in rhcos.txt'.format(filename))
hash_value = True
if not hash_value:
with open('{}/client.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in client.txt'.format(filename))
hash_value = True
return hash_value
def validate_ip(ip):
"""
validates ip address format
"""
valid_ip = ''
try:
valid_ip = str(ipaddress.ip_address(ip))
except ValueError:
logging.error('ip address \'{}\' is not valid: '.format(ip))
return valid_ip
def validate_port(port):
"""
validate ports to ensure HAProxy ports are not reused
"""
invalid_ports = [80, 443, 6443, 22623]
while True:
try:
check_for_string = port.isdigit()
if not check_for_string:
logging.warn('port has to be an integer')
else:
invalid_ports.index(int(port))
logging.warn('ports {} are not allowed'.format(invalid_ports))
port = input('enter a port: ')
except AttributeError:
break
except ValueError:
break
return port
def validate_network_cidr(network_cidr):
"""
validate a network address given in CIDR (x.x.x.x/x) format, reprompting until it parses
"""
compressed_network_cidr = ''
while True:
try:
compressed_network_cidr = ipaddress.ip_network(network_cidr)
break
except ValueError:
logging.warn('input should be in format x.x.x.x/x')
network_cidr = input('enter the network cidr: ')
return compressed_network_cidr.compressed
def validate_cidr(cidr):
"""
validates subnet in cidr format.
"""
check_integer = ''
while not check_integer:
check_integer = check_user_input_if_integer(cidr)
if check_integer and check_integer < 32:
pass
else:
cidr = input('user input has to be an integer and less than 32: ')
return cidr
def check_ip_ping(ip):
command = 'ping -c 3 {} > /dev/null'.format(ip)
response = os.system(command)
return response
def get_idrac_creds(ip):
user = input('enter the idrac user for {}: '.format(ip))
passwd = getpass.getpass('enter the idrac password for {}: '.format(ip))
return user, passwd
def map_interfaces_network(network_devices):
devices = []
if network_devices:
for network_device in network_devices:
device = list(map(lambda interface: interface.encode('ascii'), network_device.values()))
try:
devices.append(device[0].decode("utf-8").split('/')[-1])
except IndexError:
logging.error('Did not find any network devices')
return devices
def connect_to_idrac(user, passwd, base_api_url):
"""
establishes connection to idrac
"""
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
response = ''
status_code = None
try:
response = requests.get(base_api_url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.info('timeout')
get_user_response(message='connecting to idrac timeout')
except Exception as e:
logging.error('{}'.format(e))
get_user_response(message='connecting to idrac unknown exception occurred')
try:
status_code = response.status_code
except AttributeError:
logging.error('could not get idrac response status code')
return None
return response if status_code == 200 else None
def get_network_devices(user, passwd, base_api_url):
"""
get list of network devices from iDRAC
"""
network_devices = ''
response = connect_to_idrac(user, passwd, base_api_url)
if response and response.json():
network_devices_info = response.json()
try:
network_devices = network_devices_info[u'Members']
except KeyError:
network_devices = ''
get_user_response(message='could not get network devices info')
else:
get_user_response(message='idrac connection status code is 401')
return network_devices
def generate_network_devices_menu(devices, purpose=''):
"""
generate a list of network devices menu obtained from iDRAC
"""
menu = {}
i = 1
choice = ''
devices.sort()
for device in devices:
menu[int(i)] = device
i += 1
while True:
options = menu.keys()
for entry in options:
logging.info('{} -> {}'.format(entry, menu[entry]))
choice = input('Select the interface used by {}: '.format(purpose))
try:
menu[int(choice)]
break
except KeyError:
logging.warn('Invalid option')
continue
except ValueError:
logging.warn('Input option should be integer and not string')
continue
selected_network_device = menu[int(choice)]
logging.info('selected interface is: {}'.format(menu[int(choice)]))
return selected_network_device
def get_mac_address(selected_network_device, base_api_url, user, passwd):
"""
get mac address for a selected network device
"""
url = '{}/{}'.format(base_api_url, selected_network_device)
device_mac_address = ''
try:
response = requests.get(url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.error('failed to establish connection to get mac address')
try:
network_device_info = response.json()
except ValueError:
logging.error('check URL, iDRAC user and password may be invalid')
logging.info('{}'.format(url))
try:
device_mac_address = network_device_info[u'MACAddress']
except KeyError:
logging.error('No MAC Address found for network devices')
logging.info('{}'.format(selected_network_device))
return device_mac_address
def get_network_device_mac(devices, user, passwd, base_api_url):
"""
lists available network devices from iDRAC
generates a menu of network devices
obtains mac address for the network device
"""
network_device_mac_address = ''
if devices:
selected_network_device = generate_network_devices_menu(devices, purpose='DHCP')
network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)
if network_device_mac_address:
logging.info('device {} mac address is {}'.format(selected_network_device, network_device_mac_address))
return network_device_mac_address
def get_device_enumeration(device | except OSError: | random_line_split |
helper.py | valid_responses:
logging.error('{}'.format(message))
response = input('Do you want to continue (y/NO): ')
if response not in valid_responses:
logging.info('Valid responses are \'y\' or \'NO\'')
if response == 'NO':
logging.info('QUITTING!!')
sys.exit()
def create_dir(directory):
"""
create directory recursively
"""
try:
os.makedirs(directory)
logging.info('successfully created directory {}'.format(directory))
except OSError:
logging.error('creating directory {} failed'.format(directory))
def check_path(path, isfile=False, isdir=False):
"""
returns if path given is a file or directory
"""
return os.path.isfile(path) if isfile else os.path.isdir(path)
def set_values(user_input, default, check=''):
"""
sets default value if user input is empty value.
ensures integer value if necessary
"""
if check == 'integer' and user_input != '':
user_input = check_user_input_if_integer(user_input)
return default if not user_input else user_input
def validate_url(url):
"""
validates url and checks if any HTTP Errors
"""
url_verify = ''
try:
url_verify = urlopen(url)
except HTTPError:
get_user_response(message='Error validating URL: {}'.format(url))
return url_verify
def check_user_input_if_integer(user_input):
"""
check if user input is integer and not any other data type
"""
integer_input = ''
while not integer_input:
try:
integer_input = int(user_input)
except ValueError:
logging.warn('only integer number accepted')
user_input = input('enter a number: ')
return integer_input
def get_ip(node_name='', ip_type=''):
"""
get the ip address of a node
"""
ip = ''
while True:
ip = input('ip address for {} in {} node: '.format(ip_type, node_name))
ip_check = validate_ip(ip)
if ip_check:
break
else:
logging.warn('ip address should be in format: x.x.x.x')
return ip
def validate_file(directory, filename, url):
re_hash = ''
hash_value = False
logging.info('validating file {} in {}'.format(filename, directory))
with open('{}/{}'.format(directory, filename), 'rb') as f:
bytes = f.read()
re_hash = hashlib.sha256(bytes).hexdigest()
logging.info('sha256 for file {} is {}'.format(filename, re_hash))
with open('{}/rhcos.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in rhcos.txt'.format(filename))
hash_value = True
if not hash_value:
with open('{}/client.txt'.format(directory)) as f:
if re_hash and re_hash in f.read():
logging.info('sha256sum for file {} is validated in client.txt'.format(filename))
hash_value = True
return hash_value
def validate_ip(ip):
"""
validates ip address format
"""
valid_ip = ''
try:
valid_ip = str(ipaddress.ip_address(ip))
except ValueError:
logging.error('ip address \'{}\' is not valid: '.format(ip))
return valid_ip
def validate_port(port):
"""
validate ports to ensure HAProxy ports are not reused
"""
invalid_ports = [80, 443, 6443, 22623]
while True:
try:
check_for_string = port.isdigit()
if not check_for_string:
logging.warn('port has to be an integer')
else:
invalid_ports.index(int(port))
logging.warn('ports {} are not allowed'.format(invalid_ports))
port = input('enter a port: ')
except AttributeError:
break
except ValueError:
break
return port
def validate_network_cidr(network_cidr):
"""
validate a network address given in CIDR (x.x.x.x/x) format, reprompting until it parses
"""
compressed_network_cidr = ''
while True:
try:
compressed_network_cidr = ipaddress.ip_network(network_cidr)
break
except ValueError:
logging.warn('input should be in format x.x.x.x/x')
network_cidr = input('enter the network cidr: ')
return compressed_network_cidr.compressed
def validate_cidr(cidr):
"""
validates subnet in cidr format.
"""
check_integer = ''
while not check_integer:
check_integer = check_user_input_if_integer(cidr)
if check_integer and check_integer < 32:
pass
else:
cidr = input('user input has to be an integer and less than 32: ')
return cidr
def check_ip_ping(ip):
command = 'ping -c 3 {} > /dev/null'.format(ip)
response = os.system(command)
return response
def get_idrac_creds(ip):
user = input('enter the idrac user for {}: '.format(ip))
passwd = getpass.getpass('enter the idrac password for {}: '.format(ip))
return user, passwd
def map_interfaces_network(network_devices):
devices = []
if network_devices:
for network_device in network_devices:
device = list(map(lambda interface: interface.encode('ascii'), network_device.values()))
try:
devices.append(device[0].decode("utf-8").split('/')[-1])
except IndexError:
logging.error('Did not find any network devices')
return devices
def connect_to_idrac(user, passwd, base_api_url):
"""
establishes connection to idrac
"""
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
response = ''
status_code = None
try:
response = requests.get(base_api_url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.info('timeout')
get_user_response(message='connecting to idrac timeout')
except Exception as e:
logging.error('{}'.format(e))
get_user_response(message='connecting to idrac unknown exception occurred')
try:
status_code = response.status_code
except AttributeError:
logging.error('could not get idrac response status code')
return None
return response if status_code == 200 else None
def get_network_devices(user, passwd, base_api_url):
"""
get list of network devices from iDRAC
"""
network_devices = ''
response = connect_to_idrac(user, passwd, base_api_url)
if response and response.json():
network_devices_info = response.json()
try:
network_devices = network_devices_info[u'Members']
except KeyError:
network_devices = ''
get_user_response(message='could not get network devices info')
else:
get_user_response(message='idrac connection status code is 401')
return network_devices
def generate_network_devices_menu(devices, purpose=''):
"""
generate a list of network devices menu obtained from iDRAC
"""
menu = {}
i = 1
choice = ''
devices.sort()
for device in devices:
menu[int(i)] = device
i += 1
while True:
options = menu.keys()
for entry in options:
logging.info('{} -> {}'.format(entry, menu[entry]))
choice = input('Select the interface used by {}: '.format(purpose))
try:
menu[int(choice)]
break
except KeyError:
logging.warn('Invalid option')
continue
except ValueError:
logging.warn('Input option should be integer and not string')
continue
selected_network_device = menu[int(choice)]
logging.info('selected interface is: {}'.format(menu[int(choice)]))
return selected_network_device
def | (selected_network_device, base_api_url, user, passwd):
"""
get mac address for a selected network device
"""
url = '{}/{}'.format(base_api_url, selected_network_device)
device_mac_address = ''
try:
response = requests.get(url, verify=False, auth=(user, passwd),
timeout=5)
except requests.exceptions.ConnectTimeout:
logging.error('failed to establish connection to get mac address')
try:
network_device_info = response.json()
except ValueError:
logging.error('check URL, iDRAC user and password may be invalid')
logging.info('{}'.format(url))
try:
device_mac_address = network_device_info[u'MACAddress']
except KeyError:
logging.error('No MAC Address found for network devices')
logging.info('{}'.format(selected_network_device))
return device_mac_address
def get_network_device_mac(devices, user, passwd, base_api_url):
"""
lists available network devices from iDRAC
generates a menu of network devices
obtains mac address for the network device
"""
network_device_mac_address = ''
if devices:
selected_network_device = generate_network_devices_menu(devices, purpose='DHCP')
network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)
if network_device_mac_address:
logging.info('device {} mac address is {}'.format(selected_network_device, network_device_mac_address))
return network_device_mac_address
def get_device_enumeration | get_mac_address | identifier_name |
main.rs | 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
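// Illustrative sketch (not in the original source): a minimal check that the bundled
// default font lays out a non-empty string with positive pixel dimensions.
#[cfg(test)]
mod font_sketch {
    use super::font::Font;

    #[test]
    fn default_font_lays_out_text() {
        let glyphs = Font::default().glyphs("wtmenu");
        assert!(glyphs.width > 0.0);
        assert!(glyphs.height > 0.0);
    }
}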
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave { .. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state, .. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial, .. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit();
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) | } else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as u32 * s as u32) / 255) as u8;
let (nf, sf) = (self.cfg.nf, self.cfg.sf);
let rendered = self.rendered;
for i in 0..self.cfg.options.len() {
let opt = self.cfg.options.get(i).unwrap();
let g = self.cfg.font.glyphs(opt);
let (left, right, top, bottom) = self.cfg.button_bounds(i);
let trans_x: i32 = max(left, left - (g.width.ceil() as i32 - bw as i32) / 2);
let trans_y: i32 = max(top, top - (g.height.ceil() as i32 - bh as i32) / 2);
let (mut warn_btn, mut warn_buf) = (false, false);
g.render(|x, y, v| {
let (x, y) = (x as i32 + trans_x, y as i32 + trans_y);
if x < 0 || x as usize >= shm.width || y < 0 || y as usize >= shm.height {
if !rendered && !warn_buf {
eprintln!(
"glyph for {:?} exceeds | {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb | identifier_body |
main.rs | 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave { .. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state, .. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial, .. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit(); |
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb
} else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as u32 * s as u32) / 255) as u8;
let (nf, sf) = (self.cfg.nf, self.cfg.sf);
let rendered = self.rendered;
for i in 0..self.cfg.options.len() {
let opt = self.cfg.options.get(i).unwrap();
let g = self.cfg.font.glyphs(opt);
let (left, right, top, bottom) = self.cfg.button_bounds(i);
let trans_x: i32 = max(left, left - (g.width.ceil() as i32 - bw as i32) / 2);
let trans_y: i32 = max(top, top - (g.height.ceil() as i32 - bh as i32) / 2);
let (mut warn_btn, mut warn_buf) = (false, false);
g.render(|x, y, v| {
let (x, y) = (x as i32 + trans_x, y as i32 + trans_y);
if x < 0 || x as usize >= shm.width || y < 0 || y as usize >= shm.height {
if !rendered && !warn_buf {
eprintln!(
"glyph for {:?} | random_line_split |
|
main.rs | (pub u32);
static ARGB_FORMAT_MSG: &str =
"Argb must be specified by a '#' followed by exactly 3, 4, 6, or 8 digits";
impl FromStr for Argb {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self> {
if !s.starts_with('#') || !s[1..].chars().all(|c| c.is_ascii_hexdigit()) {
return Err(anyhow!(ARGB_FORMAT_MSG));
}
let s = &s[1..];
let dup = |s: &str| {
s.chars().fold(String::new(), |mut s, c| {
s.push(c);
s.push(c);
s
})
};
match s.len() {
8 => Ok(Argb(u32::from_str_radix(s, 16)?)),
6 => Ok(Argb(u32::from_str_radix(s, 16)? | 0xff000000)),
4 => Ok(Argb(u32::from_str_radix(&dup(s), 16)?)),
3 => Ok(Argb(u32::from_str_radix(&dup(s), 16)? | 0xff000000)),
_ => Err(anyhow!(ARGB_FORMAT_MSG)),
}
}
}
}
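// Illustrative sketch (not in the original source): exercises the `FromStr` impl for
// `Argb` above; the expected constants restate the documented expansion rules
// (3/4-digit forms duplicate each digit, 6-digit forms get an opaque 0xff alpha).
#[cfg(test)]
mod argb_parse_sketch {
    use super::conf::Argb;

    #[test]
    fn expands_short_and_long_forms() {
        assert_eq!("#abc".parse::<Argb>().unwrap().0, 0xffaabbcc);
        assert_eq!("#1abc".parse::<Argb>().unwrap().0, 0x11aabbcc);
        assert_eq!("#112233".parse::<Argb>().unwrap().0, 0xff112233);
        assert_eq!("#80112233".parse::<Argb>().unwrap().0, 0x80112233);
        assert!("#12g".parse::<Argb>().is_err());
    }
}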
use conf::{Argb, Config};
use font::Font;
mod font {
use anyhow::{Context, Result};
use rusttype::{self, point, Font as rtFont, Point, PositionedGlyph, Scale};
#[derive(Debug)]
pub struct Font {
font: rtFont<'static>,
scale: Scale,
offset: Point<f32>,
}
#[derive(Debug)]
pub struct Glyphs<'f> {
glyphs: Vec<PositionedGlyph<'f>>,
pub width: f32,
pub height: f32,
}
impl Default for Font {
fn default() -> Self {
let font =
rtFont::try_from_bytes(include_bytes!("../SourceCodePro-Regular.otf") as &[u8])
.expect("Failed constructing a Font from bytes");
Font::new(font)
}
}
impl Font {
fn new(font: rtFont<'static>) -> Self {
let scale = Scale::uniform(40.0);
let v_metrics = font.v_metrics(scale);
let offset = point(0.0, v_metrics.ascent);
Font {
font,
scale,
offset,
}
}
pub fn load<P: AsRef<std::path::Path>>(name: &P) -> Result<Font> {
let bytes = std::fs::read(name)?;
let font = rtFont::try_from_vec(bytes).context("Failed loading the default font")?;
Ok(Self::new(font))
}
pub fn glyphs(&self, s: &str) -> Glyphs {
let glyphs: Vec<_> = self.font.layout(s, self.scale, self.offset).collect();
let width = glyphs
.last()
.map(|g| g.position().x as f32 + g.unpositioned().h_metrics().advance_width)
.unwrap_or(0.0);
Glyphs {
glyphs,
width,
height: self.scale.y,
}
}
}
impl<'f> Glyphs<'f> {
pub fn render(self, mut d: impl FnMut(usize, usize, u8)) {
let (width, height) = (self.width.ceil(), self.height.ceil());
self.glyphs
.iter()
.filter_map(|g| g.pixel_bounding_box().map(|bb| (g, bb)))
.for_each(|(g, bb)| {
g.draw(|x, y, v| {
let v = (v * 255.0).ceil() as u8;
let x = x as i32 + bb.min.x;
let y = y as i32 + bb.min.y;
if x >= 0 && x < width as i32 && y >= 0 && y < height as i32 {
d(x as usize, y as usize, v);
}
})
})
}
}
}
#[derive(Debug)]
struct Registry {
compositor: Main<WlCompositor>,
seat: Main<WlSeat>,
shm: Main<WlShm>,
wmbase: Main<XdgWmBase>,
layer_shell: Main<LayerShell>,
}
#[derive(Debug, Default)]
struct Pointer {
pos: Option<(f64, f64)>,
pos_prev: Option<(f64, f64)>,
btn: Option<wl_pointer::ButtonState>,
btn_prev: Option<wl_pointer::ButtonState>,
frame: bool,
}
#[derive(Debug)]
struct Surface {
wl: Main<WlSurface>,
layer: Main<LayerSurface>,
committed: bool,
configured: bool,
}
#[derive(Debug)]
struct Data {
cfg: Config,
registry: Registry,
ptr: Pointer,
seat_cap: wl_seat::Capability,
shm_formats: Vec<wl_shm::Format>,
buffer: ShmPixelBuffer,
surface: Surface,
rendered: bool,
}
impl Data {
fn new(cfg: Config, mut registry: Registry) -> Data {
let seat = &mut registry.seat;
filter!(seat, data,
wl_seat::Event::Capabilities{capabilities} => data.seat_cap = capabilities
);
let pointer = seat.get_pointer();
filter!(pointer, data,
wl_pointer::Event::Enter { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Leave { .. } => {
data.ptr.pos.take();
data.ptr.btn.take();
},
wl_pointer::Event::Motion { surface_x, surface_y, .. } => {
data.ptr.pos.replace((surface_x, surface_y));
},
wl_pointer::Event::Button { button: 0x110, state, .. } => {
// 0x110 is BUTTON1
data.ptr.btn.replace(state);
},
wl_pointer::Event::Frame => {
data.ptr.frame = true;
}
);
let wmbase = &mut registry.wmbase;
filter!(wmbase, data,
xdg_wm_base::Event::Ping { serial } => data.registry.wmbase.detach().pong(serial)
);
let shm = &mut registry.shm;
filter!(shm, data,
wl_shm::Event::Format { format } => data.shm_formats.push(format)
);
let (width, height) = cfg.buttons_bounds();
let shmbuffer = create_shmbuffer(width, height, shm).expect("failed to create shm");
let (width, height) = cfg.buttons_bounds();
let surface =
Data::create_surface(width, height, ®istry.compositor, ®istry.layer_shell);
let mut data = Data {
cfg,
registry,
ptr: Pointer::default(),
buffer: shmbuffer,
surface: surface,
seat_cap: wl_seat::Capability::from_raw(0).unwrap(),
shm_formats: vec![],
rendered: false,
};
data.render();
data
}
fn create_surface(
width: usize,
height: usize,
compositor: &Main<WlCompositor>,
layer_shell: &Main<LayerShell>,
) -> Surface {
let wl = compositor.create_surface();
let (width, height) = (width as i32, height as i32);
let namespace = String::from("wtmenu");
let layer = layer_shell.get_layer_surface(&wl.detach(), None, Layer::Overlay, namespace);
layer.set_size(width as u32, height as u32);
filter!(layer, data,
layer_surface::Event::Configure { serial, .. } => {
data.surface.layer.detach().ack_configure(serial);
data.surface.configured = true;
},
layer_surface::Event::Closed => {
data.cfg.should_close = true;
}
);
wl.commit();
Surface {
wl,
layer,
committed: false,
configured: false,
}
}
fn render(&mut self) {
if self.buffer.locked {
return;
}
let shm = &mut self.buffer;
let (bw, bh) = self.cfg.button_dim;
let focus = {
let cfg = &self.cfg;
(self.ptr.btn)
.filter(|s| s == &wl_pointer::ButtonState::Pressed)
.and(self.ptr.pos)
.and_then(|(x, y)| cfg.in_button(x.ceil() as usize, y.ceil() as usize))
};
for i in 0..shm.width {
for j in 0..shm.height {
if let Some(opti) = self.cfg.in_button(i, j) {
shm[(i, j)] = if Some(opti) == focus {
self.cfg.sb
} else {
self.cfg.nb
};
} else {
shm[(i, j)] = (self.cfg.nb & 0xffffff) | 0x22000000;
}
}
}
let scale = |v: u8, s: u8| ((v as | Argb | identifier_name |
|
lib.rs | ifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`, .., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & !((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a struct, validates annotated field sizes, and emits the packed byte storage plus getter/setter impls
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") |
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Named field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if !variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase | {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
} | conditional_block |
lib.rs | Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`, .., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & !((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a struct, validates annotated field sizes, and emits the packed byte storage plus getter/setter impls
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty); |
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Named field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if !variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase | random_line_split |
|
lib.rs | Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`, .., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines BitOps trait and implement it for `u8`
fn | () -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & !((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
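// Illustrative sketch (not in the original source): shows the token-stream shape the
// `BitAttribute` parser above expects, i.e. the `= 8` tail of a `#[bits = 8]` attribute.
#[cfg(test)]
mod bit_attribute_sketch {
    use super::BitAttribute;
    use quote::quote;

    #[test]
    fn parses_the_equals_literal_tail() {
        let parsed: BitAttribute = syn::parse2(quote!(= 8)).expect("`= 8` should parse");
        assert_eq!(parsed.bits.base10_parse::<usize>().unwrap(), 8);
    }
}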
/// Main macro `bitfield`
/// Parses a struct, validates annotated field sizes, and emits the packed byte storage plus getter/setter impls
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^
if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panic in consts requires nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Named field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
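// Hedged sketch (hypothetical field, not from the original source): for a first struct
// field `flags: B3`, the scan above would emit roughly
// pub fn get_flags(&self) -> <B3 as Specifier>::Interface { B3::get(&self.data, 0) }
// pub fn set_flags(&mut self, val: <B3 as Specifier>::Interface) { B3::set(&mut self.data, 0, val) }
// and advance the running offset to `(0 + <B3 as Specifier>::BITS)` for the next field.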
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if !variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
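// Hedged worked example (not in the original): an enum with 8 variants gives
// 8usize.trailing_zeros() == 3, so it occupies 3 bits and size_to_type(3) selects `u8`.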
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase | bit_ops_impl | identifier_name |
lib.rs | Specifier for #ident {
const BITS: usize = #idx;
type IntType = #size_type;
type Interface = #size_type;
fn to_interface(int_val: Self::IntType) -> Self::Interface {
int_val as Self::Interface
}
}
)
});
// Implement LastByte trait for integer primitives with `as u8`
output.extend(impl_last_byte());
// Implement BitOps
output.extend(bit_ops_impl());
output.extend(bit_specifiers);
output.into()
}
/// Implement LastByte trait for integer primitives `u8`, `u16`, .., `u128` using `as u8`
fn impl_last_byte() -> TokenStream {
let int_types = [
quote!(u8),
quote!(u16),
quote!(u32),
quote!(u64),
quote!(u128),
];
// Implement LastByte trait for primitives
quote!(
#[doc = "Implement last byte for integer primitives using `as u8`"]
#(impl LastByte for #int_types {
fn last_byte(self) -> u8 {
self as u8
}
})*
)
}
/// Match a given number of bits to the narrowest unsigned integer type that can hold it
fn size_to_type(bits: usize) -> TokenStream {
match bits {
1..=8 => quote!(u8),
9..=16 => quote!(u16),
17..=32 => quote!(u32),
33..=64 => quote!(u64),
65..=128 => quote!(u128),
_ => unreachable!(),
}
}
/// Defines the BitOps trait and implements it for `u8`
fn bit_ops_impl() -> TokenStream {
quote!(
#[doc = "Simple trait to extract bits from primitive integer type"]
trait BitOps {
fn first(self, n: usize) -> u8;
fn last(self, n: usize) -> u8;
fn mid(self, start: usize, len: usize) -> u8;
}
#[doc = "Ops to extract bits from `u8` byte"]
impl BitOps for u8 {
fn first(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & ((1 << n) - 1),
_ => self,
}
}
fn last(self, n: usize) -> u8 {
match n {
0 => 0,
1..=7 => self & !((1 << (8 - n)) - 1),
_ => self,
}
}
fn mid(self, start: usize, len: usize) -> u8 {
match (start, start + len) {
(0, _) => self.first(len),
(_, l) if l >= 8 => self.last(8 - start),
_ => self & (((1 << len) - 1) << start),
}
}
}
)
}
/// syn helper struct to parse bits attributes
struct BitAttribute {
bits: syn::LitInt,
}
/// Parses the following attribute:
/// ```
/// #[bits=8]
/// ^^
/// ```
impl syn::parse::Parse for BitAttribute {
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
let _: syn::Token![=] = input.parse()?;
let bits: syn::LitInt = input.parse()?;
Ok(Self { bits })
}
}
/// Main macro `bitfield`
/// Parses a struct, validates field sizes, and generates the packed struct with its accessors
#[proc_macro_attribute]
pub fn bitfield(
args: proc_macro::TokenStream,
input: proc_macro::TokenStream,
) -> proc_macro::TokenStream | if attr.path.is_ident("bits") {
// At this point `attr.tokens` is the following part of the attribute:
// #[bits=..]
// ^^^
let bits = syn::parse2::<BitAttribute>(attr.tokens.clone()).ok()?.bits;
return Some(
quote_spanned!(bits.span() => const _: [(); #bits] = [(); <#ty as Specifier>::BITS];),
);
}
}
None
});
let getters_setters = define_getters_setters(&s.fields);
// Total size calculated as the sum of the inner `<T as Specifier>::BITS` associated consts
let total_bit_size = quote!(0 #(+ <#fields_ty as Specifier>::BITS)*);
// Formatted error message for the size check
let error = format!(
"#[bitfield] on `{}` requires the total bit size to be a multiple of 8 bits.",
ident.to_string()
);
quote!(
#[doc = "Converted bitfield struct"]
pub struct #ident {
data: [u8; ( #total_bit_size ) / 8],
}
#(#bits_attrs_check)*
// Conditional consts and panics in consts require nightly
#[cfg(feature="nightly")]
const _: usize = if ( ( #total_bit_size ) % 8 == 0 ) {
0
}else{
panic!(#error)
};
impl #ident {
pub fn new() -> Self {
Self { data: [0u8; ( #total_bit_size ) / 8] }
}
#getters_setters
}
)
.into()
}
_ => unimplemented!("Only struct"),
}
}
fn define_getters_setters(fields: &syn::Fields) -> TokenStream {
let getters = fields.iter().scan(quote!(0), |offset, field| {
let ident = field.ident.as_ref().expect("Named field");
// get_[field name] and set_[field name] idents
let get_ident = quote::format_ident!("get_{}", ident);
let set_ident = quote::format_ident!("set_{}", ident);
let ty = &field.ty;
let output = quote!(
pub fn #get_ident(&self) -> <#ty as Specifier>::Interface {
#ty::get(&self.data, #offset)
}
pub fn #set_ident(&mut self, val: <#ty as Specifier>::Interface) {
#ty::set(&mut self.data, #offset, val)
}
);
// Move the offset by the number of bits in the current type
*offset = quote!((#offset + <#ty as Specifier>::BITS));
Some(output)
});
quote!(#(#getters)*)
}
/// Derive BitfieldSpecifier macro for Enums
/// Parses enums and implements Specifier for it.
// In particular, constructs `to_interface` to match a discriminant to its variant.
// Compile time checks: number of variants is a power of two and discriminant size within bit range
#[proc_macro_derive(BitfieldSpecifier)]
pub fn derive_bitfield_specifier(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ast = parse_macro_input!(input as syn::DeriveInput);
let enum_ident = ast.ident;
// No attributes
let attrs = ast.attrs;
assert!(attrs.is_empty());
// Variants of the enum
// returns an error if underlying `syn::Data` is not an Enum
let variants = match ast.data {
syn::Data::Enum(e) => e.variants,
// Struct / Union
_ => {
return syn::Error::new_spanned(enum_ident, "BitfieldSpecifier only supports enum")
.to_compile_error()
.into()
}
};
// Check that the number of variants is a power of two.
// If not, return an error.
let variant_count = variants.len();
if !variant_count.is_power_of_two() {
return syn::Error::new(
proc_macro2::Span::call_site(),
"BitfieldSpecifier expected a number of variants which is a power of 2",
)
.to_compile_error()
.into();
}
// Number of bits (i.e. which power of two) is the number of trailing zeros
let bits = variant_count.trailing_zeros() as usize;
let size_type = size_to_type(bits);
// Build match patterns for variants
let match_variants = variants.iter().map(|variant| {
let ident = &variant.ident;
// Create a new ident `[enum]_[variant]` to be used in the match patterns
// Not really needed but clearer in expansion
let unique_ident = syn::Ident::new(
&format!(
"{}_{}",
enum_ident.to_string().to_lowercase(),
| {
let _ = args;
let item = parse_macro_input!(input as syn::Item);
match item {
Item::Struct(s) => {
let ident = &s.ident;
let fields_ty = s.fields.iter().map(|field| &field.ty);
// Check that fields with #[bits=X] attribute have a type of size `X`
// We use an array size check to validate the size is correct
let bits_attrs_check = s
.fields
.iter()
.filter_map(|field| {
let ty = &field.ty;
let attrs = &field.attrs;
for attr in attrs {
// #[bits=..]
// ^^^^ | identifier_body |
signalStream.js | typeof message.__internal_webrtc === 'undefined' || senderClientId === clientId) {
return false;
}
// No reason to do all of this right now, so we use setImmediate.
setImmediate(handleSignal, payload.id, senderClientId, message);
return true;
});
function handleSignal(wid, senderClientId, message) | // Other clients may be responding to the same recipientId/stream. Therefore, the streamer
// can't create a WebRTCClient with the original recipientId as these would override each
// other in the webrtcClients map (the streamer would be creating two entries with the same
// recipientId). Instead, the streamer needs to generate a new recipientId for each listener
// to use, so the streamer can distinguish the clients from each other. However, it's
// difficult to communicate this back to the listeners. Therefore, the listener instead
// creates a new recipientId, telling the streamer "In the future, I'll address you with this
// id instead".
const newRecipientId = coreUtils.randomString();
const webrtcClient = new WebRTCClient(ownId, newRecipientId, senderClientId,
node, { listener: true });
webrtcClients.get(wid).set(ownId, webrtcClient);
// If the client hasn't connected after 30 seconds, we conclude that it won't happen and clean
// up the client we created.
setTimeout(() => {
if (!webrtcClient.stub.isConnected()) {
webrtcClients.get(wid).delete(ownId);
}
}, 30 * 1000);
callback(senderClientId, 'META_DATA_IS_DEPRECATED', clientAcceptCallback => {
webrtcClient.onRemoteStream(clientAcceptCallback);
node.webstrate.signal({
__internal_webrtc: true,
wantToListen: true,
recipientId: message.senderId,
newRecipientId,
senderId: ownId,
}, senderClientId);
return webrtcClient.stub;
});
});
return;
}
if (message.wantToListen) {
const callback = wantToStreamCallbacks.get(wid).get(message.recipientId);
if (callback) {
callback(senderClientId, (localStream, meta, onConnectCallback) => {
const webrtcClient = new WebRTCClient(message.newRecipientId, message.senderId,
senderClientId, node, { streamer: true });
webrtcClient.onConnect(onConnectCallback);
webrtcClients.get(wid).set(message.newRecipientId, webrtcClient);
webrtcClient.start(localStream);
return webrtcClient.stub;
});
}
return;
}
if (message.sdp || message.ice) {
const webrtcClient = webrtcClients.get(wid).get(message.recipientId);
if (webrtcClient) {
webrtcClient.handleMessage(message);
} else {
console.error('Got message for unknown recipient', message, webrtcClients);
}
return;
}
}
const wantToStreamCallbacks = new Map();
const wantToListenCallbacks = new Map();
const webrtcClients = new Map();
function WebRTCClient(ownId, recipientId, clientRecipientId, node, { listener, streamer }) {
let active = false, peerConnection, onRemoteStreamCallback, onConnectCallback, onCloseCallback;
const start = (localStream) => {
active = true;
peerConnection = new RTCPeerConnection(config.peerConnectionConfig);
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.oniceconnectionstatechange = gotStateChange;
if (streamer) {
localStream.getTracks().forEach((track)=>{
peerConnection.addTrack(track, localStream);
});
peerConnection.createOffer().then(createdDescription).catch(errorHandler);
}
if (listener) {
peerConnection.ontrack = gotRemoteStream;
}
};
const createdDescription = (description) => {
peerConnection.setLocalDescription(description).then(function() {
node.webstrate.signal({
sdp: peerConnection.localDescription,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}).catch(errorHandler);
};
const handleMessage = (message) => {
if(!peerConnection) start();
if (message.sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(message.sdp)).then(function() {
// Only create answers in response to offers
if(message.sdp.type == 'offer') {
peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
}
}).catch(errorHandler);
} else if(message.ice) {
peerConnection.addIceCandidate(new RTCIceCandidate(message.ice)).catch(errorHandler);
}
};
const gotIceCandidate = (event) => {
if(event.candidate != null) {
node.webstrate.signal({
ice: event.candidate,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}
};
const gotStateChange = event => {
switch (peerConnection.iceConnectionState) {
case 'connected':
onConnectCallback && onConnectCallback(event);
break;
case 'closed':
case 'disconnected':
case 'failed':
onCloseCallback && onCloseCallback(event);
webrtcClients.delete(ownId);
break;
}
};
const gotRemoteStream = (event) => {
onRemoteStreamCallback(event.streams[0]);
};
const errorHandler = (...error) => {
console.error(...error);
};
return {
id: ownId, active, listener, streamer, peerConnection,
onRemoteStream: callback => onRemoteStreamCallback = callback,
onConnect: callback => onConnectCallback = callback,
stub: {
isConnected: () => !!peerConnection &&
['checking', 'connected', 'completed'].includes(peerConnection.iceConnectionState),
close: () => peerConnection.close(),
onclose: callback => onCloseCallback = callback
},
start, handleMessage
};
}
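// Hedged usage sketch (not part of this module; assumes the usual webstrates event API
// for attaching the 'signalStream' listener). A streaming client and a listening client
// would pair up roughly like this:
//
// // streamer side: offer a stream on an element
// elem.webstrate.signalStream((listenerId, accept) => {
// const stub = accept(localMediaStream, /* meta */ null, () => console.log('connected'));
// stub.onclose(() => console.log('stream closed'));
// });
//
// // listener side: receive streams signalled on the same element
// elem.webstrate.on('signalStream', (streamerId, meta, accept) => {
// const stub = accept(remoteStream => { videoEl.srcObject = remoteStream; });
// });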
function setupSignalStream(publicObject, eventObject) {
const wid = publicObject.id;
// Text nodes and transient elements won't have wids, meaning there's no way for us to signal on
// them, and thus it'd be pointless to add a signaling method and event.
if (!wid) return;
// Check if we already set up signal streaming
if(publicObject.signalStream != null) {
return;
}
webrtcClients.set(wid, new Map());
wantToStreamCallbacks.set(wid, new Map());
wantToListenCallbacks.set(wid, new Map());
// A mapping from user callbacks to our internal callbacks: fn -> fn.
//const callbacks = new Map();
const node = coreUtils.getElementByWid(wid);
Object.defineProperty(publicObject, 'signalStream', {
value: (callback) => {
signaling.subscribe(wid);
const ownId = coreUtils.randomString();
wantToStreamCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId
});
},
writable: false
});
Object.defineProperty(publicObject, 'stopStreamSignal', {
value: (callback) => {
// Find the ownId that was generated when adding this callback.
const streamers = Array.from(wantToStreamCallbacks.get(wid).entries());
const [ownId] = streamers.find(([, cb]) => cb === callback) || [];
if (ownId) {
wantToStreamCallbacks.get(wid).delete(ownId);
}
// "But what if somebody else is still listening? Then we shouldn't unsubscribe". Worry not,
// the signaling module keeps track of how many people are actually listening and doesn't
// unsubscribe unless we're the last/only listener.
signaling.unsubscribe(wid);
},
writable: false
});
eventObject.createEvent('signalStream', {
addListener: (callback) => {
if (wantToListenCallbacks.get(wid).size === 0) {
signaling.subscribe(wid);
}
const ownId = coreUtils.randomString();
wantToListenCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
requestFor | {
const node = coreUtils.getElementByWid(wid);
if (message.requestForStreams) {
Array.from(wantToStreamCallbacks.get(wid).keys()).forEach(ownId => {
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId,
recipientId: message.senderId
}, senderClientId);
});
return;
}
// If we get a message from somebody wanting to stream, we ask all the people listening for
// streams. If they accept, we create the webrtcClient.
if (message.wantToStream) {
Array.from(wantToListenCallbacks.get(wid).values()).forEach(callback => {
const ownId = coreUtils.randomString(); | identifier_body |
signalStream.js | typeof message.__internal_webrtc === 'undefined' || senderClientId === clientId) {
return false;
}
// No reason to do all of this right now, so we use setImmediate.
setImmediate(handleSignal, payload.id, senderClientId, message);
return true;
});
function | (wid, senderClientId, message) {
const node = coreUtils.getElementByWid(wid);
if (message.requestForStreams) {
Array.from(wantToStreamCallbacks.get(wid).keys()).forEach(ownId => {
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId,
recipientId: message.senderId
}, senderClientId);
});
return;
}
// If we get a message from somebody wanting to stream, we ask all the people listening for
// streams. If they accept, we create the webrtcClient.
if (message.wantToStream) {
Array.from(wantToListenCallbacks.get(wid).values()).forEach(callback => {
const ownId = coreUtils.randomString();
// Other clients may be responding to the same recipientId/stream. Therefore, the streamer
// can't create a WebRTCClient with the original recipientId as these would override each
// other in the webrtcClients map (the streamer would be creating two entries with the same
// recipientId). Instead, the streamer needs to generate a new recipientId for each listener
// to use, so the streamer can distinguish the clients from each other. However, it's
// difficult to communicate this back to the listeners. Therefore, the listener instead
// creates a new recipientId, telling the streamer "In the future, I'll address you with this
// id instead".
const newRecipientId = coreUtils.randomString();
const webrtcClient = new WebRTCClient(ownId, newRecipientId, senderClientId,
node, { listener: true });
webrtcClients.get(wid).set(ownId, webrtcClient);
// If the client hasn't connected after 30 seconds, we conclude that it won't happen and clean
// up the client we created.
setTimeout(() => {
if (!webrtcClient.stub.isConnected()) {
webrtcClients.get(wid).delete(ownId);
}
}, 30 * 1000);
callback(senderClientId, 'META_DATA_IS_DEPRECATED', clientAcceptCallback => {
webrtcClient.onRemoteStream(clientAcceptCallback);
node.webstrate.signal({
__internal_webrtc: true,
wantToListen: true,
recipientId: message.senderId,
newRecipientId,
senderId: ownId,
}, senderClientId);
return webrtcClient.stub;
});
});
return;
}
if (message.wantToListen) {
const callback = wantToStreamCallbacks.get(wid).get(message.recipientId);
if (callback) {
callback(senderClientId, (localStream, meta, onConnectCallback) => {
const webrtcClient = new WebRTCClient(message.newRecipientId, message.senderId,
senderClientId, node, { streamer: true });
webrtcClient.onConnect(onConnectCallback);
webrtcClients.get(wid).set(message.newRecipientId, webrtcClient);
webrtcClient.start(localStream);
return webrtcClient.stub;
});
}
return;
}
if (message.sdp || message.ice) {
const webrtcClient = webrtcClients.get(wid).get(message.recipientId);
if (webrtcClient) {
webrtcClient.handleMessage(message);
} else {
console.error('Got message for unknown recipient', message, webrtcClients);
}
return;
}
}
const wantToStreamCallbacks = new Map();
const wantToListenCallbacks = new Map();
const webrtcClients = new Map();
function WebRTCClient(ownId, recipientId, clientRecipientId, node, { listener, streamer }) {
let active = false, peerConnection, onRemoteStreamCallback, onConnectCallback, onCloseCallback;
const start = (localStream) => {
active = true;
peerConnection = new RTCPeerConnection(config.peerConnectionConfig);
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.oniceconnectionstatechange = gotStateChange;
if (streamer) {
localStream.getTracks().forEach((track)=>{
peerConnection.addTrack(track, localStream);
});
peerConnection.createOffer().then(createdDescription).catch(errorHandler);
}
if (listener) {
peerConnection.ontrack = gotRemoteStream;
}
};
const createdDescription = (description) => {
peerConnection.setLocalDescription(description).then(function() {
node.webstrate.signal({
sdp: peerConnection.localDescription,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}).catch(errorHandler);
};
const handleMessage = (message) => {
if(!peerConnection) start();
if (message.sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(message.sdp)).then(function() {
// Only create answers in response to offers
if(message.sdp.type == 'offer') {
peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
}
}).catch(errorHandler);
} else if(message.ice) {
peerConnection.addIceCandidate(new RTCIceCandidate(message.ice)).catch(errorHandler);
}
};
const gotIceCandidate = (event) => {
if(event.candidate != null) {
node.webstrate.signal({
ice: event.candidate,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}
};
const gotStateChange = event => {
switch (peerConnection.iceConnectionState) {
case 'connected':
onConnectCallback && onConnectCallback(event);
break;
case 'closed':
case 'disconnected':
case 'failed':
onCloseCallback && onCloseCallback(event);
webrtcClients.delete(ownId);
break;
}
};
const gotRemoteStream = (event) => {
onRemoteStreamCallback(event.streams[0]);
};
const errorHandler = (...error) => {
console.error(...error);
};
return {
id: ownId, active, listener, streamer, peerConnection,
onRemoteStream: callback => onRemoteStreamCallback = callback,
onConnect: callback => onConnectCallback = callback,
stub: {
isConnected: () => !!peerConnection &&
['checking', 'connected', 'completed'].includes(peerConnection.iceConnectionState),
close: () => peerConnection.close(),
onclose: callback => onCloseCallback = callback
},
start, handleMessage
};
}
function setupSignalStream(publicObject, eventObject) {
const wid = publicObject.id;
// Text nodes and transient elements won't have wids, meaning there's no way for us to signal on
// them, and thus it'd be pointless to add a signaling method and event.
if (!wid) return;
// Check if we already set up signal streaming
if(publicObject.signalStream != null) {
return;
}
webrtcClients.set(wid, new Map());
wantToStreamCallbacks.set(wid, new Map());
wantToListenCallbacks.set(wid, new Map());
// A mapping from user callbacks to our internal callbacks: fn -> fn.
//const callbacks = new Map();
const node = coreUtils.getElementByWid(wid);
Object.defineProperty(publicObject, 'signalStream', {
value: (callback) => {
signaling.subscribe(wid);
const ownId = coreUtils.randomString();
wantToStreamCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId
});
},
writable: false
});
Object.defineProperty(publicObject, 'stopStreamSignal', {
value: (callback) => {
// Find the ownId that was generated when adding this callback.
const streamers = Array.from(wantToStreamCallbacks.get(wid).entries());
const [ownId] = streamers.find(([, cb]) => cb === callback) || [];
if (ownId) {
wantToStreamCallbacks.get(wid).delete(ownId);
}
// "But what if somebody else is still listening? Then we shouldn't unsubscribe". Worry not,
// the signaling module keeps track of how many people are actually listening and doesn't
// unsubscribe unless we're the last/only listener.
signaling.unsubscribe(wid);
},
writable: false
});
eventObject.createEvent('signalStream', {
addListener: (callback) => {
if (wantToListenCallbacks.get(wid).size === 0) {
signaling.subscribe(wid);
}
const ownId = coreUtils.randomString();
wantToListenCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
requestFor | handleSignal | identifier_name |
signalStream.js | typeof message.__internal_webrtc === 'undefined' || senderClientId === clientId) {
return false;
}
// No reason to do all of this right now, so we use setImmediate.
setImmediate(handleSignal, payload.id, senderClientId, message);
return true;
});
function handleSignal(wid, senderClientId, message) {
const node = coreUtils.getElementByWid(wid);
if (message.requestForStreams) {
Array.from(wantToStreamCallbacks.get(wid).keys()).forEach(ownId => {
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId,
recipientId: message.senderId
}, senderClientId);
});
return;
}
// If we get a message from somebody wanting to stream, we ask all the people listening for
// streams. If they accept, we create the webrtcClient.
if (message.wantToStream) {
Array.from(wantToListenCallbacks.get(wid).values()).forEach(callback => {
const ownId = coreUtils.randomString();
// Other clients may be responding to the same recipientId/stream. Therefore, the streamer
// can't create a WebRTCClient with the original recipientId as these would override each
// other in the webrtcClients map (the streamer would be creating two entries with the same
// recipientId). Instead, the streamer needs to generate a new recipientId for each listener
// to use, so the streamer can distinguish the clients from each other. However, it's
// difficult to communicate this back to the listeners. Therefore, the listener instead
// creates a new recipientId, telling the streamer "In the future, I'll address you with this
// id instead".
const newRecipientId = coreUtils.randomString();
const webrtcClient = new WebRTCClient(ownId, newRecipientId, senderClientId,
node, { listener: true });
webrtcClients.get(wid).set(ownId, webrtcClient);
// If the client hasn't connected after 30 seconds, we conclude that it won't happen and clean
// up the client we created.
setTimeout(() => {
if (!webrtcClient.stub.isConnected()) {
webrtcClients.get(wid).delete(ownId);
}
}, 30 * 1000);
callback(senderClientId, 'META_DATA_IS_DEPRECATED', clientAcceptCallback => {
webrtcClient.onRemoteStream(clientAcceptCallback);
node.webstrate.signal({
__internal_webrtc: true,
wantToListen: true,
recipientId: message.senderId,
newRecipientId,
senderId: ownId,
}, senderClientId);
return webrtcClient.stub;
});
});
return;
}
if (message.wantToListen) {
const callback = wantToStreamCallbacks.get(wid).get(message.recipientId);
if (callback) {
callback(senderClientId, (localStream, meta, onConnectCallback) => {
const webrtcClient = new WebRTCClient(message.newRecipientId, message.senderId,
senderClientId, node, { streamer: true });
webrtcClient.onConnect(onConnectCallback);
webrtcClients.get(wid).set(message.newRecipientId, webrtcClient);
webrtcClient.start(localStream);
return webrtcClient.stub;
});
}
return;
}
if (message.sdp || message.ice) {
const webrtcClient = webrtcClients.get(wid).get(message.recipientId);
if (webrtcClient) {
webrtcClient.handleMessage(message);
} else {
console.error('Got message for unknown recipient', message, webrtcClients);
}
return;
}
}
const wantToStreamCallbacks = new Map();
const wantToListenCallbacks = new Map();
const webrtcClients = new Map();
function WebRTCClient(ownId, recipientId, clientRecipientId, node, { listener, streamer }) {
let active = false, peerConnection, onRemoteStreamCallback, onConnectCallback, onCloseCallback;
const start = (localStream) => {
active = true;
peerConnection = new RTCPeerConnection(config.peerConnectionConfig);
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.oniceconnectionstatechange = gotStateChange;
if (streamer) {
localStream.getTracks().forEach((track)=>{
peerConnection.addTrack(track, localStream);
});
peerConnection.createOffer().then(createdDescription).catch(errorHandler);
}
if (listener) {
peerConnection.ontrack = gotRemoteStream;
}
};
const createdDescription = (description) => {
peerConnection.setLocalDescription(description).then(function() {
node.webstrate.signal({
sdp: peerConnection.localDescription,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}).catch(errorHandler);
};
const handleMessage = (message) => {
if(!peerConnection) start();
if (message.sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(message.sdp)).then(function() {
// Only create answers in response to offers
if(message.sdp.type == 'offer') {
peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
}
}).catch(errorHandler);
} else if(message.ice) {
peerConnection.addIceCandidate(new RTCIceCandidate(message.ice)).catch(errorHandler);
}
};
const gotIceCandidate = (event) => {
if(event.candidate != null) {
node.webstrate.signal({
ice: event.candidate,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}
};
const gotStateChange = event => {
switch (peerConnection.iceConnectionState) {
case 'connected':
onConnectCallback && onConnectCallback(event);
break;
case 'closed':
case 'disconnected':
case 'failed':
onCloseCallback && onCloseCallback(event);
webrtcClients.delete(ownId);
break;
}
};
const gotRemoteStream = (event) => {
onRemoteStreamCallback(event.streams[0]);
};
const errorHandler = (...error) => {
console.error(...error);
};
return {
id: ownId, active, listener, streamer, peerConnection,
onRemoteStream: callback => onRemoteStreamCallback = callback,
onConnect: callback => onConnectCallback = callback,
stub: {
isConnected: () => !!peerConnection &&
['checking', 'connected', 'completed'].includes(peerConnection.iceConnectionState),
close: () => peerConnection.close(),
onclose: callback => onCloseCallback = callback
},
start, handleMessage
};
}
function setupSignalStream(publicObject, eventObject) {
const wid = publicObject.id;
// Text nodes and transient elements won't have wids, meaning there's no way for us to signal on
// them, and thus it'd be pointless to add a signaling method and event.
if (!wid) return;
// Check if we already set up signal streaming
if(publicObject.signalStream != null) {
return;
}
webrtcClients.set(wid, new Map());
wantToStreamCallbacks.set(wid, new Map());
wantToListenCallbacks.set(wid, new Map());
// A mapping from user callbacks to our internal callbacks: fn -> fn.
//const callbacks = new Map();
const node = coreUtils.getElementByWid(wid);
Object.defineProperty(publicObject, 'signalStream', {
value: (callback) => {
signaling.subscribe(wid);
const ownId = coreUtils.randomString();
wantToStreamCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId
});
},
writable: false
});
Object.defineProperty(publicObject, 'stopStreamSignal', {
value: (callback) => {
// Find the ownId that was generated when adding this callback.
const streamers = Array.from(wantToStreamCallbacks.get(wid).entries());
const [ownId] = streamers.find(([, cb]) => cb === callback) || [];
if (ownId) {
wantToStreamCallbacks.get(wid).delete(ownId);
}
// "But what if somebody else is still listening? Then we shouldn't unsubscribe". Worry not,
// the signaling module keeps track of how many people are actually listening and doesn't
// unsubscribe unless we're the last/only listener.
signaling.unsubscribe(wid);
},
writable: false
});
eventObject.createEvent('signalStream', {
addListener: (callback) => {
if (wantToListenCallbacks.get(wid).size === 0) |
const ownId = coreUtils.randomString();
wantToListenCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
request | {
signaling.subscribe(wid);
} | conditional_block |
signalStream.js | typeof message.__internal_webrtc === 'undefined' || senderClientId === clientId) {
return false;
}
// No reason to do all of this right now, so we use setImmediate.
setImmediate(handleSignal, payload.id, senderClientId, message);
return true;
});
function handleSignal(wid, senderClientId, message) {
const node = coreUtils.getElementByWid(wid);
if (message.requestForStreams) {
Array.from(wantToStreamCallbacks.get(wid).keys()).forEach(ownId => {
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId,
recipientId: message.senderId
}, senderClientId);
});
return;
}
// If we get a message from somebody wanting to stream, we ask all the people listening for
// streams. If they accept, we create the webrtcClient.
if (message.wantToStream) {
Array.from(wantToListenCallbacks.get(wid).values()).forEach(callback => {
const ownId = coreUtils.randomString();
// Other clients may be responding to the same recipientId/stream. Therefore, the streamer
// can't create a WebRTCClient with the original recipientId as these would override each
// other in the webrtcClients map (the streamer would be creating two entries with the same
// recipientId). Instead, the streamer needs to generate a new recipientId for each listener
// to use, so the streamer can distinguish the clients from each other. However, it's
// difficult to communicate this back to the listeners. Therefore, the listener instead
// creates a new recipientId, telling the streamer "In the future, I'll address you with this
// id instead".
const newRecipientId = coreUtils.randomString();
const webrtcClient = new WebRTCClient(ownId, newRecipientId, senderClientId,
node, { listener: true });
webrtcClients.get(wid).set(ownId, webrtcClient);
// If the client hasn't connected after 30 seconds, we conclude that it won't happen and clean
// up the client we created.
setTimeout(() => {
if (!webrtcClient.stub.isConnected()) {
webrtcClients.get(wid).delete(ownId);
}
}, 30 * 1000);
callback(senderClientId, 'META_DATA_IS_DEPRECATED', clientAcceptCallback => {
webrtcClient.onRemoteStream(clientAcceptCallback);
node.webstrate.signal({
__internal_webrtc: true,
wantToListen: true,
recipientId: message.senderId,
newRecipientId,
senderId: ownId,
}, senderClientId);
return webrtcClient.stub;
});
});
return;
}
if (message.wantToListen) {
const callback = wantToStreamCallbacks.get(wid).get(message.recipientId);
if (callback) {
callback(senderClientId, (localStream, meta, onConnectCallback) => {
const webrtcClient = new WebRTCClient(message.newRecipientId, message.senderId,
senderClientId, node, { streamer: true });
webrtcClient.onConnect(onConnectCallback);
webrtcClients.get(wid).set(message.newRecipientId, webrtcClient);
webrtcClient.start(localStream);
return webrtcClient.stub;
});
}
return;
}
if (message.sdp || message.ice) {
const webrtcClient = webrtcClients.get(wid).get(message.recipientId);
if (webrtcClient) {
webrtcClient.handleMessage(message);
} else {
console.error('Got message for unknown recipient', message, webrtcClients);
}
return;
}
}
const wantToStreamCallbacks = new Map();
const wantToListenCallbacks = new Map();
const webrtcClients = new Map();
function WebRTCClient(ownId, recipientId, clientRecipientId, node, { listener, streamer }) {
let active = false, peerConnection, onRemoteStreamCallback, onConnectCallback, onCloseCallback;
const start = (localStream) => {
active = true;
peerConnection = new RTCPeerConnection(config.peerConnectionConfig);
peerConnection.onicecandidate = gotIceCandidate;
peerConnection.oniceconnectionstatechange = gotStateChange;
if (streamer) {
localStream.getTracks().forEach((track)=>{
peerConnection.addTrack(track, localStream);
});
peerConnection.createOffer().then(createdDescription).catch(errorHandler);
}
if (listener) {
peerConnection.ontrack = gotRemoteStream;
}
};
const createdDescription = (description) => {
peerConnection.setLocalDescription(description).then(function() {
node.webstrate.signal({
sdp: peerConnection.localDescription,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}).catch(errorHandler);
};
const handleMessage = (message) => {
if(!peerConnection) start();
if (message.sdp) {
peerConnection.setRemoteDescription(new RTCSessionDescription(message.sdp)).then(function() {
// Only create answers in response to offers
if(message.sdp.type == 'offer') {
peerConnection.createAnswer().then(createdDescription).catch(errorHandler);
}
}).catch(errorHandler);
} else if(message.ice) {
peerConnection.addIceCandidate(new RTCIceCandidate(message.ice)).catch(errorHandler);
} | if(event.candidate != null) {
node.webstrate.signal({
ice: event.candidate,
__internal_webrtc: true,
senderId: ownId,
recipientId
}, clientRecipientId);
}
};
const gotStateChange = event => {
switch (peerConnection.iceConnectionState) {
case 'connected':
onConnectCallback && onConnectCallback(event);
break;
case 'closed':
case 'disconnected':
case 'failed':
onCloseCallback && onCloseCallback(event);
webrtcClients.delete(ownId);
break;
}
};
const gotRemoteStream = (event) => {
onRemoteStreamCallback(event.streams[0]);
};
const errorHandler = (...error) => {
console.error(...error);
};
return {
id: ownId, active, listener, streamer, peerConnection,
onRemoteStream: callback => onRemoteStreamCallback = callback,
onConnect: callback => onConnectCallback = callback,
stub: {
isConnected: () => !!peerConnection &&
['checking', 'connected', 'completed'].includes(peerConnection.iceConnectionState),
close: () => peerConnection.close(),
onclose: callback => onCloseCallback = callback
},
start, handleMessage
};
}
function setupSignalStream(publicObject, eventObject) {
const wid = publicObject.id;
// Text nodes and transient elements won't have wids, meaning there's no way for us to signal on
// them, and thus it'd be pointless to add a signaling method and event.
if (!wid) return;
// Check if we already set up signal streaming
if(publicObject.signalStream != null) {
return;
}
webrtcClients.set(wid, new Map());
wantToStreamCallbacks.set(wid, new Map());
wantToListenCallbacks.set(wid, new Map());
// A mapping from user callbacks to our internal callbacks: fn -> fn.
//const callbacks = new Map();
const node = coreUtils.getElementByWid(wid);
Object.defineProperty(publicObject, 'signalStream', {
value: (callback) => {
signaling.subscribe(wid);
const ownId = coreUtils.randomString();
wantToStreamCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
wantToStream: true,
senderId: ownId
});
},
writable: false
});
Object.defineProperty(publicObject, 'stopStreamSignal', {
value: (callback) => {
// Find the ownId that was generated when adding this callback.
const streamers = Array.from(wantToStreamCallbacks.get(wid).entries());
const [ownId] = streamers.find(([, cb]) => cb === callback) || [];
if (ownId) {
wantToStreamCallbacks.get(wid).delete(ownId);
}
// "But what if somebody else is still listening? Then we shouldn't unsubscribe". Worry not,
// the signaling module keeps track of how many people are actually listening and doesn't
// unsubscribe unless we're the last/only listener.
signaling.unsubscribe(wid);
},
writable: false
});
eventObject.createEvent('signalStream', {
addListener: (callback) => {
if (wantToListenCallbacks.get(wid).size === 0) {
signaling.subscribe(wid);
}
const ownId = coreUtils.randomString();
wantToListenCallbacks.get(wid).set(ownId, callback);
node.webstrate.signal({
__internal_webrtc: true,
requestForStreams | };
const gotIceCandidate = (event) => { | random_line_split |
gfsworkflow.py | 120', '126', '132', '138', '144', '150', '156', '162', '168']
# this is where the actual downloads happen. set the url, filepath, then download
subregions = {
'hispaniola': 'subregion=&leftlon=-75&rightlon=-68&toplat=20.5&bottomlat=17',
'centralamerica': 'subregion=&leftlon=-94.25&rightlon=-75.5&toplat=19.5&bottomlat=5.5',
}
for step in fc_steps:
url = 'https://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25.pl?file=gfs.t' + time + 'z.pgrb2.0p25.f' + step + \
'&lev_surface=on&var_APCP=on&' + subregions[region] + '&dir=%2Fgfs.' + fc_date + '%2F' + time
fc_timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
file_timestep = fc_timestamp + datetime.timedelta(hours=int(step))
filename_timestep = datetime.datetime.strftime(file_timestep, "%Y%m%d%H")
filename = filename_timestep + '.grb'
logging.info('downloading the file ' + filename)
filepath = os.path.join(gribsdir, filename)
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except requests.HTTPError as e:
errorcode = e.response.status_code
logging.info('\nHTTPError ' + str(errorcode) + ' downloading ' + filename + ' from\n' + url)
if errorcode == 404:
logging.info('The file was not found on the server, trying an older forecast time')
elif errorcode == 500:
|
return False
logging.info('Finished Downloads')
return True
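# Hedged illustration (example values, not part of the workflow): each grib is named after
# its valid time, so for a hypothetical cycle '2019061500' the '006' step is saved as
# fc_timestamp = datetime.datetime.strptime('2019061500', "%Y%m%d%H")
# file_timestep = fc_timestamp + datetime.timedelta(hours=6)
# file_timestep.strftime("%Y%m%d%H") + '.grb' # -> '2019061506.grb'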
def gfs_tiffs(threddspath, wrksppath, timestamp, region, model):
"""
Script to combine 6-hr accumulation grib files into 24-hr accumulation geotiffs.
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nStarting to process the ' + model + ' gribs into GeoTIFFs')
# declare the environment
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
gribs = os.path.join(threddspath, region, model, timestamp, 'gribs')
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
# if you already have gfs netcdfs in the netcdfs folder, quit the function
if not os.path.exists(gribs):
logging.info('There is no gribs folder, you must have already run this step. Skipping conversions')
return
# otherwise, remove anything in the folder before starting (in case there was a partial conversion)
else:
shutil.rmtree(netcdfs)
os.mkdir(netcdfs)
os.chmod(netcdfs, 0o777)
shutil.rmtree(tiffs)
os.mkdir(tiffs)
os.chmod(tiffs, 0o777)
# create a list of all the files of type grib and convert to a list of their file paths
files = os.listdir(gribs)
files = [grib for grib in files if grib.endswith('.grb')]
files.sort()
# Read raster dimensions only once to apply to all rasters
path = os.path.join(gribs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each 24-hr raster (west, south, east, north, width, height)
geotransform = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width, height)
# Add rasters together to form 24-hr raster
for i in files:
logging.info('working on file ' + i)
path = os.path.join(gribs, i)
src = rasterio.open(path)
file_array = src.read(1)
# using the last grib file for the day (path) convert it to a netcdf and set the variable to file_array
logging.info('opening grib file ' + path)
obj = xarray.open_dataset(path, engine='cfgrib', backend_kwargs={'filter_by_keys': {'typeOfLevel': 'surface'}})
logging.info('converting it to a netcdf')
ncname = i.replace('.grb', '.nc')
logging.info('saving it to the path ' + path)
ncpath = os.path.join(netcdfs, ncname)
obj.to_netcdf(ncpath, mode='w')
logging.info('converted')
logging.info('writing the correct values to the tp array')
nc = netCDF4.Dataset(ncpath, 'a')
nc['tp'][:] = file_array
nc.close()
logging.info('created a netcdf')
# Specify the GeoTIFF filepath
tif_filename = i.replace('grb', 'tif')
tif_filepath = os.path.join(tiffs, tif_filename)
# Save the 24-hr raster
with rasterio.open(
tif_filepath,
'w',
driver='GTiff',
height=file_array.shape[0],
width=file_array.shape[1],
count=1,
dtype=file_array.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform,
) as dst:
dst.write(file_array, 1)
logging.info('wrote it to a GeoTIFF\n')
# clear the gribs folder now that we're done with this
shutil.rmtree(gribs)
return
def resample(wrksppath, region, model):
"""
Script to resample rasters from .25 to .0025 degree in order for rasterstats to work
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nResampling the rasters for ' + region)
# Define app workspace and sub-paths
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
# Create directory for the resampled GeoTIFFs
if not os.path.exists(tiffs):
logging.info('There is no tiffs folder. You must have already resampled them. Skipping resampling')
return
# List all Resampled GeoTIFFs
files = os.listdir(tiffs)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# Read raster dimensions
path = os.path.join(tiffs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (west, south, east, north, width, height)
geotransform_res = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * 100, height * 100)
# Resample each GeoTIFF
for file in files:
path = os.path.join(tiffs, file)
logging.info(path)
with rasterio.open(path) as dataset:
data = dataset.read(
out_shape=(int(dataset.height * 100), int(dataset.width * 100)),
# Reduce 100 to 10 if using the whole globe
resampling=Resampling.nearest
)
# Convert new resampled array from 3D to 2D
data = numpy.squeeze(data, axis=0)
# Specify the filepath of the resampled raster
resample_filename = file.replace('.tif', '_resampled.tif')
resample_filepath = os.path.join(resampleds, resample_filename)
# Save the GeoTIFF
with rasterio.open(
resample_filepath,
'w',
driver='GTiff',
height=data.shape[0],
width=data.shape[1],
count=1,
dtype=data.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform_res,
) as dst:
dst.write(data, 1)
# delete the non-resampled tiffs now that we | logging.info('Probably a problem with the URL. Check the log and try the link') | conditional_block |
gfsworkflow.py | fc_date = datetime.datetime.strptime(timestamp, "%Y%m%d%H").strftime("%Y%m%d")
# This is the list of forecast timesteps for 5 days (6-hr increments). Download them all
fc_steps = ['006', '012', '018', '024', '030', '036', '042', '048', '054', '060', '066', '072', '078', '084',
'090', '096', '102', '108', '114', '120', '126', '132', '138', '144', '150', '156', '162', '168']
# this is where the actual downloads happen. set the url, filepath, then download
subregions = {
'hispaniola': 'subregion=&leftlon=-75&rightlon=-68&toplat=20.5&bottomlat=17',
'centralamerica': 'subregion=&leftlon=-94.25&rightlon=-75.5&toplat=19.5&bottomlat=5.5',
}
for step in fc_steps:
url = 'https://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25.pl?file=gfs.t' + time + 'z.pgrb2.0p25.f' + step + \
'&lev_surface=on&var_APCP=on&' + subregions[region] + '&dir=%2Fgfs.' + fc_date + '%2F' + time
fc_timestamp = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
file_timestep = fc_timestamp + datetime.timedelta(hours=int(step))
filename_timestep = datetime.datetime.strftime(file_timestep, "%Y%m%d%H")
filename = filename_timestep + '.grb'
logging.info('downloading the file ' + filename)
filepath = os.path.join(gribsdir, filename)
try:
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(filepath, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
except requests.HTTPError as e:
errorcode = e.response.status_code
logging.info('\nHTTPError ' + str(errorcode) + ' downloading ' + filename + ' from\n' + url)
if errorcode == 404:
logging.info('The file was not found on the server, trying an older forecast time')
elif errorcode == 500:
logging.info('Probably a problem with the URL. Check the log and try the link')
return False
logging.info('Finished Downloads')
return True
def gfs_tiffs(threddspath, wrksppath, timestamp, region, model):
"""
Script to combine 6-hr accumulation grib files into 24-hr accumulation geotiffs.
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nStarting to process the ' + model + ' gribs into GeoTIFFs')
# declare the environment
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
gribs = os.path.join(threddspath, region, model, timestamp, 'gribs')
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
# if you already have gfs netcdfs in the netcdfs folder, quit the function
if not os.path.exists(gribs):
logging.info('There is no gribs folder, you must have already run this step. Skipping conversions')
return
# otherwise, remove anything in the folder before starting (in case there was a partial conversion)
else:
shutil.rmtree(netcdfs)
os.mkdir(netcdfs)
os.chmod(netcdfs, 0o777)
shutil.rmtree(tiffs)
os.mkdir(tiffs)
os.chmod(tiffs, 0o777)
# create a list of all the files of type grib and convert to a list of their file paths
files = os.listdir(gribs)
files = [grib for grib in files if grib.endswith('.grb')]
files.sort()
# Read raster dimensions only once to apply to all rasters
path = os.path.join(gribs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each 24-hr raster (west, south, east, north, width, height)
geotransform = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width, height)
# Add rasters together to form 24-hr raster
for i in files:
logging.info('working on file ' + i)
path = os.path.join(gribs, i)
src = rasterio.open(path)
file_array = src.read(1)
# using the last grib file for the day (path) convert it to a netcdf and set the variable to file_array
logging.info('opening grib file ' + path)
obj = xarray.open_dataset(path, engine='cfgrib', backend_kwargs={'filter_by_keys': {'typeOfLevel': 'surface'}})
logging.info('converting it to a netcdf')
ncname = i.replace('.grb', '.nc')
logging.info('saving it to the path ' + path)
ncpath = os.path.join(netcdfs, ncname)
obj.to_netcdf(ncpath, mode='w')
logging.info('converted')
logging.info('writing the correct values to the tp array')
nc = netCDF4.Dataset(ncpath, 'a')
nc['tp'][:] = file_array
nc.close()
logging.info('created a netcdf')
# Specify the GeoTIFF filepath
tif_filename = i.replace('grb', 'tif')
tif_filepath = os.path.join(tiffs, tif_filename)
# Save the 24-hr raster
with rasterio.open(
tif_filepath,
'w',
driver='GTiff',
height=file_array.shape[0],
width=file_array.shape[1],
count=1,
dtype=file_array.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform,
) as dst:
dst.write(file_array, 1)
logging.info('wrote it to a GeoTIFF\n')
# clear the gribs folder now that we're done with this
shutil.rmtree(gribs)
return
def resample(wrksppath, region, model):
"""
Script to resample rasters from .25 to .0025 degree in order for rasterstats to work
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nResampling the rasters for ' + region)
# Define app workspace and sub-paths
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
# Create directory for the resampled GeoTIFFs
if not os.path.exists(tiffs):
logging.info('There is no tiffs folder. You must have already resampled them. Skipping resampling')
return
# List all Resampled GeoTIFFs
files = os.listdir(tiffs)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# Read raster dimensions
path = os.path.join(tiffs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (west, south, east, north, width, height)
geotransform_res = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * 100, height * 100)
# Resample each GeoTIFF
for file in files:
path = os.path.join(tiffs, file)
logging.info(path)
with rasterio.open(path) as dataset:
data = dataset.read(
out_shape=(int(dataset.height * 100), int(dataset.width * 100)),
# Reduce 100 to 10 if using the whole globe
resampling=Resampling.nearest
)
# Convert new resampled array from | time = datetime.datetime.strptime(timestamp, "%Y%m%d%H").strftime("%H") | random_line_split |
|
gfsworkflow.py | datetime, os, numpy, rasterio
"""
logging.info('\nStarting to process the ' + model + ' gribs into GeoTIFFs')
# declare the environment
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
gribs = os.path.join(threddspath, region, model, timestamp, 'gribs')
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
# if you already have gfs netcdfs in the netcdfs folder, quit the function
if not os.path.exists(gribs):
logging.info('There is no gribs folder, you must have already run this step. Skipping conversions')
return
# otherwise, remove anything in the folder before starting (in case there was a partial conversion)
else:
shutil.rmtree(netcdfs)
os.mkdir(netcdfs)
os.chmod(netcdfs, 0o777)
shutil.rmtree(tiffs)
os.mkdir(tiffs)
os.chmod(tiffs, 0o777)
# create a list of all the files of type grib and convert to a list of their file paths
files = os.listdir(gribs)
files = [grib for grib in files if grib.endswith('.grb')]
files.sort()
# Read raster dimensions only once to apply to all rasters
path = os.path.join(gribs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each 24-hr raster (west, south, east, north, width, height)
geotransform = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width, height)
# Add rasters together to form 24-hr raster
for i in files:
logging.info('working on file ' + i)
path = os.path.join(gribs, i)
src = rasterio.open(path)
file_array = src.read(1)
# using the last grib file for the day (path) convert it to a netcdf and set the variable to file_array
logging.info('opening grib file ' + path)
obj = xarray.open_dataset(path, engine='cfgrib', backend_kwargs={'filter_by_keys': {'typeOfLevel': 'surface'}})
logging.info('converting it to a netcdf')
ncname = i.replace('.grb', '.nc')
logging.info('saving it to the path ' + path)
ncpath = os.path.join(netcdfs, ncname)
obj.to_netcdf(ncpath, mode='w')
logging.info('converted')
logging.info('writing the correct values to the tp array')
nc = netCDF4.Dataset(ncpath, 'a')
nc['tp'][:] = file_array
nc.close()
logging.info('created a netcdf')
# Specify the GeoTIFF filepath
tif_filename = i.replace('grb', 'tif')
tif_filepath = os.path.join(tiffs, tif_filename)
# Save the 24-hr raster
with rasterio.open(
tif_filepath,
'w',
driver='GTiff',
height=file_array.shape[0],
width=file_array.shape[1],
count=1,
dtype=file_array.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform,
) as dst:
dst.write(file_array, 1)
logging.info('wrote it to a GeoTIFF\n')
# clear the gribs folder now that we're done with this
shutil.rmtree(gribs)
return
def resample(wrksppath, region, model):
"""
Script to resample rasters from .25 to .0025 degree in order for rasterstats to work
Dependencies: datetime, os, numpy, rasterio
"""
logging.info('\nResampling the rasters for ' + region)
# Define app workspace and sub-paths
tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
# Create directory for the resampled GeoTIFFs
if not os.path.exists(tiffs):
logging.info('There is no tiffs folder. You must have already resampled them. Skipping resampling')
return
# List all Resampled GeoTIFFs
files = os.listdir(tiffs)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# Read raster dimensions
path = os.path.join(tiffs, files[0])
raster_dim = rasterio.open(path)
width = raster_dim.width
height = raster_dim.height
lon_min = raster_dim.bounds.left
lon_max = raster_dim.bounds.right
lat_min = raster_dim.bounds.bottom
lat_max = raster_dim.bounds.top
# Geotransform for each resampled raster (west, south, east, north, width, height)
geotransform_res = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * 100, height * 100)
# Resample each GeoTIFF
for file in files:
path = os.path.join(tiffs, file)
logging.info(path)
with rasterio.open(path) as dataset:
data = dataset.read(
out_shape=(int(dataset.height * 100), int(dataset.width * 100)),
# Reduce 100 to 10 if using the whole globe
resampling=Resampling.nearest
)
# Convert new resampled array from 3D to 2D
data = numpy.squeeze(data, axis=0)
# Specify the filepath of the resampled raster
resample_filename = file.replace('.tif', '_resampled.tif')
resample_filepath = os.path.join(resampleds, resample_filename)
# Save the GeoTIFF
with rasterio.open(
resample_filepath,
'w',
driver='GTiff',
height=data.shape[0],
width=data.shape[1],
count=1,
dtype=data.dtype,
nodata=numpy.nan,
crs='+proj=latlong',
transform=geotransform_res,
) as dst:
dst.write(data, 1)
# delete the non-resampled tiffs now that we don't need them
shutil.rmtree(tiffs)
return
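# Hedged note (not in the original): the factor of 100 used in out_shape above is what takes
# the native 0.25 degree GFS cells down to the 0.0025 degree grid named in the docstring
# (0.25 / 100 == 0.0025), so e.g. the 28 x 14 cell Hispaniola tile becomes 2800 x 1400 cells.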
def zonal_statistics(wrksppath, timestamp, region, model):
"""
Script to calculate average precip over FFGS polygon shapefile
Dependencies: datetime, os, pandas, rasterstats
"""
logging.info('\nDoing Zonal Statistics on ' + region)
# Define app workspace and sub-paths
resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')
shp_path = os.path.join(wrksppath, region, 'shapefiles', 'ffgs_' + region + '.shp')
stat_file = os.path.join(wrksppath, region, model + 'results.csv')
# check that there are resampled tiffs to do zonal statistics on
if not os.path.exists(resampleds):
logging.info('There are no resampled tiffs to do zonal statistics on. Skipping Zonal Statistics')
return
# List all Resampled GeoTIFFs
files = os.listdir(resampleds)
files = [tif for tif in files if tif.endswith('.tif')]
files.sort()
# do zonal statistics for each resampled tiff file and put it in the stats dataframe
stats_df = pd.DataFrame()
for i in range(len(files)):
logging.info('starting zonal statistics for ' + files[i])
ras_path = os.path.join(resampleds, files[i])
stats = rasterstats.zonal_stats(
shp_path,
ras_path,
stats=['count', 'max', 'mean'],
geojson_out=True
)
timestep = files[i][:10]
# for each stat that you get out, write it to the dataframe
logging.info('writing the statistics for this file to the dataframe')
for j in range(len(stats)):
temp_data = stats[j]['properties']
temp_data.update({'Forecast Timestamp': timestamp})
temp_data.update({'Timestep': timestep})
temp_df = pd.DataFrame([temp_data])
stats_df = stats_df.append(temp_df, ignore_index=True)
# write the resulting dataframe to a csv
logging.info('\ndone with zonal statistics, rounding values, writing to a csv file')
stats_df = stats_df.round({'max': 1, 'mean': 1})
stats_df.to_csv(stat_file, index=False)
# delete the resampled tiffs now that we dont need them
logging.info('deleting the resampled tiffs directory')
shutil.rmtree(resampleds)
return
def | nc_georeference | identifier_name |
|
gfsworkflow.py | logging.info('\ndone with zonal statistics, rounding values, writing to a csv file')
stats_df = stats_df.round({'max': 1, 'mean': 1})
stats_df.to_csv(stat_file, index=False)
# delete the resampled tiffs now that we dont need them
logging.info('deleting the resampled tiffs directory')
shutil.rmtree(resampleds)
return
def nc_georeference(threddspath, timestamp, region, model):
"""
Description: Intended to make a THREDDS data server compatible netcdf file out of an incorrectly structured
netcdf file.
Author: Riley Hales, 2019
Dependencies: netCDF4, os, datetime
see github/rileyhales/datatools for more details
"""
logging.info('\nProcessing the netCDF files')
# setting the environment file paths
netcdfs = os.path.join(threddspath, region, model, timestamp, 'netcdfs')
processed = os.path.join(threddspath, region, model, timestamp, 'processed')
# if you already have processed netcdfs files, skip this and quit the function
if not os.path.exists(netcdfs):
logging.info('There are no netcdfs to be converted. Skipping netcdf processing.')
return
# otherwise, remove anything in the folder before starting (in case there was a partial processing)
else:
shutil.rmtree(processed)
os.mkdir(processed)
os.chmod(processed, 0o777)
# list the files that need to be converted
net_files = os.listdir(netcdfs)
files = [file for file in net_files if file.endswith('.nc')]
logging.info('There are ' + str(len(files)) + ' compatible files.')
# read the first file that we'll copy data from in the next blocks of code
logging.info('Preparing the reference file')
path = os.path.join(netcdfs, net_files[0])
netcdf_obj = netCDF4.Dataset(path, 'r', clobber=False, diskless=True)
# get a dictionary of the dimensions and their size and rename the north/south and east/west ones
dimensions = {}
for dimension in netcdf_obj.dimensions.keys():
dimensions[dimension] = netcdf_obj.dimensions[dimension].size
dimensions['lat'] = dimensions['latitude']
dimensions['lon'] = dimensions['longitude']
dimensions['time'] = 1
del dimensions['latitude'], dimensions['longitude']
# get a list of the variables and remove the one's i'm going to 'manually' correct
variables = netcdf_obj.variables
del variables['valid_time'], variables['step'], variables['latitude'], variables['longitude'], variables['surface']
variables = variables.keys()
# min lat and lon and the interval between values (these are static values
netcdf_obj.close()
# this is where the files start getting copied
for file in files:
logging.info('Working on file ' + str(file))
openpath = os.path.join(netcdfs, file)
savepath = os.path.join(processed, 'processed_' + file)
# open the file to be copied
original = netCDF4.Dataset(openpath, 'r', clobber=False, diskless=True)
duplicate = netCDF4.Dataset(savepath, 'w', clobber=True, format='NETCDF4', diskless=False)
# set the global netcdf attributes - important for georeferencing
duplicate.setncatts(original.__dict__)
# specify dimensions from what we copied before
for dimension in dimensions:
duplicate.createDimension(dimension, dimensions[dimension])
# 'Manually' create the dimensions that need to be set carefully
duplicate.createVariable(varname='lat', datatype='f4', dimensions='lat')
duplicate.createVariable(varname='lon', datatype='f4', dimensions='lon')
# create the lat and lon values as a 1D array
duplicate['lat'][:] = original['latitude'][:]
duplicate['lon'][:] = original['longitude'][:]
# set the attributes for lat and lon (except fill value, you just can't copy it)
for attr in original['latitude'].__dict__:
if attr != "_FillValue":
duplicate['lat'].setncattr(attr, original['latitude'].__dict__[attr])
for attr in original['longitude'].__dict__:
if attr != "_FillValue":
duplicate['lon'].setncattr(attr, original['longitude'].__dict__[attr])
# copy the rest of the variables
hour = 6
for variable in variables:
# check to use the lat/lon dimension names
dimension = original[variable].dimensions
if 'latitude' in dimension:
dimension = list(dimension)
dimension.remove('latitude')
dimension.append('lat')
dimension = tuple(dimension)
if 'longitude' in dimension:
dimension = list(dimension)
dimension.remove('longitude')
dimension.append('lon')
dimension = tuple(dimension)
if len(dimension) == 2:
dimension = ('time', 'lat', 'lon')
if variable == 'time':
dimension = ('time',)
# create the variable
duplicate.createVariable(varname=variable, datatype='f4', dimensions=dimension)
# copy the arrays of data and set the timestamp/properties
date = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
date = datetime.datetime.strftime(date, "%Y-%m-%d %H:00:00")
if variable == 'time':
duplicate[variable][:] = [hour]
hour = hour + 6
duplicate[variable].long_name = original[variable].long_name
duplicate[variable].units = "hours since " + date
duplicate[variable].axis = "T"
# also set the begin date of this data
duplicate[variable].begin_date = timestamp
if variable == 'lat':
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "Y"
if variable == 'lon':
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "X"
else:
duplicate[variable][:] = original[variable][:]
duplicate[variable].axis = "lat lon"
duplicate[variable].long_name = original[variable].long_name
duplicate[variable].begin_date = timestamp
duplicate[variable].units = original[variable].units
# close the files, delete the one you just did, start again
original.close()
duplicate.sync()
duplicate.close()
# delete the netcdfs now that we're done with them triggering future runs to skip this step
shutil.rmtree(netcdfs)
logging.info('Finished File Conversions')
return
def new_ncml(threddspath, timestamp, region, model):
logging.info('\nWriting a new ncml file for this date')
# create a new ncml file by filling in the template with the right dates and writing to a file
ncml = os.path.join(threddspath, region, model, 'wms.ncml')
date = datetime.datetime.strptime(timestamp, "%Y%m%d%H")
date = datetime.datetime.strftime(date, "%Y-%m-%d %H:00:00")
with open(ncml, 'w') as file:
file.write(
'<netcdf xmlns="http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">\n'
' <variable name="time" type="int" shape="time">\n'
' <attribute name="units" value="hours since ' + date + '"/>\n'
' <attribute name="_CoordinateAxisType" value="Time" />\n'
' <values start="6" increment="6" />\n'
' </variable>\n'
' <aggregation dimName="time" type="joinExisting" recheckEvery="1 hour">\n'
' <scan location="' + timestamp + '/processed/"/>\n'
' </aggregation>\n'
'</netcdf>'
)
logging.info('Wrote New .ncml')
return
def new_colorscales(wrksppath, region, model):
# set the environment
| logging.info('\nGenerating a new color scale csv for the ' + model + ' results')
colorscales = os.path.join(wrksppath, region, model + 'colorscales.csv')
results = os.path.join(wrksppath, region, model + 'results.csv')
logging.info(results)
answers = pd.DataFrame(columns=['cat_id', 'cum_mean', 'mean', 'max'])
res_df = pd.read_csv(results, index_col=False)[['cat_id', 'mean', 'max']]
ids = res_df.cat_id.unique()
for catid in ids:
df = res_df.query("cat_id == @catid")
cum_mean = round(sum(df['mean'].values), 1)
mean = max(df['mean'].values)
maximum = max(df['max'].values)
answers = answers.append(
{'cat_id': catid, 'cum_mean': cum_mean, 'mean': mean, 'max': maximum},
ignore_index=True)
answers.to_csv(colorscales, mode='w', index=False)
logging.info('Wrote new rules to csv')
return | identifier_body |
|
frontend.rs | pub enum Authentication {
Failed,
InProgress,
Authenticated(String),
}
struct AccessTokenParameter<'a> {
valid: bool,
client_id: Option<Cow<'a, str>>,
redirect_url: Option<Cow<'a, str>>,
grant_type: Option<Cow<'a, str>>,
code: Option<Cow<'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct PreparedAuthorization<'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> { self.method.as_ref().map(|c| c.as_ref().into()) }
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
let params = GrantFlow::create_valid_params(req)
.unwrap_or(AccessTokenParameter::invalid());
Ok(PreparedGrant { params: params, req: PhantomData })
}
| #[derive(Clone)] | random_line_split |
|
frontend.rs | <'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct PreparedAuthorization<'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> |
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
let params = GrantFlow::create_valid_params(req)
.unwrap_or(AccessTokenParameter::invalid());
Ok(PreparedGrant { params: params, req: PhantomData })
}
fn create_valid_params<'a, W: WebRequest>(req: &'a mut W) -> Option<AccessTokenParameter<'a>> {
let authorization = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(ref header)) => {
if !header.starts_with("Basic ") {
| { self.method.as_ref().map(|c| c.as_ref().into()) } | identifier_body |
frontend.rs | <'a, str>>,
authorization: Option<(String, Vec<u8>)>,
}
struct GuardParameter<'a> {
valid: bool,
token: Option<Cow<'a, str>>,
}
/// Abstraction of web requests with several different abstractions and constructors needed by this
/// frontend. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error: From<OAuthError>;
type Response: WebResponse<Error=Self::Error>;
/// Retrieve a parsed version of the url query. An Err return value indicates a malformed query
/// or an otherwise malformed WebRequest. Note that an empty query should result in
/// `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<HashMap<String, Vec<String>>, ()>;
/// Retriev the parsed `application/x-form-urlencoded` body of the request. An Err value
/// indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<&HashMap<String, Vec<String>>, ()>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, ()>;
}
/// Response representation into which the Request is transformed by the code_grant types.
pub trait WebResponse where Self: Sized {
/// The error generated when trying to construct an unhandled or invalid response.
type Error: From<OAuthError>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(url: Url) -> Result<Self, Self::Error>;
/// A pure text response with no special media type set.
fn text(text: &str) -> Result<Self, Self::Error>;
/// Json repsonse data, with media type `aplication/json.
fn json(data: &str) -> Result<Self, Self::Error>;
/// Construct a redirect for the error. Here the response may choose to augment the error with
/// additional information (such as help websites, description strings), hence the default
/// implementation which does not do any of that.
fn redirect_error(target: ErrorUrl) -> Result<Self, Self::Error> {
Self::redirect(target.into())
}
/// Set the response status to 400
fn as_client_error(self) -> Result<Self, Self::Error>;
/// Set the response status to 401
fn as_unauthorized(self) -> Result<Self, Self::Error>;
/// Add an Authorization header
fn with_authorization(self, kind: &str) -> Result<Self, Self::Error>;
}
pub trait OwnerAuthorizer {
type Request: WebRequest;
fn get_owner_authorization(&self, &mut Self::Request, &PreGrant)
-> Result<(Authentication, <Self::Request as WebRequest>::Response), <Self::Request as WebRequest>::Error>;
}
pub struct AuthorizationFlow;
pub struct | <'l, Req> where
Req: WebRequest + 'l,
{
request: &'l mut Req,
urldecoded: AuthorizationParameter<'l>,
}
fn extract_parameters(params: HashMap<String, Vec<String>>) -> AuthorizationParameter<'static> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<&str, &str>>();
AuthorizationParameter{
valid: true,
method: map.get("response_type").map(|method| method.to_string().into()),
client_id: map.get("client_id").map(|client| client.to_string().into()),
scope: map.get("scope").map(|scope| scope.to_string().into()),
redirect_url: map.get("redirect_url").map(|url| url.to_string().into()),
state: map.get("state").map(|state| state.to_string().into()),
}
}
impl<'s> CodeRequest for AuthorizationParameter<'s> {
fn valid(&self) -> bool { self.valid }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.as_ref().map(|c| c.as_ref().into()) }
fn scope(&self) -> Option<Cow<str>> { self.scope.as_ref().map(|c| c.as_ref().into()) }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.as_ref().map(|c| c.as_ref().into()) }
fn state(&self) -> Option<Cow<str>> { self.state.as_ref().map(|c| c.as_ref().into()) }
fn method(&self) -> Option<Cow<str>> { self.method.as_ref().map(|c| c.as_ref().into()) }
}
impl<'s> AuthorizationParameter<'s> {
fn invalid() -> Self {
AuthorizationParameter { valid: false, method: None, client_id: None, scope: None,
redirect_url: None, state: None }
}
}
impl AuthorizationFlow {
/// Idempotent data processing, checks formats.
pub fn prepare<W: WebRequest>(incoming: &mut W) -> Result<PreparedAuthorization<W>, W::Error> {
let urldecoded = incoming.query()
.map(extract_parameters)
.unwrap_or_else(|_| AuthorizationParameter::invalid());
Ok(PreparedAuthorization{request: incoming, urldecoded})
}
pub fn handle<'c, Req>(granter: CodeRef<'c>, prepared: PreparedAuthorization<'c, Req>, page_handler: &OwnerAuthorizer<Request=Req>)
-> Result<Req::Response, Req::Error> where
Req: WebRequest,
{
let PreparedAuthorization { request: req, urldecoded } = prepared;
let negotiated = match granter.negotiate(&urldecoded) {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
let authorization = match page_handler.get_owner_authorization(req, negotiated.pre_grant())? {
(Authentication::Failed, _)
=> negotiated.deny(),
(Authentication::InProgress, response)
=> return Ok(response),
(Authentication::Authenticated(owner), _)
=> negotiated.authorize(owner.into()),
};
let redirect_to = match authorization {
Err(CodeError::Ignore) => return Err(OAuthError::InternalCodeError().into()),
Err(CodeError::Redirect(url)) => return Req::Response::redirect_error(url),
Ok(v) => v,
};
Req::Response::redirect(redirect_to)
}
}
pub struct GrantFlow;
pub struct PreparedGrant<'l, Req> where
Req: WebRequest + 'l,
{
params: AccessTokenParameter<'l>,
req: PhantomData<Req>,
}
fn extract_access_token<'l>(params: &'l HashMap<String, Vec<String>>) -> AccessTokenParameter<'l> {
let map = params.iter()
.filter(|&(_, v)| v.len() == 1)
.map(|(k, v)| (k.as_str(), v[0].as_str()))
.collect::<HashMap<_, _>>();
AccessTokenParameter {
valid: true,
client_id: map.get("client_id").map(|v| (*v).into()),
code: map.get("code").map(|v| (*v).into()),
redirect_url: map.get("redirect_url").map(|v| (*v).into()),
grant_type: map.get("grant_type").map(|v| (*v).into()),
authorization: None,
}
}
impl<'l> AccessTokenRequest for AccessTokenParameter<'l> {
fn valid(&self) -> bool { self.valid }
fn code(&self) -> Option<Cow<str>> { self.code.clone() }
fn client_id(&self) -> Option<Cow<str>> { self.client_id.clone() }
fn redirect_url(&self) -> Option<Cow<str>> { self.redirect_url.clone() }
fn grant_type(&self) -> Option<Cow<str>> { self.grant_type.clone() }
fn authorization(&self) -> Option<(Cow<str>, Cow<[u8]>)> {
match self.authorization {
None => None,
Some((ref id, ref pass))
=> Some((id.as_str().into(), pass.as_slice().into())),
}
}
}
impl<'l> AccessTokenParameter<'l> {
fn invalid() -> Self {
AccessTokenParameter { valid: false, code: None, client_id: None, redirect_url: None,
grant_type: None, authorization: None }
}
}
impl GrantFlow {
pub fn prepare<W: WebRequest>(req: &mut W) -> Result<PreparedGrant<W>, W::Error> {
let params = GrantFlow::create_valid_params(req)
.unwrap_or(AccessTokenParameter::invalid());
Ok(PreparedGrant { params: params, req: PhantomData })
}
fn create_valid_params<'a, W: WebRequest>(req: &'a mut W) -> Option<AccessTokenParameter<'a>> {
let authorization = match req.authheader() {
Err(_) => return None,
Ok(None) => None,
Ok(Some(ref header)) => {
if !header.starts_with("Basic ") {
| PreparedAuthorization | identifier_name |
wx.py | [(block, [])])
])
else:
return ({'type': 'Anchor', 'point': point}, [])
elif macro in layer_macros:
return ({'type': 'Layer', 'level': macro.upper()}, children)
elif macro == 'anchor':
anchor = {'type': 'Anchor'}
for key in ('point', 'relativePoint', 'relativeTo'):
if key in block:
anchor[key] = block[key]
dimension = {}
if 'x' in block:
dimension['x'] = block['x']
if 'y' in block:
dimension['y'] = block['y']
children = []
if dimension:
dimension['type'] = 'AbsDimension'
children.append(({'type': 'Offset'}, [(dimension, [])]))
return (anchor, children)
elif macro == 'color':
if not (3 <= len(args) <= 4):
raise ValueError()
block.update(type='Color', r=args[0], g=args[1], b=args[2])
if len(args) == 4:
block['a'] = args[3]
return (block, [])
elif macro == 'hitrectinsets':
if len(args) != 4:
raise ValueError()
return ({'type': 'HitRectInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
elif macro in ('normalfont', 'highlightfont'):
block['type'] = block['type'][1:]
if 'inherits' in block:
if 'style' not in block:
block['style'] = block['inherits']
del block['inherits']
for arg in args:
if arg in ('LEFT', 'CENTER', 'RIGHT'):
if 'justifyH' not in block:
block['justifyH'] = arg
elif arg in ('TOP', 'MIDDLE', 'BOTTOM'):
if 'justifyV' not in block:
block['justifyV'] = arg
return (block, [])
elif macro == 'pushedtextoffset':
block['type'] = 'AbsDimension'
if len(args) == 2 and 'x' not in block and 'y' not in block:
block.update(x=args[0], y=args[1])
return ({'type': 'PushedTextOffset'}, [(block, [])])
elif macro == 'size':
block['type'] = 'AbsDimension'
if args and 'x' not in block and 'y' not in block:
block['x'] = args[0]
if len(args) >= 2:
block['y'] = args[1]
return ({'type': 'Size'}, [(block, [])])
elif macro == 'texcoords':
if len(args) != 4:
raise ValueError()
return ({'type': 'TexCoords', 'left': args[0], 'right': args[1], 'top': args[2], 'bottom': args[3]}, [])
elif macro == 'textinsets':
if len(args) != 4:
raise ValueError('textinsets')
return ({'type': 'TextInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
else:
raise ValueError(macro)
colon = ~Token(':')
comma = ~Token(',')
double_quoted_string = Token(r'"[^"]*"') > (lambda v: v[0].strip('"'))
integer = Token(Integer())
real = Token(Real())
single_quoted_string = Token(r"'[^']*'") > (lambda v: v[0].strip("'"))
token = Token(Regexp('[!%\$A-Za-z_][A-Za-z0-9_]*'))
value = double_quoted_string | integer | real | single_quoted_string | token
keyvalue = token & colon & value > (lambda v: (v[0], v[1]))
parent = ~Token('\^') & token > (lambda v: ('parent', v[0]))
parentkey = ~Token('\.') & token > (lambda v: ('parentKey', v[0]))
flag = Token(Any('+-')) & token > (lambda v: (v[1], 'true' if v[0] == '+' else 'false'))
def _is_number(value):
try:
int(value)
except ValueError:
try:
float(value)
except ValueError:
return False
return True
def _parse_arguments(tokens):
arguments = {}
if not tokens:
return arguments
if isinstance(tokens[0], basestring) and tokens[0] not in implicit_flags and not _is_number(tokens[0]):
arguments['inherits'] = tokens.pop(0)
for token in tokens:
if isinstance(token, tuple):
arguments[token[0]] = token[1]
elif token in implicit_flags:
arguments[token] = 'true'
else:
if '__args__' not in arguments:
arguments['__args__'] = []
arguments['__args__'].append(token)
return arguments
def _parse_script_tokens(tokens):
offset = 1
if tokens[0][1] == ' ':
offset = 2
return '\n'.join([token[offset:] for token in tokens])
candidates = flag | keyvalue | parent | parentkey | token | value
arguments = ~Token('\(') & Extend(candidates[:, comma]) & ~Token('\)') > _parse_arguments
declaration = token[1:] & arguments[0:1]
assignment_line = Line(Token('%[A-Za-z]+') & ~Token('=') & value) > (lambda v: {v[0]: v[1]})
blank_line = ~Line(Empty(), indent=False)
comment_line = ~Line(Token('#.*'), indent=False)
declaration_line = Line(declaration)
script_line = Line(Token('\*[^\n]*'))
def _parse_block(tokens):
equivalent = frame_type_equivalents.get(tokens[0])
if equivalent:
block = {'type': equivalent[0], 'inherits': equivalent[1]}
else:
block = {'type': tokens[0]}
print 'parsing ' + tokens[0]
i = 1
if len(tokens) > i and isinstance(tokens[i], basestring) and not tokens[i].startswith('*'):
block['name'] = tokens[i]
print ' name = ' + block['name']
i += 1
if len(tokens) > i and isinstance(tokens[i], dict):
block.update(tokens[i])
if block.get('inherits') == 'NoInherit':
del block['inherits']
i += 1
if len(tokens) > i and isinstance(tokens[i], basestring) and tokens[i].startswith('*'):
block['script'] = _parse_script_tokens(tokens[i:])
#block['script'] = '\n'.join(token.lstrip('*') for token in tokens[i:])
return (block, [])
children = tokens[i:]
if block['type'].startswith('!'):
macro = block['type'].lstrip('!').lower()
block, children = _evaluate_macro(macro, block, children)
if children and block['type'] != 'ScrollChild':
anchors, frames, layers, scripts, remaining = [], [], [], [], []
for child in children:
type = child[0]['type']
if type == 'Anchor':
anchors.append(child)
elif type in frame_types:
frames.append(child)
elif type == 'Layer':
layers.append(child)
elif type.startswith('On'):
scripts.append(child)
else:
remaining.append(child)
if anchors:
remaining.append(({'type': 'Anchors'}, anchors))
if frames:
remaining.append(({'type': 'Frames'}, frames))
if layers:
remaining.append(({'type': 'Layers'}, layers))
if scripts:
remaining.append(({'type': 'Scripts'}, scripts))
children = remaining
return (block, children)
block = Delayed()
line = Or(
blank_line,
block,
comment_line,
declaration_line > _parse_block,
script_line
)
block += (Line(declaration & colon) & Block(line[1:])) > _parse_block
lines = assignment_line | line
source = lines[:] & Eos()
source.config.no_memoize()
source.config.compile_to_dfa()
source.config.lines(block_policy=to_right)
parser = source.get_parse()
def construct_element(block, children):
element = Element(block.pop('type'))
for key, value in block.iteritems():
if key not in ('__args__', 'script'):
element.set(key, value)
for child in children:
element.append(construct_element(*child))
if 'script' in block:
element.text = '\n%s\n' % block['script']
return element
script_pattern = re.compile(r'(?ms)(^[ ]*<On[^>]+>.*?<\/On[^>]+>)')
def _format_script(match):
script = match.group(1)
indent = ' ' * script.find('<')
lines = script.split('\n')
for i in range(1, len(lines) - 1):
lines[i] = indent + ' ' + lines[i]
lines[-1] = indent + lines[-1]
return '\n'.join(lines)
def | construct | identifier_name |
|
wx.py | not (3 <= len(args) <= 4):
raise ValueError()
block.update(type='Color', r=args[0], g=args[1], b=args[2])
if len(args) == 4:
block['a'] = args[3]
return (block, [])
elif macro == 'hitrectinsets':
if len(args) != 4:
raise ValueError()
return ({'type': 'HitRectInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
elif macro in ('normalfont', 'highlightfont'):
block['type'] = block['type'][1:]
if 'inherits' in block:
if 'style' not in block:
block['style'] = block['inherits']
del block['inherits']
for arg in args:
if arg in ('LEFT', 'CENTER', 'RIGHT'):
if 'justifyH' not in block:
block['justifyH'] = arg
elif arg in ('TOP', 'MIDDLE', 'BOTTOM'):
if 'justifyV' not in block:
block['justifyV'] = arg
return (block, [])
elif macro == 'pushedtextoffset':
block['type'] = 'AbsDimension'
if len(args) == 2 and 'x' not in block and 'y' not in block:
block.update(x=args[0], y=args[1])
return ({'type': 'PushedTextOffset'}, [(block, [])])
elif macro == 'size':
block['type'] = 'AbsDimension'
if args and 'x' not in block and 'y' not in block:
block['x'] = args[0]
if len(args) >= 2:
block['y'] = args[1]
return ({'type': 'Size'}, [(block, [])])
elif macro == 'texcoords':
if len(args) != 4:
raise ValueError()
return ({'type': 'TexCoords', 'left': args[0], 'right': args[1], 'top': args[2], 'bottom': args[3]}, [])
elif macro == 'textinsets':
if len(args) != 4:
raise ValueError('textinsets')
return ({'type': 'TextInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
else:
raise ValueError(macro)
colon = ~Token(':')
comma = ~Token(',')
double_quoted_string = Token(r'"[^"]*"') > (lambda v: v[0].strip('"'))
integer = Token(Integer())
real = Token(Real())
single_quoted_string = Token(r"'[^']*'") > (lambda v: v[0].strip("'"))
token = Token(Regexp('[!%\$A-Za-z_][A-Za-z0-9_]*'))
value = double_quoted_string | integer | real | single_quoted_string | token
keyvalue = token & colon & value > (lambda v: (v[0], v[1]))
parent = ~Token('\^') & token > (lambda v: ('parent', v[0]))
parentkey = ~Token('\.') & token > (lambda v: ('parentKey', v[0]))
flag = Token(Any('+-')) & token > (lambda v: (v[1], 'true' if v[0] == '+' else 'false'))
def _is_number(value):
try:
int(value)
except ValueError:
try:
float(value)
except ValueError:
return False
return True
def _parse_arguments(tokens):
arguments = {}
if not tokens:
return arguments
if isinstance(tokens[0], basestring) and tokens[0] not in implicit_flags and not _is_number(tokens[0]):
arguments['inherits'] = tokens.pop(0)
for token in tokens:
if isinstance(token, tuple):
arguments[token[0]] = token[1]
elif token in implicit_flags:
arguments[token] = 'true'
else:
if '__args__' not in arguments:
arguments['__args__'] = []
arguments['__args__'].append(token)
return arguments
def _parse_script_tokens(tokens):
offset = 1
if tokens[0][1] == ' ':
offset = 2
return '\n'.join([token[offset:] for token in tokens])
candidates = flag | keyvalue | parent | parentkey | token | value
arguments = ~Token('\(') & Extend(candidates[:, comma]) & ~Token('\)') > _parse_arguments
declaration = token[1:] & arguments[0:1]
assignment_line = Line(Token('%[A-Za-z]+') & ~Token('=') & value) > (lambda v: {v[0]: v[1]})
blank_line = ~Line(Empty(), indent=False)
comment_line = ~Line(Token('#.*'), indent=False)
declaration_line = Line(declaration)
script_line = Line(Token('\*[^\n]*'))
def _parse_block(tokens):
equivalent = frame_type_equivalents.get(tokens[0])
if equivalent:
block = {'type': equivalent[0], 'inherits': equivalent[1]}
else:
block = {'type': tokens[0]}
print 'parsing ' + tokens[0]
i = 1
if len(tokens) > i and isinstance(tokens[i], basestring) and not tokens[i].startswith('*'):
block['name'] = tokens[i]
print ' name = ' + block['name']
i += 1
if len(tokens) > i and isinstance(tokens[i], dict):
block.update(tokens[i])
if block.get('inherits') == 'NoInherit':
del block['inherits']
i += 1
if len(tokens) > i and isinstance(tokens[i], basestring) and tokens[i].startswith('*'):
block['script'] = _parse_script_tokens(tokens[i:])
#block['script'] = '\n'.join(token.lstrip('*') for token in tokens[i:])
return (block, [])
children = tokens[i:]
if block['type'].startswith('!'):
macro = block['type'].lstrip('!').lower()
block, children = _evaluate_macro(macro, block, children)
if children and block['type'] != 'ScrollChild':
anchors, frames, layers, scripts, remaining = [], [], [], [], []
for child in children:
type = child[0]['type']
if type == 'Anchor':
anchors.append(child)
elif type in frame_types:
frames.append(child)
elif type == 'Layer':
layers.append(child)
elif type.startswith('On'):
scripts.append(child)
else:
remaining.append(child)
if anchors:
remaining.append(({'type': 'Anchors'}, anchors))
if frames:
remaining.append(({'type': 'Frames'}, frames))
if layers:
remaining.append(({'type': 'Layers'}, layers))
if scripts:
remaining.append(({'type': 'Scripts'}, scripts))
children = remaining
return (block, children)
block = Delayed()
line = Or(
blank_line,
block,
comment_line,
declaration_line > _parse_block,
script_line
)
block += (Line(declaration & colon) & Block(line[1:])) > _parse_block
lines = assignment_line | line
source = lines[:] & Eos()
source.config.no_memoize()
source.config.compile_to_dfa()
source.config.lines(block_policy=to_right)
parser = source.get_parse()
def construct_element(block, children):
element = Element(block.pop('type'))
for key, value in block.iteritems():
if key not in ('__args__', 'script'):
element.set(key, value)
for child in children:
element.append(construct_element(*child))
if 'script' in block:
element.text = '\n%s\n' % block['script']
return element
script_pattern = re.compile(r'(?ms)(^[ ]*<On[^>]+>.*?<\/On[^>]+>)')
def _format_script(match):
script = match.group(1)
indent = ' ' * script.find('<')
lines = script.split('\n')
for i in range(1, len(lines) - 1):
lines[i] = indent + ' ' + lines[i]
lines[-1] = indent + lines[-1]
return '\n'.join(lines)
def construct(source):
assignments = {}
ui = Element('Ui')
for element in parser(source):
if isinstance(element, dict):
assignments.update(element)
else:
ui.append(construct_element(*element))
xml = tostring(ui, pretty_print=True)
xml = script_pattern.sub(_format_script, xml)
for key, value in assignments.iteritems():
xml = xml.replace(key, value)
header, body = xml.split('\n', 1)
return proper_header + body
def parse(source_filename, xml_filename):
with open(source_filename) as openfile:
source = openfile.read()
xml = construct(source)
with open(xml_filename, 'w+') as openfile:
openfile.write(xml)
def process_file(source_file, target_dir): | xmlfile = path.join(target_dir, path.basename(source_file).replace('.wx', '.xml')) | random_line_split |
|
wx.py | return ({'type': 'Anchor', 'point': point}, [
({'type': 'Offset'}, [(block, [])])
])
else:
return ({'type': 'Anchor', 'point': point}, [])
elif macro in layer_macros:
return ({'type': 'Layer', 'level': macro.upper()}, children)
elif macro == 'anchor':
anchor = {'type': 'Anchor'}
for key in ('point', 'relativePoint', 'relativeTo'):
if key in block:
anchor[key] = block[key]
dimension = {}
if 'x' in block:
dimension['x'] = block['x']
if 'y' in block:
dimension['y'] = block['y']
children = []
if dimension:
dimension['type'] = 'AbsDimension'
children.append(({'type': 'Offset'}, [(dimension, [])]))
return (anchor, children)
elif macro == 'color':
if not (3 <= len(args) <= 4):
raise ValueError()
block.update(type='Color', r=args[0], g=args[1], b=args[2])
if len(args) == 4:
block['a'] = args[3]
return (block, [])
elif macro == 'hitrectinsets':
if len(args) != 4:
raise ValueError()
return ({'type': 'HitRectInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
elif macro in ('normalfont', 'highlightfont'):
block['type'] = block['type'][1:]
if 'inherits' in block:
if 'style' not in block:
block['style'] = block['inherits']
del block['inherits']
for arg in args:
if arg in ('LEFT', 'CENTER', 'RIGHT'):
if 'justifyH' not in block:
block['justifyH'] = arg
elif arg in ('TOP', 'MIDDLE', 'BOTTOM'):
if 'justifyV' not in block:
block['justifyV'] = arg
return (block, [])
elif macro == 'pushedtextoffset':
block['type'] = 'AbsDimension'
if len(args) == 2 and 'x' not in block and 'y' not in block:
block.update(x=args[0], y=args[1])
return ({'type': 'PushedTextOffset'}, [(block, [])])
elif macro == 'size':
block['type'] = 'AbsDimension'
if args and 'x' not in block and 'y' not in block:
block['x'] = args[0]
if len(args) >= 2:
block['y'] = args[1]
return ({'type': 'Size'}, [(block, [])])
elif macro == 'texcoords':
if len(args) != 4:
raise ValueError()
return ({'type': 'TexCoords', 'left': args[0], 'right': args[1], 'top': args[2], 'bottom': args[3]}, [])
elif macro == 'textinsets':
if len(args) != 4:
raise ValueError('textinsets')
return ({'type': 'TextInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
else:
raise ValueError(macro)
colon = ~Token(':')
comma = ~Token(',')
double_quoted_string = Token(r'"[^"]*"') > (lambda v: v[0].strip('"'))
integer = Token(Integer())
real = Token(Real())
single_quoted_string = Token(r"'[^']*'") > (lambda v: v[0].strip("'"))
token = Token(Regexp('[!%\$A-Za-z_][A-Za-z0-9_]*'))
value = double_quoted_string | integer | real | single_quoted_string | token
keyvalue = token & colon & value > (lambda v: (v[0], v[1]))
parent = ~Token('\^') & token > (lambda v: ('parent', v[0]))
parentkey = ~Token('\.') & token > (lambda v: ('parentKey', v[0]))
flag = Token(Any('+-')) & token > (lambda v: (v[1], 'true' if v[0] == '+' else 'false'))
def _is_number(value):
try:
int(value)
except ValueError:
try:
float(value)
except ValueError:
return False
return True
def _parse_arguments(tokens):
arguments = {}
if not tokens:
return arguments
if isinstance(tokens[0], basestring) and tokens[0] not in implicit_flags and not _is_number(tokens[0]):
arguments['inherits'] = tokens.pop(0)
for token in tokens:
if isinstance(token, tuple):
arguments[token[0]] = token[1]
elif token in implicit_flags:
arguments[token] = 'true'
else:
if '__args__' not in arguments:
arguments['__args__'] = []
arguments['__args__'].append(token)
return arguments
def _parse_script_tokens(tokens):
offset = 1
if tokens[0][1] == ' ':
offset = 2
return '\n'.join([token[offset:] for token in tokens])
candidates = flag | keyvalue | parent | parentkey | token | value
arguments = ~Token('\(') & Extend(candidates[:, comma]) & ~Token('\)') > _parse_arguments
declaration = token[1:] & arguments[0:1]
assignment_line = Line(Token('%[A-Za-z]+') & ~Token('=') & value) > (lambda v: {v[0]: v[1]})
blank_line = ~Line(Empty(), indent=False)
comment_line = ~Line(Token('#.*'), indent=False)
declaration_line = Line(declaration)
script_line = Line(Token('\*[^\n]*'))
def _parse_block(tokens):
equivalent = frame_type_equivalents.get(tokens[0])
if equivalent:
block = {'type': equivalent[0], 'inherits': equivalent[1]}
else:
block = {'type': tokens[0]}
print 'parsing ' + tokens[0]
i = 1
if len(tokens) > i and isinstance(tokens[i], basestring) and not tokens[i].startswith('*'):
block['name'] = tokens[i]
print ' name = ' + block['name']
i += 1
if len(tokens) > i and isinstance(tokens[i], dict):
block.update(tokens[i])
if block.get('inherits') == 'NoInherit':
del block['inherits']
i += 1
if len(tokens) > i and isinstance(tokens[i], basestring) and tokens[i].startswith('*'):
block['script'] = _parse_script_tokens(tokens[i:])
#block['script'] = '\n'.join(token.lstrip('*') for token in tokens[i:])
return (block, [])
children = tokens[i:]
if block['type'].startswith('!'):
macro = block['type'].lstrip('!').lower()
block, children = _evaluate_macro(macro, block, children)
if children and block['type'] != 'ScrollChild':
anchors, frames, layers, scripts, remaining = [], [], [], [], []
for child in children:
type = child[0]['type']
if type == 'Anchor':
anchors.append(child)
elif type in frame_types:
frames.append(child)
elif type == 'Layer':
layers.append(child)
elif type.startswith('On'):
scripts.append(child)
else:
remaining.append(child)
if anchors:
remaining.append(({'type': 'Anchors'}, anchors))
if frames:
remaining.append(({'type': 'Frames'}, frames))
if layers:
remaining.append(({'type': 'Layers'}, layers))
if scripts:
remaining.append(({'type': 'Scripts'}, scripts))
children = remaining
return (block, children)
block = Delayed()
line = Or(
blank_line,
block,
comment_line,
declaration_line > _parse_block,
script_line
)
block += (Line(declaration & colon) & Block(line[1:])) > _parse_block
lines = assignment_line | line
source = lines[:] & Eos()
source.config.no_memoize()
source.config.compile_to_dfa()
source.config.lines(block_policy=to_right)
parser = source.get_parse()
def construct_element(block, children):
element = Element(block.pop('type'))
for key, value in block.iteritems():
if key not in ('__args__', 'script'):
element.set(key, value)
for child in children:
element.append(construct_element(*child))
if 'script' in block:
element.text = '\n%s\n' % block['script']
return element
script_pattern = re.compile(r'(?ms)(^[ ]*<On[^>]+>.*?<\/On[^>]+>)')
def _format_script(match):
script = match.group(1)
indent = ' ' * script.find('<')
lines = script.split('\n')
for i in range(1, len(lines) - 1):
| lines[i] = indent + ' ' + lines[i] | conditional_block |
|
wx.py | 'LeftLabel': ['FontString', 'epLeftLabel'],
'ListBuilder': ['Frame', 'epListBuilder'],
'MessageFrame': ['Frame', 'epMessageFrame'],
'MessageFrameBase': ['Frame', 'epMessageFrameBase'],
'MultiButton': ['Button', 'epMultiButton'],
'MultiFrame': ['Frame', 'epMultiFrame'],
'Panel': ['Frame', 'epPanel'],
'ScrollFrame': ['ScrollFrame', 'epScrollFrame'],
'Spinner': ['EditBox', 'epSpinner'],
'TabbedFrame': ['Frame', 'epTabbedFrame'],
'Tree': ['Frame', 'epTree'],
'VerticalScrollBar': ['Slider', 'epVerticalScrollBar'],
'VerticalSlider': ['Slider', 'epVerticalSlider'],
}
implicit_flags = ['enableMouse', 'hidden', 'setAllPoints', 'virtual']
anchor_macros = ['bottomleft', 'bottomright', 'center', 'left', 'right', 'top', 'topleft', 'topright']
layer_macros = ['artwork', 'background', 'border', 'overlay', 'highlight']
def _evaluate_macro(macro, block, children):
args = block.get('__args__', [])
if macro in anchor_macros:
point = macro.upper()
if 'x' in block or 'y' in block:
block['type'] = 'AbsDimension'
return ({'type': 'Anchor', 'point': point}, [
({'type': 'Offset'}, [(block, [])])
])
elif args:
block = {'type': 'AbsDimension', 'x': args[0]}
if len(args) >= 2:
block['y'] = args[1]
return ({'type': 'Anchor', 'point': point}, [
({'type': 'Offset'}, [(block, [])])
])
else:
return ({'type': 'Anchor', 'point': point}, [])
elif macro in layer_macros:
return ({'type': 'Layer', 'level': macro.upper()}, children)
elif macro == 'anchor':
anchor = {'type': 'Anchor'}
for key in ('point', 'relativePoint', 'relativeTo'):
if key in block:
anchor[key] = block[key]
dimension = {}
if 'x' in block:
dimension['x'] = block['x']
if 'y' in block:
dimension['y'] = block['y']
children = []
if dimension:
dimension['type'] = 'AbsDimension'
children.append(({'type': 'Offset'}, [(dimension, [])]))
return (anchor, children)
elif macro == 'color':
if not (3 <= len(args) <= 4):
raise ValueError()
block.update(type='Color', r=args[0], g=args[1], b=args[2])
if len(args) == 4:
block['a'] = args[3]
return (block, [])
elif macro == 'hitrectinsets':
if len(args) != 4:
raise ValueError()
return ({'type': 'HitRectInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
elif macro in ('normalfont', 'highlightfont'):
block['type'] = block['type'][1:]
if 'inherits' in block:
if 'style' not in block:
block['style'] = block['inherits']
del block['inherits']
for arg in args:
if arg in ('LEFT', 'CENTER', 'RIGHT'):
if 'justifyH' not in block:
block['justifyH'] = arg
elif arg in ('TOP', 'MIDDLE', 'BOTTOM'):
if 'justifyV' not in block:
block['justifyV'] = arg
return (block, [])
elif macro == 'pushedtextoffset':
block['type'] = 'AbsDimension'
if len(args) == 2 and 'x' not in block and 'y' not in block:
block.update(x=args[0], y=args[1])
return ({'type': 'PushedTextOffset'}, [(block, [])])
elif macro == 'size':
block['type'] = 'AbsDimension'
if args and 'x' not in block and 'y' not in block:
block['x'] = args[0]
if len(args) >= 2:
block['y'] = args[1]
return ({'type': 'Size'}, [(block, [])])
elif macro == 'texcoords':
if len(args) != 4:
raise ValueError()
return ({'type': 'TexCoords', 'left': args[0], 'right': args[1], 'top': args[2], 'bottom': args[3]}, [])
elif macro == 'textinsets':
if len(args) != 4:
raise ValueError('textinsets')
return ({'type': 'TextInsets'}, [({'type': 'AbsInset', 'left': args[0], 'right': args[1],
'top': args[2], 'bottom': args[3]}, [])])
else:
raise ValueError(macro)
colon = ~Token(':')
comma = ~Token(',')
double_quoted_string = Token(r'"[^"]*"') > (lambda v: v[0].strip('"'))
integer = Token(Integer())
real = Token(Real())
single_quoted_string = Token(r"'[^']*'") > (lambda v: v[0].strip("'"))
token = Token(Regexp('[!%\$A-Za-z_][A-Za-z0-9_]*'))
value = double_quoted_string | integer | real | single_quoted_string | token
keyvalue = token & colon & value > (lambda v: (v[0], v[1]))
parent = ~Token('\^') & token > (lambda v: ('parent', v[0]))
parentkey = ~Token('\.') & token > (lambda v: ('parentKey', v[0]))
flag = Token(Any('+-')) & token > (lambda v: (v[1], 'true' if v[0] == '+' else 'false'))
def _is_number(value):
try:
int(value)
except ValueError:
try:
float(value)
except ValueError:
return False
return True
def _parse_arguments(tokens):
arguments = {}
if not tokens:
return arguments
if isinstance(tokens[0], basestring) and tokens[0] not in implicit_flags and not _is_number(tokens[0]):
arguments['inherits'] = tokens.pop(0)
for token in tokens:
if isinstance(token, tuple):
arguments[token[0]] = token[1]
elif token in implicit_flags:
arguments[token] = 'true'
else:
if '__args__' not in arguments:
arguments['__args__'] = []
arguments['__args__'].append(token)
return arguments
def _parse_script_tokens(tokens):
|
candidates = flag | keyvalue | parent | parentkey | token | value
arguments = ~Token('\(') & Extend(candidates[:, comma]) & ~Token('\)') > _parse_arguments
declaration = token[1:] & arguments[0:1]
assignment_line = Line(Token('%[A-Za-z]+') & ~Token('=') & value) > (lambda v: {v[0]: v[1]})
blank_line = ~Line(Empty(), indent=False)
comment_line = ~Line(Token('#.*'), indent=False)
declaration_line = Line(declaration)
script_line = Line(Token('\*[^\n]*'))
def _parse_block(tokens):
equivalent = frame_type_equivalents.get(tokens[0])
if equivalent:
block = {'type': equivalent[0], 'inherits': equivalent[1]}
else:
block = {'type': tokens[0]}
print 'parsing ' + tokens[0]
i = 1
if len(tokens) > i and isinstance(tokens[i], basestring) and not tokens[i].startswith('*'):
block['name'] = tokens[i]
print ' name = ' + block['name']
i += 1
if len(tokens) > i and isinstance(tokens[i], dict):
block.update(tokens[i])
if block.get('inherits') == 'NoInherit':
del block['inherits']
i += 1
if len(tokens) > i and isinstance(tokens[i], basestring) and tokens[i].startswith('*'):
block['script'] = _parse_script_tokens(tokens[i:])
#block['script'] = '\n'.join(token.lstrip('*') for token in tokens[i:])
return (block, [])
children = tokens[i:]
if block['type'].startswith('!'):
macro = block['type'].lstrip('!').lower()
block, children = _evaluate_macro(macro, block, children)
if children and block['type'] != 'ScrollChild':
anchors, frames, layers, scripts, remaining = [], [], [], [], []
for child in children:
type = child[0]['type']
if type == 'Anchor':
anchors.append(child)
elif type in frame_types:
frames.append(child)
elif type == 'Layer':
layers.append(child)
elif type
writer.go | ups" ini:"maxbackups"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress" ini:"compress"`
CustomerBackupFormat string
rotateRunning bool
size int64
file *os.File
mu sync.Mutex
millCh chan bool
startMill sync.Once
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
defaultWriter *FileWriter
)
func NewDefaultWriter() *FileWriter {
f := &FileWriter{
RotateCron: defaultRotateCron,
FileName: defaultLogName,
MaxBackups: defaultMaxBackups,
Compress: true,
}
f.startRotateCron()
return f
}
func getTime() string {
return time.Now().Format(printTimeFormat)
}
func (p *FileWriter) Init() {
if p.FileName == "" {
p.FileName = "./default.log"
}
if p.RotateCron == "" {
p.RotateCron = defaultRotateCron
}
p.startRotateCron()
}
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *FileWriter) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.file == nil {
if err = l.openExistingOrNew(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
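// Usage sketch (not part of the original file): FileWriter satisfies io.Writer, so it
// can back the standard library logger. The file name below is an illustrative
// assumption; defaultRotateCron is the package constant used by NewDefaultWriter.
//
//	w := &FileWriter{FileName: "/var/log/app.log", RotateCron: defaultRotateCron, MaxBackups: 7, Compress: true}
//	w.Init()
//	log.SetOutput(w) // requires the standard "log" package
//	defer w.Close()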
// Close implements io.Closer, and closes the current logfile.
func (l *FileWriter) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// rotate
func (l *FileWriter) startRotateCron() {
c := cron.New()
c.AddFunc(l.RotateCron, func() {
if l.rotateRunning {
return
}
l.rotateRunning = true
if err := l.Rotate(); err != nil {
}
l.rotateRunning = false
})
c.Start()
}
// close closes the file if it is open.
func (l *FileWriter) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes FileWriter to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *FileWriter) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *FileWriter) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
func (l *FileWriter) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := l.backupName(name)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
func (l *FileWriter) customerBackupName(name string) string {
dir := filepath.Dir(name)
t := currentTime()
fileName := t.Format(l.CustomerBackupFormat)
return filepath.Join(dir, fileName)
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func (l *FileWriter) backupName(name string) string {
if l.CustomerBackupFormat != "" {
return l.customerBackupName(name)
}
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *FileWriter) openExistingOrNew() error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// genFilename generates the name of the logfile from the current time.
func (l *FileWriter) filename() string {
if l.FileName != "" {
return l.FileName
}
return defaultLogName
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *FileWriter) millRunOnce() error {
if l.MaxBackups == 0 && !l.Compress {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups+1 {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
fmt.Println("l.Compress", l.FileName, l.Compress)
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return err
}
// millRun runs in a goroutine to manage post-rotation compression and removal
writer.go | ups" ini:"maxbackups"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress" ini:"compress"`
CustomerBackupFormat string
rotateRunning bool
size int64
file *os.File
mu sync.Mutex
millCh chan bool
startMill sync.Once
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
defaultWriter *FileWriter
)
func NewDefaultWriter() *FileWriter {
f := &FileWriter{
RotateCron: defaultRotateCron,
FileName: defaultLogName,
MaxBackups: defaultMaxBackups,
Compress: true,
}
f.startRotateCron()
return f
}
func getTime() string {
return time.Now().Format(printTimeFormat)
}
func (p *FileWriter) Init() {
if p.FileName == "" {
p.FileName = "./default.log"
}
if p.RotateCron == "" {
p.RotateCron = defaultRotateCron
}
p.startRotateCron()
}
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *FileWriter) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.file == nil {
if err = l.openExistingOrNew(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *FileWriter) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// rotate
func (l *FileWriter) startRotateCron() {
c := cron.New()
c.AddFunc(l.RotateCron, func() {
if l.rotateRunning {
return
}
l.rotateRunning = true
if err := l.Rotate(); err != nil {
}
l.rotateRunning = false
})
c.Start()
}
// close closes the file if it is open.
func (l *FileWriter) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes FileWriter to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *FileWriter) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
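// Sketch of the SIGHUP wiring mentioned in the comment above (illustrative, not from
// the original source; assumes w is an initialized *FileWriter and the standard
// "os", "os/signal" and "syscall" packages are imported):
//
//	hup := make(chan os.Signal, 1)
//	signal.Notify(hup, syscall.SIGHUP)
//	go func() {
//		for range hup {
//			_ = w.Rotate()
//		}
//	}()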
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *FileWriter) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
func (l *FileWriter) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := l.backupName(name)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
func (l *FileWriter) customerBackupName(name string) string {
dir := filepath.Dir(name)
t := currentTime()
fileName := t.Format(l.CustomerBackupFormat)
return filepath.Join(dir, fileName)
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func (l *FileWriter) backupName(name string) string {
if l.CustomerBackupFormat != "" {
return l.customerBackupName(name)
}
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
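// Worked example (assuming backupTimeFormat, defined elsewhere in this package, is a
// layout such as "2006-01-02T15-04-05.000"): rotating "/var/log/app.log" renames it to
// something like "/var/log/app-2006-01-02T15-04-05.000.log".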
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *FileWriter) openExistingOrNew() error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// genFilename generates the name of the logfile from the current time.
func (l *FileWriter) filename() string {
if l.FileName != "" {
return l.FileName
}
return defaultLogName
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *FileWriter) millRunOnce() error {
if l.MaxBackups == 0 && !l.Compress {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups+1 {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
fmt.Println("l.Compress", l.FileName, l.Compress)
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return err
}
// millRun runs in a goroutine to manage post-rotation compression and removal
writer.go | p.FileName = "./default.log"
}
if p.RotateCron == "" {
p.RotateCron = defaultRotateCron
}
p.startRotateCron()
}
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *FileWriter) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.file == nil {
if err = l.openExistingOrNew(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *FileWriter) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// rotate
func (l *FileWriter) startRotateCron() {
c := cron.New()
c.AddFunc(l.RotateCron, func() {
if l.rotateRunning {
return
}
l.rotateRunning = true
if err := l.Rotate(); err != nil {
}
l.rotateRunning = false
})
c.Start()
}
// close closes the file if it is open.
func (l *FileWriter) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes FileWriter to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *FileWriter) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *FileWriter) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
func (l *FileWriter) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := l.backupName(name)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
func (l *FileWriter) customerBackupName(name string) string {
dir := filepath.Dir(name)
t := currentTime()
fileName := t.Format(l.CustomerBackupFormat)
return filepath.Join(dir, fileName)
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func (l *FileWriter) backupName(name string) string {
if l.CustomerBackupFormat != "" {
return l.customerBackupName(name)
}
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *FileWriter) openExistingOrNew() error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// genFilename generates the name of the logfile from the current time.
func (l *FileWriter) filename() string {
if l.FileName != "" {
return l.FileName
}
return defaultLogName
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *FileWriter) millRunOnce() error {
if l.MaxBackups == 0 && !l.Compress {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups+1 {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
fmt.Println("l.Compress", l.FileName, l.Compress)
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return err
}
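// Worked example of the retention rule above (illustrative): while scanning the files
// returned by oldLogFiles(), compressed and uncompressed variants of the same backup
// count once; once more than MaxBackups+1 distinct backup names have been seen, the
// remaining files are queued for removal, and kept backups without the compress suffix
// are queued for gzip compression when Compress is true.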
// millRun runs in a goroutine to manage post-rotation compression and removal
// of old log files.
func (l *FileWriter) millRun() {
for _ = range l.millCh {
// what am I going to do, log this?
_ = l.millRunOnce()
}
}
// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *FileWriter) mill() {
l.startMill.Do(func() {
l.millCh = make(chan bool, 1)
go l.millRun()
})
select {
case l.millCh <- true:
default:
}
}
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by ModTime
func (l *FileWriter) oldLogFiles() ([]logInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, fmt.Errorf("can't read log file directory: %s", err)
}
logFiles := []logInfo{}
if len(files) == 0 {
return nil, nil
}
for _, e := range files {
logFiles = append(logFiles, e)
}
sort.Sort(byName(logFiles))
return logFiles, nil
}
// dir returns the directory for the current filename.
func (l *FileWriter) dir() string {
return filepath.Dir(l.filename())
}
writer.go | " ini:"maxbackups"`
// Compress determines if the rotated log files should be compressed
// using gzip. The default is not to perform compression.
Compress bool `json:"compress" ini:"compress"`
CustomerBackupFormat string
rotateRunning bool
size int64
file *os.File
mu sync.Mutex
millCh chan bool
startMill sync.Once
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
defaultWriter *FileWriter
)
func NewDefaultWriter() *FileWriter {
f := &FileWriter{
RotateCron: defaultRotateCron,
FileName: defaultLogName,
MaxBackups: defaultMaxBackups,
Compress: true,
}
f.startRotateCron()
return f
}
func getTime() string {
return time.Now().Format(printTimeFormat)
}
func (p *FileWriter) Init() {
if p.FileName == "" {
p.FileName = "./default.log"
}
if p.RotateCron == "" {
p.RotateCron = defaultRotateCron
}
p.startRotateCron()
}
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *FileWriter) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.file == nil {
if err = l.openExistingOrNew(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *FileWriter) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// rotate
func (l *FileWriter) startRotateCron() {
c := cron.New()
c.AddFunc(l.RotateCron, func() {
if l.rotateRunning {
return
}
l.rotateRunning = true
if err := l.Rotate(); err != nil {
}
l.rotateRunning = false
})
c.Start()
}
// close closes the file if it is open.
func (l *FileWriter) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes FileWriter to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates compression and removal of old log
// files according to the configuration.
func (l *FileWriter) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// post-rotation processing and removal.
func (l *FileWriter) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
l.mill()
return nil
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This methods assumes the file has already been closed.
func (l *FileWriter) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := l.backupName(name)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
func (l *FileWriter) customerBackupName(name string) string {
dir := filepath.Dir(name)
t := currentTime()
fileName := t.Format(l.CustomerBackupFormat)
return filepath.Join(dir, fileName)
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func (l *FileWriter) backupName(name string) string {
if l.CustomerBackupFormat != "" {
return l.customerBackupName(name)
}
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *FileWriter) openExistingOrNew() error {
l.mill()
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// genFilename generates the name of the logfile from the current time.
func (l *FileWriter) filename() string {
if l.FileName != "" {
return l.FileName
}
return defaultLogName
}
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *FileWriter) millRunOnce() error {
if l.MaxBackups == 0 && !l.Compress {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var compress, remove []logInfo
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []logInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.Name()
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true
if len(preserved) > l.MaxBackups+1 {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
fmt.Println("l.Compress", l.FileName, l.Compress)
if l.Compress {
for _, f := range files {
if !strings.HasSuffix(f.Name(), compressSuffix) {
compress = append(compress, f)
}
}
}
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.Name())
errCompress := compressLogFile(fn, fn+compressSuffix)
if err == nil && errCompress != nil {
err = errCompress
}
}
return err
}
// millRun runs in a goroutine to manage post-rotation compression and removal
converter.go | atchdog(vmi.Spec.Domain.Devices.Watchdog, newWatchdog, c)
// if err != nil {
// return err
// }
// domain.Spec.Devices.Watchdog = newWatchdog
// }
// if vmi.Spec.Domain.Devices.Rng != nil {
// newRng := &Rng{}
// err := Convert_v1_Rng_To_api_Rng(vmi.Spec.Domain.Devices.Rng, newRng, c)
// if err != nil {
// return err
// }
// domain.Spec.Devices.Rng = newRng
// }
devicePerBus := make(map[string]int)
for _, diskCfg := range taskCfg.Disks {
if newDisk, err := setDiskSpec(&diskCfg, devicePerBus); err == nil {
domainSpec.Devices.Disks = append(domainSpec.Devices.Disks, newDisk)
} else {
return err
}
}
// if vmi.Spec.Domain.Clock != nil {
// clock := vmi.Spec.Domain.Clock
// newClock := &Clock{}
// err := Convert_v1_Clock_To_api_Clock(clock, newClock, c)
// if err != nil {
// return err
// }
// domain.Spec.Clock = newClock
// }
// if vmi.Spec.Domain.Features != nil {
// domain.Spec.Features = &Features{}
// err := Convert_v1_Features_To_api_Features(vmi.Spec.Domain.Features, domain.Spec.Features, c)
// if err != nil {
// return err
// }
// }
// apiOst := &vmi.Spec.Domain.Machine
// err = Convert_v1_Machine_To_api_OSType(apiOst, &domainSpec.OS.Type, c)
// if err != nil {
// return err
// }
//run qemu-system-x86_64 -machine help to see supported machine type
domainSpec.OS.Type.Machine = taskCfg.Machine
// Set VM CPU cores
// CPU topology will be created everytime, because user can specify
// number of cores in vmi.Spec.Domain.Resources.Requests/Limits, not only
// in vmi.Spec.Domain.CPU
domainSpec.VCPU = &VCPU{
Placement: "static",
CPUs: taskCfg.VCPU,
}
// Set VM CPU model and vendor
if taskCfg.CPU.Model != "" {
if taskCfg.CPU.Model == CPUModeHostModel || taskCfg.CPU.Model == CPUModeHostPassthrough {
domainSpec.CPU.Mode = taskCfg.CPU.Model
} else {
domainSpec.CPU.Mode = "custom"
domainSpec.CPU.Model = taskCfg.CPU.Model
}
}
// Adjust guest vcpu config. Currenty will handle vCPUs to pCPUs pinning
// if vmi.IsCPUDedicated() {
// if err := formatDomainCPUTune(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain cputune.")
// return err
// }
// if useIOThreads {
// if err := formatDomainIOThreadPin(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain iothread pinning.")
// return err
// }
// }
// }
if taskCfg.CPU.Model == "" {
domainSpec.CPU.Mode = CPUModeHostModel
}
// if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
// var heads uint = 1
// var vram uint = 16384
// domain.Spec.Devices.Video = []Video{
// {
// Model: VideoModel{
// Type: "vga",
// Heads: &heads,
// VRam: &vram,
// },
// },
// }
// domain.Spec.Devices.Graphics = []Graphics{
// {
// Listen: &GraphicsListen{
// Type: "socket",
// Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-vnc", vmi.ObjectMeta.UID),
// },
// Type: "vnc",
// },
// }
// }
getInterfaceType := func(iface *InterfaceConfig) string {
if iface.InterfaceBindingMethod == "slirp" {
// Slirp configuration works only with e1000 or rtl8139
if iface.Model != "e1000" && iface.Model != "rtl8139" {
fmt.Println("Network interface type of %s was changed to e1000 due to unsupported interface type by qemu slirp network", iface.Name)
return "e1000"
}
return iface.Model
}
if iface.Model != "" {
return iface.Model
}
return "virtio"
}
for _, iface := range taskCfg.Interfaces {
switch iface.InterfaceBindingMethod {
case "sriov":
//not sure what to do here
case "bridge", "masquerade", "slirp", "network":
ifaceType := getInterfaceType(&iface)
domainIface := Interface{
Model: &Model{
Type: ifaceType,
},
Alias: &Alias{
Name: iface.Name,
},
}
// if UseEmulation unset and at least one NIC model is virtio,
// /dev/vhost-net must be present as we should have asked for it.
if ifaceType == "virtio" && virtioNetProhibited {
return fmt.Errorf("virtio interface cannot be used when in-kernel virtio-net device emulation '/dev/vhost-net' not present")
}
// Add a pciAddress if specifed, will be auto-generated if not set
if iface.PciAddress != "" {
addr, err := decoratePciAddressField(iface.PciAddress)
if err != nil {
return fmt.Errorf("failed to configure interface %s: %v", iface.Name, err)
}
domainIface.Address = addr
}
if iface.InterfaceBindingMethod == "bridge" || iface.InterfaceBindingMethod == "masquerade" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "bridge"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Bridge: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Bridge: DefaultBridgeName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "network" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "network"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Network: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Network: DefaultNetworkName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "slirp" {
//not sure what to do here
}
domainSpec.Devices.Interfaces = append(domainSpec.Devices.Interfaces, domainIface)
}
}
for _, deviceCfg := range taskCfg.HostDevices {
if newHostDevice, err := setHostDeviceSpec(&deviceCfg); err == nil {
domainSpec.Devices.HostDevices = append(domainSpec.Devices.HostDevices, newHostDevice)
} else {
return err
}
}
return nil
}
func setMemorySpec(cfg MemoryConfig) (Memory, error) {
if cfg.Value < 0 {
return Memory{Unit: "B"}, fmt.Errorf("Memory size '%d' must be greater than or equal to 0", cfg.Value)
}
var memorySize uint64
switch cfg.Unit {
case "gib":
memorySize = cfg.Value * 1024 * 1024 * 1024
case "mib":
memorySize = cfg.Value * 1024 * 1024
case "kib":
memorySize = cfg.Value * 1024
case "b":
//do nothing
default:
return Memory{Unit: "B"}, fmt.Errorf("memory unit for domain not recognized")
}
return Memory{
Value: memorySize,
Unit: "B",
}, nil
}
converter.go | return err
// }
// }
// }
if taskCfg.CPU.Model == "" {
domainSpec.CPU.Mode = CPUModeHostModel
}
// if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
// var heads uint = 1
// var vram uint = 16384
// domain.Spec.Devices.Video = []Video{
// {
// Model: VideoModel{
// Type: "vga",
// Heads: &heads,
// VRam: &vram,
// },
// },
// }
// domain.Spec.Devices.Graphics = []Graphics{
// {
// Listen: &GraphicsListen{
// Type: "socket",
// Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-vnc", vmi.ObjectMeta.UID),
// },
// Type: "vnc",
// },
// }
// }
getInterfaceType := func(iface *InterfaceConfig) string {
if iface.InterfaceBindingMethod == "slirp" {
// Slirp configuration works only with e1000 or rtl8139
if iface.Model != "e1000" && iface.Model != "rtl8139" {
fmt.Println("Network interface type of %s was changed to e1000 due to unsupported interface type by qemu slirp network", iface.Name)
return "e1000"
}
return iface.Model
}
if iface.Model != "" {
return iface.Model
}
return "virtio"
}
for _, iface := range taskCfg.Interfaces {
switch iface.InterfaceBindingMethod {
case "sriov":
//not sure what to do here
case "bridge", "masquerade", "slirp", "network":
ifaceType := getInterfaceType(&iface)
domainIface := Interface{
Model: &Model{
Type: ifaceType,
},
Alias: &Alias{
Name: iface.Name,
},
}
// if UseEmulation unset and at least one NIC model is virtio,
// /dev/vhost-net must be present as we should have asked for it.
if ifaceType == "virtio" && virtioNetProhibited {
return fmt.Errorf("virtio interface cannot be used when in-kernel virtio-net device emulation '/dev/vhost-net' not present")
}
// Add a pciAddress if specifed, will be auto-generated if not set
if iface.PciAddress != "" {
addr, err := decoratePciAddressField(iface.PciAddress)
if err != nil {
return fmt.Errorf("failed to configure interface %s: %v", iface.Name, err)
}
domainIface.Address = addr
}
if iface.InterfaceBindingMethod == "bridge" || iface.InterfaceBindingMethod == "masquerade" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "bridge"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Bridge: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Bridge: DefaultBridgeName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "network" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "network"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Network: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Network: DefaultNetworkName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "slirp" {
//not sure what to do here
}
domainSpec.Devices.Interfaces = append(domainSpec.Devices.Interfaces, domainIface)
}
}
for _, deviceCfg := range taskCfg.HostDevices {
if newHostDevice, err := setHostDeviceSpec(&deviceCfg); err == nil {
domainSpec.Devices.HostDevices = append(domainSpec.Devices.HostDevices, newHostDevice)
} else {
return err
}
}
return nil
}
func setMemorySpec(cfg MemoryConfig) (Memory, error) {
if cfg.Value < 0 {
return Memory{Unit: "B"}, fmt.Errorf("Memory size '%d' must be greater than or equal to 0", cfg.Value)
}
var memorySize uint64
switch cfg.Unit {
case "gib":
memorySize = cfg.Value * 1024 * 1024 * 1024
case "mib":
memorySize = cfg.Value * 1024 * 1024
case "kib":
memorySize = cfg.Value * 1024
case "b":
//do nothing
default:
return Memory{Unit: "B"}, fmt.Errorf("memory unit for domain not recognized")
}
return Memory{
Value: memorySize,
Unit: "B",
}, nil
}
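// Worked example (illustrative): MemoryConfig{Value: 2, Unit: "gib"} yields
// Memory{Value: 2147483648, Unit: "B"}; an unrecognized unit such as "tib" returns an error.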
func setDiskSpec(cfg *DiskConfig, devicePerBus map[string]int) (Disk, error) {
if cfg == nil {
return Disk{}, fmt.Errorf("disk config cannot be nil")
}
disk := Disk{}
switch cfg.Device {
case "disk":
disk.Device = "disk"
disk.Type = cfg.Type
disk.Target.Bus = cfg.TargetBus
disk.Target.Device = makeDeviceName(cfg.TargetBus, devicePerBus)
disk.ReadOnly = toApiReadOnly(cfg.ReadOnly)
//only support file type disk now
if cfg.Type == "file" {
disk.Source = DiskSource{File: cfg.Source}
} else if cfg.Type == "block" {
disk.Source = DiskSource{Dev: cfg.Source}
}
case "lun":
disk.Device = "lun"
disk.Target.Bus = cfg.TargetBus
disk.Target.Device = makeDeviceName(cfg.TargetBus, devicePerBus)
disk.ReadOnly = toApiReadOnly(cfg.ReadOnly)
case "floppy":
disk.Device = "floppy"
disk.Target.Bus = "fdc"
disk.Target.Device = makeDeviceName(disk.Target.Bus, devicePerBus)
disk.ReadOnly = toApiReadOnly(cfg.ReadOnly)
case "cdrom":
disk.Device = "cdrom"
disk.Target.Bus = cfg.TargetBus
disk.Target.Device = makeDeviceName(cfg.TargetBus, devicePerBus)
disk.ReadOnly = toApiReadOnly(cfg.ReadOnly)
default:
return Disk{}, fmt.Errorf("unknown disk type")
}
disk.Driver = &DiskDriver{
Name: "qemu",
Cache: string(cfg.Cache),
}
if cfg.Type == "file" {
disk.Driver.Type = "qcow2"
} else if cfg.Type == "block" {
disk.Driver.Type = "raw"
}
return disk, nil
}
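// Illustrative example (the path is an assumption): the first virtio, file-backed disk,
// e.g. DiskConfig{Device: "disk", Type: "file", TargetBus: "virtio", Source: "/vm/disk0.qcow2"},
// maps to a Disk with Target.Device "vda", Source.File "/vm/disk0.qcow2" and a qemu/qcow2 driver.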
func setHostDeviceSpec(cfg *HostDeviceConfig) (HostDevice, error) {
if cfg == nil {
return HostDevice{}, fmt.Errorf("HostDevice config cannot be nil")
}
hostDevice := HostDevice{}
switch cfg.Type {
case "pci":
hostDevice.Type = cfg.Type
hostDevice.Managed = cfg.Managed
hostDevice.Source.Address = &Address{
Domain: cfg.Domain,
Bus: cfg.Bus,
Slot: cfg.Slot,
Function: cfg.Function,
}
}
return hostDevice, nil
}
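// Illustrative note: a HostDeviceConfig with Type "pci" and address fields such as
// domain 0x0000, bus 0x06, slot 0x12, function 0x5 is copied verbatim into
// HostDevice.Source.Address, which is how a PCI passthrough device is described to libvirt.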
func makeDeviceName(bus string, devicePerBus map[string]int) string {
index := devicePerBus[bus]
devicePerBus[bus] += 1
prefix := ""
switch bus {
case "virtio":
prefix = "vd"
case "sata", "scsi":
prefix = "sd"
case "fdc":
prefix = "fd"
default:
fmt.Printf("Unrecognized bus '%s'", bus)
return ""
}
return formatDeviceName(prefix, index)
}
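// Illustrative example: the first three disks on the "virtio" bus are named "vda",
// "vdb", "vdc"; the first disk on a "sata" or "scsi" bus is "sda", and on "fdc" it is "fda".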
// port of http://elixir.free-electrons.com/linux/v4.15/source/drivers/scsi/sd.c#L3211
func formatDeviceName(prefix string, index int) string {
base := int('z' - 'a' + 1)
name := ""
for index >= 0 {
name = string('a'+(index%base)) + name
index = (index / base) - 1
}
return prefix + name
}
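// Illustrative mapping for the scheme above (same scheme as the kernel's sd naming):
// index 0 -> "a", 25 -> "z", 26 -> "aa", 701 -> "zz", 702 -> "aaa", so
// formatDeviceName("sd", 26) == "sdaa".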
func toApiReadOnly(src bool) *ReadOnly {
if src {
return &ReadOnly{}
}
return nil
}
converter.go | )
// if err != nil {
// return err
// }
// domain.Spec.Devices.Watchdog = newWatchdog
// }
// if vmi.Spec.Domain.Devices.Rng != nil {
// newRng := &Rng{}
// err := Convert_v1_Rng_To_api_Rng(vmi.Spec.Domain.Devices.Rng, newRng, c)
// if err != nil {
// return err
// }
// domain.Spec.Devices.Rng = newRng
// }
devicePerBus := make(map[string]int)
for _, diskCfg := range taskCfg.Disks {
if newDisk, err := setDiskSpec(&diskCfg, devicePerBus); err == nil {
domainSpec.Devices.Disks = append(domainSpec.Devices.Disks, newDisk)
} else {
return err
}
}
// if vmi.Spec.Domain.Clock != nil {
// clock := vmi.Spec.Domain.Clock
// newClock := &Clock{}
// err := Convert_v1_Clock_To_api_Clock(clock, newClock, c)
// if err != nil {
// return err
// }
// domain.Spec.Clock = newClock
// }
// if vmi.Spec.Domain.Features != nil {
// domain.Spec.Features = &Features{}
// err := Convert_v1_Features_To_api_Features(vmi.Spec.Domain.Features, domain.Spec.Features, c)
// if err != nil {
// return err
// }
// }
// apiOst := &vmi.Spec.Domain.Machine
// err = Convert_v1_Machine_To_api_OSType(apiOst, &domainSpec.OS.Type, c)
// if err != nil {
// return err
// }
//run qemu-system-x86_64 -machine help to see supported machine type
domainSpec.OS.Type.Machine = taskCfg.Machine
// Set VM CPU cores
// CPU topology will be created everytime, because user can specify
// number of cores in vmi.Spec.Domain.Resources.Requests/Limits, not only
// in vmi.Spec.Domain.CPU
domainSpec.VCPU = &VCPU{
Placement: "static",
CPUs: taskCfg.VCPU,
}
// Set VM CPU model and vendor
if taskCfg.CPU.Model != "" {
if taskCfg.CPU.Model == CPUModeHostModel || taskCfg.CPU.Model == CPUModeHostPassthrough {
domainSpec.CPU.Mode = taskCfg.CPU.Model
} else {
domainSpec.CPU.Mode = "custom"
domainSpec.CPU.Model = taskCfg.CPU.Model
}
}
// Adjust guest vcpu config. Currenty will handle vCPUs to pCPUs pinning
// if vmi.IsCPUDedicated() {
// if err := formatDomainCPUTune(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain cputune.")
// return err
// }
// if useIOThreads {
// if err := formatDomainIOThreadPin(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain iothread pinning.")
// return err
// }
// }
// }
if taskCfg.CPU.Model == "" {
domainSpec.CPU.Mode = CPUModeHostModel
}
// if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
// var heads uint = 1
// var vram uint = 16384
// domain.Spec.Devices.Video = []Video{
// {
// Model: VideoModel{
// Type: "vga",
// Heads: &heads,
// VRam: &vram,
// },
// },
// }
// domain.Spec.Devices.Graphics = []Graphics{
// {
// Listen: &GraphicsListen{
// Type: "socket",
// Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-vnc", vmi.ObjectMeta.UID),
// },
// Type: "vnc",
// },
// }
// }
getInterfaceType := func(iface *InterfaceConfig) string {
if iface.InterfaceBindingMethod == "slirp" {
// Slirp configuration works only with e1000 or rtl8139
if iface.Model != "e1000" && iface.Model != "rtl8139" {
fmt.Println("Network interface type of %s was changed to e1000 due to unsupported interface type by qemu slirp network", iface.Name)
return "e1000"
}
return iface.Model
}
if iface.Model != "" {
return iface.Model
}
return "virtio"
}
for _, iface := range taskCfg.Interfaces {
switch iface.InterfaceBindingMethod {
case "sriov":
//not sure what to do here
case "bridge", "masquerade", "slirp", "network":
ifaceType := getInterfaceType(&iface)
domainIface := Interface{
Model: &Model{
Type: ifaceType,
},
Alias: &Alias{
Name: iface.Name,
},
}
// if UseEmulation unset and at least one NIC model is virtio,
// /dev/vhost-net must be present as we should have asked for it.
if ifaceType == "virtio" && virtioNetProhibited {
return fmt.Errorf("virtio interface cannot be used when in-kernel virtio-net device emulation '/dev/vhost-net' not present")
}
// Add a pciAddress if specifed, will be auto-generated if not set
if iface.PciAddress != "" {
addr, err := decoratePciAddressField(iface.PciAddress)
if err != nil {
return fmt.Errorf("failed to configure interface %s: %v", iface.Name, err)
}
domainIface.Address = addr
}
if iface.InterfaceBindingMethod == "bridge" || iface.InterfaceBindingMethod == "masquerade" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "bridge"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Bridge: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Bridge: DefaultBridgeName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "network" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "network"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Network: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Network: DefaultNetworkName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "slirp" {
//not sure what to do here
}
domainSpec.Devices.Interfaces = append(domainSpec.Devices.Interfaces, domainIface)
}
}
for _, deviceCfg := range taskCfg.HostDevices {
if newHostDevice, err := setHostDeviceSpec(&deviceCfg); err == nil {
domainSpec.Devices.HostDevices = append(domainSpec.Devices.HostDevices, newHostDevice)
} else {
return err
}
}
return nil
}
func setMemorySpec(cfg MemoryConfig) (Memory, error) {
if cfg.Value < 0 {
return Memory{Unit: "B"}, fmt.Errorf("Memory size '%d' must be greater than or equal to 0", cfg.Value)
}
var memorySize uint64
switch cfg.Unit {
case "gib":
memorySize = cfg.Value * 1024 * 1024 * 1024
case "mib":
memorySize = cfg.Value * 1024 * 1024
case "kib":
memorySize = cfg.Value * 1024
case "b":
//do nothing
default:
return Memory{Unit: "B"}, fmt.Errorf("memory unit for domain not recognized")
}
return Memory{
Value: memorySize,
Unit: "B",
}, nil
}
func setDiskSpec
converter.go | ioThreadId := defaultIOThread
// dedicatedThread := false
// if disk.DedicatedIOThread != nil {
// dedicatedThread = *disk.DedicatedIOThread
// }
// if dedicatedThread {
// ioThreadId = currentDedicatedThread
// currentDedicatedThread += 1
// } else {
// ioThreadId = currentAutoThread
// // increment the threadId to be used next but wrap around at the thread limit
// // the odd math here is because thread ID's start at 1, not 0
// currentAutoThread = (currentAutoThread % uint(autoThreads)) + 1
// }
// newDisk.Driver.IOThread = &ioThreadId
// }
// domain.Spec.Devices.Disks = append(domain.Spec.Devices.Disks, newDisk)
// }
// if vmi.Spec.Domain.Devices.Watchdog != nil {
// newWatchdog := &Watchdog{}
// err := Convert_v1_Watchdog_To_api_Watchdog(vmi.Spec.Domain.Devices.Watchdog, newWatchdog, c)
// if err != nil {
// return err
// }
// domain.Spec.Devices.Watchdog = newWatchdog
// }
// if vmi.Spec.Domain.Devices.Rng != nil {
// newRng := &Rng{}
// err := Convert_v1_Rng_To_api_Rng(vmi.Spec.Domain.Devices.Rng, newRng, c)
// if err != nil {
// return err
// }
// domain.Spec.Devices.Rng = newRng
// }
devicePerBus := make(map[string]int)
for _, diskCfg := range taskCfg.Disks {
if newDisk, err := setDiskSpec(&diskCfg, devicePerBus); err == nil {
domainSpec.Devices.Disks = append(domainSpec.Devices.Disks, newDisk)
} else {
return err
}
}
// if vmi.Spec.Domain.Clock != nil {
// clock := vmi.Spec.Domain.Clock
// newClock := &Clock{}
// err := Convert_v1_Clock_To_api_Clock(clock, newClock, c)
// if err != nil {
// return err
// }
// domain.Spec.Clock = newClock
// }
// if vmi.Spec.Domain.Features != nil {
// domain.Spec.Features = &Features{}
// err := Convert_v1_Features_To_api_Features(vmi.Spec.Domain.Features, domain.Spec.Features, c)
// if err != nil {
// return err
// }
// }
// apiOst := &vmi.Spec.Domain.Machine
// err = Convert_v1_Machine_To_api_OSType(apiOst, &domainSpec.OS.Type, c)
// if err != nil {
// return err
// }
//run qemu-system-x86_64 -machine help to see supported machine type
domainSpec.OS.Type.Machine = taskCfg.Machine
// Set VM CPU cores
// CPU topology will be created everytime, because user can specify
// number of cores in vmi.Spec.Domain.Resources.Requests/Limits, not only
// in vmi.Spec.Domain.CPU
domainSpec.VCPU = &VCPU{
Placement: "static",
CPUs: taskCfg.VCPU,
}
// Set VM CPU model and vendor
if taskCfg.CPU.Model != "" {
if taskCfg.CPU.Model == CPUModeHostModel || taskCfg.CPU.Model == CPUModeHostPassthrough {
domainSpec.CPU.Mode = taskCfg.CPU.Model
} else {
domainSpec.CPU.Mode = "custom"
domainSpec.CPU.Model = taskCfg.CPU.Model
}
}
// Adjust guest vcpu config. Currenty will handle vCPUs to pCPUs pinning
// if vmi.IsCPUDedicated() {
// if err := formatDomainCPUTune(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain cputune.")
// return err
// }
// if useIOThreads {
// if err := formatDomainIOThreadPin(vmi, domain, c); err != nil {
// log.Log.Reason(err).Error("failed to format domain iothread pinning.")
// return err
// }
// }
// }
if taskCfg.CPU.Model == "" {
domainSpec.CPU.Mode = CPUModeHostModel
}
// if vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == nil || *vmi.Spec.Domain.Devices.AutoattachGraphicsDevice == true {
// var heads uint = 1
// var vram uint = 16384
// domain.Spec.Devices.Video = []Video{
// {
// Model: VideoModel{
// Type: "vga",
// Heads: &heads,
// VRam: &vram,
// },
// },
// }
// domain.Spec.Devices.Graphics = []Graphics{
// {
// Listen: &GraphicsListen{
// Type: "socket",
// Socket: fmt.Sprintf("/var/run/kubevirt-private/%s/virt-vnc", vmi.ObjectMeta.UID),
// },
// Type: "vnc",
// },
// }
// }
getInterfaceType := func(iface *InterfaceConfig) string {
if iface.InterfaceBindingMethod == "slirp" {
// Slirp configuration works only with e1000 or rtl8139
if iface.Model != "e1000" && iface.Model != "rtl8139" {
fmt.Println("Network interface type of %s was changed to e1000 due to unsupported interface type by qemu slirp network", iface.Name)
return "e1000"
}
return iface.Model
}
if iface.Model != "" {
return iface.Model
}
return "virtio"
}
for _, iface := range taskCfg.Interfaces {
switch iface.InterfaceBindingMethod {
case "sriov":
//not sure what to do here
case "bridge", "masquerade", "slirp", "network":
ifaceType := getInterfaceType(&iface)
domainIface := Interface{
Model: &Model{
Type: ifaceType,
},
Alias: &Alias{
Name: iface.Name,
},
}
// if UseEmulation unset and at least one NIC model is virtio,
// /dev/vhost-net must be present as we should have asked for it.
if ifaceType == "virtio" && virtioNetProhibited {
return fmt.Errorf("virtio interface cannot be used when in-kernel virtio-net device emulation '/dev/vhost-net' not present")
}
// Add a pciAddress if specifed, will be auto-generated if not set
if iface.PciAddress != "" {
addr, err := decoratePciAddressField(iface.PciAddress)
if err != nil {
return fmt.Errorf("failed to configure interface %s: %v", iface.Name, err)
}
domainIface.Address = addr
}
if iface.InterfaceBindingMethod == "bridge" || iface.InterfaceBindingMethod == "masquerade" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "bridge"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Bridge: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Bridge: DefaultBridgeName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "network" {
// TODO:(ihar) consider abstracting interface type conversion /
// detection into drivers
domainIface.Type = "network"
if iface.SourceName != "" {
domainIface.Source = InterfaceSource{
Network: iface.SourceName,
}
} else {
domainIface.Source = InterfaceSource{
Network: DefaultNetworkName,
}
}
if iface.BootOrder != nil {
domainIface.BootOrder = &BootOrder{Order: *iface.BootOrder}
}
} else if iface.InterfaceBindingMethod == "slirp" {
//not sure what to do here
}
domainSpec.Devices.Interfaces = append(domainSpec.Devices.Interfaces, domainIface)
}
}
router.rs | verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
struct BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+ 'static,
V::Future: Send + 'static,
{
/// The checkpointing block verifier.
///
/// Always used for blocks before `Canopy`, optionally used for the entire checkpoint list.
checkpoint: CheckpointVerifier<S>,
/// The highest permitted checkpoint block.
///
/// This height must be in the `checkpoint` verifier's checkpoint list.
max_checkpoint_height: block::Height,
/// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
//
// One or both of these error variants are at least 140 bytes
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum RouterError {
/// Block could not be checkpointed
Checkpoint { source: Box<VerifyCheckpointError> },
/// Block could not be full-verified
Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
fn from(err: VerifyCheckpointError) -> Self {
RouterError::Checkpoint {
source: Box::new(err),
}
}
}
impl From<VerifyBlockError> for RouterError {
fn from(err: VerifyBlockError) -> Self {
RouterError::Block {
source: Box::new(err),
}
}
}
impl RouterError {
/// Returns `true` if this is definitely a duplicate request.
/// Some duplicate requests might not be detected, and therefore return `false`.
pub fn is_duplicate_request(&self) -> bool {
match self {
RouterError::Checkpoint { source, .. } => source.is_duplicate_request(),
RouterError::Block { source, .. } => source.is_duplicate_request(),
}
}
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+ 'static,
V::Future: Send + 'static,
{
type Response = block::Hash;
type Error = RouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// CORRECTNESS
//
// The current task must be scheduled for wakeup every time we return
// `Poll::Pending`.
//
// If either verifier is unready, this task is scheduled for wakeup when it becomes
// ready.
//
// We acquire checkpoint readiness before block readiness, to avoid an unlikely
// hang during the checkpoint to block verifier transition. If the checkpoint and
// block verifiers are contending for the same buffer/batch, we want the checkpoint
// verifier to win, so that checkpoint verification completes, and block verification
// can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
use futures::ready;
// The chain verifier holds one slot in each verifier, for each concurrent task.
// Therefore, any shared buffers or batches polled by these verifiers should double
// their bounds. (For example, the state service buffer.)
ready!(self.checkpoint.poll_ready(cx))?;
ready!(self.block.poll_ready(cx))?;
Poll::Ready(Ok(()))
}
fn call(&mut self, request: Request) -> Self::Future {
let block = request.block();
match block.coinbase_height() {
#[cfg(feature = "getblocktemplate-rpcs")]
// There's currently no known use case for block proposals below the checkpoint height,
// so it's okay to immediately return an error here.
Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
async {
// TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
Err(VerifyBlockError::ValidateProposal(
"block proposals must be above checkpoint height".into(),
))?
}
.boxed()
}
Some(height) if height <= self.max_checkpoint_height => {
self.checkpoint.call(block).map_err(Into::into).boxed()
}
// This also covers blocks with no height, which the block verifier
// will reject immediately.
_ => self.block.call(request).map_err(Into::into).boxed(),
}
}
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
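///
/// A call-site sketch (not from the original source; assumes `config` and
/// `state_service` are already constructed and `Network` is in scope):
///
/// ```ignore
/// let (block_verifier_router, transaction_verifier, task_handles, max_checkpoint_height) =
///     init(config, Network::Mainnet, state_service, false).await;
/// // The returned services should still be wrapped in timeouts by the caller,
/// // as described above.
/// ```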
#[instrument(skip(state_service))]
pub async fn | <S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if !debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move {
tracing::info!("starting state checkpoint validation");
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 2 | init | identifier_name |
router.rs | , Error)]
#[allow(missing_docs)]
pub enum RouterError {
/// Block could not be checkpointed
Checkpoint { source: Box<VerifyCheckpointError> },
/// Block could not be full-verified
Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
fn from(err: VerifyCheckpointError) -> Self {
RouterError::Checkpoint {
source: Box::new(err),
}
}
}
impl From<VerifyBlockError> for RouterError {
fn from(err: VerifyBlockError) -> Self {
RouterError::Block {
source: Box::new(err),
}
}
}
impl RouterError {
/// Returns `true` if this is definitely a duplicate request.
/// Some duplicate requests might not be detected, and therefore return `false`.
pub fn is_duplicate_request(&self) -> bool {
match self {
RouterError::Checkpoint { source, .. } => source.is_duplicate_request(),
RouterError::Block { source, .. } => source.is_duplicate_request(),
}
}
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+ 'static,
V::Future: Send + 'static,
{
type Response = block::Hash;
type Error = RouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// CORRECTNESS
//
// The current task must be scheduled for wakeup every time we return
// `Poll::Pending`.
//
// If either verifier is unready, this task is scheduled for wakeup when it becomes
// ready.
//
// We acquire checkpoint readiness before block readiness, to avoid an unlikely
// hang during the checkpoint to block verifier transition. If the checkpoint and
// block verifiers are contending for the same buffer/batch, we want the checkpoint
// verifier to win, so that checkpoint verification completes, and block verification
// can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
use futures::ready;
// The chain verifier holds one slot in each verifier, for each concurrent task.
// Therefore, any shared buffers or batches polled by these verifiers should double
// their bounds. (For example, the state service buffer.)
ready!(self.checkpoint.poll_ready(cx))?;
ready!(self.block.poll_ready(cx))?;
Poll::Ready(Ok(()))
}
fn call(&mut self, request: Request) -> Self::Future {
let block = request.block();
match block.coinbase_height() {
#[cfg(feature = "getblocktemplate-rpcs")]
// There's currently no known use case for block proposals below the checkpoint height,
// so it's okay to immediately return an error here.
Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
async {
// TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
Err(VerifyBlockError::ValidateProposal(
"block proposals must be above checkpoint height".into(),
))?
}
.boxed()
}
Some(height) if height <= self.max_checkpoint_height => {
self.checkpoint.call(block).map_err(Into::into).boxed()
}
// This also covers blocks with no height, which the block verifier
// will reject immediately.
_ => self.block.call(request).map_err(Into::into).boxed(),
}
}
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn init<S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if !debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move {
tracing::info!("starting state checkpoint validation");
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 20.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockchain>
let full_checkpoints = CheckpointList::new(network);
for (height, checkpoint_hash) in full_checkpoints.iter() {
let checkpoint_state_service = checkpoint_state_service.clone();
let request = zebra_state::Request::BestChainBlockHash(*height);
match checkpoint_state_service.oneshot(request).await {
Ok(zebra_state::Response::BlockHash(Some(state_hash))) => assert_eq!(
*checkpoint_hash, state_hash,
"invalid block in state: a previous Zebra instance followed an \
incorrect chain. Delete and re-sync your state to use the best chain"
),
Ok(zebra_state::Response::BlockHash(None)) => {
if checkpoint_sync {
tracing::info!(
"state is not fully synced yet, remaining checkpoints will be \
verified during syncing"
);
} else {
tracing::warn!(
"state is not fully synced yet, remaining checkpoints will be \
verified next time Zebra starts up. Zebra will be less secure \
until it is restarted. Use consensus.checkpoint_sync = true \
in zebrad.toml to make sure you are following a valid chain"
);
}
break;
}
Ok(response) => {
| unreachable!("unexpected response type: {response:?} from state request")
}
| conditional_block |
|
router.rs | verifier router routes requests to either the checkpoint verifier or the
/// semantic block verifier, depending on the maximum checkpoint height.
///
/// # Correctness
///
/// Block verification requests should be wrapped in a timeout, so that
/// out-of-order and invalid requests do not hang indefinitely. See the [`router`](`crate::router`)
/// module documentation for details.
struct BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+ 'static,
V::Future: Send + 'static,
{
/// The checkpointing block verifier.
///
/// Always used for blocks before `Canopy`, optionally used for the entire checkpoint list.
checkpoint: CheckpointVerifier<S>,
/// The highest permitted checkpoint block.
///
/// This height must be in the `checkpoint` verifier's checkpoint list.
max_checkpoint_height: block::Height,
/// The full semantic block verifier, used for blocks after `max_checkpoint_height`.
block: SemanticBlockVerifier<S, V>,
}
/// An error while semantically verifying a block.
//
// One or both of these error variants are at least 140 bytes
#[derive(Debug, Display, Error)]
#[allow(missing_docs)]
pub enum RouterError {
/// Block could not be checkpointed
Checkpoint { source: Box<VerifyCheckpointError> },
/// Block could not be full-verified
Block { source: Box<VerifyBlockError> },
}
impl From<VerifyCheckpointError> for RouterError {
fn from(err: VerifyCheckpointError) -> Self {
RouterError::Checkpoint {
source: Box::new(err),
}
}
}
impl From<VerifyBlockError> for RouterError {
fn from(err: VerifyBlockError) -> Self {
RouterError::Block {
source: Box::new(err),
}
}
}
impl RouterError {
/// Returns `true` if this is definitely a duplicate request.
/// Some duplicate requests might not be detected, and therefore return `false`.
pub fn is_duplicate_request(&self) -> bool {
match self {
RouterError::Checkpoint { source, .. } => source.is_duplicate_request(),
RouterError::Block { source, .. } => source.is_duplicate_request(),
}
}
}
impl<S, V> Service<Request> for BlockVerifierRouter<S, V>
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
V: Service<transaction::Request, Response = transaction::Response, Error = BoxError>
+ Send
+ Clone
+ 'static,
V::Future: Send + 'static,
{
type Response = block::Hash;
type Error = RouterError;
type Future =
Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
// CORRECTNESS
//
// The current task must be scheduled for wakeup every time we return
// `Poll::Pending`.
//
// If either verifier is unready, this task is scheduled for wakeup when it becomes
// ready.
//
// We acquire checkpoint readiness before block readiness, to avoid an unlikely
// hang during the checkpoint to block verifier transition. If the checkpoint and
// block verifiers are contending for the same buffer/batch, we want the checkpoint
// verifier to win, so that checkpoint verification completes, and block verification
// can start. (Buffers and batches have multiple slots, so this contention is unlikely.)
use futures::ready;
// The chain verifier holds one slot in each verifier, for each concurrent task.
// Therefore, any shared buffers or batches polled by these verifiers should double
// their bounds. (For example, the state service buffer.)
ready!(self.checkpoint.poll_ready(cx))?;
ready!(self.block.poll_ready(cx))?;
Poll::Ready(Ok(()))
}
fn call(&mut self, request: Request) -> Self::Future {
let block = request.block();
match block.coinbase_height() {
#[cfg(feature = "getblocktemplate-rpcs")]
// There's currently no known use case for block proposals below the checkpoint height,
// so it's okay to immediately return an error here.
Some(height) if height <= self.max_checkpoint_height && request.is_proposal() => {
async {
// TODO: Add a `ValidateProposalError` enum with a `BelowCheckpoint` variant?
Err(VerifyBlockError::ValidateProposal(
"block proposals must be above checkpoint height".into(),
))?
}
.boxed()
}
Some(height) if height <= self.max_checkpoint_height => {
self.checkpoint.call(block).map_err(Into::into).boxed()
}
// This also covers blocks with no height, which the block verifier
// will reject immediately.
_ => self.block.call(request).map_err(Into::into).boxed(),
}
}
}
/// Initialize block and transaction verification services,
/// and pre-download Groth16 parameters if requested by the `debug_skip_parameter_preload`
/// config parameter and if the download is not already started.
///
/// Returns a block verifier, transaction verifier,
/// the Groth16 parameter download task [`JoinHandle`],
/// and the maximum configured checkpoint verification height.
///
/// The consensus configuration is specified by `config`, and the Zcash network
/// to verify blocks for is specified by `network`.
///
/// The block verification service asynchronously performs semantic verification
/// checks. Blocks that pass semantic verification are submitted to the supplied
/// `state_service` for contextual verification before being committed to the chain.
///
/// The transaction verification service asynchronously performs semantic verification
/// checks. Transactions that pass semantic verification return an `Ok` result to the caller.
///
/// Pre-downloads the Sapling and Sprout Groth16 parameters if needed,
/// checks they were downloaded correctly, and loads them into Zebra.
/// (The transaction verifier automatically downloads the parameters on first use.
/// But the parameter downloads can take around 10 minutes.
/// So we pre-download the parameters, to avoid verification timeouts.)
///
/// This function should only be called once for a particular state service.
///
/// Dropped requests are cancelled on a best-effort basis, but may continue to be processed.
///
/// # Correctness
///
/// Block and transaction verification requests should be wrapped in a timeout,
/// so that out-of-order and invalid requests do not hang indefinitely.
/// See the [`router`](`crate::router`) module documentation for details.
#[instrument(skip(state_service))]
pub async fn init<S>(
config: Config,
network: Network,
mut state_service: S,
debug_skip_parameter_preload: bool,
) -> (
Buffer<BoxService<Request, block::Hash, RouterError>, Request>,
Buffer<
BoxService<transaction::Request, transaction::Response, TransactionError>,
transaction::Request,
>,
BackgroundTaskHandles,
Height,
)
where
S: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
S::Future: Send + 'static,
{
// Give other tasks priority before spawning the download and checkpoint tasks.
tokio::task::yield_now().await;
// Pre-download Groth16 parameters in a separate thread.
// The parameter download thread must be launched before initializing any verifiers.
// Otherwise, the download might happen on the startup thread.
let span = Span::current();
let groth16_download_handle = tokio::task::spawn_blocking(move || {
span.in_scope(|| {
if !debug_skip_parameter_preload {
// The lazy static initializer does the download, if needed,
// and the file hash checks.
lazy_static::initialize(&crate::groth16::GROTH16_PARAMETERS);
}
})
});
// Make sure the state contains the known best chain checkpoints, in a separate thread.
let checkpoint_state_service = state_service.clone();
let checkpoint_sync = config.checkpoint_sync;
let state_checkpoint_verify_handle = tokio::task::spawn(
// TODO: move this into an async function?
async move { |
// # Consensus
//
// We want to verify all available checkpoints, even if the node is not configured
// to use them for syncing. Zebra's checkpoints are updated with every release,
// which makes sure they include the latest settled network upgrade.
//
// > A network upgrade is settled on a given network when there is a social
// > consensus that it has activated with a given activation block hash.
// > A full validator that potentially risks Mainnet funds or displays Mainnet
// > transaction information to a user MUST do so only for a block chain that
// > includes the activation block of the most recent settled network upgrade,
// > with the corresponding activation block hash. Currently, there is social
// > consensus that NU5 has activated on the Zcash Mainnet and Testnet with the
// > activation block hashes given in § 3.12 ‘Mainnet and Testnet’ on p. 2 | tracing::info!("starting state checkpoint validation"); | random_line_split |
javascript.rs | : &'a LineNumbers,
module: &'a TypedModule,
float_division_used: bool,
object_equality_used: bool,
module_scope: im::HashMap<String, usize>,
}
impl<'a> Generator<'a> {
pub fn new(line_numbers: &'a LineNumbers, module: &'a TypedModule) -> Self {
Self {
line_numbers,
module,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias { .. } => None,
Statement::CustomType { .. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType { .. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages use absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
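// Illustrative examples (not from the original source) of the logic above:
// importing `["other", "module"]` from a same-package module of depth 1 yields
// "./other/module.js", while importing it from a different package named "lib"
// yields "lib/other/module.js".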
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names as they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
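// Rough sketch (not from the original source): an unqualified same-package import
// like `import other/module.{run}` would be rendered approximately as
//   import * as module from "./other/module.js";
//   const { run } = module;
// assuming `wrap_object` prints `{ name }` / `{ name: alias }` style bindings.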
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.to_string(), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used,
&mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public {
"export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> |
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
wrap_args(arguments.iter().enumerate().map(|a| {
match a {
(index, ExternalFnArg { label, .. }) => label
.as_ref()
.map(|l| l.as_str().to_doc())
.unwrap_or_else(|| Document::String(format!("arg{}", index))),
}
}))
}
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name, .. } => name.to_doc(),
}))
}
fn | {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
} | identifier_body |
javascript.rs | ,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias { .. } => None,
Statement::CustomType { .. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType { .. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages use absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names as they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.to_string(), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used,
&mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public {
"export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
}
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
wrap_args(arguments.iter().enumerate().map(|a| {
match a {
(index, ExternalFnArg { label, .. }) => label
.as_ref()
.map(|l| l.as_str().to_doc())
.unwrap_or_else(|| Document::String(format!("arg{}", index))),
}
}))
}
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name, .. } => name.to_doc(),
}))
}
fn wrap_args<'a, I>(args: I) -> Document<'a>
where
I: Iterator<Item = Document<'a>>,
{
break_("", "")
.append(concat(Itertools::intersperse(args, break_(",", ", "))))
.nest(INDENT)
.append(break_("", ""))
.surround("(", ")")
.group()
}
fn | wrap_object | identifier_name |
|
javascript.rs | _numbers: &'a LineNumbers,
module: &'a TypedModule,
float_division_used: bool,
object_equality_used: bool,
module_scope: im::HashMap<String, usize>,
}
impl<'a> Generator<'a> {
pub fn new(line_numbers: &'a LineNumbers, module: &'a TypedModule) -> Self {
Self {
line_numbers,
module,
float_division_used: false,
object_equality_used: false,
module_scope: Default::default(),
}
}
pub fn compile(&mut self) -> Output<'a> {
let statements = std::iter::once(Ok(r#""use strict";"#.to_doc())).chain(
self.module
.statements
.iter()
.flat_map(|s| self.statement(s)),
);
// Two lines between each statement
let statements = Itertools::intersperse(statements, Ok(lines(2)));
let mut statements = statements.collect::<Result<Vec<_>, _>>()?;
// If float division has been used render an appropriate function
if self.float_division_used {
statements.push(FUNCTION_DIVIDE.to_doc());
};
if self.object_equality_used {
statements.push(DEEP_EQUAL.to_doc());
};
statements.push(line());
Ok(statements.to_doc())
}
pub fn statement(&mut self, statement: &'a TypedStatement) -> Option<Output<'a>> {
match statement {
Statement::TypeAlias { .. } => None,
Statement::CustomType { .. } => None,
Statement::Import {
module,
as_name,
unqualified,
package,
..
} => Some(Ok(self.import(package, module, as_name, unqualified))),
Statement::ExternalType { .. } => None,
Statement::ModuleConstant {
public,
name,
value,
..
} => Some(self.module_constant(*public, name, value)),
Statement::Fn {
arguments,
name,
body,
public,
..
} => Some(self.module_function(*public, name, arguments, body)),
Statement::ExternalFn {
public,
name,
arguments,
module,
fun,
..
} => Some(Ok(
self.external_function(*public, name, arguments, module, fun)
)),
}
}
fn import_path(&mut self, package: &'a str, module: &'a [String]) -> Document<'a> {
let path = Document::String(module.join("/"));
if package == self.module.type_info.package {
// Same package uses relative paths
let prefix = match self.module.name.len() {
1 => "./".to_doc(),
_ => Document::String("../".repeat(module.len() - 1)),
};
docvec!["\"", prefix, path, ".js\""]
} else {
// Different packages use absolute imports
docvec!["\"", package, "/", path, ".js\""]
}
}
fn import(
&mut self,
package: &'a str,
module: &'a [String],
as_name: &'a Option<String>,
unqualified: &'a [UnqualifiedImport],
) -> Document<'a> {
let module_name = as_name.as_ref().map(|n| n.as_str()).unwrap_or_else(|| {
module
.last()
.gleam_expect("JavaScript code generator could not identify imported module name.")
});
self.register_in_scope(module_name);
let module_name = maybe_escape_identifier(module_name);
let path: Document<'a> = self.import_path(package, module);
let import_line = docvec!["import * as ", module_name.clone(), " from ", path, ";"];
let mut any_unqualified_values = false;
let matches = unqualified
.iter()
.filter(|i| {
// We do not create a JS import for uppercase names as they are
// type or record constructors, both of which are not used at runtime
i.name
.chars()
.next()
.map(char::is_lowercase)
.unwrap_or(false)
})
.map(|i| {
any_unqualified_values = true;
let alias = i.as_name.as_ref().map(|n| {
self.register_in_scope(n);
maybe_escape_identifier(n)
});
(maybe_escape_identifier(&i.name), alias)
});
let matches = wrap_object(matches);
if any_unqualified_values {
docvec![
import_line,
line(),
"const ",
matches,
" = ",
module_name,
";"
]
} else {
import_line
}
}
fn module_constant(
&mut self,
public: bool,
name: &'a str,
value: &'a TypedConstant,
) -> Output<'a> {
let head = if public { "export const " } else { "const " };
self.register_in_scope(name);
Ok(docvec![
head,
maybe_escape_identifier(name),
" = ",
expression::constant_expression(value)?,
";",
])
}
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.to_string(), 0);
}
fn module_function(
&mut self,
public: bool,
name: &'a str,
args: &'a [TypedArg],
body: &'a TypedExpr,
) -> Output<'a> {
self.register_in_scope(name);
let argument_names = args
.iter()
.map(|arg| arg.names.get_variable_name())
.collect();
let mut generator = expression::Generator::new(
&self.module.name,
self.line_numbers,
name,
argument_names,
&mut self.float_division_used, | "export function "
} else {
"function "
};
Ok(docvec![
head,
maybe_escape_identifier(name),
fun_args(args),
" {",
docvec![line(), generator.function_body(body)?]
.nest(INDENT)
.group(),
line(),
"}",
])
}
fn external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
module: &'a str,
fun: &'a str,
) -> Document<'a> {
if module.is_empty() {
self.global_external_function(public, name, arguments, fun)
} else {
self.imported_external_function(public, name, module, fun)
}
}
fn imported_external_function(
&mut self,
public: bool,
name: &'a str,
module: &'a str,
fun: &'a str,
) -> Document<'a> {
let import = if name == fun {
docvec!["import { ", name, r#" } from ""#, module, r#"";"#]
} else {
docvec![
"import { ",
fun,
" as ",
name,
r#" } from ""#,
module,
r#"";"#
]
};
if public {
import
.append(line())
.append("export { ")
.append(name)
.append(" };")
} else {
import
}
}
fn global_external_function<T>(
&mut self,
public: bool,
name: &'a str,
arguments: &'a [ExternalFnArg<T>],
fun: &'a str,
) -> Document<'a> {
let head = if public {
"export function "
} else {
"function "
};
let arguments = external_fn_args(arguments);
let body = docvec!["return ", fun, arguments.clone()];
docvec![
head,
name,
arguments,
" {",
docvec![line(), body].nest(INDENT).group(),
line(),
"}",
]
}
}
fn external_fn_args<T>(arguments: &[ExternalFnArg<T>]) -> Document<'_> {
wrap_args(arguments.iter().enumerate().map(|a| {
match a {
(index, ExternalFnArg { label, .. }) => label
.as_ref()
.map(|l| l.as_str().to_doc())
.unwrap_or_else(|| Document::String(format!("arg{}", index))),
}
}))
}
pub fn module(
module: &TypedModule,
line_numbers: &LineNumbers,
writer: &mut impl Utf8Writer,
) -> Result<(), crate::Error> {
Generator::new(line_numbers, module)
.compile()
.map_err(crate::Error::JavaScript)?
.pretty_print(80, writer)
}
#[derive(Debug, Clone, PartialEq)]
pub enum Error {
Unsupported { feature: String },
}
fn unsupported<M: ToString, T>(label: M) -> Result<T, Error> {
Err(Error::Unsupported {
feature: label.to_string(),
})
}
fn fun_args(args: &'_ [TypedArg]) -> Document<'_> {
wrap_args(args.iter().map(|a| match &a.names {
ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => "_".to_doc(),
ArgNames::Named { name } | ArgNames::NamedLabelled { name, .. } => name.to_doc(),
}))
}
fn wrap | &mut self.object_equality_used,
self.module_scope.clone(),
);
let head = if public { | random_line_split |
benchmarks.rs | let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
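// For example: once 100 passes have completed, each surviving candidate is
// allowed at most sqrt(100) + 2 = 12 playouts in total.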
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// Not the best average score, but the most-explored candidate, which comes out to the best average score as of the last sorting among the strategies that are at the max playouts;
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random()); | let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand:: | random_line_split |
|
benchmarks.rs | <T> {
strategy: T,
playouts: usize,
total_score: f64,
}
fn playout_result(state: & CombatState, strategy: & impl Strategy)->CombatResult {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// Not the best average score, but the most-explored candidate, which comes out to the best average score as of the last sorting among the strategies that are at the max playouts;
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16 | CandidateStrategy | identifier_name |
|
benchmarks.rs | mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
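// Worked example of the cap above (illustrative): after 0 passes the cap is
// sqrt(0) + 2 = 2 playouts per strategy, after 9 passes it is 5, and after
// 100 passes it is 12, so surviving strategies slowly accumulate evidence
// while newly generated ones stay cheap to evaluate.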
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let optimization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
Fa | let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand:: | stStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
| conditional_block |
benchmarks.rs | Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
}
// Note: This meta strategy often performed WORSE than the naive strategy it's based on,
// probably because it chose lucky moves rather than good moves
struct MetaStrategy <'a, T>(&'a T);
impl <'a, T: Strategy> Strategy for MetaStrategy <'a, T> {
fn choose_choice(&self, state: &CombatState) -> Vec<Choice> {
let combos = collect_starting_points(state.clone(), 200);
let choices = combos.into_iter().map(|(mut state, choices)| {
run_until_unable(&mut Runner::new(&mut state, true, false));
let num_attempts = 200;
let score = (0..num_attempts).map (|_| {
playout_result(& state, self.0).score
}).sum::<f64>()/num_attempts as f64;
(choices, score)
});
choices
.max_by_key(|(_, score)| OrderedFloat(*score))
.unwrap()
.0
}
}
pub struct ExplorationOptimizer <T, F> {
candidate_strategies: Vec<CandidateStrategy <T>>,
new_strategy: F,
passes: usize,
current_pass_index: usize,
}
impl <T, F> ExplorationOptimizer <T, F> {
pub fn max_strategy_playouts(&self) -> usize {
((self.passes as f64).sqrt() + 2.0) as usize
}
pub fn new (new_strategy: F)->Self {
ExplorationOptimizer {
candidate_strategies: Vec::new(),
new_strategy,
passes: 0,
current_pass_index: 0,
}
}
fn best_strategy(&self)->& CandidateStrategy <T> {
// not the best average score, but the most-explored, which comes out to best average score at last sorting among strategies that are at the max playouts
// note that this function may be called in the middle of a pass, when the current best strategy has not yet been visited to increase its number of playouts to the new maximum, so don't rely on the given maximum;
// since this function chooses the FIRST qualifying strategy, it's based on the most recent time the strategies were sorted, so the score-dependence of this choice isn't biased by the change in score variance from some of them having one extra playout.
&self.candidate_strategies.iter().enumerate().max_by_key(| (index, strategy) | {
(strategy.playouts, -(*index as i32))
}).unwrap().1
}
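// Illustrative tie-break for the key above: with playout counts [5, 5, 3]
// the keys are (5, 0), (5, -1), (3, -2), so max_by_key returns index 0,
// i.e. the earliest (best-ranked at the last sort) of the most-played strategies.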
}
impl <T: Strategy, F: Fn (& [CandidateStrategy <T>])->T> StrategyOptimizer for ExplorationOptimizer <T, F> {
type Strategy = T;
fn step (&mut self, state: & CombatState) {
loop {
if self.current_pass_index >= self.candidate_strategies.len() {
self.candidate_strategies.sort_by_key (| strategy | OrderedFloat (- strategy.total_score/strategy.playouts as f64));
let mut index = 0;
self.candidate_strategies.retain(| strategy | {
index += 1;
strategy.playouts >= index
});
self.passes += 1;
self.candidate_strategies.push (CandidateStrategy {
strategy: (self.new_strategy)(&self.candidate_strategies),
playouts: 0,
total_score: 0.0,
});
self.current_pass_index = 0;
}
let max_strategy_playouts = self.max_strategy_playouts();
let strategy = &mut self.candidate_strategies [self.current_pass_index];
self.current_pass_index += 1;
if strategy.playouts < max_strategy_playouts {
let result = playout_result(state, & strategy.strategy);
strategy.total_score += result.score;
strategy.playouts += 1;
return
}
}
}
fn report (&self)->& Self::Strategy {
let best = self.best_strategy();
println!( "ExplorationOptimizer reporting strategy with {} playouts, running average {}", best.playouts, (best.total_score/best.playouts as f64));
& best.strategy
}
}
impl StrategyOptimizer for NeuralStrategy {
type Strategy = NeuralStrategy ;
fn step (&mut self, state: & CombatState) {
self.do_training_playout(state);
}
fn report (&self)->& Self::Strategy {
self
}
}
pub fn benchmark_step(name: & str, state: & CombatState, optimizer: &mut impl StrategyOptimizer) {
println!( "Optimizing {}…", name);
let start = Instant::now();
let mut steps = 0;
let elapsed = loop {
optimizer.step(state);
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(2000) {
break elapsed;
}
};
println!( "Optimized {} for {:.2?} ({} steps). Reporting…", name, elapsed, steps) ;
let strategy = optimizer.report();
let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, strategy).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(500) {
break elapsed;
}
};
println!( "Evaluated {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;
/*let start = Instant::now();
let mut steps = 0;
let mut total_test_score = 0.0;
let elapsed = loop {
total_test_score += playout_result(state, &MetaStrategy(strategy)).score;
steps += 1;
let elapsed = start.elapsed();
if elapsed > Duration::from_millis(5000*20) {
break elapsed;
}
};
println!( "Evaluated meta-strategy for {} for {:.2?} ({} playouts). Average score: {}", name, elapsed, steps, total_test_score / steps as f64) ;*/
}
/*
pub fn run_benchmark (name: & str, state: & CombatState, optimization_playouts: usize, test_playouts: usize, mut optimizer: impl StrategyOptimizer) {
println!( "Starting benchmark for {}, doing {} optimization playouts…", name, optimization_playouts);
for iteration in 0..optimization_playouts {
optimizer.step (| strategy | {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
strategy,
);
CombatResult::new (& state)
});
if iteration % 10000 == 9999 {
println!( "Completed {} playouts…", iteration + 1);
}
}
let (best_strategy, anticipated_score) = optimizer.current_best();
println!( "Optimization completed for {}. Found strategy with anticipated score {}. Doing {} test playouts…", name, anticipated_score, test_playouts);
let total_test_score: f64 = (0..test_playouts)
.map(|_| {
let mut state = state.clone();
play_out (
&mut Runner::new (&mut state, true, false),
best_strategy,
);
CombatResult::new (& state).score
})
.sum();
println!( "Testing completed for {}. Final average score: {}.", name, total_test_score/test_playouts as f64);
println!();
}*/
pub fn run_benchmarks() {
let op | timization_playouts = 1000000;
let test_playouts = 10000;
let ghost_file = std::fs::File::open ("data/hexaghost.json").unwrap();
let ghost_state: CombatState = serde_json::from_reader (std::io::BufReader::new (ghost_file)).unwrap();
let mut fast_random: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <FastStrategy>] | FastStrategy::random());
let mut fast_genetic: ExplorationOptimizer<FastStrategy, _> = ExplorationOptimizer::new (| candidates: & [CandidateStrategy <FastStrategy>] | {
if candidates.len() < 2 {
FastStrategy::random()
}
else {
FastStrategy::offspring(& candidates.choose_multiple(&mut rand::thread_rng(), 2).map (| candidate | & candidate.strategy).collect::<Vec<_>>())
}
});
let mut neural_random_only: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|_: &[CandidateStrategy <NeuralStrategy>] | NeuralStrategy::new_random(&ghost_state, 16));
let mut neural_training_only = NeuralStrategy::new_random(&ghost_state, 16);
let mut neural_random_training: ExplorationOptimizer<NeuralStrategy, _> = ExplorationOptimizer::new (|candidates: &[CandidateStrategy <NeuralStrategy>] | {
if candidates.len() < 1 || rand::random::<f64>() < 0.4 { | identifier_body |
|
project-payment-record.ts | .href.split('#')[0]; // 当前网页的URL,不包含#及其后面部分
// let data = { url: url };
this.http.get(hideAttentionMenuUrl).subscribe(res => {
if (res['code'] == 200) {
wx.config({
debug: false,
appId: res['data'].appid,
timestamp: res['data'].timestamp,
nonceStr: res['data'].nonceStr,
signature: res['data'].signature,
jsApiList: ['hideOptionMenu']
});
wx.ready(function () {
//wx.showOptionMenu();
wx.hideOptionMenu();
});
}
})
}
fileupload() {
// The official Ionic docs example omits this line
// http://ionicframework.com/docs/native/file-transfer/
//
const fileTransfer: FileTransferObject = this.transfer.create();
// More options are available on FileUploadOptions; see its inline comments for details
let options: FileUploadOptions = {
fileKey: 'file',
fileName: 'name.jpg', // file type
headers: {},
params: {} // put any extra request parameters here
}
console.log(12312);
fileTransfer.upload('', 'http://100.168.1.48:8181/mafile/upload.jsp', options)
.then((data) => {
// success
}, (err) => {
// error
})
}
filedownload() {
console.log(3123);
const fileTransfer: FileTransferObject = this.transfer.create();
const url = 'http://www.example.com/file.pdf';
fileTransfer.download(url, this.file.dataDirectory + 'file.pdf').then((entry) => {
console.log('download complete: ' + entry.toURL());
}, (error) => {
// handle error
});
}
/** Navigate to the upload page */
gouploadfile() {
this.navCtrl.push(UploadfilePage, {
callback: this.setuploadfile,
})
}
// Dialog prompt to confirm before going back
sureComplete() {
this.isComplete = !this.isComplete;
}
setuploadfile = (obj, name) => {
this.filestatus = true;
this.filetitle = name;
//console.log(obj)
// var a = obj.fileSize / 1048576;
this.filesize = (obj.fileSize / 1048576).toPrecision(3);
this.fileurl = obj.url;
this.paymentRecordData['sourceName'] = this.filetitle
this.paymentRecordData['size'] = this.filesize
this.paymentRecordData['certifiedUrl'] = this.fileurl
this.paymentRecordData['fid'] = obj.fid
var types = obj.fileType;
if (this.filesize > 1) {
this.filesize = this.filesize + ' MB'
} else {
this.filesize = this.filesize * 1024 + ' KB'
}
if (types.indexOf('doc') == 0 || types.indexOf('docx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'doc.png'
} else if (types.indexOf('ppt') == 0 || types.indexOf('pptx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'ppt.png'
} else if (types.indexOf('xls') == 0 || types.indexOf('xlsx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'xls.png'
} else if (types.indexOf('jpg') == 0 || types.indexOf('png') == 0) {
this.filetypeicon = 'assets/imgs/' + 'png.png'
} else if (types.indexOf('pdf') == 0) {
this.filetypeicon = 'assets/imgs/' + 'pdf.png'
}
this.paymentRecordData['typeStr'] = this.filetypeicon
//console.log(this.filetypeicon)
}
/* Edit a list field */
goFormEditPage(field, value, type) {
let cid = this.navParams.get('cid')
if (type == 'selectPaymentBank') {
this.navCtrl.push(ProjectCollectBankPage, { callback: this.setValue, field: field, data: this.paymentRecordData, type: 'clientType', cid: cid });
} else {
this.navCtrl.push(FormEditPage, { callback: this.setValue, value: value, field: field, type: type });
}
}
/* Set value (callback) */
// setValue = (field,value)=> {
// this.paymentRecordData[field] = value;
// }
/* Set value (callback) */
setValue = (field, value) => {
if (field == 'taxNumber' && value) {
this.paymentRecordData['payer'] = value.name;
this.paymentRecordData['payerBank'] = value.bankName;
this.paymentRecordData['payerAccount'] = value.account;
} else {
this.paymentRecordData[field] = value;
}
}
getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)"); // Build a RegExp containing the target parameter
var r = window.location.search.substr(1).match(reg); // Match the target parameter in the query string
if (r != null) {
return encodeURI(r[2]); // Return the parameter value
} else {
return null;
}
}
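// Illustrative example (not in the original source): for a page opened as
// ".../index.html?openId=abc123", this.getUrlParam('openId') returns "abc123".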
/* Request additional invoice info for the project application */
getProjectPaymentRecordDetail(psid) {
// let projectInvoiceDetailUrl = 'http://mamon.yemindream.com/mamon/customer/getPayMentByPsid';
const openId = window.sessionStorage.getItem('openId') || this.getUrlParam('openId');
let projectInvoiceDetailUrl = getPayMentByPsidUrl + '?openId=' + openId + '&psid=' + psid;
this.Provider.getMamenSwiperData(projectInvoiceDetailUrl).subscribe(res => {
if (res.code == 200) {
this.paymentRecordData = res.data;
console.log('paymentRecordData', this.paymentRecordData)
this.paymentRecordData['size'] = (this.paymentRecordData['size'] / 1048576).toPrecision(3)
if (this.paymentRecordData['size'] > 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] + ' MB'
} else if (this.paymentRecordData['size'] < 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] * 1024 + ' KB'
}
if (this.paymentRecordData['typeStr']) {
if (this.paymentRecordData['typeStr'].search(/doc/) !== -1 || this.paymentRecordData['typeStr'].search(/docx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'doc.png'
} else if (this.paymentRecordData['typeStr'].search(/ppt/) !== -1 || this.paymentRecordData['typeStr'].search(/pptx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'ppt.png'
} else if (this.paymentRecordData['typeStr'].search(/xls/) !== -1 || this.paymentRecordData['typeStr'].search(/xlsx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'xls.png'
} else if (this.paymentRecordData['typeStr'].search(/jpg/) !== -1 || this.paymentRecordData['typeStr'].search(/png/) !== -1 || this.paymentRecordData['typeStr'].search(/jpeg/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'png.png'
} else if (this.paymentRecordData['typeStr'].search(/pdf/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'pdf.png'
}
}
this.paymentRecordData && this.paymentRecordData['payerList'] && this.paymentRecordData['payerList'].length > 0 && this.paymentRecordData['payerList'].map(d => {
if (d.type == 1) {
this.paymentRecordData['payer'] = d.name;
this.paymentRecordData['payerBank'] = d.bankName;
this.paymentRecordData['payerAccount'] = d.account;
}
})
// this.payerList = this.paymentRecordData && this.paymentRecordData['payerList'] && this.paymentRecordData['payerList'].length>0 ?
} else if (res.code == 207) {
window.localStorage.removeItem('openId');
} else {
//alert('请求出错:' + res.msg);
}
}, error => {
console.log('erros===', error);
})
}
// || !invoiceData['payee'] || !invoiceData['payeeBank'] || !invoiceData['payeeAccount'] payee fields
sureCompleteRecord() {
this.isCompleteRecord = true
let invoiceData = this.paymentRecordD | ata;
// let projectInvoiceDetailUrl = 'http://mam | conditional_block |
|
project-payment-record.ts | Urlvalue: any
public paymentRecordData = {}
public payerList = {};
public isComplete = false;
public data = {};
public isCompleteRecord = false
public tipstext: any
public isFailed: any
constructor(public navCtrl: NavController, public navParams: NavParams, private transfer: FileTransfer, private file: File, private Provider: MamenDataProvider, private http: HttpClient) {
}
ionViewDidLoad() {
let psid = this.navParams.get('id')
this.getProjectPaymentRecordDetail(psid);
this.data = this.navParams.get('data');
console.log('ionViewDidLoad ProjectPaymentRecordPage');
}
ionViewDidEnter() {
this.isAttention();
}
// Hide the bottom share menu
isAttention() {
// let url = location.href.split('#')[0]; // URL of the current page, without the '#' and everything after it
// let data = { url: url };
this.http.get(hideAttentionMenuUrl).subscribe(res => {
if (res['code'] == 200) {
wx.config({
debug: false,
appId: res['data'].appid,
timestamp: res['data'].timestamp,
nonceStr: res['data'].nonceStr,
signature: res['data'].signature,
jsApiList: ['hideOptionMenu']
});
wx.ready(function () {
//wx.showOptionMenu();
wx.hideOptionMenu();
});
}
})
}
fileupload() {
// The official Ionic docs example omits this line
// http://ionicframework.com/docs/native/file-transfer/
//
const fileTransfer: FileTransferObject = this.transfer.create();
// More options are available on FileUploadOptions; see its inline comments for details
let options: FileUploadOptions = {
fileKey: 'file',
fileName: 'name.jpg', // file type
headers: {},
params: {} // put any extra request parameters here
}
console.log(12312);
fileTransfer.upload('', 'http://100.168.1.48:8181/mafile/upload.jsp', options)
.then((data) => {
// success
}, (err) => {
// error
})
}
filedownload() {
console.log(3123);
const fileTransfer: FileTransferObject = this.transfer.create();
const url = 'http://www.example.com/file.pdf';
fileTransfer.download(url, this.file.dataDirectory + 'file.pdf').then((entry) => {
console.log('download complete: ' + entry.toURL());
}, (error) => {
// handle error
});
}
/** Navigate to the upload page */
gouploadfile() {
this.navCtrl.push(UploadfilePage, {
callback: this.setuploadfile,
})
}
// Dialog prompt to confirm before going back
sureComplete() {
this.isComplete = !this.isComplete;
}
setuploadfile = (obj, name) => {
this.filestatus = true;
this.filetitle = name;
//console.log(obj)
// var a = obj.fileSize / 1048576;
this.filesize = (obj.fileSize / 1048576).toPrecision(3);
this.fileurl = obj.url;
this.paymentRecordData['sourceName'] = this.filetitle
this.paymentRecordData['size'] = this.filesize
this.paymentRecordData['certifiedUrl'] = this.fileurl
this.paymentRecordData['fid'] = obj.fid
var types = obj.fileType;
if (this.filesize > 1) {
this.filesize = this.filesize + ' MB'
} else {
this.filesize = this.filesize * 1024 + ' KB'
}
if (types.indexOf('doc') == 0 || types.indexOf('docx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'doc.png'
} else if (types.indexOf('ppt') == 0 || types.indexOf('pptx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'ppt.png'
} else if (types.indexOf('xls') == 0 || types.indexOf('xlsx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'xls.png'
} else if (types.indexOf('jpg') == 0 || types.indexOf('png') == 0) {
this.filetypeicon = 'assets/imgs/' + 'png.png'
} else if (types.indexOf('pdf') == 0) {
this.filetypeicon = 'assets/imgs/' + 'pdf.png'
}
this.paymentRecordData['typeStr'] = this.filetypeicon
//console.log(this.filetypeicon)
}
/* Edit a list field */
goFormEditPage(field, value, type) {
let cid = this.navParams.get('cid')
if (type == 'selectPaymentBank') {
this.navCtrl.push(ProjectCollectBankPage, { callback: this.setValue, field: field, data: this.payme | value) {
this.paymentRecordData['payer'] = value.name;
this.paymentRecordData['payerBank'] = value.bankName;
this.paymentRecordData['payerAccount'] = value.account;
} else {
this.paymentRecordData[field] = value;
}
}
getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)"); // Build a RegExp containing the target parameter
var r = window.location.search.substr(1).match(reg); // Match the target parameter in the query string
if (r != null) {
return encodeURI(r[2]); // Return the parameter value
} else {
return null;
}
}
/* Request additional invoice info for the project application */
getProjectPaymentRecordDetail(psid) {
// let projectInvoiceDetailUrl = 'http://mamon.yemindream.com/mamon/customer/getPayMentByPsid';
const openId = window.sessionStorage.getItem('openId') || this.getUrlParam('openId');
let projectInvoiceDetailUrl = getPayMentByPsidUrl + '?openId=' + openId + '&psid=' + psid;
this.Provider.getMamenSwiperData(projectInvoiceDetailUrl).subscribe(res => {
if (res.code == 200) {
this.paymentRecordData = res.data;
console.log('paymentRecordData', this.paymentRecordData)
this.paymentRecordData['size'] = (this.paymentRecordData['size'] / 1048576).toPrecision(3)
if (this.paymentRecordData['size'] > 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] + ' MB'
} else if (this.paymentRecordData['size'] < 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] * 1024 + ' KB'
}
if (this.paymentRecordData['typeStr']) {
if (this.paymentRecordData['typeStr'].search(/doc/) !== -1 || this.paymentRecordData['typeStr'].search(/docx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'doc.png'
} else if (this.paymentRecordData['typeStr'].search(/ppt/) !== -1 || this.paymentRecordData['typeStr'].search(/pptx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'ppt.png'
} else if (this.paymentRecordData['typeStr'].search(/xls/) !== -1 || this.paymentRecordData['typeStr'].search(/xlsx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'xls.png'
} else if (this.paymentRecordData['typeStr'].search(/jpg/) !== -1 || this.paymentRecordData['typeStr'].search(/png/) !== -1 || this.paymentRecordData['typeStr'].search(/jpeg/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'png.png'
} else if (this.paymentRecordData['typeStr'].search(/pdf/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'pdf.png'
}
}
this.paymentRecordData && this.paymentRecordData['payerList'] && this.paymentRecordData['payerList'].length > 0 && this.paymentRecordData['payerList'].map(d => {
if (d.type == 1) {
this.paymentRecordData['payer'] = d.name;
this.paymentRecordData['payerBank'] = d.bankName;
this | ntRecordData, type: 'clientType', cid: cid });
} else {
this.navCtrl.push(FormEditPage, { callback: this.setValue, value: value, field: field, type: type });
}
}
/* Set value (callback) */
// setValue = (field,value)=> {
// this.paymentRecordData[field] = value;
// }
/* Set value (callback) */
setValue = (field, value) => {
if (field == 'taxNumber' && | identifier_body |
project-payment-record.ts | Urlvalue: any
public paymentRecordData = {}
public payerList = {};
public isComplete = false;
public data = {};
public isCompleteRecord = false
public tipstext: any
public isFailed: any
constructor(public navCtrl: NavController, public navParams: NavParams, private transfer: FileTransfer, private file: File, private Provider: MamenDataProvider, private http: HttpClient) {
}
ionViewDidLoad() {
let psid = this.navParams.get('id')
this.getProjectPaymentRecordDetail(psid);
this.data = this.navParams.get('data');
console.log('ionViewDidLoad ProjectPaymentRecordPage');
}
ionViewDidEnter() {
this.isAttention();
}
// Hide the bottom share menu
isAttention() {
// let url = location.href.split('#')[0]; // URL of the current page, without the '#' and everything after it
// let data = { url: url };
this.http.get(hideAttentionMenuUrl).subscribe(res => {
if (res['code'] == 200) {
wx.config({
debug: false,
appId: res['data'].appid,
timestamp: res['data'].timestamp,
nonceStr: res['data'].nonceStr,
signature: res['data'].signature,
jsApiList: ['hideOptionMenu']
});
wx.ready(function () {
//wx.showOptionMenu();
wx.hideOptionMenu();
});
}
})
}
fileupload() {
// The official Ionic docs example omits this line
// http://ionicframework.com/docs/native/file-transfer/
//
const fileTransfer: FileTransferObject = this.transfer.create();
// More options are available on FileUploadOptions; see its inline comments for details
let options: FileUploadOptions = {
fileKey: 'file',
fileName: 'name.jpg', // file type
headers: {},
params: {} // put any extra request parameters here
}
console.log(12312);
fileTransfer.upload('', 'http://100.168.1.48:8181/mafile/upload.jsp', options)
.then((data) => {
// success
}, (err) => {
// error
})
}
filedownload() {
console.log(3123);
const fileTransfer: FileTransferObject = this.transfer.create();
const url = 'http://www.example.com/file.pdf';
fileTransfer.download(url, this.file.dataDirectory + 'file.pdf').then((entry) => {
console.log('download complete: ' + entry.toURL());
}, (error) => {
// handle error
});
}
/** Navigate to the upload page */
gouploadfile() {
this.navCtrl.push(UploadfilePage, {
callback: this.setuploadfile,
})
}
// Dialog prompt to confirm before going back
sureComplete() {
this.isComplete = !this.isComplete;
}
setuploadfile = (obj, name) => {
this.filestatus = true;
this.filetitle = name;
//console.log(obj)
| bj.fileSize / 1048576;
this.filesize = (obj.fileSize / 1048576).toPrecision(3);
this.fileurl = obj.url;
this.paymentRecordData['sourceName'] = this.filetitle
this.paymentRecordData['size'] = this.filesize
this.paymentRecordData['certifiedUrl'] = this.fileurl
this.paymentRecordData['fid'] = obj.fid
var types = obj.fileType;
if (this.filesize > 1) {
this.filesize = this.filesize + ' MB'
} else {
this.filesize = this.filesize * 1024 + ' KB'
}
if (types.indexOf('doc') == 0 || types.indexOf('docx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'doc.png'
} else if (types.indexOf('ppt') == 0 || types.indexOf('pptx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'ppt.png'
} else if (types.indexOf('xls') == 0 || types.indexOf('xlsx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'xls.png'
} else if (types.indexOf('jpg') == 0 || types.indexOf('png') == 0) {
this.filetypeicon = 'assets/imgs/' + 'png.png'
} else if (types.indexOf('pdf') == 0) {
this.filetypeicon = 'assets/imgs/' + 'pdf.png'
}
this.paymentRecordData['typeStr'] = this.filetypeicon
//console.log(this.filetypeicon)
}
/* Edit a list field */
goFormEditPage(field, value, type) {
let cid = this.navParams.get('cid')
if (type == 'selectPaymentBank') {
this.navCtrl.push(ProjectCollectBankPage, { callback: this.setValue, field: field, data: this.paymentRecordData, type: 'clientType', cid: cid });
} else {
this.navCtrl.push(FormEditPage, { callback: this.setValue, value: value, field: field, type: type });
}
}
/* Set value (callback) */
// setValue = (field,value)=> {
// this.paymentRecordData[field] = value;
// }
/* Set value (callback) */
setValue = (field, value) => {
if (field == 'taxNumber' && value) {
this.paymentRecordData['payer'] = value.name;
this.paymentRecordData['payerBank'] = value.bankName;
this.paymentRecordData['payerAccount'] = value.account;
} else {
this.paymentRecordData[field] = value;
}
}
getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)"); // Build a RegExp containing the target parameter
var r = window.location.search.substr(1).match(reg); // Match the target parameter in the query string
if (r != null) {
return encodeURI(r[2]); // Return the parameter value
} else {
return null;
}
}
/* Request additional invoice info for the project application */
getProjectPaymentRecordDetail(psid) {
// let projectInvoiceDetailUrl = 'http://mamon.yemindream.com/mamon/customer/getPayMentByPsid';
const openId = window.sessionStorage.getItem('openId') || this.getUrlParam('openId');
let projectInvoiceDetailUrl = getPayMentByPsidUrl + '?openId=' + openId + '&psid=' + psid;
this.Provider.getMamenSwiperData(projectInvoiceDetailUrl).subscribe(res => {
if (res.code == 200) {
this.paymentRecordData = res.data;
console.log('paymentRecordData', this.paymentRecordData)
this.paymentRecordData['size'] = (this.paymentRecordData['size'] / 1048576).toPrecision(3)
if (this.paymentRecordData['size'] > 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] + ' MB'
} else if (this.paymentRecordData['size'] < 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] * 1024 + ' KB'
}
if (this.paymentRecordData['typeStr']) {
if (this.paymentRecordData['typeStr'].search(/doc/) !== -1 || this.paymentRecordData['typeStr'].search(/docx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'doc.png'
} else if (this.paymentRecordData['typeStr'].search(/ppt/) !== -1 || this.paymentRecordData['typeStr'].search(/pptx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'ppt.png'
} else if (this.paymentRecordData['typeStr'].search(/xls/) !== -1 || this.paymentRecordData['typeStr'].search(/xlsx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'xls.png'
} else if (this.paymentRecordData['typeStr'].search(/jpg/) !== -1 || this.paymentRecordData['typeStr'].search(/png/) !== -1 || this.paymentRecordData['typeStr'].search(/jpeg/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'png.png'
} else if (this.paymentRecordData['typeStr'].search(/pdf/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'pdf.png'
}
}
this.paymentRecordData && this.paymentRecordData['payerList'] && this.paymentRecordData['payerList'].length > 0 && this.paymentRecordData['payerList'].map(d => {
if (d.type == 1) {
this.paymentRecordData['payer'] = d.name;
this.paymentRecordData['payerBank'] = d.bankName;
this.payment | // var a = o | identifier_name |
project-payment-record.ts | fileUrlvalue: any
public paymentRecordData = {}
public payerList = {};
public isComplete = false;
public data = {};
public isCompleteRecord = false
public tipstext: any
public isFailed: any
constructor(public navCtrl: NavController, public navParams: NavParams, private transfer: FileTransfer, private file: File, private Provider: MamenDataProvider, private http: HttpClient) {
}
ionViewDidLoad() {
let psid = this.navParams.get('id')
this.getProjectPaymentRecordDetail(psid);
this.data = this.navParams.get('data');
console.log('ionViewDidLoad ProjectPaymentRecordPage');
}
ionViewDidEnter() {
this.isAttention();
}
| // let data = { url: url };
this.http.get(hideAttentionMenuUrl).subscribe(res => {
if (res['code'] == 200) {
wx.config({
debug: false,
appId: res['data'].appid,
timestamp: res['data'].timestamp,
nonceStr: res['data'].nonceStr,
signature: res['data'].signature,
jsApiList: ['hideOptionMenu']
});
wx.ready(function () {
//wx.showOptionMenu();
wx.hideOptionMenu();
});
}
})
}
fileupload() {
// The official Ionic docs example omits this line
// http://ionicframework.com/docs/native/file-transfer/
//
const fileTransfer: FileTransferObject = this.transfer.create();
// More options are available on FileUploadOptions; see its inline comments for details
let options: FileUploadOptions = {
fileKey: 'file',
fileName: 'name.jpg', // file type
headers: {},
params: {} // put any extra request parameters here
}
console.log(12312);
fileTransfer.upload('', 'http://100.168.1.48:8181/mafile/upload.jsp', options)
.then((data) => {
// success
}, (err) => {
// error
})
}
filedownload() {
console.log(3123);
const fileTransfer: FileTransferObject = this.transfer.create();
const url = 'http://www.example.com/file.pdf';
fileTransfer.download(url, this.file.dataDirectory + 'file.pdf').then((entry) => {
console.log('download complete: ' + entry.toURL());
}, (error) => {
// handle error
});
}
/** Navigate to the upload page */
gouploadfile() {
this.navCtrl.push(UploadfilePage, {
callback: this.setuploadfile,
})
}
// Dialog prompt to confirm before going back
sureComplete() {
this.isComplete = !this.isComplete;
}
setuploadfile = (obj, name) => {
this.filestatus = true;
this.filetitle = name;
//console.log(obj)
// var a = obj.fileSize / 1048576;
this.filesize = (obj.fileSize / 1048576).toPrecision(3);
this.fileurl = obj.url;
this.paymentRecordData['sourceName'] = this.filetitle
this.paymentRecordData['size'] = this.filesize
this.paymentRecordData['certifiedUrl'] = this.fileurl
this.paymentRecordData['fid'] = obj.fid
var types = obj.fileType;
if (this.filesize > 1) {
this.filesize = this.filesize + ' MB'
} else {
this.filesize = this.filesize * 1024 + ' KB'
}
if (types.indexOf('doc') == 0 || types.indexOf('docx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'doc.png'
} else if (types.indexOf('ppt') == 0 || types.indexOf('pptx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'ppt.png'
} else if (types.indexOf('xls') == 0 || types.indexOf('xlsx') == 0) {
this.filetypeicon = 'assets/imgs/' + 'xls.png'
} else if (types.indexOf('jpg') == 0 || types.indexOf('png') == 0) {
this.filetypeicon = 'assets/imgs/' + 'png.png'
} else if (types.indexOf('pdf') == 0) {
this.filetypeicon = 'assets/imgs/' + 'pdf.png'
}
this.paymentRecordData['typeStr'] = this.filetypeicon
//console.log(this.filetypeicon)
}
/* Edit a list field */
goFormEditPage(field, value, type) {
let cid = this.navParams.get('cid')
if (type == 'selectPaymentBank') {
this.navCtrl.push(ProjectCollectBankPage, { callback: this.setValue, field: field, data: this.paymentRecordData, type: 'clientType', cid: cid });
} else {
this.navCtrl.push(FormEditPage, { callback: this.setValue, value: value, field: field, type: type });
}
}
/* Set value (callback) */
// setValue = (field,value)=> {
// this.paymentRecordData[field] = value;
// }
/* Set value (callback) */
setValue = (field, value) => {
if (field == 'taxNumber' && value) {
this.paymentRecordData['payer'] = value.name;
this.paymentRecordData['payerBank'] = value.bankName;
this.paymentRecordData['payerAccount'] = value.account;
} else {
this.paymentRecordData[field] = value;
}
}
getUrlParam(name) {
var reg = new RegExp("(^|&)" + name + "=([^&]*)(&|$)"); // Build a RegExp containing the target parameter
var r = window.location.search.substr(1).match(reg); // Match the target parameter in the query string
if (r != null) {
return encodeURI(r[2]); // Return the parameter value
} else {
return null;
}
}
/* Request additional invoice info for the project application */
getProjectPaymentRecordDetail(psid) {
// let projectInvoiceDetailUrl = 'http://mamon.yemindream.com/mamon/customer/getPayMentByPsid';
const openId = window.sessionStorage.getItem('openId') || this.getUrlParam('openId');
let projectInvoiceDetailUrl = getPayMentByPsidUrl + '?openId=' + openId + '&psid=' + psid;
this.Provider.getMamenSwiperData(projectInvoiceDetailUrl).subscribe(res => {
if (res.code == 200) {
this.paymentRecordData = res.data;
console.log('paymentRecordData', this.paymentRecordData)
this.paymentRecordData['size'] = (this.paymentRecordData['size'] / 1048576).toPrecision(3)
if (this.paymentRecordData['size'] > 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] + ' MB'
} else if (this.paymentRecordData['size'] < 1) {
this.paymentRecordData['size'] = this.paymentRecordData['size'] * 1024 + ' KB'
}
if (this.paymentRecordData['typeStr']) {
if (this.paymentRecordData['typeStr'].search(/doc/) !== -1 || this.paymentRecordData['typeStr'].search(/docx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'doc.png'
} else if (this.paymentRecordData['typeStr'].search(/ppt/) !== -1 || this.paymentRecordData['typeStr'].search(/pptx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'ppt.png'
} else if (this.paymentRecordData['typeStr'].search(/xls/) !== -1 || this.paymentRecordData['typeStr'].search(/xlsx/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'xls.png'
} else if (this.paymentRecordData['typeStr'].search(/jpg/) !== -1 || this.paymentRecordData['typeStr'].search(/png/) !== -1 || this.paymentRecordData['typeStr'].search(/jpeg/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'png.png'
} else if (this.paymentRecordData['typeStr'].search(/pdf/) !== -1) {
this.paymentRecordData['typeStr'] = 'assets/imgs/' + 'pdf.png'
}
}
this.paymentRecordData && this.paymentRecordData['payerList'] && this.paymentRecordData['payerList'].length > 0 && this.paymentRecordData['payerList'].map(d => {
if (d.type == 1) {
this.paymentRecordData['payer'] = d.name;
this.paymentRecordData['payerBank'] = d.bankName;
this.paymentRecord | // Hide the bottom share menu
isAttention() {
// let url = location.href.split('#')[0]; // URL of the current page, without the '#' and everything after it | random_line_split |
odor-finder.py | ile_l,77)
maxattr,_=torch.max(attr,dim=1)
minattr,_=torch.min(attr,dim=1)
relevance=maxattr+minattr
relevance=relevance.cpu().detach().numpy()
data_relevance=pd.DataFrame()
data_relevance["values"]=relevance
len_smile=min(len(x_input_smile), smile_l)
# cropped_smile_relevance=data_relevance.iloc[0:len_smile]
cropped_smile_relevance=data_relevance.head(len_smile)
x_smile_labels=pd.Series(list(x_input_smile[:len_smile]))
cropped_smile_relevance['smile_char']=x_smile_labels
impacts=[]
cropped_smile_relevance['positive']=['']*len_smile
cropped_smile_relevance['negative']=['']*len_smile
for row in range(len_smile):
if (ord(cropped_smile_relevance['smile_char'][row])<65 or ord(cropped_smile_relevance['smile_char'][row])>90):
cropped_smile_relevance['values'][row]=0
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
else:
if(cropped_smile_relevance['values'][row]>0):
cropped_smile_relevance['positive'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['negative'][row]=0
elif(cropped_smile_relevance['values'][row]<0):
cropped_smile_relevance['negative'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['positive'][row]=0
else:
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
impacts.append(cropped_smile_relevance['values'][row])
# print(cropped_smile_relevance)
ax=cropped_smile_relevance.plot( y=["positive", "negative"], color=['green', 'red'], kind="bar", figsize=(25,15))
ax.legend(['Contribution to Binding', 'Contribution to Non-Binding'],prop={'size': 16})
ax.set_xticklabels(cropped_smile_relevance['smile_char'],fontsize=15,rotation=0)
ax.set_xlabel("SMILES", fontsize=15)
ax.set_ylabel("Relevance", fontsize=15)
ax.figure.savefig(f"{filename}_{count}_SmileInterpretability.pdf")
#ax.close()
# Structural Interpretability
mol=x_input_smile
m = Chem.MolFromSmiles(mol)
num_atoms = m.GetNumAtoms()
labels = [ m.GetAtomWithIdx(i).GetSymbol().upper() for i in range(num_atoms) ]
colors = {}
i=0
k=0
y_max = np.max(impacts)
y_min = np.min(impacts)
dist = y_max - y_min
while i < len(mol):
c = mol[i]
n = ""
if c.upper() not in "CBONSPFIK":
print(mol[i], 0.0, "0xFFFFFF")
else:
if i + 1 < len(mol):
n = mol[i+1]
sym = c + n
sym = sym.strip()
com = sym.upper()
if com == "BR" or com == "CL" or com == "NA":
i = i + 1
else:
com = c.upper()
sym = c
if com == labels[k]:
color = "0xBBBBBB"
triple = [0, 0 ,0]
if impacts[k] > 0.0:
y = int(math.floor(255.0 - 155.0 * impacts[k] / y_max))
color = "0x00" + hex(y)[-2:] + "00"
triple[1] = y /255.0
if impacts[k] < 0.0:
y = int(math.floor(255.0 - 155.0 * impacts[k] / y_min))
color = "0x" + hex(y)[-2:] + "0000"
triple[0] = y / 255.0
colors[k]= tuple(triple)
print(sym, impacts[k], color)
k = k + 1
i = i + 1
drawer = rdMolDraw2D.MolDraw2DSVG(400, 400)
drawer.DrawMolecule(m,highlightAtoms = [i for i in range(num_atoms)], highlightBonds=[], highlightAtomColors = colors)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:','')
fp = open(f"{filename}_{count}_mol.svg", "w")
print(svg, file=fp)
fp.close()
# Sequence Interpretability
ax=plt.figure()
baseline = torch.zeros(2, seq_l, 27)
ig = IntegratedGradients(model)
attr,delta= ig.attribute((x_user_smile.to(device),x_user_seq.to(device)), target=1,return_convergence_delta=True)
smile_attr=attr[0].view(smile_l,77)
seq_attr=attr[1].view(seq_l,27)
maxattr,_=torch.max(seq_attr,dim=1)
minattr,_=torch.min(seq_attr,dim=1)
relevance=maxattr+minattr
relevance=relevance.cpu().detach().numpy()
data_relevance=pd.DataFrame()
data_relevance["values"]=relevance
len_seq=min(len(x_input_seq), seq_l)
# cropped_seq_relevance=data_relevance.iloc[0:len_seq]
cropped_seq_relevance=data_relevance.head(len_seq)
x_seq_labels=pd.Series(list(x_input_seq))
cropped_seq_relevance['seq_char']=x_seq_labels
cropped_seq_relevance['positive']=['']*len_seq
cropped_seq_relevance['negative']=['']*len_seq
for row in range(len_seq):
if (ord(cropped_seq_relevance['seq_char'][row])<65 or ord(cropped_seq_relevance['seq_char'][row])>90):
cropped_seq_relevance['values'][row]=0
cropped_seq_relevance['positive'][row]=0
cropped_seq_relevance['negative'][row]=0
else:
if(cropped_seq_relevance['values'][row]>0):
cropped_seq_relevance['positive'][row]=cropped_seq_relevance['values'][row]
cropped_seq_relevance['negative'][row]=0
elif(cropped_seq_relevance['values'][row]<0):
cropped_seq_relevance['negative'][row]=cropped_seq_relevance['values'][row]
cropped_seq_relevance['positive'][row]=0
else:
cropped_seq_relevance['positive'][row]=0
cropped_seq_relevance['negative'][row]=0
ax=cropped_seq_relevance.plot( y=["positive", "negative"], color=['green', 'red'], kind="bar", figsize=(35, 15) )
ax.legend(['Contribution to Binding', 'Contribution to Non-Binding'])
ax=cropped_seq_relevance.plot( y=["positive", "negative"], color=['green', 'red'], kind="barh", figsize=(20, 70) )
ax.legend(['Contribution to Binding', 'Contribution to non binding'],prop={'size': 16})
ax.set_yticklabels(cropped_seq_relevance['seq_char'],fontsize=12,rotation=0)
ax.set_ylabel("Receptor Sequence",fontsize=15)
ax.set_xlabel("Relevance",fontsize=15,rotation=0)
ax.figure.savefig(f'{filename}_{count}_SequenceInterpretability.pdf')
#ax.close()
# In[15]:
df = pd.read_csv(TRAIN_DATA_FILE)
unique_smiles=df["SMILES"].unique().tolist()
class CPU_Unpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'torch.storage' and name == '_load_from_bytes':
return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
else: return super().find_class(module, name)
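# Note (added for clarity, an assumption about intent): overriding the
# torch.storage loader lets a model pickled on a GPU machine be restored on a
# CPU-only host; a plain pickle.load(f) would otherwise try to deserialize
# CUDA tensors and fail when no GPU is available.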
f=open(model_filename, 'rb')
loaded_model = CPU_Unpickler(f).load()
# loaded_model=pickle.load(f)
loaded_model.to(device)
f = pd.read_csv(apply_data_file)
input_seq= f["seq"][0]
input_k=f["k"][0]
df_topk=pd.DataFrame(columns=['Smiles','Probability'])
k=0
for smile in unique_smiles:
prob,pred=prediction(loaded_model, smile, input_seq )
if(pred==1):
df_topk.loc[k]=[smile,prob]
k+=1
df_topk=df_topk.sort_values("Probability", ascending=False)
min_k = min(input_k,len(df_topk))
df_topk=df_topk.head(min_k)
for just in range(min_k):
combined_user_predict(loaded_model, input_seq,df_topk["Smiles"].tolist()[just], str(just+1),filename)
if(len(df_topk)==0):
df | _topk.loc[0]=['Empty','Empty']
| conditional_block |
|
odor-finder.py |
TRAIN_DATA_FILE= getConfig("Task","train_data_file")
apply_data_file= getConfig("Task","apply_data_file")
result_file= getConfig("Task","result_file")
smile_l=int(getConfig("Task","smile_length","75"))
seq_l=int(getConfig("Task","sequence_length","315"))
filename=getConfig("Task","filename")
model_filename = getConfig("Task","model_file")
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
class BLSTM(nn.Module):
def __init__(self, input_smile_dim, hidden_smile_dim, layer_smile_dim, input_seq_dim, hidden_seq_dim, layer_seq_dim, output_dim):
super(BLSTM, self).__init__()
self.hidden_smile_dim = hidden_smile_dim
self.layer_smile_dim = layer_smile_dim
self.hidden_seq_dim = hidden_seq_dim
self.layer_seq_dim = layer_seq_dim
self.output_dim = output_dim
self.smile_len = smile_l
self.seq_len = seq_l
self.num_smile_dir=2
self.num_seq_dir=2
self.lstm_smile = nn.LSTM(input_smile_dim, hidden_smile_dim, layer_smile_dim,bidirectional=True)
self.lstm_seq = nn.LSTM(input_seq_dim, hidden_seq_dim, layer_seq_dim,bidirectional=True)
self.dropout = nn.Dropout(0.5)
self.fc_seq= nn.Linear(self.seq_len*hidden_seq_dim*self.num_seq_dir,smile_o)
self.fc_smile= nn.Linear(self.smile_len*hidden_smile_dim*self.num_smile_dir,seq_o)
self.batch_norm_combined = nn.BatchNorm1d(smile_o+seq_o, affine = False)
# self.fc_combined = nn.Sequential(nn.Linear(1000,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.BatchNorm1d(100, affine = False),nn.Dropout(.5),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,10),nn.ReLU(),nn.Linear(10,output_dim))
self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
def forward(self, x1,x2):
h0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
c0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
h0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
c0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
h0_smile=h0_smile.to(device)
c0_smile=c0_smile.to(device)
h0_seq=h0_seq.to(device)
c0_seq=c0_seq.to(device)
out_smile, (hn_smile, cn_smile) = self.lstm_smile(x1, (h0_smile, c0_smile))
out_seq, (hn_seq, cn_seq) = self.lstm_seq(x2, (h0_seq, c0_seq))
out_smile = self.dropout(out_smile)
out_seq = self.dropout(out_seq)
out_seq=self.fc_seq(out_seq.view(-1,self.seq_len*self.hidden_seq_dim*self.num_seq_dir))
out_seq = self.dropout(out_seq)
out_smile=self.fc_smile(out_smile.view(-1,self.smile_len*self.hidden_smile_dim*self.num_smile_dir))
out_smile = self.dropout(out_smile)
out_combined=torch.cat((out_smile,out_seq), dim=1)
out_combined = self.batch_norm_combined(out_combined)
out_combined=self.fc_combined(out_combined)
prob=nn.Softmax(dim=1)(out_combined)
pred=nn.LogSoftmax(dim=1)(out_combined)
return pred
# In[3]:
def one_hot_smile(smile):
key="()+–./-0123456789=#@$ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]abcdefghijklmnopqrstuvwxyz^"
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=smile_l
if len(smile)<=threshold:
smile=smile+("^"*(threshold-len(smile)))
else:
smile=smile[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(smile)):
array[i][res[smile[i]]]=1
array=torch.Tensor(array)
return array
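# Quick sanity check (illustrative, not part of the original script): the
# encoding always has shape (smile_l, 77), one row per character position and
# one column per symbol in `key`, with '^' used as right padding.
# assert one_hot_smile("CCO").shape == (smile_l, 77)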
# In[5]:
def one_hot_seq(seq):
key="ABCDEFGHIJKLMNOPQRSTUVWXYZ^"
seq=seq.upper()
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=seq_l
if len(seq)<=threshold:
seq=seq+("^"*(threshold-len(seq)))
else:
seq=seq[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(seq)):
array[i][res[seq[i]]]=1
array=torch.Tensor(array)
return array
# In[6]:
def prediction(model, x_input_smile, x_input_seq):
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
model.eval()
scores = model(x_user_smile.to(device),x_user_seq.to(device))
_, predictions = scores.max(1)
prob=torch.exp(scores)
prob=prob.tolist()
return float(str(prob[0][predictions.item()])[:5]), predictions.item()
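# Minimal usage sketch (the argument values are assumptions for illustration):
# prob, label = prediction(loaded_model, "CCO", receptor_sequence)
# `label` is the predicted class (1 is treated as binding below) and `prob`
# is the softmax probability of that predicted class.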
def combined_user_predict(model, x_input_seq, x_input_smile, count,filename):
mol = Chem.MolFromSmiles(x_input_smile)
Chem.Kekulize(mol)
x_input_smile=Chem.MolToSmiles(mol, kekuleSmiles=True)
ax=plt.figure()
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
model.eval()
ax=plt.figure()
torch.backends.cudnn.enabled=False
ig = IntegratedGradients(model)
baseline = torch.zeros(1, smile_l, 77)
for i in baseline[0]:
i[-1]=1
attr,delta= ig.attribute((x_user_smile.to(device),x_user_seq.to(device)),target=1,return_convergence_delta=True)
attr=attr[0].view(smile_l,77)
maxattr,_=torch.max(attr,dim=1)
minattr,_=torch.min(attr,dim=1)
relevance=maxattr+minattr
relevance=relevance.cpu().detach().numpy()
data_relevance=pd.DataFrame()
data_relevance["values"]=relevance
len_smile=min(len(x_input_smile), smile_l)
# cropped_smile_relevance=data_relevance.iloc[0:len_smile]
cropped_smile_relevance=data_relevance.head(len_smile)
x_smile_labels=pd.Series(list(x_input_smile[:len_smile]))
cropped_smile_relevance['smile_char']=x_smile_labels
impacts=[]
cropped_smile_relevance['positive']=['']*len_smile
cropped_smile_relevance['negative']=['']*len_smile
for row in range(len_smile):
if (ord(cropped_smile_relevance['smile_char'][row])<65 or ord(cropped_smile_relevance['smile_char'][row])>90):
cropped_smile_relevance['values'][row]=0
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
else:
if(cropped_smile_relevance[' | try:
return config[section][attribute]
except:
return default | identifier_body |
|
odor-finder.py | )
# self.fc_combined = nn.Sequential(nn.Linear(1000,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,100),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.BatchNorm1d(100, affine = False),nn.Dropout(.5),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,10),nn.ReLU(),nn.Linear(10,output_dim))
self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
def forward(self, x1,x2):
h0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
c0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
h0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
c0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
h0_smile=h0_smile.to(device)
c0_smile=c0_smile.to(device)
h0_seq=h0_seq.to(device)
c0_seq=c0_seq.to(device)
out_smile, (hn_smile, cn_smile) = self.lstm_smile(x1, (h0_smile, c0_smile))
out_seq, (hn_seq, cn_seq) = self.lstm_seq(x2, (h0_seq, c0_seq))
out_smile = self.dropout(out_smile)
out_seq = self.dropout(out_seq)
out_seq=self.fc_seq(out_seq.view(-1,self.seq_len*self.hidden_seq_dim*self.num_seq_dir))
out_seq = self.dropout(out_seq)
out_smile=self.fc_smile(out_smile.view(-1,self.smile_len*self.hidden_smile_dim*self.num_smile_dir))
out_smile = self.dropout(out_smile)
out_combined=torch.cat((out_smile,out_seq), dim=1)
out_combined = self.batch_norm_combined(out_combined)
out_combined=self.fc_combined(out_combined)
prob=nn.Softmax(dim=1)(out_combined)
pred=nn.LogSoftmax(dim=1)(out_combined)
return pred
# In[3]:
def one_hot_smile(smile):
key="()+–./-0123456789=#@$ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]abcdefghijklmnopqrstuvwxyz^"
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=smile_l
if len(smile)<=threshold:
smile=smile+("^"*(threshold-len(smile)))
else:
smile=smile[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(smile)):
array[i][res[smile[i]]]=1
array=torch.Tensor(array)
return array
# In[5]:
def on | eq):
key="ABCDEFGHIJKLMNOPQRSTUVWXYZ^"
seq=seq.upper()
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=seq_l
if len(seq)<=threshold:
seq=seq+("^"*(threshold-len(seq)))
else:
seq=seq[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(seq)):
array[i][res[seq[i]]]=1
array=torch.Tensor(array)
return array
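# Analogous to one_hot_smile above (illustrative note): the result is a fixed
# (seq_l, 27) one-hot matrix over the letters A-Z plus the '^' padding symbol.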
# In[6]:
def prediction(model, x_input_smile, x_input_seq):
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
model.eval()
scores = model(x_user_smile.to(device),x_user_seq.to(device))
_, predictions = scores.max(1)
prob=torch.exp(scores)
prob=prob.tolist()
return float(str(prob[0][predictions.item()])[:5]), predictions.item()
def combined_user_predict(model, x_input_seq, x_input_smile, count,filename):
mol = Chem.MolFromSmiles(x_input_smile)
Chem.Kekulize(mol)
x_input_smile=Chem.MolToSmiles(mol, kekuleSmiles=True)
ax=plt.figure()
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
model.eval()
ax=plt.figure()
torch.backends.cudnn.enabled=False
ig = IntegratedGradients(model)
baseline = torch.zeros(1, smile_l, 77)
for i in baseline[0]:
i[-1]=1
attr,delta= ig.attribute((x_user_smile.to(device),x_user_seq.to(device)),target=1,return_convergence_delta=True)
attr=attr[0].view(smile_l,77)
maxattr,_=torch.max(attr,dim=1)
minattr,_=torch.min(attr,dim=1)
relevance=maxattr+minattr
relevance=relevance.cpu().detach().numpy()
data_relevance=pd.DataFrame()
data_relevance["values"]=relevance
len_smile=min(len(x_input_smile), smile_l)
# cropped_smile_relevance=data_relevance.iloc[0:len_smile]
cropped_smile_relevance=data_relevance.head(len_smile)
x_smile_labels=pd.Series(list(x_input_smile[:len_smile]))
cropped_smile_relevance['smile_char']=x_smile_labels
impacts=[]
cropped_smile_relevance['positive']=['']*len_smile
cropped_smile_relevance['negative']=['']*len_smile
for row in range(len_smile):
if (ord(cropped_smile_relevance['smile_char'][row])<65 or ord(cropped_smile_relevance['smile_char'][row])>90):
cropped_smile_relevance['values'][row]=0
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
else:
if(cropped_smile_relevance['values'][row]>0):
cropped_smile_relevance['positive'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['negative'][row]=0
elif(cropped_smile_relevance['values'][row]<0):
cropped_smile_relevance['negative'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['positive'][row]=0
else:
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
impacts.append(cropped_smile_relevance['values'][row])
# print(cropped_smile_relevance)
ax=cropped_smile_relevance.plot( y=["positive", "negative"], color=['green', 'red'], kind="bar", figsize=(25,15))
ax.legend(['Contribution to Binding', 'Contribution to Non-Binding'],prop={'size': 16})
ax.set_xticklabels(cropped_smile_relevance['smile_char'],fontsize=15,rotation=0)
ax.set_xlabel("SMILES", fontsize=15)
ax.set_ylabel("Relevance", fontsize=15)
ax.figure.savefig(f"{filename}_{count}_SmileInterpretability.pdf")
#ax.close()
# Structural Interpretability
mol=x_input_smile
m = Chem.MolFromSmiles(mol)
num_atoms = m.GetNumAtoms()
labels = [ m.GetAtomWithIdx(i).GetSymbol().upper() for i in range(num_atoms) ]
colors = {}
i=0
k=0
y_max = np.max(impacts)
y_min = np.min(impacts)
dist = y_max - y_min
while i < len(mol):
| e_hot_seq(s | identifier_name |
odor-finder.py | 00),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.BatchNorm1d(100, affine = False),nn.Dropout(.5),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
# self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,10),nn.ReLU(),nn.Linear(10,output_dim))
self.fc_combined = nn.Sequential(nn.Linear(smile_o+seq_o,100),nn.ReLU(),nn.Linear(100,10),nn.ReLU(),nn.Linear(10,output_dim))
def forward(self, x1,x2):
h0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
c0_smile = torch.zeros(self.layer_smile_dim*self.num_smile_dir, x1.size(1), self.hidden_smile_dim).requires_grad_()
h0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
c0_seq = torch.zeros(self.layer_seq_dim*self.num_seq_dir, x2.size(1), self.hidden_seq_dim).requires_grad_()
h0_smile=h0_smile.to(device)
c0_smile=c0_smile.to(device)
h0_seq=h0_seq.to(device)
c0_seq=c0_seq.to(device)
out_smile, (hn_smile, cn_smile) = self.lstm_smile(x1, (h0_smile, c0_smile))
out_seq, (hn_seq, cn_seq) = self.lstm_seq(x2, (h0_seq, c0_seq))
out_smile = self.dropout(out_smile)
out_seq = self.dropout(out_seq)
out_seq=self.fc_seq(out_seq.view(-1,self.seq_len*self.hidden_seq_dim*self.num_seq_dir))
out_seq = self.dropout(out_seq)
out_smile=self.fc_smile(out_smile.view(-1,self.smile_len*self.hidden_smile_dim*self.num_smile_dir))
out_smile = self.dropout(out_smile)
out_combined=torch.cat((out_smile,out_seq), dim=1)
out_combined = self.batch_norm_combined(out_combined)
out_combined=self.fc_combined(out_combined)
prob=nn.Softmax(dim=1)(out_combined)
pred=nn.LogSoftmax(dim=1)(out_combined)
return pred
# In[3]:
def one_hot_smile(smile):
key="()+–./-0123456789=#@$ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]abcdefghijklmnopqrstuvwxyz^"
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=smile_l
if len(smile)<=threshold:
smile=smile+("^"*(threshold-len(smile)))
else:
smile=smile[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(smile)):
array[i][res[smile[i]]]=1
array=torch.Tensor(array)
return array
# In[5]:
def one_hot_seq(seq):
key="ABCDEFGHIJKLMNOPQRSTUVWXYZ^"
seq=seq.upper()
test_list=list(key)
res = {val : idx for idx, val in enumerate(test_list)}
threshold=seq_l
if len(seq)<=threshold:
seq=seq+("^"*(threshold-len(seq)))
else:
seq=seq[0:threshold]
array=[[0 for j in range(len(key))] for i in range(threshold)]
for i in range(len(seq)):
array[i][res[seq[i]]]=1
array=torch.Tensor(array)
return array
# In[6]:
def prediction(model, x_input_smile, x_input_seq):
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
model.eval()
scores = model(x_user_smile.to(device),x_user_seq.to(device))
_, predictions = scores.max(1)
prob=torch.exp(scores)
prob=prob.tolist()
return float(str(prob[0][predictions.item()])[:5]), predictions.item()
def combined_user_predict(model, x_input_seq, x_input_smile, count,filename):
mol = Chem.MolFromSmiles(x_input_smile)
Chem.Kekulize(mol)
x_input_smile=Chem.MolToSmiles(mol, kekuleSmiles=True)
ax=plt.figure()
x_user_seq=one_hot_seq(x_input_seq)
x_user_seq=list(x_user_seq)
x_user_seq=torch.stack(x_user_seq)
x_user_seq=x_user_seq.view(1,seq_l,27)
x_user_smile=one_hot_smile(x_input_smile)
x_user_smile=list(x_user_smile)
x_user_smile=torch.stack(x_user_smile)
x_user_smile=x_user_smile.view(1,smile_l,77)
model.eval()
ax=plt.figure()
torch.backends.cudnn.enabled=False
ig = IntegratedGradients(model)
baseline = torch.zeros(1, smile_l, 77)
for i in baseline[0]:
i[-1]=1
attr,delta= ig.attribute((x_user_smile.to(device),x_user_seq.to(device)),target=1,return_convergence_delta=True)
attr=attr[0].view(smile_l,77)
maxattr,_=torch.max(attr,dim=1)
minattr,_=torch.min(attr,dim=1)
relevance=maxattr+minattr
relevance=relevance.cpu().detach().numpy()
data_relevance=pd.DataFrame()
data_relevance["values"]=relevance
len_smile=min(len(x_input_smile), smile_l)
# cropped_smile_relevance=data_relevance.iloc[0:len_smile]
cropped_smile_relevance=data_relevance.head(len_smile)
x_smile_labels=pd.Series(list(x_input_smile[:len_smile]))
cropped_smile_relevance['smile_char']=x_smile_labels
impacts=[]
cropped_smile_relevance['positive']=['']*len_smile
cropped_smile_relevance['negative']=['']*len_smile
for row in range(len_smile):
if (ord(cropped_smile_relevance['smile_char'][row])<65 or ord(cropped_smile_relevance['smile_char'][row])>90):
cropped_smile_relevance['values'][row]=0
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
else:
if(cropped_smile_relevance['values'][row]>0):
cropped_smile_relevance['positive'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['negative'][row]=0
elif(cropped_smile_relevance['values'][row]<0):
cropped_smile_relevance['negative'][row]=cropped_smile_relevance['values'][row]
cropped_smile_relevance['positive'][row]=0
else:
cropped_smile_relevance['positive'][row]=0
cropped_smile_relevance['negative'][row]=0
impacts.append(cropped_smile_relevance['values'][row])
# print(cropped_smile_relevance)
ax=cropped_smile_relevance.plot( y=["positive", "negative"], color=['green', 'red'], kind="bar", figsize=(25,15))
ax.legend(['Contribution to Binding', 'Contribution to Non-Binding'],prop={'size': 16})
ax.set_xticklabels(cropped_smile_relevance['smile_char'],fontsize=15,rotation=0)
ax.set_xlabel("SMILES", fontsize=15)
ax.set_ylabel("Relevance", fontsize=15)
ax.figure.savefig(f"{filename}_{count}_SmileInterpretability.pdf")
#ax.close()
# Structural Interpretability
mol=x_input_smile
m = Chem.MolFromSmiles(mol)
num_atoms = m.GetNumAtoms()
labels = [ m.GetAtomWithIdx(i).GetSymbol().upper() for i in range(num_atoms) ]
colors = {}
i=0
k=0
y_max = np.max(impacts)
y_min = np.min(impacts)
dist = y_max - y_min
while i < len(mol):
c = mol[i]
n = "" | if c.upper() not in "CBONSPFIK":
print(mol[i], 0.0, "0xFFFFFF")
else:
if i + 1 < len(mol):
n = mol[i+1] | random_line_split |
|
record_trace.go | Name: "snapshotter",
Usage: "snapshotter name.",
Value: "overlaybd",
},
cli.StringFlag{
Name: "runtime",
Usage: "runtime name",
Value: defaults.DefaultRuntime,
},
cli.IntFlag{
Name: "max-concurrent-downloads",
Usage: "Set the max concurrent downloads for each pull",
Value: 8,
},
cli.BoolFlag{
Name: "tty,t",
Usage: "allocate a TTY for the container",
},
cli.BoolFlag{
Name: "disable-network-isolation",
Usage: "Do not use cni to provide network isolation, default is false",
},
cli.StringFlag{
Name: "cni-plugin-dir",
Usage: "cni plugin dir",
Value: "/opt/cni/bin/",
},
},
Action: func(cliCtx *cli.Context) (err error) {
// Create client
client, ctx, cancel, err := commands.NewClient(cliCtx)
if err != nil {
return err
}
defer cancel()
cs := client.ContentStore()
var con console.Console
if cliCtx.Bool("tty") {
if cliCtx.Uint("time") != 0 {
return errors.New("Cannot assign tty and time at the same time")
}
con = console.Current()
defer con.Reset()
if err := con.SetRaw(); err != nil {
return err
}
}
// Validate arguments
ref := cliCtx.Args().Get(0)
if ref == "" {
return errors.New("image ref must be provided")
}
newRef := cliCtx.Args().Get(1)
if newRef == "" {
return errors.New("new image ref must be provided")
}
if _, err = client.ImageService().Get(ctx, ref); err == nil {
return errors.Errorf("Please remove old image %s first", ref)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", ref)
}
if _, err = client.ImageService().Get(ctx, newRef); err == nil {
return errors.Errorf("New image %s exists", newRef)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", newRef)
}
// Fetch image metadata by rpull
fetchConfig, err := ctrcontent.NewFetchConfig(ctx, cliCtx)
if err != nil {
return err
}
if err := rpull(ctx, client, ref, cliCtx.String("snapshotter"), fetchConfig); err != nil {
return errors.Wrapf(err, "Fail to pull image metadata")
}
// Get image instance
imgInstance, err := client.ImageService().Get(ctx, ref)
if err != nil {
return err
}
image := containerd.NewImage(client, imgInstance)
imageManifest, err := images.Manifest(ctx, cs, image.Target(), platforms.Default())
if err != nil {
return err
}
// Validate top layer
topLayer := imageManifest.Layers[len(imageManifest.Layers)-1]
if _, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-digest"]; !ok {
return errors.New("Must be an overlaybd image")
}
if topLayer.Annotations["containerd.io/snapshot/overlaybd/acceleration-layer"] == "yes" {
return errors.New("Acceleration layer already exists")
}
fsType, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-fs-type"]
if !ok {
fsType = ""
}
// Fetch all layer blobs into content
if _, err = ctrcontent.Fetch(ctx, client, ref, fetchConfig); err != nil {
return err
}
// Create trace file
if err := os.Mkdir(cliCtx.String("working-dir"), 0644); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "failed to create working dir")
}
traceFile := filepath.Join(cliCtx.String("working-dir"), uniqueObjectString())
if _, err := os.Create(traceFile); err != nil {
return errors.New("failed to create trace file")
}
defer os.Remove(traceFile)
// Create lease
ctx, deleteLease, err := client.WithLease(ctx,
leases.WithID(uniqueObjectString()),
leases.WithExpiration(1*time.Hour),
)
if err != nil {
return errors.Wrap(err, "failed to create lease")
}
defer deleteLease(ctx)
// Create isolated network
if !cliCtx.Bool("disable-network-isolation") {
networkNamespace = uniqueObjectString()
namespacePath = "/var/run/netns/" + networkNamespace
if err = exec.Command("ip", "netns", "add", networkNamespace).Run(); err != nil {
return errors.Wrapf(err, "failed to add netns")
}
defer func() {
if nextErr := exec.Command("ip", "netns", "delete", networkNamespace).Run(); err == nil && nextErr != nil {
err = errors.Wrapf(err, "failed to delete netns")
}
}()
cniObj, err := createIsolatedNetwork(cliCtx)
if err != nil {
return err
}
defer func() {
if nextErr := cniObj.Remove(ctx, networkNamespace, namespacePath); err == nil && nextErr != nil {
err = errors.Wrapf(nextErr, "failed to teardown network")
}
}()
if _, err = cniObj.Setup(ctx, networkNamespace, namespacePath); err != nil {
return errors.Wrapf(err, "failed to setup network for namespace")
}
}
// Create container and run task
container, err := createContainer(ctx, client, cliCtx, image, traceFile)
if err != nil {
return err
}
defer container.Delete(ctx, containerd.WithSnapshotCleanup)
task, err := tasks.NewTask(ctx, client, container, "", con, false, "", nil)
if err != nil {
return err
}
defer task.Delete(ctx)
if cliCtx.Bool("tty") {
if err := tasks.HandleConsoleResize(ctx, task, con); err != nil {
return errors.Wrapf(err, "failed to resize console")
}
}
var statusC <-chan containerd.ExitStatus
if statusC, err = task.Wait(ctx); err != nil {
return err
}
if err := task.Start(ctx); err != nil {
return err
}
fmt.Println("Task is running ...")
timer := time.NewTimer(time.Duration(cliCtx.Uint("time")) * time.Second)
watchStop := make(chan bool)
// Start a thread to watch timeout and signals
if !cliCtx.Bool("tty") {
go watchThread(ctx, timer, task, watchStop)
}
// Wait task stopped
status := <-statusC
if _, _, err := status.Result(); err != nil {
return errors.Wrapf(err, "failed to get exit status")
}
if timer.Stop() {
watchStop <- true
fmt.Println("Task finished before timeout ...")
}
collectTrace(traceFile)
// Load trace file into content, and generate an acceleration layer
//loader := newContentLoader(true, contentFile{traceFile, "trace"})
loader := newContentLoaderWithFsType(true, fsType, contentFile{traceFile, "trace"})
accelLayer, err := loader.Load(ctx, cs)
if err != nil {
return fmt.Errorf("loadCommittedSnapshotInContent failed: %v", err)
}
// Create image with the acceleration layer on top
newManifestDesc, err := createImageWithAccelLayer(ctx, cs, imageManifest, accelLayer)
if err != nil {
return fmt.Errorf("createImageWithAccelLayer failed: %v", err)
}
newImage := images.Image{
Name: cliCtx.Args().Get(1),
Target: newManifestDesc,
}
if err = createImage(ctx, client.ImageService(), newImage); err != nil {
return fmt.Errorf("createImage failed: %v", err)
}
fmt.Printf("New image %s is created\n", newRef)
return nil
},
}
func watchThread(ctx context.Context, timer *time.Timer, task containerd.Task, watchStop chan bool) | if err != nil {
fmt.Printf("Failed to get task status: %v\n", err)
}
if st.Status == containerd.Running {
if err = task | {
// Allow termination by user signals
sigStop := make(chan bool)
sigChan := registerSignals(ctx, task, sigStop)
select {
case <-sigStop:
timer.Stop()
break
case <-watchStop:
break
case <-timer.C:
fmt.Println("Timeout, stop recording ...")
break
}
signal.Stop(sigChan)
close(sigChan)
st, err := task.Status(ctx) | identifier_body |
record_trace.go | Name: "snapshotter",
Usage: "snapshotter name.",
Value: "overlaybd",
},
cli.StringFlag{
Name: "runtime",
Usage: "runtime name",
Value: defaults.DefaultRuntime,
},
cli.IntFlag{
Name: "max-concurrent-downloads",
Usage: "Set the max concurrent downloads for each pull",
Value: 8,
},
cli.BoolFlag{
Name: "tty,t",
Usage: "allocate a TTY for the container",
},
cli.BoolFlag{
Name: "disable-network-isolation",
Usage: "Do not use cni to provide network isolation, default is false",
},
cli.StringFlag{
Name: "cni-plugin-dir",
Usage: "cni plugin dir",
Value: "/opt/cni/bin/",
},
},
Action: func(cliCtx *cli.Context) (err error) {
// Create client
client, ctx, cancel, err := commands.NewClient(cliCtx)
if err != nil {
return err
}
defer cancel()
cs := client.ContentStore()
var con console.Console
if cliCtx.Bool("tty") {
if cliCtx.Uint("time") != 0 {
return errors.New("Cannot assign tty and time at the same time")
}
con = console.Current()
defer con.Reset()
if err := con.SetRaw(); err != nil {
return err
}
}
// Validate arguments
ref := cliCtx.Args().Get(0)
if ref == "" {
return errors.New("image ref must be provided")
}
newRef := cliCtx.Args().Get(1)
if newRef == "" {
return errors.New("new image ref must be provided")
}
if _, err = client.ImageService().Get(ctx, ref); err == nil {
return errors.Errorf("Please remove old image %s first", ref)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", ref)
}
if _, err = client.ImageService().Get(ctx, newRef); err == nil {
return errors.Errorf("New image %s exists", newRef)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", newRef)
}
// Fetch image metadata by rpull
fetchConfig, err := ctrcontent.NewFetchConfig(ctx, cliCtx)
if err != nil {
return err
}
if err := rpull(ctx, client, ref, cliCtx.String("snapshotter"), fetchConfig); err != nil {
return errors.Wrapf(err, "Fail to pull image metadata")
}
// Get image instance
imgInstance, err := client.ImageService().Get(ctx, ref)
if err != nil {
return err
}
image := containerd.NewImage(client, imgInstance)
imageManifest, err := images.Manifest(ctx, cs, image.Target(), platforms.Default())
if err != nil {
return err
}
// Validate top layer
topLayer := imageManifest.Layers[len(imageManifest.Layers)-1]
if _, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-digest"]; !ok {
return errors.New("Must be an overlaybd image")
}
if topLayer.Annotations["containerd.io/snapshot/overlaybd/acceleration-layer"] == "yes" {
return errors.New("Acceleration layer already exists")
}
fsType, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-fs-type"]
if !ok {
fsType = ""
}
// Fetch all layer blobs into content
if _, err = ctrcontent.Fetch(ctx, client, ref, fetchConfig); err != nil {
return err
}
// Create trace file
if err := os.Mkdir(cliCtx.String("working-dir"), 0644); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "failed to create working dir")
}
traceFile := filepath.Join(cliCtx.String("working-dir"), uniqueObjectString())
if _, err := os.Create(traceFile); err != nil {
return errors.New("failed to create trace file")
}
defer os.Remove(traceFile)
// Create lease
ctx, deleteLease, err := client.WithLease(ctx,
leases.WithID(uniqueObjectString()),
leases.WithExpiration(1*time.Hour),
)
if err != nil {
return errors.Wrap(err, "failed to create lease")
}
defer deleteLease(ctx)
// Create isolated network
if !cliCtx.Bool("disable-network-isolation") {
networkNamespace = uniqueObjectString()
namespacePath = "/var/run/netns/" + networkNamespace
if err = exec.Command("ip", "netns", "add", networkNamespace).Run(); err != nil {
return errors.Wrapf(err, "failed to add netns")
}
defer func() {
if nextErr := exec.Command("ip", "netns", "delete", networkNamespace).Run(); err == nil && nextErr != nil {
err = errors.Wrapf(err, "failed to delete netns")
}
}()
cniObj, err := createIsolatedNetwork(cliCtx)
if err != nil {
return err
}
defer func() {
if nextErr := cniObj.Remove(ctx, networkNamespace, namespacePath); err == nil && nextErr != nil {
err = errors.Wrapf(nextErr, "failed to teardown network")
}
}()
if _, err = cniObj.Setup(ctx, networkNamespace, namespacePath); err != nil {
return errors.Wrapf(err, "failed to setup network for namespace")
}
}
| // Create container and run task
container, err := createContainer(ctx, client, cliCtx, image, traceFile)
if err != nil {
return err
}
defer container.Delete(ctx, containerd.WithSnapshotCleanup)
task, err := tasks.NewTask(ctx, client, container, "", con, false, "", nil)
if err != nil {
return err
}
defer task.Delete(ctx)
if cliCtx.Bool("tty") {
if err := tasks.HandleConsoleResize(ctx, task, con); err != nil {
return errors.Wrapf(err, "failed to resize console")
}
}
var statusC <-chan containerd.ExitStatus
if statusC, err = task.Wait(ctx); err != nil {
return err
}
if err := task.Start(ctx); err != nil {
return err
}
fmt.Println("Task is running ...")
timer := time.NewTimer(time.Duration(cliCtx.Uint("time")) * time.Second)
watchStop := make(chan bool)
// Start a thread to watch timeout and signals
if !cliCtx.Bool("tty") {
go watchThread(ctx, timer, task, watchStop)
}
// Wait task stopped
status := <-statusC
if _, _, err := status.Result(); err != nil {
return errors.Wrapf(err, "failed to get exit status")
}
if timer.Stop() {
watchStop <- true
fmt.Println("Task finished before timeout ...")
}
collectTrace(traceFile)
// Load trace file into content, and generate an acceleration layer
//loader := newContentLoader(true, contentFile{traceFile, "trace"})
loader := newContentLoaderWithFsType(true, fsType, contentFile{traceFile, "trace"})
accelLayer, err := loader.Load(ctx, cs)
if err != nil {
return fmt.Errorf("loadCommittedSnapshotInContent failed: %v", err)
}
// Create image with the acceleration layer on top
newManifestDesc, err := createImageWithAccelLayer(ctx, cs, imageManifest, accelLayer)
if err != nil {
return fmt.Errorf("createImageWithAccelLayer failed: %v", err)
}
newImage := images.Image{
Name: cliCtx.Args().Get(1),
Target: newManifestDesc,
}
if err = createImage(ctx, client.ImageService(), newImage); err != nil {
return fmt.Errorf("createImage failed: %v", err)
}
fmt.Printf("New image %s is created\n", newRef)
return nil
},
}
func watchThread(ctx context.Context, timer *time.Timer, task containerd.Task, watchStop chan bool) {
// Allow termination by user signals
sigStop := make(chan bool)
sigChan := registerSignals(ctx, task, sigStop)
select {
case <-sigStop:
timer.Stop()
break
case <-watchStop:
break
case <-timer.C:
fmt.Println("Timeout, stop recording ...")
break
}
signal.Stop(sigChan)
close(sigChan)
st, err := task.Status(ctx)
if err != nil {
fmt.Printf("Failed to get task status: %v\n", err)
}
if st.Status == containerd.Running {
if err = task | random_line_split |
|
record_trace.go | "cni-plugin-dir",
Usage: "cni plugin dir",
Value: "/opt/cni/bin/",
},
},
Action: func(cliCtx *cli.Context) (err error) {
// Create client
client, ctx, cancel, err := commands.NewClient(cliCtx)
if err != nil {
return err
}
defer cancel()
cs := client.ContentStore()
var con console.Console
if cliCtx.Bool("tty") {
if cliCtx.Uint("time") != 0 {
return errors.New("Cannot assign tty and time at the same time")
}
con = console.Current()
defer con.Reset()
if err := con.SetRaw(); err != nil {
return err
}
}
// Validate arguments
ref := cliCtx.Args().Get(0)
if ref == "" {
return errors.New("image ref must be provided")
}
newRef := cliCtx.Args().Get(1)
if newRef == "" {
return errors.New("new image ref must be provided")
}
if _, err = client.ImageService().Get(ctx, ref); err == nil {
return errors.Errorf("Please remove old image %s first", ref)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", ref)
}
if _, err = client.ImageService().Get(ctx, newRef); err == nil {
return errors.Errorf("New image %s exists", newRef)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", newRef)
}
// Fetch image metadata by rpull
fetchConfig, err := ctrcontent.NewFetchConfig(ctx, cliCtx)
if err != nil {
return err
}
if err := rpull(ctx, client, ref, cliCtx.String("snapshotter"), fetchConfig); err != nil {
return errors.Wrapf(err, "Fail to pull image metadata")
}
// Get image instance
imgInstance, err := client.ImageService().Get(ctx, ref)
if err != nil {
return err
}
image := containerd.NewImage(client, imgInstance)
imageManifest, err := images.Manifest(ctx, cs, image.Target(), platforms.Default())
if err != nil {
return err
}
// Validate top layer
topLayer := imageManifest.Layers[len(imageManifest.Layers)-1]
if _, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-digest"]; !ok {
return errors.New("Must be an overlaybd image")
}
if topLayer.Annotations["containerd.io/snapshot/overlaybd/acceleration-layer"] == "yes" {
return errors.New("Acceleration layer already exists")
}
fsType, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-fs-type"]
if !ok {
fsType = ""
}
// Fetch all layer blobs into content
if _, err = ctrcontent.Fetch(ctx, client, ref, fetchConfig); err != nil {
return err
}
// Create trace file
if err := os.Mkdir(cliCtx.String("working-dir"), 0644); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "failed to create working dir")
}
traceFile := filepath.Join(cliCtx.String("working-dir"), uniqueObjectString())
if _, err := os.Create(traceFile); err != nil {
return errors.New("failed to create trace file")
}
defer os.Remove(traceFile)
// Create lease
ctx, deleteLease, err := client.WithLease(ctx,
leases.WithID(uniqueObjectString()),
leases.WithExpiration(1*time.Hour),
)
if err != nil {
return errors.Wrap(err, "failed to create lease")
}
defer deleteLease(ctx)
// Create isolated network
if !cliCtx.Bool("disable-network-isolation") {
networkNamespace = uniqueObjectString()
namespacePath = "/var/run/netns/" + networkNamespace
if err = exec.Command("ip", "netns", "add", networkNamespace).Run(); err != nil {
return errors.Wrapf(err, "failed to add netns")
}
defer func() {
if nextErr := exec.Command("ip", "netns", "delete", networkNamespace).Run(); err == nil && nextErr != nil {
err = errors.Wrapf(err, "failed to delete netns")
}
}()
cniObj, err := createIsolatedNetwork(cliCtx)
if err != nil {
return err
}
defer func() {
if nextErr := cniObj.Remove(ctx, networkNamespace, namespacePath); err == nil && nextErr != nil {
err = errors.Wrapf(nextErr, "failed to teardown network")
}
}()
if _, err = cniObj.Setup(ctx, networkNamespace, namespacePath); err != nil {
return errors.Wrapf(err, "failed to setup network for namespace")
}
}
// Create container and run task
container, err := createContainer(ctx, client, cliCtx, image, traceFile)
if err != nil {
return err
}
defer container.Delete(ctx, containerd.WithSnapshotCleanup)
task, err := tasks.NewTask(ctx, client, container, "", con, false, "", nil)
if err != nil {
return err
}
defer task.Delete(ctx)
if cliCtx.Bool("tty") {
if err := tasks.HandleConsoleResize(ctx, task, con); err != nil {
return errors.Wrapf(err, "failed to resize console")
}
}
var statusC <-chan containerd.ExitStatus
if statusC, err = task.Wait(ctx); err != nil {
return err
}
if err := task.Start(ctx); err != nil {
return err
}
fmt.Println("Task is running ...")
timer := time.NewTimer(time.Duration(cliCtx.Uint("time")) * time.Second)
watchStop := make(chan bool)
// Start a thread to watch timeout and signals
if !cliCtx.Bool("tty") {
go watchThread(ctx, timer, task, watchStop)
}
// Wait task stopped
status := <-statusC
if _, _, err := status.Result(); err != nil {
return errors.Wrapf(err, "failed to get exit status")
}
if timer.Stop() {
watchStop <- true
fmt.Println("Task finished before timeout ...")
}
collectTrace(traceFile)
// Load trace file into content, and generate an acceleration layer
//loader := newContentLoader(true, contentFile{traceFile, "trace"})
loader := newContentLoaderWithFsType(true, fsType, contentFile{traceFile, "trace"})
accelLayer, err := loader.Load(ctx, cs)
if err != nil {
return fmt.Errorf("loadCommittedSnapshotInContent failed: %v", err)
}
// Create image with the acceleration layer on top
newManifestDesc, err := createImageWithAccelLayer(ctx, cs, imageManifest, accelLayer)
if err != nil {
return fmt.Errorf("createImageWithAccelLayer failed: %v", err)
}
newImage := images.Image{
Name: cliCtx.Args().Get(1),
Target: newManifestDesc,
}
if err = createImage(ctx, client.ImageService(), newImage); err != nil {
return fmt.Errorf("createImage failed: %v", err)
}
fmt.Printf("New image %s is created\n", newRef)
return nil
},
}
func watchThread(ctx context.Context, timer *time.Timer, task containerd.Task, watchStop chan bool) {
// Allow termination by user signals
sigStop := make(chan bool)
sigChan := registerSignals(ctx, task, sigStop)
select {
case <-sigStop:
timer.Stop()
break
case <-watchStop:
break
case <-timer.C:
fmt.Println("Timeout, stop recording ...")
break
}
signal.Stop(sigChan)
close(sigChan)
st, err := task.Status(ctx)
if err != nil {
fmt.Printf("Failed to get task status: %v\n", err)
}
if st.Status == containerd.Running {
if err = task.Kill(ctx, unix.SIGTERM); err != nil {
fmt.Printf("Failed to kill task: %v\n", err)
}
}
}
func collectTrace(traceFile string) {
lockFile := traceFile + ".lock"
okFile := traceFile + ".ok"
if err := os.Remove(lockFile); err != nil && !os.IsNotExist(err) {
fmt.Printf("Remove lock file %s failed: %v\n", lockFile, err)
return
}
for {
time.Sleep(time.Second)
if _, err := os.Stat(okFile); err == nil {
fmt.Printf("Found OK file, trace is available now at %s\n", traceFile)
_ = os.Remove(okFile)
break
}
}
}
func | createImageWithAccelLayer | identifier_name |
|
record_trace.go | Name: "snapshotter",
Usage: "snapshotter name.",
Value: "overlaybd",
},
cli.StringFlag{
Name: "runtime",
Usage: "runtime name",
Value: defaults.DefaultRuntime,
},
cli.IntFlag{
Name: "max-concurrent-downloads",
Usage: "Set the max concurrent downloads for each pull",
Value: 8,
},
cli.BoolFlag{
Name: "tty,t",
Usage: "allocate a TTY for the container",
},
cli.BoolFlag{
Name: "disable-network-isolation",
Usage: "Do not use cni to provide network isolation, default is false",
},
cli.StringFlag{
Name: "cni-plugin-dir",
Usage: "cni plugin dir",
Value: "/opt/cni/bin/",
},
},
Action: func(cliCtx *cli.Context) (err error) {
// Create client
client, ctx, cancel, err := commands.NewClient(cliCtx)
if err != nil {
return err
}
defer cancel()
cs := client.ContentStore()
var con console.Console
if cliCtx.Bool("tty") {
if cliCtx.Uint("time") != 0 {
return errors.New("Cannot assign tty and time at the same time")
}
con = console.Current()
defer con.Reset()
if err := con.SetRaw(); err != nil {
return err
}
}
// Validate arguments
ref := cliCtx.Args().Get(0)
if ref == "" {
return errors.New("image ref must be provided")
}
newRef := cliCtx.Args().Get(1)
if newRef == "" {
return errors.New("new image ref must be provided")
}
if _, err = client.ImageService().Get(ctx, ref); err == nil {
return errors.Errorf("Please remove old image %s first", ref)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", ref)
}
if _, err = client.ImageService().Get(ctx, newRef); err == nil {
return errors.Errorf("New image %s exists", newRef)
} else if !errdefs.IsNotFound(err) {
return errors.Errorf("Fail to lookup image %s", newRef)
}
// Fetch image metadata by rpull
fetchConfig, err := ctrcontent.NewFetchConfig(ctx, cliCtx)
if err != nil {
return err
}
if err := rpull(ctx, client, ref, cliCtx.String("snapshotter"), fetchConfig); err != nil {
return errors.Wrapf(err, "Fail to pull image metadata")
}
// Get image instance
imgInstance, err := client.ImageService().Get(ctx, ref)
if err != nil {
return err
}
image := containerd.NewImage(client, imgInstance)
imageManifest, err := images.Manifest(ctx, cs, image.Target(), platforms.Default())
if err != nil {
return err
}
// Validate top layer
topLayer := imageManifest.Layers[len(imageManifest.Layers)-1]
if _, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-digest"]; !ok {
return errors.New("Must be an overlaybd image")
}
if topLayer.Annotations["containerd.io/snapshot/overlaybd/acceleration-layer"] == "yes" {
return errors.New("Acceleration layer already exists")
}
fsType, ok := topLayer.Annotations["containerd.io/snapshot/overlaybd/blob-fs-type"]
if !ok {
fsType = ""
}
// Fetch all layer blobs into content
if _, err = ctrcontent.Fetch(ctx, client, ref, fetchConfig); err != nil {
return err
}
// Create trace file
if err := os.Mkdir(cliCtx.String("working-dir"), 0644); err != nil && !os.IsExist(err) {
return errors.Wrapf(err, "failed to create working dir")
}
traceFile := filepath.Join(cliCtx.String("working-dir"), uniqueObjectString())
if _, err := os.Create(traceFile); err != nil {
return errors.New("failed to create trace file")
}
defer os.Remove(traceFile)
// Create lease
ctx, deleteLease, err := client.WithLease(ctx,
leases.WithID(uniqueObjectString()),
leases.WithExpiration(1*time.Hour),
)
if err != nil {
return errors.Wrap(err, "failed to create lease")
}
defer deleteLease(ctx)
// Create isolated network
if !cliCtx.Bool("disable-network-isolation") {
networkNamespace = uniqueObjectString()
namespacePath = "/var/run/netns/" + networkNamespace
if err = exec.Command("ip", "netns", "add", networkNamespace).Run(); err != nil {
return errors.Wrapf(err, "failed to add netns")
}
defer func() {
if nextErr := exec.Command("ip", "netns", "delete", networkNamespace).Run(); err == nil && nextErr != nil |
}()
cniObj, err := createIsolatedNetwork(cliCtx)
if err != nil {
return err
}
defer func() {
if nextErr := cniObj.Remove(ctx, networkNamespace, namespacePath); err == nil && nextErr != nil {
err = errors.Wrapf(nextErr, "failed to teardown network")
}
}()
if _, err = cniObj.Setup(ctx, networkNamespace, namespacePath); err != nil {
return errors.Wrapf(err, "failed to setup network for namespace")
}
}
// Create container and run task
container, err := createContainer(ctx, client, cliCtx, image, traceFile)
if err != nil {
return err
}
defer container.Delete(ctx, containerd.WithSnapshotCleanup)
task, err := tasks.NewTask(ctx, client, container, "", con, false, "", nil)
if err != nil {
return err
}
defer task.Delete(ctx)
if cliCtx.Bool("tty") {
if err := tasks.HandleConsoleResize(ctx, task, con); err != nil {
return errors.Wrapf(err, "failed to resize console")
}
}
var statusC <-chan containerd.ExitStatus
if statusC, err = task.Wait(ctx); err != nil {
return err
}
if err := task.Start(ctx); err != nil {
return err
}
fmt.Println("Task is running ...")
timer := time.NewTimer(time.Duration(cliCtx.Uint("time")) * time.Second)
watchStop := make(chan bool)
// Start a thread to watch timeout and signals
if !cliCtx.Bool("tty") {
go watchThread(ctx, timer, task, watchStop)
}
// Wait task stopped
status := <-statusC
if _, _, err := status.Result(); err != nil {
return errors.Wrapf(err, "failed to get exit status")
}
if timer.Stop() {
watchStop <- true
fmt.Println("Task finished before timeout ...")
}
collectTrace(traceFile)
// Load trace file into content, and generate an acceleration layer
//loader := newContentLoader(true, contentFile{traceFile, "trace"})
loader := newContentLoaderWithFsType(true, fsType, contentFile{traceFile, "trace"})
accelLayer, err := loader.Load(ctx, cs)
if err != nil {
return fmt.Errorf("loadCommittedSnapshotInContent failed: %v", err)
}
// Create image with the acceleration layer on top
newManifestDesc, err := createImageWithAccelLayer(ctx, cs, imageManifest, accelLayer)
if err != nil {
return fmt.Errorf("createImageWithAccelLayer failed: %v", err)
}
newImage := images.Image{
Name: cliCtx.Args().Get(1),
Target: newManifestDesc,
}
if err = createImage(ctx, client.ImageService(), newImage); err != nil {
return fmt.Errorf("createImage failed: %v", err)
}
fmt.Printf("New image %s is created\n", newRef)
return nil
},
}
func watchThread(ctx context.Context, timer *time.Timer, task containerd.Task, watchStop chan bool) {
// Allow termination by user signals
sigStop := make(chan bool)
sigChan := registerSignals(ctx, task, sigStop)
select {
case <-sigStop:
timer.Stop()
break
case <-watchStop:
break
case <-timer.C:
fmt.Println("Timeout, stop recording ...")
break
}
signal.Stop(sigChan)
close(sigChan)
st, err := task.Status(ctx)
if err != nil {
fmt.Printf("Failed to get task status: %v\n", err)
}
if st.Status == containerd.Running {
if err = | {
err = errors.Wrapf(err, "failed to delete netns")
} | conditional_block |
parser.js | 5', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013'];
function fetchRunnersFromPage(error, callback, url, start, year) {
var options = {
'url': url,
'form': {
'start':start,
'next':'Next 25 Records'
},
'headers': {
'User-Agent':"Rested/2009 CFNetwork/673.2.1 Darwin/13.1.0 (x86_64) (MacBookPro11%2C2)"
}
}
request.post(
options,
function (err, httpResponse, body) {
if (body == null) {
callback([]);
}
jsdom.env(
{
html: body,
scripts: [
'http://code.jquery.com/jquery-1.5.min.js'
],
done: function (err, window) {
var $ = window.jQuery;
var runners = [];
var lastRunnerWithHeader;
$($($('.tablegrid_table')[0]).find('tbody')[0]).find('tr').each(function(trIndex, row) {
var c = $(row).attr('class');
if (c === 'tr_header') {
var runner = parseRunnerHeader($, row);
lastRunnerWithHeader = runner;
//console.log(runner);
} else {
var runner = parseRunnerBody($, row, lastRunnerWithHeader);
if (runner) {
runner.year = year;
runners.push(runner);
}
}
});
callback(runners);
}
}
);
}
);
}
function runYear(error, callback, year) {
var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
var yearsRunners = [];
runYearRecursive(
error,
callback,
url,
start,
year
);
}
function runYearRecursive(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
if (pagesRunners.length < 25) {
// We're on the last page
callback(pagesRunners);
} else {
// Go get the next page
console.log(pagesRunners.length + " runners from start: " + start);
runYearRecursive(
error,
function (recursiveRunners) {
// add the next page's list onto the end of ours
var runners = pagesRunners.concat(recursiveRunners);
callback(runners);
},
url,
start + 25,
year
);
}
},
url,
start,
year
);
}
function runYearSafe(error, callback, year) {
var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
//var start = 21001;
var yearsRunners = [];
var outputFileNumber = 1;
//var outputFileNumber = 22;
function save () {
saveRunners(yearsRunners, 'marathonResults' + year + '-' + outputFileNumber + '.json');
outputFileNumber += 1;
yearsRunners = [];
}
function doNext (runners) {
yearsRunners = yearsRunners.concat(runners);
if (runners.length < 25) {
// We're on the last page
save();
callback();
} else {
// See if we should save
if (yearsRunners.length == 1000) {
save();
}
// Go get the next page
start += 25;
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
}
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
function runYearSubproblem(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
console.log(pagesRunners.length + " runners from subproblem start: " + start);
callback(pagesRunners);
},
url,
start,
year
);
}
function runAllYears(error, callback) {
var currentYearIndex = 0;
var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']
var runners = [];
function handleError (err) {
currentYearIndex += 1;
}
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
}
runYear(
handleError,
function (runners) {
var thatYear = years[currentYearIndex];
console.log("### " + runners.length + " runners in year " + thatYear);
currentYearIndex += 1;
saveRunners(runners, 'marathonResults' + thatYear + '.json')
doNext();
},
years[currentYearIndex]
);
}
doNext();
}
function runYearsSafe(error, callback, years) {
var currentYearIndex = 0;
var runners = [];
function handleError (err) |
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
| {
currentYearIndex += 1;
} | identifier_body |
parser.js | 05', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013'];
function fetchRunnersFromPage(error, callback, url, start, year) {
var options = {
'url': url,
'form': {
'start':start,
'next':'Next 25 Records'
},
'headers': {
'User-Agent':"Rested/2009 CFNetwork/673.2.1 Darwin/13.1.0 (x86_64) (MacBookPro11%2C2)"
}
}
request.post(
options,
function (err, httpResponse, body) {
if (body == null) {
callback([]);
}
jsdom.env(
{
html: body,
scripts: [
'http://code.jquery.com/jquery-1.5.min.js'
],
done: function (err, window) {
var $ = window.jQuery;
var runners = [];
var lastRunnerWithHeader;
$($($('.tablegrid_table')[0]).find('tbody')[0]).find('tr').each(function(trIndex, row) {
var c = $(row).attr('class');
if (c === 'tr_header') {
var runner = parseRunnerHeader($, row);
lastRunnerWithHeader = runner;
//console.log(runner); | }
}
});
callback(runners);
}
}
);
}
);
}
function runYear(error, callback, year) {
var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
var yearsRunners = [];
runYearRecursive(
error,
callback,
url,
start,
year
);
}
function runYearRecursive(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
if (pagesRunners.length < 25) {
// We're on the last page
callback(pagesRunners);
} else {
// Go get the next page
console.log(pagesRunners.length + " runners from start: " + start);
runYearRecursive(
error,
function (recursiveRunners) {
// add the next page's list onto the end of ours
var runners = pagesRunners.concat(recursiveRunners);
callback(runners);
},
url,
start + 25,
year
);
}
},
url,
start,
year
);
}
function runYearSafe(error, callback, year) {
var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
//var start = 21001;
var yearsRunners = [];
var outputFileNumber = 1;
//var outputFileNumber = 22;
function save () {
saveRunners(yearsRunners, 'marathonResults' + year + '-' + outputFileNumber + '.json');
outputFileNumber += 1;
yearsRunners = [];
}
function doNext (runners) {
yearsRunners = yearsRunners.concat(runners);
if (runners.length < 25) {
// We're on the last page
save();
callback();
} else {
// See if we should save
if (yearsRunners.length == 1000) {
save();
}
// Go get the next page
start += 25;
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
}
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
function runYearSubproblem(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
console.log(pagesRunners.length + " runners from subproblem start: " + start);
callback(pagesRunners);
},
url,
start,
year
);
}
function runAllYears(error, callback) {
var currentYearIndex = 0;
var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']
var runners = [];
function handleError (err) {
currentYearIndex += 1;
}
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
}
runYear(
handleError,
function (runners) {
var thatYear = years[currentYearIndex];
console.log("### " + runners.length + " runners in year " + thatYear);
currentYearIndex += 1;
saveRunners(runners, 'marathonResults' + thatYear + '.json')
doNext();
},
years[currentYearIndex]
);
}
doNext();
}
function runYearsSafe(error, callback, years) {
var currentYearIndex = 0;
var runners = [];
function handleError (err) {
currentYearIndex += 1;
}
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
}
| } else {
var runner = parseRunnerBody($, row, lastRunnerWithHeader);
if (runner) {
runner.year = year;
runners.push(runner); | random_line_split |
parser.js | FirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
var yearsRunners = [];
runYearRecursive(
error,
callback,
url,
start,
year
);
}
function runYearRecursive(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
if (pagesRunners.length < 25) {
// We're on the last page
callback(pagesRunners);
} else {
// Go get the next page
console.log(pagesRunners.length + " runners from start: " + start);
runYearRecursive(
error,
function (recursiveRunners) {
// add the next page's list onto the end of ours
var runners = pagesRunners.concat(recursiveRunners);
callback(runners);
},
url,
start + 25,
year
);
}
},
url,
start,
year
);
}
function runYearSafe(error, callback, year) {
var url = "http://registration.baa.org/cfm_Archive/iframe_ArchiveSearch.cfm?mode=results&criteria=&StoredProcParamsOn=yes&VarAgeLowID=0&VarAgeHighID=0&VarGenderID=0&VarBibNumber=&VarLastName=&VarFirstName=&VarStateID=0&VarCountryOfResidenceID=0&VarCity=&VarZip=&VarTimeLowHr=&VarTimeLowMin=&VarTimeLowSec=00&VarTimeHighHr=&VarTimeHighMin=&VarTimeHighSec=59&VarSortOrder=ByName&VarAddInactiveYears=0&records=25&headerexists=Yes&bordersize=0&bordercolor=%23ffffff&rowcolorone=%23FFCC33&rowcolortwo=%23FFCC33&headercolor=%23ffffff&headerfontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&headerfontcolor=%23004080&headerfontsize=12px&fontface=Verdana%2CArial%2CHelvetica%2Csans%2Dserif&fontcolor=%23000099&fontsize=10px&linkfield=&linkurl=&linkparams=&queryname=SearchResults&tablefields=RaceYear%2CFullBibNumber%2CFormattedSortName%2CAgeOnRaceDay%2CGenderCode%2CCity%2CStateAbbrev%2CCountryOfResAbbrev%2CReportingSegment&VarRaceYearLowID=" + year + "&VarRaceYearHighID=0";
var start = 1;
//var start = 21001;
var yearsRunners = [];
var outputFileNumber = 1;
//var outputFileNumber = 22;
function save () {
saveRunners(yearsRunners, 'marathonResults' + year + '-' + outputFileNumber + '.json');
outputFileNumber += 1;
yearsRunners = [];
}
function doNext (runners) {
yearsRunners = yearsRunners.concat(runners);
if (runners.length < 25) {
// We're on the last page
save();
callback();
} else {
// See if we should save
if (yearsRunners.length == 1000) {
save();
}
// Go get the next page
start += 25;
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
}
runYearSubproblem (
error,
doNext,
url,
start,
year
);
}
function runYearSubproblem(error, callback, url, start, year) {
fetchRunnersFromPage(
function (err) {
console.log(err);
error(err);
},
function (pagesRunners) {
console.log(pagesRunners.length + " runners from subproblem start: " + start);
callback(pagesRunners);
},
url,
start,
year
);
}
function runAllYears(error, callback) {
var currentYearIndex = 0;
var years = ['2001', '2002', '2003', '2004', '2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013']
var runners = [];
function handleError (err) {
currentYearIndex += 1;
}
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
}
runYear(
handleError,
function (runners) {
var thatYear = years[currentYearIndex];
console.log("### " + runners.length + " runners in year " + thatYear);
currentYearIndex += 1;
saveRunners(runners, 'marathonResults' + thatYear + '.json')
doNext();
},
years[currentYearIndex]
);
}
doNext();
}
function runYearsSafe(error, callback, years) {
var currentYearIndex = 0;
var runners = [];
function handleError (err) {
currentYearIndex += 1;
}
function doNext() {
if (currentYearIndex >= years.length) {
console.log("Done");
callback(runners);
return;
}
runYearSafe(
handleError,
function () {
currentYearIndex += 1;
doNext();
},
years[currentYearIndex]
);
}
doNext();
}
function runAllYearsSafe(error, callback) {
runYearsSafe(error, callback, YEARS);
}
function saveRunners(runners, outputFilename, indent) {
if (indent == undefined) {
indent = 4;
}
console.log("Saving to " + outputFilename + "...");
try {
fs.writeFile(
outputFilename,
JSON.stringify(runners, null, indent),
function(err) {
if(err) {
console.log(err);
} else {
console.log("JSON saved to " + outputFilename + " with " + runners.length + " runners");
}
}
);
} catch (e) {
console.log("ooops");
console.log("got in catch loop for " + outputFilename);
console.log(e);
}
}
function loadRunnersFromFile (fileName) {
console.log("loading from " + fileName + "...");
var runners = JSON.parse(fs.readFileSync(fileName, 'utf8'));
console.log("Loaded " + runners.length + " runners from " + fileName);
return runners;
}
function loadRunnersFromYear (year) {
var fileName = 'marathonResults' + year + '.json';
var runners = loadRunnersFromFile(fileName);
return runners;
}
function stitchYearTogether (year) {
fileNamesArray = fs.readdirSync('./');
fileNames = {};
for (var i = fileNamesArray.length - 1; i >= 0; i--) {
fileNames[fileNamesArray[i]] = true;
};
var runners = [];
var outputFileNumber = 1;
while (true) {
var nextName = 'marathonResults' + year + '-' + outputFileNumber + '.json';
console.log(nextName);
if (nextName in fileNames) {
var obj = loadRunnersFromFile(nextName);
runners = runners.concat(obj);
outputFileNumber += 1;
} else {
break;
}
}
var outputFilename = 'marathonResults' + year + '.json';
saveRunners(runners, outputFilename);
}
function | stitchAllYearsTogether | identifier_name |